From c5d6decfaf4c8be907975531f7d7bd41dda2da4e Mon Sep 17 00:00:00 2001 From: dpavlov Date: Wed, 4 Oct 2017 09:46:16 +0300 Subject: [PATCH 001/243] IGNITE-6285 Enhance persistent store paths handling - Fixes #2775. Signed-off-by: Alexey Goncharuk --- .../FoldersReuseCompatibilityTest.java | 224 ++++++ ...ePersistenceCompatibilityAbstractTest.java | 3 + .../IgniteCompatibilityBasicTestSuite.java | 3 + .../apache/ignite/IgniteSystemProperties.java | 7 + .../ignite/internal/GridKernalContext.java | 6 + .../internal/GridKernalContextImpl.java | 17 +- .../apache/ignite/internal/IgniteKernal.java | 4 +- .../discovery/GridDiscoveryManager.java | 55 +- .../cache/binary/BinaryMetadataFileStore.java | 6 +- .../GridCacheDatabaseSharedManager.java | 91 ++- .../IgniteCacheDatabaseSharedManager.java | 13 +- .../file/FilePageStoreManager.java | 29 +- .../filename/PdsConsistentIdProcessor.java | 568 ++++++++++++++ .../filename/PdsFolderSettings.java | 138 ++++ .../filename/PdsFoldersResolver.java | 33 + .../wal/FileWriteAheadLogManager.java | 20 +- .../reader/StandaloneGridKernalContext.java | 32 +- .../spi/discovery/tcp/TcpDiscoverySpi.java | 4 +- .../tcp/internal/TcpDiscoveryNode.java | 15 + .../IgniteUidAsConsistentIdMigrationTest.java | 712 ++++++++++++++++++ .../db/wal/IgniteWalRecoveryTest.java | 13 +- .../db/wal/reader/IgniteWalReaderTest.java | 187 +++-- .../db/wal/reader/MockWalIteratorFactory.java | 25 +- .../testsuites/IgnitePdsTestSuite2.java | 4 + 24 files changed, 1995 insertions(+), 214 deletions(-) create mode 100644 modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/FoldersReuseCompatibilityTest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsFolderSettings.java create mode 100644 
modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsFoldersResolver.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/filename/IgniteUidAsConsistentIdMigrationTest.java diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/FoldersReuseCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/FoldersReuseCompatibilityTest.java new file mode 100644 index 0000000000000..177501331a871 --- /dev/null +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/FoldersReuseCompatibilityTest.java @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.compatibility.persistence; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Set; +import java.util.TreeSet; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.GridCacheAbstractFullApiSelfTest; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteInClosure; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.jetbrains.annotations.NotNull; + +import static org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor.parseSubFolderName; + +/** + * Test for new and old style persistent storage folders generation and compatible startup of current ignite version + */ +public class FoldersReuseCompatibilityTest extends IgnitePersistenceCompatibilityAbstractTest { + /** Cache name for test. */ + private static final String CACHE_NAME = "dummy"; + + /** Key to store in previous version of ignite */ + private static final String KEY = "ObjectFromPast"; + + /** Value to store in previous version of ignite */ + private static final String VAL = "ValueFromPast"; + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + // No-op. 
super.afterTest(); + stopAllGrids(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + final IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + configPersistence(cfg); + + return cfg; + } + + /** + * Test startup of current ignite version using DB storage folder from previous version of Ignite. Expected to start + * successfully with existing DB + * + * @throws Exception if failed. + */ + public void testFoldersReuseCompatibility_2_2() throws Exception { + runFoldersReuse("2.2.0"); + } + + /** + * Test startup of current ignite version using DB storage folder from previous version of Ignite. Expected to start + * successfully with existing DB + * + * @throws Exception if failed. + */ + public void testFoldersReuseCompatibility_2_1() throws Exception { + runFoldersReuse("2.1.0"); + } + + /** + * Test startup of current ignite version using DB storage folder from previous version of Ignite. Expected to start + * successfully with existing DB + * + * @param ver 3 digit Ignite version to check compatibility with + * @throws Exception if failed. + */ + private void runFoldersReuse(String ver) throws Exception { + final IgniteEx grid = startGrid(1, ver, new ConfigurationClosure(), new PostStartupClosure()); + + grid.close(); + stopAllGrids(); + + IgniteEx ignite = startGrid(0); + + ignite.active(true); + ignite.getOrCreateCache("cache2createdForNewGrid").put("Object", "Value"); + assertEquals(1, ignite.context().discovery().topologyVersion()); + + assertPdsDirsDefaultExist(U.maskForFileName(ignite.cluster().node().consistentId().toString())); + + assertEquals(VAL, ignite.cache(CACHE_NAME).get(KEY)); + + assertNodeIndexesInFolder();// should not create any new style directories + + stopAllGrids(); + } + + /** Started node test actions closure. 
*/ + private static class PostStartupClosure implements IgniteInClosure { + /** {@inheritDoc} */ + @Override public void apply(Ignite ignite) { + ignite.active(true); + ignite.getOrCreateCache(CACHE_NAME).put(KEY, VAL); + } + } + + /** Setup compatible node closure. */ + private static class ConfigurationClosure implements IgniteInClosure { + /** {@inheritDoc} */ + @Override public void apply(IgniteConfiguration cfg) { + cfg.setLocalHost("127.0.0.1"); + TcpDiscoverySpi disco = new TcpDiscoverySpi(); + disco.setIpFinder(GridCacheAbstractFullApiSelfTest.LOCAL_IP_FINDER); + + cfg.setDiscoverySpi(disco); + + configPersistence(cfg); + } + } + + /** + * Setup persistence for compatible and current version node. + * + * @param cfg ignite config to setup. + */ + private static void configPersistence(IgniteConfiguration cfg) { + final PersistentStoreConfiguration psCfg = new PersistentStoreConfiguration(); + + cfg.setPersistentStoreConfiguration(psCfg); + + final MemoryConfiguration memCfg = new MemoryConfiguration(); + final MemoryPolicyConfiguration memPolCfg = new MemoryPolicyConfiguration(); + + memPolCfg.setMaxSize(32 * 1024 * 1024); // we don't need much memory for this test + memCfg.setMemoryPolicies(memPolCfg); + cfg.setMemoryConfiguration(memCfg); + } + + /** + * @param indexes expected new style node indexes in folders + * @throws IgniteCheckedException if failed + */ + private void assertNodeIndexesInFolder(Integer... indexes) throws IgniteCheckedException { + assertEquals(new TreeSet<>(Arrays.asList(indexes)), getAllNodeIndexesInFolder()); + } + + /** + * @return set of all indexes of nodes found in work folder + * @throws IgniteCheckedException if failed. 
+ */ + @NotNull private Set getAllNodeIndexesInFolder() throws IgniteCheckedException { + final File curFolder = new File(U.defaultWorkDirectory(), PdsConsistentIdProcessor.DB_DEFAULT_FOLDER); + final Set indexes = new TreeSet<>(); + final File[] files = curFolder.listFiles(PdsConsistentIdProcessor.DB_SUBFOLDERS_NEW_STYLE_FILTER); + + for (File file : files) { + final PdsConsistentIdProcessor.FolderCandidate uid + = parseSubFolderName(file, log); + + if (uid != null) + indexes.add(uid.nodeIndex()); + } + + return indexes; + } + + /** + * Checks existence of all storage-related directories + * + * @param subDirName sub directories name expected + * @throws IgniteCheckedException if IO error occur + */ + private void assertPdsDirsDefaultExist(String subDirName) throws IgniteCheckedException { + assertDirectoryExist("binary_meta", subDirName); + assertDirectoryExist(PersistentStoreConfiguration.DFLT_WAL_STORE_PATH, subDirName); + assertDirectoryExist(PersistentStoreConfiguration.DFLT_WAL_ARCHIVE_PATH, subDirName); + assertDirectoryExist(PdsConsistentIdProcessor.DB_DEFAULT_FOLDER, subDirName); + } + + /** + * Checks one folder existence + * + * @param subFolderNames subfolders array to touch + * @throws IgniteCheckedException if IO error occur + */ + private void assertDirectoryExist(String... 
subFolderNames) throws IgniteCheckedException { + File curFolder = new File(U.defaultWorkDirectory()); + + for (String name : subFolderNames) { + curFolder = new File(curFolder, name); + } + + final String path; + try { + path = curFolder.getCanonicalPath(); + } + catch (IOException e) { + throw new IgniteCheckedException("Failed to convert path: [" + curFolder.getAbsolutePath() + "]", e); + } + + assertTrue("Directory " + Arrays.asList(subFolderNames).toString() + + " is expected to exist [" + path + "]", curFolder.exists() && curFolder.isDirectory()); + } + +} diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/IgnitePersistenceCompatibilityAbstractTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/IgnitePersistenceCompatibilityAbstractTest.java index d76b8626163be..f39b6f646c445 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/IgnitePersistenceCompatibilityAbstractTest.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/IgnitePersistenceCompatibilityAbstractTest.java @@ -45,6 +45,9 @@ public abstract class IgnitePersistenceCompatibilityAbstractTest extends IgniteC @Override protected void afterTest() throws Exception { super.afterTest(); + //protection if test failed to finish, e.g. 
by error + stopAllGrids(); + assert deleteDefaultDBWorkDirectory() : "Couldn't delete DB work directory."; } diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java index b54b396845993..351a0e7429701 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java @@ -19,6 +19,7 @@ import junit.framework.TestSuite; import org.apache.ignite.compatibility.persistence.DummyPersistenceCompatibilityTest; +import org.apache.ignite.compatibility.persistence.FoldersReuseCompatibilityTest; /** * Compatibility tests basic test suite. @@ -33,6 +34,8 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(DummyPersistenceCompatibilityTest.class); + suite.addTestSuite(FoldersReuseCompatibilityTest.class); + return suite; } } diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java index e1e72f7c2e9d8..d7b4de95c6ef7 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java @@ -736,6 +736,13 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_WAL_SERIALIZER_VERSION = "IGNITE_WAL_SERIALIZER_VERSION"; + /** + * When set to {@code true}, Data store folders are generated only by consistent id, and no consistent ID will be + * set based on existing data store folders. This option also enables compatible folder generation mode as it was + * before 2.3. 
+ */ + public static final String IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID = "IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID"; + /** * Enforces singleton. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java index 99c7ccefa4677..210b401ac26de 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java @@ -34,6 +34,7 @@ import org.apache.ignite.internal.managers.loadbalancer.GridLoadBalancerManager; import org.apache.ignite.internal.processors.affinity.GridAffinityProcessor; import org.apache.ignite.internal.processors.cache.GridCacheProcessor; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFoldersResolver; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.processors.closure.GridClosureProcessor; import org.apache.ignite.internal.processors.cluster.ClusterProcessor; @@ -643,4 +644,9 @@ public interface GridKernalContext extends Iterable { * @return Platform processor. 
*/ public PlatformProcessor platform(); + + /** + * @return PDS mode folder name resolver, also generates consistent ID in case new folder naming is used + */ + public PdsFoldersResolver pdsFolderResolver(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java index 07e5970fc588b..1f0292c912147 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java @@ -49,6 +49,7 @@ import org.apache.ignite.internal.processors.cache.CacheConflictResolutionManager; import org.apache.ignite.internal.processors.cache.GridCacheProcessor; import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFoldersResolver; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.processors.closure.GridClosureProcessor; import org.apache.ignite.internal.processors.cluster.ClusterProcessor; @@ -377,6 +378,9 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable /** */ private volatile boolean disconnected; + /** PDS mode folder name resolver, also generates consistent ID in case new folder naming is used */ + private PdsFoldersResolver pdsFolderRslvr; + /** * No-arg constructor is required by externalization. 
*/ @@ -536,7 +540,7 @@ else if (comp instanceof GridJobMetricsProcessor) else if (comp instanceof GridCacheProcessor) cacheProc = (GridCacheProcessor)comp; else if (comp instanceof GridClusterStateProcessor) - stateProc =(GridClusterStateProcessor)comp; + stateProc = (GridClusterStateProcessor)comp; else if (comp instanceof GridTaskSessionProcessor) sesProc = (GridTaskSessionProcessor)comp; else if (comp instanceof GridPortProcessor) @@ -576,9 +580,11 @@ else if (comp instanceof ClusterProcessor) else if (comp instanceof PlatformProcessor) platformProc = (PlatformProcessor)comp; else if (comp instanceof PoolProcessor) - poolProc = (PoolProcessor) comp; + poolProc = (PoolProcessor)comp; else if (comp instanceof GridMarshallerMappingProcessor) mappingProc = (GridMarshallerMappingProcessor)comp; + else if (comp instanceof PdsFoldersResolver) + pdsFolderRslvr = (PdsFoldersResolver)comp; else if (!(comp instanceof DiscoveryNodeValidationProcessor || comp instanceof PlatformPluginProcessor)) assert (comp instanceof GridPluginComponent) : "Unknown manager class: " + comp.getClass(); @@ -1005,7 +1011,7 @@ protected Object readResolve() throws ObjectStreamException { } /** {@inheritDoc} */ - public Map customExecutors() { + @Override public Map customExecutors() { return customExecSvcs; } @@ -1069,6 +1075,11 @@ void disconnected(boolean disconnected) { this.disconnected = disconnected; } + /**{@inheritDoc}*/ + @Override public PdsFoldersResolver pdsFolderResolver() { + return pdsFolderRslvr; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(GridKernalContextImpl.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java index f5d736a67f3fa..759bf643af94c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java @@ -115,6 +115,7 
@@ import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl; import org.apache.ignite.internal.processors.cache.persistence.MemoryPolicy; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.processors.closure.GridClosureProcessor; import org.apache.ignite.internal.processors.cluster.ClusterProcessor; @@ -930,13 +931,14 @@ public void start( // Assign discovery manager to context before other processors start so they // are able to register custom event listener. - GridManager discoMgr = new GridDiscoveryManager(ctx); + final GridManager discoMgr = new GridDiscoveryManager(ctx); ctx.add(discoMgr, false); // Start processors before discovery manager, so they will // be able to start receiving messages once discovery completes. 
try { + startProcessor(new PdsConsistentIdProcessor(ctx)); startProcessor(createComponent(DiscoveryNodeValidationProcessor.class, ctx)); startProcessor(new GridAffinityProcessor(ctx)); startProcessor(createComponent(GridSegmentationProcessor.class, ctx)); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java index 527399dc56a53..14485d2625f27 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.managers.discovery; +import java.io.Serializable; import java.lang.management.GarbageCollectorMXBean; import java.lang.management.ManagementFactory; import java.lang.management.MemoryMXBean; @@ -116,6 +117,7 @@ import org.apache.ignite.spi.discovery.DiscoverySpiNodeAuthenticator; import org.apache.ignite.spi.discovery.DiscoverySpiOrderSupport; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNode; import org.apache.ignite.thread.IgniteThread; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -281,12 +283,12 @@ public class GridDiscoveryManager extends GridManagerAdapter { /** */ private final CountDownLatch startLatch = new CountDownLatch(1); - /** */ - private Object consistentId; - /** Discovery spi registered flag. */ private boolean registeredDiscoSpi; + /** Local node compatibility consistent ID. */ + private Serializable consistentId; + /** @param ctx Context. 
*/ public GridDiscoveryManager(GridKernalContext ctx) { super(ctx, ctx.config().getDiscoverySpi()); @@ -549,6 +551,13 @@ private void updateClientNodes(UUID leftNodeId) { @Override public void onLocalNodeInitialized(ClusterNode locNode) { for (IgniteInClosure lsnr : locNodeInitLsnrs) lsnr.apply(locNode); + + if (locNode instanceof TcpDiscoveryNode) { + final TcpDiscoveryNode node = (TcpDiscoveryNode)locNode; + + if (consistentId != null) + node.setConsistentId(consistentId); + } } @Override public void onDiscovery( @@ -2017,22 +2026,42 @@ public ClusterNode localNode() { /** * @return Consistent ID. + * @deprecated Use PdsConsistentIdProcessor to get actual consistent ID */ - public Object consistentId() { - if (consistentId == null) { - try { - inject(); - } - catch (IgniteCheckedException e) { - throw new IgniteException("Failed to init consistent ID.", e); - } + @Deprecated + public Serializable consistentId() { + if (consistentId == null) + consistentId = getInjectedDiscoverySpi().consistentId(); + + return consistentId; + } - consistentId = getSpi().consistentId(); + /** + * Performs injection of discovery SPI if needed, then provides DiscoverySpi SPI. + * Manual injection is required because normal startup of SPI is done after processor started. + * + * @return Wrapped DiscoverySpi SPI. + */ + private DiscoverySpi getInjectedDiscoverySpi() { + try { + inject(); + } + catch (IgniteCheckedException e) { + throw new IgniteException("Failed to init consistent ID.", e); } + return getSpi(); + } - return consistentId; + /** + * Sets TCP local node consistent ID. This setter is to be called before node init in SPI + * + * @param consistentId New value of consistent ID to be used in local node initialization + */ + public void consistentId(final Serializable consistentId) { + this.consistentId = consistentId; } + /** @return Topology version. 
*/ public long topologyVersion() { return topSnap.get().topVer.topologyVersion(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java index 2d4114f302f36..420cde5dffc0f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java @@ -34,7 +34,7 @@ * which may lead to segmentation of nodes from cluster. */ class BinaryMetadataFileStore { - /** */ + /** Link to resolved binary metadata directory. Null for non persistent mode */ private File workDir; /** */ @@ -68,14 +68,14 @@ class BinaryMetadataFileStore { if (binaryMetadataFileStoreDir != null) workDir = binaryMetadataFileStoreDir; else { - String consId = U.maskForFileName(ctx.discovery().consistentId().toString()); + final String subFolder = ctx.pdsFolderResolver().resolveFolders().folderName(); workDir = new File(U.resolveWorkDirectory( ctx.config().getWorkDirectory(), "binary_meta", false ), - consId); + subFolder); } U.ensureDirectory(workDir, "directory for serialized binary metadata", log); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index 9f1ccb4faaa91..2d89942cb4da2 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -62,6 +62,7 @@ import org.apache.ignite.IgniteLogger; import org.apache.ignite.IgniteSystemProperties; import 
org.apache.ignite.PersistenceMetrics; +import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CheckpointWriteOrder; import org.apache.ignite.configuration.DataPageEvictionMode; import org.apache.ignite.configuration.IgniteConfiguration; @@ -74,6 +75,7 @@ import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.IgniteInterruptedCheckedException; import org.apache.ignite.internal.NodeStoppingException; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; import org.apache.ignite.internal.mem.DirectMemoryProvider; import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.PageIdUtils; @@ -280,8 +282,11 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** */ private final ConcurrentMap idxRebuildFuts = new ConcurrentHashMap<>(); - /** Lock holder. */ - private FileLockHolder fileLockHolder; + /** + * Lock holder for compatible folders mode. Null if lock holder was created at start node.
+ * In this case lock is held on PDS resover manager and it is not required to manage locking here + */ + @Nullable private FileLockHolder fileLockHolder; /** Lock wait time. */ private final long lockWaitTime; @@ -367,7 +372,9 @@ public IgniteInternalFuture enableCheckpoints(boolean enable) { snapshotMgr = cctx.snapshot(); - if (!cctx.kernalContext().clientNode()) { + final GridKernalContext kernalCtx = cctx.kernalContext(); + + if (!kernalCtx.clientNode()) { IgnitePageStoreManager store = cctx.pageStore(); assert store instanceof FilePageStoreManager : "Invalid page store manager was created: " + store; @@ -379,7 +386,11 @@ public IgniteInternalFuture enableCheckpoints(boolean enable) { if (!U.mkdirs(cpDir)) throw new IgniteCheckedException("Could not create directory for checkpoint metadata: " + cpDir); - fileLockHolder = new FileLockHolder(storeMgr.workDir().getPath(), cctx.kernalContext(), log); + final FileLockHolder preLocked = kernalCtx.pdsFolderResolver() + .resolveFolders() + .getLockedFileLockHolder(); + if (preLocked == null) + fileLockHolder = new FileLockHolder(storeMgr.workDir().getPath(), kernalCtx, log); persStoreMetrics.wal(cctx.wal()); } @@ -488,8 +499,11 @@ public static long checkpointBufferSize(IgniteConfiguration cfg) { /* Must be here, because after deactivate we can invoke activate and file lock must be already configured */ stopping = false; - if (!cctx.localNode().isClient()) - fileLockHolder = new FileLockHolder(storeMgr.workDir().getPath(), cctx.kernalContext(), log); + if (!cctx.localNode().isClient()) { + //we replace lock with new instance (only if we're responsible for locking folders) + if (fileLockHolder != null) + fileLockHolder = new FileLockHolder(storeMgr.workDir().getPath(), cctx.kernalContext(), log); + } } /** @@ -592,20 +606,24 @@ private void unRegistrateMetricsMBean() { /** {@inheritDoc} */ @Override public void lock() throws IgniteCheckedException { - if (log.isDebugEnabled()) - log.debug("Try to capture file lock 
[nodeId=" + - cctx.localNodeId() + " path=" + fileLockHolder.lockPath() + "]"); + if (fileLockHolder != null) { + if (log.isDebugEnabled()) + log.debug("Try to capture file lock [nodeId=" + + cctx.localNodeId() + " path=" + fileLockHolder.lockPath() + "]"); - fileLockHolder.tryLock(lockWaitTime); + fileLockHolder.tryLock(lockWaitTime); + } } /** {@inheritDoc} */ @Override public void unLock() { - if (log.isDebugEnabled()) - log.debug("Release file lock [nodeId=" + - cctx.localNodeId() + " path=" + fileLockHolder.lockPath() + "]"); + if (fileLockHolder != null) { + if (log.isDebugEnabled()) + log.debug("Release file lock [nodeId=" + + cctx.localNodeId() + " path=" + fileLockHolder.lockPath() + "]"); - fileLockHolder.release(); + fileLockHolder.release(); + } } /** {@inheritDoc} */ @@ -628,7 +646,8 @@ private void unRegistrateMetricsMBean() { if (!cctx.kernalContext().clientNode()) { unLock(); - fileLockHolder.close(); + if (fileLockHolder != null) + fileLockHolder.close(); } unRegistrateMetricsMBean(); @@ -875,7 +894,7 @@ private void shutdownCheckpointer(boolean cancel) { idxRebuildFuts.remove(cacheId, rebuildFut); log().info("Finished indexes rebuilding for cache: [name=" + cacheCtx.config().getName() - + ", grpName=" + cacheCtx.config().getGroupName()); + + ", grpName=" + cacheCtx.config().getGroupName()); } }); } @@ -1189,7 +1208,6 @@ private void restoreState() throws IgniteCheckedException { /** * For debugging only. TODO: remove. 
- * */ public Map, T2> reservedForPreloading() { return reservedForPreloading; @@ -2310,7 +2328,6 @@ private Checkpoint markCheckpointBegin(CheckpointMetricsTracker tracker) throws cpRec.addCacheGroupState(grp.groupId(), state); } - cpPagesTuple = beginAllCheckpoints(); hasPages = hasPageForWrite(cpPagesTuple.get1()); @@ -2400,7 +2417,8 @@ private boolean hasPageForWrite(Collection>, Integer> beginAllCheckpoints() { Collection> res = new ArrayList(memoryPolicies().size()); @@ -2792,9 +2810,9 @@ private static class CheckpointProgressSnapshot { } /** - * Checkpoint history. Holds chronological ordered map with {@link GridCacheDatabaseSharedManager.CheckpointEntry CheckpointEntries}. - * Data is loaded from corresponding checkpoint directory. - * This directory holds files for checkpoint start and end. + * Checkpoint history. Holds chronological ordered map with {@link GridCacheDatabaseSharedManager.CheckpointEntry + * CheckpointEntries}. Data is loaded from corresponding checkpoint directory. This directory holds files for + * checkpoint start and end. */ @SuppressWarnings("PublicInnerClass") public class CheckpointHistory { @@ -3117,7 +3135,7 @@ private void initIfNeeded(GridCacheSharedContext cctx) throws IgniteCheckedExcep /** * */ - private static class FileLockHolder { + public static class FileLockHolder implements AutoCloseable { /** Lock file name. */ private static final String lockFileName = "lock"; @@ -3130,8 +3148,8 @@ private static class FileLockHolder { /** Lock. */ private FileLock lock; - /** Id. */ - private GridKernalContext ctx; + /** Kernal context to generate Id of locked node in file. */ + @NotNull private GridKernalContext ctx; /** Logger. */ private IgniteLogger log; @@ -3139,7 +3157,7 @@ private static class FileLockHolder { /** * @param path Path. 
*/ - private FileLockHolder(String path, GridKernalContext ctx, IgniteLogger log) { + public FileLockHolder(String path, @NotNull GridKernalContext ctx, IgniteLogger log) { try { file = Paths.get(path, lockFileName).toFile(); @@ -3168,7 +3186,14 @@ public void tryLock(long lockWaitTimeMillis) throws IgniteCheckedException { sb.a("[").a(ctx.localNodeId().toString()).a("]"); //write ip addresses - sb.a(ctx.discovery().localNode().addresses()); + final GridDiscoveryManager discovery = ctx.discovery(); + + if (discovery != null) { //discovery may be not up and running + final ClusterNode node = discovery.localNode(); + + if (node != null) + sb.a(node.addresses()); + } //write ports sb.a("["); @@ -3264,17 +3289,13 @@ private String readContent() throws IOException { return content; } - /** - * - */ - private void release() { + /** Releases file lock */ + public void release() { U.releaseQuiet(lock); } - /** - * - */ - private void close() { + /** Closes file channel */ + public void close() { U.closeQuiet(lockFile); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index 6ea6eff85403c..d7682f0b9a5a6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -50,6 +50,7 @@ import org.apache.ignite.internal.processors.cache.persistence.evict.PageEvictionTracker; import org.apache.ignite.internal.processors.cache.persistence.evict.Random2LruPageEvictionTracker; import org.apache.ignite.internal.processors.cache.persistence.evict.RandomLruPageEvictionTracker; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; import 
org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList; import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeListImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; @@ -939,17 +940,21 @@ private PageEvictionTracker createPageEvictionTracker(MemoryPolicyConfiguration * * @throws IgniteCheckedException If resolving swap directory fails. */ - @Nullable protected File buildAllocPath(MemoryPolicyConfiguration plc) throws IgniteCheckedException { + @Nullable private File buildAllocPath(MemoryPolicyConfiguration plc) throws IgniteCheckedException { String path = plc.getSwapFilePath(); if (path == null) return null; - String consId = String.valueOf(cctx.discovery().consistentId()); + final PdsFolderSettings folderSettings = cctx.kernalContext().pdsFolderResolver().resolveFolders(); + final String folderName; - consId = consId.replaceAll("[:,\\.]", "_"); + if(folderSettings.isCompatible()) + folderName = String.valueOf(folderSettings.consistentId()).replaceAll("[:,\\.]", "_"); + else + folderName = folderSettings.folderName(); - return buildPath(path, consId); + return buildPath(path, folderName); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java index a20bda1eb87de..ed821275bf3e2 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java @@ -47,6 +47,7 @@ import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; import org.apache.ignite.internal.processors.cache.GridCacheSharedManagerAdapter; import org.apache.ignite.internal.processors.cache.StoredCacheData; +import 
org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.marshaller.Marshaller; @@ -93,7 +94,7 @@ public class FilePageStoreManager extends GridCacheSharedManagerAdapter implemen /** */ private PersistentStoreConfiguration pstCfg; - /** Absolute directory for file page store */ + /** Absolute directory for file page store. Includes consistent id based folder. */ private File storeWorkDir; /** */ @@ -117,29 +118,13 @@ public FilePageStoreManager(GridKernalContext ctx) { /** {@inheritDoc} */ @Override public void start0() throws IgniteCheckedException { - if (cctx.kernalContext().clientNode()) + final GridKernalContext ctx = cctx.kernalContext(); + if (ctx.clientNode()) return; - String consId = U.maskForFileName(cctx.kernalContext().discovery().consistentId().toString()); - - if (pstCfg.getPersistentStorePath() != null) { - File workDir0 = new File(pstCfg.getPersistentStorePath()); - - if (!workDir0.isAbsolute()) - workDir0 = U.resolveWorkDirectory( - igniteCfg.getWorkDirectory(), - pstCfg.getPersistentStorePath(), - false - ); + final PdsFolderSettings folderSettings = ctx.pdsFolderResolver().resolveFolders(); - storeWorkDir = new File(workDir0, consId); - } - else - storeWorkDir = new File(U.resolveWorkDirectory( - igniteCfg.getWorkDirectory(), - DFLT_STORE_DIR, - false - ), consId); + storeWorkDir = new File(folderSettings.persistentStoreRootPath(), folderSettings.folderName()); U.ensureDirectory(storeWorkDir, "page store work directory", log); } @@ -551,7 +536,7 @@ private StoredCacheData readCacheData(File conf) throws IgniteCheckedException { } /** - * @return Store work dir. + * @return Store work dir. 
Includes consistent-id based folder */ public File workDir() { return storeWorkDir; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java new file mode 100644 index 0000000000000..c73a952f500ee --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java @@ -0,0 +1,568 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.filename; + +import java.io.File; +import java.io.FileFilter; +import java.io.IOException; +import java.io.Serializable; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.UUID; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.processors.GridProcessorAdapter; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.util.typedef.internal.A; +import org.apache.ignite.internal.util.typedef.internal.SB; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.IgniteSystemProperties.IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID; +import static org.apache.ignite.IgniteSystemProperties.getBoolean; + +/** + * Component for resolving PDS storage file names, also used for generating consistent ID for case PDS mode is enabled + */ +public class PdsConsistentIdProcessor extends GridProcessorAdapter implements PdsFoldersResolver { + /** Database subfolders constant prefix. */ + private static final String DB_FOLDER_PREFIX = "node"; + + /** Node index and uid separator in subfolders name. 
*/ + private static final String NODEIDX_UID_SEPARATOR = "-"; + + /** Constant node subfolder prefix and node index pattern (nodeII, where II - node index as decimal integer) */ + private static final String NODE_PATTERN = DB_FOLDER_PREFIX + "[0-9]*" + NODEIDX_UID_SEPARATOR; + + /** Uuid as string pattern. */ + private static final String UUID_STR_PATTERN = "[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}"; + + /** + * Subdir (nodeII-UID, where II - node index as decimal integer, UID - string representation of consistent ID) + * pattern. + */ + private static final String SUBDIR_PATTERN = NODE_PATTERN + UUID_STR_PATTERN; + + /** Database subfolders for new style filter. */ + public static final FileFilter DB_SUBFOLDERS_NEW_STYLE_FILTER = new FileFilter() { + @Override public boolean accept(File pathname) { + return pathname.isDirectory() && pathname.getName().matches(SUBDIR_PATTERN); + } + }; + + /** Database subfolders for old style filter. */ + private static final FileFilter DB_SUBFOLDERS_OLD_STYLE_FILTER = new FileFilter() { + @Override public boolean accept(File pathname) { + return pathname.isDirectory() + && !"wal".equals(pathname.getName()) + && !pathname.getName().matches(SUBDIR_PATTERN); + } + }; + + /** Database default folder. */ + public static final String DB_DEFAULT_FOLDER = "db"; + + /** Config. */ + private IgniteConfiguration cfg; + + /** Logger. */ + private IgniteLogger log; + + /** Context. */ + private GridKernalContext ctx; + + /** Cached folder settings. */ + private PdsFolderSettings settings; + + /** + * Creates folders resolver + * + * @param ctx Context. + */ + public PdsConsistentIdProcessor(final GridKernalContext ctx) { + super(ctx); + + this.cfg = ctx.config(); + this.log = ctx.log(PdsFoldersResolver.class); + this.ctx = ctx; + } + + /** + * Prepares compatible PDS folder settings. No locking is performed, consistent ID is not overridden. 
+ * + * @param pstStoreBasePath DB storage base path or null if persistence is not enabled. + * @param consistentId compatibility consistent ID + * @return PDS folder settings compatible with previous versions. + */ + private PdsFolderSettings compatibleResolve( + @Nullable final File pstStoreBasePath, + @NotNull final Serializable consistentId) { + + if (cfg.getConsistentId() != null) { + // compatible mode from configuration is used for this case, no locking, no consistent id change + return new PdsFolderSettings(pstStoreBasePath, cfg.getConsistentId()); + } + + return new PdsFolderSettings(pstStoreBasePath, consistentId); + } + + /** {@inheritDoc} */ + @Override public PdsFolderSettings resolveFolders() throws IgniteCheckedException { + if (settings == null) { + settings = prepareNewSettings(); + + if (!settings.isCompatible()) { + if (log.isInfoEnabled()) + log.info("Consistent ID used for local node is [" + settings.consistentId() + "] " + + "according to persistence data storage folders"); + + ctx.discovery().consistentId(settings.consistentId()); + } + } + return settings; + } + + /** + * Creates new settings when we don't have cached one. + * + * @return new settings with prelocked directory (if appropriate). + * @throws IgniteCheckedException if IO failed. 
+ */ + private PdsFolderSettings prepareNewSettings() throws IgniteCheckedException { + final File pstStoreBasePath = resolvePersistentStoreBasePath(); + //here deprecated method is used to get compatible version of consistentId + final Serializable consistentId = ctx.discovery().consistentId(); + + if (!cfg.isPersistentStoreEnabled()) + return compatibleResolve(pstStoreBasePath, consistentId); + + if (getBoolean(IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID, false)) + return compatibleResolve(pstStoreBasePath, consistentId); + + // compatible mode from configuration is used for this case + if (cfg.getConsistentId() != null) { + // compatible mode from configuration is used for this case, no locking, no consistent id change + return new PdsFolderSettings(pstStoreBasePath, cfg.getConsistentId()); + } + // The node scans the work directory and checks if there is a folder matching the consistent ID. + // If such a folder exists, we start up with this ID (compatibility mode) + final String subFolder = U.maskForFileName(consistentId.toString()); + + final GridCacheDatabaseSharedManager.FileLockHolder oldStyleFolderLockHolder = tryLock(new File(pstStoreBasePath, subFolder)); + + if (oldStyleFolderLockHolder != null) + return new PdsFolderSettings(pstStoreBasePath, + subFolder, + consistentId, + oldStyleFolderLockHolder, + true); + + final File[] oldStyleFolders = pstStoreBasePath.listFiles(DB_SUBFOLDERS_OLD_STYLE_FILTER); + + if (oldStyleFolders != null && oldStyleFolders.length != 0) { + for (File folder : oldStyleFolders) { + final String path = getPathDisplayableInfo(folder); + + U.warn(log, "There is other non-empty storage folder under storage base directory [" + path + "]"); + } + } + + for (FolderCandidate next : getNodeIndexSortedCandidates(pstStoreBasePath)) { + final GridCacheDatabaseSharedManager.FileLockHolder fileLockHolder = tryLock(next.subFolderFile()); + + if (fileLockHolder != null) { + if (log.isInfoEnabled()) + log.info("Successfully locked persistence 
storage folder [" + next.subFolderFile() + "]"); + + return new PdsFolderSettings(pstStoreBasePath, + next.subFolderFile().getName(), + next.uuid(), + fileLockHolder, + false); + } + } + + // was not able to find free slot, allocating new + final GridCacheDatabaseSharedManager.FileLockHolder rootDirLock = lockRootDirectory(pstStoreBasePath); + + try { + final List sortedCandidates = getNodeIndexSortedCandidates(pstStoreBasePath); + final int nodeIdx = sortedCandidates.isEmpty() ? 0 : (sortedCandidates.get(sortedCandidates.size() - 1).nodeIndex() + 1); + + return generateAndLockNewDbStorage(pstStoreBasePath, nodeIdx); + } + finally { + rootDirLock.release(); + rootDirLock.close(); + } + } + + /** + * Calculate overall folder size. + * + * @param dir directory to scan. + * @return total size in bytes. + */ + private static FolderParams folderSize(File dir) { + final FolderParams params = new FolderParams(); + + visitFolder(dir, params); + + return params; + } + + /** + * Scans provided directory and its sub dirs, collects found metrics. + * + * @param dir directory to start scan from. + * @param params input/output. + */ + private static void visitFolder(final File dir, final FolderParams params) { + for (File file : dir.listFiles()) { + if (file.isDirectory()) + visitFolder(file, params); + else { + params.size += file.length(); + params.lastModified = Math.max(params.lastModified, dir.lastModified()); + } + } + } + + /** + * @param folder folder to scan. + * @return folder displayable information. 
+ */ + @NotNull private String getPathDisplayableInfo(final File folder) { + final SB res = new SB(); + + res.a(getCanonicalPath(folder)); + res.a(", "); + final FolderParams params = folderSize(folder); + + res.a(params.size); + res.a(" bytes, modified "); + final SimpleDateFormat simpleDateFormat = new SimpleDateFormat("MM/dd/yyyy hh:mm a"); + + res.a(simpleDateFormat.format(params.lastModified)); + res.a(" "); + + return res.toString(); + } + + /** + * Returns the canonical pathname string of this abstract pathname. + * + * @param file path to convert. + * @return canonical pathname or at least absolute if convert to canonical failed. + */ + @NotNull private String getCanonicalPath(final File file) { + try { + return file.getCanonicalPath(); + } + catch (IOException ignored) { + return file.getAbsolutePath(); + } + } + + /** + * Pad start of string with provided character. + * + * @param str string to pad. + * @param minLength expected length. + * @param padChar padding character. + * @return padded string. + */ + private static String padStart(String str, int minLength, char padChar) { + A.notNull(str, "String should not be empty"); + if (str.length() >= minLength) + return str; + + final SB sb = new SB(minLength); + + for (int i = str.length(); i < minLength; ++i) + sb.a(padChar); + + sb.a(str); + + return sb.toString(); + + } + + /** + * Creates new DB storage folder. + * + * @param pstStoreBasePath DB root path. + * @param nodeIdx next node index to use in folder name. + * @return new settings to be used in this node. + * @throws IgniteCheckedException if failed. 
+ */ + @NotNull private PdsFolderSettings generateAndLockNewDbStorage(final File pstStoreBasePath, + final int nodeIdx) throws IgniteCheckedException { + + final UUID uuid = UUID.randomUUID(); + final String consIdBasedFolder = genNewStyleSubfolderName(nodeIdx, uuid); + final File newRandomFolder = U.resolveWorkDirectory(pstStoreBasePath.getAbsolutePath(), consIdBasedFolder, false); //mkdir here + final GridCacheDatabaseSharedManager.FileLockHolder fileLockHolder = tryLock(newRandomFolder); + + if (fileLockHolder != null) { + if (log.isInfoEnabled()) + log.info("Successfully created new persistent storage folder [" + newRandomFolder + "]"); + + return new PdsFolderSettings(pstStoreBasePath, consIdBasedFolder, uuid, fileLockHolder, false); + } + throw new IgniteCheckedException("Unable to lock file generated randomly [" + newRandomFolder + "]"); + } + + /** + * Generates DB subfolder name for provided node index (local) and UUID (consistent ID) + * + * @param nodeIdx node index. + * @param uuid consistent ID. + * @return folder file name + */ + @NotNull public static String genNewStyleSubfolderName(final int nodeIdx, final UUID uuid) { + final String uuidAsStr = uuid.toString(); + + assert uuidAsStr.matches(UUID_STR_PATTERN); + + final String nodeIdxPadded = padStart(Integer.toString(nodeIdx), 2, '0'); + + return DB_FOLDER_PREFIX + nodeIdxPadded + NODEIDX_UID_SEPARATOR + uuidAsStr; + } + + /** + * Acquires lock to root storage directory, used to lock root directory in case creating new files is required. 
+ * + * @param pstStoreBasePath root DB dir to lock + * @return locked directory, should be released and closed later + * @throws IgniteCheckedException if failed + */ + @NotNull private GridCacheDatabaseSharedManager.FileLockHolder lockRootDirectory(File pstStoreBasePath) + throws IgniteCheckedException { + + GridCacheDatabaseSharedManager.FileLockHolder rootDirLock; + int retry = 0; + + while ((rootDirLock = tryLock(pstStoreBasePath)) == null) { + if (retry > 600) + throw new IgniteCheckedException("Unable to start under DB storage path [" + pstStoreBasePath + "]" + + ". Lock is being held to root directory"); + retry++; + } + + return rootDirLock; + } + + /** + * @param pstStoreBasePath root storage folder to scan. + * @return empty list if there are no files in folder to test. Non null value is returned for folder having + * applicable new style files. Collection is sorted ascending according to node ID, 0 node index is coming first. + */ + @Nullable private List getNodeIndexSortedCandidates(File pstStoreBasePath) { + final File[] files = pstStoreBasePath.listFiles(DB_SUBFOLDERS_NEW_STYLE_FILTER); + + if (files == null) + return Collections.emptyList(); + + final List res = new ArrayList<>(); + + for (File file : files) { + final FolderCandidate candidate = parseFileName(file); + + if (candidate != null) + res.add(candidate); + } + Collections.sort(res, new Comparator() { + @Override public int compare(FolderCandidate c1, FolderCandidate c2) { + return Integer.compare(c1.nodeIndex(), c2.nodeIndex()); + } + }); + + return res; + } + + /** + * Tries to lock subfolder within storage root folder. + * + * @param dbStoreDirWithSubdirectory DB store directory, is to be absolute and should include consistent ID based + * sub folder. + * @return non null holder if lock was successful, null in case lock failed. If directory does not exist method will + * always fail to lock. 
+ */ + private GridCacheDatabaseSharedManager.FileLockHolder tryLock(File dbStoreDirWithSubdirectory) { + if (!dbStoreDirWithSubdirectory.exists()) + return null; + + final String path = dbStoreDirWithSubdirectory.getAbsolutePath(); + final GridCacheDatabaseSharedManager.FileLockHolder fileLockHolder + = new GridCacheDatabaseSharedManager.FileLockHolder(path, ctx, log); + + try { + fileLockHolder.tryLock(1000); + + return fileLockHolder; + } + catch (IgniteCheckedException e) { + U.closeQuiet(fileLockHolder); + + if (log.isInfoEnabled()) + log.info("Unable to acquire lock to file [" + path + "], reason: " + e.getMessage()); + + return null; + } + } + + /** + * @return DB storage absolute root path resolved as 'db' folder in Ignite work dir (by default) or using persistent + * store configuration. Null if persistence is not enabled. Returned folder is created automatically. + * @throws IgniteCheckedException if I/O failed. + */ + @Nullable private File resolvePersistentStoreBasePath() throws IgniteCheckedException { + final PersistentStoreConfiguration pstCfg = cfg.getPersistentStoreConfiguration(); + + if (pstCfg == null) + return null; + + final String pstPath = pstCfg.getPersistentStorePath(); + + return U.resolveWorkDirectory( + cfg.getWorkDirectory(), + pstPath != null ? pstPath : DB_DEFAULT_FOLDER, + false + ); + + } + + /** + * @param subFolderFile new style folder name to parse + * @return Pair of UUID and node index + */ + private FolderCandidate parseFileName(@NotNull final File subFolderFile) { + return parseSubFolderName(subFolderFile, log); + } + + /** + * @param subFolderFile new style file to parse. + * @param log Logger. + * @return Pair of UUID and node index. 
+ */ + @Nullable public static FolderCandidate parseSubFolderName( + @NotNull final File subFolderFile, + @NotNull final IgniteLogger log) { + + final String fileName = subFolderFile.getName(); + final Matcher matcher = Pattern.compile(NODE_PATTERN).matcher(fileName); + if (!matcher.find()) + return null; + + int uidStart = matcher.end(); + + try { + final String uid = fileName.substring(uidStart); + final UUID uuid = UUID.fromString(uid); + final String substring = fileName.substring(DB_FOLDER_PREFIX.length(), uidStart - NODEIDX_UID_SEPARATOR.length()); + final int idx = Integer.parseInt(substring); + + return new FolderCandidate(subFolderFile, idx, uuid); + } + catch (Exception e) { + U.warn(log, "Unable to parse new style file format from [" + subFolderFile.getAbsolutePath() + "]: " + e); + + return null; + } + } + + /** {@inheritDoc} */ + @Override public void stop(boolean cancel) throws IgniteCheckedException { + if (settings != null) { + final GridCacheDatabaseSharedManager.FileLockHolder fileLockHolder = settings.getLockedFileLockHolder(); + + if (fileLockHolder != null) { + fileLockHolder.release(); + fileLockHolder.close(); + } + } + super.stop(cancel); + } + + /** Path metrics */ + private static class FolderParams { + /** Overall size in bytes. */ + private long size; + + /** Last modified. */ + private long lastModified; + } + + /** + * Represents parsed new style file and encoded parameters in this file name + */ + public static class FolderCandidate { + /** Absolute file path pointing to DB subfolder within DB storage root folder. */ + private final File subFolderFile; + + /** Node index (local, usually 0 if multiple nodes are not started at local PC). */ + private final int nodeIdx; + + /** Uuid contained in file name, is to be set as consistent ID. */ + private final UUID uuid; + + /** + * @param subFolderFile Absolute file path pointing to DB subfolder. + * @param nodeIdx Node index. + * @param uuid Uuid. 
+ */ + public FolderCandidate(File subFolderFile, int nodeIdx, UUID uuid) { + this.subFolderFile = subFolderFile; + this.nodeIdx = nodeIdx; + this.uuid = uuid; + } + + /** + * @return Node index (local, usually 0 if multiple nodes are not started at local PC). + */ + public int nodeIndex() { + return nodeIdx; + } + + /** + * @return Uuid contained in file name, is to be set as consistent ID. + */ + public Serializable uuid() { + return uuid; + } + + /** + * @return Absolute file path pointing to DB subfolder within DB storage root folder. + */ + public File subFolderFile() { + return subFolderFile; + } + } +} + + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsFolderSettings.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsFolderSettings.java new file mode 100644 index 0000000000000..20fb5cae0aa50 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsFolderSettings.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.filename; + +import java.io.File; +import java.io.Serializable; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +/** + * Class holds information required for folder generation for ignite persistent store + */ +public class PdsFolderSettings { + /** + * DB storage absolute root path resolved as 'db' folder in Ignite work dir (by default) or using persistent store + * configuration.
+ * Note WAL storage may be configured outside this path.
+ * This value may be null if persistence is not enabled. + */ + @Nullable private final File persistentStoreRootPath; + + /** Sub folder name containing consistent ID and optionally node index. */ + private final String folderName; + + /** Consistent id to be set to local node. */ + private final Serializable consistentId; + + /** + * File lock holder with prelocked db directory. For non compatible mode this holder contains prelocked work + * directory. This value is to be used at activate instead of locking.
May be null in case preconfigured + * consistent ID is used or in case lock holder was already taken by other processor. + */ + @Nullable private final GridCacheDatabaseSharedManager.FileLockHolder fileLockHolder; + + /** + * Indicates if compatible mode is enabled, in that case all sub folders are generated from consistent ID without + * 'node' and node index prefix. In compatible mode there is no overriding for consistent ID is done. + */ + private final boolean compatible; + + /** + * Creates settings in for new PST(DB) folder mode. + * + * @param persistentStoreRootPath Persistent store root path or null if non PDS mode. + * @param folderName Sub folder name containing consistent ID and optionally node index. + * @param consistentId Consistent id. + * @param fileLockHolder File lock holder with prelocked db directory. + * @param compatible Compatible mode flag. + */ + public PdsFolderSettings(@Nullable final File persistentStoreRootPath, + final String folderName, + final Serializable consistentId, + @Nullable final GridCacheDatabaseSharedManager.FileLockHolder fileLockHolder, + final boolean compatible) { + + this.consistentId = consistentId; + this.folderName = folderName; + this.fileLockHolder = fileLockHolder; + this.compatible = compatible; + this.persistentStoreRootPath = persistentStoreRootPath; + } + + /** + * Creates settings for compatible mode. Folder name is consistent ID (masked), no node prefix is added. + * + * @param persistentStoreRootPath root DB path. + * @param consistentId node consistent ID. + */ + public PdsFolderSettings( + @Nullable final File persistentStoreRootPath, + @NotNull final Serializable consistentId) { + + this.consistentId = consistentId; + this.compatible = true; + this.folderName = U.maskForFileName(consistentId.toString()); + this.persistentStoreRootPath = persistentStoreRootPath; + this.fileLockHolder = null; + } + + /** + * @return sub folders name based on consistent ID. 
In compatible mode this is escaped consistent ID, in new mode + * this is UUID based folder name. + */ + public String folderName() { + return folderName; + } + + /** + * @return Consistent id to be set to local node. + */ + public Serializable consistentId() { + return consistentId; + } + + /** + * @return flag indicating if compatible mode is enabled for folder generation. In that case all sub folders names are + * generated from consistent ID without 'node' and node index prefix. In compatible mode there is no overriding for + * consistent ID is done for cluster node. Locking files is independent to compatibility mode. + */ + public boolean isCompatible() { + return compatible; + } + + /** + * Returns already locked file lock holder to lock file in {@link #persistentStoreRootPath}. Unlock and close this + * lock is not required. + * + * @return File lock holder with prelocked db directory. + */ + @Nullable public GridCacheDatabaseSharedManager.FileLockHolder getLockedFileLockHolder() { + return fileLockHolder; + } + + /** + * @return DB storage absolute root path resolved as 'db' folder in Ignite work dir (by default) or using persistent + * store configuration. Note WAL storage may be configured outside this path. May return null for non pds mode. + */ + @Nullable public File persistentStoreRootPath() { + return persistentStoreRootPath; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsFoldersResolver.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsFoldersResolver.java new file mode 100644 index 0000000000000..cefaa04d15a89 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsFoldersResolver.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.filename; + +import org.apache.ignite.IgniteCheckedException; + +/** + * Resolves folders for PDS mode, may have side effect as setting random UUID as local node consistent ID. + */ +public interface PdsFoldersResolver { + /** + * Prepares and caches PDS folder settings. Subsequent call to this method will provide same settings. + * + * @return PDS folder settings, consistentID and prelocked DB file lock. + * @throws IgniteCheckedException if failed. 
+ */ + public PdsFolderSettings resolveFolders() throws IgniteCheckedException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java index c8715aafb8225..6a75dd2322b09 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java @@ -64,6 +64,7 @@ import org.apache.ignite.internal.processors.cache.persistence.PersistenceMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; import org.apache.ignite.internal.processors.cache.persistence.wal.crc.PureJavaCrc32; import org.apache.ignite.internal.processors.cache.persistence.wal.record.HeaderRecord; import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordDataV1Serializer; @@ -162,7 +163,7 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl /** WAL archive directory (including consistent ID as subfolder) */ private File walArchiveDir; - /** Serializer of latest version. */ + /** Serializer of latest version, used to read header record and for write records */ private RecordSerializer serializer; /** Serializer latest version to use. 
*/ @@ -258,25 +259,21 @@ public FileWriteAheadLogManager(@NotNull final GridKernalContext ctx) { /** {@inheritDoc} */ @Override public void start0() throws IgniteCheckedException { if (!cctx.kernalContext().clientNode()) { - String consId = consistentId(); - - A.notNullOrEmpty(consId, "consistentId"); - - consId = U.maskForFileName(consId); + final PdsFolderSettings resolveFolders = cctx.kernalContext().pdsFolderResolver().resolveFolders(); checkWalConfiguration(); walWorkDir = initDirectory( psCfg.getWalStorePath(), PersistentStoreConfiguration.DFLT_WAL_STORE_PATH, - consId, + resolveFolders.folderName(), "write ahead log work directory" ); walArchiveDir = initDirectory( psCfg.getWalArchivePath(), PersistentStoreConfiguration.DFLT_WAL_ARCHIVE_PATH, - consId, + resolveFolders.folderName(), "write ahead log archive directory" ); @@ -317,13 +314,6 @@ private void checkWalConfiguration() throws IgniteCheckedException { } } - /** - * @return Consistent ID. - */ - protected String consistentId() { - return cctx.discovery().consistentId().toString(); - } - /** {@inheritDoc} */ @Override protected void stop0(boolean cancel) { final GridTimeoutProcessor.CancelableTask schedule = backgroundFlushSchedule; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java index 07be8b497df9c..e23476625380e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.cache.persistence.wal.reader; import java.io.File; +import java.io.Serializable; import java.lang.reflect.Field; import java.util.Iterator; 
import java.util.List; @@ -47,6 +48,8 @@ import org.apache.ignite.internal.processors.affinity.GridAffinityProcessor; import org.apache.ignite.internal.processors.cache.GridCacheProcessor; import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFoldersResolver; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.processors.closure.GridClosureProcessor; import org.apache.ignite.internal.processors.cluster.ClusterProcessor; @@ -79,6 +82,7 @@ import org.apache.ignite.internal.suggestions.GridPerformanceSuggestions; import org.apache.ignite.internal.util.IgniteExceptionRegistry; import org.apache.ignite.internal.util.StripedExecutor; +import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.marshaller.Marshaller; import org.apache.ignite.plugin.PluginNotFoundException; import org.apache.ignite.plugin.PluginProvider; @@ -106,7 +110,7 @@ public class StandaloneGridKernalContext implements GridKernalContext { @Nullable private IgniteCacheObjectProcessor cacheObjProcessor; /** Marshaller context implementation. */ - private MarshallerContextImpl marshallerContext; + private MarshallerContextImpl marshallerCtx; /** * @param log Logger. @@ -130,13 +134,13 @@ public class StandaloneGridKernalContext implements GridKernalContext { throw new IllegalStateException("Must not fail on empty providers list.", e); } - this.marshallerContext = new MarshallerContextImpl(null); + this.marshallerCtx = new MarshallerContextImpl(null); this.cfg = prepareIgniteConfiguration(); this.cacheObjProcessor = binaryMetadataFileStoreDir != null ? 
binaryProcessor(this, binaryMetadataFileStoreDir) : null; if (marshallerMappingFileStoreDir != null) { - marshallerContext.setMarshallerMappingFileStoreDir(marshallerMappingFileStoreDir); - marshallerContext.onMarshallerProcessorStarted(this, null); + marshallerCtx.setMarshallerMappingFileStoreDir(marshallerMappingFileStoreDir); + marshallerCtx.onMarshallerProcessorStarted(this, null); } } @@ -176,7 +180,7 @@ private IgniteConfiguration prepareIgniteConfiguration() { PersistentStoreConfiguration pstCfg = new PersistentStoreConfiguration(); cfg.setPersistentStoreConfiguration(pstCfg); - marshaller.setContext(marshallerContext); + marshaller.setContext(marshallerCtx); return cfg; } @@ -392,11 +396,7 @@ private IgniteConfiguration prepareIgniteConfiguration() { /** {@inheritDoc} */ @Override public GridDiscoveryManager discovery() { - return new GridDiscoveryManager(StandaloneGridKernalContext.this) { - @Override public Object consistentId() { - return ""; // some non null value is required - } - }; + return new GridDiscoveryManager(this); } /** {@inheritDoc} */ @@ -579,7 +579,7 @@ private IgniteConfiguration prepareIgniteConfiguration() { /** {@inheritDoc} */ @Override public MarshallerContextImpl marshallerContext() { - return marshallerContext; + return marshallerCtx; } /** {@inheritDoc} */ @@ -597,6 +597,16 @@ private IgniteConfiguration prepareIgniteConfiguration() { return null; } + /** {@inheritDoc} */ + @Override public PdsFoldersResolver pdsFolderResolver() { + return new PdsFoldersResolver() { + /** {@inheritDoc} */ + @Override public PdsFolderSettings resolveFolders() { + return new PdsFolderSettings(new File("."), U.maskForFileName("")); + } + }; + } + /** {@inheritDoc} */ @NotNull @Override public Iterator iterator() { return null; diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java index e6eaa8ef643e1..eb8ee30d77aee 100644 
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java @@ -966,10 +966,10 @@ public UUID getLocalNodeId() { initAddresses(); - Serializable cfgId = ignite.configuration().getConsistentId(); + final Serializable cfgId = ignite.configuration().getConsistentId(); if (cfgId == null) { - List sortedAddrs = new ArrayList<>(addrs.get1()); + final List sortedAddrs = new ArrayList<>(addrs.get1()); Collections.sort(sortedAddrs); diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/internal/TcpDiscoveryNode.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/internal/TcpDiscoveryNode.java index 20fb6c584f964..38c2a1b41f256 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/internal/TcpDiscoveryNode.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/internal/TcpDiscoveryNode.java @@ -225,6 +225,21 @@ public void lastSuccessfulAddress(InetSocketAddress lastSuccessfulAddr) { return consistentId; } + /** + * Sets consistent globally unique node ID which survives node restarts. + * + * @param consistentId Consistent globally unique node ID. 
+ */ + public void setConsistentId(Serializable consistentId) { + this.consistentId = consistentId; + + final Map map = new HashMap<>(attrs); + + map.put(ATTR_NODE_CONSISTENT_ID, consistentId); + + attrs = Collections.unmodifiableMap(map); + } + /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public T attribute(String name) { diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/filename/IgniteUidAsConsistentIdMigrationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/filename/IgniteUidAsConsistentIdMigrationTest.java new file mode 100644 index 0000000000000..fe7e4df8fb225 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/filename/IgniteUidAsConsistentIdMigrationTest.java @@ -0,0 +1,712 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.db.filename; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Set; +import java.util.TreeSet; +import java.util.UUID; +import java.util.regex.Pattern; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.GridStringLogger; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.jetbrains.annotations.NotNull; + +import static org.apache.ignite.IgniteSystemProperties.IGNITE_CONSISTENT_ID_BY_HOST_WITHOUT_PORT; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID; +import static org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor.parseSubFolderName; + +/** + * Test for new and old style persistent storage folders generation + */ +public class IgniteUidAsConsistentIdMigrationTest extends GridCommonAbstractTest { + /** Cache name for test. */ + public static final String CACHE_NAME = "dummy"; + + /** Clear DB folder after each test. May be set to false for local debug */ + private static final boolean deleteAfter = true; + + /** Clear DB folder before each test. */ + private static final boolean deleteBefore = true; + + /** Fail test if delete of DB folder was not completed. */ + private static final boolean failIfDeleteNotCompleted = true; + + /** Configured consistent id. 
*/ + private String configuredConsistentId; + + /** Logger to accumulate messages, null will cause logger won't be customized */ + private GridStringLogger strLog; + + /** Clear properties after this test run. Flag protects from failed test */ + private boolean clearPropsAfterTest = false; + + /** Place storage in temp folder for current test run. */ + private boolean placeStorageInTemp; + + /** A path to persistent store custom path for current test run. */ + private File pstStoreCustomPath; + + /** A path to persistent store WAL work custom path. */ + private File pstWalStoreCustomPath; + + /** A path to persistent store WAL archive custom path. */ + private File pstWalArchCustomPath; + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + stopAllGrids(); + + if (deleteBefore) + deleteWorkFiles(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + if (deleteAfter) + deleteWorkFiles(); + + if (clearPropsAfterTest) { + System.clearProperty(IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID); + System.clearProperty(IGNITE_CONSISTENT_ID_BY_HOST_WITHOUT_PORT); + } + } + + /** + * @throws IgniteCheckedException If failed. 
+ */ + private void deleteWorkFiles() throws IgniteCheckedException { + boolean ok = true; + + if (pstStoreCustomPath != null) + ok &= deleteRecursively(pstStoreCustomPath); + else + ok &= deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false)); + + if (pstWalArchCustomPath != null) + ok &= deleteRecursively(pstWalArchCustomPath); + + if (pstWalStoreCustomPath != null) + ok &= deleteRecursively(pstWalStoreCustomPath); + + ok &= deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "binary_meta", false)); + + if (failIfDeleteNotCompleted) + assertTrue(ok); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + final IgniteConfiguration cfg = super.getConfiguration(gridName); + + if (configuredConsistentId != null) + cfg.setConsistentId(configuredConsistentId); + + final PersistentStoreConfiguration psCfg = new PersistentStoreConfiguration(); + + if (placeStorageInTemp) { + final File tempDir = new File(System.getProperty("java.io.tmpdir")); + + pstStoreCustomPath = new File(tempDir, "Store"); + pstWalStoreCustomPath = new File(tempDir, "WalStore"); + pstWalArchCustomPath = new File(tempDir, "WalArchive"); + + psCfg.setPersistentStorePath(pstStoreCustomPath.getAbsolutePath()); + psCfg.setWalStorePath(pstWalStoreCustomPath.getAbsolutePath()); + psCfg.setWalArchivePath(pstWalArchCustomPath.getAbsolutePath()); + } + + cfg.setPersistentStoreConfiguration(psCfg); + + final MemoryConfiguration memCfg = new MemoryConfiguration(); + final MemoryPolicyConfiguration memPolCfg = new MemoryPolicyConfiguration(); + + memPolCfg.setMaxSize(32 * 1024 * 1024); // we don't need much memory for this test + memCfg.setMemoryPolicies(memPolCfg); + cfg.setMemoryConfiguration(memCfg); + + if (strLog != null) + cfg.setGridLogger(strLog); + + return cfg; + } + + /** + * Checks start on empty PDS folder, in that case node 0 should start with random UUID. 
+ * + * @throws Exception if failed. + */ + public void testNewStyleIdIsGenerated() throws Exception { + final Ignite ignite = startActivateFillDataGrid(0); + + //test UUID is parsable from consistent ID test + UUID.fromString(ignite.cluster().localNode().consistentId().toString()); + assertPdsDirsDefaultExist(genNewStyleSubfolderName(0, ignite)); + stopGrid(0); + } + + /** + * Checks start on empty PDS folder, in that case node 0 should start with random UUID. + * + * @throws Exception if failed. + */ + public void testNewStyleIdIsGeneratedInCustomStorePath() throws Exception { + placeStorageInTemp = true; + final Ignite ignite = startActivateFillDataGrid(0); + + //test UUID is parsable from consistent ID test + UUID.fromString(ignite.cluster().localNode().consistentId().toString()); + final String subfolderName = genNewStyleSubfolderName(0, ignite); + + assertDirectoryExist("binary_meta", subfolderName); + + assertDirectoryExist(pstWalArchCustomPath, subfolderName); + assertDirectoryExist(pstWalArchCustomPath, subfolderName); + assertDirectoryExist(pstStoreCustomPath, subfolderName); + + stopGrid(0); + } + + /** + * Checks start on empty PDS folder using configured ConsistentId. We should start using this ID in compatible mode. + * + * @throws Exception if failed. + */ + public void testPreconfiguredConsitentIdIsApplied() throws Exception { + this.configuredConsistentId = "someConfiguredConsistentId"; + Ignite ignite = startActivateFillDataGrid(0); + + assertPdsDirsDefaultExist(configuredConsistentId); + stopGrid(0); + } + + /** + * Checks start on configured ConsistentId with same value as default, this emulate old style folder is already + * available. We should restart using this folder. 
+ * + * @throws Exception if failed + */ + public void testRestartOnExistingOldStyleId() throws Exception { + final String expDfltConsistentId = "127.0.0.1:47500"; + + this.configuredConsistentId = expDfltConsistentId; //this is for create old node folder + + final Ignite igniteEx = startActivateGrid(0); + + final String expVal = "there is compatible mode with old style folders!"; + + igniteEx.getOrCreateCache(CACHE_NAME).put("hi", expVal); + + assertPdsDirsDefaultExist(U.maskForFileName(configuredConsistentId)); + stopGrid(0); + + this.configuredConsistentId = null; //now set up grid on existing folder + + final Ignite igniteRestart = startActivateGrid(0); + + assertEquals(expDfltConsistentId, igniteRestart.cluster().localNode().consistentId()); + final IgniteCache cache = igniteRestart.cache(CACHE_NAME); + + assertNotNull("Expected to have cache [" + CACHE_NAME + "] using [" + expDfltConsistentId + "] as PDS folder", cache); + final Object valFromCache = cache.get("hi"); + + assertNotNull("Expected to load data from cache using [" + expDfltConsistentId + "] as PDS folder", valFromCache); + assertTrue(expVal.equals(valFromCache)); + stopGrid(0); + } + + /** + * Start stop grid without activation should cause lock to be released and restarted node should have index 0 + * + * @throws Exception if failed + */ + public void testStartWithoutActivate() throws Exception { + //start stop grid without activate + startGrid(0); + stopGrid(0); + + Ignite igniteRestart = startActivateFillDataGrid(0); + assertPdsDirsDefaultExist(genNewStyleSubfolderName(0, igniteRestart)); + stopGrid(0); + } + + /** + * Checks start on empty PDS folder, in that case node 0 should start with random UUID + * + * @throws Exception if failed + */ + public void testRestartOnSameFolderWillCauseSameUuidGeneration() throws Exception { + final UUID uuid; + { + final Ignite ignite = startActivateFillDataGrid(0); + + assertPdsDirsDefaultExist(genNewStyleSubfolderName(0, ignite)); + + uuid = 
(UUID)ignite.cluster().localNode().consistentId(); + stopGrid(0); + } + + { + final Ignite igniteRestart = startActivateGrid(0); + + assertTrue("there!".equals(igniteRestart.cache(CACHE_NAME).get("hi"))); + + final Object consIdRestart = igniteRestart.cluster().localNode().consistentId(); + + assertPdsDirsDefaultExist(genNewStyleSubfolderName(0, igniteRestart)); + stopGrid(0); + + assertEquals(uuid, consIdRestart); + } + } + + /** + * This test starts node, activates, deactivates node, and then start second node. + * Expected behaviour is following: second node will join topology with separate node folder + * + * @throws Exception if failed + */ + public void testStartNodeAfterDeactivate() throws Exception { + final UUID uuid; + { + final Ignite ignite = startActivateFillDataGrid(0); + + assertPdsDirsDefaultExist(genNewStyleSubfolderName(0, ignite)); + + uuid = (UUID)ignite.cluster().localNode().consistentId(); + ignite.active(false); + } + { + final Ignite igniteRestart = startActivateGrid(1); + + grid(0).active(true); + final Object consIdRestart = igniteRestart.cluster().localNode().consistentId(); + + assertPdsDirsDefaultExist(genNewStyleSubfolderName(1, igniteRestart)); + + stopGrid(1); + assertFalse(consIdRestart.equals(uuid)); + } + stopGrid(0); + assertNodeIndexesInFolder(0, 1); + } + + /** + * @param idx Index of the grid to start. + * @return Started and activated grid. + * @throws Exception If failed. + */ + @NotNull private Ignite startActivateFillDataGrid(int idx) throws Exception { + final Ignite ignite = startActivateGrid(idx); + + ignite.getOrCreateCache(CACHE_NAME).put("hi", "there!"); + + return ignite; + } + + /** + * Starts and activates new grid with given index. + * + * @param idx Index of the grid to start. + * @return Started and activated grid. + * @throws Exception If anything failed. 
+ */ + @NotNull private Ignite startActivateGrid(int idx) throws Exception { + final Ignite ignite = startGrid(idx); + + ignite.active(true); + + return ignite; + } + + /** + * Generates folder name in new style using constant prefix and UUID + * + * @param nodeIdx expected node index to check + * @param ignite ignite instance + * @return name of storage related subfolders + */ + @NotNull private String genNewStyleSubfolderName(final int nodeIdx, final Ignite ignite) { + final Object consistentId = ignite.cluster().localNode().consistentId(); + + assertTrue("For new style folders consistent ID should be UUID," + + " but actual class is " + (consistentId == null ? null : consistentId.getClass()), + consistentId instanceof UUID); + + return PdsConsistentIdProcessor.genNewStyleSubfolderName(nodeIdx, (UUID)consistentId); + } + + /** + * test two nodes started at the same db root folder, second node should get index 1 + * + * @throws Exception if failed + */ + public void testNodeIndexIncremented() throws Exception { + final Ignite ignite0 = startGrid(0); + final Ignite ignite1 = startGrid(1); + + ignite0.active(true); + + ignite0.getOrCreateCache(CACHE_NAME).put("hi", "there!"); + ignite1.getOrCreateCache(CACHE_NAME).put("hi1", "there!"); + + assertPdsDirsDefaultExist(genNewStyleSubfolderName(0, ignite0)); + assertPdsDirsDefaultExist(genNewStyleSubfolderName(1, ignite1)); + + stopGrid(0); + stopGrid(1); + assertNodeIndexesInFolder(0, 1); + } + + /** + * Test verified that new style folder is taken always with lowest index + * + * @throws Exception if failed + */ + public void testNewStyleAlwaysSmallestNodeIndexIsCreated() throws Exception { + final Ignite ignite0 = startGrid(0); + final Ignite ignite1 = startGrid(1); + final Ignite ignite2 = startGrid(2); + final Ignite ignite3 = startGrid(3); + + ignite0.active(true); + + ignite0.getOrCreateCache(CACHE_NAME).put("hi", "there!"); + ignite3.getOrCreateCache(CACHE_NAME).put("hi1", "there!"); + + 
assertPdsDirsDefaultExist(genNewStyleSubfolderName(0, ignite0)); + assertPdsDirsDefaultExist(genNewStyleSubfolderName(1, ignite1)); + assertPdsDirsDefaultExist(genNewStyleSubfolderName(2, ignite2)); + assertPdsDirsDefaultExist(genNewStyleSubfolderName(3, ignite3)); + + assertNodeIndexesInFolder(0, 1, 2, 3); + stopAllGrids(); + + //this grid should take folder with index 0 as unlocked + final Ignite ignite4Restart = startActivateGrid(3); + assertPdsDirsDefaultExist(genNewStyleSubfolderName(0, ignite4Restart)); + + assertNodeIndexesInFolder(0, 1, 2, 3); + stopAllGrids(); + } + + /** + * Test verified that new style folder is taken always with lowest index + * + * @throws Exception if failed + */ + public void testNewStyleAlwaysSmallestNodeIndexIsCreatedMultithreaded() throws Exception { + final Ignite ignite0 = startGridsMultiThreaded(11); + + ignite0.active(true); + + ignite0.getOrCreateCache(CACHE_NAME).put("hi", "there!"); + ignite0.getOrCreateCache(CACHE_NAME).put("hi1", "there!"); + + assertPdsDirsDefaultExist(genNewStyleSubfolderName(0, ignite0)); + + assertNodeIndexesInFolder(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + stopAllGrids(); + + //this grid should take folder with index 0 as unlocked + final Ignite ignite4Restart = startActivateGrid(4); + assertPdsDirsDefaultExist(genNewStyleSubfolderName(0, ignite4Restart)); + stopAllGrids(); + + assertNodeIndexesInFolder(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + } + + /** + * Test start two nodes with predefined conistent ID (emulate old fashion node). Then restart two nodes. Expected + * both nodes will get its own old folders + * + * @throws Exception if failed. 
+ */ + public void testStartTwoOldStyleNodes() throws Exception { + final String expDfltConsistentId1 = "127.0.0.1:47500"; + + this.configuredConsistentId = expDfltConsistentId1; //this is for create old node folder + final Ignite ignite = startGrid(0); + + final String expDfltConsistentId2 = "127.0.0.1:47501"; + + this.configuredConsistentId = expDfltConsistentId2; //this is for create old node folder + final Ignite ignite2 = startGrid(1); + + ignite.active(true); + + final String expVal = "there is compatible mode with old style folders!"; + + ignite2.getOrCreateCache(CACHE_NAME).put("hi", expVal); + + assertPdsDirsDefaultExist(U.maskForFileName(expDfltConsistentId1)); + assertPdsDirsDefaultExist(U.maskForFileName(expDfltConsistentId2)); + stopAllGrids(); + + this.configuredConsistentId = null; //now set up grid on existing folder + + final Ignite igniteRestart = startGrid(0); + final Ignite igniteRestart2 = startGrid(1); + + igniteRestart2.active(true); + + assertEquals(expDfltConsistentId1, igniteRestart.cluster().localNode().consistentId()); + assertEquals(expDfltConsistentId2, igniteRestart2.cluster().localNode().consistentId()); + + final IgniteCache cache = igniteRestart.cache(CACHE_NAME); + + assertNotNull("Expected to have cache [" + CACHE_NAME + "] using [" + expDfltConsistentId1 + "] as PDS folder", cache); + final Object valFromCache = cache.get("hi"); + + assertNotNull("Expected to load data from cache using [" + expDfltConsistentId1 + "] as PDS folder", valFromCache); + assertTrue(expVal.equals(valFromCache)); + + assertNodeIndexesInFolder(); //no new style nodes should be found + stopGrid(0); + } + + /** + * Tests compatible mode enabled by this test to start. + * Expected to be 2 folders and no new style folders in this case. + * + * @throws Exception if failed. 
+ */ + public void testStartOldStyleNodesByCompatibleProperty() throws Exception { + clearPropsAfterTest = true; + System.setProperty(IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID, "true"); + + final Ignite ignite1 = startGrid(0); + final Ignite ignite2 = startGrid(1); + + ignite1.active(true); + + final String expVal = "there is compatible mode with old style folders!"; + + ignite2.getOrCreateCache(CACHE_NAME).put("hi", expVal); + + assertNodeIndexesInFolder(); // expected to have no new style folders + + final Object consistentId1 = ignite1.cluster().localNode().consistentId(); + + assertPdsDirsDefaultExist(U.maskForFileName(consistentId1.toString())); + final Object consistentId2 = ignite2.cluster().localNode().consistentId(); + + assertPdsDirsDefaultExist(U.maskForFileName(consistentId2.toString())); + stopAllGrids(); + + System.clearProperty(IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID); + final Ignite igniteRestart = startGrid(0); + final Ignite igniteRestart2 = startGrid(1); + + igniteRestart2.active(true); + + assertEquals(consistentId1, igniteRestart.cluster().localNode().consistentId()); + assertEquals(consistentId2, igniteRestart2.cluster().localNode().consistentId()); + + assertNodeIndexesInFolder(); //new style nodes should not be found + stopGrid(0); + } + + /** + * Tests compatible mode enabled by this test to start, also no port is enabled. + * Expected to be 1 folder and no new style folders in this case. + * + * @throws Exception if failed. 
+ */ + public void testStartOldStyleNoPortsNodesByCompatibleProperty() throws Exception { + clearPropsAfterTest = true; + System.setProperty(IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID, "true"); + System.setProperty(IGNITE_CONSISTENT_ID_BY_HOST_WITHOUT_PORT, "true"); + + final Ignite ignite1 = startGrid(0); + + ignite1.active(true); + + final String expVal = "there is compatible mode with old style folders!"; + + ignite1.getOrCreateCache(CACHE_NAME).put("hi", expVal); + + assertNodeIndexesInFolder(); // expected to have no new style folders + + final Object consistentId1 = ignite1.cluster().localNode().consistentId(); + + assertPdsDirsDefaultExist(U.maskForFileName(consistentId1.toString())); + stopAllGrids(); + + System.clearProperty(IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID); + + final Ignite igniteRestart = startGrid(0); + + igniteRestart.active(true); + + assertEquals(consistentId1, igniteRestart.cluster().localNode().consistentId()); + + assertNodeIndexesInFolder(); //new style nodes should not be found + stopGrid(0); + System.clearProperty(IGNITE_CONSISTENT_ID_BY_HOST_WITHOUT_PORT); + } + + /** + * Test case If there are no matching folders, + * but the directory contains old-style consistent IDs. + * Ignite should print out a warning. + * + * @throws Exception if failed. 
+ */ + public void testOldStyleNodeWithUnexpectedPort() throws Exception { + this.configuredConsistentId = "127.0.0.1:49999"; //emulated old-style node with not appropriate consistent ID + final Ignite ignite = startActivateFillDataGrid(0); + final IgniteCache second = ignite.getOrCreateCache("second"); + + final int entries = 100; + + for (int i = 0; i < entries; i++) + second.put((int)(Math.random() * entries), getClass().getName()); + + final String prevVerFolder = U.maskForFileName(ignite.cluster().localNode().consistentId().toString()); + final String path = new File(new File(U.defaultWorkDirectory(), "db"), prevVerFolder).getCanonicalPath(); + + assertPdsDirsDefaultExist(prevVerFolder); + stopAllGrids(); + + this.configuredConsistentId = null; + this.strLog = new GridStringLogger(); + startActivateGrid(0); + assertNodeIndexesInFolder(0); //one 0 index folder is created + + final String wholeNodeLog = strLog.toString(); + stopAllGrids(); + + String foundWarning = null; + for (String line : wholeNodeLog.split("\n")) { + if (line.contains("There is other non-empty storage folder under storage base directory")) { + foundWarning = line; + break; + } + } + + if (foundWarning != null) + log.info("\nWARNING generated successfully [\n" + foundWarning + "\n]"); + + assertTrue("Expected to warn user on existence of old style path", + foundWarning != null); + + assertTrue("Expected to warn user on existence of old style path [" + path + "]", + foundWarning.contains(path)); + + assertTrue("Expected to print some size for [" + path + "]", + Pattern.compile(" [0-9]* bytes").matcher(foundWarning).find()); + + strLog = null; + startActivateGrid(0); + assertNodeIndexesInFolder(0); //one 0 index folder is created + stopAllGrids(); + } + + /** + * @param indexes expected new style node indexes in folders + * @throws IgniteCheckedException if failed + */ + private void assertNodeIndexesInFolder(Integer... 
indexes) throws IgniteCheckedException { + assertEquals(new TreeSet<>(Arrays.asList(indexes)), getAllNodeIndexesInFolder()); + } + + /** + * @return set of all indexes of nodes found in work folder + * @throws IgniteCheckedException if failed. + */ + @NotNull private Set getAllNodeIndexesInFolder() throws IgniteCheckedException { + final File curFolder = new File(U.defaultWorkDirectory(), PdsConsistentIdProcessor.DB_DEFAULT_FOLDER); + final Set indexes = new TreeSet<>(); + final File[] files = curFolder.listFiles(PdsConsistentIdProcessor.DB_SUBFOLDERS_NEW_STYLE_FILTER); + + for (File file : files) { + final PdsConsistentIdProcessor.FolderCandidate uid = parseSubFolderName(file, log); + + if (uid != null) + indexes.add(uid.nodeIndex()); + } + + return indexes; + } + + /** + * Checks existence of all storage-related directories + * + * @param subDirName sub directories name expected + * @throws IgniteCheckedException if IO error occur + */ + private void assertPdsDirsDefaultExist(String subDirName) throws IgniteCheckedException { + assertDirectoryExist("binary_meta", subDirName); + assertDirectoryExist(PersistentStoreConfiguration.DFLT_WAL_STORE_PATH, subDirName); + assertDirectoryExist(PersistentStoreConfiguration.DFLT_WAL_ARCHIVE_PATH, subDirName); + assertDirectoryExist(PdsConsistentIdProcessor.DB_DEFAULT_FOLDER, subDirName); + } + + /** + * Checks one folder existence. + * + * @param subFolderNames sub folders chain array to touch. + * @throws IgniteCheckedException if IO error occur. + */ + private void assertDirectoryExist(String... subFolderNames) throws IgniteCheckedException { + final File curFolder = new File(U.defaultWorkDirectory()); + + assertDirectoryExist(curFolder, subFolderNames); + } + + + /** + * Checks one folder existence. + * + * @param workFolder current work folder. + * @param subFolderNames sub folders chain array to touch. + * @throws IgniteCheckedException if IO error occur. 
+ */ + private void assertDirectoryExist(final File workFolder, String... subFolderNames) throws IgniteCheckedException { + File curFolder = workFolder; + + for (String name : subFolderNames) { + curFolder = new File(curFolder, name); + } + + final String path; + + try { + path = curFolder.getCanonicalPath(); + } + catch (IOException e) { + throw new IgniteCheckedException("Failed to convert path: [" + curFolder.getAbsolutePath() + "]", e); + } + assertTrue("Directory " + Arrays.asList(subFolderNames).toString() + + " is expected to exist [" + path + "]", curFolder.exists() && curFolder.isDirectory()); + } + +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java index c160f60eb78de..bf8cd85f6fd5a 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java @@ -28,6 +28,7 @@ import java.util.Objects; import java.util.Random; import java.util.Set; +import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; @@ -68,6 +69,7 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.PageDeltaRecord; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import org.apache.ignite.internal.processors.cache.persistence.tree.io.TrackingPageIO; import 
org.apache.ignite.internal.processors.cache.version.GridCacheVersion; @@ -575,9 +577,11 @@ public void testWalRenameDirSimple() throws Exception { for (int i = 0; i < 100; i++) cache.put(i, new IndexedObject(i)); + final Object consistentId = ignite.cluster().localNode().consistentId(); + stopGrid(1); - final File cacheDir = cacheDir("partitioned", ignite.context().discovery().consistentId().toString()); + final File cacheDir = cacheDir("partitioned", consistentId.toString()); final boolean renamed = cacheDir.renameTo(new File(cacheDir.getParent(), "cache-partitioned0")); @@ -605,14 +609,15 @@ public void testWalRenameDirSimple() throws Exception { * @return Cache dir. * @throws IgniteCheckedException If fail. */ - private File cacheDir(final String cacheName, String consId) throws IgniteCheckedException { - consId = consId.replaceAll("[\\.:]", "_"); + private File cacheDir(final String cacheName, final String consId) throws IgniteCheckedException { + final String subfolderName + = PdsConsistentIdProcessor.genNewStyleSubfolderName(0, UUID.fromString(consId)); final File dbDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false); assert dbDir.exists(); - final File consIdDir = new File(dbDir.getAbsolutePath(), consId); + final File consIdDir = new File(dbDir.getAbsolutePath(), subfolderName); assert consIdDir.exists(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java index e2f58bd51db2a..b9c60b255f057 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java @@ -30,6 +30,7 @@ import java.util.List; import java.util.Map; import 
java.util.Objects; +import java.util.UUID; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -64,14 +65,17 @@ import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.apache.ignite.transactions.Transaction; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_ARCHIVED; +import static org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor.genNewStyleSubfolderName; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; /** @@ -87,9 +91,6 @@ public class IgniteWalReaderTest extends GridCommonAbstractTest { /** additional cache for testing different combinations of types in WAL */ private static final String CACHE_ADDL_NAME = "cache1"; - /** Fill wal with some data before iterating. Should be true for non local run */ - private static final boolean fillWalBeforeTest = true; - /** Delete DB dir before test. 
*/ private static final boolean deleteBefore = true; @@ -152,7 +153,7 @@ public class IgniteWalReaderTest extends GridCommonAbstractTest { } /** {@inheritDoc} */ - @Override protected void beforeTestsStarted() throws Exception { + @Override protected void beforeTest() throws Exception { stopAllGrids(); if (deleteBefore) @@ -171,8 +172,7 @@ public class IgniteWalReaderTest extends GridCommonAbstractTest { * @throws IgniteCheckedException If failed. */ private void deleteWorkFiles() throws IgniteCheckedException { - if (fillWalBeforeTest) - deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false)); + deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false)); } /** @@ -181,27 +181,23 @@ private void deleteWorkFiles() throws IgniteCheckedException { public void testFillWalAndReadRecords() throws Exception { final int cacheObjectsToWrite = 10000; - final String consistentId; - if (fillWalBeforeTest) { - final Ignite ignite0 = startGrid("node0"); + final Ignite ignite0 = startGrid("node0"); - ignite0.active(true); + ignite0.active(true); - consistentId = U.maskForFileName(ignite0.cluster().localNode().consistentId().toString()); + final Serializable consistentId = (Serializable)ignite0.cluster().localNode().consistentId(); + final String subfolderName = genNewStyleSubfolderName(0, (UUID)consistentId); - putDummyRecords(ignite0, cacheObjectsToWrite); + putDummyRecords(ignite0, cacheObjectsToWrite); - stopGrid("node0"); - } - else - consistentId = "127_0_0_1_47500"; + stopGrid("node0"); final String workDir = U.defaultWorkDirectory(); final File db = U.resolveWorkDirectory(workDir, DFLT_STORE_DIR, false); final File wal = new File(db, "wal"); final File walArchive = new File(wal, "archive"); - final MockWalIteratorFactory mockItFactory = new MockWalIteratorFactory(log, PAGE_SIZE, consistentId, WAL_SEGMENTS); + final MockWalIteratorFactory mockItFactory = new MockWalIteratorFactory(log, PAGE_SIZE, 
consistentId, subfolderName, WAL_SEGMENTS); final WALIterator it = mockItFactory.iterator(wal, walArchive); final int cntUsingMockIter = iterateAndCount(it, false); @@ -209,11 +205,11 @@ public void testFillWalAndReadRecords() throws Exception { assert cntUsingMockIter > 0; assert cntUsingMockIter > cacheObjectsToWrite; - final File walArchiveDirWithConsistentId = new File(walArchive, consistentId); - final File walWorkDirWithConsistentId = new File(wal, consistentId); + final File walArchiveDirWithConsistentId = new File(walArchive, subfolderName); + final File walWorkDirWithConsistentId = new File(wal, subfolderName); final File binaryMeta = U.resolveWorkDirectory(workDir, "binary_meta", false); - final File binaryMetaWithConsId = new File(binaryMeta, consistentId); + final File binaryMetaWithConsId = new File(binaryMeta, subfolderName); final File marshaller = U.resolveWorkDirectory(workDir, "marshaller", false); final IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log, PAGE_SIZE, binaryMetaWithConsId, marshaller); @@ -304,7 +300,7 @@ public void testArchiveCompletedEventFired() throws Exception { final IgniteEvents evts = ignite.events(); if (!evts.isEnabled(EVT_WAL_SEGMENT_ARCHIVED)) - return; //nothing to test + assertTrue("nothing to test", false); evts.localListen(new IgnitePredicate() { @Override public boolean apply(Event e) { @@ -428,29 +424,23 @@ public void testTxFillWalAndExtractDataRecords() throws Exception { final int cntEntries = 1000; final int txCnt = 100; - final Map ctrlMap = new HashMap<>(); - final String consistentId; - if (fillWalBeforeTest) { - final Ignite ignite0 = startGrid("node0"); - - ignite0.active(true); - - final IgniteCache entries = txPutDummyRecords(ignite0, cntEntries, txCnt); + final Ignite ignite0 = startGrid("node0"); - for (Cache.Entry next : entries) { - ctrlMap.put(next.getKey(), next.getValue()); - } + ignite0.active(true); - consistentId = 
U.maskForFileName(ignite0.cluster().localNode().consistentId().toString()); + final IgniteCache entries = txPutDummyRecords(ignite0, cntEntries, txCnt); - stopGrid("node0"); + final Map ctrlMap = new HashMap<>(); + for (Cache.Entry next : entries) { + ctrlMap.put(next.getKey(), next.getValue()); } - else - consistentId = "127_0_0_1_47500"; + + final String subfolderName = genDbSubfolderName(ignite0, 0); + stopGrid("node0"); final String workDir = U.defaultWorkDirectory(); final File binaryMeta = U.resolveWorkDirectory(workDir, "binary_meta", false); - final File binaryMetaWithConsId = new File(binaryMeta, consistentId); + final File binaryMetaWithConsId = new File(binaryMeta, subfolderName); final File marshallerMapping = U.resolveWorkDirectory(workDir, "marshaller", false); final IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log, @@ -474,17 +464,28 @@ public void testTxFillWalAndExtractDataRecords() throws Exception { } } }; - scanIterateAndCount(factory, workDir, consistentId, cntEntries, txCnt, objConsumer, null); + scanIterateAndCount(factory, workDir, subfolderName, cntEntries, txCnt, objConsumer, null); assert ctrlMap.isEmpty() : " Control Map is not empty after reading entries " + ctrlMap; } + /** + * Generates DB subfolder name for provided node index (local) and UUID (consistent ID) + * + * @param ignite ignite instance. + * @param nodeIdx node index. + * @return folder file name + */ + @NotNull private String genDbSubfolderName(Ignite ignite, int nodeIdx) { + return genNewStyleSubfolderName(nodeIdx, (UUID)ignite.cluster().localNode().consistentId()); + } + /** * Scan WAL and WAL archive for logical records and its entries. * * @param factory WAL iterator factory. * @param workDir Ignite work directory. - * @param consistentId consistent ID. + * @param subfolderName DB subfolder name based on consistent ID. * @param expCntEntries minimum expected entries count to find. * @param expTxCnt minimum expected transaction count to find. 
* @param objConsumer object handler, called for each object found in logical data records. @@ -494,7 +495,7 @@ public void testTxFillWalAndExtractDataRecords() throws Exception { private void scanIterateAndCount( final IgniteWalIteratorFactory factory, final String workDir, - final String consistentId, + final String subfolderName, final int expCntEntries, final int expTxCnt, @Nullable final BiConsumer objConsumer, @@ -504,11 +505,10 @@ private void scanIterateAndCount( final File wal = new File(db, "wal"); final File walArchive = new File(wal, "archive"); - final File walArchiveDirWithConsistentId = new File(walArchive, consistentId); + final File walArchiveDirWithConsistentId = new File(walArchive, subfolderName); final File[] files = walArchiveDirWithConsistentId.listFiles(FileWriteAheadLogManager.WAL_SEGMENT_FILE_FILTER); - - assert files != null : "Can't iterate over files [" + walArchiveDirWithConsistentId + "] Directory is N/A"; + A.notNull(files, "Can't iterate over files [" + walArchiveDirWithConsistentId + "] Directory is N/A"); final WALIterator iter = factory.iteratorArchiveFiles(files); final Map cntArch = iterateAndCountDataRecord(iter, objConsumer, dataRecordHnd); @@ -520,8 +520,8 @@ private void scanIterateAndCount( log.info("Total tx found loaded using archive directory (file-by-file): " + txCntObservedArch); - final File walWorkDirWithConsistentId = new File(wal, consistentId); - final File[] workFiles = walWorkDirWithConsistentId.listFiles(FileWriteAheadLogManager.WAL_SEGMENT_FILE_FILTER); + final File walWorkDirWithNodeSubDir = new File(wal, subfolderName); + final File[] workFiles = walWorkDirWithNodeSubDir.listFiles(FileWriteAheadLogManager.WAL_SEGMENT_FILE_FILTER); final WALIterator tuples = factory.iteratorWorkFiles(workFiles); final Map cntWork = iterateAndCountDataRecord(tuples, objConsumer, dataRecordHnd); @@ -541,71 +541,66 @@ private void scanIterateAndCount( */ public void testFillWalWithDifferentTypes() throws Exception { int 
cntEntries = 0; - final String consistentId; final Map ctrlMap = new HashMap<>(); final Map ctrlMapForBinaryObjects = new HashMap<>(); final Collection ctrlStringsToSearch = new HashSet<>(); final Collection ctrlStringsForBinaryObjSearch = new HashSet<>(); - if (fillWalBeforeTest) { - final Ignite ignite0 = startGrid("node0"); - ignite0.active(true); - - final IgniteCache addlCache = ignite0.getOrCreateCache(CACHE_ADDL_NAME); - addlCache.put("1", "2"); - addlCache.put(1, 2); - addlCache.put(1L, 2L); - addlCache.put(TestEnum.A, "Enum_As_Key"); - addlCache.put("Enum_As_Value", TestEnum.B); - addlCache.put(TestEnum.C, TestEnum.C); - - addlCache.put("Serializable", new TestSerializable(42)); - addlCache.put(new TestSerializable(42), "Serializable_As_Key"); - addlCache.put("Externalizable", new TestExternalizable(42)); - addlCache.put(new TestExternalizable(42), "Externalizable_As_Key"); - addlCache.put(292, new IndexedObject(292)); - - final String search1 = "SomeUnexpectedStringValueAsKeyToSearch"; - ctrlStringsToSearch.add(search1); - ctrlStringsForBinaryObjSearch.add(search1); - addlCache.put(search1, "SearchKey"); - - String search2 = "SomeTestStringContainerToBePrintedLongLine"; - final TestStringContainerToBePrinted val = new TestStringContainerToBePrinted(search2); - ctrlStringsToSearch.add(val.toString()); //will validate original toString() was called - ctrlStringsForBinaryObjSearch.add(search2); - addlCache.put("SearchValue", val); - - String search3 = "SomeTestStringContainerToBePrintedLongLine2"; - final TestStringContainerToBePrinted key = new TestStringContainerToBePrinted(search3); - ctrlStringsToSearch.add(key.toString()); //will validate original toString() was called - ctrlStringsForBinaryObjSearch.add(search3); //validate only string itself - addlCache.put(key, "SearchKey"); - - cntEntries = addlCache.size(); - for (Cache.Entry next : addlCache) { - ctrlMap.put(next.getKey(), next.getValue()); - } + final Ignite ignite0 = startGrid("node0"); + 
ignite0.active(true); + + final IgniteCache addlCache = ignite0.getOrCreateCache(CACHE_ADDL_NAME); + addlCache.put("1", "2"); + addlCache.put(1, 2); + addlCache.put(1L, 2L); + addlCache.put(TestEnum.A, "Enum_As_Key"); + addlCache.put("Enum_As_Value", TestEnum.B); + addlCache.put(TestEnum.C, TestEnum.C); + + addlCache.put("Serializable", new TestSerializable(42)); + addlCache.put(new TestSerializable(42), "Serializable_As_Key"); + addlCache.put("Externalizable", new TestExternalizable(42)); + addlCache.put(new TestExternalizable(42), "Externalizable_As_Key"); + addlCache.put(292, new IndexedObject(292)); + + final String search1 = "SomeUnexpectedStringValueAsKeyToSearch"; + ctrlStringsToSearch.add(search1); + ctrlStringsForBinaryObjSearch.add(search1); + addlCache.put(search1, "SearchKey"); + + String search2 = "SomeTestStringContainerToBePrintedLongLine"; + final TestStringContainerToBePrinted val = new TestStringContainerToBePrinted(search2); + ctrlStringsToSearch.add(val.toString()); //will validate original toString() was called + ctrlStringsForBinaryObjSearch.add(search2); + addlCache.put("SearchValue", val); + + String search3 = "SomeTestStringContainerToBePrintedLongLine2"; + final TestStringContainerToBePrinted key = new TestStringContainerToBePrinted(search3); + ctrlStringsToSearch.add(key.toString()); //will validate original toString() was called + ctrlStringsForBinaryObjSearch.add(search3); //validate only string itself + addlCache.put(key, "SearchKey"); + + cntEntries = addlCache.size(); + for (Cache.Entry next : addlCache) { + ctrlMap.put(next.getKey(), next.getValue()); + } for (Cache.Entry next : addlCache) { ctrlMapForBinaryObjects.put(next.getKey(), next.getValue()); } - consistentId = U.maskForFileName(ignite0.cluster().localNode().consistentId().toString()); + final String subfolderName = genDbSubfolderName(ignite0, 0); - stopGrid("node0"); - } - else - consistentId = "127_0_0_1_47500"; + stopGrid("node0"); final String workDir = 
U.defaultWorkDirectory(); final File binaryMeta = U.resolveWorkDirectory(workDir, "binary_meta", false); - final File binaryMetaWithConsId = new File(binaryMeta, consistentId); + final File binaryMetaWithNodeSubfolder = new File(binaryMeta, subfolderName); final File marshallerMapping = U.resolveWorkDirectory(workDir, "marshaller", false); final IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log, PAGE_SIZE, - binaryMetaWithConsId, + binaryMetaWithNodeSubfolder, marshallerMapping); final BiConsumer objConsumer = new BiConsumer() { @Override public void accept(Object key, Object val) { @@ -634,7 +629,7 @@ public void testFillWalWithDifferentTypes() throws Exception { } } }; - scanIterateAndCount(factory, workDir, consistentId, cntEntries, 0, objConsumer, toStrChecker); + scanIterateAndCount(factory, workDir, subfolderName, cntEntries, 0, objConsumer, toStrChecker); assert ctrlMap.isEmpty() : " Control Map is not empty after reading entries: " + ctrlMap; assert ctrlStringsToSearch.isEmpty() : " Control Map for strings in entries is not empty after" + @@ -642,7 +637,7 @@ public void testFillWalWithDifferentTypes() throws Exception { //Validate same WAL log with flag binary objects only final IgniteWalIteratorFactory keepBinFactory = new IgniteWalIteratorFactory(log, PAGE_SIZE, - binaryMetaWithConsId, + binaryMetaWithNodeSubfolder, marshallerMapping, true); final BiConsumer binObjConsumer = new BiConsumer() { @@ -693,7 +688,7 @@ else if (val instanceof BinaryObject) { } }; - final Consumer binObjToStringChecker = new Consumer() { + final Consumer binObjToStrChecker = new Consumer() { @Override public void accept(DataRecord record) { String strRepresentation = record.toString(); for (Iterator iter = ctrlStringsForBinaryObjSearch.iterator(); iter.hasNext(); ) { @@ -705,7 +700,7 @@ else if (val instanceof BinaryObject) { } } }; - scanIterateAndCount(keepBinFactory, workDir, consistentId, cntEntries, 0, binObjConsumer, binObjToStringChecker); + 
scanIterateAndCount(keepBinFactory, workDir, subfolderName, cntEntries, 0, binObjConsumer, binObjToStrChecker); assert ctrlMapForBinaryObjects.isEmpty() : " Control Map is not empty after reading entries: " + ctrlMapForBinaryObjects; assert ctrlStringsForBinaryObjSearch.isEmpty() : " Control Map for strings in entries is not empty after" + diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/MockWalIteratorFactory.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/MockWalIteratorFactory.java index 4030e53458a3d..05636ebeeb576 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/MockWalIteratorFactory.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/MockWalIteratorFactory.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.cache.persistence.db.wal.reader; import java.io.File; +import java.io.Serializable; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.configuration.IgniteConfiguration; @@ -29,6 +30,8 @@ import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFoldersResolver; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager; import org.jetbrains.annotations.Nullable; import org.mockito.Mockito; @@ -47,25 +50,34 @@ public class MockWalIteratorFactory { private final int pageSize; /** Consistent node id. 
*/ - private final String consistentId; + private final Serializable consistentId; + + /** DB storage subfolder based on node index and consistent node ID. */ + private String subfolderName; /** Segments count in work dir. */ private int segments; + /** * Creates factory * @param log Logger. * @param pageSize Page size. * @param consistentId Consistent id. + * @param subfolderName DB storage subfolder name based on node index and consistent node ID. * @param segments Segments. */ - public MockWalIteratorFactory(@Nullable IgniteLogger log, int pageSize, String consistentId, int segments) { + public MockWalIteratorFactory(@Nullable IgniteLogger log, + int pageSize, + Serializable consistentId, + String subfolderName, + int segments) { this.log = log == null ? Mockito.mock(IgniteLogger.class) : log; this.pageSize = pageSize; this.consistentId = consistentId; + this.subfolderName = subfolderName; + this.segments = segments; } - /** * Creates iterator * @param wal WAL directory without node consistent id * @param walArchive Wal archive. @@ -93,10 +105,13 @@ public WALIterator iterator(File wal, File walArchive) throws IgniteCheckedExcep when(ctx.config()).thenReturn(cfg); when(ctx.clientNode()).thenReturn(false); + when(ctx.pdsFolderResolver()).thenReturn(new PdsFoldersResolver() { + @Override public PdsFolderSettings resolveFolders() { + return new PdsFolderSettings(new File("."), subfolderName, consistentId, null, false); + } + }); final GridDiscoveryManager disco = Mockito.mock(GridDiscoveryManager.class); - - when(disco.consistentId()).thenReturn(consistentId); when(ctx.discovery()).thenReturn(disco); final IgniteWriteAheadLogManager mgr = new FileWriteAheadLogManager(ctx); diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java index 29f7255eaf722..d92d848bea1a7 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java @@ -29,6
+29,7 @@ import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsRebalancingOnNotStableTopologyTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsTransactionsHangTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsWholeClusterRestartTest; +import org.apache.ignite.internal.processors.cache.persistence.db.filename.IgniteUidAsConsistentIdMigrationTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushFailoverTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalHistoryReservationsTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalSerializerVersionTest; @@ -78,6 +79,9 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(IgnitePdsExchangeDuringCheckpointTest.class); + // new style folders with generated consistent ID test + suite.addTestSuite(IgniteUidAsConsistentIdMigrationTest.class); + suite.addTestSuite(IgniteWalSerializerVersionTest.class); return suite; From 2e013fbe85da059580e97d57c15e9cbc92c0aa54 Mon Sep 17 00:00:00 2001 From: dpavlov Date: Wed, 4 Oct 2017 15:05:48 +0300 Subject: [PATCH 002/243] IGNITE-6554 Atomic cache remove operations are not logged to WAL - Fixes #2800. 
Signed-off-by: Alexey Goncharuk --- .../processors/cache/GridCacheMapEntry.java | 2 + .../db/wal/reader/IgniteWalReaderTest.java | 159 +++++++++++++++++- 2 files changed, 158 insertions(+), 3 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java index 7b60b9ccf0a47..54b8dc35a57eb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java @@ -4486,6 +4486,8 @@ private void remove(@Nullable GridCacheVersionConflictContext conflictCtx, if (updateCntr != null) updateCntr0 = updateCntr; + entry.logUpdate(op, null, newVer, 0, updateCntr0); + if (oldVal != null) { assert !entry.deletedUnlocked(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java index b9c60b255f057..6db2784cb6635 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java @@ -24,6 +24,7 @@ import java.io.ObjectOutput; import java.io.Serializable; import java.util.Collection; +import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -61,6 +62,7 @@ import org.apache.ignite.internal.pagemem.wal.record.UnwrapDataEntry; import org.apache.ignite.internal.pagemem.wal.record.WALRecord; import org.apache.ignite.internal.processors.cache.CacheObject; +import org.apache.ignite.internal.processors.cache.GridCacheOperation; import 
org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory; @@ -75,8 +77,9 @@ import org.jetbrains.annotations.Nullable; import static org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_ARCHIVED; -import static org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor.genNewStyleSubfolderName; +import static org.apache.ignite.internal.processors.cache.GridCacheOperation.DELETE; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; +import static org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor.genNewStyleSubfolderName; /** * Test suite for WAL segments reader and event generator. @@ -109,6 +112,9 @@ public class IgniteWalReaderTest extends GridCommonAbstractTest { */ private int archiveIncompleteSegmentAfterInactivityMs; + /** Custom wal mode. */ + private WALMode customWalMode; + /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { final IgniteConfiguration cfg = super.getConfiguration(gridName); @@ -143,7 +149,7 @@ public class IgniteWalReaderTest extends GridCommonAbstractTest { pCfg.setWalHistorySize(1); pCfg.setWalSegmentSize(1024 * 1024); pCfg.setWalSegments(WAL_SEGMENTS); - pCfg.setWalMode(WALMode.BACKGROUND); + pCfg.setWalMode(customWalMode != null ? customWalMode : WALMode.BACKGROUND); if (archiveIncompleteSegmentAfterInactivityMs > 0) pCfg.setWalAutoArchiveAfterInactivity(archiveIncompleteSegmentAfterInactivityMs); @@ -708,6 +714,128 @@ else if (val instanceof BinaryObject) { } + /** + * Creates and fills cache with data. + * + * @param ig Ignite instance. + * @param mode Cache Atomicity Mode. 
+ */ + private void createCache2(Ignite ig, CacheAtomicityMode mode) { + if (log.isInfoEnabled()) + log.info("Populating the cache..."); + + final CacheConfiguration cfg = new CacheConfiguration<>("Org" + "11"); + cfg.setAtomicityMode(mode); + final IgniteCache cache = ig.getOrCreateCache(cfg).withKeepBinary(); + + try (Transaction tx = ig.transactions().txStart()) { + for (int i = 0; i < 10; i++) { + + cache.put(i, new Organization(i, "Organization-" + i)); + + if (i % 2 == 0) + cache.put(i, new Organization(i, "Organization-updated-" + i)); + + if (i % 5 == 0) + cache.remove(i); + } + tx.commit(); + } + + } + + /** + * Test if DELETE operation can be found for transactional cache after mixed cache operations including remove(). + * + * @throws Exception if failed. + */ + public void testRemoveOperationPresentedForDataEntry() throws Exception { + runRemoveOperationTest(CacheAtomicityMode.TRANSACTIONAL); + } + + /** + * Test if DELETE operation can be found for atomic cache after mixed cache operations including remove(). + * + * @throws Exception if failed. + */ + public void testRemoveOperationPresentedForDataEntryForAtomic() throws Exception { + runRemoveOperationTest(CacheAtomicityMode.ATOMIC); + } + + + /** + * Test if DELETE operation can be found after mixed cache operations including remove(). + * + * @throws Exception if failed. + * @param mode Cache Atomicity Mode. 
+ */ + private void runRemoveOperationTest(CacheAtomicityMode mode) throws Exception { + final Ignite ignite = startGrid("node0"); + + ignite.active(true); + createCache2(ignite, mode); + ignite.active(false); + + final String subfolderName = genDbSubfolderName(ignite, 0); + + stopGrid("node0"); + + final String workDir = U.defaultWorkDirectory(); + final IgniteWalIteratorFactory factory = createWalIteratorFactory(subfolderName, workDir); + + final StringBuilder builder = new StringBuilder(); + final Map operationsFound = new EnumMap<>(GridCacheOperation.class); + + scanIterateAndCount(factory, workDir, subfolderName, 0, 0, null, new Consumer() { + @Override public void accept(DataRecord dataRecord) { + final List entries = dataRecord.writeEntries(); + + builder.append("{"); + for (DataEntry entry : entries) { + final GridCacheOperation op = entry.op(); + final Integer cnt = operationsFound.get(op); + + operationsFound.put(op, cnt == null ? 1 : (cnt + 1)); + + if (entry instanceof UnwrapDataEntry) { + final UnwrapDataEntry entry1 = (UnwrapDataEntry)entry; + + builder.append(entry1.op()).append(" for ").append(entry1.unwrappedKey()); + final GridCacheVersion ver = entry.nearXidVersion(); + + builder.append(", "); + + if (ver != null) + builder.append("tx=").append(ver).append(", "); + } + } + + builder.append("}\n"); + } + }); + + final Integer deletesFound = operationsFound.get(DELETE); + + if (log.isInfoEnabled()) + log.info(builder.toString()); + + assertTrue("Delete operations should be found in log: " + operationsFound, + deletesFound != null && deletesFound > 0); + } + + @NotNull private IgniteWalIteratorFactory createWalIteratorFactory(String subfolderName, + String workDir) throws IgniteCheckedException { + final File binaryMeta = U.resolveWorkDirectory(workDir, "binary_meta", false); + final File binaryMetaWithConsId = new File(binaryMeta, subfolderName); + final File marshallerMapping = U.resolveWorkDirectory(workDir, "marshaller", false); + + return new 
IgniteWalIteratorFactory(log, + PAGE_SIZE, + binaryMetaWithConsId, + marshallerMapping); + } + + /** * @param values collection with numbers * @return sum of numbers @@ -776,7 +904,7 @@ else if (entry instanceof LazyDataEntry) { "; Key: " + unwrappedKeyObj + "; Value: " + unwrappedValObj); - if (cacheObjHnd != null && unwrappedKeyObj != null || unwrappedValObj != null) + if (cacheObjHnd != null && (unwrappedKeyObj != null || unwrappedValObj != null)) cacheObjHnd.accept(unwrappedKeyObj, unwrappedValObj); final Integer entriesUnderTx = entriesUnderTxFound.get(globalTxId); @@ -967,4 +1095,29 @@ public TestStringContainerToBePrinted(String data) { '}'; } } + + /** Test class for storing in ignite */ + private static class Organization { + /** Key. */ + private final int key; + /** Name. */ + private final String name; + + /** + * @param key Key. + * @param name Name. + */ + public Organization(int key, String name) { + this.key = key; + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return "Organization{" + + "key=" + key + + ", name='" + name + '\'' + + '}'; + } + } } From 959bf853b1a77fa674ac8115fcf57c31feb0e6c0 Mon Sep 17 00:00:00 2001 From: oleg-ostanin Date: Wed, 4 Oct 2017 17:35:50 +0300 Subject: [PATCH 003/243] Removed excluding ML from examples/src (cherry picked from commit 78f77b1) --- assembly/release-fabric-base.xml | 1 - 1 file changed, 1 deletion(-) diff --git a/assembly/release-fabric-base.xml b/assembly/release-fabric-base.xml index a16d99683a70a..8e8bd75ad9329 100644 --- a/assembly/release-fabric-base.xml +++ b/assembly/release-fabric-base.xml @@ -239,7 +239,6 @@ **/package.html src/test/** - src/main/ml/** From d4e5d4fd3e989b69f7bfa49986ee8f8d04cac62f Mon Sep 17 00:00:00 2001 From: tledkov-gridgain Date: Thu, 5 Oct 2017 11:55:26 +0300 Subject: [PATCH 004/243] IGNITE-6529 JDBC: support column metadata 'nullable' property. This closes #2793. 
--- .../internal/jdbc2/JdbcMetadataSelfTest.java | 28 ++-- .../jdbc/thin/JdbcThinMetadataSelfTest.java | 33 ++-- .../jdbc/thin/JdbcThinDatabaseMetadata.java | 3 +- .../internal/jdbc/thin/JdbcThinTcpIo.java | 4 +- .../internal/jdbc2/JdbcDatabaseMetadata.java | 70 ++++++-- .../cache/query/GridCacheQueryManager.java | 87 +++++++++- .../query/GridCacheQuerySqlMetadataJobV2.java | 154 ++++++++++++++++++ .../query/GridCacheQuerySqlMetadataV2.java | 101 ++++++++++++ .../cache/query/GridCacheSqlMetadata.java | 8 + .../processors/odbc/jdbc/JdbcColumnMeta.java | 10 ++ .../odbc/jdbc/JdbcColumnMetaV2.java | 74 +++++++++ .../odbc/jdbc/JdbcConnectionContext.java | 4 +- .../odbc/jdbc/JdbcMetaColumnsResult.java | 28 +++- .../odbc/jdbc/JdbcMetaColumnsResultV2.java | 50 ++++++ .../odbc/jdbc/JdbcRequestHandler.java | 32 +++- .../processors/odbc/jdbc/JdbcResult.java | 8 + 16 files changed, 642 insertions(+), 52 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQuerySqlMetadataJobV2.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQuerySqlMetadataV2.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV2.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResultV2.java diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcMetadataSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcMetadataSelfTest.java index 6020a3a4c3e02..bdc6644bb64c5 100755 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcMetadataSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcMetadataSelfTest.java @@ -42,7 +42,7 @@ import org.apache.ignite.configuration.ConnectorConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import 
org.apache.ignite.internal.IgniteVersionUtils; -import org.apache.ignite.internal.binary.BinaryMarshaller; +import org.apache.ignite.internal.processors.query.QueryEntityEx; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; @@ -78,11 +78,16 @@ public class JdbcMetadataSelfTest extends GridCommonAbstractTest { cfg.setCacheConfiguration( cacheConfiguration("pers").setQueryEntities(Arrays.asList( - new QueryEntity(AffinityKey.class, Person.class) - .setIndexes(Arrays.asList( - new QueryIndex("orgId"), - new QueryIndex().setFields(persFields)))) - ), + new QueryEntityEx( + new QueryEntity(AffinityKey.class.getName(), Person.class.getName()) + .addQueryField("name", String.class.getName(), null) + .addQueryField("age", Integer.class.getName(), null) + .addQueryField("orgId", Integer.class.getName(), null) + .setIndexes(Arrays.asList( + new QueryIndex("orgId"), + new QueryIndex().setFields(persFields)))) + .setNotNullFields(new HashSet<>(Arrays.asList("age", "name"))) + )), cacheConfiguration("org").setQueryEntities(Arrays.asList( new QueryEntity(AffinityKey.class, Organization.class)))); @@ -208,7 +213,6 @@ public void testGetTables() throws Exception { * @throws Exception If failed. 
*/ public void testGetColumns() throws Exception { - final boolean primitivesInformationIsLostAfterStore = ignite(0).configuration().getMarshaller() instanceof BinaryMarshaller; try (Connection conn = DriverManager.getConnection(BASE_URL)) { DatabaseMetaData meta = conn.getMetaData(); @@ -232,11 +236,15 @@ public void testGetColumns() throws Exception { if ("NAME".equals(name)) { assertEquals(VARCHAR, rs.getInt("DATA_TYPE")); assertEquals("VARCHAR", rs.getString("TYPE_NAME")); - assertEquals(1, rs.getInt("NULLABLE")); - } else if ("AGE".equals(name) || "ORGID".equals(name)) { + assertEquals(0, rs.getInt("NULLABLE")); + } else if ("AGE".equals(name)) { + assertEquals(INTEGER, rs.getInt("DATA_TYPE")); + assertEquals("INTEGER", rs.getString("TYPE_NAME")); + assertEquals(0, rs.getInt("NULLABLE")); + } else if ("ORGID".equals(name)) { assertEquals(INTEGER, rs.getInt("DATA_TYPE")); assertEquals("INTEGER", rs.getString("TYPE_NAME")); - assertEquals(primitivesInformationIsLostAfterStore ? 1 : 0, rs.getInt("NULLABLE")); + assertEquals(1, rs.getInt("NULLABLE")); } cnt++; diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java index 01b2e8a6e97a9..abbe4e15c8df1 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java @@ -41,7 +41,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteVersionUtils; -import org.apache.ignite.internal.binary.BinaryMarshaller; +import org.apache.ignite.internal.processors.query.QueryEntityEx; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import 
org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; @@ -118,13 +118,15 @@ protected CacheConfiguration cacheConfiguration(QueryEntity qryEntity) { persFields.put("age", false); IgniteCache personCache = jcache(grid(0), cacheConfiguration( - new QueryEntity(AffinityKey.class.getName(), Person.class.getName()) - .addQueryField("name", String.class.getName(), null) - .addQueryField("age", Integer.class.getName(), null) - .addQueryField("orgId", Integer.class.getName(), null) - .setIndexes(Arrays.asList( - new QueryIndex("orgId"), - new QueryIndex().setFields(persFields))) + new QueryEntityEx( + new QueryEntity(AffinityKey.class.getName(), Person.class.getName()) + .addQueryField("name", String.class.getName(), null) + .addQueryField("age", Integer.class.getName(), null) + .addQueryField("orgId", Integer.class.getName(), null) + .setIndexes(Arrays.asList( + new QueryIndex("orgId"), + new QueryIndex().setFields(persFields)))) + .setNotNullFields(new HashSet<>(Arrays.asList("age", "name"))) ), "pers"); assert personCache != null; @@ -251,9 +253,6 @@ public void testGetAllTables() throws Exception { * @throws Exception If failed. 
*/ public void testGetColumns() throws Exception { - final boolean primitivesInformationIsLostAfterStore = ignite(0).configuration().getMarshaller() - instanceof BinaryMarshaller; - try (Connection conn = DriverManager.getConnection(URL)) { conn.setSchema("pers"); @@ -279,18 +278,22 @@ public void testGetColumns() throws Exception { if ("NAME".equals(name)) { assert rs.getInt("DATA_TYPE") == VARCHAR; assert "VARCHAR".equals(rs.getString("TYPE_NAME")); + assert rs.getInt("NULLABLE") == 0; + } else if ("ORGID".equals(name)) { + assert rs.getInt("DATA_TYPE") == INTEGER; + assert "INTEGER".equals(rs.getString("TYPE_NAME")); assert rs.getInt("NULLABLE") == 1; - } else if ("AGE".equals(name) || "ORGID".equals(name)) { + } else if ("AGE".equals(name)) { assert rs.getInt("DATA_TYPE") == INTEGER; assert "INTEGER".equals(rs.getString("TYPE_NAME")); - assertEquals(primitivesInformationIsLostAfterStore ? 1 : 0, rs.getInt("NULLABLE")); + assert rs.getInt("NULLABLE") == 0; } - if ("_KEY".equals(name)) { + else if ("_KEY".equals(name)) { assert rs.getInt("DATA_TYPE") == OTHER; assert "OTHER".equals(rs.getString("TYPE_NAME")); assert rs.getInt("NULLABLE") == 0; } - if ("_VAL".equals(name)) { + else if ("_VAL".equals(name)) { assert rs.getInt("DATA_TYPE") == OTHER; assert "OTHER".equals(rs.getString("TYPE_NAME")); assert rs.getInt("NULLABLE") == 0; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java index 2ce7983aebce7..8b26900d9b9ef 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java @@ -29,7 +29,6 @@ import java.util.LinkedList; import java.util.List; import org.apache.ignite.internal.IgniteVersionUtils; -import org.apache.ignite.internal.jdbc2.JdbcUtils; import 
org.apache.ignite.internal.processors.odbc.jdbc.JdbcColumnMeta; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcIndexMeta; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcMetaColumnsRequest; @@ -845,7 +844,7 @@ private List columnRow(JdbcColumnMeta colMeta, int pos) { row.add((Integer)null); row.add((Integer)null); row.add(10); - row.add(JdbcUtils.nullable(colMeta.columnName(), colMeta.dataTypeClass()) ? 1 : 0 ); + row.add(colMeta.isNullable() ? 1 : 0); row.add((String)null); row.add((String)null); row.add(Integer.MAX_VALUE); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java index 7ac9c2cc233c1..688f90820287c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java @@ -50,10 +50,10 @@ public class JdbcThinTcpIo { private static final ClientListenerProtocolVersion VER_2_1_0 = ClientListenerProtocolVersion.create(2, 1, 0); /** Version 2.3.1. */ - private static final ClientListenerProtocolVersion VER_2_3_1 = ClientListenerProtocolVersion.create(2, 3, 1); + private static final ClientListenerProtocolVersion VER_2_3_0 = ClientListenerProtocolVersion.create(2, 3, 0); /** Current version. */ - private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_3_1; + private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_3_0; /** Initial output stream capacity for handshake. 
*/ private static final int HANDSHAKE_MSG_SIZE = 13; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcDatabaseMetadata.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcDatabaseMetadata.java index 4c21cbd2064de..03fde791a1924 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcDatabaseMetadata.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcDatabaseMetadata.java @@ -26,6 +26,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -59,7 +60,7 @@ public class JdbcDatabaseMetadata implements DatabaseMetaData { private final JdbcConnection conn; /** Metadata. */ - private Map>> meta; + private Map>> meta; /** Index info. */ private Collection> indexes; @@ -714,7 +715,7 @@ public class JdbcDatabaseMetadata implements DatabaseMetaData { List> rows = new LinkedList<>(); if (validCatalogPattern(catalog) && (tblTypes == null || Arrays.asList(tblTypes).contains("TABLE"))) { - for (Map.Entry>> schema : meta.entrySet()) { + for (Map.Entry>> schema : meta.entrySet()) { if (matches(schema.getKey(), schemaPtrn)) { for (String tbl : schema.getValue().keySet()) { if (matches(tbl, tblNamePtrn)) @@ -796,14 +797,14 @@ private List tableRow(String schema, String tbl) { int cnt = 0; if (validCatalogPattern(catalog)) { - for (Map.Entry>> schema : meta.entrySet()) { + for (Map.Entry>> schema : meta.entrySet()) { if (matches(schema.getKey(), schemaPtrn)) { - for (Map.Entry> tbl : schema.getValue().entrySet()) { + for (Map.Entry> tbl : schema.getValue().entrySet()) { if (matches(tbl.getKey(), tblNamePtrn)) { - for (Map.Entry col : tbl.getValue().entrySet()) { + for (Map.Entry col : tbl.getValue().entrySet()) { rows.add(columnRow(schema.getKey(), tbl.getKey(), col.getKey(), - JdbcUtils.type(col.getValue()), JdbcUtils.typeName(col.getValue()), - 
JdbcUtils.nullable(col.getKey(), col.getValue()), ++cnt)); + JdbcUtils.type(col.getValue().typeName()), JdbcUtils.typeName(col.getValue().typeName()), + !col.getValue().isNotNull(), ++cnt)); } } } @@ -925,9 +926,9 @@ private List columnRow(String schema, String tbl, String col, int type, List> rows = new LinkedList<>(); if (validCatalogPattern(catalog)) { - for (Map.Entry>> schema : meta.entrySet()) { + for (Map.Entry>> schema : meta.entrySet()) { if (matches(schema.getKey(), schemaPtrn)) { - for (Map.Entry> tbl : schema.getValue().entrySet()) { + for (Map.Entry> tbl : schema.getValue().entrySet()) { if (matches(tbl.getKey(), tblNamePtrn)) rows.add(Arrays.asList(null, schema.getKey(), tbl.getKey(), "_KEY", 1, "_KEY")); } @@ -1361,10 +1362,21 @@ private void updateMetaData() throws SQLException { Collection types = m.types(); - Map> typesMap = U.newHashMap(types.size()); + Map> typesMap = U.newHashMap(types.size()); for (String type : types) { - typesMap.put(type.toUpperCase(), m.fields(type)); + Collection notNullFields = m.notNullFields(type); + + Map fields = new LinkedHashMap<>(); + + for (Map.Entry fld : m.fields(type).entrySet()) { + ColumnInfo colInfo = new ColumnInfo(fld.getValue(), + notNullFields == null ? false : notNullFields.contains(fld.getKey())); + + fields.put(fld.getKey(), colInfo); + } + + typesMap.put(type.toUpperCase(), fields); for (GridCacheSqlIndexMetadata idx : m.indexes(type)) { int cnt = 0; @@ -1435,7 +1447,41 @@ public UpdateMetadataTask(String cacheName, Ignite ignite) { @Override public Collection call() throws Exception { IgniteCache cache = ignite.cache(cacheName); - return ((IgniteCacheProxy)cache).context().queries().sqlMetadata(); + return ((IgniteCacheProxy)cache).context().queries().sqlMetadataV2(); + } + } + + /** + * Column info. + */ + private static class ColumnInfo { + /** Class name. */ + private final String typeName; + + /** Not null flag. */ + private final boolean notNull; + + /** + * @param typeName Type name. 
+ * @param notNull Not null flag. + */ + private ColumnInfo(String typeName, boolean notNull) { + this.typeName = typeName; + this.notNull = notNull; + } + + /** + * @return Type name. + */ + public String typeName() { + return typeName; + } + + /** + * @return Not null flag. + */ + public boolean isNotNull() { + return notNull; } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java index f8734618c73fe..64e74fb6f1cd9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java @@ -113,6 +113,7 @@ import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteCallable; import org.apache.ignite.lang.IgniteClosure; +import org.apache.ignite.lang.IgniteProductVersion; import org.apache.ignite.lang.IgniteReducer; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.resources.IgniteInstanceResource; @@ -145,6 +146,9 @@ public abstract class GridCacheQueryManager extends GridCacheManagerAdapte /** Maximum number of query detail metrics to evict at once. */ private static final int QRY_DETAIL_METRICS_EVICTION_LIMIT = 10_000; + /** Support 'not null' field constraint since v 2.3.0. */ + private static final IgniteProductVersion NOT_NULLS_SUPPORT_VER = IgniteProductVersion.fromString("2.3.0"); + /** Comparator for priority queue with query detail metrics with priority to new metrics. */ private static final Comparator QRY_DETAIL_METRICS_PRIORITY_NEW_CMP = new Comparator() { @@ -1907,6 +1911,79 @@ public Collection sqlMetadata() throws IgniteCheckedExcept } } + /** + * Gets SQL metadata with not nulls fields. + * + * @return SQL metadata. + * @throws IgniteCheckedException In case of error. 
+ */ + public Collection sqlMetadataV2() throws IgniteCheckedException { + if (!enterBusy()) + throw new IllegalStateException("Failed to get metadata (grid is stopping)."); + + try { + Callable> job = new GridCacheQuerySqlMetadataJobV2(); + + // Remote nodes that have current cache. + Collection nodes = CU.affinityNodes(cctx, AffinityTopologyVersion.NONE); + + Collection> res = new ArrayList<>(nodes.size() + 1); + + IgniteInternalFuture>> rmtFut = null; + + // Get metadata from remote nodes. + if (!nodes.isEmpty()) { + boolean allNodesNew = true; + + for (ClusterNode n : nodes) { + if (n.version().compareTo(NOT_NULLS_SUPPORT_VER) < 0) + allNodesNew = false; + } + + if (!allNodesNew) + return sqlMetadata(); + + rmtFut = cctx.closures().callAsyncNoFailover(BROADCAST, Collections.singleton(job), nodes, true, 0); + } + + // Get local metadata. + IgniteInternalFuture> locFut = cctx.closures().callLocalSafe(job, true); + + if (rmtFut != null) + res.addAll(rmtFut.get()); + + res.add(locFut.get()); + + Map> map = new HashMap<>(); + + for (Collection col : res) { + for (CacheSqlMetadata meta : col) { + String name = meta.cacheName(); + + Collection cacheMetas = map.get(name); + + if (cacheMetas == null) + map.put(name, cacheMetas = new LinkedList<>()); + + cacheMetas.add(meta); + } + } + + Collection col = new ArrayList<>(map.size()); + + // Metadata for current cache must be first in list. + col.add(new GridCacheQuerySqlMetadataV2(map.remove(cacheName))); + + for (Collection metas : map.values()) + col.add(new GridCacheQuerySqlMetadataV2(metas)); + + return col; + } + finally { + leaveBusy(); + } + } + /** * @param Key type. * @param Value type. @@ -2079,7 +2156,7 @@ private static class MetadataJob implements IgniteCallable notNullFields(String type) { + return null; + } + /** {@inheritDoc} */ @Override public Map keyClasses() { return keyClasses; @@ -2236,7 +2318,7 @@ public CacheSqlMetadata() { /** * Cache metadata index. 
*/ - private static class CacheSqlIndexMetadata implements GridCacheSqlIndexMetadata { + public static class CacheSqlIndexMetadata implements GridCacheSqlIndexMetadata { /** */ private static final long serialVersionUID = 0L; @@ -2371,6 +2453,7 @@ private static class FieldsResult extends CachedResult { /** * @return Metadata. + * @throws IgniteCheckedException On error. */ public List metaData() throws IgniteCheckedException { get(); // Ensure that result is ready. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQuerySqlMetadataJobV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQuerySqlMetadataJobV2.java new file mode 100644 index 0000000000000..9907d1a2433f5 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQuerySqlMetadataJobV2.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.query; + +import org.apache.ignite.Ignite; +import org.apache.ignite.cache.QueryIndexType; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.IgniteKernal; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; +import org.apache.ignite.internal.processors.query.GridQueryIndexDescriptor; +import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.util.typedef.C1; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.P1; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.resources.IgniteInstanceResource; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.Map; +import java.util.Set; + +/** + * Metadata job. + */ +@GridInternal +class GridCacheQuerySqlMetadataJobV2 implements IgniteCallable> { + /** */ + private static final long serialVersionUID = 0L; + + /** Number of fields to report when no fields defined. Includes _key and _val columns. 
*/ + private static final int NO_FIELDS_COLUMNS_COUNT = 2; + + /** Grid */ + @IgniteInstanceResource + private Ignite ignite; + + /** {@inheritDoc} */ + @Override public Collection call() { + final GridKernalContext ctx = ((IgniteKernal)ignite).context(); + + Collection cacheNames = F.viewReadOnly(ctx.cache().caches(), + new C1, String>() { + @Override public String apply(IgniteInternalCache c) { + return c.name(); + } + }, + new P1>() { + @Override public boolean apply(IgniteInternalCache c) { + return !CU.isSystemCache(c.name()) && !DataStructuresProcessor.isDataStructureCache(c.name()); + } + } + ); + + return F.transform(cacheNames, new C1() { + @Override public GridCacheQueryManager.CacheSqlMetadata apply(String cacheName) { + Collection types = ctx.query().types(cacheName); + + Collection names = U.newHashSet(types.size()); + Map keyClasses = U.newHashMap(types.size()); + Map valClasses = U.newHashMap(types.size()); + Map> fields = U.newHashMap(types.size()); + Map> indexes = U.newHashMap(types.size()); + Map> notNullFields = U.newHashMap(types.size()); + + for (GridQueryTypeDescriptor type : types) { + // Filter internal types (e.g., data structures). + if (type.name().startsWith("GridCache")) + continue; + + names.add(type.name()); + + keyClasses.put(type.name(), type.keyClass().getName()); + valClasses.put(type.name(), type.valueClass().getName()); + + int size = type.fields().isEmpty() ? 
NO_FIELDS_COLUMNS_COUNT : type.fields().size(); + + Map fieldsMap = U.newLinkedHashMap(size); + HashSet notNullFieldsSet = U.newHashSet(1); + + // _KEY and _VAL are not included in GridIndexingTypeDescriptor.valueFields + if (type.fields().isEmpty()) { + fieldsMap.put("_KEY", type.keyClass().getName()); + fieldsMap.put("_VAL", type.valueClass().getName()); + } + + for (Map.Entry> e : type.fields().entrySet()) { + String fieldName = e.getKey(); + + fieldsMap.put(fieldName.toUpperCase(), e.getValue().getName()); + + if (type.property(fieldName).notNull()) + notNullFieldsSet.add(fieldName.toUpperCase()); + } + + fields.put(type.name(), fieldsMap); + notNullFields.put(type.name(), notNullFieldsSet); + + Map idxs = type.indexes(); + + Collection indexesCol = new ArrayList<>(idxs.size()); + + for (Map.Entry e : idxs.entrySet()) { + GridQueryIndexDescriptor desc = e.getValue(); + + // Add only SQL indexes. + if (desc.type() == QueryIndexType.SORTED) { + Collection idxFields = new LinkedList<>(); + Collection descendings = new LinkedList<>(); + + for (String idxField : e.getValue().fields()) { + String idxFieldUpper = idxField.toUpperCase(); + + idxFields.add(idxFieldUpper); + + if (desc.descending(idxField)) + descendings.add(idxFieldUpper); + } + + indexesCol.add(new GridCacheQueryManager.CacheSqlIndexMetadata(e.getKey().toUpperCase(), + idxFields, descendings, false)); + } + } + + indexes.put(type.name(), indexesCol); + } + + return new GridCacheQuerySqlMetadataV2(cacheName, names, keyClasses, valClasses, fields, indexes, + notNullFields); + } + }); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQuerySqlMetadataV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQuerySqlMetadataV2.java new file mode 100644 index 0000000000000..66821c90e01c6 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQuerySqlMetadataV2.java 
@@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.query; + +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.Nullable; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +/** + * Cache metadata with not null field. + */ +public class GridCacheQuerySqlMetadataV2 extends GridCacheQueryManager.CacheSqlMetadata { + /** */ + private static final long serialVersionUID = 0L; + + /** Not null fields. */ + private Map> notNullFields; + + /** + * Required by {@link Externalizable}. + */ + public GridCacheQuerySqlMetadataV2() { + // No-op. + } + + /** + * @param cacheName Cache name. + * @param types Types. + * @param keyClasses Key classes map. + * @param valClasses Value classes map. + * @param fields Fields maps. + * @param indexes Indexes. + * @param notNullFields Not null fields. 
+ */ + GridCacheQuerySqlMetadataV2(@Nullable String cacheName, Collection types, Map keyClasses, + Map valClasses, Map> fields, + Map> indexes, Map> notNullFields) { + super(cacheName, types, keyClasses, valClasses, fields, indexes); + + this.notNullFields = notNullFields; + } + + /** + * @param metas Meta data instances from different nodes. + */ + GridCacheQuerySqlMetadataV2(Iterable metas) { + super(metas); + + notNullFields = new HashMap<>(); + + for (GridCacheQueryManager.CacheSqlMetadata meta : metas) { + if (meta instanceof GridCacheQuerySqlMetadataV2) { + GridCacheQuerySqlMetadataV2 metaV2 = (GridCacheQuerySqlMetadataV2)meta; + + notNullFields.putAll(metaV2.notNullFields); + } + } + } + + /** {@inheritDoc} */ + @Override public Collection notNullFields(String type) { + return notNullFields.get(type); + } + + /** {@inheritDoc} */ + @Override public void writeExternal(ObjectOutput out) throws IOException { + super.writeExternal(out); + + U.writeMap(out, notNullFields); + } + + /** {@inheritDoc} */ + @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + super.readExternal(in); + + notNullFields = U.readHashMap(in); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheSqlMetadata.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheSqlMetadata.java index 724962e853839..ddc2860311758 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheSqlMetadata.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheSqlMetadata.java @@ -77,6 +77,14 @@ public interface GridCacheSqlMetadata extends Externalizable { */ @Nullable public Map fields(String type); + /** + * Gets not null fields. + * + * @param type Type name. + * @return Not null fields collection map or {@code null} if type name is unknown. 
+ */ + Collection notNullFields(String type); + /** * @return Key classes. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMeta.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMeta.java index 9f145e0ec73ef..d927c26ecc793 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMeta.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMeta.java @@ -20,6 +20,7 @@ import org.apache.ignite.internal.binary.BinaryReaderExImpl; import org.apache.ignite.internal.binary.BinaryWriterExImpl; import org.apache.ignite.internal.jdbc.thin.JdbcThinUtils; +import org.apache.ignite.internal.jdbc2.JdbcUtils; import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.S; @@ -126,6 +127,15 @@ public String dataTypeClass() { return dataTypeClass; } + /** + * Return 'nullable' flag in compatibility mode (according with column name and column type). + * + * @return {@code true} in case the column allows null values. Otherwise returns {@code false} + */ + public boolean isNullable() { + return JdbcUtils.nullable(colName, dataTypeClass); + } + /** {@inheritDoc} */ @Override public void writeBinary(BinaryWriterExImpl writer) { writer.writeString(schemaName); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV2.java new file mode 100644 index 0000000000000..a2b4acf0f40cb --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV2.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.jdbc; + +import org.apache.ignite.internal.binary.BinaryReaderExImpl; +import org.apache.ignite.internal.binary.BinaryWriterExImpl; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * JDBC column metadata. + */ +public class JdbcColumnMetaV2 extends JdbcColumnMeta{ + /** Allow nulls . */ + private boolean nullable; + + /** + * Default constructor is used for serialization. + */ + JdbcColumnMetaV2() { + // No-op. + } + + /** + * @param schemaName Schema. + * @param tblName Table. + * @param colName Column. + * @param cls Type. + * @param nullable Allow nulls. 
+ */ + public JdbcColumnMetaV2(String schemaName, String tblName, String colName, Class cls, boolean nullable) { + super(schemaName, tblName, colName, cls); + + this.nullable = nullable; + } + + /** {@inheritDoc} */ + @Override public boolean isNullable() { + return nullable; + } + + /** {@inheritDoc} */ + @Override public void writeBinary(BinaryWriterExImpl writer) { + super.writeBinary(writer); + + writer.writeBoolean(nullable); + } + + /** {@inheritDoc} */ + @Override public void readBinary(BinaryReaderExImpl reader) { + super.readBinary(reader); + + nullable = reader.readBoolean(); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(JdbcColumnMetaV2.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java index 38d1972631e38..a6a7aa5c27507 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java @@ -38,10 +38,10 @@ public class JdbcConnectionContext implements ClientListenerConnectionContext { private static final ClientListenerProtocolVersion VER_2_1_5 = ClientListenerProtocolVersion.create(2, 1, 5); /** Version 2.3.1: added "multiple statements query" feature. */ - public static final ClientListenerProtocolVersion VER_2_3_1 = ClientListenerProtocolVersion.create(2, 3, 1); + public static final ClientListenerProtocolVersion VER_2_3_0 = ClientListenerProtocolVersion.create(2, 3, 0); /** Current version. */ - private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_3_1; + private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_3_0; /** Supported versions. 
*/ private static final Set SUPPORTED_VERS = new HashSet<>(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResult.java index da270de71d56a..9931ce0f6a38a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResult.java @@ -50,6 +50,25 @@ public class JdbcMetaColumnsResult extends JdbcResult { this.meta = new ArrayList<>(meta); } + /** + * Used by children classes. + * @param type Type ID. + */ + protected JdbcMetaColumnsResult(byte type) { + super(type); + } + + /** + * Used by children classes. + * @param type Type ID. + * @param meta Columns metadata. + */ + protected JdbcMetaColumnsResult(byte type, Collection meta) { + super(type); + + this.meta = new ArrayList<>(meta); + } + /** * @return Columns metadata. */ @@ -83,7 +102,7 @@ public List meta() { meta = new ArrayList<>(size); for (int i = 0; i < size; ++i) { - JdbcColumnMeta m = new JdbcColumnMeta(); + JdbcColumnMeta m = createMetaColumn(); m.readBinary(reader); @@ -92,6 +111,13 @@ public List meta() { } } + /** + * @return Empty columns metadata to deserialization. 
+ */ + protected JdbcColumnMeta createMetaColumn() { + return new JdbcColumnMeta(); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(JdbcMetaColumnsResult.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResultV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResultV2.java new file mode 100644 index 0000000000000..2673a135bcbf6 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResultV2.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.jdbc; + +import java.util.Collection; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * JDBC columns metadata result. + */ +public class JdbcMetaColumnsResultV2 extends JdbcMetaColumnsResult { + /** + * Default constructor is used for deserialization. + */ + JdbcMetaColumnsResultV2() { + super(META_COLUMNS_V2); + } + + /** + * @param meta Columns metadata. 
+ */ + JdbcMetaColumnsResultV2(Collection meta) { + super(META_COLUMNS_V2, meta); + } + + /** {@inheritDoc} */ + @Override protected JdbcColumnMeta createMetaColumn() { + return new JdbcColumnMetaV2(); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(JdbcMetaColumnsResultV2.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java index 202f8138baf7d..166402fcce2c0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java @@ -23,6 +23,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -43,6 +44,7 @@ import org.apache.ignite.internal.processors.odbc.ClientListenerResponse; import org.apache.ignite.internal.processors.odbc.odbc.OdbcQueryGetColumnsMetaRequest; import org.apache.ignite.internal.processors.query.GridQueryIndexDescriptor; +import org.apache.ignite.internal.processors.query.GridQueryProperty; import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.QueryUtils; @@ -51,6 +53,7 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; +import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcConnectionContext.VER_2_3_0; import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.BATCH_EXEC; import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.META_COLUMNS; import static 
org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.META_INDEXES; @@ -291,7 +294,7 @@ private JdbcResponse executeQuery(JdbcQueryExecuteRequest req) { qry.setSchema(schemaName); List>> results = ctx.query().querySqlFieldsNoCache(qry, true, - protocolVer.compareTo(JdbcConnectionContext.VER_2_3_1) < 0); + protocolVer.compareTo(VER_2_3_0) < 0); if (results.size() == 1) { FieldsQueryCursor> qryCur = results.get(0); @@ -559,9 +562,10 @@ private JdbcResponse getTablesMeta(JdbcMetaTablesRequest req) { * @param req Get columns metadata request. * @return Response. */ + @SuppressWarnings("unchecked") private JdbcResponse getColumnsMeta(JdbcMetaColumnsRequest req) { try { - Collection meta = new HashSet<>(); + Collection meta = new LinkedHashSet<>(); for (String cacheName : ctx.cache().publicCacheNames()) { for (GridQueryTypeDescriptor table : ctx.query().types(cacheName)) { @@ -572,11 +576,22 @@ private JdbcResponse getColumnsMeta(JdbcMetaColumnsRequest req) { continue; for (Map.Entry> field : table.fields().entrySet()) { - if (!matches(field.getKey(), req.columnName())) + String colName = field.getKey(); + + if (!matches(colName, req.columnName())) continue; - JdbcColumnMeta columnMeta = new JdbcColumnMeta(table.schemaName(), table.tableName(), - field.getKey(), field.getValue()); + JdbcColumnMeta columnMeta; + + if (protocolVer.compareTo(VER_2_3_0) >= 0) { + GridQueryProperty prop = table.property(colName); + + columnMeta = new JdbcColumnMetaV2(table.schemaName(), table.tableName(), + field.getKey(), field.getValue(), !prop.notNull()); + } + else + columnMeta = new JdbcColumnMeta(table.schemaName(), table.tableName(), + field.getKey(), field.getValue()); if (!meta.contains(columnMeta)) meta.add(columnMeta); @@ -584,7 +599,12 @@ private JdbcResponse getColumnsMeta(JdbcMetaColumnsRequest req) { } } - JdbcMetaColumnsResult res = new JdbcMetaColumnsResult(meta); + JdbcMetaColumnsResult res; + + if (protocolVer.compareTo(VER_2_3_0) >= 0) + res = new 
JdbcMetaColumnsResultV2(meta); + else + res = new JdbcMetaColumnsResult(meta); return new JdbcResponse(res); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java index c6c74383381f0..6d460e658a900 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java @@ -59,6 +59,9 @@ public class JdbcResult implements JdbcRawBinarylizable { /** Multiple statements query results. */ static final byte QRY_EXEC_MULT = 13; + /** Columns metadata result V2. */ + static final byte META_COLUMNS_V2 = 14; + /** Success status. */ private byte type; @@ -147,6 +150,11 @@ public static JdbcResult readResult(BinaryReaderExImpl reader) throws BinaryObje break; + case META_COLUMNS_V2: + res = new JdbcMetaColumnsResultV2(); + + break; + default: throw new IgniteException("Unknown SQL listener request ID: [request ID=" + resId + ']'); } From 81d20410205086ff29ac2e66b7cd17909173d642 Mon Sep 17 00:00:00 2001 From: tledkov-gridgain Date: Thu, 5 Oct 2017 16:32:33 +0300 Subject: [PATCH 005/243] IGNITE-6358: JDBC thick: support multiple statements. This closes #2777. 
--- .../internal/jdbc2/JdbcStatementSelfTest.java | 130 ++++++++- .../org/apache/ignite/IgniteJdbcDriver.java | 9 +- .../ignite/internal/jdbc2/JdbcConnection.java | 13 + .../internal/jdbc2/JdbcDatabaseMetadata.java | 54 ++-- .../JdbcQueryMultipleStatementsTask.java | 167 +++++++++++ .../ignite/internal/jdbc2/JdbcQueryTask.java | 154 +++------- .../internal/jdbc2/JdbcQueryTaskResult.java | 120 ++++++++ .../internal/jdbc2/JdbcQueryTaskV3.java | 94 ++++++ .../ignite/internal/jdbc2/JdbcResultSet.java | 175 +++++++++--- .../ignite/internal/jdbc2/JdbcStatement.java | 270 ++++++++++-------- .../jdbc2/JdbcStatementResultInfo.java | 73 +++++ .../jdbc2/JdbcStreamedPreparedStatement.java | 19 +- 12 files changed, 966 insertions(+), 312 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryMultipleStatementsTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTaskResult.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTaskV3.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStatementResultInfo.java diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStatementSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStatementSelfTest.java index 138eef5a90851..d3f77e03d2d5b 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStatementSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStatementSelfTest.java @@ -45,7 +45,8 @@ public class JdbcStatementSelfTest extends GridCommonAbstractTest { private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); /** JDBC URL. 
*/ - private static final String BASE_URL = CFG_URL_PREFIX + "cache=default@modules/clients/src/test/config/jdbc-config.xml"; + private static final String BASE_URL = CFG_URL_PREFIX + + "cache=default:multipleStatementsAllowed=true@modules/clients/src/test/config/jdbc-config.xml"; /** SQL query. */ private static final String SQL = "select * from Person where age > 30"; @@ -249,6 +250,133 @@ else if (id == 3) { assert cnt == 2; } + /** + * @throws Exception If failed. + */ + public void testExecuteQueryMultipleOnlyResultSets() throws Exception { + assert conn.getMetaData().supportsMultipleResultSets(); + + int stmtCnt = 10; + + StringBuilder sql = new StringBuilder(); + + for (int i = 0; i < stmtCnt; ++i) + sql.append("select ").append(i).append("; "); + + assert stmt.execute(sql.toString()); + + for (int i = 0; i < stmtCnt; ++i) { + assert stmt.getMoreResults(); + + ResultSet rs = stmt.getResultSet(); + + assert rs.next(); + assert rs.getInt(1) == i; + assert !rs.next(); + } + + assert !stmt.getMoreResults(); + } + + /** + * @throws Exception If failed. + */ + public void testExecuteQueryMultipleOnlyDml() throws Exception { + assert conn.getMetaData().supportsMultipleResultSets(); + + conn.setSchema(null); + + int stmtCnt = 10; + + StringBuilder sql = new StringBuilder( + "drop table if exists test; create table test(ID int primary key, NAME varchar(20)); "); + + for (int i = 0; i < stmtCnt; ++i) + sql.append("insert into test (ID, NAME) values (" + i + ", 'name_" + i +"'); "); + + assert !stmt.execute(sql.toString()); + + // DROP TABLE statement + assert stmt.getResultSet() == null; + assert stmt.getUpdateCount() == 0; + + // CREATE TABLE statement + assert stmt.getResultSet() == null; + assert stmt.getUpdateCount() == 0; + + for (int i = 0; i < stmtCnt; ++i) { + assert stmt.getMoreResults(); + + assert stmt.getResultSet() == null; + assert stmt.getUpdateCount() == 1; + } + + assert !stmt.getMoreResults(); + } + + /** + * @throws Exception If failed. 
+ */ + public void testExecuteQueryMultipleMixed() throws Exception { + assert conn.getMetaData().supportsMultipleResultSets(); + + conn.setSchema(null); + + int stmtCnt = 10; + + StringBuilder sql = new StringBuilder( + "drop table if exists test; create table test(ID int primary key, NAME varchar(20)); "); + + for (int i = 0; i < stmtCnt; ++i) { + if (i % 2 == 0) + sql.append(" insert into test (ID, NAME) values (" + i + ", 'name_" + i + "'); "); + else + sql.append(" select * from test where id < " + i + "; "); + } + + assert !stmt.execute(sql.toString()); + + // DROP TABLE statement + assert stmt.getResultSet() == null; + assert stmt.getUpdateCount() == 0; + + // CREATE TABLE statement + assert stmt.getResultSet() == null; + assert stmt.getUpdateCount() == 0; + + boolean notEmptyResult = false; + + for (int i = 0; i < stmtCnt; ++i) { + assert stmt.getMoreResults(); + + if (i % 2 == 0) { + assert stmt.getResultSet() == null; + assert stmt.getUpdateCount() == 1; + } + else { + assert stmt.getUpdateCount() == -1; + + ResultSet rs = stmt.getResultSet(); + + assert rs.getMetaData().getColumnCount() == 2; + + int rowsCnt = 0; + + while(rs.next()) + rowsCnt++; + + assert rowsCnt <= (i + 1) / 2; + + if (rowsCnt == (i + 1) / 2) + notEmptyResult = true; + } + } + + assert notEmptyResult; + + assert !stmt.getMoreResults(); + } + /** * Person. */ diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java index f519589dbfe5a..b03e38733f58b 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java @@ -331,6 +331,9 @@ public class IgniteJdbcDriver implements Driver { /** Whether DML streaming will overwrite existing cache entries. */ private static final String PARAM_STREAMING_ALLOW_OVERWRITE = "streamingAllowOverwrite"; + /** Allow queries with multiple statements. 
*/ + private static final String PARAM_MULTIPLE_STMTS = "multipleStatementsAllowed"; + /** Hostname property name. */ public static final String PROP_HOST = PROP_PREFIX + "host"; @@ -376,6 +379,9 @@ public class IgniteJdbcDriver implements Driver { /** Whether DML streaming will overwrite existing cache entries. */ public static final String PROP_STREAMING_ALLOW_OVERWRITE = PROP_PREFIX + PARAM_STREAMING_ALLOW_OVERWRITE; + /** Allow query with multiple statements. */ + public static final String PROP_MULTIPLE_STMTS = PROP_PREFIX + PARAM_MULTIPLE_STMTS; + /** Cache name property name. */ public static final String PROP_CFG = PROP_PREFIX + "cfg"; @@ -447,7 +453,8 @@ public class IgniteJdbcDriver implements Driver { new JdbcDriverPropertyInfo("Distributed Joins", info.getProperty(PROP_DISTRIBUTED_JOINS), ""), new JdbcDriverPropertyInfo("Enforce Join Order", info.getProperty(JdbcThinUtils.PROP_ENFORCE_JOIN_ORDER), ""), new JdbcDriverPropertyInfo("Lazy query execution", info.getProperty(JdbcThinUtils.PROP_LAZY), ""), - new JdbcDriverPropertyInfo("Transactions Allowed", info.getProperty(PROP_TX_ALLOWED), "") + new JdbcDriverPropertyInfo("Transactions Allowed", info.getProperty(PROP_TX_ALLOWED), ""), + new JdbcDriverPropertyInfo("Queries with multiple statements allowed", info.getProperty(PROP_MULTIPLE_STMTS), "") ); if (info.getProperty(PROP_CFG) != null) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java index fde16ff09e89a..ccc09ece9a4bd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java @@ -80,6 +80,7 @@ import static org.apache.ignite.IgniteJdbcDriver.PROP_ENFORCE_JOIN_ORDER; import static org.apache.ignite.IgniteJdbcDriver.PROP_LAZY; import static org.apache.ignite.IgniteJdbcDriver.PROP_LOCAL; +import static 
org.apache.ignite.IgniteJdbcDriver.PROP_MULTIPLE_STMTS; import static org.apache.ignite.IgniteJdbcDriver.PROP_NODE_ID; import static org.apache.ignite.IgniteJdbcDriver.PROP_TX_ALLOWED; import static org.apache.ignite.IgniteJdbcDriver.PROP_STREAMING; @@ -164,6 +165,9 @@ public class JdbcConnection implements Connection { /** Allow overwrites for duplicate keys on streamed {@code INSERT}s. */ private final boolean streamAllowOverwrite; + /** Allow queries with multiple statements. */ + private final boolean multipleStmts; + /** Statements. */ final Set statements = new HashSet<>(); @@ -204,6 +208,8 @@ public JdbcConnection(String url, Properties props) throws SQLException { // by IgniteDataStreamer.DFLT_PARALLEL_OPS_MULTIPLIER will be used streamNodeParOps = Integer.parseInt(props.getProperty(PROP_STREAMING_PER_NODE_PAR_OPS, "0")); + multipleStmts = Boolean.parseBoolean(props.getProperty(PROP_MULTIPLE_STMTS)); + String nodeIdProp = props.getProperty(PROP_NODE_ID); if (nodeIdProp != null) @@ -840,6 +846,13 @@ boolean isDmlSupported() { return ignite.version().greaterThanEqual(1, 8, 0); } + /** + * @return {@code true} if multiple statements allowed, {@code false} otherwise. + */ + boolean isMultipleStatementsAllowed() { + return multipleStmts; + } + /** * @return Local query flag. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcDatabaseMetadata.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcDatabaseMetadata.java index 03fde791a1924..2fe24bb1def15 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcDatabaseMetadata.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcDatabaseMetadata.java @@ -319,7 +319,7 @@ public class JdbcDatabaseMetadata implements DatabaseMetaData { /** {@inheritDoc} */ @Override public boolean supportsMultipleResultSets() { - return false; + return conn.isMultipleStatementsAllowed(); } /** {@inheritDoc} */ @@ -675,7 +675,7 @@ public class JdbcDatabaseMetadata implements DatabaseMetaData { /** {@inheritDoc} */ @Override public ResultSet getProcedures(String catalog, String schemaPtrn, String procedureNamePtrn) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Arrays.asList("PROCEDURE_CAT", "PROCEDURE_SCHEM", "PROCEDURE_NAME", @@ -689,7 +689,7 @@ public class JdbcDatabaseMetadata implements DatabaseMetaData { /** {@inheritDoc} */ @Override public ResultSet getProcedureColumns(String catalog, String schemaPtrn, String procedureNamePtrn, String colNamePtrn) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Arrays.asList("PROCEDURE_CAT", "PROCEDURE_SCHEM", "PROCEDURE_NAME", @@ -725,7 +725,7 @@ public class JdbcDatabaseMetadata implements DatabaseMetaData { } } - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Arrays.asList("TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "TABLE_TYPE", "REMARKS", "TYPE_CAT", @@ -766,7 +766,7 @@ private List tableRow(String schema, String tbl) { /** {@inheritDoc} */ @Override public ResultSet getCatalogs() throws SQLException { - return new 
JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.singletonList("TABLE_CAT"), @@ -778,7 +778,7 @@ private List tableRow(String schema, String tbl) { /** {@inheritDoc} */ @Override public ResultSet getTableTypes() throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.singletonList("TABLE_TYPE"), @@ -812,7 +812,7 @@ private List tableRow(String schema, String tbl) { } } - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Arrays.asList("TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME", "DATA_TYPE", @@ -870,7 +870,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getColumnPrivileges(String catalog, String schema, String tbl, String colNamePtrn) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -883,7 +883,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getTablePrivileges(String catalog, String schemaPtrn, String tblNamePtrn) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -896,7 +896,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getBestRowIdentifier(String catalog, String schema, String tbl, int scope, boolean nullable) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -908,7 +908,7 @@ private List columnRow(String schema, String tbl, 
String col, int type, /** {@inheritDoc} */ @Override public ResultSet getVersionColumns(String catalog, String schema, String tbl) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -936,7 +936,7 @@ private List columnRow(String schema, String tbl, String col, int type, } } - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Arrays.asList("TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME", "KEY_SEQ", "PK_NAME"), @@ -948,7 +948,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getImportedKeys(String catalog, String schema, String tbl) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -960,7 +960,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getExportedKeys(String catalog, String schema, String tbl) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -973,7 +973,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getCrossReference(String parentCatalog, String parentSchema, String parentTbl, String foreignCatalog, String foreignSchema, String foreignTbl) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -985,7 +985,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getTypeInfo() throws SQLException { - return new 
JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -1000,7 +1000,7 @@ private List columnRow(String schema, String tbl, String col, int type, boolean approximate) throws SQLException { updateMetaData(); - Collection> rows = new ArrayList<>(indexes.size()); + List> rows = new ArrayList<>(indexes.size()); if (validCatalogPattern(catalog)) { for (List idx : indexes) { @@ -1029,7 +1029,7 @@ private List columnRow(String schema, String tbl, String col, int type, } } - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Arrays.asList("TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "NON_UNIQUE", "INDEX_QUALIFIER", @@ -1106,7 +1106,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getUDTs(String catalog, String schemaPtrn, String typeNamePtrn, int[] types) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -1144,7 +1144,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getSuperTypes(String catalog, String schemaPtrn, String typeNamePtrn) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -1157,7 +1157,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getSuperTables(String catalog, String schemaPtrn, String tblNamePtrn) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -1170,7 +1170,7 @@ private List columnRow(String schema, String tbl, String col, 
int type, /** {@inheritDoc} */ @Override public ResultSet getAttributes(String catalog, String schemaPtrn, String typeNamePtrn, String attributeNamePtrn) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -1233,7 +1233,7 @@ private List columnRow(String schema, String tbl, String col, int type, } } - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Arrays.asList("TABLE_SCHEM", "TABLE_CATALOG"), @@ -1259,7 +1259,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getClientInfoProperties() throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), @@ -1272,7 +1272,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getFunctions(String catalog, String schemaPtrn, String functionNamePtrn) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Arrays.asList("FUNCTION_CAT", "FUNCTION_SCHEM", "FUNCTION_NAME", @@ -1286,7 +1286,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getFunctionColumns(String catalog, String schemaPtrn, String functionNamePtrn, String colNamePtrn) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Arrays.asList("FUNCTION_CAT", "FUNCTION_SCHEM", "FUNCTION_NAME", @@ -1305,7 +1305,7 @@ private List columnRow(String schema, String tbl, String col, int type, /** {@inheritDoc} */ @Override public ResultSet getPseudoColumns(String catalog, String schemaPtrn, 
String tblNamePtrn, String colNamePtrn) throws SQLException { - return new JdbcResultSet(null, + return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), Collections.emptyList(), diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryMultipleStatementsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryMultipleStatementsTask.java new file mode 100644 index 0000000000000..bf7c24e640ccd --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryMultipleStatementsTask.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.jdbc2; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteJdbcDriver; +import org.apache.ignite.cache.query.FieldsQueryCursor; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.IgniteKernal; +import org.apache.ignite.internal.processors.cache.QueryCursorImpl; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.resources.IgniteInstanceResource; + +/** + * Task for SQL queries execution through {@link IgniteJdbcDriver}. + * The query can contains several SQL statements. + */ +class JdbcQueryMultipleStatementsTask implements IgniteCallable> { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** Ignite. */ + @IgniteInstanceResource + private Ignite ignite; + + /** Schema name. */ + private final String schemaName; + + /** Sql. */ + private final String sql; + + /** Operation type flag - query or not. */ + private Boolean isQry; + + /** Args. */ + private final Object[] args; + + /** Fetch size. */ + private final int fetchSize; + + /** Local execution flag. */ + private final boolean loc; + + /** Local query flag. */ + private final boolean locQry; + + /** Collocated query flag. */ + private final boolean collocatedQry; + + /** Distributed joins flag. */ + private final boolean distributedJoins; + + /** Enforce join order flag. */ + private final boolean enforceJoinOrder; + + /** Lazy query execution flag. */ + private final boolean lazy; + + /** + * @param ignite Ignite. + * @param schemaName Schema name. + * @param sql Sql query. + * @param isQry Operation type flag - query or not - to enforce query type check. + * @param loc Local execution flag. + * @param args Args. + * @param fetchSize Fetch size. 
+ * @param locQry Local query flag. + * @param collocatedQry Collocated query flag. + * @param distributedJoins Distributed joins flag. + * @param enforceJoinOrder Enforce joins order falg. + * @param lazy Lazy query execution flag. + */ + public JdbcQueryMultipleStatementsTask(Ignite ignite, String schemaName, String sql, Boolean isQry, boolean loc, + Object[] args, int fetchSize, boolean locQry, boolean collocatedQry, boolean distributedJoins, + boolean enforceJoinOrder, boolean lazy) { + this.ignite = ignite; + this.args = args; + this.schemaName = schemaName; + this.sql = sql; + this.isQry = isQry; + this.fetchSize = fetchSize; + this.loc = loc; + this.locQry = locQry; + this.collocatedQry = collocatedQry; + this.distributedJoins = distributedJoins; + this.enforceJoinOrder = enforceJoinOrder; + this.lazy = lazy; + } + + /** {@inheritDoc} */ + @Override public List call() throws Exception { + SqlFieldsQuery qry = (isQry != null ? new JdbcSqlFieldsQuery(sql, isQry) : new SqlFieldsQuery(sql)) + .setArgs(args); + + qry.setPageSize(fetchSize); + qry.setLocal(locQry); + qry.setCollocated(collocatedQry); + qry.setDistributedJoins(distributedJoins); + qry.setEnforceJoinOrder(enforceJoinOrder); + qry.setLazy(lazy); + qry.setSchema(schemaName); + + GridKernalContext ctx = ((IgniteKernal)ignite).context(); + + List>> curs = ctx.query().querySqlFieldsNoCache(qry, true, false); + + List resultsInfo = new ArrayList<>(curs.size()); + + for (FieldsQueryCursor> cur0 : curs) { + QueryCursorImpl> cur = (QueryCursorImpl>)cur0; + + long updCnt = -1; + + UUID qryId = null; + + if (!cur.isQuery()) { + List> items = cur.getAll(); + + assert items != null && items.size() == 1 && items.get(0).size() == 1 + && items.get(0).get(0) instanceof Long : + "Invalid result set for not-SELECT query. 
[qry=" + sql + + ", res=" + S.toString(List.class, items) + ']'; + + updCnt = (Long)items.get(0).get(0); + + cur.close(); + } + else { + qryId = UUID.randomUUID(); + + JdbcQueryTask.Cursor jdbcCur = new JdbcQueryTask.Cursor(cur, cur.iterator()); + + JdbcQueryTask.addCursor(qryId, jdbcCur); + + if (!loc) + JdbcQueryTask.scheduleRemoval(qryId); + } + + JdbcStatementResultInfo resInfo = new JdbcStatementResultInfo(cur.isQuery(), qryId, updCnt); + + resultsInfo.add(resInfo); + } + + return resultsInfo; + } + +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java index 485412944db4e..ecbfb713451bd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java @@ -17,7 +17,6 @@ package org.apache.ignite.internal.jdbc2; -import java.io.Serializable; import java.sql.SQLException; import java.util.ArrayList; import java.util.Collection; @@ -50,7 +49,7 @@ * This parameter can be configured via {@link IgniteSystemProperties#IGNITE_JDBC_DRIVER_CURSOR_REMOVE_DELAY} * system property. */ -class JdbcQueryTask implements IgniteCallable { +class JdbcQueryTask implements IgniteCallable { /** Serial version uid. 
*/ private static final long serialVersionUID = 0L; @@ -132,7 +131,7 @@ public JdbcQueryTask(Ignite ignite, String cacheName, String schemaName, String } /** {@inheritDoc} */ - @Override public JdbcQueryTask.QueryResult call() throws Exception { + @Override public JdbcQueryTaskResult call() throws Exception { Cursor cursor = CURSORS.get(uuid); List tbls = null; @@ -173,7 +172,11 @@ public JdbcQueryTask(Ignite ignite, String cacheName, String schemaName, String if (isQry == null) isQry = qryCursor.isQuery(); - Collection meta = qryCursor.fieldsMeta(); + CURSORS.put(uuid, cursor = new Cursor(qryCursor, qryCursor.iterator())); + } + + if (first || updateMetadata()) { + Collection meta = cursor.queryCursor().fieldsMeta(); tbls = new ArrayList<>(meta.size()); cols = new ArrayList<>(meta.size()); @@ -184,8 +187,6 @@ public JdbcQueryTask(Ignite ignite, String cacheName, String schemaName, String cols.add(desc.fieldName().toUpperCase()); types.add(desc.fieldTypeName()); } - - CURSORS.put(uuid, cursor = new Cursor(qryCursor, qryCursor.iterator())); } List> rows = new ArrayList<>(); @@ -208,14 +209,14 @@ public JdbcQueryTask(Ignite ignite, String cacheName, String schemaName, String remove(uuid, cursor); else if (first) { if (!loc) - scheduleRemoval(uuid, RMV_DELAY); + scheduleRemoval(uuid); } else if (!loc && !CURSORS.replace(uuid, cursor, new Cursor(cursor.cursor, cursor.iter))) assert !CURSORS.containsKey(uuid) : "Concurrent cursor modification."; assert isQry != null : "Query flag must be set prior to returning result"; - return new QueryResult(uuid, finished, isQry, rows, cols, tbls, types); + return new JdbcQueryTaskResult(uuid, finished, isQry, rows, cols, tbls, types); } /** @@ -232,15 +233,29 @@ protected boolean lazy() { return false; } + /** + * @return Flag to update metadata on demand. + */ + protected boolean updateMetadata() { + return false; + } + /** * Schedules removal of stored cursor in case of remote query execution. * * @param uuid Cursor UUID. 
- * @param delay Delay in milliseconds. */ - private void scheduleRemoval(final UUID uuid, long delay) { - assert !loc; + static void scheduleRemoval(final UUID uuid) { + scheduleRemoval(uuid, RMV_DELAY); + } + /** + * Schedules removal of stored cursor in case of remote query execution. + * + * @param uuid Cursor UUID. + * @param delay Delay in milliseconds. + */ + private static void scheduleRemoval(final UUID uuid, long delay) { SCHEDULER.schedule(new CAX() { @Override public void applyx() { while (true) { @@ -278,6 +293,14 @@ private static boolean remove(UUID uuid, Cursor c) { return rmv; } + /** + * @param uuid Cursor UUID. + * @param c Cursor. + */ + static void addCursor(UUID uuid, Cursor c) { + CURSORS.putIfAbsent(uuid, c); + } + /** * Closes and removes cursor. * @@ -290,108 +313,10 @@ static void remove(UUID uuid) { c.cursor.close(); } - /** - * Result of query execution. - */ - static class QueryResult implements Serializable { - /** Serial version uid. */ - private static final long serialVersionUID = 0L; - - /** Uuid. */ - private final UUID uuid; - - /** Finished. */ - private final boolean finished; - - /** Result type - query or update. */ - private final boolean isQry; - - /** Rows. */ - private final List> rows; - - /** Tables. */ - private final List tbls; - - /** Columns. */ - private final List cols; - - /** Types. */ - private final List types; - - /** - * @param uuid UUID.. - * @param finished Finished. - * @param isQry - * @param rows Rows. - * @param cols Columns. - * @param tbls Tables. - * @param types Types. - */ - public QueryResult(UUID uuid, boolean finished, boolean isQry, List> rows, List cols, - List tbls, List types) { - this.isQry = isQry; - this.cols = cols; - this.uuid = uuid; - this.finished = finished; - this.rows = rows; - this.tbls = tbls; - this.types = types; - } - - /** - * @return Query result rows. - */ - public List> getRows() { - return rows; - } - - /** - * @return Tables metadata. 
- */ - public List getTbls() { - return tbls; - } - - /** - * @return Columns metadata. - */ - public List getCols() { - return cols; - } - - /** - * @return Types metadata. - */ - public List getTypes() { - return types; - } - - /** - * @return Query UUID. - */ - public UUID getUuid() { - return uuid; - } - - /** - * @return {@code True} if it is finished query. - */ - public boolean isFinished() { - return finished; - } - - /** - * @return {@code true} if it is result of a query operation, not update; {@code false} otherwise. - */ - public boolean isQuery() { - return isQry; - } - } - /** * Cursor. */ - private static final class Cursor implements Iterable> { + static final class Cursor implements Iterable> { /** Cursor. */ final QueryCursor> cursor; @@ -405,7 +330,7 @@ private static final class Cursor implements Iterable> { * @param cursor Cursor. * @param iter Iterator. */ - private Cursor(QueryCursor> cursor, Iterator> iter) { + Cursor(QueryCursor> cursor, Iterator> iter) { this.cursor = cursor; this.iter = iter; this.lastAccessTime = U.currentTimeMillis(); @@ -422,5 +347,12 @@ private Cursor(QueryCursor> cursor, Iterator> iter) { public boolean hasNext() { return iter.hasNext(); } + + /** + * @return Cursor. + */ + public QueryCursorImpl> queryCursor() { + return (QueryCursorImpl>)cursor; + } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTaskResult.java new file mode 100644 index 0000000000000..607bb3877ad64 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTaskResult.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.jdbc2; + +import java.io.Serializable; +import java.util.List; +import java.util.UUID; + +/** + * Result of query execution. + */ +class JdbcQueryTaskResult implements Serializable { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** Uuid. */ + private final UUID uuid; + + /** Finished. */ + private final boolean finished; + + /** Result type - query or update. */ + private final boolean isQry; + + /** Rows. */ + private final List> rows; + + /** Tables. */ + private final List tbls; + + /** Columns. */ + private final List cols; + + /** Types. */ + private final List types; + + /** + * @param uuid UUID.. + * @param finished Finished. + * @param isQry Is query flag. + * @param rows Rows. + * @param cols Columns. + * @param tbls Tables. + * @param types Types. + */ + public JdbcQueryTaskResult(UUID uuid, boolean finished, boolean isQry, List> rows, List cols, + List tbls, List types) { + this.isQry = isQry; + this.cols = cols; + this.uuid = uuid; + this.finished = finished; + this.rows = rows; + this.tbls = tbls; + this.types = types; + } + + /** + * @return Query result rows. + */ + public List> getRows() { + return rows; + } + + /** + * @return Tables metadata. + */ + public List getTbls() { + return tbls; + } + + /** + * @return Columns metadata. 
+ */ + public List getCols() { + return cols; + } + + /** + * @return Types metadata. + */ + public List getTypes() { + return types; + } + + /** + * @return Query UUID. + */ + public UUID getUuid() { + return uuid; + } + + /** + * @return {@code True} if it is finished query. + */ + public boolean isFinished() { + return finished; + } + + /** + * @return {@code true} if it is result of a query operation, not update; {@code false} otherwise. + */ + public boolean isQuery() { + return isQry; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTaskV3.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTaskV3.java new file mode 100644 index 0000000000000..cb2d45220f7f3 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTaskV3.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.jdbc2; + +import java.util.UUID; +import org.apache.ignite.Ignite; + +/** + * Task for fetch results of multi-statement query. + */ +class JdbcQueryTaskV3 extends JdbcQueryTaskV2 { + /** Serial version uid. 
*/ + private static final long serialVersionUID = 0L; + + /** Update metadata on demand flag. */ + private final boolean updateMeta; + + /** + * @param ignite Ignite. + * @param cacheName Cache name. + * @param schemaName Schema name. + * @param sql Sql query. + * @param isQry Operation type flag - query or not - to enforce query type check. + * @param loc Local execution flag. + * @param args Args. + * @param fetchSize Fetch size. + * @param uuid UUID. + * @param locQry Local query flag. + * @param collocatedQry Collocated query flag. + * @param distributedJoins Distributed joins flag. + * @param enforceJoinOrder Enforce joins order flag. + * @param lazy Lazy query execution flag. + * @param updateMeta Update metadata on demand. + */ + public JdbcQueryTaskV3(Ignite ignite, String cacheName, String schemaName, String sql, Boolean isQry, boolean loc, + Object[] args, int fetchSize, UUID uuid, boolean locQry, boolean collocatedQry, boolean distributedJoins, + boolean enforceJoinOrder, boolean lazy, boolean updateMeta) { + super(ignite, cacheName, schemaName, sql, isQry, loc, args, fetchSize, uuid, locQry, + collocatedQry, distributedJoins, enforceJoinOrder, lazy); + + this.updateMeta = updateMeta; + } + + /** {@inheritDoc} */ + @Override protected boolean updateMetadata() { + return updateMeta; + } + + /** + * @param ignite Ignite. + * @param cacheName Cache name. + * @param schemaName Schema name. + * @param sql Sql query. + * @param isQry Operation type flag - query or not - to enforce query type check. + * @param loc Local execution flag. + * @param args Args. + * @param fetchSize Fetch size. + * @param uuid UUID. + * @param locQry Local query flag. + * @param collocatedQry Collocated query flag. + * @param distributedJoins Distributed joins flag. + * @param enforceJoinOrder Enforce joins order flag. + * @param lazy Lazy query execution flag. + * @param updateMeta Update metadata on demand. + * @return Appropriate task JdbcQueryTask or JdbcQueryTaskV2. 
+ */ + public static JdbcQueryTask createTask(Ignite ignite, String cacheName, String schemaName, String sql, + Boolean isQry, boolean loc, Object[] args, int fetchSize, UUID uuid, boolean locQry, + boolean collocatedQry, boolean distributedJoins, + boolean enforceJoinOrder, boolean lazy, boolean updateMeta) { + + if (updateMeta) + return new JdbcQueryTaskV3(ignite, cacheName, schemaName, sql, isQry, loc, args, fetchSize, + uuid, locQry, collocatedQry, distributedJoins, enforceJoinOrder, lazy, true); + else + return JdbcQueryTaskV2.createTask(ignite, cacheName, schemaName, sql, isQry, loc, args, fetchSize, + uuid, locQry, collocatedQry, distributedJoins, enforceJoinOrder, lazy); + } +} \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java index 04b40412da89c..69d4252d7bcee 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java @@ -39,13 +39,14 @@ import java.sql.Timestamp; import java.util.ArrayList; import java.util.Calendar; -import java.util.Collection; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.UUID; import org.apache.ignite.Ignite; import org.apache.ignite.internal.processors.odbc.SqlStateCode; +import org.apache.ignite.internal.util.typedef.F; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.jdbc2.JdbcUtils.convertToSqlException; @@ -54,6 +55,12 @@ * JDBC result set implementation. */ public class JdbcResultSet implements ResultSet { + /** Is query. */ + private final boolean isQry; + + /** Update count. */ + private final long updCnt; + /** Uuid. 
*/ private final UUID uuid; @@ -61,13 +68,13 @@ public class JdbcResultSet implements ResultSet { private final JdbcStatement stmt; /** Table names. */ - private final List tbls; + private List tbls; /** Column names. */ - private final List cols; + private List cols; /** Class names. */ - private final List types; + private List types; /** Rows cursor iterator. */ private Iterator> it; @@ -93,6 +100,7 @@ public class JdbcResultSet implements ResultSet { /** * Creates new result set. * + * @param isQry Is query flag. * @param uuid Query UUID. * @param stmt Statement. * @param tbls Table names. @@ -100,26 +108,56 @@ public class JdbcResultSet implements ResultSet { * @param types Types. * @param fields Fields. * @param finished Result set finished flag (the last result set). + * @throws SQLException On error. */ - JdbcResultSet(@Nullable UUID uuid, JdbcStatement stmt, List tbls, List cols, - List types, Collection> fields, boolean finished) { - assert stmt != null; - assert tbls != null; - assert cols != null; - assert types != null; - assert fields != null; - - this.uuid = uuid; + JdbcResultSet(boolean isQry, @Nullable UUID uuid, JdbcStatement stmt, List tbls, List cols, + List types, List> fields, boolean finished) throws SQLException { + this.isQry = isQry; this.stmt = stmt; - this.tbls = tbls; - this.cols = cols; - this.types = types; - this.finished = finished; - this.it = fields.iterator(); + if (isQry) { + this.uuid = uuid; + updCnt = -1; + this.tbls = tbls; + this.cols = cols; + this.types = types; + this.finished = finished; + + if (fields != null) + it = fields.iterator(); + else + it = Collections.emptyIterator(); + } + else { + updCnt = updateCounterFromQueryResult(fields); + + this.uuid = null; + this.tbls = null; + this.cols = null; + this.types = null; + this.finished = true; + it = null; + } } - /** {@inheritDoc} */ + /** + * @param stmt Statement. + * @param updCnt Update count. 
+ */ + JdbcResultSet(JdbcStatement stmt, long updCnt) { + isQry = false; + this.updCnt = updCnt; + this.stmt = stmt; + + uuid = null; + tbls = null; + cols = null; + types = null; + finished = true; + it = null; + } + + /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public boolean next() throws SQLException { ensureNotClosed(); @@ -140,37 +178,52 @@ else if (it.hasNext()) { return true; } else if (!finished) { - JdbcConnection conn = (JdbcConnection)stmt.getConnection(); + fetchPage(); - Ignite ignite = conn.ignite(); + return next(); + } - UUID nodeId = conn.nodeId(); + it = null; - boolean loc = nodeId == null; + return false; + } - // Connections from new clients send queries with new tasks, so we have to continue in the same manner - JdbcQueryTask qryTask = JdbcQueryTaskV2.createTask(loc ? ignite : null, conn.cacheName(), conn.schemaName(), - null,true, loc, null, fetchSize, uuid, conn.isLocalQuery(), conn.isCollocatedQuery(), - conn.isDistributedJoins(), conn.isEnforceJoinOrder(), conn.isLazy()); + /** + * + */ + private void fetchPage() throws SQLException { + JdbcConnection conn = (JdbcConnection)stmt.getConnection(); - try { - JdbcQueryTask.QueryResult res = - loc ? qryTask.call() : ignite.compute(ignite.cluster().forNodeId(nodeId)).call(qryTask); + Ignite ignite = conn.ignite(); - finished = res.isFinished(); + UUID nodeId = conn.nodeId(); - it = res.getRows().iterator(); + boolean loc = nodeId == null; - return next(); - } - catch (Exception e) { - throw convertToSqlException(e, "Failed to query Ignite."); - } - } + boolean updateMetadata = tbls == null; - it = null; + // Connections from new clients send queries with new tasks, so we have to continue in the same manner + JdbcQueryTask qryTask = JdbcQueryTaskV3.createTask(loc ? 
ignite : null, conn.cacheName(), conn.schemaName(), + null,true, loc, null, fetchSize, uuid, conn.isLocalQuery(), conn.isCollocatedQuery(), + conn.isDistributedJoins(), conn.isEnforceJoinOrder(), conn.isLazy(), updateMetadata); - return false; + try { + JdbcQueryTaskResult res = + loc ? qryTask.call() : ignite.compute(ignite.cluster().forNodeId(nodeId)).call(qryTask); + + finished = res.isFinished(); + + it = res.getRows().iterator(); + + if (updateMetadata) { + tbls = res.getTbls(); + cols = res.getCols(); + types = res.getTypes(); + } + } + catch (Exception e) { + throw convertToSqlException(e, "Failed to query Ignite."); + } } /** {@inheritDoc} */ @@ -421,6 +474,9 @@ void closeInternal() throws SQLException { @Override public ResultSetMetaData getMetaData() throws SQLException { ensureNotClosed(); + if (tbls == null) + fetchPage(); + return new JdbcResultSetMetadata(tbls, cols, types); } @@ -1523,4 +1579,43 @@ private void ensureHasCurrentRow() throws SQLException { if (curr == null) throw new SQLException("Result set is not positioned on a row."); } + + /** + * @return Is Query flag. + */ + public boolean isQuery() { + return isQry; + } + + /** + * @return Update count. + */ + public long updateCount() { + return updCnt; + } + + /** + * @param rows query result. + * @return update counter, if found. + * @throws SQLException if getting an update counter from result proved to be impossible. 
+ */ + private static long updateCounterFromQueryResult(List> rows) throws SQLException { + if (F.isEmpty(rows)) + return -1; + + if (rows.size() != 1) + throw new SQLException("Expected fetch size of 1 for update operation."); + + List row = rows.get(0); + + if (row.size() != 1) + throw new SQLException("Expected row size of 1 for update operation."); + + Object objRes = row.get(0); + + if (!(objRes instanceof Long)) + throw new SQLException("Unexpected update result type."); + + return (Long)objRes; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStatement.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStatement.java index a94b8fd7a7a31..acac12337d35a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStatement.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStatement.java @@ -24,6 +24,7 @@ import java.sql.SQLWarning; import java.sql.Statement; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -57,9 +58,6 @@ public class JdbcStatement implements Statement { /** Rows limit. */ private int maxRows; - /** Current result set. */ - protected ResultSet rs; - /** Query arguments. */ protected ArrayList args; @@ -72,12 +70,15 @@ public class JdbcStatement implements Statement { /** Fields indexes. */ Map fieldsIdxs = new HashMap<>(); - /** Current updated items count. */ - long updateCnt = -1; - /** Batch of statements. */ private List batch; + /** Results. */ + protected List results; + + /** Current result set index. */ + protected int curRes = 0; + /** * Creates new statement. 
* @@ -92,11 +93,20 @@ public class JdbcStatement implements Statement { /** {@inheritDoc} */ @SuppressWarnings("deprecation") @Override public ResultSet executeQuery(String sql) throws SQLException { - ensureNotClosed(); + execute0(sql, true); - rs = null; + return getResultSet(); + } - updateCnt = -1; + /** + * @param sql SQL query. + * @param isQuery Expected type of statements are contained in the query. + * @throws SQLException On error. + */ + private void executeMultipleStatement(String sql, Boolean isQuery) throws SQLException { + ensureNotClosed(); + + closeResults(); if (F.isEmpty(sql)) throw new SQLException("SQL query is empty"); @@ -105,53 +115,37 @@ public class JdbcStatement implements Statement { UUID nodeId = conn.nodeId(); - UUID uuid = UUID.randomUUID(); - boolean loc = nodeId == null; - JdbcQueryTask qryTask = JdbcQueryTaskV2.createTask(loc ? ignite : null, conn.cacheName(), conn.schemaName(), - sql, true, loc, getArgs(), fetchSize, uuid, conn.isLocalQuery(), conn.isCollocatedQuery(), + JdbcQueryMultipleStatementsTask qryTask = new JdbcQueryMultipleStatementsTask(loc ? ignite : null, conn.schemaName(), + sql, isQuery, loc, getArgs(), fetchSize, conn.isLocalQuery(), conn.isCollocatedQuery(), conn.isDistributedJoins(), conn.isEnforceJoinOrder(), conn.isLazy()); try { - JdbcQueryTask.QueryResult res = + List rsInfos = loc ? 
qryTask.call() : ignite.compute(ignite.cluster().forNodeId(nodeId)).call(qryTask); - JdbcResultSet rs = new JdbcResultSet(uuid, this, res.getTbls(), res.getCols(), res.getTypes(), - res.getRows(), res.isFinished()); + results = new ArrayList<>(rsInfos.size()); - rs.setFetchSize(fetchSize); - - resSets.add(rs); - - return rs; + for (JdbcStatementResultInfo rsInfo : rsInfos) { + if (rsInfo.isQuery()) + results.add(new JdbcResultSet(true, rsInfo.queryId(), this, null, null, null, null, false)); + else + results.add(new JdbcResultSet(this, rsInfo.updateCount())); + } } catch (Exception e) { throw convertToSqlException(e, "Failed to query Ignite."); } } - /** {@inheritDoc} */ - @Override public int executeUpdate(String sql) throws SQLException { - ensureNotClosed(); - - rs = null; - - updateCnt = -1; - - return Long.valueOf(doUpdate(sql, getArgs())).intValue(); - } - /** - * Run update query. * @param sql SQL query. - * @param args Update arguments. - * @return Number of affected items. - * @throws SQLException If failed. + * @param isQuery Expected type of statements are contained in the query. + * @throws SQLException On error. */ - long doUpdate(String sql, Object[] args) throws SQLException { - if (F.isEmpty(sql)) - throw new SQLException("SQL query is empty"); + private void executeSingle(String sql, Boolean isQuery) throws SQLException { + ensureNotClosed(); Ignite ignite = conn.ignite(); @@ -162,46 +156,50 @@ long doUpdate(String sql, Object[] args) throws SQLException { boolean loc = nodeId == null; if (!conn.isDmlSupported()) - throw new SQLException("Failed to query Ignite: DML operations are supported in versions 1.8.0 and newer"); + if(isQuery != null && !isQuery) + throw new SQLException("Failed to query Ignite: DML operations are supported in versions 1.8.0 and newer"); + else + isQuery = true; JdbcQueryTask qryTask = JdbcQueryTaskV2.createTask(loc ? 
ignite : null, conn.cacheName(), conn.schemaName(), - sql, false, loc, args, fetchSize, uuid, conn.isLocalQuery(), conn.isCollocatedQuery(), + sql, isQuery, loc, getArgs(), fetchSize, uuid, conn.isLocalQuery(), conn.isCollocatedQuery(), conn.isDistributedJoins(), conn.isEnforceJoinOrder(), conn.isLazy()); try { - JdbcQueryTask.QueryResult qryRes = + JdbcQueryTaskResult qryRes = loc ? qryTask.call() : ignite.compute(ignite.cluster().forNodeId(nodeId)).call(qryTask); - return updateCnt = updateCounterFromQueryResult(qryRes.getRows()); + JdbcResultSet rs = new JdbcResultSet(qryRes.isQuery(), uuid, this, qryRes.getTbls(), qryRes.getCols(), + qryRes.getTypes(), qryRes.getRows(), qryRes.isFinished()); + + rs.setFetchSize(fetchSize); + + results = Collections.singletonList(rs); + curRes = 0; } catch (Exception e) { throw convertToSqlException(e, "Failed to query Ignite."); } + } /** - * @param rows query result. - * @return update counter, if found. - * @throws SQLException if getting an update counter from result proved to be impossible. + * @param sql SQL query. + * @param isQuery Expected type of statements are contained in the query. + * @throws SQLException On error. 
*/ - private static long updateCounterFromQueryResult(List> rows) throws SQLException { - if (F.isEmpty(rows)) - return -1; - - if (rows.size() != 1) - throw new SQLException("Expected fetch size of 1 for update operation"); - - List row = rows.get(0); - - if (row.size() != 1) - throw new SQLException("Expected row size of 1 for update operation"); - - Object objRes = row.get(0); + protected void execute0(String sql, Boolean isQuery) throws SQLException { + if (conn.isMultipleStatementsAllowed()) + executeMultipleStatement(sql, isQuery); + else + executeSingle(sql, isQuery); + } - if (!(objRes instanceof Long)) - throw new SQLException("Unexpected update result type"); + /** {@inheritDoc} */ + @Override public int executeUpdate(String sql) throws SQLException { + execute0(sql, false); - return (Long)objRes; + return getUpdateCount(); } /** {@inheritDoc} */ @@ -302,86 +300,48 @@ void closeInternal() throws SQLException { /** {@inheritDoc} */ @Override public boolean execute(String sql) throws SQLException { - if (!conn.isDmlSupported()) { - // We attempt to run a query without any checks as long as server does not support DML anyway, - // so it simply will throw an exception when given a DML statement instead of a query. - rs = executeQuery(sql); - - return true; - } - - ensureNotClosed(); - - rs = null; - - updateCnt = -1; - - if (F.isEmpty(sql)) - throw new SQLException("SQL query is empty"); - - Ignite ignite = conn.ignite(); + execute0(sql, null); - UUID nodeId = conn.nodeId(); - - UUID uuid = UUID.randomUUID(); - - boolean loc = nodeId == null; - - JdbcQueryTask qryTask = JdbcQueryTaskV2.createTask(loc ? ignite : null, conn.cacheName(), conn.schemaName(), - sql, null, loc, getArgs(), fetchSize, uuid, conn.isLocalQuery(), conn.isCollocatedQuery(), - conn.isDistributedJoins(), conn.isEnforceJoinOrder(), conn.isLazy()); - - try { - JdbcQueryTask.QueryResult res = - loc ? 
qryTask.call() : ignite.compute(ignite.cluster().forNodeId(nodeId)).call(qryTask); - - if (res.isQuery()) { - JdbcResultSet rs = new JdbcResultSet(uuid, this, res.getTbls(), res.getCols(), - res.getTypes(), res.getRows(), res.isFinished()); - - rs.setFetchSize(fetchSize); - - resSets.add(rs); - - this.rs = rs; - } - else - updateCnt = updateCounterFromQueryResult(res.getRows()); - - return res.isQuery(); - } - catch (Exception e) { - throw convertToSqlException(e, "Failed to query Ignite."); - } + return results.get(0).isQuery(); } /** {@inheritDoc} */ @Override public ResultSet getResultSet() throws SQLException { - ensureNotClosed(); + JdbcResultSet rs = nextResultSet(); - ResultSet rs0 = rs; + if (rs == null) + return null; - rs = null; + if (!rs.isQuery()) { + curRes--; - return rs0; + return null; + } + + return rs; } /** {@inheritDoc} */ @Override public int getUpdateCount() throws SQLException { - ensureNotClosed(); + JdbcResultSet rs = nextResultSet(); - long res = updateCnt; + if (rs == null) + return -1; - updateCnt = -1; + if (rs.isQuery()) { + curRes--; + + return -1; + } - return Long.valueOf(res).intValue(); + return (int)rs.updateCount(); } /** {@inheritDoc} */ @Override public boolean getMoreResults() throws SQLException { ensureNotClosed(); - return false; + return getMoreResults(CLOSE_CURRENT_RESULT); } /** {@inheritDoc} */ @@ -472,9 +432,8 @@ sql, null, loc, getArgs(), fetchSize, uuid, conn.isLocalQuery(), conn.isCollocat */ protected int[] doBatchUpdate(String command, List batch, List> batchArgs) throws SQLException { - rs = null; - updateCnt = -1; + closeResults(); if ((F.isEmpty(command) || F.isEmpty(batchArgs)) && F.isEmpty(batch)) throw new SQLException("Batch is empty."); @@ -495,7 +454,11 @@ protected int[] doBatchUpdate(String command, List batch, List batch, List= results.size()) + return null; + else + return results.get(curRes++); + } + + /** + * Close results. + * + * @throws SQLException On error. 
+ */ + private void closeResults() throws SQLException { + if (results != null) { + for (JdbcResultSet rs : results) + rs.close(); + + results = null; + curRes = 0; + } + } + } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStatementResultInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStatementResultInfo.java new file mode 100644 index 0000000000000..8aa02f1352bd1 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStatementResultInfo.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.jdbc2; + +import java.util.UUID; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * JDBC statement result information. Keeps statement type (SELECT or UPDATE) and + * queryId or update count (depends on statement type). + */ +public class JdbcStatementResultInfo { + /** Query flag. */ + private boolean isQuery; + + /** Update count. */ + private long updCnt; + + /** Query ID. */ + private UUID qryId; + + /** + * @param isQuery Query flag. + * @param qryId Query ID. + * @param updCnt Update count. 
+ */ + public JdbcStatementResultInfo(boolean isQuery, UUID qryId, long updCnt) { + this.isQuery = isQuery; + this.updCnt = updCnt; + this.qryId = qryId; + } + + /** + * @return Query flag. + */ + public boolean isQuery() { + return isQuery; + } + + /** + * @return Query ID. + */ + public UUID queryId() { + return qryId; + } + + /** + * @return Update count. + */ + public long updateCount() { + return updCnt; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(JdbcStatementResultInfo.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStreamedPreparedStatement.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStreamedPreparedStatement.java index 9f76700a3ca76..408f0897cad6f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStreamedPreparedStatement.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStreamedPreparedStatement.java @@ -19,8 +19,8 @@ import java.sql.PreparedStatement; import java.sql.SQLException; +import java.util.Collections; import org.apache.ignite.IgniteDataStreamer; -import org.apache.ignite.internal.IgniteEx; /** * Prepared statement associated with a data streamer. @@ -33,8 +33,9 @@ class JdbcStreamedPreparedStatement extends JdbcPreparedStatement { * Creates new prepared statement. * * @param conn Connection. - * @param sql SQL query. + * @param sql SQL query. * @param streamer Data streamer to use with this statement. Will be closed on statement close. + * @param nativeStmt Native statement. 
*/ JdbcStreamedPreparedStatement(JdbcConnection conn, String sql, IgniteDataStreamer streamer, PreparedStatement nativeStmt) { @@ -53,8 +54,16 @@ class JdbcStreamedPreparedStatement extends JdbcPreparedStatement { } /** {@inheritDoc} */ - @Override long doUpdate(String sql, Object[] args) throws SQLException { - return conn.ignite().context().query().streamUpdateQuery(conn.cacheName(), conn.schemaName(), - streamer, sql, args); + @Override protected void execute0(String sql, Boolean isQuery) throws SQLException { + assert isQuery != null && !isQuery; + + long updCnt = conn.ignite().context().query().streamUpdateQuery(conn.cacheName(), conn.schemaName(), + streamer, sql, getArgs()); + + JdbcResultSet rs = new JdbcResultSet(this, updCnt); + + results = Collections.singletonList(rs); + + curRes = 0; } } From 2e046d61078874c12e4fd33bba0826f5453b055d Mon Sep 17 00:00:00 2001 From: tledkov-gridgain Date: Thu, 5 Oct 2017 16:45:31 +0300 Subject: [PATCH 006/243] IGNITE-6556: JDBC thin: fixed setSchema() case sensitivity handling. This closes #2805. 
--- .../JdbcThinAbstractDmlStatementSelfTest.java | 2 +- .../JdbcThinAutoCloseServerCursorTest.java | 8 ++-- .../thin/JdbcThinComplexQuerySelfTest.java | 2 +- .../jdbc/thin/JdbcThinConnectionSelfTest.java | 4 ++ .../JdbcThinDynamicIndexAbstractSelfTest.java | 2 +- .../jdbc/thin/JdbcThinEmptyCacheSelfTest.java | 2 +- .../jdbc/thin/JdbcThinMetadataSelfTest.java | 4 +- .../JdbcThinMissingLongArrayResultsTest.java | 2 +- .../thin/JdbcThinNoDefaultSchemaTest.java | 5 +- .../JdbcThinPreparedStatementSelfTest.java | 2 +- .../jdbc/thin/JdbcThinResultSetSelfTest.java | 2 +- .../jdbc/thin/JdbcThinStatementSelfTest.java | 46 ++++++++++--------- .../jdbc/thin/JdbcThinConnection.java | 6 +-- .../jdbc/thin/JdbcThinPreparedStatement.java | 5 +- .../internal/jdbc/thin/JdbcThinStatement.java | 9 +++- 15 files changed, 59 insertions(+), 42 deletions(-) diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractDmlStatementSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractDmlStatementSelfTest.java index 607f5576bccd3..afe5e2e4b79b1 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractDmlStatementSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractDmlStatementSelfTest.java @@ -69,7 +69,7 @@ public abstract class JdbcThinAbstractDmlStatementSelfTest extends JdbcThinAbstr conn = DriverManager.getConnection(URL); - conn.setSchema(DEFAULT_CACHE_NAME); + conn.setSchema('"' + DEFAULT_CACHE_NAME + '"'); } /** {@inheritDoc} */ diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAutoCloseServerCursorTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAutoCloseServerCursorTest.java index eff504b026bcf..db4ed3f1ab3e8 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAutoCloseServerCursorTest.java +++ 
b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAutoCloseServerCursorTest.java @@ -115,7 +115,7 @@ public void testQuery() throws Exception { cache.put(person.id, person); try (Connection conn = DriverManager.getConnection(URL)) { - conn.setSchema(CACHE_NAME); + conn.setSchema('"' + CACHE_NAME + '"'); String sqlText = "select * from Person"; @@ -198,7 +198,7 @@ public void testQuery() throws Exception { */ public void testInsert() throws Exception { try (Connection conn = DriverManager.getConnection(URL)) { - conn.setSchema(CACHE_NAME); + conn.setSchema('"' + CACHE_NAME + '"'); String sqlText = "insert into Person (_key, id, name, age) values (?, ?, ?, ?)"; @@ -233,7 +233,7 @@ public void testUpdate() throws Exception { cache.put(1, p); try (Connection conn = DriverManager.getConnection(URL)) { - conn.setSchema(CACHE_NAME); + conn.setSchema('"' + CACHE_NAME + '"'); String sqlText = "update Person set age = age + 1"; @@ -258,7 +258,7 @@ public void testDelete() throws Exception { cache.put(1, p); try (Connection conn = DriverManager.getConnection(URL)) { - conn.setSchema(CACHE_NAME); + conn.setSchema('"' + CACHE_NAME + '"'); String sqlText = "delete Person where age = ?"; diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java index 1714ab1681579..ad1e3126c88aa 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java @@ -113,7 +113,7 @@ protected CacheConfiguration cacheConfiguration() { @Override protected void beforeTest() throws Exception { Connection conn = DriverManager.getConnection(URL); - conn.setSchema("pers"); + conn.setSchema("\"pers\""); stmt = conn.createStatement(); diff --git 
a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java index 17ce6868552e2..fbbec0d73f885 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java @@ -1633,6 +1633,10 @@ public void testGetSetSchema() throws Exception { conn.setSchema(schema); + assertEquals(schema.toUpperCase(), conn.getSchema()); + + conn.setSchema('"' + schema + '"'); + assertEquals(schema, conn.getSchema()); conn.close(); diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinDynamicIndexAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinDynamicIndexAbstractSelfTest.java index 5089894ed11cc..dbe93a49f465d 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinDynamicIndexAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinDynamicIndexAbstractSelfTest.java @@ -309,7 +309,7 @@ private void assertSize(long expSize) throws SQLException { assertEquals(expSize, cache().size()); try (Statement stmt = conn.createStatement()) { - conn.setSchema(DEFAULT_CACHE_NAME); + conn.setSchema('"' + DEFAULT_CACHE_NAME + '"'); try (ResultSet rs = stmt.executeQuery("SELECT COUNT(*) from Person")) { assertEquals(expSize, getSingleValue(rs)); diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinEmptyCacheSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinEmptyCacheSelfTest.java index 1ede536f893e8..41b43386dfe83 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinEmptyCacheSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinEmptyCacheSelfTest.java @@ -87,7 +87,7 @@ public class JdbcThinEmptyCacheSelfTest 
extends JdbcThinAbstractSelfTest { @Override protected void beforeTest() throws Exception { Connection conn = DriverManager.getConnection(URL); - conn.setSchema(CACHE_NAME); + conn.setSchema('"' + CACHE_NAME + '"'); stmt = conn.createStatement(); diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java index abbe4e15c8df1..6c20de0df5347 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java @@ -156,7 +156,7 @@ protected CacheConfiguration cacheConfiguration(QueryEntity qryEntity) { public void testResultSetMetaData() throws Exception { Connection conn = DriverManager.getConnection(URL); - conn.setSchema("pers"); + conn.setSchema("\"pers\""); Statement stmt = conn.createStatement(); @@ -528,7 +528,7 @@ public void testGetAllPrimaryKeys() throws Exception { */ public void testParametersMetadata() throws Exception { try (Connection conn = DriverManager.getConnection(URL)) { - conn.setSchema("pers"); + conn.setSchema("\"pers\""); PreparedStatement stmt = conn.prepareStatement("select orgId from Person p where p.name > ? 
and p.orgId > ?"); diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMissingLongArrayResultsTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMissingLongArrayResultsTest.java index 1a53ab7147228..1f6e2c0fa44f5 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMissingLongArrayResultsTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMissingLongArrayResultsTest.java @@ -171,7 +171,7 @@ private CacheConfiguration cacheConfiguration(@NotNull String name) throws Excep @SuppressWarnings({"EmptyTryBlock", "unused"}) public void testDefaults() throws Exception { try (Connection conn = DriverManager.getConnection(URL)) { - conn.setSchema(CACHE_NAME); + conn.setSchema('"' + CACHE_NAME + '"'); try (PreparedStatement st = conn.prepareStatement("SELECT * FROM VALUE")) { ResultSet rs = st.executeQuery(); diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinNoDefaultSchemaTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinNoDefaultSchemaTest.java index cab28f4f8bd84..a1be582003e49 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinNoDefaultSchemaTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinNoDefaultSchemaTest.java @@ -219,10 +219,13 @@ public void testSetSchema() throws Exception { } }, SQLException.class, "Failed to parse query"); - conn.setSchema("cache1"); + conn.setSchema("\"cache1\""); Statement stmt = conn.createStatement(); + //Must not affects previous created statements. 
+ conn.setSchema("invalid_schema"); + stmt.execute("select t._key, t._val from Integer t"); ResultSet rs = stmt.getResultSet(); diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java index 85eb1d3a2eb78..c5778537096f1 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java @@ -148,7 +148,7 @@ public class JdbcThinPreparedStatementSelfTest extends JdbcThinAbstractSelfTest @Override protected void beforeTest() throws Exception { conn = DriverManager.getConnection(URL); - conn.setSchema(DEFAULT_CACHE_NAME); + conn.setSchema('"' + DEFAULT_CACHE_NAME + '"'); assert conn != null; assert !conn.isClosed(); diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java index 5a3c5df9f4726..fd46cdaf56b5f 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java @@ -120,7 +120,7 @@ public class JdbcThinResultSetSelfTest extends JdbcThinAbstractSelfTest { @Override protected void beforeTest() throws Exception { Connection conn = DriverManager.getConnection(URL); - conn.setSchema(DEFAULT_CACHE_NAME); + conn.setSchema('"' + DEFAULT_CACHE_NAME + '"'); stmt = conn.createStatement(); diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java index 530946511eee3..82c0512c7ab70 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java +++ 
b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java @@ -104,7 +104,7 @@ public class JdbcThinStatementSelfTest extends JdbcThinAbstractSelfTest { @Override protected void beforeTest() throws Exception { conn = DriverManager.getConnection(URL); - conn.setSchema(DEFAULT_CACHE_NAME); + conn.setSchema('"' + DEFAULT_CACHE_NAME + '"'); stmt = conn.createStatement(); @@ -444,6 +444,8 @@ public void testExecuteQueryMultipleOnlyResultSets() throws Exception { public void testExecuteQueryMultipleOnlyDml() throws Exception { conn.setSchema(null); + Statement stmt0 = conn.createStatement(); + int stmtCnt = 10; StringBuilder sql = new StringBuilder("drop table if exists test; create table test(ID int primary key, NAME varchar(20)); "); @@ -451,24 +453,24 @@ public void testExecuteQueryMultipleOnlyDml() throws Exception { for (int i = 0; i < stmtCnt; ++i) sql.append("insert into test (ID, NAME) values (" + i + ", 'name_" + i +"'); "); - assert !stmt.execute(sql.toString()); + assert !stmt0.execute(sql.toString()); // DROP TABLE statement - assert stmt.getResultSet() == null; - assert stmt.getUpdateCount() == 0; + assert stmt0.getResultSet() == null; + assert stmt0.getUpdateCount() == 0; // CREATE TABLE statement - assert stmt.getResultSet() == null; - assert stmt.getUpdateCount() == 0; + assert stmt0.getResultSet() == null; + assert stmt0.getUpdateCount() == 0; for (int i = 0; i < stmtCnt; ++i) { - assert stmt.getMoreResults(); + assert stmt0.getMoreResults(); - assert stmt.getResultSet() == null; - assert stmt.getUpdateCount() == 1; + assert stmt0.getResultSet() == null; + assert stmt0.getUpdateCount() == 1; } - assert !stmt.getMoreResults(); + assert !stmt0.getMoreResults(); } /** @@ -477,6 +479,8 @@ public void testExecuteQueryMultipleOnlyDml() throws Exception { public void testExecuteQueryMultipleMixed() throws Exception { conn.setSchema(null); + Statement stmt0 = conn.createStatement(); + int stmtCnt = 10; StringBuilder sql = new 
StringBuilder("drop table if exists test; create table test(ID int primary key, NAME varchar(20)); "); @@ -488,29 +492,29 @@ public void testExecuteQueryMultipleMixed() throws Exception { sql.append(" select * from test where id < " + i + "; "); } - assert !stmt.execute(sql.toString()); + assert !stmt0.execute(sql.toString()); // DROP TABLE statement - assert stmt.getResultSet() == null; - assert stmt.getUpdateCount() == 0; + assert stmt0.getResultSet() == null; + assert stmt0.getUpdateCount() == 0; // CREATE TABLE statement - assert stmt.getResultSet() == null; - assert stmt.getUpdateCount() == 0; + assert stmt0.getResultSet() == null; + assert stmt0.getUpdateCount() == 0; boolean notEmptyResult = false; for (int i = 0; i < stmtCnt; ++i) { - assert stmt.getMoreResults(); + assert stmt0.getMoreResults(); if (i % 2 == 0) { - assert stmt.getResultSet() == null; - assert stmt.getUpdateCount() == 1; + assert stmt0.getResultSet() == null; + assert stmt0.getUpdateCount() == 1; } else { - assert stmt.getUpdateCount() == -1; + assert stmt0.getUpdateCount() == -1; - ResultSet rs = stmt.getResultSet(); + ResultSet rs = stmt0.getResultSet(); int rowsCnt = 0; @@ -526,7 +530,7 @@ public void testExecuteQueryMultipleMixed() throws Exception { assert notEmptyResult; - assert !stmt.getMoreResults(); + assert !stmt0.getMoreResults(); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java index d8047501176df..5afed4e5bc5a1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java @@ -168,7 +168,7 @@ public JdbcThinConnection(String url, Properties props, String schema) throws SQ checkCursorOptions(resSetType, resSetConcurrency, resSetHoldability); - JdbcThinStatement stmt = new JdbcThinStatement(this, 
resSetHoldability); + JdbcThinStatement stmt = new JdbcThinStatement(this, resSetHoldability, schema); if (timeout > 0) stmt.timeout(timeout); @@ -197,7 +197,7 @@ public JdbcThinConnection(String url, Properties props, String schema) throws SQ if (sql == null) throw new SQLException("SQL string cannot be null."); - JdbcThinPreparedStatement stmt = new JdbcThinPreparedStatement(this, sql, resSetHoldability); + JdbcThinPreparedStatement stmt = new JdbcThinPreparedStatement(this, sql, resSetHoldability, schema); if (timeout > 0) stmt.timeout(timeout); @@ -592,7 +592,7 @@ private void checkCursorOptions(int resSetType, int resSetConcurrency, @Override public void setSchema(String schema) throws SQLException { ensureNotClosed(); - this.schema = schema; + this.schema = normalizeSchema(schema); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinPreparedStatement.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinPreparedStatement.java index fb2810d623a45..23d3bbe37102b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinPreparedStatement.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinPreparedStatement.java @@ -65,9 +65,10 @@ public class JdbcThinPreparedStatement extends JdbcThinStatement implements Prep * @param conn Connection. * @param sql SQL query. * @param resHoldability Result set holdability. + * @param schema Schema name. 
*/ - JdbcThinPreparedStatement(JdbcThinConnection conn, String sql, int resHoldability) { - super(conn, resHoldability); + JdbcThinPreparedStatement(JdbcThinConnection conn, String sql, int resHoldability, String schema) { + super(conn, resHoldability, schema); this.sql = sql; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java index 603545b6fa858..d29df932c2ba5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java @@ -55,6 +55,9 @@ public class JdbcThinStatement implements Statement { /** JDBC Connection implementation. */ protected JdbcThinConnection conn; + /** Schema name. */ + private final String schema; + /** Closed flag. */ private boolean closed; @@ -87,12 +90,14 @@ public class JdbcThinStatement implements Statement { * * @param conn JDBC connection. * @param resHoldability Result set holdability. + * @param schema Schema name. */ - JdbcThinStatement(JdbcThinConnection conn, int resHoldability) { + JdbcThinStatement(JdbcThinConnection conn, int resHoldability, String schema) { assert conn != null; this.conn = conn; this.resHoldability = resHoldability; + this.schema = schema; } /** {@inheritDoc} */ @@ -122,7 +127,7 @@ protected void execute0(JdbcStatementType stmtType, String sql, List arg if (sql == null || sql.isEmpty()) throw new SQLException("SQL query is empty."); - JdbcResult res0 = conn.sendRequest(new JdbcQueryExecuteRequest(stmtType, conn.getSchema(), pageSize, + JdbcResult res0 = conn.sendRequest(new JdbcQueryExecuteRequest(stmtType, schema, pageSize, maxRows, sql, args == null ? 
null : args.toArray(new Object[args.size()]))); assert res0 != null; From 62b4b5311109af1f4ac0ae84ac47ad885eaa3533 Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Thu, 5 Oct 2017 17:37:04 +0300 Subject: [PATCH 007/243] IGNITE-5739 Fixed Ignite node crash on deactivation --- .../GridCacheDatabaseSharedManager.java | 2 ++ .../persistence/pagemem/PageMemoryImpl.java | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index 2d89942cb4da2..a56b8f45e828c 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -540,6 +540,8 @@ private void unRegistrateMetricsMBean() { try { cctx.kernalContext().config().getMBeanServer().unregisterMBean(persistenceMetricsMbeanName); + + persistenceMetricsMbeanName = null; } catch (Throwable e) { U.error(log, "Failed to unregister " + MBEAN_NAME + " MBean.", e); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java index 1da17b529b69d..95b81ad15cc07 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java @@ -237,6 +237,9 @@ public class PageMemoryImpl implements PageMemoryEx { /** */ private MemoryMetricsImpl memMetrics; + /** */ + private volatile boolean closed; + /** * @param directMemoryProvider Memory allocator to 
use. * @param sharedCtx Cache shared context. @@ -358,6 +361,15 @@ private void initWriteThrottle() { U.shutdownNow(getClass(), asyncRunner, log); + closed = true; + + for (Segment seg : segments) { + // Make sure all threads have left the lock. + seg.writeLock().lock(); + + seg.writeLock().unlock(); + } + directMemoryProvider.shutdown(); } @@ -1093,6 +1105,9 @@ private void copyInBuffer(long absPtr, ByteBuffer tmpBuf) { seg.readLock().lock(); try { + if (closed) + continue; + total += seg.loadedPages.size(); } finally { @@ -1114,6 +1129,9 @@ public long acquiredPages() { seg.readLock().lock(); try { + if (closed) + continue; + total += seg.acquiredPages(); } finally { From b33be44164d4dc4948ab5c27857d53b75ac4a332 Mon Sep 17 00:00:00 2001 From: Vasiliy Sisko Date: Fri, 6 Oct 2017 14:25:42 +0700 Subject: [PATCH 008/243] IGNITE-6287 Web Console: Improved DDL support. (cherry picked from commit 2410f07) --- .../cache/GridCacheCommandHandler.java | 12 +++++-- .../internal/visor/query/VisorQueryTask.java | 36 +++++++++++++++++-- .../app/modules/agent/AgentManager.service.js | 7 ++-- .../app/modules/sql/sql.controller.js | 36 +++++++++++++------ .../frontend/views/sql/sql.tpl.pug | 14 ++++---- 5 files changed, 79 insertions(+), 26 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/cache/GridCacheCommandHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/cache/GridCacheCommandHandler.java index 53342c93886c9..d627b20141f47 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/cache/GridCacheCommandHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/handlers/cache/GridCacheCommandHandler.java @@ -56,6 +56,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheAdapter; import org.apache.ignite.internal.processors.cache.GridCacheEntryEx; import 
org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException; +import org.apache.ignite.internal.processors.cache.IgniteCacheProxy; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.processors.cache.query.GridCacheSqlMetadata; import org.apache.ignite.internal.processors.rest.GridRestCommand; @@ -1078,7 +1079,11 @@ private static class MetadataJob extends ComputeJobAdapter { /** {@inheritDoc} */ @Override public Collection execute() { String cacheName = null; - IgniteInternalCache cache; + + if (!ignite.active()) + return Collections.emptyList(); + + IgniteInternalCache cache = null; if (!F.isEmpty(arguments())) { cacheName = argument(0); @@ -1088,7 +1093,10 @@ private static class MetadataJob extends ComputeJobAdapter { assert cache != null; } else { - cache = F.first(ignite.context().cache().publicCaches()).internalProxy(); + IgniteCacheProxy pubCache = F.first(ignite.context().cache().publicCaches()); + + if (pubCache != null) + cache = pubCache.internalProxy(); if (cache == null) return Collections.emptyList(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java index c85ceea721004..a3668c8ab18e4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java @@ -22,10 +22,15 @@ import java.util.Collection; import java.util.List; import java.util.UUID; +import javax.cache.CacheException; import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; +import org.apache.ignite.internal.processors.query.IgniteSQLException; import 
org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.VisorEither; @@ -70,7 +75,6 @@ private VisorQueryJob(VisorQueryTaskArg arg, boolean debug) { /** {@inheritDoc} */ @Override protected VisorEither run(final VisorQueryTaskArg arg) { try { - IgniteCache c = ignite.cache(arg.getCacheName()); UUID nid = ignite.localNode().id(); SqlFieldsQuery qry = new SqlFieldsQuery(arg.getQueryText()); @@ -83,7 +87,35 @@ private VisorQueryJob(VisorQueryTaskArg arg, boolean debug) { long start = U.currentTimeMillis(); - VisorQueryCursor> cur = new VisorQueryCursor<>(c.withKeepBinary().query(qry)); + FieldsQueryCursor> qryCursor; + + String cacheName = arg.getCacheName(); + + if (F.isEmpty(cacheName)) + qryCursor = ignite.context().query().querySqlFieldsNoCache(qry, true); + else { + IgniteCache c = ignite.cache(cacheName); + + if (c == null) + throw new SQLException("Fail to execute query. Cache not found: " + cacheName); + + try { + qryCursor = c.withKeepBinary().query(qry); + } + catch (CacheException e) { + // Work around for DDL without explicit schema name. 
+ if (X.hasCause(e, IgniteSQLException.class) + && e.getMessage().contains("can only be executed on PUBLIC schema")) { + qry.setSchema("PUBLIC"); + + qryCursor = c.withKeepBinary().query(qry); + } + else + throw e; + } + } + + VisorQueryCursor> cur = new VisorQueryCursor<>(qryCursor); Collection meta = cur.fieldsMeta(); diff --git a/modules/web-console/frontend/app/modules/agent/AgentManager.service.js b/modules/web-console/frontend/app/modules/agent/AgentManager.service.js index 20d29768eaa44..288ec945e0bbd 100644 --- a/modules/web-console/frontend/app/modules/agent/AgentManager.service.js +++ b/modules/web-console/frontend/app/modules/agent/AgentManager.service.js @@ -377,11 +377,10 @@ export default class IgniteAgentManager { } /** - * @param {String} [cacheName] Cache name. * @returns {Promise} */ - metadata(cacheName) { - return this._rest('node:rest', {cmd: 'metadata', cacheName: maskNull(cacheName)}) + metadata() { + return this._rest('node:rest', {cmd: 'metadata'}) .then((caches) => { let types = []; @@ -590,7 +589,7 @@ export default class IgniteAgentManager { nid + '=' + queryId); } - return this.visorTask('queryClose', nid, queryId); + return this.visorTask('queryClose', nid, nid, queryId); } /** diff --git a/modules/web-console/frontend/app/modules/sql/sql.controller.js b/modules/web-console/frontend/app/modules/sql/sql.controller.js index 5f06c1ee376ad..8011b0f6f9023 100644 --- a/modules/web-console/frontend/app/modules/sql/sql.controller.js +++ b/modules/web-console/frontend/app/modules/sql/sql.controller.js @@ -863,9 +863,6 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', return cachesAcc; }, []), 'label'); - if (_.isEmpty($scope.caches)) - return; - // Reset to first cache in case of stopped selected. 
const cacheNames = _.map($scope.caches, (cache) => cache.value); @@ -1313,6 +1310,9 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', * @return {String} Nid */ const _chooseNode = (name, local) => { + if (_.isEmpty(name)) + return Promise.resolve(null); + const nodes = cacheNodes(name); if (local) { @@ -1386,7 +1386,7 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', const enforceJoinOrder = !!paragraph.enforceJoinOrder; const lazy = !!paragraph.lazy; - $scope.actionAvailable(paragraph, true) && _chooseNode(paragraph.cacheName, local) + $scope.queryAvailable(paragraph) && _chooseNode(paragraph.cacheName, local) .then((nid) => { Notebook.save($scope.notebook) .catch(Messages.showError); @@ -1444,7 +1444,7 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', }; $scope.explain = (paragraph) => { - if (!$scope.actionAvailable(paragraph, true)) + if (!$scope.queryAvailable(paragraph)) return; Notebook.save($scope.notebook) @@ -1483,7 +1483,7 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', const filter = paragraph.filter; const pageSize = paragraph.pageSize; - $scope.actionAvailable(paragraph, false) && _chooseNode(cacheName, local) + $scope.scanAvailable(paragraph) && _chooseNode(cacheName, local) .then((nid) => { Notebook.save($scope.notebook) .catch(Messages.showError); @@ -1689,18 +1689,32 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', _chartApplySettings(paragraph, true); }; - $scope.actionAvailable = function(paragraph, needQuery) { - return $scope.caches.length > 0 && (!needQuery || paragraph.query) && !paragraph.loading; + $scope.queryAvailable = function(paragraph) { + return paragraph.query && !paragraph.loading; + }; + + $scope.queryTooltip = function(paragraph, action) { + if ($scope.queryAvailable(paragraph)) + return; + + if (paragraph.loading) + return 'Waiting for server response'; + + 
return 'Input text to ' + action; + }; + + $scope.scanAvailable = function(paragraph) { + return $scope.caches.length && !paragraph.loading; }; - $scope.actionTooltip = function(paragraph, action, needQuery) { - if ($scope.actionAvailable(paragraph, needQuery)) + $scope.scanTooltip = function(paragraph) { + if ($scope.scanAvailable(paragraph)) return; if (paragraph.loading) return 'Waiting for server response'; - return 'To ' + action + ' query select cache' + (needQuery ? ' and input query' : ''); + return 'Select cache to export scan results'; }; $scope.clickableMetadata = function(node) { diff --git a/modules/web-console/frontend/views/sql/sql.tpl.pug b/modules/web-console/frontend/views/sql/sql.tpl.pug index 1ef2a4c8022f1..724c53cf160b2 100644 --- a/modules/web-console/frontend/views/sql/sql.tpl.pug +++ b/modules/web-console/frontend/views/sql/sql.tpl.pug @@ -123,10 +123,10 @@ mixin query-settings span Lazy result set mixin query-actions - button.btn.btn-primary(ng-disabled='!actionAvailable(paragraph, true)' ng-click='execute(paragraph)') Execute - button.btn.btn-primary(ng-disabled='!actionAvailable(paragraph, true)' ng-click='execute(paragraph, true)') Execute on selected node + button.btn.btn-primary(ng-disabled='!queryAvailable(paragraph)' ng-click='execute(paragraph)') Execute + button.btn.btn-primary(ng-disabled='!queryAvailable(paragraph)' ng-click='execute(paragraph, true)') Execute on selected node - a.btn.btn-default(ng-disabled='!actionAvailable(paragraph, true)' ng-click='explain(paragraph)' data-placement='bottom' bs-tooltip='' data-title='{{actionTooltip(paragraph, "explain", true)}}') Explain + a.btn.btn-default(ng-disabled='!queryAvailable(paragraph)' ng-click='explain(paragraph)' data-placement='bottom' bs-tooltip='' data-title='{{queryTooltip(paragraph, "explain query")}}') Explain mixin table-result-heading-query .total.row @@ -142,7 +142,7 @@ mixin table-result-heading-query .col-xs-4 .pull-right -var options = [{ text: "Export", click: 
'exportCsv(paragraph)' }, { text: 'Export all', click: 'exportCsvAll(paragraph)' }] - +btn-group('paragraph.loading', options, '{{ actionTooltip(paragraph, "export", false) }}') + +btn-group('paragraph.loading', options, '{{ queryTooltip(paragraph, "export query results") }}') mixin table-result-heading-scan .total.row @@ -158,7 +158,7 @@ mixin table-result-heading-scan .col-xs-4 .pull-right -var options = [{ text: "Export", click: 'exportCsv(paragraph)' }, { text: 'Export all', click: 'exportCsvAll(paragraph)' }] - +btn-group('paragraph.loading', options, '{{ actionTooltip(paragraph, "export", false) }}') + +btn-group('paragraph.loading', options, '{{ scanTooltip(paragraph) }}') mixin table-result-body .grid(ui-grid='paragraph.gridOptions' ui-grid-resize-columns ui-grid-exporter) @@ -196,9 +196,9 @@ mixin paragraph-scan button.btn.btn-default.select-toggle.tipLabel(ng-model='paragraph.pageSize' bs-select bs-options='item for item in pageSizes') .col-sm-12.sql-controls - button.btn.btn-primary(ng-disabled='!actionAvailable(paragraph, false)' ng-click='scan(paragraph)') + button.btn.btn-primary(ng-disabled='!scanAvailable(paragraph)' ng-click='scan(paragraph)') | Scan - button.btn.btn-primary(ng-disabled='!actionAvailable(paragraph, false)' ng-click='scan(paragraph, true)') + button.btn.btn-primary(ng-disabled='!scanAvailable(paragraph)' ng-click='scan(paragraph, true)') | Scan on selected node .col-sm-12.sql-result(ng-if='paragraph.queryExecuted()' ng-switch='paragraph.resultType()') From 6c4c41d4ce6f2066a399756f60f798507e0553ad Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Fri, 6 Oct 2017 17:00:39 +0700 Subject: [PATCH 009/243] IGNITE-6570 Web Console: Move parsing of JSON to pool of workers. 
(cherry picked from commit 74f0400) --- .../web-console/backend/app/agentSocket.js | 21 +--- .../backend/app/browsersHandler.js | 9 +- .../app/modules/agent/AgentManager.service.js | 18 ++- .../app/modules/agent/decompress.worker.js | 33 +++++ .../frontend/app/utils/SimpleWorkerPool.js | 119 ++++++++++++++++++ modules/web-console/frontend/package.json | 1 + 6 files changed, 176 insertions(+), 25 deletions(-) create mode 100644 modules/web-console/frontend/app/modules/agent/decompress.worker.js create mode 100644 modules/web-console/frontend/app/utils/SimpleWorkerPool.js diff --git a/modules/web-console/backend/app/agentSocket.js b/modules/web-console/backend/app/agentSocket.js index 489d1454c5179..75dcd53449a42 100644 --- a/modules/web-console/backend/app/agentSocket.js +++ b/modules/web-console/backend/app/agentSocket.js @@ -24,7 +24,7 @@ */ module.exports = { implements: 'agent-socket', - inject: ['require(lodash)', 'require(zlib)'] + inject: ['require(lodash)'] }; /** @@ -79,10 +79,9 @@ class Command { /** * @param _ - * @param zlib * @returns {AgentSocket} */ -module.exports.factory = function(_, zlib) { +module.exports.factory = function(_) { /** * Connected agent descriptor. */ @@ -136,21 +135,7 @@ module.exports.factory = function(_, zlib) { if (resErr) return reject(resErr); - if (res.zipped) { - // TODO IGNITE-6127 Temporary solution until GZip support for socket.io-client-java. - // See: https://github.com/socketio/socket.io-client-java/issues/312 - // We can GZip manually for now. 
- zlib.gunzip(new Buffer(res.data, 'base64'), (unzipErr, unzipped) => { - if (unzipErr) - return reject(unzipErr); - - res.data = unzipped.toString(); - - resolve(res); - }); - } - else - resolve(res); + resolve(res); }) ); } diff --git a/modules/web-console/backend/app/browsersHandler.js b/modules/web-console/backend/app/browsersHandler.js index 4fb5088ee5ef6..f4ff23c2534a9 100644 --- a/modules/web-console/backend/app/browsersHandler.js +++ b/modules/web-console/backend/app/browsersHandler.js @@ -181,8 +181,12 @@ module.exports.factory = (_, socketio, configure, errors, mongo) => { return agent .then((agentSock) => agentSock.emitEvent('node:rest', {uri: 'ignite', demo, params})) .then((res) => { - if (res.status === 0) + if (res.status === 0) { + if (res.zipped) + return res; + return JSON.parse(res.data); + } throw new Error(res.error); }); @@ -250,6 +254,9 @@ module.exports.factory = (_, socketio, configure, errors, mongo) => { this.executeOnNode(agent, demo, params) .then((data) => { + if (data.zipped) + return cb(null, data); + if (data.finished) return cb(null, data.result); diff --git a/modules/web-console/frontend/app/modules/agent/AgentManager.service.js b/modules/web-console/frontend/app/modules/agent/AgentManager.service.js index 288ec945e0bbd..752b4f05a3124 100644 --- a/modules/web-console/frontend/app/modules/agent/AgentManager.service.js +++ b/modules/web-console/frontend/app/modules/agent/AgentManager.service.js @@ -17,6 +17,8 @@ import { BehaviorSubject } from 'rxjs/BehaviorSubject'; +import Worker from 'worker!./decompress.worker'; +import SimpleWorkerPool from '../../utils/SimpleWorkerPool'; import maskNull from 'app/core/utils/maskNull'; const State = { @@ -82,11 +84,9 @@ export default class IgniteAgentManager { this.promises = new Set(); - /** - * Connection to backend. - * @type {Socket} - */ - this.socket = null; + this.pool = new SimpleWorkerPool('decompressor', Worker, 4); + + this.socket = null; // Connection to backend. 
let cluster; @@ -364,7 +364,13 @@ export default class IgniteAgentManager { * @private */ _rest(event, ...args) { - return this._emit(event, _.get(this.connectionSbj.getValue(), 'cluster.id'), ...args); + return this._emit(event, _.get(this.connectionSbj.getValue(), 'cluster.id'), ...args) + .then((data) => { + if (data.zipped) + return this.pool.postMessage(data.data); + + return data; + }); } /** diff --git a/modules/web-console/frontend/app/modules/agent/decompress.worker.js b/modules/web-console/frontend/app/modules/agent/decompress.worker.js new file mode 100644 index 0000000000000..d8e176d59b89e --- /dev/null +++ b/modules/web-console/frontend/app/modules/agent/decompress.worker.js @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import _ from 'lodash'; +import pako from 'pako'; + +/** This worker decode & decompress BASE64/Zipped data and parse to JSON. 
*/ +// eslint-disable-next-line no-undef +onmessage = function(e) { + const data = e.data; + + const binaryString = atob(data); // Decode from BASE64 + + const unzipped = pako.inflate(binaryString, {to: 'string'}); + + const res = JSON.parse(unzipped); + + postMessage(_.get(res, 'result', res)); +}; diff --git a/modules/web-console/frontend/app/utils/SimpleWorkerPool.js b/modules/web-console/frontend/app/utils/SimpleWorkerPool.js new file mode 100644 index 0000000000000..d8ed28b6186e4 --- /dev/null +++ b/modules/web-console/frontend/app/utils/SimpleWorkerPool.js @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import {Observable} from 'rxjs/Observable'; +import {Subject} from 'rxjs/Subject'; +import 'rxjs/add/observable/race'; +import 'rxjs/add/operator/filter'; +import 'rxjs/add/operator/pluck'; +import 'rxjs/add/operator/take'; +import 'rxjs/add/operator/toPromise'; + +/** + * Simple implementation of workers pool. 
+ */ +export default class SimpleWorkerPool { + constructor(name, WorkerClass, poolSize = (navigator.hardwareConcurrency || 4), dbg = false) { + this._name = name; + this._WorkerClass = WorkerClass; + this._tasks = []; + this._msgId = 0; + this.messages$ = new Subject(); + this.errors$ = new Subject(); + this.__dbg = dbg; + + this._workers = _.range(poolSize).map(() => { + const worker = new this._WorkerClass(); + + worker.onmessage = (m) => { + this.messages$.next({tid: worker.tid, m}); + + worker.tid = null; + + this._run(); + }; + + worker.onerror = (e) => { + this.errors$.next({tid: worker.tid, e}); + + worker.tid = null; + + this._run(); + }; + + return worker; + }); + } + + _makeTaskID() { + return this._msgId++; + } + + _getNextWorker() { + return this._workers.find((w) => !w.tid); + } + + _getNextTask() { + return this._tasks.shift(); + } + + _run() { + const worker = this._getNextWorker(); + + if (!worker || !this._tasks.length) + return; + + const task = this._getNextTask(); + + worker.tid = task.tid; + + if (this.__dbg) + console.time(`Post message[pool=${this._name}]`); + + worker.postMessage(task.data); + + if (this.__dbg) + console.timeEnd('Post message'); + } + + terminate() { + this._workers.forEach((w) => w.terminate()); + + this.messages$.complete(); + this.errors$.complete(); + + this._workers = null; + } + + postMessage(data) { + const tid = this._makeTaskID(); + + this._tasks.push({tid, data}); + + if (this.__dbg) + console.log(`Pool: [name=${this._name}, queue=${this._tasks.length}]`); + + this._run(); + + return Observable.race( + this.messages$.filter((e) => e.tid === tid).take(1).pluck('m', 'data'), + this.errors$.filter((e) => e.tid === tid).take(1).map((e) => { + throw e.e; + })) + .take(1).toPromise(); + } +} diff --git a/modules/web-console/frontend/package.json b/modules/web-console/frontend/package.json index d828e178b7ddb..2083640b60df5 100644 --- a/modules/web-console/frontend/package.json +++ 
b/modules/web-console/frontend/package.json @@ -80,6 +80,7 @@ "lodash": "4.17.4", "node-sass": "4.5.3", "nvd3": "1.8.4", + "pako": "1.0.6", "progress-bar-webpack-plugin": "1.10.0", "pug-html-loader": "1.1.0", "pug-loader": "2.3.0", From 6efb0cee753eac1289efd21cc773ca372775aa68 Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Tue, 3 Oct 2017 18:33:57 +0300 Subject: [PATCH 010/243] .NET: Fix TestRecordLocal --- modules/platforms/dotnet/Apache.Ignite.Core.Tests/EventsTest.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/EventsTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/EventsTest.cs index 672ff9ebc7345..a7c05344e963f 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/EventsTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/EventsTest.cs @@ -314,7 +314,7 @@ public void TestLocalQuery() [Test] public void TestRecordLocal() { - Assert.Throws(() => _grid1.GetEvents().RecordLocal(new MyEvent())); + Assert.Throws(() => _grid1.GetEvents().RecordLocal(new MyEvent())); } /// From fca198bf1ecd4eee8bb72233cd59d0ea2e427c7c Mon Sep 17 00:00:00 2001 From: Alexander Paschenko Date: Fri, 6 Oct 2017 18:04:44 +0300 Subject: [PATCH 011/243] IGNITE-6054: SQL: implemented "WRAP_KEY" and "WRAP_VALUE" modes for CREATE TABLE. This closes #2784. 
--- .../jdbc/thin/JdbcThinMetadataSelfTest.java | 2 +- .../cache/query/IgniteQueryErrorCode.java | 2 +- .../processors/query/GridQueryIndexing.java | 22 +- .../processors/query/GridQueryProcessor.java | 8 +- .../query/QueryTypeDescriptorImpl.java | 42 ++- .../query/property/QueryBinaryProperty.java | 1 - ...niteClientCacheInitializationFailTest.java | 18 +- .../query/h2/DmlStatementsProcessor.java | 26 +- .../processors/query/h2/H2Schema.java | 17 +- .../processors/query/h2/H2TypeKey.java | 64 ++++ .../processors/query/h2/IgniteH2Indexing.java | 33 +- .../query/h2/ddl/DdlStatementsProcessor.java | 36 ++- .../query/h2/sql/GridSqlCreateTable.java | 34 ++ .../query/h2/sql/GridSqlQueryParser.java | 58 +++- ...amicColumnsAbstractConcurrentSelfTest.java | 57 ++-- ...nsConcurrentAtomicPartitionedSelfTest.java | 2 +- ...mnsConcurrentAtomicReplicatedSelfTest.java | 2 +- ...rrentTransactionalPartitionedSelfTest.java | 2 +- ...urrentTransactionalReplicatedSelfTest.java | 5 +- ...H2DynamicColumnsAbstractBasicSelfTest.java | 44 +++ .../cache/index/H2DynamicTableSelfTest.java | 301 ++++++++++++++++-- .../query/IgniteSqlNotNullConstraintTest.java | 2 +- .../h2/GridIndexingSpiAbstractSelfTest.java | 47 +-- .../Cache/Query/CacheDmlQueriesTest.cs | 4 +- 24 files changed, 673 insertions(+), 156 deletions(-) create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2TypeKey.java diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java index 6c20de0df5347..4e1ae4d59e71b 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java @@ -139,7 +139,7 @@ protected CacheConfiguration cacheConfiguration(QueryEntity qryEntity) { Statement stmt = conn.createStatement(); stmt.execute("CREATE 
TABLE TEST (ID INT, NAME VARCHAR(50), VAL VARCHAR(50), PRIMARY KEY (ID, NAME))"); - stmt.execute("CREATE TABLE \"Quoted\" (\"Id\" INT primary key, \"Name\" VARCHAR(50))"); + stmt.execute("CREATE TABLE \"Quoted\" (\"Id\" INT primary key, \"Name\" VARCHAR(50)) WITH WRAP_KEY"); stmt.execute("CREATE INDEX \"MyTestIndex quoted\" on \"Quoted\" (\"Id\" DESC)"); stmt.execute("CREATE INDEX IDX ON TEST (ID ASC)"); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/IgniteQueryErrorCode.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/IgniteQueryErrorCode.java index 8e5af31820db7..e0ff9a40ee8fa 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/IgniteQueryErrorCode.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/IgniteQueryErrorCode.java @@ -91,7 +91,7 @@ public final class IgniteQueryErrorCode { /** Attempt to INSERT or MERGE {@code null} key. */ public final static int NULL_KEY = 4003; - /** Attempt to INSERT or MERGE {@code null} value. */ + /** Attempt to INSERT or MERGE {@code null} value, or to to set {@code null} to a {@code NOT NULL} column. */ public final static int NULL_VALUE = 4004; /** {@link EntryProcessor} has thrown an exception during {@link IgniteCache#invokeAll}. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java index b8445ca7dcc3c..93d541d480523 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java @@ -66,13 +66,13 @@ public interface GridQueryIndexing { * Parses SQL query into two step query and executes it. * * @param schemaName Schema name. + * @param cacheName Cache name. * @param qry Query. 
* @param keepBinary Keep binary flag. - * @param mainCacheId Main cache ID. - * @return Cursor. + * @param mainCacheId Main cache ID. @return Cursor. * @throws IgniteCheckedException If failed. */ - public QueryCursor> queryDistributedSql(String schemaName, SqlQuery qry, + public QueryCursor> queryDistributedSql(String schemaName, String cacheName, SqlQuery qry, boolean keepBinary, int mainCacheId) throws IgniteCheckedException; /** @@ -109,12 +109,12 @@ public long streamUpdateQuery(String schemaName, String qry, @Nullable Object[] * Executes regular query. * * @param schemaName Schema name. - * @param qry Query. + * @param cacheName Cache name. + *@param qry Query. * @param filter Cache name and key filter. - * @param keepBinary Keep binary flag. - * @return Cursor. + * @param keepBinary Keep binary flag. @return Cursor. */ - public QueryCursor> queryLocalSql(String schemaName, SqlQuery qry, + public QueryCursor> queryLocalSql(String schemaName, String cacheName, SqlQuery qry, IndexingQueryFilter filter, boolean keepBinary) throws IgniteCheckedException; /** @@ -134,14 +134,14 @@ public FieldsQueryCursor> queryLocalSqlFields(String schemaName, SqlFiel * Executes text query. * * @param schemaName Schema name. + * @param cacheName Cache name. * @param qry Text query. * @param typeName Type name. - * @param filter Cache name and key filter. - * @return Queried rows. + * @param filter Cache name and key filter. @return Queried rows. * @throws IgniteCheckedException If failed. */ - public GridCloseableIterator> queryLocalText(String schemaName, String qry, - String typeName, IndexingQueryFilter filter) throws IgniteCheckedException; + public GridCloseableIterator> queryLocalText(String schemaName, String cacheName, + String qry, String typeName, IndexingQueryFilter filter) throws IgniteCheckedException; /** * Create new index locally. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java index 56e8a42d76f8b..f044c1d4dd4df 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java @@ -2044,7 +2044,7 @@ private QueryCursor> queryDistributedSql(final GridCacheC return executeQuery(GridCacheQueryType.SQL, qry.getSql(), cctx, new IgniteOutClosureX>>() { @Override public QueryCursor> applyx() throws IgniteCheckedException { - return idx.queryDistributedSql(schemaName, qry, keepBinary, mainCacheId); + return idx.queryDistributedSql(schemaName, cctx.name(), qry, keepBinary, mainCacheId); } }, true); } @@ -2088,10 +2088,10 @@ private QueryCursor> queryLocalSql(final GridCacheConte if (cctx.config().getQueryParallelism() > 1) { qry.setDistributedJoins(true); - return idx.queryDistributedSql(schemaName, qry, keepBinary, mainCacheId); + return idx.queryDistributedSql(schemaName, cctx.name(), qry, keepBinary, mainCacheId); } else - return idx.queryLocalSql(schemaName, qry, idx.backupFilter(requestTopVer.get(), + return idx.queryLocalSql(schemaName, cctx.name(), qry, idx.backupFilter(requestTopVer.get(), qry.getPartitions()), keepBinary); } }, true); @@ -2344,7 +2344,7 @@ public GridCloseableIterator> queryText(final String String typeName = typeName(cacheName, resType); String schemaName = idx.schema(cacheName); - return idx.queryLocalText(schemaName, clause, typeName, filters); + return idx.queryLocalText(schemaName, cacheName, clause, typeName, filters); } }, true); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryTypeDescriptorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryTypeDescriptorImpl.java index 
e12476a928bfc..72adefd43f37f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryTypeDescriptorImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryTypeDescriptorImpl.java @@ -24,12 +24,12 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cache.QueryIndexType; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.S; import org.jetbrains.annotations.Nullable; @@ -453,7 +453,7 @@ public void markObsolete() { * Sets key field name. * @param keyFieldName Key field name. */ - public void keyFieldName(String keyFieldName) { + void keyFieldName(String keyFieldName) { this.keyFieldName = keyFieldName; } @@ -464,10 +464,10 @@ public void keyFieldName(String keyFieldName) { /** * Sets value field name. - * @param valueFieldName value field name. + * @param valFieldName value field name. */ - public void valueFieldName(String valueFieldName) { - this.valFieldName = valueFieldName; + void valueFieldName(String valFieldName) { + this.valFieldName = valFieldName; } /** {@inheritDoc} */ @@ -480,6 +480,7 @@ public void valueFieldName(String valueFieldName) { return keyFieldName != null ? aliases.get(keyFieldName) : null; } + /** {@inheritDoc} */ @Nullable @Override public String valueFieldAlias() { return valFieldName != null ? 
aliases.get(valFieldName) : null; } @@ -487,17 +488,34 @@ public void valueFieldName(String valueFieldName) { /** {@inheritDoc} */ @SuppressWarnings("ForLoopReplaceableByForEach") @Override public void validateKeyAndValue(Object key, Object val) throws IgniteCheckedException { - if (validateProps == null) + if (F.isEmpty(validateProps)) return; - final int size = validateProps.size(); + for (int i = 0; i < validateProps.size(); ++i) { + GridQueryProperty prop = validateProps.get(i); + + Object propVal; + + int errCode; + + if (F.eq(prop.name(), keyFieldName)) { + propVal = key; + + errCode = IgniteQueryErrorCode.NULL_KEY; + } + else if (F.eq(prop.name(), valFieldName)) { + propVal = val; + + errCode = IgniteQueryErrorCode.NULL_VALUE; + } + else { + propVal = prop.value(key, val); - for (int idx = 0; idx < size; ++idx) { - GridQueryProperty prop = validateProps.get(idx); + errCode = IgniteQueryErrorCode.NULL_VALUE; + } - if (prop.value(key, val) == null) - throw new IgniteSQLException("Null value is not allowed for field '" + prop.name() + "'", - IgniteQueryErrorCode.NULL_VALUE); + if (propVal == null) + throw new IgniteSQLException("Null value is not allowed for column '" + prop.name() + "'", errCode); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/property/QueryBinaryProperty.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/property/QueryBinaryProperty.java index 5d90a43c964ca..18508a8b8edad 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/property/QueryBinaryProperty.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/property/QueryBinaryProperty.java @@ -81,7 +81,6 @@ public class QueryBinaryProperty implements GridQueryProperty { */ public QueryBinaryProperty(GridKernalContext ctx, String propName, QueryBinaryProperty parent, Class type, @Nullable Boolean key, String alias, boolean notNull) { - this.ctx = ctx; log = 
ctx.log(QueryBinaryProperty.class); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java index 1ebf5568fa544..83dd9c94c29f1 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java @@ -193,7 +193,7 @@ private void checkFineCache(Ignite client, String cacheName) { /** * @param client Client. */ - @SuppressWarnings("ThrowableNotThrown") + @SuppressWarnings({"ThrowableNotThrown", "ThrowableResultOfMethodCallIgnored"}) private void checkFailedCache(final Ignite client, final String cacheName) { GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { @@ -236,14 +236,14 @@ private static class FailedIndexing implements GridQueryIndexing { } /** {@inheritDoc} */ - @Override public QueryCursor> queryDistributedSql(String schemaName, SqlQuery qry, - boolean keepBinary, int mainCacheId) throws IgniteCheckedException { + @Override public QueryCursor> queryDistributedSql(String schemaName, String cacheName, + SqlQuery qry, boolean keepBinary, int mainCacheId) throws IgniteCheckedException { return null; } /** {@inheritDoc} */ - @Override public List>> queryDistributedSqlFields(String schemaName, SqlFieldsQuery qry, - boolean keepBinary, GridQueryCancel cancel, + @Override public List>> queryDistributedSqlFields(String schemaName, + SqlFieldsQuery qry, boolean keepBinary, GridQueryCancel cancel, @Nullable Integer mainCacheId, boolean failOnMultipleStmts) throws IgniteCheckedException { return null; } @@ -255,8 +255,8 @@ private static class FailedIndexing implements GridQueryIndexing { } /** {@inheritDoc} */ - @Override public QueryCursor> 
queryLocalSql(String schemaName, SqlQuery qry, - IndexingQueryFilter filter, boolean keepBinary) throws IgniteCheckedException { + @Override public QueryCursor> queryLocalSql(String schemaName, String cacheName, + SqlQuery qry, IndexingQueryFilter filter, boolean keepBinary) throws IgniteCheckedException { return null; } @@ -267,8 +267,8 @@ private static class FailedIndexing implements GridQueryIndexing { } /** {@inheritDoc} */ - @Override public GridCloseableIterator> queryLocalText(String spaceName, String qry, - String typeName, IndexingQueryFilter filter) throws IgniteCheckedException { + @Override public GridCloseableIterator> queryLocalText(String spaceName, + String cacheName, String qry, String typeName, IndexingQueryFilter filter) throws IgniteCheckedException { return null; } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java index ee1875f155894..98117b2dfe6d7 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java @@ -49,13 +49,13 @@ import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.internal.GridKernalContext; -import org.apache.ignite.internal.processors.odbc.SqlStateCode; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheOperationContext; import org.apache.ignite.internal.processors.cache.GridCacheAdapter; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; +import 
org.apache.ignite.internal.processors.odbc.SqlStateCode; import org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator; import org.apache.ignite.internal.processors.query.GridQueryCancel; import org.apache.ignite.internal.processors.query.GridQueryFieldsResult; @@ -255,13 +255,12 @@ GridQueryFieldsResult updateSqlFieldsLocal(String schemaName, PreparedStatement } /** - * Perform given statement against given data streamer. Only rows based INSERT and MERGE are supported - * as well as key bound UPDATE and DELETE (ones with filter {@code WHERE _key = ?}). + * Perform given statement against given data streamer. Only rows based INSERT is supported. * * @param streamer Streamer to feed data to. * @param stmt Statement. * @param args Statement arguments. - * @return Number of rows in given statement for INSERT and MERGE, {@code 1} otherwise. + * @return Number of rows in given INSERT statement. * @throws IgniteCheckedException if failed. */ @SuppressWarnings({"unchecked", "ConstantConditions"}) @@ -916,11 +915,22 @@ private static PageProcessingResult processPage(GridCacheContext cctx, val = convert(val, rowDesc, desc.valueClass(), plan.colTypes[plan.valColIdx]); } - if (key == null) - throw new IgniteSQLException("Key for INSERT or MERGE must not be null", IgniteQueryErrorCode.NULL_KEY); + if (key == null) { + if (F.isEmpty(desc.keyFieldName())) + throw new IgniteSQLException("Key for INSERT or MERGE must not be null", IgniteQueryErrorCode.NULL_KEY); + else + throw new IgniteSQLException("Null value is not allowed for column '" + desc.keyFieldName() + "'", + IgniteQueryErrorCode.NULL_KEY); + } - if (val == null) - throw new IgniteSQLException("Value for INSERT or MERGE must not be null", IgniteQueryErrorCode.NULL_VALUE); + if (val == null) { + if (F.isEmpty(desc.valueFieldName())) + throw new IgniteSQLException("Value for INSERT, MERGE, or UPDATE must not be null", + IgniteQueryErrorCode.NULL_VALUE); + else + throw new IgniteSQLException("Null value 
is not allowed for column '" + desc.valueFieldName() + "'", + IgniteQueryErrorCode.NULL_VALUE); + } Map newColVals = new HashMap<>(); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Schema.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Schema.java index 3f39e6a7eb545..f5cf0f26b5cd8 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Schema.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Schema.java @@ -17,9 +17,8 @@ package org.apache.ignite.internal.processors.query.h2; -import org.jsr166.ConcurrentHashMap8; - import java.util.Collection; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; /** @@ -30,10 +29,10 @@ public class H2Schema { private final String schemaName; /** */ - private final ConcurrentMap tbls = new ConcurrentHashMap8<>(); + private final ConcurrentMap tbls = new ConcurrentHashMap<>(); /** */ - private final ConcurrentMap typeToTbl = new ConcurrentHashMap8<>(); + private final ConcurrentMap typeToTbl = new ConcurrentHashMap<>(); /** * Constructor. @@ -70,8 +69,8 @@ public H2TableDescriptor tableByName(String tblName) { * @param typeName Type name. * @return Table. 
*/ - public H2TableDescriptor tableByTypeName(String typeName) { - return typeToTbl.get(typeName); + public H2TableDescriptor tableByTypeName(String cacheName, String typeName) { + return typeToTbl.get(new H2TypeKey(cacheName, typeName)); } /** @@ -81,7 +80,7 @@ public void add(H2TableDescriptor tbl) { if (tbls.putIfAbsent(tbl.tableName(), tbl) != null) throw new IllegalStateException("Table already registered: " + tbl.fullTableName()); - if (typeToTbl.putIfAbsent(tbl.typeName(), tbl) != null) + if (typeToTbl.putIfAbsent(new H2TypeKey(tbl.cache().name(), tbl.typeName()), tbl) != null) throw new IllegalStateException("Table already registered: " + tbl.fullTableName()); } @@ -91,7 +90,7 @@ public void add(H2TableDescriptor tbl) { public void remove(H2TableDescriptor tbl) { tbls.remove(tbl.tableName()); - typeToTbl.remove(tbl.typeName()); + typeToTbl.remove(new H2TypeKey(tbl.cache().name(), tbl.typeName())); } /** @@ -104,7 +103,7 @@ public void drop(H2TableDescriptor tbl) { tbls.remove(tbl.tableName()); - typeToTbl.remove(tbl.typeName()); + typeToTbl.remove(new H2TypeKey(tbl.cache().name(), tbl.typeName())); } /** diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2TypeKey.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2TypeKey.java new file mode 100644 index 0000000000000..d39918c425309 --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2TypeKey.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query.h2; + +import org.apache.ignite.internal.util.typedef.F; + +/** + * Key for types lookup. + */ +public class H2TypeKey { + /** Cache name. */ + private final String cacheName; + + /** Type name. */ + private final String typeName; + + /** + * Constructor. + * + * @param cacheName Cache name. + * @param typeName Type name. + */ + H2TypeKey(String cacheName, String typeName) { + this.cacheName = cacheName; + this.typeName = typeName; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object o) { + if (this == o) + return true; + + if (o == null || getClass() != o.getClass()) + return false; + + H2TypeKey other = (H2TypeKey)o; + + return F.eq(typeName, other.typeName) && F.eq(cacheName, other.cacheName); + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + int res = cacheName.hashCode(); + + res = 31 * res + typeName.hashCode(); + + return res; + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index fd7b9a85451e1..22ed592bc98b3 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -548,7 +548,7 @@ private void onSqlException() { GridCacheVersion ver, long expirationTime, long link) throws IgniteCheckedException { - H2TableDescriptor tbl 
= tableDescriptor(schema(cacheName), type.name()); + H2TableDescriptor tbl = tableDescriptor(schema(cacheName), cacheName, type.name()); if (tbl == null) return; // Type was rejected. @@ -572,7 +572,7 @@ private void onSqlException() { if (log.isDebugEnabled()) log.debug("Removing key from cache query index [locId=" + nodeId + ", key=" + key + ", val=" + val + ']'); - H2TableDescriptor tbl = tableDescriptor(schema(cacheName), type.name()); + H2TableDescriptor tbl = tableDescriptor(schema(cacheName), cacheName, type.name()); if (tbl == null) return; @@ -777,10 +777,11 @@ public GridH2IndexBase createSortedIndex(String name, GridH2Table tbl, boolean p } } + /** {@inheritDoc} */ @SuppressWarnings("unchecked") - @Override public GridCloseableIterator> queryLocalText(String schemaName, String qry, - String typeName, IndexingQueryFilter filters) throws IgniteCheckedException { - H2TableDescriptor tbl = tableDescriptor(schemaName, typeName); + @Override public GridCloseableIterator> queryLocalText(String schemaName, + String cacheName, String qry, String typeName, IndexingQueryFilter filters) throws IgniteCheckedException { + H2TableDescriptor tbl = tableDescriptor(schemaName, cacheName, typeName); if (tbl != null && tbl.luceneIndex() != null) { GridRunningQueryInfo run = new GridRunningQueryInfo(qryIdGen.incrementAndGet(), qry, TEXT, schemaName, @@ -1098,7 +1099,7 @@ public void bindParameters(PreparedStatement stmt, /** {@inheritDoc} */ @SuppressWarnings("unchecked") - @Override public QueryCursor> queryLocalSql(String schemaName, + @Override public QueryCursor> queryLocalSql(String schemaName, String cacheName, final SqlQuery qry, final IndexingQueryFilter filter, final boolean keepBinary) throws IgniteCheckedException { String type = qry.getType(); String sqlQry = qry.getSql(); @@ -1107,7 +1108,7 @@ public void bindParameters(PreparedStatement stmt, GridQueryCancel cancel = new GridQueryCancel(); - final GridCloseableIterator> i = queryLocalSql(schemaName, sqlQry, 
alias, + final GridCloseableIterator> i = queryLocalSql(schemaName, cacheName, sqlQry, alias, F.asList(params), type, filter, cancel); return new QueryCursorImpl<>(new Iterable>() { @@ -1142,19 +1143,19 @@ public void bindParameters(PreparedStatement stmt, * Executes regular query. * * @param schemaName Schema name. + * @param cacheName Cache name. * @param qry Query. * @param alias Table alias. * @param params Query parameters. * @param type Query return type. - * @param filter Cache name and key filter. - * @return Queried rows. + * @param filter Cache name and key filter. @return Queried rows. * @throws IgniteCheckedException If failed. */ @SuppressWarnings("unchecked") - public GridCloseableIterator> queryLocalSql(String schemaName, + public GridCloseableIterator> queryLocalSql(String schemaName, String cacheName, final String qry, String alias, @Nullable final Collection params, String type, final IndexingQueryFilter filter, GridQueryCancel cancel) throws IgniteCheckedException { - final H2TableDescriptor tbl = tableDescriptor(schemaName, type); + final H2TableDescriptor tbl = tableDescriptor(schemaName, cacheName, type); if (tbl == null) throw new IgniteSQLException("Failed to find SQL table for type: " + type, @@ -1216,11 +1217,11 @@ private Iterable> runQueryTwoStep( /** {@inheritDoc} */ @SuppressWarnings("unchecked") - @Override public QueryCursor> queryDistributedSql(String schemaName, SqlQuery qry, - boolean keepBinary, int mainCacheId) { + @Override public QueryCursor> queryDistributedSql(String schemaName, String cacheName, + SqlQuery qry, boolean keepBinary, int mainCacheId) { String type = qry.getType(); - H2TableDescriptor tblDesc = tableDescriptor(schemaName, type); + H2TableDescriptor tblDesc = tableDescriptor(schemaName, cacheName, type); if (tblDesc == null) throw new IgniteSQLException("Failed to find SQL table for type: " + type, @@ -1829,13 +1830,13 @@ private String dbTypeFromClass(Class cls) { * @param type Type name. * @return Descriptor. 
*/ - @Nullable private H2TableDescriptor tableDescriptor(String schemaName, String type) { + @Nullable private H2TableDescriptor tableDescriptor(String schemaName, String cacheName, String type) { H2Schema schema = schemas.get(schemaName); if (schema == null) return null; - return schema.tableByTypeName(type); + return schema.tableByTypeName(cacheName, type); } /** {@inheritDoc} */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java index affd903dcb05d..f39e587177c82 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java @@ -62,6 +62,7 @@ import org.h2.value.DataType; import static org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing.UPDATE_RESULT_META; +import static org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser.PARAM_WRAP_VALUE; /** * DDL statements processor.

@@ -227,6 +228,10 @@ else if (stmt0 instanceof GridSqlAlterTableAddColumn) { cmd.tableName()); } else { + if (QueryUtils.isSqlType(tbl.rowDescriptor().type().valueClass())) + throw new SchemaOperationException("Cannot add column(s) because table was created " + + "with " + PARAM_WRAP_VALUE + "=false option."); + List cols = new ArrayList<>(cmd.columns().length); boolean allFieldsNullable = true; @@ -373,11 +378,38 @@ private static QueryEntity toQueryEntity(GridSqlCreateTable createTbl) { if (!F.isEmpty(createTbl.valueTypeName())) valTypeName = createTbl.valueTypeName(); + assert createTbl.wrapKey() != null; + assert createTbl.wrapValue() != null; + + if (!createTbl.wrapKey()) { + GridSqlColumn pkCol = createTbl.columns().get(createTbl.primaryKeyColumns().iterator().next()); + + keyTypeName = DataType.getTypeClassName(pkCol.column().getType()); + + res.setKeyFieldName(pkCol.columnName()); + } + else + res.setKeyFields(createTbl.primaryKeyColumns()); + + if (!createTbl.wrapValue()) { + GridSqlColumn valCol = null; + + for (Map.Entry e : createTbl.columns().entrySet()) { + if (!createTbl.primaryKeyColumns().contains(e.getKey())) { + valCol = e.getValue(); + + break; + } + } + + valTypeName = DataType.getTypeClassName(valCol.column().getType()); + + res.setValueFieldName(valCol.columnName()); + } + res.setValueType(valTypeName); res.setKeyType(keyTypeName); - res.setKeyFields(createTbl.primaryKeyColumns()); - if (!F.isEmpty(notNullFields)) { QueryEntityEx res0 = new QueryEntityEx(res); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlCreateTable.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlCreateTable.java index b73214f49b2b1..de1082634d437 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlCreateTable.java +++ 
b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlCreateTable.java @@ -71,6 +71,12 @@ public class GridSqlCreateTable extends GridSqlStatement { /** Name of the column that represents affinity key. */ private String affinityKey; + /** Forcefully turn single column PK into an Object. */ + private Boolean wrapKey; + + /** Forcefully turn single column value into an Object. */ + private Boolean wrapVal; + /** Extra WITH-params. */ private List params; @@ -270,6 +276,34 @@ public void ifNotExists(boolean ifNotExists) { this.ifNotExists = ifNotExists; } + /** + * @return Forcefully turn single column PK into an Object. + */ + public Boolean wrapKey() { + return wrapKey; + } + + /** + * @param wrapKey Forcefully turn single column PK into an Object. + */ + public void wrapKey(boolean wrapKey) { + this.wrapKey = wrapKey; + } + + /** + * @return Forcefully turn single column value into an Object. + */ + public Boolean wrapValue() { + return wrapVal; + } + + /** + * @param wrapVal Forcefully turn single column value into an Object.. + */ + public void wrapValue(boolean wrapVal) { + this.wrapVal = wrapVal; + } + /** * @return Extra WITH-params. 
*/ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java index 3d7a1a05f8f45..bf72200c45b02 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java @@ -474,6 +474,12 @@ public class GridSqlQueryParser { /** */ private static final String PARAM_VAL_TYPE = "VALUE_TYPE"; + /** */ + private static final String PARAM_WRAP_KEY = "WRAP_KEY"; + + /** */ + public static final String PARAM_WRAP_VALUE = "WRAP_VALUE"; + /** */ private final IdentityHashMap h2ObjToGridObj = new IdentityHashMap<>(); @@ -1007,7 +1013,8 @@ private GridSqlCreateTable parseCreateTable(CreateTable createTbl) { pkCols.add(gridCol.columnName()); } - int valColsNum = cols.size() - pkCols.size(); + int keyColsNum = pkCols.size(); + int valColsNum = cols.size() - keyColsNum; if (valColsNum == 0) throw new IgniteSQLException("Table must have at least one non PRIMARY KEY column.", @@ -1052,6 +1059,44 @@ private GridSqlCreateTable parseCreateTable(CreateTable createTbl) { processExtraParam(e.getKey(), e.getValue(), res); } + // Process key wrapping. + Boolean wrapKey = res.wrapKey(); + + if (wrapKey != null && !wrapKey) { + if (keyColsNum > 1) { + throw new IgniteSQLException(PARAM_WRAP_KEY + " cannot be false when composite primary key exists.", + IgniteQueryErrorCode.PARSING); + } + + if (!F.isEmpty(res.keyTypeName())) { + throw new IgniteSQLException(PARAM_WRAP_KEY + " cannot be false when " + PARAM_KEY_TYPE + " is set.", + IgniteQueryErrorCode.PARSING); + } + } + + boolean wrapKey0 = (res.wrapKey() != null && res.wrapKey()) || !F.isEmpty(res.keyTypeName()) || keyColsNum > 1; + + res.wrapKey(wrapKey0); + + // Process value wrapping. 
+ Boolean wrapVal = res.wrapValue(); + + if (wrapVal != null && !wrapVal) { + if (valColsNum > 1) { + throw new IgniteSQLException(PARAM_WRAP_VALUE + " cannot be false when multiple non-primary key " + + "columns exist.", IgniteQueryErrorCode.PARSING); + } + + if (!F.isEmpty(res.valueTypeName())) { + throw new IgniteSQLException(PARAM_WRAP_VALUE + " cannot be false when " + PARAM_VAL_TYPE + " is set.", + IgniteQueryErrorCode.PARSING); + } + + res.wrapValue(false); + } + else + res.wrapValue(true); // By default value is always wrapped to allow for ALTER TABLE ADD COLUMN commands. + if (!F.isEmpty(res.valueTypeName()) && F.eq(res.keyTypeName(), res.valueTypeName())) throw new IgniteSQLException("Key and value type names " + "should be different for CREATE TABLE: " + res.valueTypeName(), IgniteQueryErrorCode.PARSING); @@ -1336,6 +1381,17 @@ else if (CacheWriteSynchronizationMode.PRIMARY_SYNC.name().equalsIgnoreCase(val) break; + case PARAM_WRAP_KEY: { + res.wrapKey(F.isEmpty(val) || Boolean.parseBoolean(val)); + + break; + } + + case PARAM_WRAP_VALUE: + res.wrapValue(F.isEmpty(val) || Boolean.parseBoolean(val)); + + break; + default: throw new IgniteSQLException("Unsupported parameter: " + name, IgniteQueryErrorCode.PARSING); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractConcurrentSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractConcurrentSelfTest.java index 969c9850632fb..3d4b2a384ee83 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractConcurrentSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractConcurrentSelfTest.java @@ -82,7 +82,7 @@ public abstract class DynamicColumnsAbstractConcurrentSelfTest extends DynamicCo private static final String ATTR_FILTERED = "FILTERED"; /** SQL statement to 
create test table accompanied by template specification. */ - private static final String CREATE_SQL_WITH_TEMPLATE = CREATE_SQL + " WITH \"template=TPL\""; + private final String createSql; /** Latches to block certain index operations. */ private static final ConcurrentHashMap> BLOCKS = @@ -103,6 +103,7 @@ public abstract class DynamicColumnsAbstractConcurrentSelfTest extends DynamicCo DynamicColumnsAbstractConcurrentSelfTest(CacheMode cacheMode, CacheAtomicityMode atomicityMode) { this.cacheMode = cacheMode; this.atomicityMode = atomicityMode; + createSql = CREATE_SQL + " WITH \"template=TPL\""; } /** {@inheritDoc} */ @@ -158,7 +159,7 @@ public void testCoordinatorChange() throws Exception { createSqlCache(cli); - run(cli, CREATE_SQL_WITH_TEMPLATE); + run(cli, createSql); // Test migration between normal servers. CountDownLatch idxLatch = blockIndexing(srv1Id); @@ -210,7 +211,7 @@ public void testOperationChaining() throws Exception { createSqlCache(srv1); - run(srv1, CREATE_SQL_WITH_TEMPLATE); + run(srv1, createSql); CountDownLatch idxLatch = blockIndexing(srv1); @@ -253,7 +254,7 @@ public void testNodeJoinOnPendingOperation() throws Exception { createSqlCache(srv1); - run(srv1, CREATE_SQL_WITH_TEMPLATE); + run(srv1, createSql); CountDownLatch idxLatch = blockIndexing(srv1); @@ -296,7 +297,7 @@ public void testConcurrentPutRemove() throws Exception { createSqlCache(srv1); - run(srv1, CREATE_SQL_WITH_TEMPLATE); + run(srv1, createSql); // Start data change operations from several threads. 
final AtomicBoolean stopped = new AtomicBoolean(); @@ -309,7 +310,7 @@ public void testConcurrentPutRemove() throws Exception { int key = ThreadLocalRandom.current().nextInt(0, LARGE_CACHE_SIZE); int val = ThreadLocalRandom.current().nextInt(); - IgniteCache cache = node.cache(CACHE_NAME); + IgniteCache cache = node.cache(CACHE_NAME); if (ThreadLocalRandom.current().nextBoolean()) cache.put(key(node, key), val(node, val)); @@ -342,15 +343,15 @@ public void testConcurrentPutRemove() throws Exception { // Get expected values. Set expKeys = new HashSet<>(); - IgniteCache cache = srv1.cache(CACHE_NAME).withKeepBinary(); + IgniteCache cache = srv1.cache(CACHE_NAME).withKeepBinary(); for (int i = 0; i < LARGE_CACHE_SIZE; i++) { - BinaryObject key = key(srv1, i); + Object key = key(srv1, i); BinaryObject val = cache.get(key); if (val != null) { - int id = key.field("ID"); + int id = (Integer)key; assertEquals(i, id); @@ -363,17 +364,17 @@ public void testConcurrentPutRemove() throws Exception { // Validate query result. for (Ignite node : Ignition.allGrids()) { - IgniteCache nodeCache = node.cache(CACHE_NAME).withKeepBinary(); + IgniteCache nodeCache = node.cache(CACHE_NAME).withKeepBinary(); SqlQuery qry = new SqlQuery(valTypeName, "from " + TBL_NAME + " where mod(id, 2) <> 0"); - List> res = nodeCache.query(qry).getAll(); + List> res = nodeCache.query(qry).getAll(); assertEquals("Cache size mismatch [exp=" + expKeys.size() + ", actual=" + res.size() + ']', expKeys.size(), res.size()); - for (Cache.Entry entry : res) { - int key = entry.getKey().field("ID"); + for (Cache.Entry entry : res) { + int key = (Integer)entry.getKey(); int v = entry.getValue().field("v"); String name = entry.getValue().field("NAME"); @@ -399,13 +400,11 @@ private BinaryObject val(Ignite node, int val) { /** * @param node Node. - * @param key Value for ID field. - * @return PERSON cache key. + * @param id Key. + * @return PERSON cache key (int or {@link BinaryObject}). 
*/ - private BinaryObject key(Ignite node, int key) { - String keyTypeName = ((IgniteEx)node).context().query().types(CACHE_NAME).iterator().next().keyTypeName(); - - return node.binary().builder(keyTypeName).setField("ID", key).build(); + private Object key(Ignite node, int id) { + return id; } /** @@ -420,7 +419,7 @@ public void testConcurrentRebalance() throws Exception { createSqlCache(srv1); - run(srv1, CREATE_SQL_WITH_TEMPLATE); + run(srv1, createSql); awaitPartitionMapExchange(); @@ -481,7 +480,7 @@ public void testConcurrentCacheDestroy() throws Exception { // Start cache and populate it with data. createSqlCache(cli); - run(cli, CREATE_SQL_WITH_TEMPLATE); + run(cli, createSql); put(cli, 0, LARGE_CACHE_SIZE); @@ -526,7 +525,7 @@ public void testQueryConsistencyMultithreaded() throws Exception { createSqlCache(cli); - run(cli, CREATE_SQL_WITH_TEMPLATE); + run(cli, createSql); put(cli, 0, 5000); @@ -539,8 +538,8 @@ public void testQueryConsistencyMultithreaded() throws Exception { while (!stopped.get()) { Ignite node = grid(ThreadLocalRandom.current().nextInt(1, 5)); - IgniteInternalFuture fut = addCols(node, QueryUtils.DFLT_SCHEMA, c("newCol" + dynColCnt.getAndIncrement(), - Integer.class.getName())); + IgniteInternalFuture fut = addCols(node, QueryUtils.DFLT_SCHEMA, c("newCol" + + dynColCnt.getAndIncrement(), Integer.class.getName())); try { fut.get(); @@ -641,7 +640,7 @@ private void checkClientReconnect(final boolean restartCache, boolean dynamicCac if (dynamicCache) { createSqlCache(cli); - run(cli, CREATE_SQL_WITH_TEMPLATE); + run(cli, createSql); } final String schemaName = dynamicCache ? 
QueryUtils.DFLT_SCHEMA : "idx"; @@ -677,7 +676,7 @@ private void reconnectClientNode(final Ignite srvNode, final Ignite cliNode, fin if (dynamicCache) { DynamicColumnsAbstractConcurrentSelfTest.this.run(srvNode, DROP_SQL); - DynamicColumnsAbstractConcurrentSelfTest.this.run(srvNode, CREATE_SQL_WITH_TEMPLATE); + DynamicColumnsAbstractConcurrentSelfTest.this.run(srvNode, createSql); } else { srvNode.destroyCache("idx"); @@ -724,7 +723,7 @@ public void testConcurrentOperationsAndNodeStartStopMultithreaded() throws Excep createSqlCache(cli); - run(cli, CREATE_SQL_WITH_TEMPLATE); + run(cli, createSql); final AtomicBoolean stopped = new AtomicBoolean(); @@ -783,8 +782,8 @@ public void testConcurrentOperationsAndNodeStartStopMultithreaded() throws Excep while (!stopped.get()) { Ignite node = grid(ThreadLocalRandom.current().nextInt(1, 5)); - IgniteInternalFuture fut = addCols(node, QueryUtils.DFLT_SCHEMA, c("newCol" + dynColCnt.getAndIncrement(), - Integer.class.getName())); + IgniteInternalFuture fut = addCols(node, QueryUtils.DFLT_SCHEMA, c("newCol" + + dynColCnt.getAndIncrement(), Integer.class.getName())); try { fut.get(); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentAtomicPartitionedSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentAtomicPartitionedSelfTest.java index ca6890398e466..3f65fca489ccf 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentAtomicPartitionedSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentAtomicPartitionedSelfTest.java @@ -21,7 +21,7 @@ import org.apache.ignite.cache.CacheMode; /** - * Test to check concurrent operations on dynamic columns on ATOMIC PARTITIONED cache. 
+ * Test to check concurrent operations on dynamic columns on ATOMIC PARTITIONED cache with flat key. */ public class DynamicColumnsConcurrentAtomicPartitionedSelfTest extends DynamicColumnsAbstractConcurrentSelfTest { /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentAtomicReplicatedSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentAtomicReplicatedSelfTest.java index 9a3a32cbf8ff8..289a01e7e9156 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentAtomicReplicatedSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentAtomicReplicatedSelfTest.java @@ -21,7 +21,7 @@ import org.apache.ignite.cache.CacheMode; /** - * Test to check concurrent operations on dynamic columns on ATOMIC REPLICATED cache. + * Test to check concurrent operations on dynamic columns on ATOMIC REPLICATED cache with flat key. 
*/ public class DynamicColumnsConcurrentAtomicReplicatedSelfTest extends DynamicColumnsAbstractConcurrentSelfTest { /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentTransactionalPartitionedSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentTransactionalPartitionedSelfTest.java index f42a447a3ef0a..371313eebcb4f 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentTransactionalPartitionedSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentTransactionalPartitionedSelfTest.java @@ -21,7 +21,7 @@ import org.apache.ignite.cache.CacheMode; /** - * Test to check concurrent operations on dynamic columns on TRANSACTIONAL PARTITIONED cache. + * Test to check concurrent operations on dynamic columns on TRANSACTIONAL PARTITIONED cache with flat key. */ public class DynamicColumnsConcurrentTransactionalPartitionedSelfTest extends DynamicColumnsAbstractConcurrentSelfTest { /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentTransactionalReplicatedSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentTransactionalReplicatedSelfTest.java index 2b53e4282c9ac..f004b8968a123 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentTransactionalReplicatedSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsConcurrentTransactionalReplicatedSelfTest.java @@ -21,9 +21,10 @@ import org.apache.ignite.cache.CacheMode; /** - * Test to check concurrent operations on dynamic columns on TRANSACTIONAL REPLICATED cache. 
+ * Test to check concurrent operations on dynamic columns on TRANSACTIONAL REPLICATED cache with flat key. */ -public class DynamicColumnsConcurrentTransactionalReplicatedSelfTest extends DynamicColumnsAbstractConcurrentSelfTest { +public class DynamicColumnsConcurrentTransactionalReplicatedSelfTest + extends DynamicColumnsAbstractConcurrentSelfTest { /** * Constructor. */ diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicColumnsAbstractBasicSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicColumnsAbstractBasicSelfTest.java index 5e649596622ae..34be34d213d4f 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicColumnsAbstractBasicSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicColumnsAbstractBasicSelfTest.java @@ -301,6 +301,50 @@ public void testAddNullColumn() { checkNodeState((IgniteEx)node, QueryUtils.DFLT_SCHEMA, "PERSON", c); } + /** + * Test that {@code ADD COLUMN} fails for non dynamic table that has flat value. + */ + @SuppressWarnings({"unchecked", "ThrowFromFinallyBlock"}) + public void testTestAlterTableOnFlatValueNonDynamicTable() { + CacheConfiguration c = + new CacheConfiguration("ints").setIndexedTypes(Integer.class, Integer.class) + .setSqlSchema(QueryUtils.DFLT_SCHEMA); + + try { + grid(nodeIndex()).getOrCreateCache(c); + + doTestAlterTableOnFlatValue("INTEGER"); + } + finally { + grid(nodeIndex()).destroyCache("ints"); + } + } + + /** + * Test that {@code ADD COLUMN} fails for dynamic table that has flat value. 
+ */ + @SuppressWarnings({"unchecked", "ThrowFromFinallyBlock"}) + public void testTestAlterTableOnFlatValueDynamicTable() { + try { + run("CREATE TABLE TEST (id int primary key, x varchar) with \"wrap_value=false\""); + + doTestAlterTableOnFlatValue("TEST"); + } + finally { + run("DROP TABLE TEST"); + } + } + + /** + * Test that {@code ADD COLUMN} fails for tables that have flat value. + * @param tblName table name. + */ + private void doTestAlterTableOnFlatValue(String tblName) { + assertThrows("ALTER TABLE " + tblName + " ADD COLUMN y varchar", + "ADD COLUMN is not supported for tables created with wrap_value=false param. " + + "(To enable ADD COLUMN, create table with wrap_value=true param)."); + } + /** * @return Node index to run queries on. */ diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java index 773e7e0586fde..e0ab6c55bb506 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java @@ -17,6 +17,11 @@ package org.apache.ignite.internal.processors.cache.index; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -59,6 +64,7 @@ import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.GridTestUtils; import org.h2.jdbc.JdbcSQLException; +import org.h2.value.DataType; /** * Tests for CREATE/DROP TABLE. 
@@ -284,6 +290,14 @@ public void testDuplicateCustomCacheName() throws Exception { } } + /** + * Test that {@code CREATE TABLE} with given write sync mode actually creates new cache as needed. + * @throws Exception if failed. + */ + public void testPlainKey() throws Exception { + doTestCreateTable(null, null, null, CacheWriteSynchronizationMode.FULL_SYNC); + } + /** * Test that appending supplied arguments to {@code CREATE TABLE} results in creating new cache that has settings * as expected @@ -292,18 +306,19 @@ public void testDuplicateCustomCacheName() throws Exception { * @param valTypeName Value type name, or {@code null} if the name generated by default should be used. */ private void doTestCustomNames(String cacheName, String keyTypeName, String valTypeName) { - GridStringBuilder b = new GridStringBuilder("CREATE TABLE \"NameTest\" (id int primary key, x varchar) WITH "); + GridStringBuilder b = new GridStringBuilder("CREATE TABLE \"NameTest\" (id int primary key, x varchar) WITH " + + "wrap_key,wrap_value"); assert !F.isEmpty(cacheName) || !F.isEmpty(keyTypeName) || !F.isEmpty(valTypeName); if (!F.isEmpty(cacheName)) - b.a("\"cache_name=").a(cacheName).a('"').a(','); + b.a(",\"cache_name=").a(cacheName).a('"'); if (!F.isEmpty(keyTypeName)) - b.a("\"key_type=").a(keyTypeName).a('"').a(','); + b.a(",\"key_type=").a(keyTypeName).a('"'); if (!F.isEmpty(valTypeName)) - b.a("\"value_type=").a(valTypeName).a('"'); + b.a(",\"value_type=").a(valTypeName).a('"'); String res = b.toString(); @@ -312,8 +327,7 @@ private void doTestCustomNames(String cacheName, String keyTypeName, String valT execute(client(), res); - String resCacheName = U.firstNotNull(cacheName, QueryUtils.createTableCacheName(QueryUtils.DFLT_SCHEMA, - "NameTest")); + String resCacheName = U.firstNotNull(cacheName, cacheName("NameTest")); IgniteInternalCache cache = client().cachex(resCacheName); @@ -770,7 +784,8 @@ public void testTableNameConflictCheckSql() throws Exception { * @throws Exception if 
failed. */ public void testAffinityKey() throws Exception { - execute("CREATE TABLE \"City\" (\"name\" varchar primary key, \"code\" int) WITH \"affinityKey='name'\""); + execute("CREATE TABLE \"City\" (\"name\" varchar primary key, \"code\" int) WITH wrap_key,wrap_value," + + "\"affinityKey='name'\""); assertAffinityCacheConfiguration("City", "name"); @@ -783,7 +798,7 @@ public void testAffinityKey() throws Exception { // We need unique name for this table to avoid conflicts with existing binary metadata. execute("CREATE TABLE \"Person2\" (\"id\" int, \"city\" varchar," + " \"name\" varchar, \"surname\" varchar, \"age\" int, PRIMARY KEY (\"id\", \"city\")) WITH " + - "\"template=cache,affinityKey='city'\""); + "wrap_key,wrap_value,\"template=cache,affinityKey='city'\""); assertAffinityCacheConfiguration("Person2", "city"); @@ -828,26 +843,31 @@ public void testAffinityKey() throws Exception { */ @SuppressWarnings("ThrowableResultOfMethodCallIgnored") public void testAffinityKeyCaseSensitivity() { - execute("CREATE TABLE \"A\" (\"name\" varchar primary key, \"code\" int) WITH \"affinityKey='name'\""); + execute("CREATE TABLE \"A\" (\"name\" varchar primary key, \"code\" int) WITH wrap_key,wrap_value," + + "\"affinityKey='name'\""); assertAffinityCacheConfiguration("A", "name"); - execute("CREATE TABLE \"B\" (name varchar primary key, \"code\" int) WITH \"affinityKey=name\""); + execute("CREATE TABLE \"B\" (name varchar primary key, \"code\" int) WITH wrap_key,wrap_value," + + "\"affinityKey=name\""); assertAffinityCacheConfiguration("B", "NAME"); - execute("CREATE TABLE \"C\" (name varchar primary key, \"code\" int) WITH \"affinityKey=NamE\""); + execute("CREATE TABLE \"C\" (name varchar primary key, \"code\" int) WITH wrap_key,wrap_value," + + "\"affinityKey=NamE\""); assertAffinityCacheConfiguration("C", "NAME"); - execute("CREATE TABLE \"D\" (\"name\" varchar primary key, \"code\" int) WITH \"affinityKey=NAME\""); + execute("CREATE TABLE \"D\" (\"name\" varchar 
primary key, \"code\" int) WITH wrap_key,wrap_value," + + "\"affinityKey=NAME\""); assertAffinityCacheConfiguration("D", "name"); // Error arises because user has specified case sensitive affinity column name GridTestUtils.assertThrows(null, new Callable() { @Override public Object call() throws Exception { - execute("CREATE TABLE \"E\" (name varchar primary key, \"code\" int) WITH \"affinityKey='Name'\""); + execute("CREATE TABLE \"E\" (name varchar primary key, \"code\" int) WITH wrap_key,wrap_value," + + "\"affinityKey='Name'\""); return null; } @@ -865,9 +885,19 @@ public void testAffinityKeyCaseSensitivity() { }, IgniteSQLException.class, "Ambiguous affinity column name, use single quotes for case sensitivity: name"); execute("CREATE TABLE \"E\" (\"name\" varchar, \"Name\" int, val int, primary key(\"name\", " + - "\"Name\")) WITH \"affinityKey='Name'\""); + "\"Name\")) WITH wrap_key,wrap_value,\"affinityKey='Name'\""); assertAffinityCacheConfiguration("E", "Name"); + + execute("drop table a"); + + execute("drop table b"); + + execute("drop table c"); + + execute("drop table d"); + + execute("drop table e"); } /** @@ -914,7 +944,7 @@ public void testTableAndIndexRecreate() { execute("create index on \"PUBLIC\".t (b desc)"); execute("drop table \"PUBLIC\".t"); - assertNull(client().cache("t")); + assertNull(client().cache(cacheName("t"))); execute("create table \"PUBLIC\".t (a int primary key, b varchar(30))"); @@ -943,6 +973,224 @@ public void testTableAndIndexRecreate() { execute("drop table \"PUBLIC\".t"); } + /** + * Test that it's impossible to create tables with same name regardless of key/value wrapping settings. 
+ */ + public void testWrappedAndUnwrappedKeyTablesInteroperability() { + { + execute("create table a (id int primary key, x varchar)"); + + assertDdlCommandThrows("create table a (id int primary key, x varchar) with wrap_key", + "Table already exists: A"); + + assertDdlCommandThrows("create table a (id int primary key, x varchar) with wrap_value", + "Table already exists: A"); + + assertDdlCommandThrows("create table a (id int primary key, x varchar) with wrap_key,wrap_value", + "Table already exists: A"); + + execute("drop table a"); + } + + { + execute("create table a (id int primary key, x varchar) with wrap_key"); + + assertDdlCommandThrows("create table a (id int primary key, x varchar)", + "Table already exists: A"); + + assertDdlCommandThrows("create table a (id int primary key, x varchar) with wrap_value", + "Table already exists: A"); + + assertDdlCommandThrows("create table a (id int primary key, x varchar) with wrap_key,wrap_value", + "Table already exists: A"); + + execute("drop table a"); + } + + { + execute("create table a (id int primary key, x varchar) with wrap_value"); + + assertDdlCommandThrows("create table a (id int primary key, x varchar)", + "Table already exists: A"); + + assertDdlCommandThrows("create table a (id int primary key, x varchar) with wrap_key", + "Table already exists: A"); + + assertDdlCommandThrows("create table a (id int primary key, x varchar) with wrap_key,wrap_value", + "Table already exists: A"); + + execute("drop table a"); + } + + { + execute("create table a (id int primary key, x varchar) with wrap_key,wrap_value"); + + assertDdlCommandThrows("create table a (id int primary key, x varchar)", + "Table already exists: A"); + + assertDdlCommandThrows("create table a (id int primary key, x varchar) with wrap_value", + "Table already exists: A"); + + assertDdlCommandThrows("create table a (id int primary key, x varchar) with wrap_key", + "Table already exists: A"); + + execute("drop table a"); + } + } + + /** + * Test that 
it's possible to create tables with matching key and/or value primitive types. + */ + public void testDynamicTablesInteroperability() { + execute("create table a (id int primary key, x varchar) with \"wrap_value=false\""); + + execute("create table b (id long primary key, y varchar) with \"wrap_value=false\""); + + execute("create table c (id int primary key, z long) with \"wrap_value=false\""); + + execute("create table d (id int primary key, w varchar) with \"wrap_value=false\""); + + execute("drop table a"); + + execute("drop table b"); + + execute("drop table c"); + + execute("drop table d"); + } + + /** + * Test that when key or value has more than one column, wrap=false is forbidden. + */ + public void testWrappingAlwaysOnWithComplexObjects() { + assertDdlCommandThrows("create table a (id int, x varchar, c long, primary key(id, c)) with \"wrap_key=false\"", + "WRAP_KEY cannot be false when composite primary key exists."); + + assertDdlCommandThrows("create table a (id int, x varchar, c long, primary key(id)) with \"wrap_value=false\"", + "WRAP_VALUE cannot be false when multiple non-primary key columns exist."); + } + + /** + * Test behavior when neither key nor value should be wrapped. + * @throws SQLException if failed. + */ + public void testNoWrap() throws SQLException { + doTestKeyValueWrap(false, false); + } + + /** + * Test behavior when only key is wrapped. + * @throws SQLException if failed. + */ + public void testKeyWrap() throws SQLException { + doTestKeyValueWrap(true, false); + } + + /** + * Test behavior when only value is wrapped. + * @throws SQLException if failed. + */ + public void testValueWrap() throws SQLException { + doTestKeyValueWrap(false, true); + } + + /** + * Test behavior when both key and value is wrapped. + * @throws SQLException if failed. + */ + public void testKeyAndValueWrap() throws SQLException { + doTestKeyValueWrap(true, true); + } + + /** + * Test behavior for given combination of wrap flags. 
+ * @param wrapKey Whether key wrap should be enforced. + * @param wrapVal Whether value wrap should be enforced. + * @throws SQLException if failed. + */ + private void doTestKeyValueWrap(boolean wrapKey, boolean wrapVal) throws SQLException { + try { + String sql = String.format("CREATE TABLE T (\"id\" int primary key, \"x\" varchar) WITH " + + "\"wrap_key=%b,wrap_value=%b\"", wrapKey, wrapVal); + + if (wrapKey) + sql += ",\"key_type=tkey\""; + + if (wrapVal) + sql += ",\"value_type=tval\""; + + execute(sql); + + execute("INSERT INTO T(\"id\", \"x\") values(1, 'a')"); + + LinkedHashMap resCols = new LinkedHashMap<>(); + + List resData = new ArrayList<>(); + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1")) { + try (ResultSet colsRs = conn.getMetaData().getColumns(null, QueryUtils.DFLT_SCHEMA, "T", ".*")) { + while (colsRs.next()) + resCols.put(colsRs.getString("COLUMN_NAME"), + DataType.getTypeClassName(DataType.convertSQLTypeToValueType(colsRs + .getShort("DATA_TYPE")))); + } + + try (PreparedStatement ps = conn.prepareStatement("SELECT * FROM T")) { + try (ResultSet dataRs = ps.executeQuery()) { + assertTrue(dataRs.next()); + + for (int i = 0; i < dataRs.getMetaData().getColumnCount(); i++) + resData.add(dataRs.getObject(i + 1)); + } + } + } + + LinkedHashMap expCols = new LinkedHashMap<>(); + + expCols.put("id", Integer.class.getName()); + expCols.put("x", String.class.getName()); + + assertEquals(expCols, resCols); + + assertEqualsCollections(Arrays.asList(1, "a"), resData); + + Object key = createKeyForWrapTest(1, wrapKey); + + Object val = client().cache(cacheName("T")).withKeepBinary().get(key); + + assertNotNull(val); + + assertEquals(createValueForWrapTest("a", wrapVal), val); + } + finally { + execute("DROP TABLE IF EXISTS T"); + } + } + + /** + * @param key Key to wrap. + * @param wrap Whether key should be wrapped. + * @return (optionally wrapped) key. 
+ */ + private Object createKeyForWrapTest(int key, boolean wrap) { + if (!wrap) + return key; + + return client().binary().builder("tkey").setField("id", key).build(); + } + + /** + * @param val Value to wrap. + * @param wrap Whether value should be wrapped. + * @return (optionally wrapped) value. + */ + private Object createValueForWrapTest(String val, boolean wrap) { + if (!wrap) + return val; + + return client().binary().builder("tval").setField("x", val).build(); + } + /** * Fill re-created table with data. */ @@ -1025,19 +1273,30 @@ private void assertCreateTableWithParamsThrows(final String params, String expEr } /** - * Test that {@code DROP TABLE} on non-public schema causes an exception. - * - * @throws Exception if failed. + * Test that arbitrary command yields specific error. + * @param cmd Command. + * @param expErrMsg Expected error message. */ @SuppressWarnings("ThrowableResultOfMethodCallIgnored") - public void testDropTableNotPublicSchema() throws Exception { + private void assertDdlCommandThrows(final String cmd, String expErrMsg) { GridTestUtils.assertThrows(null, new Callable() { @Override public Object call() throws Exception { - execute("DROP TABLE \"cache_idx\".\"Person\""); + execute(cmd); return null; } - }, IgniteSQLException.class, "DROP TABLE can only be executed on PUBLIC schema."); + }, IgniteSQLException.class, expErrMsg); + } + + /** + * Test that {@code DROP TABLE} on non-public schema causes an exception. + * + * @throws Exception if failed. 
+ */ + @SuppressWarnings("ThrowableResultOfMethodCallIgnored") + public void testDropTableNotPublicSchema() throws Exception { + assertDdlCommandThrows("DROP TABLE \"cache_idx\".\"Person\"", + "DROP TABLE can only be executed on PUBLIC schema."); } /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java index b372eb3ebe6e1..8deb61f33d0a2 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java @@ -86,7 +86,7 @@ public class IgniteSqlNotNullConstraintTest extends GridCommonAbstractTest { private static String CACHE_INTERCEPTOR = "cacheInterceptor"; /** Expected error message. */ - private static String ERR_MSG = "Null value is not allowed for field 'NAME'"; + private static String ERR_MSG = "Null value is not allowed for column 'NAME'"; /** Expected error message for read-through restriction. 
*/ private static String READ_THROUGH_ERR_MSG = "NOT NULL constraint is not supported when " + diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java index 1ee8a849b0cdb..62860c0d16f82 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java @@ -251,20 +251,21 @@ public void testSpi() throws Exception { IgniteCache cacheB = ignite0.createCache(cacheBCfg()); - assertFalse(spi.queryLocalSql(spi.schema(typeAA.cacheName()), "select * from A.A", null, Collections.emptySet(), - typeAA.name(), null, null).hasNext()); + assertFalse(spi.queryLocalSql(spi.schema(typeAA.cacheName()), typeAA.cacheName(), "select * from A.A", null, + Collections.emptySet(), typeAA.name(), null, null).hasNext()); - assertFalse(spi.queryLocalSql(spi.schema(typeAB.cacheName()), "select * from A.B", null, Collections.emptySet(), - typeAB.name(), null, null).hasNext()); - - assertFalse(spi.queryLocalSql(spi.schema(typeBA.cacheName()), "select * from B.A", null, Collections.emptySet(), - typeBA.name(), null, null).hasNext()); + assertFalse(spi.queryLocalSql(spi.schema(typeAB.cacheName()), typeAB.cacheName(), "select * from A.B", null, + Collections.emptySet(), typeAB.name(), null, null).hasNext()); - assertFalse(spi.queryLocalSql(spi.schema(typeBA.cacheName()), "select * from B.A, A.B, A.A", null, + assertFalse(spi.queryLocalSql(spi.schema(typeBA.cacheName()), typeBA.cacheName(), "select * from B.A", null, Collections.emptySet(), typeBA.name(), null, null).hasNext()); + assertFalse(spi.queryLocalSql(spi.schema(typeBA.cacheName()), typeBA.cacheName(), + "select * from B.A, A.B, A.A", null, Collections.emptySet(), 
typeBA.name(), null, null).hasNext()); + try { - spi.queryLocalSql(spi.schema(typeBA.cacheName()), "select aa.*, ab.*, ba.* from A.A aa, A.B ab, B.A ba", + spi.queryLocalSql(spi.schema(typeBA.cacheName()), typeBA.cacheName(), + "select aa.*, ab.*, ba.* from A.A aa, A.B ab, B.A ba", null, Collections.emptySet(), typeBA.name(), null, null).hasNext(); fail("Enumerations of aliases in select block must be prohibited"); @@ -273,11 +274,11 @@ public void testSpi() throws Exception { // all fine } - assertFalse(spi.queryLocalSql(spi.schema(typeAB.cacheName()), "select ab.* from A.B ab", null, - Collections.emptySet(), typeAB.name(), null, null).hasNext()); + assertFalse(spi.queryLocalSql(spi.schema(typeAB.cacheName()), typeAB.cacheName(), "select ab.* from A.B ab", + null, Collections.emptySet(), typeAB.name(), null, null).hasNext()); - assertFalse(spi.queryLocalSql(spi.schema(typeBA.cacheName()), "select ba.* from B.A as ba", null, - Collections.emptySet(), typeBA.name(), null, null).hasNext()); + assertFalse(spi.queryLocalSql(spi.schema(typeBA.cacheName()), typeBA.cacheName(), + "select ba.* from B.A as ba", null, Collections.emptySet(), typeBA.name(), null, null).hasNext()); cacheA.put(1, aa("A", 1, "Vasya", 10).build()); cacheA.put(1, ab(1, "Vasya", 20, "Some text about Vasya goes here.").build()); @@ -289,7 +290,7 @@ public void testSpi() throws Exception { // Query data. 
Iterator> res = spi.queryLocalSql(spi.schema(typeAA.cacheName()), - "from a order by age", null, Collections.emptySet(), typeAA.name(), null, null); + typeAA.cacheName(), "from a order by age", null, Collections.emptySet(), typeAA.name(), null, null); assertTrue(res.hasNext()); assertEquals(aa("A", 3, "Borya", 18).build(), value(res.next())); @@ -297,8 +298,8 @@ public void testSpi() throws Exception { assertEquals(aa("A", 2, "Valera", 19).build(), value(res.next())); assertFalse(res.hasNext()); - res = spi.queryLocalSql(spi.schema(typeAA.cacheName()), "select aa.* from a aa order by aa.age", null, - Collections.emptySet(), typeAA.name(), null, null); + res = spi.queryLocalSql(spi.schema(typeAA.cacheName()), typeAA.cacheName(), + "select aa.* from a aa order by aa.age", null, Collections.emptySet(), typeAA.name(), null, null); assertTrue(res.hasNext()); assertEquals(aa("A", 3, "Borya", 18).build(), value(res.next())); @@ -306,8 +307,8 @@ public void testSpi() throws Exception { assertEquals(aa("A", 2, "Valera", 19).build(), value(res.next())); assertFalse(res.hasNext()); - res = spi.queryLocalSql(spi.schema(typeAB.cacheName()), "from b order by name", null, Collections.emptySet(), - typeAB.name(), null, null); + res = spi.queryLocalSql(spi.schema(typeAB.cacheName()), typeAB.cacheName(), "from b order by name", null, + Collections.emptySet(), typeAB.name(), null, null); assertTrue(res.hasNext()); assertEquals(ab(1, "Vasya", 20, "Some text about Vasya goes here.").build(), value(res.next())); @@ -315,8 +316,8 @@ public void testSpi() throws Exception { assertEquals(ab(4, "Vitalya", 20, "Very Good guy").build(), value(res.next())); assertFalse(res.hasNext()); - res = spi.queryLocalSql(spi.schema(typeAB.cacheName()), "select bb.* from b as bb order by bb.name", null, - Collections.emptySet(), typeAB.name(), null, null); + res = spi.queryLocalSql(spi.schema(typeAB.cacheName()), typeAB.cacheName(), + "select bb.* from b as bb order by bb.name", null, 
Collections.emptySet(), typeAB.name(), null, null); assertTrue(res.hasNext()); assertEquals(ab(1, "Vasya", 20, "Some text about Vasya goes here.").build(), value(res.next())); @@ -324,8 +325,8 @@ public void testSpi() throws Exception { assertEquals(ab(4, "Vitalya", 20, "Very Good guy").build(), value(res.next())); assertFalse(res.hasNext()); - res = spi.queryLocalSql(spi.schema(typeBA.cacheName()), "from a", null, Collections.emptySet(), typeBA.name(), - null, null); + res = spi.queryLocalSql(spi.schema(typeBA.cacheName()), typeBA.cacheName(), "from a", null, + Collections.emptySet(), typeBA.name(), null, null); assertTrue(res.hasNext()); assertEquals(ba(2, "Kolya", 25, true).build(), value(res.next())); @@ -333,7 +334,7 @@ public void testSpi() throws Exception { // Text queries Iterator> txtRes = spi.queryLocalText(spi.schema(typeAB.cacheName()), - "good", typeAB.name(), null); + typeAB.cacheName(), "good", typeAB.name(), null); assertTrue(txtRes.hasNext()); assertEquals(ab(4, "Vitalya", 20, "Very Good guy").build(), value(txtRes.next())); diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheDmlQueriesTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheDmlQueriesTest.cs index a6ddc8c055bcb..172cb90d36015 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheDmlQueriesTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheDmlQueriesTest.cs @@ -122,7 +122,7 @@ public void TestNotNull() var ex = Assert.Throws(() => cache.QueryFields(new SqlFieldsQuery( "insert into foo(_key, name) values (?, ?)", 1, "bar")).GetAll()); - Assert.AreEqual("Null value is not allowed for field 'ID'", ex.Message); + Assert.AreEqual("Null value is not allowed for column 'ID'", ex.Message); } /// @@ -137,7 +137,7 @@ public void TestNotNullAttribute() var ex = Assert.Throws(() => cache.QueryFields(new SqlFieldsQuery( "insert into foo(_key, id) values (?, ?)", 1, 2)).GetAll()); - 
Assert.AreEqual("Null value is not allowed for field 'NAME'", ex.Message); + Assert.AreEqual("Null value is not allowed for column 'NAME'", ex.Message); } /// From 73f092df4792bee918083322ee93026dfa95e3b8 Mon Sep 17 00:00:00 2001 From: devozerov Date: Mon, 9 Oct 2017 10:48:06 +0300 Subject: [PATCH 012/243] Fixed JavaDoc. --- .../cache/persistence/wal/serializer/RecordV2Serializer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV2Serializer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV2Serializer.java index 0a5bf0130d409..a06b6d78c5b6a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV2Serializer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV2Serializer.java @@ -39,7 +39,7 @@ * Record V2 serializer. * Stores records in following format: *
    - *
  • Record type from {@link WALRecord.RecordType#ordinal()} incremented by 1
  • + *
  • Record type from {@link WALRecord.RecordType} incremented by 1
  • *
  • WAL pointer to double check consistency
  • *
  • Record length
  • *
  • Data
  • From 81b16ada108dc497d38f702b7f678de539345705 Mon Sep 17 00:00:00 2001 From: devozerov Date: Mon, 9 Oct 2017 11:34:51 +0300 Subject: [PATCH 013/243] Fixed JavaDoc again. --- .../cache/persistence/wal/serializer/RecordV2Serializer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV2Serializer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV2Serializer.java index a06b6d78c5b6a..98804d9902275 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV2Serializer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV2Serializer.java @@ -39,7 +39,7 @@ * Record V2 serializer. * Stores records in following format: *
      - *
    • Record type from {@link WALRecord.RecordType} incremented by 1
    • + *
    • Record type from {@code WALRecord.RecordType} incremented by 1
    • *
    • WAL pointer to double check consistency
    • *
    • Record length
    • *
    • Data
    • From d9f0f4e1bec08ab9f52c6c1ba842524f25a4ba19 Mon Sep 17 00:00:00 2001 From: devozerov Date: Mon, 9 Oct 2017 11:57:06 +0300 Subject: [PATCH 014/243] IGNITE-6054: Fixed tests. --- .../cache/index/H2DynamicColumnsAbstractBasicSelfTest.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicColumnsAbstractBasicSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicColumnsAbstractBasicSelfTest.java index 34be34d213d4f..b9f8c615ab24b 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicColumnsAbstractBasicSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicColumnsAbstractBasicSelfTest.java @@ -341,8 +341,7 @@ public void testTestAlterTableOnFlatValueDynamicTable() { */ private void doTestAlterTableOnFlatValue(String tblName) { assertThrows("ALTER TABLE " + tblName + " ADD COLUMN y varchar", - "ADD COLUMN is not supported for tables created with wrap_value=false param. " + - "(To enable ADD COLUMN, create table with wrap_value=true param)."); + "Cannot add column(s) because table was created with WRAP_VALUE=false option."); } /** From 9358a88538189e48de68ad2d157c2c1ebf9a219e Mon Sep 17 00:00:00 2001 From: tledkov-gridgain Date: Mon, 9 Oct 2017 15:14:23 +0300 Subject: [PATCH 015/243] IGNITE-6529: JDBC thin: fixed driver protocol compatibility. This closes #2819. 
--- .../internal/jdbc/thin/JdbcThinTcpIo.java | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java index 688f90820287c..9e12fbf571005 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java @@ -49,6 +49,9 @@ public class JdbcThinTcpIo { /** Version 2.1.0. */ private static final ClientListenerProtocolVersion VER_2_1_0 = ClientListenerProtocolVersion.create(2, 1, 0); + /** Version 2.1.5: added "lazy" flag. */ + private static final ClientListenerProtocolVersion VER_2_1_5 = ClientListenerProtocolVersion.create(2, 1, 5); + /** Version 2.3.1. */ private static final ClientListenerProtocolVersion VER_2_3_0 = ClientListenerProtocolVersion.create(2, 3, 0); @@ -180,22 +183,25 @@ public void start() throws SQLException, IOException { SqlStateCode.CLIENT_CONNECTION_FAILED, e); } - handshake(); + handshake(CURRENT_VER); } /** + * Used for versions: 2.1.5 and 2.3.0. The protocol version is changed but handshake format isn't changed. + * + * @param ver JDBC client version. * @throws IOException On IO error. * @throws SQLException On connection reject. 
*/ - public void handshake() throws IOException, SQLException { + public void handshake(ClientListenerProtocolVersion ver) throws IOException, SQLException { BinaryWriterExImpl writer = new BinaryWriterExImpl(null, new BinaryHeapOutputStream(HANDSHAKE_MSG_SIZE), null, null); writer.writeByte((byte) ClientListenerRequest.HANDSHAKE); - writer.writeShort(CURRENT_VER.major()); - writer.writeShort(CURRENT_VER.minor()); - writer.writeShort(CURRENT_VER.maintenance()); + writer.writeShort(ver.major()); + writer.writeShort(ver.minor()); + writer.writeShort(ver.maintenance()); writer.writeByte(ClientListenerNioListener.JDBC_CLIENT); @@ -238,7 +244,9 @@ public void handshake() throws IOException, SQLException { ClientListenerProtocolVersion srvProtocolVer = ClientListenerProtocolVersion.create(maj, min, maintenance); - if (VER_2_1_0.equals(srvProtocolVer)) + if (VER_2_1_5.equals(srvProtocolVer)) + handshake(VER_2_1_5); + else if (VER_2_1_0.equals(srvProtocolVer)) handshake_2_1_0(); else { throw new SQLException("Handshake failed [driverProtocolVer=" + CURRENT_VER + From c4acf54413a0e8cb9cedcb7201084f29ee554ee7 Mon Sep 17 00:00:00 2001 From: Vasiliy Sisko Date: Mon, 9 Oct 2017 19:23:23 +0700 Subject: [PATCH 016/243] IGNITE-6287 Web Console: Improved DDL support: added checkbox "Use selected cache as default schema name". 
(cherry picked from commit a45677c) --- .../internal/visor/query/VisorQueryTask.java | 15 +-------------- modules/web-console/backend/app/mongo.js | 1 + .../frontend/app/modules/sql/sql.controller.js | 14 +++++++++++++- .../frontend/public/stylesheets/style.scss | 8 ++++++++ .../web-console/frontend/views/sql/sql.tpl.pug | 10 ++++++++++ 5 files changed, 33 insertions(+), 15 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java index a3668c8ab18e4..933bacc6c1882 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java @@ -99,20 +99,7 @@ private VisorQueryJob(VisorQueryTaskArg arg, boolean debug) { if (c == null) throw new SQLException("Fail to execute query. Cache not found: " + cacheName); - try { - qryCursor = c.withKeepBinary().query(qry); - } - catch (CacheException e) { - // Work around for DDL without explicit schema name. 
- if (X.hasCause(e, IgniteSQLException.class) - && e.getMessage().contains("can only be executed on PUBLIC schema")) { - qry.setSchema("PUBLIC"); - - qryCursor = c.withKeepBinary().query(qry); - } - else - throw e; - } + qryCursor = c.withKeepBinary().query(qry); } VisorQueryCursor> cur = new VisorQueryCursor<>(qryCursor); diff --git a/modules/web-console/backend/app/mongo.js b/modules/web-console/backend/app/mongo.js index ecc833fed5a0f..a07f979964b3a 100644 --- a/modules/web-console/backend/app/mongo.js +++ b/modules/web-console/backend/app/mongo.js @@ -1036,6 +1036,7 @@ module.exports.factory = function(passportMongo, settings, pluginMongo, mongoose maxPages: Number, hideSystemColumns: Boolean, cacheName: String, + useAsDefaultSchema: Boolean, chartsOptions: {barChart: {stacked: Boolean}, areaChart: {style: String}}, rate: { value: Number, diff --git a/modules/web-console/frontend/app/modules/sql/sql.controller.js b/modules/web-console/frontend/app/modules/sql/sql.controller.js index 8011b0f6f9023..a3fc0cacfc1d4 100644 --- a/modules/web-console/frontend/app/modules/sql/sql.controller.js +++ b/modules/web-console/frontend/app/modules/sql/sql.controller.js @@ -32,6 +32,8 @@ const ENFORCE_JOIN_SINCE = [['1.7.9', '1.8.0'], ['1.8.4', '1.9.0'], '1.9.1']; const LAZY_QUERY_SINCE = [['2.1.4-p1', '2.2.0'], '2.2.1']; +const DDL_SINCE = [['2.1.6', '2.2.0'], '2.3.0']; + const _fullColName = (col) => { const res = []; @@ -56,6 +58,7 @@ class Paragraph { self.qryType = paragraph.qryType || 'query'; self.maxPages = 0; self.filter = ''; + self.useAsDefaultSchema = false; _.assign(this, paragraph); @@ -1381,6 +1384,15 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', return false; }; + $scope.ddlAvailable = (paragraph) => { + const cache = _.find($scope.caches, {name: paragraph.cacheName}); + + if (cache) + return !!_.find(cache.nodes, (node) => Version.since(node.version, ...DDL_SINCE)); + + return false; + }; + $scope.execute = (paragraph, local 
= false) => { const nonCollocatedJoins = !!paragraph.nonCollocatedJoins; const enforceJoinOrder = !!paragraph.enforceJoinOrder; @@ -1399,7 +1411,7 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', .then(() => { const args = paragraph.queryArgs = { type: 'QUERY', - cacheName: paragraph.cacheName, + cacheName: ($scope.ddlAvailable(paragraph) && !paragraph.useAsDefaultSchema) ? null : paragraph.cacheName, query: paragraph.query, pageSize: paragraph.pageSize, maxPages: paragraph.maxPages, diff --git a/modules/web-console/frontend/public/stylesheets/style.scss b/modules/web-console/frontend/public/stylesheets/style.scss index eeb3a55eea26c..b259f1d870abb 100644 --- a/modules/web-console/frontend/public/stylesheets/style.scss +++ b/modules/web-console/frontend/public/stylesheets/style.scss @@ -304,6 +304,14 @@ body > .wrapper > ui-view { } } +.use-cache { + display: flex; + + input[type="checkbox"] { + width: 20px; + } +} + .group-section { margin-top: 20px; } diff --git a/modules/web-console/frontend/views/sql/sql.tpl.pug b/modules/web-console/frontend/views/sql/sql.tpl.pug index 724c53cf160b2..7ee966d20870b 100644 --- a/modules/web-console/frontend/views/sql/sql.tpl.pug +++ b/modules/web-console/frontend/views/sql/sql.tpl.pug @@ -240,6 +240,16 @@ mixin paragraph-query td(style='width: 100%') input.labelField(id='cache_{{ [paragraph.id, $index].join("_") }}' type='radio' value='{{cache.name}}' ng-model='paragraph.cacheName') label(for='cache_{{ [paragraph.id, $index].join("_") }} ' ng-bind-html='cache.label') + .settings-row + .row(ng-if='ddlAvailable(paragraph)') + label.tipLabel.use-cache(bs-tooltip data-placement='bottom' + data-title= + 'Use selected cache as default schema name.
      \ + This will allow to execute query on specified cache without specify schema name.
      \ + NOTE: In future version of Ignite this feature will be removed.' + data-trigger='hover') + input(type='checkbox' ng-model='paragraph.useAsDefaultSchema') + span Use selected cache as default schema name .empty-caches(ng-show='displayedCaches.length == 0 && caches.length != 0') label Wrong caches filter .empty-caches(ng-show='caches.length == 0') From 831c4d9635c911fde3781987f79a28523d6cf15b Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Mon, 9 Oct 2017 17:33:46 +0300 Subject: [PATCH 017/243] IGNITE-6397 .NET thin client: basic cache operations. This closes #2725. --- .../platform/client/ClientIntResponse.java | 46 ++ .../platform/client/ClientLongResponse.java | 46 ++ .../platform/client/ClientMessageParser.java | 133 ++++ .../cache/ClientCacheClearKeyRequest.java | 44 ++ .../cache/ClientCacheClearKeysRequest.java | 44 ++ .../client/cache/ClientCacheClearRequest.java | 44 ++ .../cache/ClientCacheContainsKeyRequest.java | 45 ++ .../cache/ClientCacheContainsKeysRequest.java | 45 ++ .../cache/ClientCacheGetAllRequest.java | 46 ++ .../cache/ClientCacheGetAllResponse.java | 57 ++ .../ClientCacheGetAndPutIfAbsentRequest.java | 45 ++ .../cache/ClientCacheGetAndPutRequest.java | 45 ++ .../cache/ClientCacheGetAndRemoveRequest.java | 45 ++ .../ClientCacheGetAndReplaceRequest.java | 45 ++ .../client/cache/ClientCacheGetRequest.java | 9 +- .../cache/ClientCacheGetSizeRequest.java | 57 ++ .../client/cache/ClientCacheKeyRequest.java | 48 ++ .../cache/ClientCacheKeyValueRequest.java | 48 ++ .../client/cache/ClientCacheKeysRequest.java | 68 ++ .../cache/ClientCachePutAllRequest.java | 57 ++ .../cache/ClientCachePutIfAbsentRequest.java | 45 ++ .../client/cache/ClientCachePutRequest.java | 13 +- .../cache/ClientCacheRemoveAllRequest.java | 44 ++ .../ClientCacheRemoveIfEqualsRequest.java | 45 ++ .../cache/ClientCacheRemoveKeyRequest.java | 45 ++ .../cache/ClientCacheRemoveKeysRequest.java | 44 ++ .../ClientCacheReplaceIfEqualsRequest.java | 50 ++ 
.../cache/ClientCacheReplaceRequest.java | 45 ++ .../client/cache/ClientCacheRequest.java | 2 +- .../Client/Cache/CacheTest.cs | 611 +++++++++++++++++- .../Client/Cache/CacheTestNoMeta.cs | 4 +- .../Client/ClientTestBase.cs | 9 + .../Client/Cache/ICacheClient.cs | 155 +++++ .../Client/IgniteClientException.cs | 8 + .../Impl/Cache/CacheImpl.cs | 24 +- .../Impl/Client/Cache/CacheClient.cs | 260 +++++++- .../Impl/Client/ClientOp.cs | 21 +- .../Apache.Ignite.Core/Impl/IgniteUtils.cs | 21 + 38 files changed, 2418 insertions(+), 45 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientIntResponse.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientLongResponse.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheClearKeyRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheClearKeysRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheClearRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheContainsKeyRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheContainsKeysRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAllRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAllResponse.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndPutIfAbsentRequest.java create mode 100644 
modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndPutRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndRemoveRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndReplaceRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetSizeRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheKeyRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheKeyValueRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheKeysRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCachePutAllRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCachePutIfAbsentRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveAllRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveIfEqualsRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveKeyRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveKeysRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheReplaceIfEqualsRequest.java create mode 100644 
modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheReplaceRequest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientIntResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientIntResponse.java new file mode 100644 index 0000000000000..b8debf18254ca --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientIntResponse.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client; + +import org.apache.ignite.internal.binary.BinaryRawWriterEx; + +/** + * Int response. + */ +public class ClientIntResponse extends ClientResponse { + /** */ + private final int val; + + /** + * Constructor. + * + * @param reqId Request id. 
+ */ + public ClientIntResponse(long reqId, int val) { + super(reqId); + + this.val = val; + } + + /** {@inheritDoc} */ + @Override public void encode(BinaryRawWriterEx writer) { + super.encode(writer); + + writer.writeInt(val); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientLongResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientLongResponse.java new file mode 100644 index 0000000000000..a11615702c014 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientLongResponse.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client; + +import org.apache.ignite.internal.binary.BinaryRawWriterEx; + +/** + * Long response. + */ +public class ClientLongResponse extends ClientResponse { + /** */ + private final long val; + + /** + * Constructor. + * + * @param reqId Request id. 
+ */ + public ClientLongResponse(long reqId, long val) { + super(reqId); + + this.val = val; + } + + /** {@inheritDoc} */ + @Override public void encode(BinaryRawWriterEx writer) { + super.encode(writer); + + writer.writeLong(val); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientMessageParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientMessageParser.java index 84d3eee36ba85..f0f0f4cae27f0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientMessageParser.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientMessageParser.java @@ -32,8 +32,27 @@ import org.apache.ignite.internal.processors.platform.client.binary.ClientBinaryTypeNameGetRequest; import org.apache.ignite.internal.processors.platform.client.binary.ClientBinaryTypeNamePutRequest; import org.apache.ignite.internal.processors.platform.client.binary.ClientBinaryTypePutRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheClearKeyRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheClearKeysRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheClearRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheContainsKeyRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheContainsKeysRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheGetAllRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheGetAndPutIfAbsentRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheGetAndPutRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheGetAndRemoveRequest; +import 
org.apache.ignite.internal.processors.platform.client.cache.ClientCacheGetAndReplaceRequest; import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheGetRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheGetSizeRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCachePutAllRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCachePutIfAbsentRequest; import org.apache.ignite.internal.processors.platform.client.cache.ClientCachePutRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheRemoveIfEqualsRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheRemoveAllRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheRemoveKeysRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheRemoveKeyRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheReplaceIfEqualsRequest; +import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheReplaceRequest; import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheScanQueryNextPageRequest; import org.apache.ignite.internal.processors.platform.client.cache.ClientCacheScanQueryRequest; @@ -68,6 +87,63 @@ public class ClientMessageParser implements ClientListenerMessageParser { /** */ private static final short OP_RESOURCE_CLOSE = 9; + /** */ + private static final short OP_CACHE_CONTAINS_KEY = 10; + + /** */ + private static final short OP_CACHE_CONTAINS_KEYS = 11; + + /** */ + private static final short OP_CACHE_GET_ALL = 12; + + /** */ + private static final short OP_CACHE_GET_AND_PUT = 13; + + /** */ + private static final short OP_CACHE_GET_AND_REPLACE = 14; + + /** */ + private static final short OP_CACHE_GET_AND_REMOVE = 15; + + /** */ + private static final short OP_CACHE_PUT_IF_ABSENT = 16; + + /** */ + private 
static final short OP_CACHE_GET_AND_PUT_IF_ABSENT = 17; + + /** */ + private static final short OP_CACHE_REPLACE = 18; + + /** */ + private static final short OP_CACHE_REPLACE_IF_EQUALS = 19; + + /** */ + private static final short OP_CACHE_PUT_ALL = 20; + + /** */ + private static final short OP_CACHE_CLEAR = 21; + + /** */ + private static final short OP_CACHE_CLEAR_KEY = 22; + + /** */ + private static final short OP_CACHE_CLEAR_KEYS = 23; + + /** */ + private static final short OP_CACHE_REMOVE_KEY = 24; + + /** */ + private static final short OP_CACHE_REMOVE_IF_EQUALS = 25; + + /** */ + private static final short OP_CACHE_GET_SIZE = 26; + + /** */ + private static final short OP_CACHE_REMOVE_KEYS = 27; + + /** */ + private static final short OP_CACHE_REMOVE_ALL = 28; + /** Marshaller. */ private final GridBinaryMarshaller marsh; @@ -129,6 +205,63 @@ public ClientListenerRequest decode(BinaryRawReaderEx reader) { case OP_RESOURCE_CLOSE: return new ClientResourceCloseRequest(reader); + + case OP_CACHE_CONTAINS_KEY: + return new ClientCacheContainsKeyRequest(reader); + + case OP_CACHE_CONTAINS_KEYS: + return new ClientCacheContainsKeysRequest(reader); + + case OP_CACHE_GET_ALL: + return new ClientCacheGetAllRequest(reader); + + case OP_CACHE_GET_AND_PUT: + return new ClientCacheGetAndPutRequest(reader); + + case OP_CACHE_GET_AND_REPLACE: + return new ClientCacheGetAndReplaceRequest(reader); + + case OP_CACHE_GET_AND_REMOVE: + return new ClientCacheGetAndRemoveRequest(reader); + + case OP_CACHE_PUT_IF_ABSENT: + return new ClientCachePutIfAbsentRequest(reader); + + case OP_CACHE_GET_AND_PUT_IF_ABSENT: + return new ClientCacheGetAndPutIfAbsentRequest(reader); + + case OP_CACHE_REPLACE: + return new ClientCacheReplaceRequest(reader); + + case OP_CACHE_REPLACE_IF_EQUALS: + return new ClientCacheReplaceIfEqualsRequest(reader); + + case OP_CACHE_PUT_ALL: + return new ClientCachePutAllRequest(reader); + + case OP_CACHE_CLEAR: + return new ClientCacheClearRequest(reader); 
+ + case OP_CACHE_CLEAR_KEY: + return new ClientCacheClearKeyRequest(reader); + + case OP_CACHE_CLEAR_KEYS: + return new ClientCacheClearKeysRequest(reader); + + case OP_CACHE_REMOVE_KEY: + return new ClientCacheRemoveKeyRequest(reader); + + case OP_CACHE_REMOVE_IF_EQUALS: + return new ClientCacheRemoveIfEqualsRequest(reader); + + case OP_CACHE_GET_SIZE: + return new ClientCacheGetSizeRequest(reader); + + case OP_CACHE_REMOVE_KEYS: + return new ClientCacheRemoveKeysRequest(reader); + + case OP_CACHE_REMOVE_ALL: + return new ClientCacheRemoveAllRequest(reader); } return new ClientRawRequest(reader.readLong(), ClientStatus.INVALID_OP_CODE, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheClearKeyRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheClearKeyRequest.java new file mode 100644 index 0000000000000..6bcbbe89b2636 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheClearKeyRequest.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Clear key request. + */ +public class ClientCacheClearKeyRequest extends ClientCacheKeyRequest { + /** + * Constructor. + * + * @param reader Reader. + */ + public ClientCacheClearKeyRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + cache(ctx).clear(key()); + + return super.process(ctx); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheClearKeysRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheClearKeysRequest.java new file mode 100644 index 0000000000000..04eb7f60c9502 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheClearKeysRequest.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Clear keys request. + */ +public class ClientCacheClearKeysRequest extends ClientCacheKeysRequest { + /** + * Constructor. + * + * @param reader Reader. + */ + public ClientCacheClearKeysRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + cache(ctx).clearAll(keys()); + + return super.process(ctx); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheClearRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheClearRequest.java new file mode 100644 index 0000000000000..0e5f20de1eb1b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheClearRequest.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.binary.BinaryRawReader; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Cache clear request. + */ +public class ClientCacheClearRequest extends ClientCacheRequest { + /** + * Constructor. + * + * @param reader Reader. + */ + public ClientCacheClearRequest(BinaryRawReader reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + cache(ctx).clear(); + + return super.process(ctx); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheContainsKeyRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheContainsKeyRequest.java new file mode 100644 index 0000000000000..8470828e424a1 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheContainsKeyRequest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientBooleanResponse; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * ContainsKey request. + */ +public class ClientCacheContainsKeyRequest extends ClientCacheKeyRequest { + /** + * Constructor. + * + * @param reader Reader. 
+ */ + public ClientCacheContainsKeyRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + boolean val = cache(ctx).containsKey(key()); + + return new ClientBooleanResponse(requestId(), val); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheContainsKeysRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheContainsKeysRequest.java new file mode 100644 index 0000000000000..41e13068db1f5 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheContainsKeysRequest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientBooleanResponse; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * ContainsKeys request. + */ +public class ClientCacheContainsKeysRequest extends ClientCacheKeysRequest { + /** + * Constructor. + * + * @param reader Reader. + */ + public ClientCacheContainsKeysRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + boolean val = cache(ctx).containsKeys(keys()); + + return new ClientBooleanResponse(requestId(), val); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAllRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAllRequest.java new file mode 100644 index 0000000000000..2b33af1cb69f8 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAllRequest.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +import java.util.Map; + +/** + * GetAll request. + */ +public class ClientCacheGetAllRequest extends ClientCacheKeysRequest { + /** + * Constructor. + * + * @param reader Reader. + */ + public ClientCacheGetAllRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + Map val = cache(ctx).getAll(keys()); + + return new ClientCacheGetAllResponse(requestId(), val); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAllResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAllResponse.java new file mode 100644 index 0000000000000..2ee2d5b563ef7 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAllResponse.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawWriterEx; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +import java.util.Map; + +/** + * GetAll response. + */ +class ClientCacheGetAllResponse extends ClientResponse { + /** Result. */ + private final Map res; + + /** + * Ctor. + * + * @param requestId Request id. + * @param res Result. 
+ */ + ClientCacheGetAllResponse(long requestId, Map res) { + super(requestId); + + assert res != null; + + this.res = res; + } + + /** {@inheritDoc} */ + @Override public void encode(BinaryRawWriterEx writer) { + super.encode(writer); + + writer.writeInt(res.size()); + + for (Map.Entry e : res.entrySet()) { + writer.writeObjectDetached(e.getKey()); + writer.writeObjectDetached(e.getValue()); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndPutIfAbsentRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndPutIfAbsentRequest.java new file mode 100644 index 0000000000000..836021313c5fc --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndPutIfAbsentRequest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientObjectResponse; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Cache get and put if absent request. + */ +public class ClientCacheGetAndPutIfAbsentRequest extends ClientCacheKeyValueRequest { + /** + * Ctor. + * + * @param reader Reader. + */ + public ClientCacheGetAndPutIfAbsentRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + Object res = cache(ctx).getAndPutIfAbsent(key(), val()); + + return new ClientObjectResponse(requestId(), res); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndPutRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndPutRequest.java new file mode 100644 index 0000000000000..7a540e8473ac9 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndPutRequest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientObjectResponse; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Cache get and put request. + */ +public class ClientCacheGetAndPutRequest extends ClientCacheKeyValueRequest { + /** + * Ctor. + * + * @param reader Reader. + */ + public ClientCacheGetAndPutRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + Object res = cache(ctx).getAndPut(key(), val()); + + return new ClientObjectResponse(requestId(), res); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndRemoveRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndRemoveRequest.java new file mode 100644 index 0000000000000..e4fd735b186ac --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndRemoveRequest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientObjectResponse; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Cache get and remove request. + */ +public class ClientCacheGetAndRemoveRequest extends ClientCacheKeyRequest { + /** + * Constructor. + * + * @param reader Reader. 
+ */ + public ClientCacheGetAndRemoveRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + Object val = cache(ctx).getAndRemove(key()); + + return new ClientObjectResponse(requestId(), val); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndReplaceRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndReplaceRequest.java new file mode 100644 index 0000000000000..dba8639e4c07a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetAndReplaceRequest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientObjectResponse; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Cache get and replace request. + */ +public class ClientCacheGetAndReplaceRequest extends ClientCacheKeyValueRequest { + /** + * Ctor. + * + * @param reader Reader. + */ + public ClientCacheGetAndReplaceRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + Object res = cache(ctx).getAndReplace(key(), val()); + + return new ClientObjectResponse(requestId(), res); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetRequest.java index e2d261a86ce8c..41558c2863d03 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetRequest.java @@ -25,10 +25,7 @@ /** * Cache get request. */ -public class ClientCacheGetRequest extends ClientCacheRequest { - /** Key. */ - private final Object key; - +public class ClientCacheGetRequest extends ClientCacheKeyRequest { /** * Constructor. 
* @@ -36,14 +33,12 @@ public class ClientCacheGetRequest extends ClientCacheRequest { */ public ClientCacheGetRequest(BinaryRawReaderEx reader) { super(reader); - - key = reader.readObjectDetached(); } /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public ClientResponse process(ClientConnectionContext ctx) { - Object val = cache(ctx).get(key); + Object val = cache(ctx).get(key()); return new ClientObjectResponse(requestId(), val); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetSizeRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetSizeRequest.java new file mode 100644 index 0000000000000..ba185bf7415d8 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetSizeRequest.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.binary.BinaryRawReader; +import org.apache.ignite.cache.CachePeekMode; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientLongResponse; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Cache size request. + */ +public class ClientCacheGetSizeRequest extends ClientCacheRequest { + /** Peek modes. */ + private final CachePeekMode[] modes; + + /** + * Constructor. + * + * @param reader Reader. + */ + public ClientCacheGetSizeRequest(BinaryRawReader reader) { + super(reader); + + int cnt = reader.readInt(); + + modes = new CachePeekMode[cnt]; + + for (int i = 0; i < cnt; i++) { + modes[i] = CachePeekMode.fromOrdinal(reader.readByte()); + } + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + long res = cache(ctx).sizeLong(modes); + + return new ClientLongResponse(requestId(), res); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheKeyRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheKeyRequest.java new file mode 100644 index 0000000000000..e888236b7f83a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheKeyRequest.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; + +/** + * Cache request involving key. + */ +public abstract class ClientCacheKeyRequest extends ClientCacheRequest { + /** Key. */ + private final Object key; + + /** + * Ctor. + * + * @param reader Reader. + */ + ClientCacheKeyRequest(BinaryRawReaderEx reader) { + super(reader); + + key = reader.readObjectDetached(); + } + + /** + * Gets the key. + * + * @return Key. + */ + public Object key() { + return key; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheKeyValueRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheKeyValueRequest.java new file mode 100644 index 0000000000000..03b85d6e87f84 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheKeyValueRequest.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; + +/** + * Cache request involving key and value. + */ +public class ClientCacheKeyValueRequest extends ClientCacheKeyRequest { + /** Value. */ + private final Object val; + + /** + * Ctor. + * + * @param reader Reader. + */ + ClientCacheKeyValueRequest(BinaryRawReaderEx reader) { + super(reader); + + val = reader.readObjectDetached(); + } + + /** + * Gets the value. + * + * @return Value. + */ + public Object val() { + return val; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheKeysRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheKeysRequest.java new file mode 100644 index 0000000000000..526ee5b41fe9e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheKeysRequest.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; + +import java.util.LinkedHashSet; +import java.util.Set; + +/** + * Key set request. + */ +public class ClientCacheKeysRequest extends ClientCacheRequest { + /** Keys. */ + private final Set keys; + + /** + * Constructor. + * + * @param reader Reader. + */ + ClientCacheKeysRequest(BinaryRawReaderEx reader) { + super(reader); + + keys = readSet(reader); + } + + /** + * Gets the set of keys. + * + * @return Keys. + */ + public Set keys() { + return keys; + } + + /** + * Reads a set of objects. + * + * @param reader Reader. + * @return Set of objects. + */ + private static Set readSet(BinaryRawReaderEx reader) { + int cnt = reader.readInt(); + + Set keys = new LinkedHashSet<>(cnt); + + for (int i = 0; i < cnt; i++) + keys.add(reader.readObjectDetached()); + + return keys; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCachePutAllRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCachePutAllRequest.java new file mode 100644 index 0000000000000..28a7fa57e3ee5 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCachePutAllRequest.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * PutAll request. + */ +public class ClientCachePutAllRequest extends ClientCacheRequest { + /** Map. */ + private final Map map; + + /** + * Constructor. + * + * @param reader Reader. 
+ */ + public ClientCachePutAllRequest(BinaryRawReaderEx reader) { + super(reader); + + int cnt = reader.readInt(); + + map = new LinkedHashMap<>(cnt); + + for (int i = 0; i < cnt; i++) + map.put(reader.readObjectDetached(), reader.readObjectDetached()); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + cache(ctx).putAll(map); + + return super.process(ctx); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCachePutIfAbsentRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCachePutIfAbsentRequest.java new file mode 100644 index 0000000000000..4dd2cde58ce06 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCachePutIfAbsentRequest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientBooleanResponse; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Cache put if absent request. + */ +public class ClientCachePutIfAbsentRequest extends ClientCacheKeyValueRequest { + /** + * Ctor. + * + * @param reader Reader. + */ + public ClientCachePutIfAbsentRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + boolean res = cache(ctx).putIfAbsent(key(), val()); + + return new ClientBooleanResponse(requestId(), res); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCachePutRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCachePutRequest.java index 04e396181ed97..94c2b25ad5ca0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCachePutRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCachePutRequest.java @@ -24,13 +24,7 @@ /** * Cache put request. */ -public class ClientCachePutRequest extends ClientCacheRequest { - /** Key. */ - private final Object key; - - /** Value. */ - private final Object val; - +public class ClientCachePutRequest extends ClientCacheKeyValueRequest { /** * Ctor. 
* @@ -38,15 +32,12 @@ public class ClientCachePutRequest extends ClientCacheRequest { */ public ClientCachePutRequest(BinaryRawReaderEx reader) { super(reader); - - key = reader.readObjectDetached(); - val = reader.readObjectDetached(); } /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public ClientResponse process(ClientConnectionContext ctx) { - cache(ctx).put(key, val); + cache(ctx).put(key(), val()); return super.process(ctx); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveAllRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveAllRequest.java new file mode 100644 index 0000000000000..f5adc6378912e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveAllRequest.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.binary.BinaryRawReader; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Cache removeAll request. + */ +public class ClientCacheRemoveAllRequest extends ClientCacheRequest { + /** + * Constructor. + * + * @param reader Reader. + */ + public ClientCacheRemoveAllRequest(BinaryRawReader reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + cache(ctx).removeAll(); + + return super.process(ctx); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveIfEqualsRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveIfEqualsRequest.java new file mode 100644 index 0000000000000..b86f2f8895d64 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveIfEqualsRequest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientBooleanResponse; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Cache remove request with value. + */ +public class ClientCacheRemoveIfEqualsRequest extends ClientCacheKeyValueRequest { + /** + * Ctor. + * + * @param reader Reader. + */ + public ClientCacheRemoveIfEqualsRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + boolean res = cache(ctx).remove(key(), val()); + + return new ClientBooleanResponse(requestId(), res); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveKeyRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveKeyRequest.java new file mode 100644 index 0000000000000..a68c32730f4fe --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveKeyRequest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientBooleanResponse; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Remove request. + */ +public class ClientCacheRemoveKeyRequest extends ClientCacheKeyRequest { + /** + * Constructor. + * + * @param reader Reader. + */ + public ClientCacheRemoveKeyRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + boolean val = cache(ctx).remove(key()); + + return new ClientBooleanResponse(requestId(), val); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveKeysRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveKeysRequest.java new file mode 100644 index 0000000000000..043b5688a3f43 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRemoveKeysRequest.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Remove keys request. + */ +public class ClientCacheRemoveKeysRequest extends ClientCacheKeysRequest { + /** + * Constructor. + * + * @param reader Reader. + */ + public ClientCacheRemoveKeysRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + cache(ctx).removeAll(keys()); + + return super.process(ctx); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheReplaceIfEqualsRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheReplaceIfEqualsRequest.java new file mode 100644 index 0000000000000..8645fbb817322 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheReplaceIfEqualsRequest.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientBooleanResponse; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Cache replace request. + */ +public class ClientCacheReplaceIfEqualsRequest extends ClientCacheKeyValueRequest { + /** New value. */ + private final Object newVal; + + /** + * Ctor. + * + * @param reader Reader. 
+ */ + public ClientCacheReplaceIfEqualsRequest(BinaryRawReaderEx reader) { + super(reader); + + newVal = reader.readObjectDetached(); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + boolean res = cache(ctx).replace(key(), val(), newVal); + + return new ClientBooleanResponse(requestId(), res); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheReplaceRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheReplaceRequest.java new file mode 100644 index 0000000000000..bd7a642bb39e0 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheReplaceRequest.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.platform.client.cache; + +import org.apache.ignite.internal.binary.BinaryRawReaderEx; +import org.apache.ignite.internal.processors.platform.client.ClientBooleanResponse; +import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; +import org.apache.ignite.internal.processors.platform.client.ClientResponse; + +/** + * Cache replace request. + */ +public class ClientCacheReplaceRequest extends ClientCacheKeyValueRequest { + /** + * Ctor. + * + * @param reader Reader. + */ + public ClientCacheReplaceRequest(BinaryRawReaderEx reader) { + super(reader); + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public ClientResponse process(ClientConnectionContext ctx) { + boolean res = cache(ctx).replace(key(), val()); + + return new ClientBooleanResponse(requestId(), res); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRequest.java index 8f81e94fc876c..1aaa22cfb2f65 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRequest.java @@ -21,9 +21,9 @@ import org.apache.ignite.binary.BinaryRawReader; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; -import org.apache.ignite.internal.processors.platform.client.IgniteClientException; import org.apache.ignite.internal.processors.platform.client.ClientRequest; import org.apache.ignite.internal.processors.platform.client.ClientStatus; +import org.apache.ignite.internal.processors.platform.client.IgniteClientException; /** * Cache get request. 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTest.cs index c456592230480..083038a4ae417 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTest.cs @@ -23,6 +23,7 @@ namespace Apache.Ignite.Core.Tests.Client.Cache using System.Linq; using System.Threading; using Apache.Ignite.Core.Binary; + using Apache.Ignite.Core.Cache; using Apache.Ignite.Core.Client; using Apache.Ignite.Core.Impl.Client; using NUnit.Framework; @@ -58,6 +59,11 @@ public void TestPutGetPrimitives() // Null key. Assert.Throws(() => clientCache.Get(null)); + + // Null vs 0. + var intCache = client.GetCache(CacheName); + intCache.Put(1, 0); + Assert.AreEqual(0, intCache.Get(1)); } } @@ -78,7 +84,7 @@ public void TestPutGetUserObjects([Values(true, false)] bool compactFooter) { var person = new Person {Id = 100, Name = "foo"}; var person2 = new Person2 {Id = 200, Name = "bar"}; - + var serverCache = GetCache(); var clientCache = client.GetCache(CacheName); @@ -109,6 +115,604 @@ public void TestPutGetUserObjects([Values(true, false)] bool compactFooter) } } + /// + /// Tests the TryGet method. + /// + [Test] + public void TestTryGet() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + cache[1] = 0; + cache[2] = 2; + + // Non-existent key. + int res; + var success = cache.TryGet(0, out res); + + Assert.AreEqual(0, res); + Assert.IsFalse(success); + + // Key with default value. + success = cache.TryGet(1, out res); + + Assert.AreEqual(0, res); + Assert.IsTrue(success); + + // Key with custom value. + success = cache.TryGet(2, out res); + + Assert.AreEqual(2, res); + Assert.IsTrue(success); + + // Null key. + Assert.Throws(() => cache.TryGet(null, out res)); + } + } + + /// + /// Tests the GetAll method. 
+ /// + [Test] + public void TestGetAll() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + cache[1] = 1; + cache[2] = 2; + cache[3] = 3; + + var res = cache.GetAll(new int?[] {1}).Single(); + Assert.AreEqual(1, res.Key); + Assert.AreEqual(1, res.Value); + + res = cache.GetAll(new int?[] {1, -1}).Single(); + Assert.AreEqual(1, res.Key); + Assert.AreEqual(1, res.Value); + + CollectionAssert.AreEquivalent(new[] {1, 2, 3}, + cache.GetAll(new int?[] {1, 2, 3}).Select(x => x.Value)); + + Assert.Throws(() => cache.GetAll(null)); + + Assert.Throws(() => cache.GetAll(new int?[] {1, null})); + Assert.Throws(() => cache.GetAll(new int?[] {null})); + } + } + + /// + /// Tests the GetAndPut method. + /// + [Test] + public void TestGetAndPut() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + Assert.IsFalse(cache.ContainsKey(1)); + + var res = cache.GetAndPut(1, 1); + Assert.IsFalse(res.Success); + Assert.IsNull(res.Value); + + Assert.IsTrue(cache.ContainsKey(1)); + + res = cache.GetAndPut(1, 2); + Assert.IsTrue(res.Success); + Assert.AreEqual(1, res.Value); + + Assert.AreEqual(2, cache[1]); + + Assert.Throws(() => cache.GetAndPut(1, null)); + Assert.Throws(() => cache.GetAndPut(null, 1)); + } + } + + /// + /// Tests the GetAndReplace method. + /// + [Test] + public void TestGetAndReplace() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + Assert.IsFalse(cache.ContainsKey(1)); + + var res = cache.GetAndReplace(1, 1); + Assert.IsFalse(res.Success); + Assert.IsNull(res.Value); + + Assert.IsFalse(cache.ContainsKey(1)); + cache[1] = 1; + + res = cache.GetAndReplace(1, 2); + Assert.IsTrue(res.Success); + Assert.AreEqual(1, res.Value); + + Assert.AreEqual(2, cache[1]); + + Assert.Throws(() => cache.GetAndReplace(1, null)); + Assert.Throws(() => cache.GetAndReplace(null, 1)); + } + } + + /// + /// Tests the GetAndRemove method. 
+ /// + [Test] + public void TestGetAndRemove() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + Assert.IsFalse(cache.ContainsKey(1)); + + var res = cache.GetAndRemove(1); + Assert.IsFalse(res.Success); + Assert.IsNull(res.Value); + + Assert.IsFalse(cache.ContainsKey(1)); + cache[1] = 1; + + res = cache.GetAndRemove(1); + Assert.IsTrue(res.Success); + Assert.AreEqual(1, res.Value); + + Assert.IsFalse(cache.ContainsKey(1)); + + Assert.Throws(() => cache.GetAndRemove(null)); + } + } + + /// + /// Tests the ContainsKey method. + /// + [Test] + public void TestContainsKey() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + cache[1] = 1; + + Assert.IsTrue(cache.ContainsKey(1)); + Assert.IsFalse(cache.ContainsKey(2)); + + Assert.Throws(() => cache.ContainsKey(null)); + } + } + + /// + /// Tests the ContainsKeys method. + /// + [Test] + public void TestContainsKeys() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + cache[1] = 1; + cache[2] = 2; + cache[3] = 3; + + Assert.IsTrue(cache.ContainsKeys(new[] {1})); + Assert.IsTrue(cache.ContainsKeys(new[] {1, 2})); + Assert.IsTrue(cache.ContainsKeys(new[] {2, 1})); + Assert.IsTrue(cache.ContainsKeys(new[] {1, 2, 3})); + Assert.IsTrue(cache.ContainsKeys(new[] {1, 3, 2})); + + Assert.IsFalse(cache.ContainsKeys(new[] {0})); + Assert.IsFalse(cache.ContainsKeys(new[] {0, 1})); + Assert.IsFalse(cache.ContainsKeys(new[] {1, 0})); + Assert.IsFalse(cache.ContainsKeys(new[] {1, 2, 3, 0})); + + Assert.Throws(() => cache.ContainsKeys(null)); + } + } + + /// + /// Tests the PutIfAbsent method. 
+ /// + [Test] + public void TestPutIfAbsent() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + Assert.IsFalse(cache.ContainsKey(1)); + + var res = cache.PutIfAbsent(1, 1); + Assert.IsTrue(res); + Assert.AreEqual(1, cache[1]); + + res = cache.PutIfAbsent(1, 2); + Assert.IsFalse(res); + Assert.AreEqual(1, cache[1]); + + Assert.Throws(() => cache.PutIfAbsent(null, 1)); + Assert.Throws(() => cache.PutIfAbsent(1, null)); + } + } + + /// + /// Tests the GetAndPutIfAbsent method. + /// + [Test] + public void TestGetAndPutIfAbsent() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + Assert.IsFalse(cache.ContainsKey(1)); + + var res = cache.GetAndPutIfAbsent(1, 1); + Assert.IsFalse(res.Success); + Assert.IsNull(res.Value); + Assert.AreEqual(1, cache[1]); + + res = cache.GetAndPutIfAbsent(1, 2); + Assert.IsTrue(res.Success); + Assert.AreEqual(1, res.Value); + Assert.AreEqual(1, cache[1]); + + Assert.Throws(() => cache.GetAndPutIfAbsent(null, 1)); + Assert.Throws(() => cache.GetAndPutIfAbsent(1, null)); + } + } + + /// + /// Tests the Replace method. + /// + [Test] + public void TestReplace() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + Assert.IsFalse(cache.ContainsKey(1)); + + var res = cache.Replace(1, 1); + Assert.IsFalse(res); + Assert.IsFalse(cache.ContainsKey(1)); + + cache[1] = 1; + + res = cache.Replace(1, 2); + Assert.IsTrue(res); + Assert.AreEqual(2, cache[1]); + + Assert.Throws(() => cache.Replace(null, 1)); + Assert.Throws(() => cache.Replace(1, null)); + } + } + + /// + /// Tests the Replace overload with additional argument. 
+ /// + [Test] + public void TestReplace2() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + Assert.IsFalse(cache.ContainsKey(1)); + + var res = cache.Replace(1, 1, 2); + Assert.IsFalse(res); + Assert.IsFalse(cache.ContainsKey(1)); + + cache[1] = 1; + + res = cache.Replace(1, -1, 2); + Assert.IsFalse(res); + Assert.AreEqual(1, cache[1]); + + res = cache.Replace(1, 1, 2); + Assert.IsTrue(res); + Assert.AreEqual(2, cache[1]); + + Assert.Throws(() => cache.Replace(null, 1, 1)); + Assert.Throws(() => cache.Replace(1, null, 1)); + Assert.Throws(() => cache.Replace(1, 1, null)); + } + } + + /// + /// Tests the PutAll method. + /// + [Test] + public void TestPutAll() + { + using (var client = GetClient()) + { + // Primitives. + var cache = client.GetCache(CacheName); + + cache.PutAll(Enumerable.Range(1, 3).ToDictionary(x => (int?) x, x => (int?) x + 1)); + + Assert.AreEqual(2, cache[1]); + Assert.AreEqual(3, cache[2]); + Assert.AreEqual(4, cache[3]); + + // Objects. + var cache2 = client.GetCache(CacheName); + + var obj1 = new Container(); + var obj2 = new Container(); + var obj3 = new Container(); + + obj1.Inner = obj2; + obj2.Inner = obj1; + obj3.Inner = obj2; + + cache2.PutAll(new Dictionary + { + {1, obj1}, + {2, obj2}, + {3, obj3} + }); + + var res1 = cache2[1]; + var res2 = cache2[2]; + var res3 = cache2[3]; + + Assert.AreEqual(res1, res1.Inner.Inner); + Assert.AreEqual(res2, res2.Inner.Inner); + Assert.IsNotNull(res3.Inner.Inner.Inner); + + // Nulls. + Assert.Throws(() => cache.PutAll(null)); + + Assert.Throws(() => cache.PutAll(new[] + { + new KeyValuePair(null, 1) + })); + + Assert.Throws(() => cache.PutAll(new[] + { + new KeyValuePair(1, null) + })); + } + } + + /// + /// Tests the Clear method. 
+ /// + [Test] + public void TestClear() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + cache[1] = 1; + cache[2] = 2; + + cache.Clear(); + + Assert.IsFalse(cache.ContainsKey(1)); + Assert.IsFalse(cache.ContainsKey(2)); + } + } + + /// + /// Tests the Clear method with a key argument. + /// + [Test] + public void TestClearKey() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + cache[1] = 1; + cache[2] = 2; + + cache.Clear(1); + Assert.IsFalse(cache.ContainsKey(1)); + Assert.IsTrue(cache.ContainsKey(2)); + + cache.Clear(2); + Assert.IsFalse(cache.ContainsKey(1)); + Assert.IsFalse(cache.ContainsKey(2)); + + Assert.Throws(() => cache.Clear(null)); + } + } + + /// + /// Tests the ClearAll method. + /// + [Test] + public void TestClearAll() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + cache[1] = 1; + cache[2] = 2; + cache[3] = 3; + + cache.ClearAll(new int?[] {1, 3}); + Assert.IsFalse(cache.ContainsKey(1)); + Assert.IsTrue(cache.ContainsKey(2)); + Assert.IsFalse(cache.ContainsKey(3)); + + Assert.Throws(() => cache.ClearAll(null)); + Assert.Throws(() => cache.ClearAll(new int?[] {null, 1})); + } + } + + /// + /// Tests the Remove method. + /// + [Test] + public void TestRemove() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + cache[1] = 1; + cache[2] = 2; + + var res = cache.Remove(1); + Assert.IsTrue(res); + Assert.IsFalse(cache.ContainsKey(1)); + Assert.IsTrue(cache.ContainsKey(2)); + + res = cache.Remove(2); + Assert.IsTrue(res); + Assert.IsFalse(cache.ContainsKey(1)); + Assert.IsFalse(cache.ContainsKey(2)); + + res = cache.Remove(-1); + Assert.IsFalse(res); + + Assert.Throws(() => cache.Remove(null)); + } + } + + /// + /// Tests the Remove method with value argument. 
+ /// + [Test] + public void TestRemoveKeyVal() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + cache[1] = 1; + cache[2] = 2; + + var res = cache.Remove(1, 0); + Assert.IsFalse(res); + + res = cache.Remove(0, 0); + Assert.IsFalse(res); + + res = cache.Remove(1, 1); + Assert.IsTrue(res); + Assert.IsFalse(cache.ContainsKey(1)); + Assert.IsTrue(cache.ContainsKey(2)); + + res = cache.Remove(2, 2); + Assert.IsTrue(res); + Assert.IsFalse(cache.ContainsKey(1)); + Assert.IsFalse(cache.ContainsKey(2)); + + res = cache.Remove(2, 2); + Assert.IsFalse(res); + + Assert.Throws(() => cache.Remove(1, null)); + Assert.Throws(() => cache.Remove(null, 1)); + } + } + + /// + /// Tests the RemoveAll with a set of keys. + /// + [Test] + public void TestRemoveReys() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + var keys = Enumerable.Range(1, 10).Cast().ToArray(); + + cache.PutAll(keys.ToDictionary(x => x, x => x)); + + cache.RemoveAll(keys.Skip(2)); + CollectionAssert.AreEquivalent(keys.Take(2), cache.GetAll(keys).Select(x => x.Key)); + + cache.RemoveAll(new int?[] {1}); + Assert.AreEqual(2, cache.GetAll(keys).Single().Value); + + cache.RemoveAll(keys); + cache.RemoveAll(keys); + + Assert.AreEqual(0, cache.GetSize()); + + Assert.Throws(() => cache.RemoveAll(null)); + Assert.Throws(() => cache.RemoveAll(new int?[] {1, null})); + } + } + + /// + /// Tests the RemoveAll method without argument. + /// + [Test] + public void TestRemoveAll() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + cache[1] = 1; + cache[2] = 2; + + cache.RemoveAll(); + + Assert.AreEqual(0, cache.GetSize()); + } + } + + /// + /// Tests the GetSize method. 
+ /// + [Test] + public void TestGetSize() + { + using (var client = GetClient()) + { + var cache = client.GetCache(CacheName); + + Assert.AreEqual(0, cache.GetSize()); + Assert.AreEqual(0, cache.GetSize(CachePeekMode.All)); + Assert.AreEqual(0, cache.GetSize(CachePeekMode.Backup)); + Assert.AreEqual(0, cache.GetSize(CachePeekMode.Near)); + Assert.AreEqual(0, cache.GetSize(CachePeekMode.Offheap)); + Assert.AreEqual(0, cache.GetSize(CachePeekMode.Onheap)); + Assert.AreEqual(0, cache.GetSize(CachePeekMode.Primary)); + + cache[1] = 1; + + Assert.AreEqual(1, cache.GetSize()); + Assert.AreEqual(1, cache.GetSize(CachePeekMode.All)); + Assert.AreEqual(0, cache.GetSize(CachePeekMode.Backup)); + Assert.AreEqual(0, cache.GetSize(CachePeekMode.Near)); + Assert.AreEqual(1, cache.GetSize(CachePeekMode.Offheap)); + Assert.AreEqual(0, cache.GetSize(CachePeekMode.Onheap)); + Assert.AreEqual(1, cache.GetSize(CachePeekMode.Primary)); + + cache.PutAll(Enumerable.Range(1, 100).ToDictionary(x => x, x => x)); + + Assert.AreEqual(100, cache.GetSize()); + Assert.AreEqual(100, cache.GetSize(CachePeekMode.All)); + Assert.AreEqual(0, cache.GetSize(CachePeekMode.Backup)); + Assert.AreEqual(0, cache.GetSize(CachePeekMode.Near)); + Assert.AreEqual(100, cache.GetSize(CachePeekMode.Offheap)); + Assert.AreEqual(0, cache.GetSize(CachePeekMode.Onheap)); + Assert.AreEqual(100, cache.GetSize(CachePeekMode.Primary)); + } + } + /// /// Tests client get in multiple threads with a single client. 
/// @@ -170,5 +774,10 @@ public void TestExceptions() Assert.AreEqual((int) ClientStatus.CacheDoesNotExist, ex.ErrorCode); } } + + private class Container + { + public Container Inner; + } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTestNoMeta.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTestNoMeta.cs index 66958353e73c5..782e3cc43fe92 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTestNoMeta.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTestNoMeta.cs @@ -52,7 +52,7 @@ public void TestPutGetUserObjects() using (var client = Ignition.StartClient(cfg)) { var serverCache = Ignition.GetIgnite().GetOrCreateCache( - new CacheConfiguration(CacheName, new QueryEntity + new CacheConfiguration("person", new QueryEntity { KeyType = typeof(int), ValueType = typeof(Person), @@ -63,7 +63,7 @@ public void TestPutGetUserObjects() } })); - var clientCache = client.GetCache(CacheName); + var clientCache = client.GetCache(serverCache.Name); // Put through client cache. clientCache.Put(1, new Person { Id = 100, Name = "foo" }); diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientTestBase.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientTestBase.cs index 81e14187b1be2..408eb7363ca2e 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientTestBase.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientTestBase.cs @@ -75,6 +75,15 @@ public void FixtureTearDown() Ignition.StopAll(true); } + /// + /// Sets up the test. + /// + [SetUp] + public void TestSetUp() + { + GetCache().RemoveAll(); + } + /// /// Gets the cache. 
/// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/ICacheClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/ICacheClient.cs index edd411ca1e7e1..d772ba6054c22 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/ICacheClient.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/ICacheClient.cs @@ -50,6 +50,25 @@ public interface ICacheClient /// If the key is not present in the cache. TV Get(TK key); + /// + /// Retrieves value mapped to the specified key from cache. + /// + /// Key. + /// When this method returns, the value associated with the specified key, + /// if the key is found; otherwise, the default value for the type of the value parameter. + /// This parameter is passed uninitialized. + /// + /// true if the cache contains an element with the specified key; otherwise, false. + /// + bool TryGet(TK key, out TV value); + + /// + /// Retrieves values mapped to the specified keys from cache. + /// + /// Keys. + /// Map of key-value pairs. + ICollection> GetAll(IEnumerable keys); + /// /// Gets or sets a cache value with the specified key. /// Shortcut to and @@ -59,11 +78,147 @@ public interface ICacheClient /// If the key is not present in the cache. TV this[TK key] { get; set; } + /// + /// Check if cache contains mapping for this key. + /// + /// Key. + /// True if cache contains mapping for this key. + bool ContainsKey(TK key); + + /// + /// Check if cache contains mapping for these keys. + /// + /// Keys. + /// True if cache contains mapping for all these keys. + bool ContainsKeys(IEnumerable keys); + /// /// Executes a Scan query. /// /// Scan query. /// Query cursor. IQueryCursor> Query(ScanQuery scanQuery); + + /// + /// Associates the specified value with the specified key in this cache, + /// returning an existing value if one existed. + /// + /// Key with which the specified value is to be associated. + /// Value to be associated with the specified key. 
+ /// + /// The value associated with the key at the start of the operation. + /// + CacheResult GetAndPut(TK key, TV val); + + /// + /// Atomically replaces the value for a given key if and only if there is a value currently mapped by the key. + /// + /// Key with which the specified value is to be associated. + /// Value to be associated with the specified key. + /// + /// The previous value associated with the specified key. + /// + CacheResult GetAndReplace(TK key, TV val); + + /// + /// Atomically removes the entry for a key only if currently mapped to some value. + /// + /// Key with which the specified value is associated. + /// The value if one existed. + CacheResult GetAndRemove(TK key); + + /// + /// Atomically associates the specified key with the given value if it is not already associated with a value. + /// + /// Key with which the specified value is to be associated. + /// Value to be associated with the specified key. + /// True if a value was set. + bool PutIfAbsent(TK key, TV val); + + /// + /// Stores given key-value pair in cache only if cache had no previous mapping for it. + /// + /// Key to store in cache. + /// Value to be associated with the given key. + /// + /// Previously contained value regardless of whether put happened or not. + /// + CacheResult GetAndPutIfAbsent(TK key, TV val); + + /// + /// Stores given key-value pair in cache only if there is a previous mapping for it. + /// + /// Key to store in cache. + /// Value to be associated with the given key. + /// True if the value was replaced. + bool Replace(TK key, TV val); + + /// + /// Stores given key-value pair in cache only if only if the previous value is equal to the + /// old value passed as argument. + /// + /// Key to store in cache. + /// Old value to match. + /// Value to be associated with the given key. + /// True if replace happened, false otherwise. + bool Replace(TK key, TV oldVal, TV newVal); + + /// + /// Stores given key-value pairs in cache. 
+ /// + /// Key-value pairs to store in cache. + void PutAll(IEnumerable> vals); + + /// + /// Clears the contents of the cache, without notifying listeners or CacheWriters. + /// + void Clear(); + + /// + /// Clear entry from the cache, without notifying listeners or CacheWriters. + /// + /// Key to clear. + void Clear(TK key); + + /// + /// Clear entries from the cache, without notifying listeners or CacheWriters. + /// + /// Keys to clear. + void ClearAll(IEnumerable keys); + + /// + /// Removes given key mapping from cache, notifying listeners and cache writers. + /// + /// Key to remove. + /// True if entry was removed, false otherwise. + bool Remove(TK key); + + /// + /// Removes given key mapping from cache if one exists and value is equal to the passed in value. + /// + /// Key whose mapping is to be removed from cache. + /// Value to match against currently cached value. + /// True if entry was removed, false otherwise. + bool Remove(TK key, TV val); + + /// + /// Removes given key mappings from cache, notifying listeners and cache writers. + /// + /// Keys to be removed from cache. + void RemoveAll(IEnumerable keys); + + /// + /// Removes all mappings from cache, notifying listeners and cache writers. + /// + void RemoveAll(); + + /// + /// Gets the number of all entries cached across all nodes. + /// + /// NOTE: this operation is distributed and will query all participating nodes for their cache sizes. + /// + /// Optional peek modes. If not provided, then total cache size is returned. + /// Cache size across all nodes. 
+ long GetSize(params CachePeekMode[] modes); } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Client/IgniteClientException.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Client/IgniteClientException.cs index a20bec3a383a2..2df3d1bb06e41 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Client/IgniteClientException.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Client/IgniteClientException.cs @@ -104,5 +104,13 @@ public override void GetObjectData(SerializationInfo info, StreamingContext cont info.AddValue(ErrorCodeField, _errorCode); } + + /// + /// Returns a that represents this instance. + /// + public override string ToString() + { + return string.Format("{0} [ErrorCode={1}]", base.ToString(), ErrorCode); + } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheImpl.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheImpl.cs index 9d45f503dc25d..ca9fb63d97b7a 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheImpl.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheImpl.cs @@ -342,7 +342,7 @@ public bool TryLocalPeek(TK key, out TV value, params CachePeekMode[] modes) w => { w.WriteObjectDetached(key); - w.WriteInt(EncodePeekModes(modes)); + w.WriteInt(IgniteUtils.EncodePeekModes(modes)); }, (s, r) => r == True ? new CacheResult(Unmarshal(s)) : new CacheResult(), _readException); @@ -808,7 +808,7 @@ public int GetSize(params CachePeekMode[] modes) /** */ public Task GetSizeAsync(params CachePeekMode[] modes) { - var modes0 = EncodePeekModes(modes); + var modes0 = IgniteUtils.EncodePeekModes(modes); return DoOutOpAsync(CacheOp.SizeAsync, w => w.WriteInt(modes0)); } @@ -821,7 +821,7 @@ public Task GetSizeAsync(params CachePeekMode[] modes) /// Size. private int Size0(bool loc, params CachePeekMode[] modes) { - var modes0 = EncodePeekModes(modes); + var modes0 = IgniteUtils.EncodePeekModes(modes); var op = loc ? 
CacheOp.SizeLoc : CacheOp.Size; @@ -1147,7 +1147,7 @@ public IContinuousQueryHandle QueryContinuous(ContinuousQuery qry) /** */ public IEnumerable> GetLocalEntries(CachePeekMode[] peekModes) { - return new CacheEnumerable(this, EncodePeekModes(peekModes)); + return new CacheEnumerable(this, IgniteUtils.EncodePeekModes(peekModes)); } /** */ @@ -1188,22 +1188,6 @@ protected override T Unmarshal(IBinaryStream stream) return Marshaller.Unmarshal(stream, _flagKeepBinary); } - /// - /// Encodes the peek modes into a single int value. - /// - private static int EncodePeekModes(CachePeekMode[] modes) - { - int modesEncoded = 0; - - if (modes != null) - { - foreach (var mode in modes) - modesEncoded |= (int) mode; - } - - return modesEncoded; - } - /// /// Reads results of InvokeAll operation. /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs index 5492ef859fe87..be6e7da2d4d04 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs @@ -21,6 +21,7 @@ namespace Apache.Ignite.Core.Impl.Client.Cache using System.Collections.Generic; using System.Diagnostics; using System.IO; + using Apache.Ignite.Core.Binary; using Apache.Ignite.Core.Cache; using Apache.Ignite.Core.Cache.Query; using Apache.Ignite.Core.Client; @@ -54,7 +55,7 @@ internal class CacheClient : ICacheClient private readonly Marshaller _marsh; /** Keep binary flag. */ - private bool _keepBinary = false; + private readonly bool _keepBinary = false; /// /// Initializes a new instance of the class. 
@@ -93,6 +94,39 @@ public TV Get(TK key) return DoOutInOp(ClientOp.CacheGet, w => w.WriteObject(key), UnmarshalNotNull); } + /** */ + public bool TryGet(TK key, out TV value) + { + IgniteArgumentCheck.NotNull(key, "key"); + + var res = DoOutInOp(ClientOp.CacheGet, w => w.WriteObject(key), UnmarshalCacheResult); + + value = res.Value; + + return res.Success; + } + + /** */ + public ICollection> GetAll(IEnumerable keys) + { + IgniteArgumentCheck.NotNull(keys, "keys"); + + return DoOutInOp(ClientOp.CacheGetAll, w => w.WriteEnumerable(keys), stream => + { + var reader = _marsh.StartUnmarshal(stream, _keepBinary); + + var cnt = reader.ReadInt(); + var res = new List>(cnt); + + for (var i = 0; i < cnt; i++) + { + res.Add(new CacheEntry(reader.ReadObject(), reader.ReadObject())); + } + + return res; + }); + } + /** */ public void Put(TK key, TV val) { @@ -106,6 +140,22 @@ public void Put(TK key, TV val) }); } + /** */ + public bool ContainsKey(TK key) + { + IgniteArgumentCheck.NotNull(key, "key"); + + return DoOutInOp(ClientOp.CacheContainsKey, w => w.WriteObjectDetached(key), r => r.ReadBool()); + } + + /** */ + public bool ContainsKeys(IEnumerable keys) + { + IgniteArgumentCheck.NotNull(keys, "keys"); + + return DoOutInOp(ClientOp.CacheContainsKeys, w => w.WriteEnumerable(keys), r => r.ReadBool()); + } + /** */ public IQueryCursor> Query(ScanQuery scanQuery) { @@ -116,6 +166,166 @@ public void Put(TK key, TV val) return DoOutInOp(ClientOp.QueryScan, w => WriteScanQuery(w, scanQuery), s => new ClientQueryCursor(_ignite, s.ReadLong(), _keepBinary, s)); } + + /** */ + public CacheResult GetAndPut(TK key, TV val) + { + IgniteArgumentCheck.NotNull(key, "key"); + IgniteArgumentCheck.NotNull(val, "val"); + + return DoOutInOp(ClientOp.CacheGetAndPut, w => + { + w.WriteObjectDetached(key); + w.WriteObjectDetached(val); + }, UnmarshalCacheResult); + } + + /** */ + public CacheResult GetAndReplace(TK key, TV val) + { + IgniteArgumentCheck.NotNull(key, "key"); + 
IgniteArgumentCheck.NotNull(val, "val"); + + return DoOutInOp(ClientOp.CacheGetAndReplace, w => + { + w.WriteObjectDetached(key); + w.WriteObjectDetached(val); + }, UnmarshalCacheResult); + } + + /** */ + public CacheResult GetAndRemove(TK key) + { + IgniteArgumentCheck.NotNull(key, "key"); + + return DoOutInOp(ClientOp.CacheGetAndRemove, w => w.WriteObjectDetached(key), + UnmarshalCacheResult); + } + + /** */ + public bool PutIfAbsent(TK key, TV val) + { + IgniteArgumentCheck.NotNull(key, "key"); + IgniteArgumentCheck.NotNull(val, "val"); + + return DoOutInOp(ClientOp.CachePutIfAbsent, w => + { + w.WriteObjectDetached(key); + w.WriteObjectDetached(val); + }, s => s.ReadBool()); + } + + /** */ + public CacheResult GetAndPutIfAbsent(TK key, TV val) + { + IgniteArgumentCheck.NotNull(key, "key"); + IgniteArgumentCheck.NotNull(val, "val"); + + return DoOutInOp(ClientOp.CacheGetAndPutIfAbsent, w => + { + w.WriteObjectDetached(key); + w.WriteObjectDetached(val); + }, UnmarshalCacheResult); + } + + /** */ + public bool Replace(TK key, TV val) + { + IgniteArgumentCheck.NotNull(key, "key"); + IgniteArgumentCheck.NotNull(val, "val"); + + return DoOutInOp(ClientOp.CacheReplace, w => + { + w.WriteObjectDetached(key); + w.WriteObjectDetached(val); + }, s => s.ReadBool()); + } + + /** */ + public bool Replace(TK key, TV oldVal, TV newVal) + { + IgniteArgumentCheck.NotNull(key, "key"); + IgniteArgumentCheck.NotNull(oldVal, "oldVal"); + IgniteArgumentCheck.NotNull(newVal, "newVal"); + + return DoOutInOp(ClientOp.CacheReplaceIfEquals, w => + { + w.WriteObjectDetached(key); + w.WriteObjectDetached(oldVal); + w.WriteObjectDetached(newVal); + }, s => s.ReadBool()); + } + + /** */ + public void PutAll(IEnumerable> vals) + { + IgniteArgumentCheck.NotNull(vals, "vals"); + + DoOutOp(ClientOp.CachePutAll, w => w.WriteDictionary(vals)); + } + + /** */ + public void Clear() + { + DoOutOp(ClientOp.CacheClear); + } + + /** */ + public void Clear(TK key) + { + IgniteArgumentCheck.NotNull(key, 
"key"); + + DoOutOp(ClientOp.CacheClearKey, w => w.WriteObjectDetached(key)); + } + + /** */ + public void ClearAll(IEnumerable keys) + { + IgniteArgumentCheck.NotNull(keys, "keys"); + + DoOutOp(ClientOp.CacheClearKeys, w => w.WriteEnumerable(keys)); + } + + /** */ + public bool Remove(TK key) + { + IgniteArgumentCheck.NotNull(key, "key"); + + return DoOutInOp(ClientOp.CacheRemoveKey, w => w.WriteObjectDetached(key), r => r.ReadBool()); + } + + /** */ + public bool Remove(TK key, TV val) + { + IgniteArgumentCheck.NotNull(key, "key"); + IgniteArgumentCheck.NotNull(val, "val"); + + return DoOutInOp(ClientOp.CacheRemoveIfEquals, w => + { + w.WriteObjectDetached(key); + w.WriteObjectDetached(val); + }, r => r.ReadBool()); + } + + /** */ + public void RemoveAll(IEnumerable keys) + { + IgniteArgumentCheck.NotNull(keys, "keys"); + + DoOutOp(ClientOp.CacheRemoveKeys, w => w.WriteEnumerable(keys)); + } + + /** */ + public void RemoveAll() + { + DoOutOp(ClientOp.CacheRemoveAll); + } + + /** */ + public long GetSize(params CachePeekMode[] modes) + { + return DoOutInOp(ClientOp.CacheGetSize, w => WritePeekModes(modes, w), s => s.ReadLong()); + } /// /// Does the out in op. @@ -142,7 +352,7 @@ public void Put(TK key, TV val) /// /// Does the out op. /// - private void DoOutOp(ClientOp opId, Action writeAction) + private void DoOutOp(ClientOp opId, Action writeAction = null) { DoOutInOp(opId, writeAction, null); } @@ -164,6 +374,23 @@ private T UnmarshalNotNull(IBinaryStream stream) return _marsh.Unmarshal(stream); } + /// + /// Unmarshals the value, wrapping in a cache result. + /// + private CacheResult UnmarshalCacheResult(IBinaryStream stream) + { + var hdr = stream.ReadByte(); + + if (hdr == BinaryUtils.HdrNull) + { + return new CacheResult(); + } + + stream.Seek(-1, SeekOrigin.Current); + + return new CacheResult(_marsh.Unmarshal(stream)); + } + /// /// Writes the scan query. 
/// @@ -214,5 +441,34 @@ private static KeyNotFoundException GetKeyNotFoundException() { return new KeyNotFoundException("The given key was not present in the cache."); } + + /// + /// Writes the peek modes. + /// + private static void WritePeekModes(ICollection modes, IBinaryRawWriter w) + { + if (modes == null) + { + w.WriteInt(0); + } + else + { + w.WriteInt(modes.Count); + + foreach (var m in modes) + { + // Convert bit flag to ordinal. + byte val = 0; + var flagVal = (int)m; + + while ((flagVal = flagVal >> 1) > 0) + { + val++; + } + + w.WriteByte(val); + } + } + } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientOp.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientOp.cs index c39b68f0de4d1..3511a7927163d 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientOp.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientOp.cs @@ -30,6 +30,25 @@ internal enum ClientOp : short BinaryTypePut = 6, QueryScan = 7, QueryScanCursorGetPage = 8, - ResourceClose = 9 + ResourceClose = 9, + CacheContainsKey = 10, + CacheContainsKeys = 11, + CacheGetAll = 12, + CacheGetAndPut = 13, + CacheGetAndReplace = 14, + CacheGetAndRemove = 15, + CachePutIfAbsent = 16, + CacheGetAndPutIfAbsent = 17, + CacheReplace = 18, + CacheReplaceIfEquals = 19, + CachePutAll = 20, + CacheClear = 21, + CacheClearKey = 22, + CacheClearKeys = 23, + CacheRemoveKey = 24, + CacheRemoveIfEquals = 25, + CacheGetSize = 26, + CacheRemoveKeys = 27, + CacheRemoveAll = 28 } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IgniteUtils.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IgniteUtils.cs index d55960a8446b3..e439208130546 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IgniteUtils.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/IgniteUtils.cs @@ -27,6 +27,7 @@ namespace Apache.Ignite.Core.Impl using System.Reflection; using System.Runtime.InteropServices; using System.Text; + 
using Apache.Ignite.Core.Cache; using Apache.Ignite.Core.Cluster; using Apache.Ignite.Core.Common; using Apache.Ignite.Core.Impl.Binary; @@ -510,5 +511,25 @@ public static List ReadNodes(BinaryReader reader, Func + /// Encodes the peek modes into a single int value. + /// + public static int EncodePeekModes(CachePeekMode[] modes) + { + var res = 0; + + if (modes == null) + { + return res; + } + + foreach (var mode in modes) + { + res |= (int)mode; + } + + return res; + } } } From 1a6bb2bbf513c8a50f81e9f8def1b3f479c08f4b Mon Sep 17 00:00:00 2001 From: Andrey Gura Date: Wed, 27 Sep 2017 13:50:26 +0300 Subject: [PATCH 018/243] ignite-6305 Ignite update checker enabled --- .../processors/cluster/ClusterProcessor.java | 39 ++- .../cluster/GridUpdateNotifier.java | 224 ++++++++---------- .../cluster/HttpIgniteUpdatesChecker.java | 29 +-- .../cluster/GridUpdateNotifierSelfTest.java | 50 +--- 4 files changed, 130 insertions(+), 212 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/ClusterProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/ClusterProcessor.java index dc503fb264838..5f2c66ce7fe59 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/ClusterProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/ClusterProcessor.java @@ -18,7 +18,6 @@ package org.apache.ignite.internal.processors.cluster; import java.io.Serializable; -import java.lang.ref.WeakReference; import java.util.Collection; import java.util.HashMap; import java.util.Map; @@ -30,6 +29,7 @@ import java.util.concurrent.atomic.AtomicReference; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.events.Event; @@ -60,6 +60,7 @@ import 
org.jetbrains.annotations.Nullable; import static org.apache.ignite.IgniteSystemProperties.IGNITE_DIAGNOSTIC_ENABLED; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_UPDATE_NOTIFIER; import static org.apache.ignite.IgniteSystemProperties.getBoolean; import static org.apache.ignite.events.EventType.EVT_NODE_FAILED; import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; @@ -84,7 +85,7 @@ public class ClusterProcessor extends GridProcessorAdapter { private IgniteClusterImpl cluster; /** */ - private final AtomicBoolean notifyEnabled = new AtomicBoolean(false); + private final AtomicBoolean notifyEnabled = new AtomicBoolean(); /** */ @GridToStringExclude @@ -107,6 +108,8 @@ public class ClusterProcessor extends GridProcessorAdapter { public ClusterProcessor(GridKernalContext ctx) { super(ctx); + notifyEnabled.set(IgniteSystemProperties.getBoolean(IGNITE_UPDATE_NOTIFIER, true)); + cluster = new IgniteClusterImpl(ctx); } @@ -297,12 +300,13 @@ private Serializable getDiscoveryData() { /** * @param vals collection to seek through. */ + @SuppressWarnings("unchecked") private Boolean findLastFlag(Collection vals) { Boolean flag = null; for (Serializable ser : vals) { if (ser != null) { - Map map = (Map) ser; + Map map = (Map)ser; if (map.containsKey(ATTR_UPDATE_NOTIFIER_STATUS)) flag = (Boolean) map.get(ATTR_UPDATE_NOTIFIER_STATUS); @@ -316,11 +320,7 @@ private Boolean findLastFlag(Collection vals) { @Override public void onKernalStart(boolean active) throws IgniteCheckedException { if (notifyEnabled.get()) { try { - verChecker = new GridUpdateNotifier(ctx.igniteInstanceName(), - VER_STR, - ctx.gateway(), - ctx.plugins().allProviders(), - false); + verChecker = new GridUpdateNotifier(ctx.igniteInstanceName(), VER_STR, false); updateNtfTimer = new Timer("ignite-update-notifier-timer", true); @@ -473,9 +473,6 @@ private ConcurrentHashMap diagnosticFuturesMap() * Update notifier timer task. 
*/ private static class UpdateNotifierTimerTask extends GridTimerTask { - /** Reference to kernal. */ - private final WeakReference kernalRef; - /** Logger. */ private final IgniteLogger log; @@ -494,10 +491,11 @@ private static class UpdateNotifierTimerTask extends GridTimerTask { * @param kernal Kernal. * @param verChecker Version checker. */ - private UpdateNotifierTimerTask(IgniteKernal kernal, GridUpdateNotifier verChecker, - AtomicBoolean notifyEnabled) { - kernalRef = new WeakReference<>(kernal); - + private UpdateNotifierTimerTask( + IgniteKernal kernal, + GridUpdateNotifier verChecker, + AtomicBoolean notifyEnabled + ) { log = kernal.context().log(UpdateNotifierTimerTask.class); this.verChecker = verChecker; @@ -509,14 +507,7 @@ private UpdateNotifierTimerTask(IgniteKernal kernal, GridUpdateNotifier verCheck if (!notifyEnabled.get()) return; - if (!first) { - IgniteKernal kernal = kernalRef.get(); - - if (kernal != null) - verChecker.topologySize(kernal.cluster().nodes().size()); - } - - verChecker.checkForNewVersion(log); + verChecker.checkForNewVersion(log, first); // Just wait for 10 secs. Thread.sleep(PERIODIC_VER_CHECK_CONN_TIMEOUT); @@ -530,7 +521,7 @@ private UpdateNotifierTimerTask(IgniteKernal kernal, GridUpdateNotifier verCheck // No-op if status is NOT available. 
verChecker.reportStatus(log); - if (first) { + if (first && verChecker.error() == null) { first = false; verChecker.reportOnlyNew(true); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridUpdateNotifier.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridUpdateNotifier.java index eacc42f180738..ff834f6b0fc85 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridUpdateNotifier.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridUpdateNotifier.java @@ -17,34 +17,17 @@ package org.apache.ignite.internal.processors.cluster; -import java.io.BufferedReader; import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.io.UnsupportedEncodingException; -import java.net.URL; -import java.net.URLConnection; -import java.util.Collection; +import java.util.Comparator; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.atomic.AtomicReference; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; -import org.apache.ignite.IgniteSystemProperties; -import org.apache.ignite.internal.GridKernalGateway; import org.apache.ignite.internal.IgniteKernal; -import org.apache.ignite.internal.IgniteProperties; -import org.apache.ignite.internal.util.typedef.F; -import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.util.worker.GridWorker; -import org.apache.ignite.plugin.PluginProvider; import org.jetbrains.annotations.Nullable; -import static java.net.URLEncoder.encode; - /** * This class is responsible for notification about new version availability. *

      @@ -55,8 +38,8 @@ class GridUpdateNotifier { /** Default encoding. */ private static final String CHARSET = "UTF-8"; - /** Access URL to be used to access latest version data. */ - private final String UPD_STATUS_PARAMS = IgniteProperties.get("ignite.update.status.params"); + /** Version comparator. */ + private static final VersionComparator VER_COMPARATOR = new VersionComparator(); /** Throttling for logging out. */ private static final long THROTTLE_PERIOD = 24 * 60 * 60 * 1000; // 1 day. @@ -64,12 +47,18 @@ class GridUpdateNotifier { /** Sleep milliseconds time for worker thread. */ private static final int WORKER_THREAD_SLEEP_TIME = 5000; - /** Default url for request Ignite updates. */ - private final static String DEFAULT_IGNITE_UPDATES_URL = "https://ignite.run/update_status_ignite-plain-text.php"; + /** Default URL for request Ignite updates. */ + private final static String DEFAULT_IGNITE_UPDATES_URL = "https://ignite.apache.org/latest"; + + /** Default HTTP parameter for request Ignite updates. */ + private final static String DEFAULT_IGNITE_UPDATES_PARAMS = "?ver="; /** Grid version. */ private final String ver; + /** Error during obtaining data. */ + private volatile Exception err; + /** Latest version. */ private volatile String latestVer; @@ -82,18 +71,6 @@ class GridUpdateNotifier { /** Whether or not to report only new version. */ private volatile boolean reportOnlyNew; - /** */ - private volatile int topSize; - - /** System properties */ - private final String vmProps; - - /** Plugins information for request */ - private final String pluginsVers; - - /** Kernal gateway */ - private final GridKernalGateway gw; - /** */ private long lastLog = -1; @@ -111,88 +88,50 @@ class GridUpdateNotifier { * * @param igniteInstanceName igniteInstanceName * @param ver Compound Ignite version. - * @param gw Kernal gateway. - * @param pluginProviders Kernal gateway. * @param reportOnlyNew Whether or not to report only new version. 
* @param updatesChecker Service for getting Ignite updates * @throws IgniteCheckedException If failed. */ - GridUpdateNotifier(String igniteInstanceName, String ver, GridKernalGateway gw, Collection pluginProviders, - boolean reportOnlyNew, HttpIgniteUpdatesChecker updatesChecker) throws IgniteCheckedException { - try { - this.ver = ver; - this.igniteInstanceName = igniteInstanceName == null ? "null" : igniteInstanceName; - this.gw = gw; - this.updatesChecker = updatesChecker; - - SB pluginsBuilder = new SB(); - - for (PluginProvider provider : pluginProviders) - pluginsBuilder.a("&").a(provider.name() + "-plugin-version").a("="). - a(encode(provider.version(), CHARSET)); - - pluginsVers = pluginsBuilder.toString(); - - this.reportOnlyNew = reportOnlyNew; - - vmProps = getSystemProperties(); + GridUpdateNotifier( + String igniteInstanceName, + String ver, + boolean reportOnlyNew, + HttpIgniteUpdatesChecker updatesChecker + ) throws IgniteCheckedException { + this.ver = ver; + this.igniteInstanceName = igniteInstanceName == null ? "null" : igniteInstanceName; + this.updatesChecker = updatesChecker; + this.reportOnlyNew = reportOnlyNew; - workerThread = new Thread(new Runnable() { - @Override public void run() { - try { - while(!Thread.currentThread().isInterrupted()) { - Runnable cmd0 = cmd.getAndSet(null); + workerThread = new Thread(new Runnable() { + @Override public void run() { + try { + while(!Thread.currentThread().isInterrupted()) { + Runnable cmd0 = cmd.getAndSet(null); - if (cmd0 != null) - cmd0.run(); - else - Thread.sleep(WORKER_THREAD_SLEEP_TIME); - } - } - catch (InterruptedException ignore) { - // No-op. + if (cmd0 != null) + cmd0.run(); + else + Thread.sleep(WORKER_THREAD_SLEEP_TIME); } } - }, "upd-ver-checker"); + catch (InterruptedException ignore) { + // No-op. 
+ } + } + }, "upd-ver-checker"); - workerThread.setDaemon(true); + workerThread.setDaemon(true); - workerThread.start(); - } - catch (UnsupportedEncodingException e) { - throw new IgniteCheckedException("Failed to encode.", e); - } + workerThread.start(); } /** * Creates new notifier with default Ignite updates URL */ - GridUpdateNotifier(String igniteInstanceName, String ver, GridKernalGateway gw, Collection pluginProviders, - boolean reportOnlyNew) throws IgniteCheckedException { - this(igniteInstanceName, ver, gw, pluginProviders, reportOnlyNew, new HttpIgniteUpdatesChecker(DEFAULT_IGNITE_UPDATES_URL, CHARSET)); - } - - /** - * Gets system properties. - * - * @return System properties. - */ - private static String getSystemProperties() { - try { - StringWriter sw = new StringWriter(); - - try { - IgniteSystemProperties.safeSnapshot().store(new PrintWriter(sw), ""); - } - catch (IOException ignore) { - return null; - } - - return sw.toString(); - } - catch (SecurityException ignore) { - return null; - } + GridUpdateNotifier(String igniteInstanceName, String ver, boolean reportOnlyNew) throws IgniteCheckedException { + this(igniteInstanceName, ver, reportOnlyNew, + new HttpIgniteUpdatesChecker(DEFAULT_IGNITE_UPDATES_URL, DEFAULT_IGNITE_UPDATES_PARAMS + ver, CHARSET)); } /** @@ -203,31 +142,32 @@ void reportOnlyNew(boolean reportOnlyNew) { } /** - * @param topSize Size of topology for license verification purpose. + * @return Latest version. */ - void topologySize(int topSize) { - this.topSize = topSize; + String latestVersion() { + return latestVer; } /** - * @return Latest version. + * @return Error. */ - String latestVersion() { - return latestVer; + Exception error() { + return err; } /** * Starts asynchronous process for retrieving latest version data. * * @param log Logger. + * @param first First checking. 
*/ - void checkForNewVersion(IgniteLogger log) { + void checkForNewVersion(IgniteLogger log, boolean first) { assert log != null; log = log.getLogger(getClass()); try { - cmd.set(new UpdateChecker(log)); + cmd.set(new UpdateChecker(log, first)); } catch (RejectedExecutionException e) { U.error(log, "Failed to schedule a thread due to execution rejection (safely ignoring): " + @@ -250,13 +190,16 @@ void reportStatus(IgniteLogger log) { downloadUrl = downloadUrl != null ? downloadUrl : IgniteKernal.SITE; - if (latestVer != null) - if (latestVer.equals(ver)) { + if (latestVer != null) { + int cmp = VER_COMPARATOR.compare(latestVer, ver); + + if (cmp == 0) { if (!reportOnlyNew) throttle(log, false, "Your version is up to date."); } - else + else if (cmp > 0) throttle(log, true, "New version is available at " + downloadUrl + ": " + latestVer); + } else if (!reportOnlyNew) throttle(log, false, "Update status is not available."); @@ -302,35 +245,29 @@ private class UpdateChecker extends GridWorker { /** Logger. */ private final IgniteLogger log; + /** First. */ + private final boolean first; + /** * Creates checked with given logger. * * @param log Logger. */ - UpdateChecker(IgniteLogger log) { + UpdateChecker(IgniteLogger log, boolean first) { super(igniteInstanceName, "grid-version-checker", log); this.log = log.getLogger(getClass()); + this.first = first; } /** {@inheritDoc} */ @Override protected void body() throws InterruptedException { try { - String stackTrace = gw != null ? gw.userStackTrace() : null; - - String postParams = - "igniteInstanceName=" + encode(igniteInstanceName, CHARSET) + - (!F.isEmpty(UPD_STATUS_PARAMS) ? "&" + UPD_STATUS_PARAMS : "") + - (topSize > 0 ? "&topSize=" + topSize : "") + - (!F.isEmpty(stackTrace) ? "&stackTrace=" + encode(stackTrace, CHARSET) : "") + - (!F.isEmpty(vmProps) ? 
"&vmProps=" + encode(vmProps, CHARSET) : "") + - pluginsVers; - if (!isCancelled()) { try { - String updatesResponse = updatesChecker.getUpdates(postParams); + String updatesRes = updatesChecker.getUpdates(first); - String[] lines = updatesResponse.split("\n"); + String[] lines = updatesRes.split("\n"); for (String line : lines) { if (line.contains("version")) @@ -338,14 +275,20 @@ private class UpdateChecker extends GridWorker { else if (line.contains("downloadUrl")) downloadUrl = obtainDownloadUrlFrom(line); } + + err = null; } catch (IOException e) { + err = e; + if (log.isDebugEnabled()) log.debug("Failed to connect to Ignite update server. " + e.getMessage()); } } } catch (Exception e) { + err = e; + if (log.isDebugEnabled()) log.debug("Unexpected exception in update checker. " + e.getMessage()); } @@ -385,4 +328,35 @@ else if (line.contains("downloadUrl")) return obtainMeta("downloadUrl=", line); } } + + /** + * Ignite version comparator. + */ + private static final class VersionComparator implements Comparator { + /** Dot pattern. */ + private static final String DOT_PATTERN = "\\."; + + /** Dash pattern. 
*/ + private static final String DASH_PATTERN = "-"; + + /** {@inheritDoc} */ + @Override public int compare(String o1, String o2) { + if (o1.equals(o2)) + return 0; + + String[] ver1 = o1.split(DOT_PATTERN, 3); + String[] ver2 = o2.split(DOT_PATTERN, 3); + + assert ver1.length == 3; + assert ver2.length == 3; + + if (Integer.valueOf(ver1[0]) >= Integer.valueOf(ver2[0]) && + Integer.valueOf(ver1[1]) >= Integer.valueOf(ver2[1]) && + Integer.valueOf(ver1[2].split(DASH_PATTERN)[0]) >= Integer.valueOf(ver2[2].split(DASH_PATTERN)[0])) + + return 1; + else + return -1; + } + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/HttpIgniteUpdatesChecker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/HttpIgniteUpdatesChecker.java index 2b93ceb1e44e4..5d6732ee63045 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/HttpIgniteUpdatesChecker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/HttpIgniteUpdatesChecker.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; -import java.io.OutputStream; import java.net.URL; import java.net.URLConnection; @@ -32,6 +31,9 @@ public class HttpIgniteUpdatesChecker { /** Url for request updates. */ private final String url; + /** Params. 
*/ + private final String params; + /** Charset for encoding requests/responses */ private final String charset; @@ -40,43 +42,38 @@ public class HttpIgniteUpdatesChecker { * @param url URL for getting Ignite updates information * @param charset Charset for encoding */ - HttpIgniteUpdatesChecker(String url, String charset) { + HttpIgniteUpdatesChecker(String url, String params, String charset) { this.url = url; + this.params = params; this.charset = charset; } /** * Gets information about Ignite updates via HTTP - * @param updateRequest HTTP Request parameters * @return Information about Ignite updates separated by line endings * @throws IOException If HTTP request was failed */ - public String getUpdates(String updateRequest) throws IOException { - URLConnection conn = new URL(url).openConnection(); - conn.setDoOutput(true); + public String getUpdates(boolean first) throws IOException { + String addr = first ? url + params : url; + + URLConnection conn = new URL(addr).openConnection(); conn.setRequestProperty("Accept-Charset", charset); - conn.setRequestProperty("Content-Type", "application/x-www-form-urlencoded;charset=" + charset); conn.setConnectTimeout(3000); conn.setReadTimeout(3000); - try (OutputStream os = conn.getOutputStream()) { - os.write(updateRequest.getBytes(charset)); - } - try (InputStream in = conn.getInputStream()) { if (in == null) return null; BufferedReader reader = new BufferedReader(new InputStreamReader(in, charset)); - StringBuilder response = new StringBuilder(); + StringBuilder res = new StringBuilder(); - for (String line; (line = reader.readLine()) != null; ) { - response.append(line).append('\n'); - } + for (String line; (line = reader.readLine()) != null; ) + res.append(line).append('\n'); - return response.toString(); + return res.toString(); } } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cluster/GridUpdateNotifierSelfTest.java 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cluster/GridUpdateNotifierSelfTest.java index 1a20f261550ba..f295b2bd649b4 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cluster/GridUpdateNotifierSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cluster/GridUpdateNotifierSelfTest.java @@ -17,19 +17,13 @@ package org.apache.ignite.internal.processors.cluster; -import java.util.Collections; import java.util.Properties; import org.apache.ignite.IgniteSystemProperties; -import org.apache.ignite.internal.GridKernalGateway; -import org.apache.ignite.internal.GridKernalState; import org.apache.ignite.internal.IgniteProperties; -import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteProductVersion; -import org.apache.ignite.plugin.PluginProvider; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.apache.ignite.testframework.junits.common.GridCommonTest; -import org.mockito.Matchers; import org.mockito.Mockito; /** @@ -78,13 +72,12 @@ public void testNotifier() throws Exception { HttpIgniteUpdatesChecker updatesCheckerMock = Mockito.mock(HttpIgniteUpdatesChecker.class); // Return current node version and some other info - Mockito.when(updatesCheckerMock.getUpdates(Matchers.anyString())) + Mockito.when(updatesCheckerMock.getUpdates(true)) .thenReturn("meta=meta" + "\n" + "version=" + nodeVer + "\n" + "downloadUrl=url"); - GridUpdateNotifier ntf = new GridUpdateNotifier(null, nodeVer, - TEST_GATEWAY, Collections.emptyList(), false, updatesCheckerMock); + GridUpdateNotifier ntf = new GridUpdateNotifier(null, nodeVer, false, updatesCheckerMock); - ntf.checkForNewVersion(log); + ntf.checkForNewVersion(log, true); String ver = ntf.latestVersion(); @@ -108,41 +101,4 @@ public void testNotifier() throws Exception { ntf.reportStatus(log); } - - /** - * Test 
kernal gateway that always return uninitialized user stack trace. - */ - private static final GridKernalGateway TEST_GATEWAY = new GridKernalGateway() { - @Override public void readLock() throws IllegalStateException {} - - @Override public void readLockAnyway() {} - - @Override public void setState(GridKernalState state) {} - - @Override public GridKernalState getState() { - return null; - } - - @Override public void readUnlock() {} - - @Override public void writeLock() {} - - @Override public void writeUnlock() {} - - @Override public String userStackTrace() { - return null; - } - - @Override public boolean tryWriteLock(long timeout) { - return false; - } - - @Override public GridFutureAdapter onDisconnected() { - return null; - } - - @Override public void onReconnected() { - // No-op. - } - }; } From 72b409cc233919462b3df1547afdec082d8273e1 Mon Sep 17 00:00:00 2001 From: Alexander Paschenko Date: Mon, 9 Oct 2017 22:17:28 +0300 Subject: [PATCH 019/243] IGNITE-6569: Fixed hang when trying to execute "DROP TABLE" on the cache this table belongs to. This closes #2823. 
--- .../processors/query/GridQueryProcessor.java | 17 ++++++++++++++ .../cache/index/H2DynamicTableSelfTest.java | 23 +++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java index f044c1d4dd4df..0d8ee4785b5c1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java @@ -70,6 +70,7 @@ import org.apache.ignite.internal.processors.cache.query.CacheQueryFuture; import org.apache.ignite.internal.processors.cache.query.CacheQueryType; import org.apache.ignite.internal.processors.cache.query.GridCacheQueryType; +import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.processors.query.property.QueryBinaryProperty; import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitor; @@ -186,6 +187,9 @@ public class GridQueryProcessor extends GridProcessorAdapter { /** Pending status messages. */ private final LinkedList pendingMsgs = new LinkedList<>(); + /** Current cache that has a query running on it. */ + private final ThreadLocal curCache = new ThreadLocal<>(); + /** Disconnected flag. 
*/ private boolean disconnected; @@ -1440,6 +1444,13 @@ else if (QueryUtils.TEMPLATE_REPLICÄTED.equalsIgnoreCase(templateName)) */ @SuppressWarnings("unchecked") public void dynamicTableDrop(String cacheName, String tblName, boolean ifExists) throws SchemaOperationException { + GridCacheContext currCache = this.curCache.get(); + + if (currCache != null && F.eq(currCache.name(), cacheName)) + throw new IgniteSQLException("DROP TABLE cannot be called from the same cache that holds " + + "the table being dropped [cacheName-" + cacheName + ", tblName=" + tblName + ']', + IgniteQueryErrorCode.UNSUPPORTED_OPERATION); + boolean res = ctx.grid().destroyCache0(cacheName, true); if (!res && !ifExists) @@ -1853,6 +1864,10 @@ public FieldsQueryCursor> querySqlFields(final GridCacheContext cct if (!busyLock.enterBusy()) throw new IllegalStateException("Failed to execute query (grid is stopping)."); + GridCacheContext oldCctx = curCache.get(); + + curCache.set(cctx); + try { final String schemaName = qry.getSchema() != null ? qry.getSchema() : idx.schema(cctx.name()); final int mainCacheId = CU.cacheId(cctx.name()); @@ -1898,6 +1913,8 @@ public FieldsQueryCursor> querySqlFields(final GridCacheContext cct throw new CacheException(e); } finally { + curCache.set(oldCctx); + busyLock.leaveBusy(); } } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java index e0ab6c55bb506..b108bb3f71c10 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java @@ -651,6 +651,29 @@ public void testDropTable() throws Exception { } } + /** + * Test that attempting to execute {@code DROP TABLE} via API of cache being dropped yields an error. 
+ * @throws Exception if failed. + */ + @SuppressWarnings("ThrowableResultOfMethodCallIgnored") + public void testCacheSelfDrop() throws Exception { + execute("CREATE TABLE IF NOT EXISTS \"Person\" (\"id\" int, \"city\" varchar," + + " \"name\" varchar, \"surname\" varchar, \"age\" int, PRIMARY KEY (\"id\", \"city\")) WITH " + + "\"template=cache\""); + + GridTestUtils.assertThrows(null, new Callable() { + @Override public Object call() throws Exception { + client().cache(QueryUtils.createTableCacheName(QueryUtils.DFLT_SCHEMA, "Person")) + .query(new SqlFieldsQuery("DROP TABLE \"Person\"")).getAll(); + + return null; + } + }, IgniteSQLException.class, "DROP TABLE cannot be called from the same cache that holds the table " + + "being dropped"); + + execute("DROP TABLE \"Person\""); + } + /** * Test that attempting to {@code DROP TABLE} that does not exist does not yield an error if the statement contains * {@code IF EXISTS} clause. From ab384d4b1692538eca6de81c04ec9b720a73308e Mon Sep 17 00:00:00 2001 From: Alexander Paschenko Date: Tue, 10 Oct 2017 10:05:12 +0300 Subject: [PATCH 020/243] IGNITE-6568: Fixed cache configuration persistence logic. This closes #2815. 
--- .../cache/CacheAffinitySharedManager.java | 15 ++++++++------- .../processors/cache/ClusterCachesInfo.java | 1 + .../processors/query/GridQueryProcessor.java | 2 ++ .../IgnitePersistentStoreSchemaLoadTest.java | 2 ++ 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java index 741e204449aad..a413adea04665 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java @@ -2500,7 +2500,7 @@ void init(Map grps, Map cfg) { + private void saveCacheConfiguration(CacheConfiguration cfg, boolean sql) { if (cctx.pageStore() != null && cctx.database().persistenceEnabled() && !cctx.kernalContext().clientNode()) { try { - cctx.pageStore().storeCacheData( - new StoredCacheData(cfg), - false); + StoredCacheData data = new StoredCacheData(cfg); + + data.sql(sql); + + cctx.pageStore().storeCacheData(data, false); } catch (IgniteCheckedException e) { U.error(log(), "Error while saving cache configuration on disk, cfg = " + cfg, e); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java index 5e2c8db939a1e..b4cc9c550eec1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java @@ -1219,6 +1219,7 @@ public ExchangeActions onStateChangeRequest(ChangeGlobalStateMessage msg, Affini req.startCacheConfiguration(ccfg); req.cacheType(ctx.cache().cacheType(ccfg.getName())); req.schema(new 
QuerySchema(storedCfg.queryEntities())); + req.sql(storedCfg.sql()); reqs.add(req); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java index 0d8ee4785b5c1..3a1cdb7cc16a8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java @@ -2564,6 +2564,8 @@ private void saveCacheConfiguration(DynamicCacheDescriptor desc) { if (desc.schema() != null) data.queryEntities(desc.schema().entities()); + data.sql(desc.sql()); + cctx.pageStore().storeCacheData(data, true); } catch (IgniteCheckedException e) { diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreSchemaLoadTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreSchemaLoadTest.java index 85ce4d8986e43..a408596ca9291 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreSchemaLoadTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreSchemaLoadTest.java @@ -208,6 +208,8 @@ private void checkSchemaStateAfterNodeRestart(boolean aliveCluster) throws Excep node.active(true); checkDynamicSchemaChanges(node, SQL_CACHE_NAME); + + node.context().query().querySqlFieldsNoCache(new SqlFieldsQuery("drop table \"Person\""), false).getAll(); } /** */ From f06927d13303b780bcb9865a3eea9c95b926a25e Mon Sep 17 00:00:00 2001 From: Sergey Chugunov Date: Mon, 9 Oct 2017 18:35:11 +0300 Subject: [PATCH 021/243] IGNITE-6583 Proper getters for rebalance metrics were added; ignite-style getters (without get) were deprecated Signed-off-by: Andrey Gura --- .../org/apache/ignite/cache/CacheMetrics.java | 20 
+++++++++++++++++++ .../cache/CacheClusterMetricsMXBeanImpl.java | 10 ++++++++++ .../cache/CacheLocalMetricsMXBeanImpl.java | 10 ++++++++++ .../processors/cache/CacheMetricsImpl.java | 12 ++++++++++- .../cache/CacheMetricsSnapshot.java | 10 ++++++++++ .../CacheGroupsMetricsRebalanceTest.java | 6 +++--- .../PlatformCacheWriteMetricsTask.java | 10 ++++++++++ 7 files changed, 74 insertions(+), 4 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java b/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java index 20ea692f33198..fe789ca7c5720 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java @@ -506,15 +506,35 @@ public interface CacheMetrics { public long getRebalancingBytesRate(); /** + * This method is deprecated and will be deleted in future major release. + * + * Use {@link #getEstimatedRebalancingFinishTime()} instead. + * * @return Estimated rebalancing finished time. */ + @Deprecated public long estimateRebalancingFinishTime(); /** + * This method is deprecated and will be deleted in future major release. + * + * Use {@link #getRebalancingStartTime()} instead. + * * @return Rebalancing start time. */ + @Deprecated public long rebalancingStartTime(); + /** + * @return Estimated rebalancing finish time. + */ + public long getEstimatedRebalancingFinishTime(); + + /** + * @return Rebalancing start time. + */ + public long getRebalancingStartTime(); + /** * Checks whether statistics collection is enabled in this cache. *

      diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheClusterMetricsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheClusterMetricsMXBeanImpl.java index df4a6ab7d9b22..1611840958eff 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheClusterMetricsMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheClusterMetricsMXBeanImpl.java @@ -392,4 +392,14 @@ class CacheClusterMetricsMXBeanImpl implements CacheMetricsMXBean { @Override public long rebalancingStartTime() { return cache.clusterMetrics().rebalancingStartTime(); } + + /** {@inheritDoc} */ + @Override public long getEstimatedRebalancingFinishTime() { + return cache.clusterMetrics().getEstimatedRebalancingFinishTime(); + } + + /** {@inheritDoc} */ + @Override public long getRebalancingStartTime() { + return cache.clusterMetrics().getRebalancingStartTime(); + } } \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLocalMetricsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLocalMetricsMXBeanImpl.java index a7671931c40d7..4a8c25c784a21 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLocalMetricsMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLocalMetricsMXBeanImpl.java @@ -392,4 +392,14 @@ class CacheLocalMetricsMXBeanImpl implements CacheMetricsMXBean { @Override public long rebalancingStartTime() { return cache.metrics0().rebalancingStartTime(); } + + /** {@inheritDoc} */ + @Override public long getEstimatedRebalancingFinishTime() { + return cache.metrics0().getEstimatedRebalancingFinishTime(); + } + + /** {@inheritDoc} */ + @Override public long getRebalancingStartTime() { + return cache.metrics0().getRebalancingStartTime(); + } } \ No newline at end 
of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsImpl.java index 413b60df36df9..df189cf70620a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsImpl.java @@ -808,6 +808,16 @@ public void startRebalance(long delay){ /** {@inheritDoc} */ @Override public long estimateRebalancingFinishTime() { + return getEstimatedRebalancingFinishTime(); + } + + /** {@inheritDoc} */ + @Override public long rebalancingStartTime() { + return rebalanceStartTime.get(); + } + + /** {@inheritDoc} */ + @Override public long getEstimatedRebalancingFinishTime() { long rate = rebalancingKeysRate.getRate(); return rate <= 0 ? -1L : @@ -815,7 +825,7 @@ public void startRebalance(long delay){ } /** {@inheritDoc} */ - @Override public long rebalancingStartTime() { + @Override public long getRebalancingStartTime() { return rebalanceStartTime.get(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsSnapshot.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsSnapshot.java index 2d38db8a02648..9590c8876789a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsSnapshot.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsSnapshot.java @@ -733,6 +733,16 @@ public CacheMetricsSnapshot(CacheMetrics loc, Collection metrics) return rebalanceStartTime; } + /** {@inheritDoc} */ + @Override public long getEstimatedRebalancingFinishTime() { + return rebalanceFinishTime; + } + + /** {@inheritDoc} */ + @Override public long getRebalancingStartTime() { + return rebalanceStartTime; + } + /** {@inheritDoc} */ @Override public boolean isWriteBehindEnabled() { 
return isWriteBehindEnabled; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheGroupsMetricsRebalanceTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheGroupsMetricsRebalanceTest.java index a49ce6145df95..eb7c3a8e619a8 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheGroupsMetricsRebalanceTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheGroupsMetricsRebalanceTest.java @@ -189,13 +189,13 @@ public void testRebalanceEstimateFinishTime() throws Exception { waitForCondition(new PA() { @Override public boolean apply() { - return ig2.cache(CACHE1).localMetrics().rebalancingStartTime() != -1L; + return ig2.cache(CACHE1).localMetrics().getRebalancingStartTime() != -1L; } }, 5_000); CacheMetrics metrics = ig2.cache(CACHE1).localMetrics(); - long startTime = metrics.rebalancingStartTime(); + long startTime = metrics.getRebalancingStartTime(); assertTrue(startTime > 0); assertTrue((U.currentTimeMillis() - startTime) < 5000); @@ -235,7 +235,7 @@ public void testRebalanceEstimateFinishTime() throws Exception { latch.await(); - long finishTime = ig2.cache(CACHE1).localMetrics().estimateRebalancingFinishTime(); + long finishTime = ig2.cache(CACHE1).localMetrics().getEstimatedRebalancingFinishTime(); assertTrue(finishTime > 0); diff --git a/modules/core/src/test/java/org/apache/ignite/platform/PlatformCacheWriteMetricsTask.java b/modules/core/src/test/java/org/apache/ignite/platform/PlatformCacheWriteMetricsTask.java index 64ff0bc7a6ddc..e7404faba1667 100644 --- a/modules/core/src/test/java/org/apache/ignite/platform/PlatformCacheWriteMetricsTask.java +++ b/modules/core/src/test/java/org/apache/ignite/platform/PlatformCacheWriteMetricsTask.java @@ -443,6 +443,16 @@ private static class TestCacheMetrics implements CacheMetrics { @Override public long rebalancingStartTime() { return 61; } + + /** {@inheritDoc} */ + @Override 
public long getEstimatedRebalancingFinishTime() { + return 62; + } + + /** {@inheritDoc} */ + @Override public long getRebalancingStartTime() { + return 63; + } } } From 4a782fb8b8f10917addecd70af77049719f908bd Mon Sep 17 00:00:00 2001 From: Alexander Belyak Date: Tue, 10 Oct 2017 15:09:30 +0300 Subject: [PATCH 022/243] IGNITE-5733: Fixed failures in JerryRestProcessorAbstractSelfTest. --- .../processors/rest/JettyRestProcessorAbstractSelfTest.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java index b2725b865a7f7..13613efde6e85 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java @@ -711,12 +711,12 @@ public void testGetAndReplace() throws Exception { * @throws Exception If failed. */ public void testDeactivateActivate() throws Exception { - assertClusterState(true); changeClusterState(false); - changeClusterState(true); + + initCache(); } /** From f0c64e7288ef94c84984a5e7a70f7038a539c6cd Mon Sep 17 00:00:00 2001 From: devozerov Date: Tue, 10 Oct 2017 17:54:50 +0300 Subject: [PATCH 023/243] IGNITE-6588: SQL: optimized index segment resolution. This closes #2825. 
--- .../query/h2/opt/GridH2IndexBase.java | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java index 6568f1302c752..048192a3407ba 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java @@ -804,26 +804,23 @@ protected int segmentForPartition(int partition){ protected int segmentForRow(SearchRow row) { assert row != null; - CacheObject key; - - if (ctx != null) { - final Value keyColValue = row.getValue(KEY_COL); + if (segmentsCount() == 1 || ctx == null) + return 0; - assert keyColValue != null; + CacheObject key; - final Object o = keyColValue.getObject(); + final Value keyColValue = row.getValue(KEY_COL); - if (o instanceof CacheObject) - key = (CacheObject)o; - else - key = ctx.toCacheKeyObject(o); + assert keyColValue != null; - return segmentForPartition(ctx.affinity().partition(key)); - } + final Object o = keyColValue.getObject(); - assert segmentsCount() == 1; + if (o instanceof CacheObject) + key = (CacheObject)o; + else + key = ctx.toCacheKeyObject(o); - return 0; + return segmentForPartition(ctx.affinity().partition(key)); } /** From be91bbf8bcb7e9c71fe6d3bea0f79763f9606558 Mon Sep 17 00:00:00 2001 From: Krzysztof Chmielewski Date: Tue, 10 Oct 2017 17:50:59 +0300 Subject: [PATCH 024/243] Fixed "IGNITE-6234 Initialize schemaIds to empty set if schemas field is null during the deserialization". 
Signed-off-by: nikolay_tikhonov --- .../org/apache/ignite/internal/binary/BinaryMetadata.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMetadata.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMetadata.java index ead00b72418ea..4c3448f26e6f6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMetadata.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMetadata.java @@ -186,6 +186,9 @@ public Collection schemas() { * @return {@code true} if BinaryMetadata instance has schema with ID specified, {@code false} otherwise. */ public boolean hasSchema(int schemaId) { + if (schemaIds == null) + return false; + return schemaIds.contains(schemaId); } @@ -304,8 +307,11 @@ public void readFrom(DataInput in) throws IOException { int schemasSize = in.readInt(); - if (schemasSize == -1) + if (schemasSize == -1) { schemas = null; + + schemaIds = Collections.emptySet(); + } else { schemas = new ArrayList<>(); From 08389601728512dc4e7fa5b953f5afe34ae4506f Mon Sep 17 00:00:00 2001 From: AMRepo Date: Tue, 10 Oct 2017 11:57:20 +0300 Subject: [PATCH 025/243] IGNITE-6545: Failure during Ignite Service.cancel() can break normal shutdown process. This closes #2807. 
(cherry picked from commit 8ffa109) --- .../processors/service/GridServiceProcessor.java | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java index 9272760404bf0..6f1dfc7679af9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java @@ -316,7 +316,16 @@ public void updateUtilityCache() { Service svc = ctx.service(); if (svc != null) - svc.cancel(ctx); + try { + svc.cancel(ctx); + } + catch (Throwable e) { + log.error("Failed to cancel service (ignoring) [name=" + ctx.name() + + ", execId=" + ctx.executionId() + ']', e); + + if (e instanceof Error) + throw e; + } ctx.executor().shutdownNow(); } From 0df4bf00e52ace07c4f3adf8b4e463e2e63167b2 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Thu, 12 Oct 2017 22:48:35 +0700 Subject: [PATCH 026/243] IGNITE-6127 Fixed bytes encoding. 
(cherry picked from commit 0f3f7d2) --- .../ignite/console/agent/handlers/AbstractListener.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/handlers/AbstractListener.java b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/handlers/AbstractListener.java index 2eec89b8317c3..fc4daef09d68c 100644 --- a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/handlers/AbstractListener.java +++ b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/handlers/AbstractListener.java @@ -20,6 +20,7 @@ import io.socket.client.Ack; import io.socket.emitter.Emitter; import java.io.ByteArrayOutputStream; +import java.nio.charset.Charset; import java.util.Arrays; import java.util.Collections; import java.util.Map; @@ -39,6 +40,9 @@ * Base class for web socket handlers. */ abstract class AbstractListener implements Emitter.Listener { + /** UTF8 charset. */ + private static final Charset UTF8 = Charset.forName("UTF-8"); + /** */ private ExecutorService pool; @@ -81,7 +85,7 @@ else if (args.length == 1) Base64OutputStream b64os = new Base64OutputStream(baos); GZIPOutputStream gzip = new GZIPOutputStream(b64os); - gzip.write(restRes.getData().getBytes()); + gzip.write(restRes.getData().getBytes(UTF8)); gzip.close(); From 76fd90b000bdcc05bc3299f5ceb99525b60b0099 Mon Sep 17 00:00:00 2001 From: Sergey Kalashnikov Date: Fri, 13 Oct 2017 12:29:53 +0300 Subject: [PATCH 027/243] IGNITE-6024: SQL: Implemented "skipReducerOnUpdate" flag. This closes #2488. 
--- .../jdbc2/JdbcConnectionSelfTest.java | 13 +- .../jdbc/suite/IgniteJdbcDriverTestSuite.java | 11 + .../JdbcThinAbstractDmlStatementSelfTest.java | 14 +- .../thin/JdbcThinComplexDmlDdlSelfTest.java | 10 +- ...plexDmlDdlSkipReducerOnUpdateSelfTest.java | 33 + .../jdbc/thin/JdbcThinConnectionSelfTest.java | 18 +- .../thin/JdbcThinInsertStatementSelfTest.java | 1 - ...tStatementSkipReducerOnUpdateSelfTest.java | 33 + ...eStatementSkipReducerOnUpdateSelfTest.java | 33 + ...eStatementSkipReducerOnUpdateSelfTest.java | 33 + .../ignite/codegen/MessageCodeGenerator.java | 2 + .../org/apache/ignite/IgniteJdbcDriver.java | 9 +- .../apache/ignite/IgniteJdbcThinDriver.java | 3 +- .../ignite/cache/query/SqlFieldsQuery.java | 7 + .../jdbc/thin/JdbcThinConnection.java | 4 +- .../internal/jdbc/thin/JdbcThinTcpIo.java | 15 +- .../internal/jdbc/thin/JdbcThinUtils.java | 6 + .../internal/jdbc2/JdbcBatchUpdateTask.java | 3 +- .../ignite/internal/jdbc2/JdbcConnection.java | 14 +- .../JdbcQueryMultipleStatementsTask.java | 3 +- .../ignite/internal/jdbc2/JdbcQueryTask.java | 10 +- .../internal/jdbc2/JdbcQueryTaskV3.java | 19 +- .../ignite/internal/jdbc2/JdbcResultSet.java | 2 +- .../internal/jdbc2/JdbcSqlFieldsQuery.java | 105 --- .../ignite/internal/jdbc2/JdbcStatement.java | 4 +- .../cache/query/GridCacheSqlQuery.java | 24 + .../cache/query/SqlFieldsQueryEx.java | 158 ++++ .../odbc/jdbc/JdbcConnectionContext.java | 7 +- .../odbc/jdbc/JdbcRequestHandler.java | 19 +- .../odbc/odbc/OdbcConnectionContext.java | 13 +- .../odbc/odbc/OdbcRequestHandler.java | 14 +- .../resources/META-INF/classnames.properties | 4 +- .../query/h2/DmlStatementsProcessor.java | 160 +++- .../processors/query/h2/H2DmlPlanKey.java | 21 +- .../processors/query/h2/IgniteH2Indexing.java | 116 ++- .../processors/query/h2/UpdateResult.java | 63 ++ .../processors/query/h2/dml/UpdatePlan.java | 64 +- .../query/h2/dml/UpdatePlanBuilder.java | 117 ++- .../query/h2/sql/GridSqlQuerySplitter.java | 33 + 
.../h2/twostep/DistributedUpdateRun.java | 133 +++ .../h2/twostep/GridMapQueryExecutor.java | 136 +++ .../h2/twostep/GridReduceQueryExecutor.java | 294 ++++++- .../query/h2/twostep/MapNodeResults.java | 33 + .../h2/twostep/msg/GridH2DmlRequest.java | 516 ++++++++++++ .../h2/twostep/msg/GridH2DmlResponse.java | 250 ++++++ .../msg/GridH2ValueMessageFactory.java | 6 + ...SqlSkipReducerOnUpdateDmlFlagSelfTest.java | 783 ++++++++++++++++++ ...niteSqlSkipReducerOnUpdateDmlSelfTest.java | 755 +++++++++++++++++ .../IgniteCacheQuerySelfTestSuite.java | 4 + .../cpp/odbc-test/src/configuration_test.cpp | 25 +- .../cpp/odbc-test/src/queries_test.cpp | 8 + .../ignite/odbc/config/configuration.h | 26 + .../cpp/odbc/include/ignite/odbc/message.h | 6 +- .../include/ignite/odbc/protocol_version.h | 1 + .../odbc/system/ui/dsn_configuration_window.h | 4 + .../system/ui/dsn_configuration_window.cpp | 20 + .../cpp/odbc/src/config/configuration.cpp | 50 +- modules/platforms/cpp/odbc/src/connection.cpp | 5 +- modules/platforms/cpp/odbc/src/dsn_config.cpp | 4 + modules/platforms/cpp/odbc/src/message.cpp | 12 +- .../cpp/odbc/src/protocol_version.cpp | 6 +- 61 files changed, 3999 insertions(+), 296 deletions(-) create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest.java create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinInsertStatementSkipReducerOnUpdateSelfTest.java create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMergeStatementSkipReducerOnUpdateSelfTest.java create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest.java delete mode 100644 modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcSqlFieldsQuery.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/SqlFieldsQueryEx.java create mode 100644 
modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/UpdateResult.java create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/DistributedUpdateRun.java create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2DmlRequest.java create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2DmlResponse.java create mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest.java create mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSkipReducerOnUpdateDmlSelfTest.java diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcConnectionSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcConnectionSelfTest.java index aeb7c76113bd2..35d0fba0aa83b 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcConnectionSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcConnectionSelfTest.java @@ -31,7 +31,6 @@ import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; import static org.apache.ignite.IgniteJdbcDriver.CFG_URL_PREFIX; @@ -315,6 +314,7 @@ public void testSqlHints() throws Exception { assertFalse(((JdbcConnection)conn).isDistributedJoins()); assertFalse(((JdbcConnection)conn).isCollocatedQuery()); assertFalse(((JdbcConnection)conn).isLazy()); + assertFalse(((JdbcConnection)conn).skipReducerOnUpdate()); } try (final Connection conn = DriverManager.getConnection(CFG_URL_PREFIX + "distributedJoins=true@" @@ -323,6 +323,7 @@ public void testSqlHints() throws Exception { 
assertTrue(((JdbcConnection)conn).isDistributedJoins()); assertFalse(((JdbcConnection)conn).isCollocatedQuery()); assertFalse(((JdbcConnection)conn).isLazy()); + assertFalse(((JdbcConnection)conn).skipReducerOnUpdate()); } try (final Connection conn = DriverManager.getConnection(CFG_URL_PREFIX + "collocated=true@" @@ -331,6 +332,7 @@ public void testSqlHints() throws Exception { assertFalse(((JdbcConnection)conn).isDistributedJoins()); assertTrue(((JdbcConnection)conn).isCollocatedQuery()); assertFalse(((JdbcConnection)conn).isLazy()); + assertFalse(((JdbcConnection)conn).skipReducerOnUpdate()); } try (final Connection conn = DriverManager.getConnection(CFG_URL_PREFIX + "lazy=true@" + configURL())) { @@ -338,6 +340,15 @@ public void testSqlHints() throws Exception { assertFalse(((JdbcConnection)conn).isDistributedJoins()); assertFalse(((JdbcConnection)conn).isCollocatedQuery()); assertTrue(((JdbcConnection)conn).isLazy()); + assertFalse(((JdbcConnection)conn).skipReducerOnUpdate()); + } + try (final Connection conn = DriverManager.getConnection(CFG_URL_PREFIX + "skipReducerOnUpdate=true@" + + configURL())) { + assertFalse(((JdbcConnection)conn).isEnforceJoinOrder()); + assertFalse(((JdbcConnection)conn).isDistributedJoins()); + assertFalse(((JdbcConnection)conn).isCollocatedQuery()); + assertFalse(((JdbcConnection)conn).isLazy()); + assertTrue(((JdbcConnection)conn).skipReducerOnUpdate()); } } } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java index 1ae2427e0a0c9..bec388a0a9bb4 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java @@ -58,6 +58,10 @@ import org.apache.ignite.jdbc.thin.JdbcThinSelectAfterAlterTable; import org.apache.ignite.jdbc.thin.JdbcThinStatementSelfTest; 
import org.apache.ignite.jdbc.thin.JdbcThinUpdateStatementSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinInsertStatementSkipReducerOnUpdateSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinMergeStatementSkipReducerOnUpdateSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest; /** * JDBC driver test suite. @@ -152,6 +156,13 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(JdbcThinSelectAfterAlterTable.class)); + // Update on server + suite.addTest(new TestSuite(JdbcThinInsertStatementSkipReducerOnUpdateSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinMergeStatementSkipReducerOnUpdateSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest.class)); + + return suite; } } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractDmlStatementSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractDmlStatementSelfTest.java index afe5e2e4b79b1..69435daebda88 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractDmlStatementSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractDmlStatementSelfTest.java @@ -20,6 +20,7 @@ import java.io.Serializable; import java.sql.Connection; import java.sql.DriverManager; +import java.sql.SQLException; import java.util.Collections; import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.cache.query.annotations.QuerySqlField; @@ -42,9 +43,6 @@ public abstract class JdbcThinAbstractDmlStatementSelfTest extends JdbcThinAbstr /** IP finder. */ private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); - /** URL. 
*/ - private static final String URL = "jdbc:ignite:thin://127.0.0.1/"; - /** SQL SELECT query for verification. */ static final String SQL_SELECT = "select _key, id, firstName, lastName, age from Person"; @@ -67,7 +65,7 @@ public abstract class JdbcThinAbstractDmlStatementSelfTest extends JdbcThinAbstr @Override protected void beforeTest() throws Exception { ignite(0).getOrCreateCache(cacheConfig()); - conn = DriverManager.getConnection(URL); + conn = createConnection(); conn.setSchema('"' + DEFAULT_CACHE_NAME + '"'); } @@ -81,6 +79,14 @@ public abstract class JdbcThinAbstractDmlStatementSelfTest extends JdbcThinAbstr assertTrue(conn.isClosed()); } + /** + * @return JDBC connection. + * @throws SQLException On error. + */ + protected Connection createConnection() throws SQLException { + return DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1/"); + } + /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { return getConfiguration0(igniteInstanceName); diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java index 07601077a5a60..d4e03bc417908 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java @@ -93,6 +93,14 @@ private CacheConfiguration cacheConfiguration(@NotNull String name) throws Excep return cfg; } + /** + * @return JDBC connection. + * @throws SQLException On error. 
+ */ + protected Connection createConnection() throws SQLException { + return DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1"); + } + /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { super.beforeTestsStarted(); @@ -109,7 +117,7 @@ private CacheConfiguration cacheConfiguration(@NotNull String name) throws Excep @Override protected void beforeTest() throws Exception { super.beforeTest(); - conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1"); + conn = createConnection(); } /** {@inheritDoc} */ diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest.java new file mode 100644 index 0000000000000..7ae64792634c3 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import org.apache.ignite.jdbc.thin.JdbcThinComplexDmlDdlSelfTest; + +/** + * Base class for complex SQL tests based on JDBC driver. + */ +public class JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest extends JdbcThinComplexDmlDdlSelfTest { + /** {@inheritDoc} */ + @Override protected Connection createConnection() throws SQLException { + return DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?skipReducerOnUpdate=true"); + } +} \ No newline at end of file diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java index fbbec0d73f885..7f67136c202b3 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java @@ -187,6 +187,7 @@ public void testSqlHints() throws Exception { assertFalse(io(conn).collocated()); assertFalse(io(conn).replicatedOnly()); assertFalse(io(conn).lazy()); + assertFalse(io(conn).skipReducerOnUpdate()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?distributedJoins=true")) { @@ -195,6 +196,7 @@ public void testSqlHints() throws Exception { assertFalse(io(conn).collocated()); assertFalse(io(conn).replicatedOnly()); assertFalse(io(conn).lazy()); + assertFalse(io(conn).skipReducerOnUpdate()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?enforceJoinOrder=true")) { @@ -203,6 +205,7 @@ public void testSqlHints() throws Exception { assertFalse(io(conn).collocated()); assertFalse(io(conn).replicatedOnly()); assertFalse(io(conn).lazy()); + assertFalse(io(conn).skipReducerOnUpdate()); } try (Connection conn = 
DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?collocated=true")) { @@ -211,6 +214,7 @@ public void testSqlHints() throws Exception { assertTrue(io(conn).collocated()); assertFalse(io(conn).replicatedOnly()); assertFalse(io(conn).lazy()); + assertFalse(io(conn).skipReducerOnUpdate()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?replicatedOnly=true")) { @@ -219,6 +223,7 @@ public void testSqlHints() throws Exception { assertFalse(io(conn).collocated()); assertTrue(io(conn).replicatedOnly()); assertFalse(io(conn).lazy()); + assertFalse(io(conn).skipReducerOnUpdate()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?lazy=true")) { @@ -227,15 +232,26 @@ public void testSqlHints() throws Exception { assertFalse(io(conn).collocated()); assertFalse(io(conn).replicatedOnly()); assertTrue(io(conn).lazy()); + assertFalse(io(conn).skipReducerOnUpdate()); + } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?skipReducerOnUpdate=true")) { + assertFalse(io(conn).distributedJoins()); + assertFalse(io(conn).enforceJoinOrder()); + assertFalse(io(conn).collocated()); + assertFalse(io(conn).replicatedOnly()); + assertFalse(io(conn).lazy()); + assertTrue(io(conn).skipReducerOnUpdate()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?distributedJoins=true&" + - "enforceJoinOrder=true&collocated=true&replicatedOnly=true&lazy=true")) { + "enforceJoinOrder=true&collocated=true&replicatedOnly=true&lazy=true&skipReducerOnUpdate=true")) { assertTrue(io(conn).distributedJoins()); assertTrue(io(conn).enforceJoinOrder()); assertTrue(io(conn).collocated()); assertTrue(io(conn).replicatedOnly()); assertTrue(io(conn).lazy()); + assertTrue(io(conn).skipReducerOnUpdate()); } } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinInsertStatementSelfTest.java 
b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinInsertStatementSelfTest.java index 8ab5760e7c6f5..bf55da0879eb2 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinInsertStatementSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinInsertStatementSelfTest.java @@ -24,7 +24,6 @@ import java.util.Arrays; import java.util.HashSet; import java.util.concurrent.Callable; -import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.testframework.GridTestUtils; /** diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinInsertStatementSkipReducerOnUpdateSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinInsertStatementSkipReducerOnUpdateSelfTest.java new file mode 100644 index 0000000000000..d99639fa1b8d3 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinInsertStatementSkipReducerOnUpdateSelfTest.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import org.apache.ignite.jdbc.thin.JdbcThinInsertStatementSelfTest; + +/** + * Statement test. + */ +public class JdbcThinInsertStatementSkipReducerOnUpdateSelfTest extends JdbcThinInsertStatementSelfTest { + /** {@inheritDoc} */ + @Override protected Connection createConnection() throws SQLException { + return DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?skipReducerOnUpdate=true"); + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMergeStatementSkipReducerOnUpdateSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMergeStatementSkipReducerOnUpdateSelfTest.java new file mode 100644 index 0000000000000..0832fb711d3a7 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMergeStatementSkipReducerOnUpdateSelfTest.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import org.apache.ignite.jdbc.thin.JdbcThinMergeStatementSelfTest; + +/** + * MERGE statement test. + */ +public class JdbcThinMergeStatementSkipReducerOnUpdateSelfTest extends JdbcThinMergeStatementSelfTest { + /** {@inheritDoc} */ + @Override protected Connection createConnection() throws SQLException { + return DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?skipReducerOnUpdate=true"); + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest.java new file mode 100644 index 0000000000000..475a77f23cc28 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import org.apache.ignite.jdbc.thin.JdbcThinUpdateStatementSelfTest; + +/** + * + */ +public class JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest extends JdbcThinUpdateStatementSelfTest { + /** {@inheritDoc} */ + @Override protected Connection createConnection() throws SQLException { + return DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?skipReducerOnUpdate=true"); + } +} diff --git a/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java b/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java index 99ec08ad290b2..3ea0c81ab7295 100644 --- a/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java +++ b/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java @@ -235,6 +235,8 @@ public static void main(String[] args) throws Exception { // gen.generateAndWrite(GridH2RowMessage.class); // gen.generateAndWrite(GridCacheVersion.class); // gen.generateAndWrite(GridCacheVersionEx.class); +// gen.generateAndWrite(GridH2DmlRequest.class); +// gen.generateAndWrite(GridH2DmlResponse.class); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java index b03e38733f58b..ea9b7f8fa7fa3 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java @@ -334,6 +334,9 @@ public class IgniteJdbcDriver implements Driver { /** Allow queries with multiple statements. */ private static final String PARAM_MULTIPLE_STMTS = "multipleStatementsAllowed"; + /** Skip reducer on update property name. */ + private static final String PARAM_SKIP_REDUCER_ON_UPDATE = "skipReducerOnUpdate"; + /** Hostname property name. 
*/ public static final String PROP_HOST = PROP_PREFIX + "host"; @@ -382,6 +385,9 @@ public class IgniteJdbcDriver implements Driver { /** Allow query with multiple statements. */ public static final String PROP_MULTIPLE_STMTS = PROP_PREFIX + PARAM_MULTIPLE_STMTS; + /** Skip reducer on update update property name. */ + public static final String PROP_SKIP_REDUCER_ON_UPDATE = PROP_PREFIX + PARAM_SKIP_REDUCER_ON_UPDATE; + /** Cache name property name. */ public static final String PROP_CFG = PROP_PREFIX + "cfg"; @@ -454,7 +460,8 @@ public class IgniteJdbcDriver implements Driver { new JdbcDriverPropertyInfo("Enforce Join Order", info.getProperty(JdbcThinUtils.PROP_ENFORCE_JOIN_ORDER), ""), new JdbcDriverPropertyInfo("Lazy query execution", info.getProperty(JdbcThinUtils.PROP_LAZY), ""), new JdbcDriverPropertyInfo("Transactions Allowed", info.getProperty(PROP_TX_ALLOWED), ""), - new JdbcDriverPropertyInfo("Queries with multiple statements allowed", info.getProperty(PROP_MULTIPLE_STMTS), "") + new JdbcDriverPropertyInfo("Queries with multiple statements allowed", info.getProperty(PROP_MULTIPLE_STMTS), ""), + new JdbcDriverPropertyInfo("Skip reducer on update", info.getProperty(PROP_SKIP_REDUCER_ON_UPDATE), "") ); if (info.getProperty(PROP_CFG) != null) diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcThinDriver.java b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcThinDriver.java index 8085ed42631cc..a313f92dc01f6 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcThinDriver.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcThinDriver.java @@ -186,7 +186,8 @@ public class IgniteJdbcThinDriver implements Driver { new JdbcDriverPropertyInfo("Enforce Join Order", info.getProperty(JdbcThinUtils.PROP_ENFORCE_JOIN_ORDER), ""), new JdbcDriverPropertyInfo("Collocated", info.getProperty(JdbcThinUtils.PROP_COLLOCATED), ""), new JdbcDriverPropertyInfo("Replicated only", 
info.getProperty(JdbcThinUtils.PROP_REPLICATED_ONLY), ""), - new JdbcDriverPropertyInfo("Lazy query execution flag", info.getProperty(JdbcThinUtils.PROP_LAZY),"") + new JdbcDriverPropertyInfo("Lazy query execution flag", info.getProperty(JdbcThinUtils.PROP_LAZY),""), + new JdbcDriverPropertyInfo("Skip reducer on update", info.getProperty(JdbcThinUtils.PROP_SKIP_REDUCER_ON_UPDATE),"") ); return props.toArray(new DriverPropertyInfo[0]); diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java b/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java index 2d128d158b9fb..4e12b8ca03ecc 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java @@ -369,6 +369,13 @@ public SqlFieldsQuery setSchema(@Nullable String schema) { return this; } + /** + * @return Copy of this query. + */ + public SqlFieldsQuery copy() { + return new SqlFieldsQuery(this); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(SqlFieldsQuery.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java index 5afed4e5bc5a1..57b25e18360d3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java @@ -62,6 +62,7 @@ import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_SOCK_RCV_BUF; import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_SOCK_SND_BUF; import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_TCP_NO_DELAY; +import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_SKIP_REDUCER_ON_UPDATE; /** * JDBC connection implementation. 
@@ -136,10 +137,11 @@ public JdbcThinConnection(String url, Properties props, String schema) throws SQ int sockRcvBuf = extractIntNonNegative(props, PROP_SOCK_RCV_BUF, 0); boolean tcpNoDelay = extractBoolean(props, PROP_TCP_NO_DELAY, true); + boolean skipReducerOnUpdate = extractBoolean(props, PROP_SKIP_REDUCER_ON_UPDATE, false); try { cliIo = new JdbcThinTcpIo(host, port, distributedJoins, enforceJoinOrder, collocated, replicatedOnly, - autoCloseServerCursor, lazyExec, sockSndBuf, sockRcvBuf, tcpNoDelay); + autoCloseServerCursor, lazyExec, sockSndBuf, sockRcvBuf, tcpNoDelay, skipReducerOnUpdate); cliIo.start(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java index 9e12fbf571005..0670fb10bfc16 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java @@ -100,6 +100,9 @@ public class JdbcThinTcpIo { /** Flag to automatically close server cursor. */ private final boolean autoCloseServerCursor; + /** Executes update queries on server nodes. */ + private final boolean skipReducerOnUpdate; + /** Socket send buffer. */ private final int sockSndBuf; @@ -138,10 +141,11 @@ public class JdbcThinTcpIo { * @param sockSndBuf Socket send buffer. * @param sockRcvBuf Socket receive buffer. * @param tcpNoDelay TCP no delay flag. + * @param skipReducerOnUpdate Executes update queries on ignite server nodes. 
*/ JdbcThinTcpIo(String host, int port, boolean distributedJoins, boolean enforceJoinOrder, boolean collocated, boolean replicatedOnly, boolean autoCloseServerCursor, boolean lazy, int sockSndBuf, int sockRcvBuf, - boolean tcpNoDelay) { + boolean tcpNoDelay, boolean skipReducerOnUpdate) { this.host = host; this.port = port; this.distributedJoins = distributedJoins; @@ -153,6 +157,7 @@ public class JdbcThinTcpIo { this.sockSndBuf = sockSndBuf; this.sockRcvBuf = sockRcvBuf; this.tcpNoDelay = tcpNoDelay; + this.skipReducerOnUpdate = skipReducerOnUpdate; } /** @@ -211,6 +216,7 @@ public void handshake(ClientListenerProtocolVersion ver) throws IOException, SQL writer.writeBoolean(replicatedOnly); writer.writeBoolean(autoCloseServerCursor); writer.writeBoolean(lazy); + writer.writeBoolean(skipReducerOnUpdate); send(writer.array()); @@ -491,4 +497,11 @@ IgniteProductVersion igniteVersion() { public boolean lazy() { return lazy; } + + /** + * @return Server side update flag. + */ + public boolean skipReducerOnUpdate() { + return skipReducerOnUpdate; + } } \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinUtils.java index 52b3abcec2fc2..c9bf61cab5583 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinUtils.java @@ -81,6 +81,9 @@ public class JdbcThinUtils { /** Parameter: Automatically close server cursor. */ public static final String PARAM_AUTO_CLOSE_SERVER_CURSOR = "autoCloseServerCursor"; + /** Parameter: execute update query in distributed mode on ignite server nodes. */ + public static final String PARAM_SKIP_REDUCER_ON_UPDATE = "skipReducerOnUpdate"; + /** Distributed joins property name. 
*/ public static final String PROP_DISTRIBUTED_JOINS = PROP_PREFIX + PARAM_DISTRIBUTED_JOINS; @@ -108,6 +111,9 @@ public class JdbcThinUtils { /** Automatically close server cursor. */ public static final String PROP_AUTO_CLOSE_SERVER_CURSORS = PROP_PREFIX + PARAM_AUTO_CLOSE_SERVER_CURSOR; + /** Executes update queries on ignite server nodes in distributed mode. */ + public static final String PROP_SKIP_REDUCER_ON_UPDATE = PROP_PREFIX + PARAM_SKIP_REDUCER_ON_UPDATE; + /** Default port. */ public static final int DFLT_PORT = ClientConnectorConfiguration.DFLT_PORT; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcBatchUpdateTask.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcBatchUpdateTask.java index e4916f70c7891..774f9229babfc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcBatchUpdateTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcBatchUpdateTask.java @@ -29,6 +29,7 @@ import org.apache.ignite.internal.IgniteKernal; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.lang.IgniteCallable; import org.apache.ignite.resources.IgniteInstanceResource; @@ -162,7 +163,7 @@ public JdbcBatchUpdateTask(Ignite ignite, String cacheName, String schemaName, S * @throws SQLException If failed. 
*/ private Integer doSingleUpdate(IgniteCache cache, String sqlText, List args) throws SQLException { - SqlFieldsQuery qry = new JdbcSqlFieldsQuery(sqlText, false); + SqlFieldsQuery qry = new SqlFieldsQueryEx(sqlText, false); qry.setPageSize(fetchSize); qry.setLocal(locQry); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java index ccc09ece9a4bd..29cb6a1669dea 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java @@ -82,12 +82,13 @@ import static org.apache.ignite.IgniteJdbcDriver.PROP_LOCAL; import static org.apache.ignite.IgniteJdbcDriver.PROP_MULTIPLE_STMTS; import static org.apache.ignite.IgniteJdbcDriver.PROP_NODE_ID; -import static org.apache.ignite.IgniteJdbcDriver.PROP_TX_ALLOWED; import static org.apache.ignite.IgniteJdbcDriver.PROP_STREAMING; import static org.apache.ignite.IgniteJdbcDriver.PROP_STREAMING_ALLOW_OVERWRITE; import static org.apache.ignite.IgniteJdbcDriver.PROP_STREAMING_FLUSH_FREQ; import static org.apache.ignite.IgniteJdbcDriver.PROP_STREAMING_PER_NODE_BUF_SIZE; import static org.apache.ignite.IgniteJdbcDriver.PROP_STREAMING_PER_NODE_PAR_OPS; +import static org.apache.ignite.IgniteJdbcDriver.PROP_TX_ALLOWED; +import static org.apache.ignite.IgniteJdbcDriver.PROP_SKIP_REDUCER_ON_UPDATE; import static org.apache.ignite.internal.jdbc2.JdbcUtils.convertToSqlException; import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.createJdbcSqlException; @@ -168,6 +169,9 @@ public class JdbcConnection implements Connection { /** Allow queries with multiple statements. */ private final boolean multipleStmts; + /** Skip reducer on update flag. */ + private final boolean skipReducerOnUpdate; + /** Statements. 
*/ final Set statements = new HashSet<>(); @@ -209,6 +213,7 @@ public JdbcConnection(String url, Properties props) throws SQLException { streamNodeParOps = Integer.parseInt(props.getProperty(PROP_STREAMING_PER_NODE_PAR_OPS, "0")); multipleStmts = Boolean.parseBoolean(props.getProperty(PROP_MULTIPLE_STMTS)); + skipReducerOnUpdate = Boolean.parseBoolean(props.getProperty(PROP_SKIP_REDUCER_ON_UPDATE)); String nodeIdProp = props.getProperty(PROP_NODE_ID); @@ -853,6 +858,13 @@ boolean isMultipleStatementsAllowed() { return multipleStmts; } + /** + * @return {@code true} if update on server is enabled, {@code false} otherwise. + */ + boolean skipReducerOnUpdate() { + return skipReducerOnUpdate; + } + /** * @return Local query flag. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryMultipleStatementsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryMultipleStatementsTask.java index bf7c24e640ccd..f907525571a5f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryMultipleStatementsTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryMultipleStatementsTask.java @@ -27,6 +27,7 @@ import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteKernal; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.lang.IgniteCallable; import org.apache.ignite.resources.IgniteInstanceResource; @@ -109,7 +110,7 @@ public JdbcQueryMultipleStatementsTask(Ignite ignite, String schemaName, String /** {@inheritDoc} */ @Override public List call() throws Exception { - SqlFieldsQuery qry = (isQry != null ? new JdbcSqlFieldsQuery(sql, isQry) : new SqlFieldsQuery(sql)) + SqlFieldsQuery qry = (isQry != null ? 
new SqlFieldsQueryEx(sql, isQry) : new SqlFieldsQuery(sql)) .setArgs(args); qry.setPageSize(fetchSize); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java index ecbfb713451bd..aa9f009962db2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java @@ -36,6 +36,7 @@ import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.internal.IgniteKernal; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; import org.apache.ignite.internal.util.typedef.CAX; import org.apache.ignite.internal.util.typedef.internal.U; @@ -156,7 +157,7 @@ public JdbcQueryTask(Ignite ignite, String cacheName, String schemaName, String throw new SQLException("Cache not found [cacheName=" + cacheName + ']'); } - SqlFieldsQuery qry = (isQry != null ? new JdbcSqlFieldsQuery(sql, isQry) : new SqlFieldsQuery(sql)) + SqlFieldsQuery qry = (isQry != null ? new SqlFieldsQueryEx(sql, isQry) : new SqlFieldsQuery(sql)) .setArgs(args); qry.setPageSize(fetchSize); @@ -240,6 +241,13 @@ protected boolean updateMetadata() { return false; } + /** + * @return Flag to update enable server side updates. + */ + protected boolean skipReducerOnUpdate() { + return false; + } + /** * Schedules removal of stored cursor in case of remote query execution. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTaskV3.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTaskV3.java index cb2d45220f7f3..f002d87127825 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTaskV3.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTaskV3.java @@ -30,6 +30,9 @@ class JdbcQueryTaskV3 extends JdbcQueryTaskV2 { /** Update metadata on demand flag. */ private final boolean updateMeta; + /** Update metadata on demand flag. */ + private final boolean skipReducerOnUpdate; + /** * @param ignite Ignite. * @param cacheName Cache name. @@ -46,14 +49,16 @@ class JdbcQueryTaskV3 extends JdbcQueryTaskV2 { * @param enforceJoinOrder Enforce joins order flag. * @param lazy Lazy query execution flag. * @param updateMeta Update metadata on demand. + * @param skipReducerOnUpdate Flkag to enable server side updates. */ public JdbcQueryTaskV3(Ignite ignite, String cacheName, String schemaName, String sql, Boolean isQry, boolean loc, Object[] args, int fetchSize, UUID uuid, boolean locQry, boolean collocatedQry, boolean distributedJoins, - boolean enforceJoinOrder, boolean lazy, boolean updateMeta) { + boolean enforceJoinOrder, boolean lazy, boolean updateMeta, boolean skipReducerOnUpdate) { super(ignite, cacheName, schemaName, sql, isQry, loc, args, fetchSize, uuid, locQry, collocatedQry, distributedJoins, enforceJoinOrder, lazy); this.updateMeta = updateMeta; + this.skipReducerOnUpdate = skipReducerOnUpdate; } /** {@inheritDoc} */ @@ -61,6 +66,11 @@ public JdbcQueryTaskV3(Ignite ignite, String cacheName, String schemaName, Strin return updateMeta; } + /** {@inheritDoc} */ + @Override protected boolean skipReducerOnUpdate() { + return skipReducerOnUpdate; + } + /** * @param ignite Ignite. * @param cacheName Cache name. 
@@ -77,16 +87,17 @@ public JdbcQueryTaskV3(Ignite ignite, String cacheName, String schemaName, Strin * @param enforceJoinOrder Enforce joins order flag. * @param lazy Lazy query execution flag. * @param updateMeta Update metadata on demand. + * @param skipReducerOnUpdate Update on server flag. * @return Appropriate task JdbcQueryTask or JdbcQueryTaskV2. */ public static JdbcQueryTask createTask(Ignite ignite, String cacheName, String schemaName, String sql, Boolean isQry, boolean loc, Object[] args, int fetchSize, UUID uuid, boolean locQry, boolean collocatedQry, boolean distributedJoins, - boolean enforceJoinOrder, boolean lazy, boolean updateMeta) { + boolean enforceJoinOrder, boolean lazy, boolean updateMeta, boolean skipReducerOnUpdate) { - if (updateMeta) + if (updateMeta || skipReducerOnUpdate) return new JdbcQueryTaskV3(ignite, cacheName, schemaName, sql, isQry, loc, args, fetchSize, - uuid, locQry, collocatedQry, distributedJoins, enforceJoinOrder, lazy, true); + uuid, locQry, collocatedQry, distributedJoins, enforceJoinOrder, lazy, updateMeta, skipReducerOnUpdate); else return JdbcQueryTaskV2.createTask(ignite, cacheName, schemaName, sql, isQry, loc, args, fetchSize, uuid, locQry, collocatedQry, distributedJoins, enforceJoinOrder, lazy); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java index 69d4252d7bcee..e2ff5d866dffb 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java @@ -205,7 +205,7 @@ private void fetchPage() throws SQLException { // Connections from new clients send queries with new tasks, so we have to continue in the same manner JdbcQueryTask qryTask = JdbcQueryTaskV3.createTask(loc ? 
ignite : null, conn.cacheName(), conn.schemaName(), null,true, loc, null, fetchSize, uuid, conn.isLocalQuery(), conn.isCollocatedQuery(), - conn.isDistributedJoins(), conn.isEnforceJoinOrder(), conn.isLazy(), updateMetadata); + conn.isDistributedJoins(), conn.isEnforceJoinOrder(), conn.isLazy(), updateMetadata, false); try { JdbcQueryTaskResult res = diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcSqlFieldsQuery.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcSqlFieldsQuery.java deleted file mode 100644 index d8b9a2658398d..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcSqlFieldsQuery.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.jdbc2; - -import java.util.concurrent.TimeUnit; -import org.apache.ignite.cache.query.SqlFieldsQuery; - -/** - * {@link SqlFieldsQuery} with JDBC flavor - it has additional flag indicating whether JDBC driver expects - * this query to return a result set or an update counter. This class is not intended for use outside JDBC driver. 
- */ -public final class JdbcSqlFieldsQuery extends SqlFieldsQuery { - /** */ - private static final long serialVersionUID = 0L; - - /** Flag set by JDBC driver to enforce checks for correct operation type. */ - private final boolean isQry; - - /** - * @param sql SQL query. - * @param isQry Flag indicating whether this object denotes a query or an update operation. - */ - public JdbcSqlFieldsQuery(String sql, boolean isQry) { - super(sql); - this.isQry = isQry; - } - - /** - * @return Flag indicating whether this object denotes a query or an update operation.. - */ - public boolean isQuery() { - return isQry; - } - - /** {@inheritDoc} */ - @Override public JdbcSqlFieldsQuery setSql(String sql) { - super.setSql(sql); - - return this; - } - - /** {@inheritDoc} */ - @Override public JdbcSqlFieldsQuery setArgs(Object... args) { - super.setArgs(args); - - return this; - } - - /** {@inheritDoc} */ - @Override public JdbcSqlFieldsQuery setTimeout(int timeout, TimeUnit timeUnit) { - super.setTimeout(timeout, timeUnit); - - return this; - } - - /** {@inheritDoc} */ - @Override public JdbcSqlFieldsQuery setCollocated(boolean collocated) { - super.setCollocated(collocated); - - return this; - } - - /** {@inheritDoc} */ - @Override public JdbcSqlFieldsQuery setEnforceJoinOrder(boolean enforceJoinOrder) { - super.setEnforceJoinOrder(enforceJoinOrder); - - return this; - } - - /** {@inheritDoc} */ - @Override public JdbcSqlFieldsQuery setDistributedJoins(boolean distributedJoins) { - super.setDistributedJoins(distributedJoins); - - return this; - } - - /** {@inheritDoc} */ - @Override public JdbcSqlFieldsQuery setPageSize(int pageSize) { - super.setPageSize(pageSize); - - return this; - } - - /** {@inheritDoc} */ - @Override public JdbcSqlFieldsQuery setLocal(boolean loc) { - super.setLocal(loc); - - return this; - } -} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStatement.java 
b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStatement.java index acac12337d35a..2498456be53eb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStatement.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStatement.java @@ -161,9 +161,9 @@ private void executeSingle(String sql, Boolean isQuery) throws SQLException { else isQuery = true; - JdbcQueryTask qryTask = JdbcQueryTaskV2.createTask(loc ? ignite : null, conn.cacheName(), conn.schemaName(), + JdbcQueryTask qryTask = JdbcQueryTaskV3.createTask(loc ? ignite : null, conn.cacheName(), conn.schemaName(), sql, isQuery, loc, getArgs(), fetchSize, uuid, conn.isLocalQuery(), conn.isCollocatedQuery(), - conn.isDistributedJoins(), conn.isEnforceJoinOrder(), conn.isLazy()); + conn.isDistributedJoins(), conn.isEnforceJoinOrder(), conn.isLazy(), false, conn.skipReducerOnUpdate()); try { JdbcQueryTaskResult qryRes = diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheSqlQuery.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheSqlQuery.java index d3746f30cbf0c..f38c5b264e33a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheSqlQuery.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheSqlQuery.java @@ -74,6 +74,11 @@ public class GridCacheSqlQuery implements Message { @GridDirectTransient private transient Object[] derivedPartitions; + /** Flag indicating that query contains sub-queries. */ + @GridToStringInclude + @GridDirectTransient + private transient boolean hasSubQries; + /** * For {@link Message}. 
*/ @@ -259,6 +264,7 @@ public GridCacheSqlQuery copy() { cp.sort = sort; cp.partitioned = partitioned; cp.derivedPartitions = derivedPartitions; + cp.hasSubQries = hasSubQries; return cp; } @@ -347,4 +353,22 @@ public GridCacheSqlQuery derivedPartitions(Object[] derivedPartitions) { return this; } + + /** + * @return {@code true} if query contains sub-queries. + */ + public boolean hasSubQueries() { + return hasSubQries; + } + + /** + * @param hasSubQries Flag indicating that query contains sub-queries. + * + * @return {@code this}. + */ + public GridCacheSqlQuery hasSubQueries(boolean hasSubQries) { + this.hasSubQries = hasSubQries; + + return this; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/SqlFieldsQueryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/SqlFieldsQueryEx.java new file mode 100644 index 0000000000000..c5f786ec45a0c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/SqlFieldsQueryEx.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.query; + +import java.util.concurrent.TimeUnit; +import org.apache.ignite.cache.query.SqlFieldsQuery; + +/** + * {@link SqlFieldsQuery} with experimental and internal features. + */ +public final class SqlFieldsQueryEx extends SqlFieldsQuery { + /** */ + private static final long serialVersionUID = 0L; + + /** Flag to enforce checks for correct operation type. */ + private final Boolean isQry; + + /** Whether server side DML should be enabled. */ + private boolean skipReducerOnUpdate; + + /** + * @param sql SQL query. + * @param isQry Flag indicating whether this object denotes a query or an update operation. + */ + public SqlFieldsQueryEx(String sql, Boolean isQry) { + super(sql); + this.isQry = isQry; + } + + /** + * @param qry SQL query. + */ + private SqlFieldsQueryEx(SqlFieldsQueryEx qry) { + super(qry); + + this.isQry = qry.isQry; + this.skipReducerOnUpdate = qry.skipReducerOnUpdate; + } + + /** + * @return Flag indicating whether this object denotes a query or an update operation. + */ + public Boolean isQuery() { + return isQry; + } + + /** {@inheritDoc} */ + @Override public SqlFieldsQueryEx setSql(String sql) { + super.setSql(sql); + + return this; + } + + /** {@inheritDoc} */ + @Override public SqlFieldsQueryEx setArgs(Object... 
args) { + super.setArgs(args); + + return this; + } + + /** {@inheritDoc} */ + @Override public SqlFieldsQueryEx setTimeout(int timeout, TimeUnit timeUnit) { + super.setTimeout(timeout, timeUnit); + + return this; + } + + /** {@inheritDoc} */ + @Override public SqlFieldsQueryEx setCollocated(boolean collocated) { + super.setCollocated(collocated); + + return this; + } + + /** {@inheritDoc} */ + @Override public SqlFieldsQueryEx setEnforceJoinOrder(boolean enforceJoinOrder) { + super.setEnforceJoinOrder(enforceJoinOrder); + + return this; + } + + /** {@inheritDoc} */ + @Override public SqlFieldsQueryEx setDistributedJoins(boolean distributedJoins) { + super.setDistributedJoins(distributedJoins); + + return this; + } + + /** {@inheritDoc} */ + @Override public SqlFieldsQueryEx setPageSize(int pageSize) { + super.setPageSize(pageSize); + + return this; + } + + /** {@inheritDoc} */ + @Override public SqlFieldsQueryEx setLocal(boolean loc) { + super.setLocal(loc); + + return this; + } + + /** + * Sets server side update flag. + *

      + * By default, when processing DML command, Ignite first fetches all affected intermediate rows for analysis to the + * node which initiated the query and only then forms batches of updated values to be sent to remote nodes. + * For simple DML commands (that however affect great deal of rows) such approach may be an overkill in terms of + * network delays and memory usage on initiating node. Use this flag as hint for Ignite to do all intermediate rows + * analysis and updates in place on corresponding remote data nodes. + *

      + * There are limitations to what DML command can be optimized this way. The command containing LIMIT, OFFSET, + * DISTINCT, ORDER BY, GROUP BY, sub-query or UNION will be processed the usual way despite this flag setting. + *

      + * Defaults to {@code false}, meaning that intermediate results will be fetched to initiating node first. + * Only affects DML commands. Ignored when {@link #isLocal()} is {@code true}. + * Note that when set to {@code true}, the query may fail in the case of even single node failure. + * + * @param skipReducerOnUpdate Server side update flag. + * @return {@code this} For chaining. + */ + public SqlFieldsQuery setSkipReducerOnUpdate(boolean skipReducerOnUpdate) { + this.skipReducerOnUpdate = skipReducerOnUpdate; + + return this; + } + + /** + * Gets server side update flag. + *

      + * See {@link #setSkipReducerOnUpdate(boolean)} for more information. + * + * @return Server side update flag. + */ + public boolean isSkipReducerOnUpdate() { + return skipReducerOnUpdate; + } + + /** {@inheritDoc} */ + @Override public SqlFieldsQuery copy() { + return new SqlFieldsQueryEx(this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java index a6a7aa5c27507..7b404664e33e2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java @@ -104,8 +104,13 @@ public JdbcConnectionContext(GridKernalContext ctx, GridSpinBusyLock busyLock, i if (ver.compareTo(VER_2_1_5) >= 0) lazyExec = reader.readBoolean(); + boolean skipReducerOnUpdate = false; + + if (ver.compareTo(VER_2_3_0) >= 0) + skipReducerOnUpdate = reader.readBoolean(); + handler = new JdbcRequestHandler(ctx, busyLock, maxCursors, distributedJoins, enforceJoinOrder, - collocated, replicatedOnly, autoCloseCursors, lazyExec, ver); + collocated, replicatedOnly, autoCloseCursors, lazyExec, skipReducerOnUpdate, ver); parser = new JdbcMessageParser(ctx); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java index 166402fcce2c0..e3b6f5b5ef80e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java @@ -35,7 +35,7 @@ import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteVersionUtils; import 
org.apache.ignite.internal.binary.BinaryWriterExImpl; -import org.apache.ignite.internal.jdbc2.JdbcSqlFieldsQuery; +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion; @@ -103,6 +103,9 @@ public class JdbcRequestHandler implements ClientListenerRequestHandler { /** Lazy query execution flag. */ private final boolean lazy; + /** Skip reducer on update flag. */ + private final boolean skipReducerOnUpdate; + /** Automatic close of cursors. */ private final boolean autoCloseCursors; @@ -121,11 +124,13 @@ public class JdbcRequestHandler implements ClientListenerRequestHandler { * @param replicatedOnly Replicated only flag. * @param autoCloseCursors Flag to automatically close server cursors. * @param lazy Lazy query execution flag. + * @param skipReducerOnUpdate Skip reducer on update flag. * @param protocolVer Protocol version. 
*/ public JdbcRequestHandler(GridKernalContext ctx, GridSpinBusyLock busyLock, int maxCursors, boolean distributedJoins, boolean enforceJoinOrder, boolean collocated, boolean replicatedOnly, - boolean autoCloseCursors, boolean lazy, ClientListenerProtocolVersion protocolVer) { + boolean autoCloseCursors, boolean lazy, boolean skipReducerOnUpdate, + ClientListenerProtocolVersion protocolVer) { this.ctx = ctx; this.busyLock = busyLock; this.maxCursors = maxCursors; @@ -135,6 +140,7 @@ public JdbcRequestHandler(GridKernalContext ctx, GridSpinBusyLock busyLock, int this.replicatedOnly = replicatedOnly; this.autoCloseCursors = autoCloseCursors; this.lazy = lazy; + this.skipReducerOnUpdate = skipReducerOnUpdate; this.protocolVer = protocolVer; log = ctx.log(getClass()); @@ -263,14 +269,17 @@ private JdbcResponse executeQuery(JdbcQueryExecuteRequest req) { break; case SELECT_STATEMENT_TYPE: - qry = new JdbcSqlFieldsQuery(sql, true); + qry = new SqlFieldsQueryEx(sql, true); break; default: assert req.expectedStatementType() == JdbcStatementType.UPDATE_STMT_TYPE; - qry = new JdbcSqlFieldsQuery(sql, false); + qry = new SqlFieldsQueryEx(sql, false); + + if (skipReducerOnUpdate) + ((SqlFieldsQueryEx)qry).setSkipReducerOnUpdate(true); } qry.setArgs(req.arguments()); @@ -476,7 +485,7 @@ private ClientListenerResponse executeBatch(JdbcBatchExecuteRequest req) { if (q.sql() != null) sql = q.sql(); - SqlFieldsQuery qry = new JdbcSqlFieldsQuery(sql, false); + SqlFieldsQuery qry = new SqlFieldsQueryEx(sql, false); qry.setArgs(q.args()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcConnectionContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcConnectionContext.java index a4af478ebed2e..88a2e0ff1cd34 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcConnectionContext.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcConnectionContext.java @@ -37,8 +37,11 @@ public class OdbcConnectionContext implements ClientListenerConnectionContext { /** Version 2.1.5: added "lazy" flag. */ public static final ClientListenerProtocolVersion VER_2_1_5 = ClientListenerProtocolVersion.create(2, 1, 5); + /** Version 2.3.0: added "skipReducerOnUpdate" flag. */ + public static final ClientListenerProtocolVersion VER_2_3_0 = ClientListenerProtocolVersion.create(2, 3, 0); + /** Current version. */ - private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_1_5; + private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_3_0; /** Supported versions. */ private static final Set SUPPORTED_VERS = new HashSet<>(); @@ -60,6 +63,7 @@ public class OdbcConnectionContext implements ClientListenerConnectionContext { static { SUPPORTED_VERS.add(CURRENT_VER); + SUPPORTED_VERS.add(VER_2_1_5); SUPPORTED_VERS.add(VER_2_1_0); } @@ -98,8 +102,13 @@ public OdbcConnectionContext(GridKernalContext ctx, GridSpinBusyLock busyLock, i if (ver.compareTo(VER_2_1_5) >= 0) lazy = reader.readBoolean(); + boolean skipReducerOnUpdate = false; + + if (ver.compareTo(VER_2_3_0) >= 0) + skipReducerOnUpdate = reader.readBoolean(); + handler = new OdbcRequestHandler(ctx, busyLock, maxCursors, distributedJoins, - enforceJoinOrder, replicatedOnly, collocated, lazy); + enforceJoinOrder, replicatedOnly, collocated, lazy, skipReducerOnUpdate); parser = new OdbcMessageParser(ctx, ver); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java index 07b41f3286122..32375fddd7a06 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java @@ -34,6 +34,7 @@ import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.binary.BinaryWriterExImpl; import org.apache.ignite.internal.binary.GridBinaryMarshaller; +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.odbc.ClientListenerRequest; @@ -43,7 +44,6 @@ import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; import org.apache.ignite.internal.processors.query.GridQueryIndexing; import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; -import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; @@ -94,6 +94,9 @@ public class OdbcRequestHandler implements ClientListenerRequestHandler { /** Lazy flag. */ private final boolean lazy; + /** Update on server flag. */ + private final boolean skipReducerOnUpdate; + /** * Constructor. * @param ctx Context. @@ -104,10 +107,11 @@ public class OdbcRequestHandler implements ClientListenerRequestHandler { * @param replicatedOnly Replicated only flag. * @param collocated Collocated flag. * @param lazy Lazy flag. + * @param skipReducerOnUpdate Skip reducer on update flag. 
*/ public OdbcRequestHandler(GridKernalContext ctx, GridSpinBusyLock busyLock, int maxCursors, boolean distributedJoins, boolean enforceJoinOrder, boolean replicatedOnly, - boolean collocated, boolean lazy) { + boolean collocated, boolean lazy, boolean skipReducerOnUpdate) { this.ctx = ctx; this.busyLock = busyLock; this.maxCursors = maxCursors; @@ -116,6 +120,7 @@ public OdbcRequestHandler(GridKernalContext ctx, GridSpinBusyLock busyLock, int this.replicatedOnly = replicatedOnly; this.collocated = collocated; this.lazy = lazy; + this.skipReducerOnUpdate = skipReducerOnUpdate; log = ctx.log(getClass()); } @@ -196,8 +201,8 @@ public void onDisconnect() { * @param args Arguments. * @return Query instance. */ - private SqlFieldsQuery makeQuery(String schema, String sql, Object[] args) { - SqlFieldsQuery qry = new SqlFieldsQuery(sql); + private SqlFieldsQueryEx makeQuery(String schema, String sql, Object[] args) { + SqlFieldsQueryEx qry = new SqlFieldsQueryEx(sql, null); qry.setArgs(args); @@ -207,6 +212,7 @@ private SqlFieldsQuery makeQuery(String schema, String sql, Object[] args) { qry.setCollocated(collocated); qry.setLazy(lazy); qry.setSchema(schema); + qry.setSkipReducerOnUpdate(skipReducerOnUpdate); return qry; } diff --git a/modules/core/src/main/resources/META-INF/classnames.properties b/modules/core/src/main/resources/META-INF/classnames.properties index 2703e6d30e340..2f795dfd8de0f 100644 --- a/modules/core/src/main/resources/META-INF/classnames.properties +++ b/modules/core/src/main/resources/META-INF/classnames.properties @@ -310,7 +310,7 @@ org.apache.ignite.internal.jdbc2.JdbcDatabaseMetadata$UpdateMetadataTask org.apache.ignite.internal.jdbc2.JdbcQueryTask org.apache.ignite.internal.jdbc2.JdbcQueryTask$1 org.apache.ignite.internal.jdbc2.JdbcQueryTask$QueryResult -org.apache.ignite.internal.jdbc2.JdbcSqlFieldsQuery +org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx org.apache.ignite.internal.managers.GridManagerAdapter$1$1 
org.apache.ignite.internal.managers.checkpoint.GridCheckpointManager$CheckpointSet org.apache.ignite.internal.managers.checkpoint.GridCheckpointRequest @@ -2094,4 +2094,4 @@ org.apache.ignite.transactions.TransactionRollbackException org.apache.ignite.transactions.TransactionState org.apache.ignite.transactions.TransactionTimeoutException org.apache.ignite.util.AttributeNodeFilter -org.apache.ignite.internal.processors.cache.persistence.file.AsyncFileIO \ No newline at end of file +org.apache.ignite.internal.processors.cache.persistence.file.AsyncFileIO diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java index 98117b2dfe6d7..9e55442a422d1 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.query.h2; import java.lang.reflect.Array; +import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Time; @@ -150,7 +151,8 @@ public void onCacheStop(String cacheName) { * Execute DML statement, possibly with few re-attempts in case of concurrent data modifications. * * @param schemaName Schema. - * @param prepared Prepared JDBC statement. + * @param conn Connection. + * @param prepared Prepared statement. * @param fieldsQry Original query. * @param loc Query locality flag. * @param filters Cache name and key filter. @@ -158,13 +160,14 @@ public void onCacheStop(String cacheName) { * @return Update result (modified items count and failed keys). * @throws IgniteCheckedException if failed. 
*/ - private UpdateResult updateSqlFields(String schemaName, Prepared prepared, SqlFieldsQuery fieldsQry, - boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel) throws IgniteCheckedException { + private UpdateResult updateSqlFields(String schemaName, Connection conn, Prepared prepared, + SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel) + throws IgniteCheckedException { Object[] errKeys = null; long items = 0; - UpdatePlan plan = getPlanForStatement(schemaName, prepared, null); + UpdatePlan plan = getPlanForStatement(schemaName, conn, prepared, fieldsQry, loc, null); GridCacheContext cctx = plan.tbl.rowDescriptor().context(); @@ -188,14 +191,14 @@ else if (!opCtx.isKeepBinary()) UpdateResult r; try { - r = executeUpdateStatement(schemaName, cctx, prepared, fieldsQry, loc, filters, cancel, errKeys); + r = executeUpdateStatement(schemaName, cctx, conn, prepared, fieldsQry, loc, filters, cancel, errKeys); } finally { cctx.operationContextPerCall(opCtx); } - items += r.cnt; - errKeys = r.errKeys; + items += r.counter(); + errKeys = r.errorKeys(); if (F.isEmpty(errKeys)) break; @@ -213,19 +216,22 @@ else if (items == 0L) /** * @param schemaName Schema. - * @param p Prepared. + * @param c Connection. + * @param p Prepared statement. * @param fieldsQry Initial query * @param cancel Query cancel. * @return Update result wrapped into {@link GridQueryFieldsResult} * @throws IgniteCheckedException if failed. 
*/ @SuppressWarnings("unchecked") - QueryCursorImpl> updateSqlFieldsDistributed(String schemaName, Prepared p, + QueryCursorImpl> updateSqlFieldsDistributed(String schemaName, Connection c, Prepared p, SqlFieldsQuery fieldsQry, GridQueryCancel cancel) throws IgniteCheckedException { - UpdateResult res = updateSqlFields(schemaName, p, fieldsQry, false, null, cancel); + UpdateResult res = updateSqlFields(schemaName, c, p, fieldsQry, false, null, cancel); + + checkUpdateResult(res); QueryCursorImpl> resCur = (QueryCursorImpl>)new QueryCursorImpl(Collections.singletonList - (Collections.singletonList(res.cnt)), cancel, false); + (Collections.singletonList(res.counter())), cancel, false); resCur.fieldsMeta(UPDATE_RESULT_META); @@ -236,6 +242,7 @@ QueryCursorImpl> updateSqlFieldsDistributed(String schemaName, Prepared * Execute DML statement on local cache. * * @param schemaName Schema. + * @param conn Connection. * @param stmt Prepared statement. * @param fieldsQry Fields query. * @param filters Cache name and key filter. @@ -244,14 +251,14 @@ QueryCursorImpl> updateSqlFieldsDistributed(String schemaName, Prepared * @throws IgniteCheckedException if failed. 
*/ @SuppressWarnings("unchecked") - GridQueryFieldsResult updateSqlFieldsLocal(String schemaName, PreparedStatement stmt, + GridQueryFieldsResult updateSqlFieldsLocal(String schemaName, Connection conn, PreparedStatement stmt, SqlFieldsQuery fieldsQry, IndexingQueryFilter filters, GridQueryCancel cancel) throws IgniteCheckedException { - UpdateResult res = updateSqlFields(schemaName, GridSqlQueryParser.prepared(stmt), fieldsQry, true, + UpdateResult res = updateSqlFields(schemaName, conn, GridSqlQueryParser.prepared(stmt), fieldsQry, true, filters, cancel); return new GridQueryFieldsResultAdapter(UPDATE_RESULT_META, - new IgniteSingletonIterator(Collections.singletonList(res.cnt))); + new IgniteSingletonIterator(Collections.singletonList(res.counter()))); } /** @@ -272,7 +279,7 @@ long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Obje assert p != null; - UpdatePlan plan = UpdatePlanBuilder.planForStatement(p, null); + UpdatePlan plan = UpdatePlanBuilder.planForStatement(p, true, idx, null, null, null); if (!F.eq(streamer.cacheName(), plan.tbl.rowDescriptor().context().name())) throw new IgniteSQLException("Cross cache streaming is not supported, please specify cache explicitly" + @@ -340,6 +347,7 @@ long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Obje * * @param schemaName Schema name. * @param cctx Cache context. + * @param c Connection. * @param prepared Prepared statement for DML query. * @param fieldsQry Fields query. * @param loc Local query flag. @@ -350,14 +358,14 @@ long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Obje * @throws IgniteCheckedException if failed. 
*/ @SuppressWarnings({"ConstantConditions", "unchecked"}) - private UpdateResult executeUpdateStatement(String schemaName, final GridCacheContext cctx, + private UpdateResult executeUpdateStatement(String schemaName, final GridCacheContext cctx, Connection c, Prepared prepared, SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel, Object[] failedKeys) throws IgniteCheckedException { int mainCacheId = CU.cacheId(cctx.name()); Integer errKeysPos = null; - UpdatePlan plan = getPlanForStatement(schemaName, prepared, errKeysPos); + UpdatePlan plan = getPlanForStatement(schemaName, c, prepared, fieldsQry, loc, errKeysPos); if (plan.fastUpdateArgs != null) { assert F.isEmpty(failedKeys) && errKeysPos == null; @@ -365,6 +373,14 @@ private UpdateResult executeUpdateStatement(String schemaName, final GridCacheCo return doFastUpdate(plan, fieldsQry.getArgs()); } + if (plan.distributed != null) { + UpdateResult result = doDistributedUpdate(schemaName, fieldsQry, plan, cancel); + + // null is returned in case not all nodes support distributed DML. + if (result != null) + return result; + } + assert !F.isEmpty(plan.selectQry); QueryCursorImpl> cur; @@ -401,18 +417,31 @@ private UpdateResult executeUpdateStatement(String schemaName, final GridCacheCo int pageSize = loc ? 0 : fieldsQry.getPageSize(); + return processDmlSelectResult(cctx, plan, cur, pageSize); + } + + /** + * @param cctx Cache context. + * @param plan Update plan. + * @param cursor Cursor over select results. + * @param pageSize Page size. + * @return Pair [number of successfully processed items; keys that have failed to be processed] + * @throws IgniteCheckedException if failed. 
+ */ + private UpdateResult processDmlSelectResult(GridCacheContext cctx, UpdatePlan plan, Iterable> cursor, + int pageSize) throws IgniteCheckedException { switch (plan.mode) { case MERGE: - return new UpdateResult(doMerge(plan, cur, pageSize), X.EMPTY_OBJECT_ARRAY); + return new UpdateResult(doMerge(plan, cursor, pageSize), X.EMPTY_OBJECT_ARRAY); case INSERT: - return new UpdateResult(doInsert(plan, cur, pageSize), X.EMPTY_OBJECT_ARRAY); + return new UpdateResult(doInsert(plan, cursor, pageSize), X.EMPTY_OBJECT_ARRAY); case UPDATE: - return doUpdate(plan, cur, pageSize); + return doUpdate(plan, cursor, pageSize); case DELETE: - return doDelete(cctx, cur, pageSize); + return doDelete(cctx, cursor, pageSize); default: throw new IgniteSQLException("Unexpected DML operation [mode=" + plan.mode + ']', @@ -425,20 +454,23 @@ private UpdateResult executeUpdateStatement(String schemaName, final GridCacheCo * if available. * * @param schema Schema. - * @param p Prepared JDBC statement. + * @param conn Connection. + * @param p Prepared statement. + * @param fieldsQry Original fields query. + * @param loc Local query flag. * @return Update plan. */ @SuppressWarnings({"unchecked", "ConstantConditions"}) - private UpdatePlan getPlanForStatement(String schema, Prepared p, @Nullable Integer errKeysPos) - throws IgniteCheckedException { - H2DmlPlanKey planKey = new H2DmlPlanKey(schema, p.getSQL()); + private UpdatePlan getPlanForStatement(String schema, Connection conn, Prepared p, SqlFieldsQuery fieldsQry, + boolean loc, @Nullable Integer errKeysPos) throws IgniteCheckedException { + H2DmlPlanKey planKey = new H2DmlPlanKey(schema, p.getSQL(), loc, fieldsQry); UpdatePlan res = (errKeysPos == null ? 
planCache.get(planKey) : null); if (res != null) return res; - res = UpdatePlanBuilder.planForStatement(p, errKeysPos); + res = UpdatePlanBuilder.planForStatement(p, loc, idx, conn, fieldsQry, errKeysPos); // Don't cache re-runs if (errKeysPos == null) @@ -449,6 +481,7 @@ private UpdatePlan getPlanForStatement(String schema, Prepared p, @Nullable Inte /** * Perform single cache operation based on given args. + * @param plan Update plan. * @param args Query parameters. * @return 1 if an item was affected, 0 otherwise. * @throws IgniteCheckedException if failed. @@ -486,6 +519,25 @@ private static UpdateResult doFastUpdate(UpdatePlan plan, Object[] args) throws } } + /** + * @param schemaName Schema name. + * @param fieldsQry Initial query. + * @param plan Update plan. + * @param cancel Cancel state. + * @return Update result. + * @throws IgniteCheckedException if failed. + */ + private UpdateResult doDistributedUpdate(String schemaName, SqlFieldsQuery fieldsQry, UpdatePlan plan, + GridQueryCancel cancel) throws IgniteCheckedException { + assert plan.distributed != null; + + if (cancel == null) + cancel = new GridQueryCancel(); + + return idx.runDistributedUpdate(schemaName, fieldsQry, plan.distributed.getCacheIds(), + plan.distributed.isReplicatedOnly(), cancel); + } + /** * Perform DELETE operation on top of results of SELECT. * @param cctx Cache context. @@ -573,7 +625,7 @@ private UpdateResult doUpdate(UpdatePlan plan, Iterable> cursor, int pag GridQueryProperty prop = plan.tbl.rowDescriptor().type().property(plan.colNames[i]); - assert prop != null; + assert prop != null : "Unknown property: " + plan.colNames[i]; newColVals.put(plan.colNames[i], convert(row.get(i + 2), desc, prop.type(), plan.colTypes[i])); } @@ -981,6 +1033,31 @@ private static PageProcessingResult processPage(GridCacheContext cctx, return new IgniteBiTuple<>(key, val); } + /** + * + * @param schemaName Schema name. + * @param stmt Prepared statement. + * @param fldsQry Query. 
+ * @param filter Filter. + * @param cancel Cancel state. + * @param local Locality flag. + * @return Update result. + * @throws IgniteCheckedException if failed. + */ + UpdateResult mapDistributedUpdate(String schemaName, PreparedStatement stmt, SqlFieldsQuery fldsQry, + IndexingQueryFilter filter, GridQueryCancel cancel, boolean local) throws IgniteCheckedException { + Connection c; + + try { + c = stmt.getConnection(); + } + catch (SQLException e) { + throw new IgniteCheckedException(e); + } + + return updateSqlFields(schemaName, c, GridSqlQueryParser.prepared(stmt), fldsQry, local, filter, cancel); + } + /** */ private final static class InsertEntryProcessor implements EntryProcessor { /** Value to set. */ @@ -1079,26 +1156,19 @@ static boolean isDmlStatement(Prepared stmt) { return stmt instanceof Merge || stmt instanceof Insert || stmt instanceof Update || stmt instanceof Delete; } - /** Update result - modifications count and keys to re-run query with, if needed. */ - private final static class UpdateResult { - /** Result to return for operations that affected 1 item - mostly to be used for fast updates and deletes. */ - final static UpdateResult ONE = new UpdateResult(1, X.EMPTY_OBJECT_ARRAY); - - /** Result to return for operations that affected 0 items - mostly to be used for fast updates and deletes. */ - final static UpdateResult ZERO = new UpdateResult(0, X.EMPTY_OBJECT_ARRAY); - - /** Number of processed items. */ - final long cnt; + /** + * Check update result for erroneous keys and throws concurrent update exception if necessary. + * + * @param r Update result. + */ + static void checkUpdateResult(UpdateResult r) { + if (!F.isEmpty(r.errorKeys())) { + String msg = "Failed to update some keys because they had been modified concurrently " + + "[keys=" + r.errorKeys() + ']'; - /** Keys that failed to be updated or deleted due to concurrent modification of values. 
*/ - @NotNull - final Object[] errKeys; + SQLException conEx = createJdbcSqlException(msg, IgniteQueryErrorCode.CONCURRENT_UPDATE); - /** */ - @SuppressWarnings("ConstantConditions") - private UpdateResult(long cnt, Object[] errKeys) { - this.cnt = cnt; - this.errKeys = U.firstNotNull(errKeys, X.EMPTY_OBJECT_ARRAY); + throw new IgniteSQLException(conEx); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2DmlPlanKey.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2DmlPlanKey.java index 3a43ea1951639..455b5e5a3b94d 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2DmlPlanKey.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2DmlPlanKey.java @@ -17,6 +17,8 @@ package org.apache.ignite.internal.processors.query.h2; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlanBuilder; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.S; @@ -30,20 +32,33 @@ public class H2DmlPlanKey { /** SQL. */ private final String sql; + /** Flags. */ + private final byte flags; + /** * Constructor. * * @param schemaName Schema name. * @param sql SQL. */ - public H2DmlPlanKey(String schemaName, String sql) { + public H2DmlPlanKey(String schemaName, String sql, boolean loc, SqlFieldsQuery fieldsQry) { this.schemaName = schemaName; this.sql = sql; + + if (loc || !UpdatePlanBuilder.isSkipReducerOnUpdateQuery(fieldsQry)) + this.flags = 0; // flags only relevant for server side updates. + else { + this.flags = (byte)(1 + + (fieldsQry.isDistributedJoins() ? 2 : 0) + + (fieldsQry.isEnforceJoinOrder() ? 4 : 0) + + (fieldsQry.isCollocated() ? 8 : 0)); + } } /** {@inheritDoc} */ @Override public int hashCode() { - return 31 * (schemaName != null ? schemaName.hashCode() : 0) + (sql != null ? 
sql.hashCode() : 0); + return 31 * (31 * (schemaName != null ? schemaName.hashCode() : 0) + (sql != null ? sql.hashCode() : 0)) + + flags; } /** {@inheritDoc} */ @@ -56,7 +71,7 @@ public H2DmlPlanKey(String schemaName, String sql) { H2DmlPlanKey other = (H2DmlPlanKey)o; - return F.eq(sql, other.sql) && F.eq(schemaName, other.schemaName); + return F.eq(sql, other.sql) && F.eq(schemaName, other.schemaName) && flags == other.flags; } /** {@inheritDoc} */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 22ed592bc98b3..fddd2e8744b09 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -59,7 +59,7 @@ import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.GridTopic; import org.apache.ignite.internal.IgniteInternalFuture; -import org.apache.ignite.internal.jdbc2.JdbcSqlFieldsQuery; +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheEntryImpl; import org.apache.ignite.internal.processors.cache.CacheObject; @@ -834,7 +834,7 @@ public GridQueryFieldsResult queryLocalSqlFields(final String schemaName, final fldsQry.setEnforceJoinOrder(enforceJoinOrder); fldsQry.setTimeout(timeout, TimeUnit.MILLISECONDS); - return dmlProc.updateSqlFieldsLocal(schemaName, stmt, fldsQry, filter, cancel); + return dmlProc.updateSqlFieldsLocal(schemaName, conn, stmt, fldsQry, filter, cancel); } else if (DdlStatementsProcessor.isDdlStatement(p)) throw new IgniteSQLException("DDL statements are supported for the whole cluster only", @@ -1215,6 +1215,27 @@ private Iterable> 
runQueryTwoStep( }; } + /** + * Run DML on remote nodes. + * + * @param schemaName Schema name. + * @param fieldsQry Initial update query. + * @param cacheIds Cache identifiers. + * @param isReplicatedOnly Whether query uses only replicated caches. + * @param cancel Cancel state. + * @return Update result. + */ + UpdateResult runDistributedUpdate( + String schemaName, + SqlFieldsQuery fieldsQry, + List cacheIds, + boolean isReplicatedOnly, + GridQueryCancel cancel) { + return rdcQryExec.update(schemaName, cacheIds, fieldsQry.getSql(), fieldsQry.getArgs(), + fieldsQry.isEnforceJoinOrder(), fieldsQry.getPageSize(), fieldsQry.getTimeout(), + fieldsQry.getPartitions(), isReplicatedOnly, cancel); + } + /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public QueryCursor> queryDistributedSql(String schemaName, String cacheName, @@ -1429,8 +1450,8 @@ private Iterable> runQueryTwoStep( if (twoStepQry == null) { if (DmlStatementsProcessor.isDmlStatement(prepared)) { try { - res.add(dmlProc.updateSqlFieldsDistributed(schemaName, prepared, - new SqlFieldsQuery(qry).setSql(sqlQry).setArgs(args), cancel)); + res.add(dmlProc.updateSqlFieldsDistributed(schemaName, c, prepared, + qry.copy().setSql(sqlQry).setArgs(args), cancel)); continue; } @@ -1452,33 +1473,13 @@ private Iterable> runQueryTwoStep( } } - LinkedHashSet caches0 = new LinkedHashSet<>(); - assert twoStepQry != null; - int tblCnt = twoStepQry.tablesCount(); - - if (mainCacheId != null) - caches0.add(mainCacheId); - - if (tblCnt > 0) { - for (QueryTable tblKey : twoStepQry.tables()) { - GridH2Table tbl = dataTable(tblKey); - - int cacheId = CU.cacheId(tbl.cacheName()); - - caches0.add(cacheId); - } - } + List cacheIds = collectCacheIds(mainCacheId, twoStepQry); - if (caches0.isEmpty()) + if (F.isEmpty(cacheIds)) twoStepQry.local(true); else { - //Prohibit usage indices with different numbers of segments in same query. 
- List cacheIds = new ArrayList<>(caches0); - - checkCacheIndexSegmentation(cacheIds); - twoStepQry.cacheIds(cacheIds); twoStepQry.local(qry.isLocal()); } @@ -1517,7 +1518,8 @@ private Iterable> runQueryTwoStep( * @param isQry {@code true} for select queries, otherwise (DML/DDL queries) {@code false}. */ private void checkQueryType(SqlFieldsQuery qry, boolean isQry) { - if (qry instanceof JdbcSqlFieldsQuery && ((JdbcSqlFieldsQuery)qry).isQuery() != isQry) + if (qry instanceof SqlFieldsQueryEx && ((SqlFieldsQueryEx)qry).isQuery() != null && + ((SqlFieldsQueryEx)qry).isQuery() != isQry) throw new IgniteSQLException("Given statement type does not match that declared by JDBC driver", IgniteQueryErrorCode.STMT_TYPE_MISMATCH); } @@ -1567,6 +1569,29 @@ private FieldsQueryCursor> executeTwoStepsQuery(String schemaName, int p return cursor; } + /** + * Run DML request from other node. + * + * @param schemaName Schema name. + * @param fldsQry Query. + * @param filter Filter. + * @param cancel Cancel state. + * @param local Locality flag. + * @return Update result. + * @throws IgniteCheckedException if failed. + */ + public UpdateResult mapDistributedUpdate(String schemaName, SqlFieldsQuery fldsQry, IndexingQueryFilter filter, + GridQueryCancel cancel, boolean local) throws IgniteCheckedException { + Connection conn = connectionForSchema(schemaName); + + H2Utils.setupConnection(conn, false, fldsQry.isEnforceJoinOrder()); + + PreparedStatement stmt = preparedStatementWithParams(conn, fldsQry.getSql(), + Arrays.asList(fldsQry.getArgs()), true); + + return dmlProc.mapDistributedUpdate(schemaName, stmt, fldsQry, filter, cancel, local); + } + /** * @throws IllegalStateException if segmented indices used with non-segmented indices. */ @@ -2523,6 +2548,43 @@ private int bindPartitionInfoParameter(CacheQueryPartitionInfo partInfo, Object[ U.close(conn, log); } + /** + * Collect cache identifiers from two-step query. + * + * @param mainCacheId Id of main cache. 
+ * @param twoStepQry Two-step query. + * @return Result. + */ + public List collectCacheIds(@Nullable Integer mainCacheId, GridCacheTwoStepQuery twoStepQry) { + LinkedHashSet caches0 = new LinkedHashSet<>(); + + int tblCnt = twoStepQry.tablesCount(); + + if (mainCacheId != null) + caches0.add(mainCacheId); + + if (tblCnt > 0) { + for (QueryTable tblKey : twoStepQry.tables()) { + GridH2Table tbl = dataTable(tblKey); + + int cacheId = CU.cacheId(tbl.cacheName()); + + caches0.add(cacheId); + } + } + + if (caches0.isEmpty()) + return null; + else { + //Prohibit usage indices with different numbers of segments in same query. + List cacheIds = new ArrayList<>(caches0); + + checkCacheIndexSegmentation(cacheIds); + + return cacheIds; + } + } + /** * Closeable iterator. */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/UpdateResult.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/UpdateResult.java new file mode 100644 index 0000000000000..de0e63fde5aec --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/UpdateResult.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.query.h2; + +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * Update result - modifications count and keys to re-run query with, if needed. + */ +public final class UpdateResult { + /** Result to return for operations that affected 1 item - mostly to be used for fast updates and deletes. */ + final static UpdateResult ONE = new UpdateResult(1, X.EMPTY_OBJECT_ARRAY); + + /** Result to return for operations that affected 0 items - mostly to be used for fast updates and deletes. */ + final static UpdateResult ZERO = new UpdateResult(0, X.EMPTY_OBJECT_ARRAY); + + /** Number of processed items. */ + private final long cnt; + + /** Keys that failed to be updated or deleted due to concurrent modification of values. */ + private final Object[] errKeys; + + /** + * Constructor. + * + * @param cnt Updated rows count. + * @param errKeys Array of erroneous keys. + */ + public @SuppressWarnings("ConstantConditions") UpdateResult(long cnt, Object[] errKeys) { + this.cnt = cnt; + this.errKeys = U.firstNotNull(errKeys, X.EMPTY_OBJECT_ARRAY); + } + + /** + * @return Update counter. + */ + public long counter() { + return cnt; + } + + /** + * @return Error keys. 
+ */ + public Object[] errorKeys() { + return errKeys; + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java index b81ac60025113..a99d811cacbe5 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.processors.query.h2.dml; +import java.util.List; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.util.typedef.F; @@ -64,10 +65,13 @@ public final class UpdatePlan { /** Arguments for fast UPDATE or DELETE. */ public final FastUpdateArguments fastUpdateArgs; + /** Additional info for distributed update. */ + public final DistributedPlanInfo distributed; + /** */ private UpdatePlan(UpdateMode mode, GridH2Table tbl, String[] colNames, int[] colTypes, KeyValueSupplier keySupplier, KeyValueSupplier valSupplier, int keyColIdx, int valColIdx, String selectQry, boolean isLocSubqry, - int rowsNum, FastUpdateArguments fastUpdateArgs) { + int rowsNum, FastUpdateArguments fastUpdateArgs, DistributedPlanInfo distributed) { this.colNames = colNames; this.colTypes = colTypes; this.rowsNum = rowsNum; @@ -83,46 +87,84 @@ private UpdatePlan(UpdateMode mode, GridH2Table tbl, String[] colNames, int[] co this.selectQry = selectQry; this.isLocSubqry = isLocSubqry; this.fastUpdateArgs = fastUpdateArgs; + this.distributed = distributed; } /** */ public static UpdatePlan forMerge(GridH2Table tbl, String[] colNames, int[] colTypes, KeyValueSupplier keySupplier, KeyValueSupplier valSupplier, int keyColIdx, int valColIdx, String selectQry, boolean isLocSubqry, - int rowsNum) { + int rowsNum, DistributedPlanInfo distributed) { assert !F.isEmpty(colNames); return new 
UpdatePlan(UpdateMode.MERGE, tbl, colNames, colTypes, keySupplier, valSupplier, keyColIdx, valColIdx, - selectQry, isLocSubqry, rowsNum, null); + selectQry, isLocSubqry, rowsNum, null, distributed); } /** */ public static UpdatePlan forInsert(GridH2Table tbl, String[] colNames, int[] colTypes, KeyValueSupplier keySupplier, - KeyValueSupplier valSupplier, int keyColIdx, int valColIdx, String selectQry, boolean isLocSubqry, int rowsNum) { + KeyValueSupplier valSupplier, int keyColIdx, int valColIdx, String selectQry, boolean isLocSubqry, + int rowsNum, DistributedPlanInfo distributed) { assert !F.isEmpty(colNames); - return new UpdatePlan(UpdateMode.INSERT, tbl, colNames, colTypes, keySupplier, valSupplier, keyColIdx, valColIdx, - selectQry, isLocSubqry, rowsNum, null); + return new UpdatePlan(UpdateMode.INSERT, tbl, colNames, colTypes, keySupplier, valSupplier, keyColIdx, + valColIdx, selectQry, isLocSubqry, rowsNum, null, distributed); } /** */ public static UpdatePlan forUpdate(GridH2Table tbl, String[] colNames, int[] colTypes, KeyValueSupplier valSupplier, - int valColIdx, String selectQry) { + int valColIdx, String selectQry, DistributedPlanInfo distributed) { assert !F.isEmpty(colNames); return new UpdatePlan(UpdateMode.UPDATE, tbl, colNames, colTypes, null, valSupplier, -1, valColIdx, selectQry, - false, 0, null); + false, 0, null, distributed); } /** */ - public static UpdatePlan forDelete(GridH2Table tbl, String selectQry) { - return new UpdatePlan(UpdateMode.DELETE, tbl, null, null, null, null, -1, -1, selectQry, false, 0, null); + public static UpdatePlan forDelete(GridH2Table tbl, String selectQry, DistributedPlanInfo distributed) { + return new UpdatePlan(UpdateMode.DELETE, tbl, null, null, null, null, -1, -1, selectQry, false, 0, null, + distributed); } /** */ public static UpdatePlan forFastUpdate(UpdateMode mode, GridH2Table tbl, FastUpdateArguments fastUpdateArgs) { assert mode == UpdateMode.UPDATE || mode == UpdateMode.DELETE; - return new 
UpdatePlan(mode, tbl, null, null, null, null, -1, -1, null, false, 0, fastUpdateArgs); + return new UpdatePlan(mode, tbl, null, null, null, null, -1, -1, null, false, 0, fastUpdateArgs, null); } + /** + * Additional information about distributed update plan. + */ + public final static class DistributedPlanInfo { + /** Whether update involves only replicated caches. */ + private final boolean replicatedOnly; + + /** Identifiers of caches involved in update (used for cluster nodes mapping). */ + private final List cacheIds; + + /** + * Constructor. + * + * @param replicatedOnly Whether all caches are replicated. + * @param cacheIds List of cache identifiers. + */ + DistributedPlanInfo(boolean replicatedOnly, List cacheIds) { + this.replicatedOnly = replicatedOnly; + this.cacheIds = cacheIds; + } + + /** + * @return {@code true} in case all involved caches are replicated. + */ + public boolean isReplicatedOnly() { + return replicatedOnly; + } + + /** + * @return cache identifiers. + */ + public List getCacheIds() { + return cacheIds; + } + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java index 804f7d8f61d7d..c84526692250e 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java @@ -18,19 +18,26 @@ package org.apache.ignite.internal.processors.query.h2.dml; import java.lang.reflect.Constructor; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; import java.util.HashSet; import java.util.List; import java.util.Set; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.binary.BinaryObjectBuilder; +import 
org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; import org.apache.ignite.internal.processors.query.GridQueryProperty; import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.QueryUtils; import org.apache.ignite.internal.processors.query.h2.DmlStatementsProcessor; +import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.processors.query.h2.sql.DmlAstUtils; @@ -41,12 +48,15 @@ import org.apache.ignite.internal.processors.query.h2.sql.GridSqlMerge; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuery; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuerySplitter; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlSelect; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlStatement; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlTable; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlUnion; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlUpdate; import org.apache.ignite.internal.util.GridUnsafe; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import 
org.h2.command.Prepared; @@ -71,29 +81,39 @@ private UpdatePlanBuilder() { * if available. * * @param prepared H2's {@link Prepared}. + * @param loc Local query flag. + * @param idx Indexing. + * @param conn Connection. + * @param fieldsQuery Original query. * @return Update plan. */ - public static UpdatePlan planForStatement(Prepared prepared, - @Nullable Integer errKeysPos) throws IgniteCheckedException { + public static UpdatePlan planForStatement(Prepared prepared, boolean loc, IgniteH2Indexing idx, + @Nullable Connection conn, @Nullable SqlFieldsQuery fieldsQuery, @Nullable Integer errKeysPos) + throws IgniteCheckedException { assert !prepared.isQuery(); GridSqlStatement stmt = new GridSqlQueryParser(false).parse(prepared); if (stmt instanceof GridSqlMerge || stmt instanceof GridSqlInsert) - return planForInsert(stmt); + return planForInsert(stmt, loc, idx, conn, fieldsQuery); else - return planForUpdate(stmt, errKeysPos); + return planForUpdate(stmt, loc, idx, conn, fieldsQuery, errKeysPos); } /** * Prepare update plan for INSERT or MERGE. * * @param stmt INSERT or MERGE statement. + * @param loc Local query flag. + * @param idx Indexing. + * @param conn Connection. + * @param fieldsQuery Original query. * @return Update plan. * @throws IgniteCheckedException if failed. 
*/ @SuppressWarnings("ConstantConditions") - private static UpdatePlan planForInsert(GridSqlStatement stmt) throws IgniteCheckedException { + private static UpdatePlan planForInsert(GridSqlStatement stmt, boolean loc, IgniteH2Indexing idx, + @Nullable Connection conn, @Nullable SqlFieldsQuery fieldsQuery) throws IgniteCheckedException { GridSqlQuery sel; GridSqlElement target; @@ -191,23 +211,33 @@ else if (stmt instanceof GridSqlMerge) { KeyValueSupplier keySupplier = createSupplier(cctx, desc.type(), keyColIdx, hasKeyProps, true, false); KeyValueSupplier valSupplier = createSupplier(cctx, desc.type(), valColIdx, hasValProps, false, false); + String selectSql = sel.getSQL(); + + UpdatePlan.DistributedPlanInfo distributed = (rowsNum == 0 && !F.isEmpty(selectSql)) ? + checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName()) : null; + if (stmt instanceof GridSqlMerge) return UpdatePlan.forMerge(tbl.dataTable(), colNames, colTypes, keySupplier, valSupplier, keyColIdx, - valColIdx, sel.getSQL(), !isTwoStepSubqry, rowsNum); + valColIdx, selectSql, !isTwoStepSubqry, rowsNum, distributed); else return UpdatePlan.forInsert(tbl.dataTable(), colNames, colTypes, keySupplier, valSupplier, keyColIdx, - valColIdx, sel.getSQL(), !isTwoStepSubqry, rowsNum); + valColIdx, selectSql, !isTwoStepSubqry, rowsNum, distributed); } /** * Prepare update plan for UPDATE or DELETE. * * @param stmt UPDATE or DELETE statement. + * @param loc Local query flag. + * @param idx Indexing. + * @param conn Connection. + * @param fieldsQuery Original query. * @param errKeysPos index to inject param for re-run keys at. Null if it's not a re-run plan. * @return Update plan. * @throws IgniteCheckedException if failed. 
*/ - private static UpdatePlan planForUpdate(GridSqlStatement stmt, @Nullable Integer errKeysPos) + private static UpdatePlan planForUpdate(GridSqlStatement stmt, boolean loc, IgniteH2Indexing idx, + @Nullable Connection conn, @Nullable SqlFieldsQuery fieldsQuery, @Nullable Integer errKeysPos) throws IgniteCheckedException { GridSqlElement target; @@ -286,12 +316,23 @@ else if (stmt instanceof GridSqlDelete) { sel = DmlAstUtils.selectForUpdate((GridSqlUpdate) stmt, errKeysPos); - return UpdatePlan.forUpdate(gridTbl, colNames, colTypes, newValSupplier, valColIdx, sel.getSQL()); + String selectSql = sel.getSQL(); + + UpdatePlan.DistributedPlanInfo distributed = F.isEmpty(selectSql) ? null : + checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName()); + + return UpdatePlan.forUpdate(gridTbl, colNames, colTypes, newValSupplier, valColIdx, selectSql, + distributed); } else { sel = DmlAstUtils.selectForDelete((GridSqlDelete) stmt, errKeysPos); - return UpdatePlan.forDelete(gridTbl, sel.getSQL()); + String selectSql = sel.getSQL(); + + UpdatePlan.DistributedPlanInfo distributed = F.isEmpty(selectSql) ? null : + checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName()); + + return UpdatePlan.forDelete(gridTbl, selectSql, distributed); } } } @@ -493,6 +534,62 @@ private static boolean updateAffectsKeyColumns(GridH2Table gridTbl, Set return false; } + /** + * Checks whether the given update plan can be distributed and returns additional info. + * + * @param idx Indexing. + * @param conn Connection. + * @param fieldsQry Initial update query. + * @param loc Local query flag. + * @param selectQry Derived select query. + * @param cacheName Cache name. + * @return distributed update plan info, or {@code null} if cannot be distributed. + * @throws IgniteCheckedException if failed. 
+ */ + private static UpdatePlan.DistributedPlanInfo checkPlanCanBeDistributed(IgniteH2Indexing idx, + Connection conn, SqlFieldsQuery fieldsQry, boolean loc, String selectQry, String cacheName) + throws IgniteCheckedException { + + if (loc || !isSkipReducerOnUpdateQuery(fieldsQry)) + return null; + + assert conn != null; + + try { + // Get a new prepared statement for derived select query. + try (PreparedStatement stmt = conn.prepareStatement(selectQry)) { + idx.bindParameters(stmt, F.asList(fieldsQry.getArgs())); + + GridCacheTwoStepQuery qry = GridSqlQuerySplitter.split(conn, + GridSqlQueryParser.prepared(stmt), + fieldsQry.getArgs(), + fieldsQry.isCollocated(), + fieldsQry.isDistributedJoins(), + fieldsQry.isEnforceJoinOrder(), idx); + + boolean distributed = qry.skipMergeTable() && qry.mapQueries().size() == 1 && + !qry.mapQueries().get(0).hasSubQueries(); + + return distributed ? new UpdatePlan.DistributedPlanInfo(qry.isReplicatedOnly(), + idx.collectCacheIds(CU.cacheId(cacheName), qry)): null; + } + } + catch (SQLException e) { + throw new IgniteCheckedException(e); + } + } + + /** + * Checks whether query flags are compatible with server side update. + * + * @param qry Query. + * @return {@code true} if update can be distributed. + */ + public static boolean isSkipReducerOnUpdateQuery(SqlFieldsQuery qry) { + return qry != null && !qry.isLocal() && + qry instanceof SqlFieldsQueryEx && ((SqlFieldsQueryEx)qry).isSkipReducerOnUpdate(); + } + /** * Simple supplier that just takes specified element of a given row. 
*/ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQuerySplitter.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQuerySplitter.java index 7f28203728e0d..c96b4866cafc4 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQuerySplitter.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQuerySplitter.java @@ -1509,6 +1509,19 @@ private void splitSelect( rdcQry.distinct(true); } + // -- SUB-QUERIES + boolean hasSubQueries = hasSubQueries(mapQry.where()) || hasSubQueries(mapQry.from()); + + if (!hasSubQueries) { + for (int i = 0; i < mapQry.columns(false).size(); i++) { + if (hasSubQueries(mapQry.column(i))) { + hasSubQueries = true; + + break; + } + } + } + // Replace the given select with generated reduce query in the parent. prnt.child(childIdx, rdcQry); @@ -1519,6 +1532,7 @@ private void splitSelect( map.columns(collectColumns(mapExps)); map.sortColumns(mapQry.sort()); map.partitioned(hasPartitionedTables(mapQry)); + map.hasSubQueries(hasSubQueries); if (map.isPartitioned()) map.derivedPartitions(derivePartitionsFromQuery(mapQry, ctx)); @@ -1542,6 +1556,25 @@ private static boolean hasPartitionedTables(GridSqlAst ast) { return false; } + /** + * @param ast Map query AST. + * @return {@code true} If the given AST has sub-queries. + */ + private boolean hasSubQueries(GridSqlAst ast) { + if (ast == null) + return false; + + if (ast instanceof GridSqlSubquery) + return true; + + for (int childIdx = 0; childIdx < ast.size(); childIdx++) { + if (hasSubQueries(ast.child(childIdx))) + return true; + } + + return false; + } + /** * @param sqlQry Query. * @param qryAst Select AST. 
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/DistributedUpdateRun.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/DistributedUpdateRun.java new file mode 100644 index 0000000000000..a783b8a26b13c --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/DistributedUpdateRun.java @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query.h2.twostep; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.UUID; +import javax.cache.CacheException; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.query.GridRunningQueryInfo; +import org.apache.ignite.internal.processors.query.h2.UpdateResult; +import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlResponse; +import org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.typedef.F; + +/** + * Context for DML operation on reducer node. + */ +class DistributedUpdateRun { + /** Expected number of responses. 
*/ + private final int nodeCount; + + /** Registers nodes that have responded. */ + private final HashSet rspNodes; + + /** Accumulates total number of updated rows. */ + private long updCntr = 0L; + + /** Accumulates error keys. */ + private HashSet errorKeys; + + /** Query info. */ + private final GridRunningQueryInfo qry; + + /** Result future. */ + private final GridFutureAdapter fut = new GridFutureAdapter<>(); + + /** + * Constructor. + * + * @param nodeCount Number of nodes to await results from. + * @param qry Query info. + */ + DistributedUpdateRun(int nodeCount, GridRunningQueryInfo qry) { + this.nodeCount = nodeCount; + this.qry = qry; + + rspNodes = new HashSet<>(nodeCount); + } + + /** + * @return Query info. + */ + GridRunningQueryInfo queryInfo() { + return qry; + } + + /** + * @return Result future. + */ + GridFutureAdapter future() { + return fut; + } + + /** + * Handle disconnection. + * @param e Pre-formatted error. + */ + void handleDisconnect(CacheException e) { + fut.onDone(new IgniteCheckedException("Update failed because client node have disconnected.", e)); + } + + /** + * Handle leave of a node. + * + * @param nodeId Node id. + */ + void handleNodeLeft(UUID nodeId) { + fut.onDone(new IgniteCheckedException("Update failed because map node left topology [nodeId=" + nodeId + "]")); + } + + /** + * Handle response from remote node. + * + * @param id Node id. + * @param msg Response message. + */ + void handleResponse(UUID id, GridH2DmlResponse msg) { + synchronized (this) { + if (!rspNodes.add(id)) + return; // ignore duplicated messages + + String err = msg.error(); + + if (err != null) { + fut.onDone(new IgniteCheckedException("Update failed. " + (F.isEmpty(err) ? 
"" : err) + "[reqId=" + + msg.requestId() + ", node=" + id + "].")); + + return; + } + + if (!F.isEmpty(msg.errorKeys())) { + List errList = Arrays.asList(msg.errorKeys()); + + if (errorKeys == null) + errorKeys = new HashSet<>(errList); + else + errorKeys.addAll(errList); + } + + updCntr += msg.updateCounter(); + + if (rspNodes.size() == nodeCount) + fut.onDone(new UpdateResult(updCntr, errorKeys == null ? null : errorKeys.toArray())); + } + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java index 0cc417281ddcd..77b928f062e27 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java @@ -21,6 +21,7 @@ import java.sql.ResultSet; import java.util.AbstractCollection; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Iterator; @@ -30,12 +31,14 @@ import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import javax.cache.CacheException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cache.query.QueryCancelledException; +import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.events.CacheQueryExecutedEvent; import org.apache.ignite.events.DiscoveryEvent; @@ -54,8 +57,10 @@ import org.apache.ignite.internal.processors.cache.query.CacheQueryType; import 
org.apache.ignite.internal.processors.cache.query.GridCacheQueryMarshallable; import org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery; +import org.apache.ignite.internal.processors.query.GridQueryCancel; import org.apache.ignite.internal.processors.query.h2.H2Utils; import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import org.apache.ignite.internal.processors.query.h2.UpdateResult; import org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode; import org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext; import org.apache.ignite.internal.processors.query.h2.opt.GridH2RetryException; @@ -63,6 +68,8 @@ import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryFailResponse; import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryNextPageRequest; import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryNextPageResponse; +import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest; +import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlResponse; import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest; import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.typedef.CI1; @@ -71,6 +78,7 @@ import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.plugin.extensions.communication.Message; import org.apache.ignite.thread.IgniteThread; +import org.apache.ignite.spi.indexing.IndexingQueryFilter; import org.h2.jdbc.JdbcResultSet; import org.h2.value.Value; import org.jetbrains.annotations.Nullable; @@ -208,6 +216,8 @@ else if (msg instanceof GridQueryNextPageRequest) onNextPageRequest(node, (GridQueryNextPageRequest)msg); else if (msg instanceof GridQueryCancelRequest) onCancel(node, (GridQueryCancelRequest)msg); + else if (msg instanceof GridH2DmlRequest) + onDmlRequest(node, (GridH2DmlRequest)msg); 
else processed = false; @@ -733,6 +743,102 @@ private void onQueryRequest0( } } + /** + * @param node Node. + * @param req DML request. + */ + private void onDmlRequest(final ClusterNode node, final GridH2DmlRequest req) throws IgniteCheckedException { + int[] parts = req.queryPartitions(); + + List cacheIds = req.caches(); + + long reqId = req.requestId(); + + AffinityTopologyVersion topVer = req.topologyVersion(); + + List reserved = new ArrayList<>(); + + if (!reservePartitions(cacheIds, topVer, parts, reserved)) { + U.error(log, "Failed to reserve partitions for DML request. [localNodeId=" + ctx.localNodeId() + + ", nodeId=" + node.id() + ", reqId=" + req.requestId() + ", cacheIds=" + cacheIds + + ", topVer=" + topVer + ", parts=" + Arrays.toString(parts) + ']'); + + sendUpdateResponse(node, reqId, null, "Failed to reserve partitions for DML request. " + + "Explanation (Retry your request when re-balancing is over)."); + + return; + } + + MapNodeResults nodeResults = resultsForNode(node.id()); + + try { + IndexingQueryFilter filter = h2.backupFilter(topVer, parts); + + GridQueryCancel cancel = nodeResults.putUpdate(reqId); + + SqlFieldsQuery fldsQry = new SqlFieldsQuery(req.query()); + + if (req.parameters() != null) + fldsQry.setArgs(req.parameters()); + + fldsQry.setEnforceJoinOrder(req.isFlagSet(GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER)); + fldsQry.setTimeout(req.timeout(), TimeUnit.MILLISECONDS); + fldsQry.setPageSize(req.pageSize()); + fldsQry.setLocal(true); + + boolean local = true; + + final boolean replicated = req.isFlagSet(GridH2QueryRequest.FLAG_REPLICATED); + + if (!replicated && !F.isEmpty(cacheIds) && + findFirstPartitioned(cacheIds).config().getQueryParallelism() > 1) { + fldsQry.setDistributedJoins(true); + + local = false; + } + + UpdateResult updRes = h2.mapDistributedUpdate(req.schemaName(), fldsQry, filter, cancel, local); + + GridCacheContext mainCctx = + !F.isEmpty(cacheIds) ? 
ctx.cache().context().cacheContext(cacheIds.get(0)) : null; + + boolean evt = local && mainCctx != null && ctx.event().isRecordable(EVT_CACHE_QUERY_EXECUTED); + + if (evt) { + ctx.event().record(new CacheQueryExecutedEvent<>( + node, + "SQL query executed.", + EVT_CACHE_QUERY_EXECUTED, + CacheQueryType.SQL.name(), + mainCctx.name(), + null, + req.query(), + null, + null, + req.parameters(), + node.id(), + null)); + } + + sendUpdateResponse(node, reqId, updRes, null); + } + catch (Exception e) { + U.error(log, "Error processing dml request. [localNodeId=" + ctx.localNodeId() + + ", nodeId=" + node.id() + ", req=" + req + ']', e); + + sendUpdateResponse(node, reqId, null, e.getMessage()); + } + finally { + if (!F.isEmpty(reserved)) { + // Release reserved partitions. + for (int i = 0; i < reserved.size(); i++) + reserved.get(i).release(); + } + + nodeResults.removeUpdate(reqId); + } + } + /** * @param node Node. * @param qryReqId Query request ID. @@ -757,6 +863,36 @@ private void sendError(ClusterNode node, long qryReqId, Throwable err) { } } + /** + * Sends update response for DML request. + * + * @param node Node. + * @param reqId Request id. + * @param updResult Update result. + * @param error Error message. + */ + @SuppressWarnings("deprecation") + private void sendUpdateResponse(ClusterNode node, long reqId, UpdateResult updResult, String error) { + try { + GridH2DmlResponse rsp = new GridH2DmlResponse(reqId, updResult == null ? 0 : updResult.counter(), + updResult == null ? 
null : updResult.errorKeys(), error); + + if (log.isDebugEnabled()) + log.debug("Sending: [localNodeId=" + ctx.localNodeId() + ", node=" + node.id() + ", msg=" + rsp + "]"); + + if (node.isLocal()) + h2.reduceQueryExecutor().onMessage(ctx.localNodeId(), rsp); + else { + rsp.marshall(ctx.config().getMarshaller()); + + ctx.io().sendToGridTopic(node, GridTopic.TOPIC_QUERY, rsp, QUERY_POOL); + } + } + catch (Exception e) { + U.error(log, "Failed to send message.", e); + } + } + /** * @param node Node. * @param req Request. diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java index 8638794f8785e..f85cd94a10d62 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java @@ -33,6 +33,7 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -59,6 +60,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState; import org.apache.ignite.internal.processors.cache.query.GridCacheQueryMarshallable; +import org.apache.ignite.internal.processors.cache.query.GridCacheQueryType; import org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery; import org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery; import org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator; @@ -67,6 +69,7 @@ import org.apache.ignite.internal.processors.query.h2.H2FieldsIterator; import 
org.apache.ignite.internal.processors.query.h2.H2Utils; import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import org.apache.ignite.internal.processors.query.h2.UpdateResult; import org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlSortColumn; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlType; @@ -74,6 +77,8 @@ import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryFailResponse; import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryNextPageRequest; import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryNextPageResponse; +import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest; +import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlResponse; import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest; import org.apache.ignite.internal.util.GridIntIterator; import org.apache.ignite.internal.util.GridIntList; @@ -130,6 +135,9 @@ public class GridReduceQueryExecutor { /** */ private final ConcurrentMap runs = new ConcurrentHashMap8<>(); + /** Contexts of running DML requests. 
*/ + private final ConcurrentMap updRuns = new ConcurrentHashMap<>(); + /** */ private volatile List fakeTbls = Collections.emptyList(); @@ -197,6 +205,10 @@ public void start(final GridKernalContext ctx, final IgniteH2Indexing h2) throws } } } + + for (DistributedUpdateRun r : updRuns.values()) + r.handleNodeLeft(nodeId); + } }, EventType.EVT_NODE_FAILED, EventType.EVT_NODE_LEFT); } @@ -229,6 +241,8 @@ public void onMessage(UUID nodeId, Object msg) { onNextPage(node, (GridQueryNextPageResponse)msg); else if (msg instanceof GridQueryFailResponse) onFail(node, (GridQueryFailResponse)msg); + else if (msg instanceof GridH2DmlResponse) + onDmlResponse(node, (GridH2DmlResponse)msg); else processed = false; @@ -575,25 +589,11 @@ public Iterator> query( if (qry.isLocal()) nodes = singletonList(ctx.discovery().localNode()); else { - if (isPreloadingActive(cacheIds)) { - if (isReplicatedOnly) - nodes = replicatedUnstableDataNodes(cacheIds); - else { - partsMap = partitionedUnstableDataNodes(cacheIds); - - if (partsMap != null) { - qryMap = narrowForQuery(partsMap, parts); - - nodes = qryMap == null ? null : qryMap.keySet(); - } - } - } - else { - qryMap = stableDataNodes(isReplicatedOnly, topVer, cacheIds, parts); + NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly); - if (qryMap != null) - nodes = qryMap.keySet(); - } + nodes = nodesParts.nodes(); + partsMap = nodesParts.partitionsMap(); + qryMap = nodesParts.queryPartitionsMap(); if (nodes == null) continue; // Retry. @@ -844,6 +844,153 @@ public Iterator> query( } } + /** + * + * @param schemaName Schema name. + * @param cacheIds Cache ids. + * @param selectQry Select query. + * @param params SQL parameters. + * @param enforceJoinOrder Enforce join order of tables. + * @param pageSize Page size. + * @param timeoutMillis Timeout. + * @param parts Partitions. + * @param isReplicatedOnly Whether query uses only replicated caches. + * @param cancel Cancel state. 
+ * @return Update result, or {@code null} when some map node doesn't support distributed DML. + */ + public UpdateResult update( + String schemaName, + List cacheIds, + String selectQry, + Object[] params, + boolean enforceJoinOrder, + int pageSize, + int timeoutMillis, + final int[] parts, + boolean isReplicatedOnly, + GridQueryCancel cancel + ) { + AffinityTopologyVersion topVer = h2.readyTopologyVersion(); + + NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly); + + final long reqId = qryIdGen.incrementAndGet(); + + final GridRunningQueryInfo qryInfo = new GridRunningQueryInfo(reqId, selectQry, GridCacheQueryType.SQL_FIELDS, + schemaName, U.currentTimeMillis(), cancel, false); + + Collection nodes = nodesParts.nodes(); + + if (nodes == null) + throw new CacheException("Failed to determine nodes participating in the update. " + + "Explanation (Retry update once topology recovers)."); + + if (isReplicatedOnly) { + ClusterNode locNode = ctx.discovery().localNode(); + + if (nodes.contains(locNode)) + nodes = singletonList(locNode); + else + nodes = singletonList(F.rand(nodes)); + } + + for (ClusterNode n : nodes) { + if (!n.version().greaterThanEqual(2, 3, 0)) { + log.warning("Server-side DML optimization is skipped because map node does not support it. " + + "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "]."); + + return null; + } + } + + final DistributedUpdateRun r = new DistributedUpdateRun(nodes.size(), qryInfo); + + int flags = enforceJoinOrder ? 
GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0; + + if (isReplicatedOnly) + flags |= GridH2QueryRequest.FLAG_REPLICATED; + + GridH2DmlRequest req = new GridH2DmlRequest() + .requestId(reqId) + .topologyVersion(topVer) + .caches(cacheIds) + .schemaName(schemaName) + .query(selectQry) + .pageSize(pageSize) + .parameters(params) + .timeout(timeoutMillis) + .flags(flags); + + updRuns.put(reqId, r); + + boolean release = false; + + try { + Map partsMap = (nodesParts.queryPartitionsMap() != null) ? + nodesParts.queryPartitionsMap() : nodesParts.partitionsMap(); + + ExplicitPartitionsSpecializer partsSpec = (parts == null) ? null : + new ExplicitPartitionsSpecializer(partsMap); + + final Collection finalNodes = nodes; + + cancel.set(new Runnable() { + @Override public void run() { + r.future().onCancelled(); + + send(finalNodes, new GridQueryCancelRequest(reqId), null, false); + } + }); + + // send() logs the debug message + if (send(nodes, req, partsSpec, false)) + return r.future().get(); + + throw new CacheException("Failed to send update request to participating nodes."); + } + catch (IgniteCheckedException | RuntimeException e) { + release = true; + + U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e); + + throw new CacheException("Failed to run update. " + e.getMessage(), e); + } + finally { + if (release) + send(nodes, new GridQueryCancelRequest(reqId), null, false); + + if (!updRuns.remove(reqId, r)) + U.warn(log, "Update run was already removed: " + reqId); + } + } + + /** + * Process response for DML request. + * + * @param node Node. + * @param msg Message. + */ + private void onDmlResponse(final ClusterNode node, GridH2DmlResponse msg) { + try { + long reqId = msg.requestId(); + + DistributedUpdateRun r = updRuns.get(reqId); + + if (r == null) { + U.warn(log, "Unexpected dml response (will ignore). 
[localNodeId=" + ctx.localNodeId() + ", nodeId=" + + node.id() + ", msg=" + msg.toString() + ']'); + + return; + } + + r.handleResponse(node.id(), msg); + } + catch (Exception e) { + U.error(log, "Error in dml response processing. [localNodeId=" + ctx.localNodeId() + ", nodeId=" + + node.id() + ", msg=" + msg.toString() + ']', e); + } + } + /** * @param cacheIds Cache IDs. * @return The first partitioned cache context. @@ -1308,6 +1455,44 @@ private static Map convert(Map m) { return res; } + /** + * Evaluates nodes and nodes to partitions map given a list of cache ids, topology version and partitions. + * + * @param cacheIds Cache ids. + * @param topVer Topology version. + * @param parts Partitions array. + * @param isReplicatedOnly Allow only replicated caches. + * @return Result. + */ + private NodesForPartitionsResult nodesForPartitions(List cacheIds, AffinityTopologyVersion topVer, + int[] parts, boolean isReplicatedOnly) { + Collection nodes = null; + Map partsMap = null; + Map qryMap = null; + + if (isPreloadingActive(cacheIds)) { + if (isReplicatedOnly) + nodes = replicatedUnstableDataNodes(cacheIds); + else { + partsMap = partitionedUnstableDataNodes(cacheIds); + + if (partsMap != null) { + qryMap = narrowForQuery(partsMap, parts); + + nodes = qryMap == null ? null : qryMap.keySet(); + } + } + } + else { + qryMap = stableDataNodes(isReplicatedOnly, topVer, cacheIds, parts); + + if (qryMap != null) + nodes = qryMap.keySet(); + } + + return new NodesForPartitionsResult(nodes, partsMap, qryMap); + } + /** * @param conn Connection. * @param qry Query. 
@@ -1403,6 +1588,9 @@ public void onDisconnected(IgniteFuture reconnectFut) { for (Map.Entry e : runs.entrySet()) e.getValue().disconnected(err); + + for (DistributedUpdateRun r: updRuns.values()) + r.handleDisconnect(err); } /** @@ -1421,6 +1609,11 @@ public Collection longRunningQueries(long duration) { res.add(run.queryInfo()); } + for (DistributedUpdateRun upd: updRuns.values()) { + if (upd.queryInfo().longQuery(curTime, duration)) + res.add(upd.queryInfo()); + } + return res; } @@ -1435,6 +1628,12 @@ public void cancelQueries(Collection queries) { if (run != null) run.queryInfo().cancel(); + else { + DistributedUpdateRun upd = updRuns.get(qryId); + + if (upd != null) + upd.queryInfo().cancel(); + } } } @@ -1478,11 +1677,64 @@ public ExplicitPartitionsSpecializer(Map partsMap) { /** {@inheritDoc} */ @Override public Message apply(ClusterNode node, Message msg) { - GridH2QueryRequest rq = new GridH2QueryRequest((GridH2QueryRequest)msg); + if (msg instanceof GridH2QueryRequest) { + GridH2QueryRequest rq = new GridH2QueryRequest((GridH2QueryRequest)msg); + + rq.queryPartitions(toArray(partsMap.get(node))); + + return rq; + } else if (msg instanceof GridH2DmlRequest) { + GridH2DmlRequest rq = new GridH2DmlRequest((GridH2DmlRequest)msg); + + rq.queryPartitions(toArray(partsMap.get(node))); + + return rq; + } + + return msg; + } + } + + /** + * Result of nodes to partitions mapping for a query or update. + */ + static class NodesForPartitionsResult { + /** */ + final Collection nodes; - rq.queryPartitions(toArray(partsMap.get(node))); + /** */ + final Map partsMap; - return rq; + /** */ + final Map qryMap; + + /** */ + NodesForPartitionsResult(Collection nodes, Map partsMap, + Map qryMap) { + this.nodes = nodes; + this.partsMap = partsMap; + this.qryMap = qryMap; + } + + /** + * @return Collection of nodes a message shall be sent to. + */ + Collection nodes() { + return nodes; + } + + /** + * @return Maps a node to partition array. 
+ */ + Map partitionsMap() { + return partsMap; + } + + /** + * @return Maps a node to partition array. + */ + Map queryPartitionsMap() { + return qryMap; } } } \ No newline at end of file diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapNodeResults.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapNodeResults.java index 2d20c8d52593e..c0637eae3cac2 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapNodeResults.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapNodeResults.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.processors.query.h2.twostep; +import org.apache.ignite.internal.processors.query.GridQueryCancel; import org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashMap; import org.jsr166.ConcurrentHashMap8; @@ -32,6 +33,9 @@ class MapNodeResults { /** */ private final ConcurrentMap res = new ConcurrentHashMap8<>(); + /** Cancel state for update requests. */ + private final ConcurrentMap updCancels = new ConcurrentHashMap8<>(); + /** */ private final GridBoundedConcurrentLinkedHashMap qryHist = new GridBoundedConcurrentLinkedHashMap<>(1024, 1024, 0.75f, 64, PER_SEGMENT_Q); @@ -88,6 +92,12 @@ public void cancelRequest(long reqId) { removed.cancel(true); } } + + // Cancel update request + GridQueryCancel updCancel = updCancels.remove(reqId); + + if (updCancel != null) + updCancel.cancel(); } /** @@ -110,12 +120,35 @@ public MapQueryResults put(long reqId, int segmentId, MapQueryResults qr) { return res.put(new MapRequestKey(nodeId, reqId, segmentId), qr); } + /** + * @param reqId Request id. + * @return Cancel state. + */ + public GridQueryCancel putUpdate(long reqId) { + GridQueryCancel cancel = new GridQueryCancel(); + + updCancels.put(reqId, cancel); + + return cancel; + } + + /** + * @param reqId Request id. 
+ */ + public void removeUpdate(long reqId) { + updCancels.remove(reqId); + } + /** * Cancel all node queries. */ public void cancelAll() { for (MapQueryResults ress : res.values()) ress.cancel(true); + + // Cancel update requests + for (GridQueryCancel upd: updCancels.values()) + upd.cancel(); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2DmlRequest.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2DmlRequest.java new file mode 100644 index 0000000000000..e40bc2da87ab6 --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2DmlRequest.java @@ -0,0 +1,516 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ignite.internal.processors.query.h2.twostep.msg; + +import java.io.Externalizable; +import java.nio.ByteBuffer; +import java.util.List; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.GridDirectCollection; +import org.apache.ignite.internal.GridDirectTransient; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.binary.BinaryMarshaller; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.query.GridCacheQueryMarshallable; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.marshaller.Marshaller; +import org.apache.ignite.plugin.extensions.communication.Message; +import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; +import org.apache.ignite.plugin.extensions.communication.MessageReader; +import org.apache.ignite.plugin.extensions.communication.MessageWriter; + +import static org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery.EMPTY_PARAMS; + +/** + * Request for DML operation on remote node. + */ +public class GridH2DmlRequest implements Message, GridCacheQueryMarshallable { + /** */ + private static final long serialVersionUID = 0L; + + /** Request id. */ + @GridToStringInclude + private long reqId; + + /** Cache identifiers. */ + @GridToStringInclude + @GridDirectCollection(Integer.class) + private List caches; + + /** Topology version. */ + @GridToStringInclude + private AffinityTopologyVersion topVer; + + /** Query partitions. */ + @GridToStringInclude + private int[] qryParts; + + /** Page size. */ + private int pageSize; + + /** Query. */ + @GridToStringInclude + private String qry; + + /** Flags. 
*/ + private byte flags; + + /** Timeout. */ + private int timeout; + + /** Query parameters. */ + @GridToStringInclude(sensitive = true) + @GridDirectTransient + private Object[] params; + + /** Query parameters as bytes. */ + private byte[] paramsBytes; + + /** Schema name. */ + @GridToStringInclude + private String schemaName; + + /** + * Required by {@link Externalizable} + */ + public GridH2DmlRequest() { + // No-op. + } + + /** + * @param req Request. + */ + public GridH2DmlRequest(GridH2DmlRequest req) { + reqId = req.reqId; + caches = req.caches; + topVer = req.topVer; + qryParts = req.qryParts; + pageSize = req.pageSize; + qry = req.qry; + flags = req.flags; + timeout = req.timeout; + params = req.params; + paramsBytes = req.paramsBytes; + schemaName = req.schemaName; + } + + /** + * @return Parameters. + */ + public Object[] parameters() { + return params; + } + + /** + * @param params Parameters. + * @return {@code this}. + */ + public GridH2DmlRequest parameters(Object[] params) { + if (params == null) + params = EMPTY_PARAMS; + + this.params = params; + + return this; + } + + /** + * @param reqId Request ID. + * @return {@code this}. + */ + public GridH2DmlRequest requestId(long reqId) { + this.reqId = reqId; + + return this; + } + + /** + * @return Request ID. + */ + public long requestId() { + return reqId; + } + + /** + * @param caches Caches. + * @return {@code this}. + */ + public GridH2DmlRequest caches(List caches) { + this.caches = caches; + + return this; + } + + /** + * @return Caches. + */ + public List caches() { + return caches; + } + + /** + * @param topVer Topology version. + * @return {@code this}. + */ + public GridH2DmlRequest topologyVersion(AffinityTopologyVersion topVer) { + this.topVer = topVer; + + return this; + } + + /** + * @return Topology version. + */ + public AffinityTopologyVersion topologyVersion() { + return topVer; + } + + /** + * @return Query partitions. 
+ */ + public int[] queryPartitions() { + return qryParts; + } + + /** + * @param qryParts Query partitions. + * @return {@code this}. + */ + public GridH2DmlRequest queryPartitions(int[] qryParts) { + this.qryParts = qryParts; + + return this; + } + + /** + * @param pageSize Page size. + * @return {@code this}. + */ + public GridH2DmlRequest pageSize(int pageSize) { + this.pageSize = pageSize; + + return this; + } + + /** + * @return Page size. + */ + public int pageSize() { + return pageSize; + } + + /** + * @param qry SQL Query. + * @return {@code this}. + */ + public GridH2DmlRequest query(String qry) { + this.qry = qry; + + return this; + } + + /** + * @return SQL Query. + */ + public String query() { + return qry; + } + + /** + * @param flags Flags. + * @return {@code this}. + */ + public GridH2DmlRequest flags(int flags) { + assert flags >= 0 && flags <= 255: flags; + + this.flags = (byte)flags; + + return this; + } + + /** + * @param flags Flags to check. + * @return {@code true} If all the requested flags are set to {@code true}. + */ + public boolean isFlagSet(int flags) { + return (this.flags & flags) == flags; + } + + /** + * @return Timeout. + */ + public int timeout() { + return timeout; + } + + /** + * @param timeout New timeout. + * @return {@code this}. + */ + public GridH2DmlRequest timeout(int timeout) { + this.timeout = timeout; + + return this; + } + + /** + * @return Schema name. + */ + public String schemaName() { + return schemaName; + } + + /** + * @param schemaName Schema name. + * @return {@code this}. 
+ */ + public GridH2DmlRequest schemaName(String schemaName) { + this.schemaName = schemaName; + + return this; + } + + /** {@inheritDoc} */ + @Override public void marshall(Marshaller m) { + if (paramsBytes != null) + return; + + assert params != null; + + try { + paramsBytes = U.marshal(m, params); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + } + + /** {@inheritDoc} */ + @SuppressWarnings("IfMayBeConditional") + @Override public void unmarshall(Marshaller m, GridKernalContext ctx) { + if (params != null) + return; + + assert paramsBytes != null; + + try { + final ClassLoader ldr = U.resolveClassLoader(ctx.config()); + + if (m instanceof BinaryMarshaller) + // To avoid deserializing of enum types. + params = ((BinaryMarshaller)m).binaryMarshaller().unmarshal(paramsBytes, ldr); + else + params = U.unmarshal(m, paramsBytes, ldr); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + } + + /** {@inheritDoc} */ + @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { + writer.setBuffer(buf); + + if (!writer.isHeaderWritten()) { + if (!writer.writeHeader(directType(), fieldsCount())) + return false; + + writer.onHeaderWritten(); + } + + switch (writer.state()) { + case 0: + if (!writer.writeCollection("caches", caches, MessageCollectionItemType.INT)) + return false; + + writer.incrementState(); + + case 1: + if (!writer.writeByte("flags", flags)) + return false; + + writer.incrementState(); + + case 2: + if (!writer.writeInt("pageSize", pageSize)) + return false; + + writer.incrementState(); + + case 3: + if (!writer.writeByteArray("paramsBytes", paramsBytes)) + return false; + + writer.incrementState(); + + case 4: + if (!writer.writeString("qry", qry)) + return false; + + writer.incrementState(); + + case 5: + if (!writer.writeIntArray("qryParts", qryParts)) + return false; + + writer.incrementState(); + + case 6: + if (!writer.writeLong("reqId", reqId)) + return false; + + 
writer.incrementState(); + + case 7: + if (!writer.writeString("schemaName", schemaName)) + return false; + + writer.incrementState(); + + case 8: + if (!writer.writeInt("timeout", timeout)) + return false; + + writer.incrementState(); + + case 9: + if (!writer.writeMessage("topVer", topVer)) + return false; + + writer.incrementState(); + + } + + return true; + } + + /** {@inheritDoc} */ + @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) { + reader.setBuffer(buf); + + if (!reader.beforeMessageRead()) + return false; + + switch (reader.state()) { + case 0: + caches = reader.readCollection("caches", MessageCollectionItemType.INT); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 1: + flags = reader.readByte("flags"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 2: + pageSize = reader.readInt("pageSize"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 3: + paramsBytes = reader.readByteArray("paramsBytes"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 4: + qry = reader.readString("qry"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 5: + qryParts = reader.readIntArray("qryParts"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 6: + reqId = reader.readLong("reqId"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 7: + schemaName = reader.readString("schemaName"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 8: + timeout = reader.readInt("timeout"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 9: + topVer = reader.readMessage("topVer"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + } + + return reader.afterMessageRead(GridH2DmlRequest.class); + } + + /** {@inheritDoc} */ + @Override 
public short directType() { + return -55; + } + + /** {@inheritDoc} */ + @Override public byte fieldsCount() { + return 10; + } + + /** {@inheritDoc} */ + @Override public void onAckReceived() { + // No-op. + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(GridH2DmlRequest.class, this); + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2DmlResponse.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2DmlResponse.java new file mode 100644 index 0000000000000..808ff9eb160aa --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2DmlResponse.java @@ -0,0 +1,250 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.query.h2.twostep.msg; + +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.GridDirectTransient; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.binary.BinaryMarshaller; +import org.apache.ignite.internal.processors.cache.query.GridCacheQueryMarshallable; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.marshaller.Marshaller; +import org.apache.ignite.plugin.extensions.communication.Message; +import org.apache.ignite.plugin.extensions.communication.MessageReader; +import org.apache.ignite.plugin.extensions.communication.MessageWriter; + +/** + * Response to remote DML request. + */ +public class GridH2DmlResponse implements Message, GridCacheQueryMarshallable { + /** */ + private static final long serialVersionUID = 0L; + + /** Request id. */ + @GridToStringInclude + private long reqId; + + /** Number of updated rows. */ + @GridToStringInclude + private long updCnt; + + /** Error message. */ + @GridToStringInclude + private String err; + + /** Keys that failed. */ + @GridToStringInclude + @GridDirectTransient + private Object[] errKeys; + + /** Keys that failed (after marshalling). */ + private byte[] errKeysBytes; + + /** + * Default constructor. + */ + public GridH2DmlResponse() { + // No-op. + } + + /** + * Constructor. + * + * @param reqId Request id. + * @param updCnt Updated row number. + * @param errKeys Erroneous keys. + * @param error Error message. + */ + public GridH2DmlResponse(long reqId, long updCnt, Object[] errKeys, String error) { + this.reqId = reqId; + this.updCnt = updCnt; + this.errKeys = errKeys; + this.err = error; + } + + /** + * @return Request id. 
+ */ + public long requestId() { + return reqId; + } + + /** + * @return Update counter. + */ + public long updateCounter() { + return updCnt; + } + + /** + * @return Error keys. + */ + public Object[] errorKeys() { + return errKeys; + } + + /** + * @return Error message. + */ + public String error() { + return err; + } + + /** {@inheritDoc} */ + @Override public void marshall(Marshaller m) { + if (errKeysBytes != null || errKeys == null) + return; + + try { + errKeysBytes = U.marshal(m, errKeys); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + } + + /** {@inheritDoc} */ + @SuppressWarnings("IfMayBeConditional") + @Override public void unmarshall(Marshaller m, GridKernalContext ctx) { + if (errKeys != null || errKeysBytes == null) + return; + + try { + final ClassLoader ldr = U.resolveClassLoader(ctx.config()); + + if (m instanceof BinaryMarshaller) + // To avoid deserializing of enum types. + errKeys = ((BinaryMarshaller)m).binaryMarshaller().unmarshal(errKeysBytes, ldr); + else + errKeys = U.unmarshal(m, errKeysBytes, ldr); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(GridH2DmlResponse.class, this); + } + + /** {@inheritDoc} */ + @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { + writer.setBuffer(buf); + + if (!writer.isHeaderWritten()) { + if (!writer.writeHeader(directType(), fieldsCount())) + return false; + + writer.onHeaderWritten(); + } + + switch (writer.state()) { + case 0: + if (!writer.writeString("err", err)) + return false; + + writer.incrementState(); + + case 1: + if (!writer.writeByteArray("errKeysBytes", errKeysBytes)) + return false; + + writer.incrementState(); + + case 2: + if (!writer.writeLong("reqId", reqId)) + return false; + + writer.incrementState(); + + case 3: + if (!writer.writeLong("updCnt", updCnt)) + return false; + + writer.incrementState(); + + } + + return 
true; + } + + /** {@inheritDoc} */ + @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) { + reader.setBuffer(buf); + + if (!reader.beforeMessageRead()) + return false; + + switch (reader.state()) { + case 0: + err = reader.readString("err"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 1: + errKeysBytes = reader.readByteArray("errKeysBytes"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 2: + reqId = reader.readLong("reqId"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 3: + updCnt = reader.readLong("updCnt"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + } + + return reader.afterMessageRead(GridH2DmlResponse.class); + } + + /** {@inheritDoc} */ + @Override public short directType() { + return -56; + } + + /** {@inheritDoc} */ + @Override public byte fieldsCount() { + return 4; + } + + @Override public void onAckReceived() { + // No-op + } +} + diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2ValueMessageFactory.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2ValueMessageFactory.java index 18b1afbaa3857..3c133928ef7c7 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2ValueMessageFactory.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2ValueMessageFactory.java @@ -112,6 +112,12 @@ public class GridH2ValueMessageFactory implements MessageFactory { case -54: return new QueryTable(); + + case -55: + return new GridH2DmlRequest(); + + case -56: + return new GridH2DmlResponse(); } return null; diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest.java 
b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest.java new file mode 100644 index 0000000000000..e5efc06ef09e0 --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest.java @@ -0,0 +1,783 @@ +package org.apache.ignite.internal.processors.query; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import javax.cache.Cache; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.query.annotations.QuerySqlField; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +/** + * Tests for {@link SqlFieldsQueryEx#skipReducerOnUpdate} flag. + */ +public class IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest extends GridCommonAbstractTest { + /** IP finder. 
*/ + private static final TcpDiscoveryVmIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + + /** */ + private static int NODE_COUNT = 4; + + /** */ + private static String NODE_CLIENT = "client"; + + /** */ + private static String CACHE_ACCOUNT = "acc"; + + /** */ + private static String CACHE_REPORT = "rep"; + + /** */ + private static String CACHE_STOCK = "stock"; + + /** */ + private static String CACHE_TRADE = "trade"; + + /** */ + private static String CACHE_LIST = "list"; + + /** */ + private static IgniteEx client; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration c = super.getConfiguration(gridName); + + TcpDiscoverySpi disco = new TcpDiscoverySpi(); + + disco.setIpFinder(IP_FINDER); + + c.setDiscoverySpi(disco); + + List ccfgs = new ArrayList<>(); + + ccfgs.add(buildCacheConfiguration(CACHE_ACCOUNT)); + ccfgs.add(buildCacheConfiguration(CACHE_STOCK)); + ccfgs.add(buildCacheConfiguration(CACHE_TRADE)); + ccfgs.add(buildCacheConfiguration(CACHE_REPORT)); + ccfgs.add(buildCacheConfiguration(CACHE_LIST)); + + c.setCacheConfiguration(ccfgs.toArray(new CacheConfiguration[ccfgs.size()])); + + if (gridName.equals(NODE_CLIENT)) + c.setClientMode(true); + + return c; + } + + /** + * Creates a cache configuration. + * + * @param name Name of the cache. + * @return Cache configuration. 
+ */ + private CacheConfiguration buildCacheConfiguration(String name) { + if (name.equals(CACHE_ACCOUNT)) { + CacheConfiguration ccfg = new CacheConfiguration(CACHE_ACCOUNT); + + ccfg.setCacheMode(CacheMode.PARTITIONED); + + QueryEntity entity = new QueryEntity(Integer.class, Account.class); + + ccfg.setQueryEntities(Collections.singletonList(entity)); + + return ccfg; + } + if (name.equals(CACHE_STOCK)) { + CacheConfiguration ccfg = new CacheConfiguration(CACHE_STOCK); + + ccfg.setCacheMode(CacheMode.REPLICATED); + + QueryEntity entity = new QueryEntity(Integer.class, Stock.class); + + ccfg.setQueryEntities(Collections.singletonList(entity)); + + return ccfg; + } + if (name.equals(CACHE_TRADE)) { + CacheConfiguration ccfg = new CacheConfiguration(CACHE_TRADE); + + ccfg.setCacheMode(CacheMode.PARTITIONED); + + QueryEntity entity = new QueryEntity(Integer.class, Trade.class); + + ccfg.setQueryEntities(Collections.singletonList(entity)); + + return ccfg; + } + if (name.equals(CACHE_REPORT)) { + CacheConfiguration ccfg = new CacheConfiguration(CACHE_REPORT); + + ccfg.setCacheMode(CacheMode.PARTITIONED); + + QueryEntity entity = new QueryEntity(Integer.class, Report.class); + + ccfg.setQueryEntities(Collections.singletonList(entity)); + + return ccfg; + } + if (name.equals(CACHE_LIST)) { + CacheConfiguration ccfg = new CacheConfiguration(CACHE_LIST); + + ccfg.setCacheMode(CacheMode.PARTITIONED); + + QueryEntity entity = new QueryEntity(Integer.class, String.class); + + ccfg.setQueryEntities(Collections.singletonList(entity)); + + return ccfg; + } + + assert false; + + return null; + } + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + super.beforeTestsStarted(); + + startGrids(NODE_COUNT); + + client = (IgniteEx)startGrid(NODE_CLIENT); + + awaitPartitionMapExchange(); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + + super.afterTestsStopped(); + + stopAllGrids(); + } + + /** 
{@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + awaitPartitionMapExchange(); + + client.cache(CACHE_ACCOUNT).clear(); + client.cache(CACHE_STOCK).clear(); + client.cache(CACHE_TRADE).clear(); + client.cache(CACHE_REPORT).clear(); + client.cache(CACHE_LIST).clear(); + } + + /** + * + * @throws Exception If failed. + */ + public void testUpdate() throws Exception { + Map accounts = getAccounts(100, 1, 100); + + String text = "UPDATE \"acc\".Account SET depo = depo - ? WHERE depo > 0"; + + checkUpdate(client.cache(CACHE_ACCOUNT), accounts, new SqlFieldsQueryEx(text, false).setArgs(10)); + } + + /** + * + * @throws Exception If failed. + */ + public void testUpdateFastKey() throws Exception { + Map accounts = getAccounts(100, 1, 100); + + String text = "UPDATE \"acc\".Account SET depo = depo - ? WHERE _key = ?"; + + checkUpdate(client.cache(CACHE_ACCOUNT), accounts, + new SqlFieldsQueryEx(text, false).setArgs(10, 1)); + } + + /** + * + * @throws Exception If failed. + */ + public void testUpdateLimit() throws Exception { + Map accounts = getAccounts(100, 1, 100); + + String text = "UPDATE \"acc\".Account SET depo = depo - ? WHERE sn >= ? AND sn < ? LIMIT ?"; + + checkUpdate(client.cache(CACHE_ACCOUNT), accounts, + new SqlFieldsQueryEx(text, false).setArgs(10, 0, 10, 10)); + } + + /** + * + * @throws Exception If failed. + */ + public void testUpdateWhereSubquery() throws Exception { + Map accounts = getAccounts(100, 1, -100); + + Map trades = getTrades(100, 2); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + + String text = "UPDATE \"trade\".Trade t SET qty = ? " + + "WHERE accountId IN (SELECT p._key FROM \"acc\".Account p WHERE depo < ?)"; + + checkUpdate(client.cache(CACHE_TRADE), trades, + new SqlFieldsQueryEx(text, false).setArgs(0, 0)); + } + + /** + * + * @throws Exception If failed. 
+ */ + public void testUpdateSetSubquery() throws Exception { + Map accounts = getAccounts(100, 1, 1000); + Map trades = getTrades(100, 2); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + + String text = "UPDATE \"trade\".Trade t SET qty = " + + "(SELECT a.depo/t.price FROM \"acc\".Account a WHERE t.accountId = a._key)"; + + checkUpdate(client.cache(CACHE_TRADE), trades, + new SqlFieldsQueryEx(text, false)); + } + + /** + * + * @throws Exception If failed. + */ + public void testUpdateSetTableSubquery() throws Exception { + Map accounts = getAccounts(100, 1, 1000); + Map trades = getTrades(100, 2); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + + String text = "UPDATE \"trade\".Trade t SET (qty) = " + + "(SELECT a.depo/t.price FROM \"acc\".Account a WHERE t.accountId = a._key)"; + + checkUpdate(client.cache(CACHE_TRADE), trades, + new SqlFieldsQueryEx(text, false)); + } + + /** + * + * @throws Exception If failed. + */ + public void testInsertValues() throws Exception { + String text = "INSERT INTO \"acc\".Account (_key, name, sn, depo)" + + " VALUES (?, ?, ?, ?), (?, ?, ?, ?)"; + + checkUpdate(client.cache(CACHE_ACCOUNT), null, + new SqlFieldsQueryEx(text, false).setArgs(1, "John Marry", 11111, 100, 2, "Marry John", 11112, 200)); + } + + /** + * + * @throws Exception If failed. + */ + public void testInsertFromSelect() throws Exception { + Map accounts = getAccounts(100, 1, 1000); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + + String text = "INSERT INTO \"trade\".Trade (_key, accountId, stockId, qty, price) " + + "SELECT a._key, a._key, ?, a.depo/?, ? FROM \"acc\".Account a"; + + checkUpdate(client.cache(CACHE_TRADE), null, + new SqlFieldsQueryEx(text, false).setArgs(1, 10, 10)); + } + + /** + * + * @throws Exception If failed. 
+ */ + public void testInsertFromSelectOrderBy() throws Exception { + Map accounts = getAccounts(100, 1, 1000); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + + String text = "INSERT INTO \"trade\".Trade (_key, accountId, stockId, qty, price) " + + "SELECT a._key, a._key, ?, a.depo/?, ? FROM \"acc\".Account a " + + "ORDER BY a.sn DESC"; + + checkUpdate(client.cache(CACHE_TRADE), null, + new SqlFieldsQueryEx(text, false).setArgs(1, 10, 10)); + } + + /** + * + * @throws Exception If failed. + */ + public void testInsertFromSelectUnion() throws Exception { + Map accounts = getAccounts(20, 1, 1000); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + + String text = "INSERT INTO \"trade\".Trade (_key, accountId, stockId, qty, price) " + + "SELECT a._key, a._key, 0, a.depo, 1 FROM \"acc\".Account a " + + "UNION " + + "SELECT 101 + a2._key, a2._key, 1, a2.depo, 1 FROM \"acc\".Account a2"; + + checkUpdate(client.cache(CACHE_TRADE), null, + new SqlFieldsQueryEx(text, false)); + } + + /** + * + * @throws Exception If failed. + */ + public void testInsertFromSelectGroupBy() throws Exception { + Map accounts = getAccounts(100, 1, 1000); + Map trades = getTrades(100, 2); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + client.cache(CACHE_TRADE).putAll(trades); + + String text = "INSERT INTO \"rep\".Report (_key, accountId, spends, count) " + + "SELECT accountId, accountId, SUM(qty * price), COUNT(*) " + + "FROM \"trade\".Trade " + + "GROUP BY accountId"; + + checkUpdate(client.cache(CACHE_REPORT), null, + new SqlFieldsQueryEx(text, false)); + } + + /** + * + * @throws Exception If failed. 
+ */ + public void testInsertFromSelectDistinct() throws Exception { + Map accounts = getAccounts(100, 2, 100); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + + String text = "INSERT INTO \"list\".String (_key, _val) " + + "SELECT DISTINCT sn, name FROM \"acc\".Account "; + + checkUpdate(client.cache(CACHE_LIST), null, + new SqlFieldsQueryEx(text, false)); + } + + /** + * + * @throws Exception If failed. + */ + public void testInsertFromSelectJoin() throws Exception { + Map accounts = getAccounts(100, 1, 100); + Map stocks = getStocks(5); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + client.cache(CACHE_STOCK).putAll(stocks); + + String text = "INSERT INTO \"trade\".Trade(_key, accountId, stockId, qty, price) " + + "SELECT 5*a._key + s._key, a._key, s._key, ?, a.depo/? " + + "FROM \"acc\".Account a JOIN \"stock\".Stock s ON 1=1"; + + checkUpdate(client.cache(CACHE_TRADE), null, + new SqlFieldsQueryEx(text, false).setArgs(10, 10)); + } + + /** + * + * @throws Exception If failed. + */ + public void testDelete() throws Exception { + Map accounts = getAccounts(100, 1, 100); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + + String text = "DELETE FROM \"acc\".Account WHERE sn > ?"; + + checkUpdate(client.cache(CACHE_ACCOUNT), accounts, + new SqlFieldsQueryEx(text, false).setArgs(10)); + } + + /** + * + * @throws Exception If failed. + */ + public void testDeleteTop() throws Exception { + Map accounts = getAccounts(100, 1, 100); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + + String text = "DELETE TOP ? FROM \"acc\".Account WHERE sn < ?"; + + checkUpdate(client.cache(CACHE_ACCOUNT), accounts, + new SqlFieldsQueryEx(text, false).setArgs(10, 10)); + } + + /** + * + * @throws Exception If failed. 
+ */ + public void testDeleteWhereSubquery() throws Exception { + Map accounts = getAccounts(20, 1, 100); + Map trades = getTrades(10, 2); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + client.cache(CACHE_TRADE).putAll(trades); + + String text = "DELETE FROM \"acc\".Account " + + "WHERE _key IN (SELECT t.accountId FROM \"trade\".Trade t)"; + + checkUpdate(client.cache(CACHE_ACCOUNT), accounts, + new SqlFieldsQueryEx(text, false)); + } + + /** + * + * @throws Exception If failed. + */ + public void testMergeValues() throws Exception { + Map accounts = getAccounts(1, 1, 100); + + String text = "MERGE INTO \"acc\".Account (_key, name, sn, depo)" + + " VALUES (?, ?, ?, ?), (?, ?, ?, ?)"; + + checkUpdate(client.cache(CACHE_ACCOUNT), accounts, + new SqlFieldsQueryEx(text, false).setArgs(0, "John Marry", 11111, 100, 1, "Marry John", 11112, 200)); + } + + /** + * + * @throws Exception If failed. + */ + public void testMergeFromSelectJoin() throws Exception { + Map accounts = getAccounts(100, 1, 100); + Map stocks = getStocks(5); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + client.cache(CACHE_STOCK).putAll(stocks); + + Map trades = new HashMap<>(); + + trades.put(5, new Trade(1, 1, 1, 1)); + + String text = "MERGE INTO \"trade\".Trade(_key, accountId, stockId, qty, price) " + + "SELECT 5*a._key + s._key, a._key, s._key, ?, a.depo/? " + + "FROM \"acc\".Account a JOIN \"stock\".Stock s ON 1=1"; + + checkUpdate(client.cache(CACHE_TRADE), trades, + new SqlFieldsQueryEx(text, false).setArgs(10, 10)); + } + + /** + * + * @throws Exception If failed. + */ + public void testMergeFromSelectOrderBy() throws Exception { + Map accounts = getAccounts(100, 1, 1000); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + + Map trades = new HashMap<>(); + + trades.put(5, new Trade(1, 1, 1, 1)); + + String text = "MERGE INTO \"trade\".Trade (_key, accountId, stockId, qty, price) " + + "SELECT a._key, a._key, ?, a.depo/?, ? 
FROM \"acc\".Account a " + + "ORDER BY a.sn DESC"; + + checkUpdate(client.cache(CACHE_TRADE), trades, + new SqlFieldsQueryEx(text, false).setArgs(1, 10, 10)); + } + + /** + * + * @throws Exception If failed. + */ + public void testMergeFromSelectGroupBy() throws Exception { + Map accounts = getAccounts(100, 1, 1000); + Map trades = getTrades(100, 2); + + client.cache(CACHE_ACCOUNT).putAll(accounts); + client.cache(CACHE_TRADE).putAll(trades); + + Map reports = new HashMap<>(); + + reports.put(5, new Report(5, 1, 1)); + + String text = "MERGE INTO \"rep\".Report (_key, accountId, spends, count) " + + "SELECT accountId, accountId, SUM(qty * price), COUNT(*) " + + "FROM \"trade\".Trade " + + "GROUP BY accountId"; + + checkUpdate(client.cache(CACHE_REPORT), reports, + new SqlFieldsQueryEx(text, false)); + } + + /** + * Constructs multiple Account objects. + * + * @param num Number of accounts. + * @param numCopy Number of copies. + * @param depo Deposit amount. + * @return Map of accounts. + */ + private Map getAccounts(int num, int numCopy, int depo) { + Map res = new HashMap<>(); + + int count = 0; + + for (int i = 0; i < num; ++i) { + String name = "John doe #" + i; + + for (int j = 0; j < numCopy; ++j) + res.put(count++, new Account(name, i, depo)); + } + + return res; + } + + /** + * Constructs multiple Stock objects. + * + * @param num Number of stocks. + * @return Map of Stock objects. + */ + private Map getStocks(int num) { + Map res = new HashMap<>(); + + for (int i = 0; i < num; ++i) + res.put(i, new Stock("T" + i, "Stock #" + i)); + + return res; + } + + /** + * Constructs multiple Trade objects. + * + * @param numAccounts Number of accounts. + * @param numStocks Number of stocks. + * @return Map of Trade objects. 
+ */ + private Map getTrades(int numAccounts, int numStocks) { + Map res = new HashMap<>(); + + int count = 0; + + for (int i = 0; i < numAccounts; ++i) { + for (int j = 0; j < numStocks; ++j) { + res.put(count++, new Trade(i, j, 100, 100)); + } + } + + return res; + } + + /** + * Executes provided sql update with skipReducerOnUpdate flag on and off and checks results are the same. + * + * @param cache Cache. + * @param initial Initial content of the cache. + * @param qry Query to execute. + * @param Key type. + * @param Value type. + */ + private void checkUpdate(IgniteCache cache, Map initial, SqlFieldsQueryEx qry) { + cache.clear(); + + if (!F.isEmpty(initial)) + cache.putAll(initial); + + List> updRes = cache.query(qry.setSkipReducerOnUpdate(true)).getAll(); + + Map result = new HashMap<>(cache.size()); + + for (Cache.Entry e : cache) + result.put(e.getKey(), e.getValue()); + + cache.clear(); + + if (!F.isEmpty(initial)) + cache.putAll(initial); + + List> updRes2 = cache.query(qry.setSkipReducerOnUpdate(false)).getAll(); + + assertTrue(((Number)updRes.get(0).get(0)).intValue() > 0); + + assertEquals(((Number)updRes.get(0).get(0)).intValue(), ((Number)updRes2.get(0).get(0)).intValue()); + + assertEquals(result.size(), cache.size()); + + for (Cache.Entry e : cache) + assertEquals(e.getValue(), result.get(e.getKey())); + } + + /** */ + public class Account { + /** */ + @QuerySqlField + String name; + + /** */ + @QuerySqlField + int sn; + + /** */ + @QuerySqlField + int depo; + + /** + * Constructor. + * + * @param name Name. + * @param sn ID. + * @param depo Deposit amount. + */ + Account(String name, int sn, int depo) { + this.name = name; + this.sn = sn; + this.depo = depo; + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return (name == null ? 
0 : name.hashCode()) ^ sn ^ depo; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object obj) { + if (obj == null) + return false; + + if (!obj.getClass().equals(Account.class)) + return false; + + Account other = (Account)obj; + + return F.eq(name, other.name) && sn == other.sn && depo == other.depo; + } + } + + /** */ + public class Stock { + /** */ + @QuerySqlField + String ticker; + + /** */ + @QuerySqlField + String name; + + /** + * Constructor. + * + * @param ticker Short name. + * @param name Name. + */ + Stock(String ticker, String name) { + this.ticker = ticker; + this.name = name; + } + } + + /** */ + public class Trade { + /** */ + @QuerySqlField + int accountId; + + /** */ + @QuerySqlField + int stockId; + + /** */ + @QuerySqlField + int qty; + + /** */ + @QuerySqlField + int price; + + /** + * Constructor. + * + * @param accountId Account id. + * @param stockId Stock id. + * @param qty Quantity. + * @param price Price. + */ + Trade(int accountId, int stockId, int qty, int price) { + this.accountId = accountId; + this.stockId = stockId; + this.qty = qty; + this.price = price; + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return accountId ^ stockId ^ qty ^ price; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object obj) { + if (obj == null) + return false; + + if (!obj.getClass().equals(Trade.class)) + return false; + + Trade other = (Trade)obj; + + return accountId == other.accountId && stockId == other.stockId && + qty == other.qty && price == other.price; + } + + } + + /** */ + public class Report { + /** */ + @QuerySqlField + int accountId; + + /** */ + @QuerySqlField + int spends; + + /** */ + @QuerySqlField + int count; + + /** + * Constructor. + * + * @param accountId Account id. + * @param spends Spends. + * @param count Count. 
+ */ + Report(int accountId, int spends, int count) { + this.accountId = accountId; + this.spends = spends; + this.count = count; + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return accountId ^ spends ^ count; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object obj) { + if (obj == null) + return false; + + if (!obj.getClass().equals(Report.class)) + return false; + + Report other = (Report)obj; + + return accountId == other.accountId && spends == other.spends && + count == other.count; + } + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSkipReducerOnUpdateDmlSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSkipReducerOnUpdateDmlSelfTest.java new file mode 100644 index 0000000000000..a2a6bf8ba79fd --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSkipReducerOnUpdateDmlSelfTest.java @@ -0,0 +1,755 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.query; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import javax.cache.CacheException; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cache.CacheKeyConfiguration; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.affinity.Affinity; +import org.apache.ignite.cache.affinity.AffinityKeyMapped; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.cache.query.annotations.QuerySqlField; +import org.apache.ignite.cache.query.annotations.QuerySqlFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.events.CacheQueryExecutedEvent; +import org.apache.ignite.events.Event; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; +import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import org.apache.ignite.internal.processors.query.h2.twostep.GridMapQueryExecutor; +import org.apache.ignite.internal.processors.query.h2.twostep.GridReduceQueryExecutor; +import org.apache.ignite.internal.util.lang.GridAbsPredicate; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.lang.IgnitePredicate; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.jsr166.ThreadLocalRandom8; + +import static 
java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.apache.ignite.events.EventType.EVT_CACHE_QUERY_EXECUTED; + +/** + * Tests for distributed DML. + */ +@SuppressWarnings({"unchecked", "ThrowableResultOfMethodCallIgnored"}) +public class IgniteSqlSkipReducerOnUpdateDmlSelfTest extends GridCommonAbstractTest { + /** IP finder. */ + private static final TcpDiscoveryVmIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + + /** */ + private static int NODE_COUNT = 4; + + /** */ + private static String NODE_CLIENT = "client"; + + /** */ + private static String CACHE_ORG = "org"; + + /** */ + private static String CACHE_PERSON = "person"; + + /** */ + private static String CACHE_POSITION = "pos"; + + /** */ + private static Ignite client; + + /** */ + private static CountDownLatch latch; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration c = super.getConfiguration(gridName); + + TcpDiscoverySpi disco = new TcpDiscoverySpi(); + + disco.setIpFinder(IP_FINDER); + + c.setDiscoverySpi(disco); + + List ccfgs = new ArrayList<>(); + + ccfgs.add(buildCacheConfiguration(CACHE_ORG)); + ccfgs.add(buildCacheConfiguration(CACHE_PERSON)); + ccfgs.add(buildCacheConfiguration(CACHE_POSITION)); + + c.setCacheConfiguration(ccfgs.toArray(new CacheConfiguration[ccfgs.size()])); + + c.setLongQueryWarningTimeout(10000); + + if (gridName.equals(NODE_CLIENT)) + c.setClientMode(true); + + return c; + } + + /** + * Creates cache configuration. + * + * @param name Cache name. + * @return Cache configuration. 
+ */ + private CacheConfiguration buildCacheConfiguration(String name) { + if (name.equals(CACHE_ORG)) { + CacheConfiguration ccfg = new CacheConfiguration(CACHE_ORG); + + ccfg.setCacheMode(CacheMode.PARTITIONED); + + QueryEntity entity = new QueryEntity(Integer.class, Organization.class); + + ccfg.setQueryEntities(Collections.singletonList(entity)); + + ccfg.setSqlFunctionClasses(IgniteSqlSkipReducerOnUpdateDmlSelfTest.class); + + return ccfg; + } + if (name.equals(CACHE_PERSON)) { + CacheConfiguration ccfg = new CacheConfiguration(CACHE_PERSON); + + ccfg.setCacheMode(CacheMode.PARTITIONED); + + QueryEntity entity = new QueryEntity(PersonKey.class, Person.class); + + ccfg.setQueryEntities(Collections.singletonList(entity)); + + ccfg.setKeyConfiguration(new CacheKeyConfiguration(PersonKey.class)); + + ccfg.setSqlFunctionClasses(IgniteSqlSkipReducerOnUpdateDmlSelfTest.class); + + return ccfg; + } + if (name.equals(CACHE_POSITION)) { + CacheConfiguration ccfg = new CacheConfiguration(CACHE_POSITION); + + ccfg.setCacheMode(CacheMode.REPLICATED); + + QueryEntity entity = new QueryEntity(Integer.class, Position.class); + + ccfg.setQueryEntities(Collections.singletonList(entity)); + + ccfg.setSqlFunctionClasses(IgniteSqlSkipReducerOnUpdateDmlSelfTest.class); + + return ccfg; + } + + assert false; + + return null; + } + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + super.beforeTestsStarted(); + + startGrids(NODE_COUNT); + + client = startGrid(NODE_CLIENT); + + awaitPartitionMapExchange(); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + checkNoLeaks(); + + super.afterTestsStopped(); + + stopAllGrids(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + // Stop additional node that is started in one of the test. 
+ stopGrid(NODE_COUNT + 1); + + awaitPartitionMapExchange(); + + client.cache(CACHE_PERSON).clear(); + client.cache(CACHE_ORG).clear(); + client.cache(CACHE_POSITION).clear(); + } + + /** + * + * @throws Exception if failed. + */ + public void testSimpleUpdateDistributedReplicated() throws Exception { + fillCaches(); + + IgniteCache cache = grid(NODE_CLIENT).cache(CACHE_POSITION); + + Position p = cache.get(1); + + List> r = cache.query(new SqlFieldsQueryEx("UPDATE Position p SET name = CONCAT('A ', name)", false) + .setSkipReducerOnUpdate(true)).getAll(); + + assertEquals((long)cache.size(), r.get(0).get(0)); + + assertEquals(cache.get(1).name, "A " + p.name); + } + + /** + * + * @throws Exception if failed. + */ + public void testSimpleUpdateDistributedPartitioned() throws Exception { + fillCaches(); + + IgniteCache cache = grid(NODE_CLIENT).cache(CACHE_PERSON); + + List> r = cache.query(new SqlFieldsQueryEx( + "UPDATE Person SET position = CASEWHEN(position = 1, 1, position - 1)", false) + .setSkipReducerOnUpdate(true)).getAll(); + + assertEquals((long)cache.size(), r.get(0).get(0)); + } + + /** + * + * @throws Exception if failed. + */ + public void testDistributedUpdateFailedKeys() throws Exception { + // UPDATE can produce failed keys due to concurrent modification + fillCaches(); + + final IgniteCache cache = grid(NODE_CLIENT).cache(CACHE_ORG); + + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() { + return cache.query(new SqlFieldsQueryEx("UPDATE Organization SET rate = Modify(_key, rate - 1)", false) + .setSkipReducerOnUpdate(true)); + } + }, CacheException.class, "Failed to update some keys because they had been modified concurrently"); + } + + /** + * + * @throws Exception if failed. 
+ */ + public void testDistributedUpdateFail() throws Exception { + fillCaches(); + + final IgniteCache cache = grid(NODE_CLIENT).cache(CACHE_PERSON); + + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() { + return cache.query(new SqlFieldsQueryEx("UPDATE Person SET name = Fail(name)", false) + .setSkipReducerOnUpdate(true)); + } + }, CacheException.class, "Failed to execute SQL query"); + } + + /** + * + * @throws Exception if failed. + */ + @SuppressWarnings("ConstantConditions") + public void testQueryParallelism() throws Exception { + String cacheName = CACHE_ORG + "x4"; + + CacheConfiguration cfg = buildCacheConfiguration(CACHE_ORG) + .setQueryParallelism(4) + .setName(cacheName); + + IgniteCache cache = grid(NODE_CLIENT).createCache(cfg); + + for (int i = 0; i < 1024; i++) + cache.put(i, new Organization("Acme Inc #" + i, 0)); + + List> r = cache.query(new SqlFieldsQueryEx("UPDATE \"" + cacheName + + "\".Organization o SET name = UPPER(name)", false).setSkipReducerOnUpdate(true)).getAll(); + + assertEquals((long)cache.size(), r.get(0).get(0)); + } + + /** + * + * @throws Exception if failed. 
+ */ + public void testEvents() throws Exception { + final CountDownLatch latch = new CountDownLatch(NODE_COUNT); + + final IgnitePredicate pred = new IgnitePredicate() { + @Override public boolean apply(Event evt) { + assert evt instanceof CacheQueryExecutedEvent; + + CacheQueryExecutedEvent qe = (CacheQueryExecutedEvent)evt; + + assertNotNull(qe.clause()); + + latch.countDown(); + + return true; + } + }; + + for (int idx = 0; idx < NODE_COUNT; idx++) + grid(idx).events().localListen(pred, EVT_CACHE_QUERY_EXECUTED); + + IgniteCache cache = grid(NODE_CLIENT).cache(CACHE_ORG); + + for (int i = 0; i < 1024; i++) + cache.put(i, new Organization("Acme Inc #" + i, 0)); + + cache.query(new SqlFieldsQueryEx("UPDATE \"org\".Organization o SET name = UPPER(name)", false) + .setSkipReducerOnUpdate(true)).getAll(); + + assertTrue(latch.await(5000, MILLISECONDS)); + + for (int idx = 0; idx < NODE_COUNT; idx++) + grid(idx).events().stopLocalListen(pred); + } + + /** + * + * @throws Exception if failed. + */ + public void testSpecificPartitionsUpdate() throws Exception { + fillCaches(); + + Affinity aff = grid(NODE_CLIENT).affinity(CACHE_PERSON); + + int numParts = aff.partitions(); + int parts[] = new int[numParts / 2]; + + for (int idx = 0; idx < numParts / 2; idx++) + parts[idx] = idx * 2; + + IgniteCache cache = grid(NODE_CLIENT).cache(CACHE_PERSON); + + // UPDATE over even partitions + cache.query(new SqlFieldsQueryEx("UPDATE Person SET position = 0", false) + .setSkipReducerOnUpdate(true) + .setPartitions(parts)); + + List> rows = cache.query(new SqlFieldsQuery("SELECT _key, position FROM Person")).getAll(); + + for (List row : rows) { + PersonKey personKey = (PersonKey)row.get(0); + int pos = ((Number)row.get(1)).intValue(); + int part = aff.partition(personKey); + + assertTrue((part % 2 == 0) ^ (pos != 0)); + } + } + + /** + * + * @throws Exception if failed. 
+ */ + public void testCancel() throws Exception { + latch = new CountDownLatch(NODE_COUNT + 1); + + fillCaches(); + + final IgniteCache cache = grid(NODE_CLIENT).cache(CACHE_ORG); + + final IgniteInternalFuture fut = GridTestUtils.runAsync(new Callable() { + @Override public Object call() { + return cache.query(new SqlFieldsQueryEx("UPDATE Organization SET name = WAIT(name)", false) + .setSkipReducerOnUpdate(true)); + } + }); + + GridTestUtils.waitForCondition(new GridAbsPredicate() { + @Override public boolean apply() { + Collection qCol = + grid(NODE_CLIENT).context().query().runningQueries(0); + + if (qCol.isEmpty()) + return false; + + for (GridRunningQueryInfo queryInfo : qCol) + queryInfo.cancel(); + + return true; + } + }, 5000); + + latch.await(5000, MILLISECONDS); + + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws IgniteCheckedException { + return fut.get(); + } + }, IgniteCheckedException.class, "Future was cancelled"); + } + + /** + * + * @throws Exception if failed. 
+ */ + public void testNodeStopDuringUpdate() throws Exception { + startGrid(NODE_COUNT + 1); + + awaitPartitionMapExchange(); + + fillCaches(); + + latch = new CountDownLatch(NODE_COUNT + 1 + 1); + + final IgniteCache cache = grid(NODE_CLIENT).cache(CACHE_ORG); + + final IgniteInternalFuture fut = GridTestUtils.runAsync(new Callable() { + @Override public Object call() { + return cache.query(new SqlFieldsQueryEx("UPDATE Organization SET name = WAIT(name)", false) + .setSkipReducerOnUpdate(true)); + } + }); + + final CountDownLatch finalLatch = latch; + + assertTrue(GridTestUtils.waitForCondition(new GridAbsPredicate() { + @Override public boolean apply() { + return finalLatch.getCount() == 1; + } + }, 5000)); + + latch.countDown(); + + stopGrid(NODE_COUNT + 1); + + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws IgniteCheckedException { + return fut.get(); + } + }, IgniteCheckedException.class, "Update failed because map node left topology"); + } + + /** + * Ensure there are no leaks in data structures associated with distributed dml execution. 
+ */ + private void checkNoLeaks() { + GridQueryProcessor qryProc = grid(NODE_CLIENT).context().query(); + + IgniteH2Indexing h2Idx = GridTestUtils.getFieldValue(qryProc, GridQueryProcessor.class, "idx"); + + GridReduceQueryExecutor rdcQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "rdcQryExec"); + + Map updRuns = GridTestUtils.getFieldValue(rdcQryExec, GridReduceQueryExecutor.class, "updRuns"); + + assertEquals(0, updRuns.size()); + + for (int idx = 0; idx < NODE_COUNT; idx++) { + qryProc = grid(idx).context().query(); + + h2Idx = GridTestUtils.getFieldValue(qryProc, GridQueryProcessor.class, "idx"); + + GridMapQueryExecutor mapQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec"); + + Map qryRess = GridTestUtils.getFieldValue(mapQryExec, GridMapQueryExecutor.class, "qryRess"); + + for (Object obj : qryRess.values()) { + Map updCancels = GridTestUtils.getFieldValue(obj, "updCancels"); + + assertEquals(0, updCancels.size()); + } + } + } + + /** + * Fills caches with initial data. 
+ */ + private void fillCaches() { + Ignite client = grid(NODE_CLIENT); + + IgniteCache posCache = client.cache(CACHE_POSITION); + + // Generate positions + Position[] positions = new Position[] { + new Position(1, "High Ranking Officer", 1), + new Position(2, "Administrative worker", 3), + new Position(3, "Worker", 7), + new Position(4, "Security", 2), + new Position(5, "Cleaner", 1) + }; + + for (Position pos: positions) + posCache.put(pos.id, pos); + + // Generate organizations + String[] forms = new String[] {" Inc", " Co", " AG", " Industries"}; + String[] orgNames = new String[] {"Acme", "Sierra", "Mesa", "Umbrella", "Robotics"}; + String[] names = new String[] {"Mary", "John", "William", "Tom", "Basil", "Ann", "Peter"}; + + IgniteCache personCache = client.cache(CACHE_PERSON); + + IgniteCache orgCache = client.cache(CACHE_ORG); + + int orgId = 0; + int personId = 0; + + for (String orgName : produceCombination(orgNames, orgNames, forms)) { + Organization org = new Organization(orgName, 1 + orgId); + + orgCache.put(++orgId, org); + + // Generate persons + + List personNames = produceCombination(names, names, new String[]{"s"}); + + int positionId = 0; + int posCounter = 0; + + for (String name : personNames) { + PersonKey pKey = new PersonKey(orgId, ++personId); + + if (positions[positionId].rate < posCounter++) { + posCounter = 0; + positionId = (positionId + 1) % positions.length; + } + + Person person = new Person(name, positions[positionId].id, org.rate * positions[positionId].rate); + + personCache.put(pKey, person); + } + } + } + + /** + * Produces all possible combinations. + * + * @param a First array. + * @param b Second array. + * @param ends Endings array. + * @return Result. 
+ */ + private List produceCombination(String[] a, String[] b, String[] ends) { + List res = new ArrayList<>(); + + for (String s1 : a) { + for (String s2 : b) { + if (!s1.equals(s2)) { + String end = ends[ThreadLocalRandom8.current().nextInt(ends.length)]; + + res.add(s1 + " " + s2 + end); + } + } + } + + return res; + } + + /** */ + private static class Organization { + /** */ + @QuerySqlField + String name; + + /** */ + @QuerySqlField + int rate; + + /** */ + @QuerySqlField + Date updated; + + /** + * Constructor. + * + * @param name Organization name. + * @param rate Rate. + */ + public Organization(String name, int rate) { + this.name = name; + this.rate = rate; + this.updated = new Date(System.currentTimeMillis()); + } + } + + /** */ + public static class PersonKey { + /** */ + @AffinityKeyMapped + @QuerySqlField + private Integer orgId; + + /** */ + @QuerySqlField + private Integer id; + + /** + * Constructor. + * + * @param orgId Organization id. + * @param id Person id. + */ + PersonKey(int orgId, int id) { + this.orgId = orgId; + this.id = id; + } + } + + /** */ + public static class Person { + /** */ + @QuerySqlField + String name; + + /** */ + @QuerySqlField + int position; + + /** */ + @QuerySqlField + int amount; + /** */ + @QuerySqlField + Date updated; + + /** + * Constructor. + * + * @param name Name. + * @param position Position. + * @param amount Amount. + */ + private Person(String name, int position, int amount) { + this.name = name; + this.position = position; + this.amount = amount; + + this.updated = new Date(System.currentTimeMillis()); + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return (name==null? 0: name.hashCode()) ^ position ^ amount ^ (updated == null ? 
0 : updated.hashCode()); + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object obj) { + if (obj == null) + return false; + + if (!obj.getClass().equals(Person.class)) + return false; + + Person other = (Person)obj; + + return F.eq(name, other.name) && position == other.position && + amount == other.amount && F.eq(updated, other.updated); + } + } + + /** */ + private static class Position { + /** */ + @QuerySqlField + int id; + + /** */ + @QuerySqlField + String name; + + /** */ + @QuerySqlField + int rate; + + /** + * Constructor. + * + * @param id Id. + * @param name Name. + * @param rate Rate. + */ + public Position(int id, String name, int rate) { + this.id = id; + this.name = name; + this.rate = rate; + } + } + + /** + * SQL function that always fails. + * + * @param param Arbitrary parameter. + * @return Result. + */ + @QuerySqlFunction + public static String Fail(String param) { + throw new IgniteSQLException("Fail() called"); + } + + /** + * SQL function that waits for condition. + * + * @param param Arbitrary parameter. + * @return Result. + */ + @QuerySqlFunction + public static String Wait(String param) { + try { + if (latch.getCount() > 0) { + latch.countDown(); + + latch.await(5000, MILLISECONDS); + } + else + Thread.sleep(100); + } + catch (InterruptedException ignore) { + // No-op + } + return param; + } + + /** + * SQL function that makes a concurrent modification. + * + * @param id Id. + * @param rate Rate. + * @return Result. 
+ */ + @QuerySqlFunction + public static int Modify(final int id, final int rate) { + try { + GridTestUtils.runAsync(new Callable() { + @Override public Object call() { + IgniteCache cache = client.cache(CACHE_ORG); + + cache.put(id, new Organization("Acme Inc #" + id, rate + 1)); + + return null; + } + }).get(); + } + catch (Exception e) { + // No-op + } + + return rate - 1; + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java index c49649b56f3a0..83b4689678f5b 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java @@ -123,9 +123,11 @@ import org.apache.ignite.internal.processors.cache.query.IndexingSpiQuerySelfTest; import org.apache.ignite.internal.processors.cache.query.IndexingSpiQueryTxSelfTest; import org.apache.ignite.internal.processors.client.ClientConnectorConfigurationValidationSelfTest; +import org.apache.ignite.internal.processors.query.IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest; import org.apache.ignite.internal.processors.query.IgniteSqlParameterizedQueryTest; import org.apache.ignite.internal.processors.query.h2.IgniteSqlBigIntegerKeyTest; import org.apache.ignite.internal.processors.query.IgniteQueryDedicatedPoolTest; +import org.apache.ignite.internal.processors.query.IgniteSqlSkipReducerOnUpdateDmlSelfTest; import org.apache.ignite.internal.processors.query.IgniteSqlEntryCacheModeAgnosticTest; import org.apache.ignite.internal.processors.query.IgniteSqlKeyValueFieldsTest; import org.apache.ignite.internal.processors.query.IgniteSqlNotNullConstraintTest; @@ -243,6 +245,8 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(IgniteCacheInsertSqlQuerySelfTest.class); 
suite.addTestSuite(IgniteCacheUpdateSqlQuerySelfTest.class); suite.addTestSuite(IgniteCacheDeleteSqlQuerySelfTest.class); + suite.addTestSuite(IgniteSqlSkipReducerOnUpdateDmlSelfTest.class); + suite.addTestSuite(IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest.class); suite.addTestSuite(IgniteBinaryObjectQueryArgumentsTest.class); suite.addTestSuite(IgniteBinaryObjectLocalQueryArgumentsTest.class); diff --git a/modules/platforms/cpp/odbc-test/src/configuration_test.cpp b/modules/platforms/cpp/odbc-test/src/configuration_test.cpp index 7da67576b8474..3165c4d268b3c 100644 --- a/modules/platforms/cpp/odbc-test/src/configuration_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/configuration_test.cpp @@ -43,6 +43,7 @@ namespace const bool testReplicatedOnly = true; const bool testCollocated = true; const bool testLazy = true; + const bool testSkipReducerOnUpdate = true; const std::string testAddress = testServerHost + ':' + ignite::common::LexicalCast(testServerPort); } @@ -132,6 +133,7 @@ void CheckConnectionConfig(const Configuration& cfg) BOOST_CHECK_EQUAL(cfg.IsReplicatedOnly(), testReplicatedOnly); BOOST_CHECK_EQUAL(cfg.IsCollocated(), testCollocated); BOOST_CHECK_EQUAL(cfg.IsLazy(), testLazy); + BOOST_CHECK_EQUAL(cfg.IsSkipReducerOnUpdate(), testSkipReducerOnUpdate); std::stringstream constructor; @@ -143,7 +145,8 @@ void CheckConnectionConfig(const Configuration& cfg) << "lazy=" << BoolToStr(testLazy) << ';' << "page_size=" << testPageSize << ';' << "replicated_only=" << BoolToStr(testReplicatedOnly) << ';' - << "schema=" << testSchemaName << ';'; + << "schema=" << testSchemaName << ';' + << "skip_reducer_on_update=" << BoolToStr(testSkipReducerOnUpdate) << ';'; const std::string& expectedStr = constructor.str(); @@ -164,6 +167,7 @@ void CheckDsnConfig(const Configuration& cfg) BOOST_CHECK_EQUAL(cfg.IsReplicatedOnly(), false); BOOST_CHECK_EQUAL(cfg.IsCollocated(), false); BOOST_CHECK_EQUAL(cfg.IsLazy(), false); + BOOST_CHECK_EQUAL(cfg.IsSkipReducerOnUpdate(), false); }
BOOST_AUTO_TEST_SUITE(ConfigurationTestSuite) @@ -180,6 +184,8 @@ BOOST_AUTO_TEST_CASE(CheckTestValuesNotEquealDefault) BOOST_CHECK_NE(testEnforceJoinOrder, Configuration::DefaultValue::enforceJoinOrder); BOOST_CHECK_NE(testReplicatedOnly, Configuration::DefaultValue::replicatedOnly); BOOST_CHECK_NE(testCollocated, Configuration::DefaultValue::collocated); + BOOST_CHECK_NE(testLazy, Configuration::DefaultValue::lazy); + BOOST_CHECK_NE(testSkipReducerOnUpdate, Configuration::DefaultValue::skipReducerOnUpdate); } BOOST_AUTO_TEST_CASE(TestConnectStringUppercase) @@ -196,7 +202,8 @@ BOOST_AUTO_TEST_CASE(TestConnectStringUppercase) << "COLLOCATED=" << BoolToStr(testCollocated, false) << ';' << "REPLICATED_ONLY=" << BoolToStr(testReplicatedOnly, false) << ';' << "PAGE_SIZE=" << testPageSize << ';' - << "SCHEMA=" << testSchemaName; + << "SCHEMA=" << testSchemaName << ';' + << "SKIP_REDUCER_ON_UPDATE=" << BoolToStr(testSkipReducerOnUpdate, false); const std::string& connectStr = constructor.str(); @@ -219,7 +226,8 @@ BOOST_AUTO_TEST_CASE(TestConnectStringLowercase) << "enforce_join_order=" << BoolToStr(testEnforceJoinOrder) << ';' << "replicated_only=" << BoolToStr(testReplicatedOnly) << ';' << "collocated=" << BoolToStr(testCollocated) << ';' - << "schema=" << testSchemaName; + << "schema=" << testSchemaName << ';' + << "skip_reducer_on_update=" << BoolToStr(testSkipReducerOnUpdate); const std::string& connectStr = constructor.str(); @@ -242,7 +250,8 @@ BOOST_AUTO_TEST_CASE(TestConnectStringZeroTerminated) << "collocated=" << BoolToStr(testCollocated) << ';' << "distributed_joins=" << BoolToStr(testDistributedJoins) << ';' << "enforce_join_order=" << BoolToStr(testEnforceJoinOrder) << ';' - << "schema=" << testSchemaName; + << "schema=" << testSchemaName << ';' + << "skip_reducer_on_update=" << BoolToStr(testSkipReducerOnUpdate); const std::string& connectStr = constructor.str(); @@ -265,7 +274,8 @@ BOOST_AUTO_TEST_CASE(TestConnectStringMixed) << "Enforce_Join_Order=" << 
BoolToStr(testEnforceJoinOrder) << ';' << "Replicated_Only=" << BoolToStr(testReplicatedOnly, false) << ';' << "Collocated=" << BoolToStr(testCollocated) << ';' - << "Schema=" << testSchemaName; + << "Schema=" << testSchemaName << ';' + << "Skip_Reducer_On_Update=" << BoolToStr(testSkipReducerOnUpdate); const std::string& connectStr = constructor.str(); @@ -288,7 +298,8 @@ BOOST_AUTO_TEST_CASE(TestConnectStringWhitepaces) << "COLLOCATED =" << BoolToStr(testCollocated, false) << " ;" << " REPLICATED_ONLY= " << BoolToStr(testReplicatedOnly, false) << ';' << "ENFORCE_JOIN_ORDER= " << BoolToStr(testEnforceJoinOrder, false) << " ;" - << "SCHEMA = \n\r" << testSchemaName; + << "SCHEMA = \n\r" << testSchemaName << ';' + << " skip_reducer_on_update=" << BoolToStr(testSkipReducerOnUpdate, false); const std::string& connectStr = constructor.str(); @@ -358,6 +369,7 @@ BOOST_AUTO_TEST_CASE(TestConnectStringInvalidBoolKeys) keys.insert("replicated_only"); keys.insert("collocated"); keys.insert("lazy"); + keys.insert("skip_reducer_on_update"); for (Set::const_iterator it = keys.begin(); it != keys.end(); ++it) { @@ -385,6 +397,7 @@ BOOST_AUTO_TEST_CASE(TestConnectStringValidBoolKeys) keys.insert("replicated_only"); keys.insert("collocated"); keys.insert("lazy"); + keys.insert("skip_reducer_on_update"); for (Set::const_iterator it = keys.begin(); it != keys.end(); ++it) { diff --git a/modules/platforms/cpp/odbc-test/src/queries_test.cpp b/modules/platforms/cpp/odbc-test/src/queries_test.cpp index 4c7e402134b0a..707669d398b2e 100644 --- a/modules/platforms/cpp/odbc-test/src/queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/queries_test.cpp @@ -755,6 +755,14 @@ BOOST_AUTO_TEST_CASE(TestConnectionProtocolVersion_2_1_5) InsertTestBatch(11, 20, 9); } +BOOST_AUTO_TEST_CASE(TestConnectionProtocolVersion_2_3_0) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache;PROTOCOL_VERSION=2.3.0"); + + InsertTestStrings(10, false); + InsertTestBatch(11, 20, 9); +} + 
BOOST_AUTO_TEST_CASE(TestTwoRowsInt8) { CheckTwoRowsInt(SQL_C_STINYINT); diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/config/configuration.h b/modules/platforms/cpp/odbc/include/ignite/odbc/config/configuration.h index 2b1ec523aa00f..419a65e72a09b 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/config/configuration.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/config/configuration.h @@ -82,6 +82,9 @@ namespace ignite /** Connection attribute keyword for lazy attribute. */ static const std::string lazy; + + /** Connection attribute keyword for skipReducerOnUpdate attribute. */ + static const std::string skipReducerOnUpdate; }; /** Default values for configuration. */ @@ -125,6 +128,9 @@ namespace ignite /** Default value for lazy attribute. */ static const bool lazy; + + /** Default value for skipReducerOnUpdate attribute. */ + static const bool skipReducerOnUpdate; }; /** @@ -383,6 +389,26 @@ namespace ignite SetBoolValue(Key::lazy, val); } + /** + * Check update on server flag. + * + * @return True if update on server. + */ + bool IsSkipReducerOnUpdate() const + { + return GetBoolValue(Key::skipReducerOnUpdate, DefaultValue::skipReducerOnUpdate); + } + + /** + * Set update on server. + * + * @param val Value to set. + */ + void SetSkipReducerOnUpdate(bool val) + { + SetBoolValue(Key::skipReducerOnUpdate, val); + } + /** * Get protocol version. * diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/message.h b/modules/platforms/cpp/odbc/include/ignite/odbc/message.h index 91a808cab0f55..dda0ba93e8558 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/message.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/message.h @@ -79,9 +79,10 @@ namespace ignite * @param replicatedOnly Replicated only flag. * @param collocated Collocated flag. * @param lazy Lazy flag. + * @param skipReducerOnUpdate Skip reducer on update. 
*/ HandshakeRequest(const ProtocolVersion& version, bool distributedJoins, bool enforceJoinOrder, - bool replicatedOnly, bool collocated, bool lazy); + bool replicatedOnly, bool collocated, bool lazy, bool skipReducerOnUpdate); /** * Destructor. @@ -112,6 +113,9 @@ namespace ignite /** Lazy flag. */ bool lazy; + + /** Skip reducer on update flag. */ + bool skipReducerOnUpdate; }; /** diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/protocol_version.h b/modules/platforms/cpp/odbc/include/ignite/odbc/protocol_version.h index c36d5dd709884..e6088a74b46d3 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/protocol_version.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/protocol_version.h @@ -34,6 +34,7 @@ namespace ignite /** Current protocol version. */ static const ProtocolVersion VERSION_2_1_0; static const ProtocolVersion VERSION_2_1_5; + static const ProtocolVersion VERSION_2_3_0; typedef std::set VersionSet; diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/system/ui/dsn_configuration_window.h b/modules/platforms/cpp/odbc/include/ignite/odbc/system/ui/dsn_configuration_window.h index 2974b677f2c02..90286b941fcb7 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/system/ui/dsn_configuration_window.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/system/ui/dsn_configuration_window.h @@ -55,6 +55,7 @@ namespace ignite REPLICATED_ONLY_CHECK_BOX, COLLOCATED_CHECK_BOX, LAZY_CHECK_BOX, + SKIP_REDUCER_ON_UPDATE_CHECK_BOX, PROTOCOL_VERSION_LABEL, PROTOCOL_VERSION_COMBO_BOX, OK_BUTTON, @@ -149,6 +150,9 @@ namespace ignite /** Lazy CheckBox. */ std::auto_ptr lazyCheckBox; + /** Update on server CheckBox. */ + std::auto_ptr skipReducerOnUpdateCheckBox; + /** Protocol version edit field. 
*/ std::auto_ptr protocolVersionLabel; diff --git a/modules/platforms/cpp/odbc/os/win/src/system/ui/dsn_configuration_window.cpp b/modules/platforms/cpp/odbc/os/win/src/system/ui/dsn_configuration_window.cpp index 9b13481815d47..d5aa0dbd723b8 100644 --- a/modules/platforms/cpp/odbc/os/win/src/system/ui/dsn_configuration_window.cpp +++ b/modules/platforms/cpp/odbc/os/win/src/system/ui/dsn_configuration_window.cpp @@ -180,6 +180,12 @@ namespace ignite lazyCheckBox->SetEnabled(version >= ProtocolVersion::VERSION_2_1_5); + skipReducerOnUpdateCheckBox = CreateCheckBox(editPosX + checkBoxSize + interval, rowPos, + checkBoxSize, rowSize, "Skip reducer on update", ChildId::SKIP_REDUCER_ON_UPDATE_CHECK_BOX, + config.IsSkipReducerOnUpdate()); + + skipReducerOnUpdateCheckBox->SetEnabled(version >= ProtocolVersion::VERSION_2_3_0); + rowPos += interval * 2 + rowSize; connectionSettingsGroupBox = CreateGroupBox(margin, sectionBegin, width - 2 * margin, @@ -264,6 +270,13 @@ namespace ignite break; } + case ChildId::SKIP_REDUCER_ON_UPDATE_CHECK_BOX: + { + skipReducerOnUpdateCheckBox->SetChecked(!skipReducerOnUpdateCheckBox->IsChecked()); + + break; + } + case ChildId::PROTOCOL_VERSION_COMBO_BOX: { std::string versionStr; @@ -271,6 +284,7 @@ namespace ignite ProtocolVersion version = ProtocolVersion::FromString(versionStr); lazyCheckBox->SetEnabled(version >= ProtocolVersion::VERSION_2_1_5); + skipReducerOnUpdateCheckBox->SetEnabled(version >= ProtocolVersion::VERSION_2_3_0); break; } @@ -309,6 +323,7 @@ namespace ignite bool replicatedOnly; bool collocated; bool lazy; + bool skipReducerOnUpdate; nameEdit->GetText(dsn); addressEdit->GetText(address); @@ -330,6 +345,9 @@ namespace ignite collocated = collocatedCheckBox->IsEnabled() && collocatedCheckBox->IsChecked(); lazy = lazyCheckBox->IsEnabled() && lazyCheckBox->IsChecked(); + skipReducerOnUpdate = + skipReducerOnUpdateCheckBox->IsEnabled() && skipReducerOnUpdateCheckBox->IsChecked(); + LOG_MSG("Retriving arguments:"); 
LOG_MSG("DSN: " << dsn); LOG_MSG("Address: " << address); @@ -341,6 +359,7 @@ namespace ignite LOG_MSG("Replicated only: " << (replicatedOnly ? "true" : "false")); LOG_MSG("Collocated: " << (collocated ? "true" : "false")); LOG_MSG("Lazy: " << (lazy ? "true" : "false")); + LOG_MSG("Skip reducer on update: " << (skipReducerOnUpdate ? "true" : "false")); if (dsn.empty()) throw IgniteError(IgniteError::IGNITE_ERR_GENERIC, "DSN name can not be empty."); @@ -355,6 +374,7 @@ namespace ignite cfg.SetReplicatedOnly(replicatedOnly); cfg.SetCollocated(collocated); cfg.SetLazy(lazy); + cfg.SetSkipReducerOnUpdate(skipReducerOnUpdate); } } } diff --git a/modules/platforms/cpp/odbc/src/config/configuration.cpp b/modules/platforms/cpp/odbc/src/config/configuration.cpp index 95ed96401c9b0..be5a7814fb46d 100644 --- a/modules/platforms/cpp/odbc/src/config/configuration.cpp +++ b/modules/platforms/cpp/odbc/src/config/configuration.cpp @@ -32,34 +32,36 @@ namespace ignite { namespace config { - const std::string Configuration::Key::dsn = "dsn"; - const std::string Configuration::Key::driver = "driver"; - const std::string Configuration::Key::schema = "schema"; - const std::string Configuration::Key::address = "address"; - const std::string Configuration::Key::server = "server"; - const std::string Configuration::Key::port = "port"; - const std::string Configuration::Key::distributedJoins = "distributed_joins"; - const std::string Configuration::Key::enforceJoinOrder = "enforce_join_order"; - const std::string Configuration::Key::protocolVersion = "protocol_version"; - const std::string Configuration::Key::pageSize = "page_size"; - const std::string Configuration::Key::replicatedOnly = "replicated_only"; - const std::string Configuration::Key::collocated = "collocated"; - const std::string Configuration::Key::lazy = "lazy"; - - const std::string Configuration::DefaultValue::dsn = "Apache Ignite DSN"; - const std::string Configuration::DefaultValue::driver = "Apache Ignite"; - const 
std::string Configuration::DefaultValue::schema = "PUBLIC"; - const std::string Configuration::DefaultValue::address = ""; - const std::string Configuration::DefaultValue::server = ""; + const std::string Configuration::Key::dsn = "dsn"; + const std::string Configuration::Key::driver = "driver"; + const std::string Configuration::Key::schema = "schema"; + const std::string Configuration::Key::address = "address"; + const std::string Configuration::Key::server = "server"; + const std::string Configuration::Key::port = "port"; + const std::string Configuration::Key::distributedJoins = "distributed_joins"; + const std::string Configuration::Key::enforceJoinOrder = "enforce_join_order"; + const std::string Configuration::Key::protocolVersion = "protocol_version"; + const std::string Configuration::Key::pageSize = "page_size"; + const std::string Configuration::Key::replicatedOnly = "replicated_only"; + const std::string Configuration::Key::collocated = "collocated"; + const std::string Configuration::Key::lazy = "lazy"; + const std::string Configuration::Key::skipReducerOnUpdate = "skip_reducer_on_update"; + + const std::string Configuration::DefaultValue::dsn = "Apache Ignite DSN"; + const std::string Configuration::DefaultValue::driver = "Apache Ignite"; + const std::string Configuration::DefaultValue::schema = "PUBLIC"; + const std::string Configuration::DefaultValue::address = ""; + const std::string Configuration::DefaultValue::server = ""; const uint16_t Configuration::DefaultValue::port = 10800; const int32_t Configuration::DefaultValue::pageSize = 1024; - const bool Configuration::DefaultValue::distributedJoins = false; - const bool Configuration::DefaultValue::enforceJoinOrder = false; - const bool Configuration::DefaultValue::replicatedOnly = false; - const bool Configuration::DefaultValue::collocated = false; - const bool Configuration::DefaultValue::lazy = false; + const bool Configuration::DefaultValue::distributedJoins = false; + const bool 
Configuration::DefaultValue::enforceJoinOrder = false; + const bool Configuration::DefaultValue::replicatedOnly = false; + const bool Configuration::DefaultValue::collocated = false; + const bool Configuration::DefaultValue::lazy = false; + const bool Configuration::DefaultValue::skipReducerOnUpdate = false; const ProtocolVersion& Configuration::DefaultValue::protocolVersion = ProtocolVersion::GetCurrent(); diff --git a/modules/platforms/cpp/odbc/src/connection.cpp b/modules/platforms/cpp/odbc/src/connection.cpp index 161e1c4acca1b..8f4bf14b94312 100644 --- a/modules/platforms/cpp/odbc/src/connection.cpp +++ b/modules/platforms/cpp/odbc/src/connection.cpp @@ -417,6 +417,7 @@ namespace ignite bool replicatedOnly = false; bool collocated = false; bool lazy = false; + bool skipReducerOnUpdate = false; ProtocolVersion protocolVersion; try @@ -427,6 +428,7 @@ namespace ignite replicatedOnly = config.IsReplicatedOnly(); collocated = config.IsCollocated(); lazy = config.IsLazy(); + skipReducerOnUpdate = config.IsSkipReducerOnUpdate(); } catch (const IgniteError& err) { @@ -443,7 +445,8 @@ namespace ignite return SqlResult::AI_ERROR; } - HandshakeRequest req(protocolVersion, distributedJoins, enforceJoinOrder, replicatedOnly, collocated, lazy); + HandshakeRequest req(protocolVersion, distributedJoins, enforceJoinOrder, replicatedOnly, collocated, lazy, + skipReducerOnUpdate); HandshakeResponse rsp; try diff --git a/modules/platforms/cpp/odbc/src/dsn_config.cpp b/modules/platforms/cpp/odbc/src/dsn_config.cpp index c91cd8cac7ce0..536f6797319b1 100644 --- a/modules/platforms/cpp/odbc/src/dsn_config.cpp +++ b/modules/platforms/cpp/odbc/src/dsn_config.cpp @@ -108,6 +108,9 @@ namespace ignite bool lazy = ReadDsnBool(dsn, Configuration::Key::lazy, config.IsLazy()); + bool skipReducerOnUpdate = + ReadDsnBool(dsn, Configuration::Key::skipReducerOnUpdate, config.IsSkipReducerOnUpdate()); + std::string version = ReadDsnString(dsn, Configuration::Key::protocolVersion, 
config.GetProtocolVersion().ToString().c_str()); @@ -125,6 +128,7 @@ namespace ignite config.SetReplicatedOnly(replicatedOnly); config.SetCollocated(collocated); config.SetLazy(lazy); + config.SetSkipReducerOnUpdate(skipReducerOnUpdate); config.SetProtocolVersion(version); config.SetPageSize(pageSize); } diff --git a/modules/platforms/cpp/odbc/src/message.cpp b/modules/platforms/cpp/odbc/src/message.cpp index 36015913dae1c..4767c74b2424f 100644 --- a/modules/platforms/cpp/odbc/src/message.cpp +++ b/modules/platforms/cpp/odbc/src/message.cpp @@ -23,13 +23,14 @@ namespace ignite namespace odbc { HandshakeRequest::HandshakeRequest(const ProtocolVersion& version, bool distributedJoins, - bool enforceJoinOrder, bool replicatedOnly, bool collocated, bool lazy): + bool enforceJoinOrder, bool replicatedOnly, bool collocated, bool lazy, bool skipReducerOnUpdate): version(version), distributedJoins(distributedJoins), enforceJoinOrder(enforceJoinOrder), replicatedOnly(replicatedOnly), collocated(collocated), - lazy(lazy) + lazy(lazy), + skipReducerOnUpdate(skipReducerOnUpdate) { // No-op. 
} @@ -53,7 +54,12 @@ namespace ignite writer.WriteBool(enforceJoinOrder); writer.WriteBool(replicatedOnly); writer.WriteBool(collocated); - writer.WriteBool(lazy); + + if (version >= ProtocolVersion::VERSION_2_1_5) + writer.WriteBool(lazy); + + if (version >= ProtocolVersion::VERSION_2_3_0) + writer.WriteBool(skipReducerOnUpdate); } QueryExecuteRequest::QueryExecuteRequest(const std::string& schema, const std::string& sql, diff --git a/modules/platforms/cpp/odbc/src/protocol_version.cpp b/modules/platforms/cpp/odbc/src/protocol_version.cpp index b668fb8d99e8f..b0b91219f9f8f 100644 --- a/modules/platforms/cpp/odbc/src/protocol_version.cpp +++ b/modules/platforms/cpp/odbc/src/protocol_version.cpp @@ -28,10 +28,12 @@ namespace ignite { const ProtocolVersion ProtocolVersion::VERSION_2_1_0(2, 1, 0); const ProtocolVersion ProtocolVersion::VERSION_2_1_5(2, 1, 5); + const ProtocolVersion ProtocolVersion::VERSION_2_3_0(2, 3, 0); ProtocolVersion::VersionSet::value_type supportedArray[] = { ProtocolVersion::VERSION_2_1_0, - ProtocolVersion::VERSION_2_1_5 + ProtocolVersion::VERSION_2_1_5, + ProtocolVersion::VERSION_2_3_0, }; const ProtocolVersion::VersionSet ProtocolVersion::supported(supportedArray, @@ -60,7 +62,7 @@ namespace ignite const ProtocolVersion& ProtocolVersion::GetCurrent() { - return VERSION_2_1_5; + return VERSION_2_3_0; } void ThrowParseError() From 6df7ebc430cf5a099474361b61b2593a5884992b Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Fri, 13 Oct 2017 16:00:18 +0300 Subject: [PATCH 028/243] IGNITE-6621 .NET: Disable thin client This closes #2846 --- .../processors/odbc/ClientListenerNioListener.java | 7 ------- .../Apache.Ignite.Core.Tests/Client/Cache/CacheTest.cs | 3 ++- .../Client/Cache/CacheTestNoMeta.cs | 3 ++- .../Apache.Ignite.Core.Tests/Client/Cache/ScanQueryTest.cs | 3 ++- .../Client/ClientConnectionTest.cs | 3 ++- .../Apache.Ignite.Core.Tests/Client/ClientTestBase.cs | 2 +- .../Client/IgniteClientConfigurationTest.cs | 3 ++- 
.../Apache.Ignite.Core.Tests/Client/RawSocketTest.cs | 3 ++- .../dotnet/Apache.Ignite.Core/Client/Cache/ICacheClient.cs | 2 +- .../dotnet/Apache.Ignite.Core/Client/IIgniteClient.cs | 2 +- .../Apache.Ignite.Core/Client/IgniteClientConfiguration.cs | 2 +- .../Apache.Ignite.Core/Client/IgniteClientException.cs | 2 +- modules/platforms/dotnet/Apache.Ignite.Core/Ignition.cs | 2 +- 13 files changed, 18 insertions(+), 19 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/ClientListenerNioListener.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/ClientListenerNioListener.java index faecab3c157dc..11f5a8ce32b05 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/ClientListenerNioListener.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/ClientListenerNioListener.java @@ -27,7 +27,6 @@ import org.apache.ignite.internal.binary.streams.BinaryInputStream; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcConnectionContext; import org.apache.ignite.internal.processors.odbc.odbc.OdbcConnectionContext; -import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext; import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.nio.GridNioServerListenerAdapter; import org.apache.ignite.internal.util.nio.GridNioSession; @@ -44,9 +43,6 @@ public class ClientListenerNioListener extends GridNioServerListenerAdapter /// Thin client cache test. /// - public sealed class CacheTest : ClientTestBase + [Ignore] + internal sealed class CacheTest : ClientTestBase { /// /// Tests the cache put / get with primitive data types. 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTestNoMeta.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTestNoMeta.cs index 782e3cc43fe92..cb276e4c2e5e2 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTestNoMeta.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTestNoMeta.cs @@ -31,7 +31,8 @@ namespace Apache.Ignite.Core.Tests.Client.Cache /// /// Client cache test without metadata (no-op binary processor). /// - public class CacheTestNoMeta : ClientTestBase + [Ignore] + internal class CacheTestNoMeta : ClientTestBase { /// /// Tests the cache put / get with user data types. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ScanQueryTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ScanQueryTest.cs index 17a2b3f263952..77f8aa8d85533 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ScanQueryTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ScanQueryTest.cs @@ -32,7 +32,8 @@ namespace Apache.Ignite.Core.Tests.Client.Cache /// /// Tests scan queries. /// - public class ScanQueryTest : ClientTestBase + [Ignore] + internal class ScanQueryTest : ClientTestBase { /// /// Initializes a new instance of the class. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientConnectionTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientConnectionTest.cs index 66aa8442891c9..9878724e20843 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientConnectionTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientConnectionTest.cs @@ -29,7 +29,8 @@ namespace Apache.Ignite.Core.Tests.Client /// /// Tests client connection: port ranges, version checks, etc. /// - public class ClientConnectionTest + [Ignore] + internal class ClientConnectionTest { /// /// Fixture tear down. 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientTestBase.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientTestBase.cs index 408eb7363ca2e..d25dfc5634e06 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientTestBase.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientTestBase.cs @@ -25,7 +25,7 @@ namespace Apache.Ignite.Core.Tests.Client /// /// Base class for client tests. /// - public class ClientTestBase + internal class ClientTestBase { /** Cache name. */ protected const string CacheName = "cache"; diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/IgniteClientConfigurationTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/IgniteClientConfigurationTest.cs index 0734f42a18abc..08164a904526f 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/IgniteClientConfigurationTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/IgniteClientConfigurationTest.cs @@ -23,7 +23,8 @@ namespace Apache.Ignite.Core.Tests.Client /// /// Tests for . /// - public class IgniteClientConfigurationTest + [Ignore] + internal class IgniteClientConfigurationTest { /// /// Tests the defaults. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/RawSocketTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/RawSocketTest.cs index b637e880fd258..b5eac6b402cd7 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/RawSocketTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/RawSocketTest.cs @@ -30,7 +30,8 @@ namespace Apache.Ignite.Core.Tests.Client /// /// Tests the thin client mode with a raw socket. /// - public class RawSocketTest : ClientTestBase + [Ignore] + internal class RawSocketTest : ClientTestBase { /// /// Tests the socket handshake connection. 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/ICacheClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/ICacheClient.cs index d772ba6054c22..c881809854c2d 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/ICacheClient.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/ICacheClient.cs @@ -25,7 +25,7 @@ namespace Apache.Ignite.Core.Client.Cache /// Client cache API. See . /// // ReSharper disable once TypeParameterCanBeVariant (ICache shoul not be variant, more methods will be added) - public interface ICacheClient + internal interface ICacheClient { /// /// Name of this cache (null for default cache). diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Client/IIgniteClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Client/IIgniteClient.cs index ceb8f2689fe78..4a37989779750 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Client/IIgniteClient.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Client/IIgniteClient.cs @@ -24,7 +24,7 @@ namespace Apache.Ignite.Core.Client /// Main entry point for Ignite Thin Client APIs. /// You can obtain an instance of through . /// - public interface IIgniteClient : IDisposable + internal interface IIgniteClient : IDisposable { /// /// Gets the cache instance for the given name to work with keys and values of specified types. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Client/IgniteClientConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Client/IgniteClientConfiguration.cs index 8c9b6a106630e..4a191d7cf52e8 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Client/IgniteClientConfiguration.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Client/IgniteClientConfiguration.cs @@ -28,7 +28,7 @@ namespace Apache.Ignite.Core.Client /// This configuration should correspond to /// on a target node. /// - public class IgniteClientConfiguration + internal class IgniteClientConfiguration { /// /// Default port. 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Client/IgniteClientException.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Client/IgniteClientException.cs index 2df3d1bb06e41..205041e3eed30 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Client/IgniteClientException.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Client/IgniteClientException.cs @@ -26,7 +26,7 @@ namespace Apache.Ignite.Core.Client /// Ignite thin client exception. /// [Serializable] - public class IgniteClientException : IgniteException + internal class IgniteClientException : IgniteException { /** Error code field. */ private const string ErrorCodeField = "ErrorCode"; diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Ignition.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Ignition.cs index 4e5eae5868a3b..cb6e83bf26170 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Ignition.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Ignition.cs @@ -742,7 +742,7 @@ public static void StopAll(bool cancel) /// /// The client configuration. /// Ignite instance. 
- public static IIgniteClient StartClient(IgniteClientConfiguration clientConfiguration) + internal static IIgniteClient StartClient(IgniteClientConfiguration clientConfiguration) { IgniteArgumentCheck.NotNull(clientConfiguration, "clientConfiguration"); IgniteArgumentCheck.NotNull(clientConfiguration.Host, "clientConfiguration.Host"); From b00a05c621ba8200536484257400a6e4dcf30086 Mon Sep 17 00:00:00 2001 From: Alexey Popov Date: Fri, 13 Oct 2017 14:19:14 +0300 Subject: [PATCH 029/243] IGNITE-4723 .NET: Support REGEXP_LIKE in LINQ This closes #2842 --- .../Cache/Query/Linq/CacheLinqTest.Strings.cs | 18 ++++++ .../Impl/CacheQueryExpressionVisitor.cs | 5 ++ .../Apache.Ignite.Linq/Impl/MethodVisitor.cs | 55 ++++++++++++++++++- 3 files changed, 76 insertions(+), 2 deletions(-) diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Strings.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Strings.cs index 7457d0a043b50..b2bcfddc85e8a 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Strings.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Strings.cs @@ -78,6 +78,24 @@ public void TestStrings() Assert.Throws(() => CheckFunc(x => x.TrimEnd(toTrimFails), strings)); CheckFunc(x => Regex.Replace(x, @"son.\d", "kele!"), strings); + CheckFunc(x => Regex.Replace(x, @"son.\d", "kele!", RegexOptions.None), strings); + CheckFunc(x => Regex.Replace(x, @"person.\d", "akele!", RegexOptions.IgnoreCase), strings); + CheckFunc(x => Regex.Replace(x, @"person.\d", "akele!", RegexOptions.Multiline), strings); + CheckFunc(x => Regex.Replace(x, @"person.\d", "akele!", RegexOptions.IgnoreCase | RegexOptions.Multiline), + strings); + var notSupportedException = Assert.Throws(() => CheckFunc(x => + Regex.IsMatch(x, @"^person\d", RegexOptions.IgnoreCase | RegexOptions.CultureInvariant), strings)); + 
Assert.AreEqual("RegexOptions.CultureInvariant is not supported", notSupportedException.Message); + + CheckFunc(x => Regex.IsMatch(x, @"^Person_9\d"), strings); + CheckFunc(x => Regex.IsMatch(x, @"^person_9\d", RegexOptions.None), strings); + CheckFunc(x => Regex.IsMatch(x, @"^person_9\d", RegexOptions.IgnoreCase), strings); + CheckFunc(x => Regex.IsMatch(x, @"^Person_9\d", RegexOptions.Multiline), strings); + CheckFunc(x => Regex.IsMatch(x, @"^person_9\d", RegexOptions.IgnoreCase | RegexOptions.Multiline), strings); + notSupportedException = Assert.Throws(() => CheckFunc(x => + Regex.IsMatch(x, @"^person_9\d",RegexOptions.IgnoreCase | RegexOptions.CultureInvariant), strings)); + Assert.AreEqual("RegexOptions.CultureInvariant is not supported", notSupportedException.Message); + CheckFunc(x => x.Replace("son", ""), strings); CheckFunc(x => x.Replace("son", "kele"), strings); diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryExpressionVisitor.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryExpressionVisitor.cs index d187f089c2fa6..4caefe1b11c49 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryExpressionVisitor.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/CacheQueryExpressionVisitor.cs @@ -474,6 +474,11 @@ private bool VisitGroupByMember(Expression expression) [SuppressMessage("Microsoft.Design", "CA1062:Validate arguments of public methods")] protected override Expression VisitConstant(ConstantExpression expression) { + if (MethodVisitor.VisitConstantCall(expression, this)) + { + return expression; + } + AppendParameter(expression.Value); return expression; diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/MethodVisitor.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/MethodVisitor.cs index 9446af3baeb20..054a984646c8d 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/MethodVisitor.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/MethodVisitor.cs @@ -67,8 
+67,12 @@ internal static class MethodVisitor GetParameterizedTrimMethod("TrimEnd", "rtrim"), GetStringMethod("Replace", "replace", typeof(string), typeof(string)), - GetMethod(typeof (Regex), "Replace", new[] {typeof (string), typeof (string), typeof (string)}, - GetFunc("regexp_replace")), + GetRegexMethod("Replace", "regexp_replace", typeof (string), typeof (string), typeof (string)), + GetRegexMethod("Replace", "regexp_replace", typeof (string), typeof (string), typeof (string), + typeof(RegexOptions)), + GetRegexMethod("IsMatch", "regexp_like", typeof (string), typeof (string)), + GetRegexMethod("IsMatch", "regexp_like", typeof (string), typeof (string), typeof(RegexOptions)), + GetMethod(typeof (DateTime), "ToString", new[] {typeof (string)}, (e, v) => VisitFunc(e, v, "formatdatetime", ", 'en', 'UTC'")), @@ -113,6 +117,13 @@ internal static class MethodVisitor GetMathMethod("Truncate", typeof (decimal)), }.ToDictionary(x => x.Key, x => x.Value); + /// RegexOptions transformations. + private static readonly Dictionary RegexOptionFlags = new Dictionary + { + { RegexOptions.IgnoreCase, "i" }, + { RegexOptions.Multiline, "m" } + }; + /// /// Visits the property call expression. /// @@ -148,6 +159,37 @@ public static void VisitMethodCall(MethodCallExpression expression, CacheQueryEx del(expression, visitor); } + /// + /// Visits the constant call expression. + /// + public static bool VisitConstantCall(ConstantExpression expression, CacheQueryExpressionVisitor visitor) + { + if (expression.Type != typeof(RegexOptions)) + { + return false; + } + + var regexOptions = expression.Value as RegexOptions? ?? 
RegexOptions.None; + var result = string.Empty; + foreach (var option in RegexOptionFlags) + { + if (regexOptions.HasFlag(option.Key)) + { + result += option.Value; + regexOptions &= ~option.Key; + } + } + + if (regexOptions != RegexOptions.None) + { + throw new NotSupportedException(string.Format("RegexOptions.{0} is not supported", regexOptions)); + } + + visitor.AppendParameter(result); + + return true; + } + /// /// Gets the function. /// @@ -297,6 +339,15 @@ private static void AppendAdjustment(CacheQueryExpressionVisitor visitor, int[] return GetMethod(typeof(string), name, argTypes, GetFunc(sqlName)); } + /// + /// Gets the Regex method. + /// + private static KeyValuePair GetRegexMethod(string name, string sqlName, + params Type[] argTypes) + { + return GetMethod(typeof(Regex), name, argTypes, GetFunc(sqlName)); + } + /// /// Gets string parameterized Trim(TrimStart, TrimEnd) method. /// From 68468d0c93397b9fb116da3c903fde85433060b2 Mon Sep 17 00:00:00 2001 From: Alexey Popov Date: Fri, 6 Oct 2017 12:18:38 +0300 Subject: [PATCH 030/243] IGNITE-5224 .NET: PadLeft and PadRight support in LINQ This closes #2808 --- .../Cache/Query/Linq/CacheLinqTest.Strings.cs | 5 +++++ .../dotnet/Apache.Ignite.Linq/Impl/MethodVisitor.cs | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Strings.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Strings.cs index b2bcfddc85e8a..35996b0b0b1f5 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Strings.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Strings.cs @@ -46,6 +46,11 @@ public void TestStrings() { var strings = GetSecondPersonCache().AsCacheQueryable().Select(x => x.Value.Name); + CheckFunc(x => x.PadLeft(20), strings); + CheckFunc(x => x.PadLeft(20, 'l'), strings); + CheckFunc(x => x.PadRight(20), strings); + 
CheckFunc(x => x.PadRight(20, 'r'), strings); + CheckFunc(x => x.ToLower(), strings); CheckFunc(x => x.ToUpper(), strings); CheckFunc(x => x.StartsWith("Person_9"), strings); diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/MethodVisitor.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/MethodVisitor.cs index 054a984646c8d..84bd98f43e29e 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/MethodVisitor.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/MethodVisitor.cs @@ -66,6 +66,10 @@ internal static class MethodVisitor GetParameterizedTrimMethod("TrimStart", "ltrim"), GetParameterizedTrimMethod("TrimEnd", "rtrim"), GetStringMethod("Replace", "replace", typeof(string), typeof(string)), + GetStringMethod("PadLeft", "lpad", typeof (int)), + GetStringMethod("PadLeft", "lpad", typeof (int), typeof (char)), + GetStringMethod("PadRight", "rpad", typeof (int)), + GetStringMethod("PadRight", "rpad", typeof (int), typeof (char)), GetRegexMethod("Replace", "regexp_replace", typeof (string), typeof (string), typeof (string)), GetRegexMethod("Replace", "regexp_replace", typeof (string), typeof (string), typeof (string), From 08798f8e47bdfdd68a557385ed2ce98b4bb1609a Mon Sep 17 00:00:00 2001 From: devozerov Date: Fri, 13 Oct 2017 14:12:44 +0300 Subject: [PATCH 031/243] IGNITE-6605: SQL: common backup filter. This closes #2836. 
--- .../cache/query/GridCacheQueryManager.java | 40 ++-------- .../indexing/IndexingQueryCacheFilter.java | 72 +++++++++++++++++ .../spi/indexing/IndexingQueryFilter.java | 12 +-- .../spi/indexing/IndexingQueryFilterImpl.java | 79 +++++++++++++++++++ .../processors/query/h2/H2Cursor.java | 25 +++--- .../processors/query/h2/IgniteH2Indexing.java | 61 +------------- .../query/h2/database/H2PkHashIndex.java | 20 ++--- .../query/h2/database/H2TreeIndex.java | 4 +- .../query/h2/opt/GridH2IndexBase.java | 58 ++++++-------- .../query/h2/opt/GridLuceneIndex.java | 25 ++---- 10 files changed, 211 insertions(+), 185 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryCacheFilter.java create mode 100644 modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilterImpl.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java index 64e74fb6f1cd9..392b19f5e3ff4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java @@ -119,6 +119,7 @@ import org.apache.ignite.resources.IgniteInstanceResource; import org.apache.ignite.spi.IgniteSpiCloseableIterator; import org.apache.ignite.spi.indexing.IndexingQueryFilter; +import org.apache.ignite.spi.indexing.IndexingQueryFilterImpl; import org.apache.ignite.spi.indexing.IndexingSpi; import org.jetbrains.annotations.Nullable; import org.jsr166.ConcurrentHashMap8; @@ -130,7 +131,6 @@ import static org.apache.ignite.events.EventType.EVT_NODE_FAILED; import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; import static org.apache.ignite.internal.GridClosureCallMode.BROADCAST; -import static 
org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion.NONE; import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.OWNING; import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.SCAN; import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.SPI; @@ -1984,39 +1984,6 @@ public Collection sqlMetadataV2() throws IgniteCheckedExce } } - /** - * @param Key type. - * @param Value type. - * @param includeBackups Include backups. - * @return Predicate. - */ - @SuppressWarnings("unchecked") - @Nullable public IndexingQueryFilter backupsFilter(boolean includeBackups) { - if (includeBackups) - return null; - - return new IndexingQueryFilter() { - @Nullable @Override public IgniteBiPredicate forCache(final String cacheName) { - final GridKernalContext ctx = cctx.kernalContext(); - - final GridCacheAdapter cache = ctx.cache().internalCache(cacheName); - - if (cache.context().isReplicated() || cache.configuration().getBackups() == 0) - return null; - - return new IgniteBiPredicate() { - @Override public boolean apply(K k, V v) { - return cache.context().affinity().primaryByKey(ctx.discovery().localNode(), k, NONE); - } - }; - } - - @Override public boolean isValueRequired() { - return false; - } - }; - } - /** * @return Topology version for query requests. */ @@ -2029,7 +1996,10 @@ public AffinityTopologyVersion queryTopologyVersion() { * @return Filter. 
*/ private IndexingQueryFilter filter(GridCacheQueryAdapter qry) { - return backupsFilter(qry.includeBackups()); + if (qry.includeBackups()) + return null; + + return new IndexingQueryFilterImpl(cctx.kernalContext(), AffinityTopologyVersion.NONE, null); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryCacheFilter.java b/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryCacheFilter.java new file mode 100644 index 0000000000000..6257f47e2f516 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryCacheFilter.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.spi.indexing; + +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.GridCacheAffinityManager; + +import java.util.Set; + +/** + * Indexing query filter for specific cache. + */ +public class IndexingQueryCacheFilter { + /** Affinity manager. */ + private final GridCacheAffinityManager aff; + + /** Partitions. */ + private final Set parts; + + /** Topology version. 
*/ + private final AffinityTopologyVersion topVer; + + /** Local node. */ + private final ClusterNode locNode; + + /** + * Constructor. + * + * @param aff Affinity. + * @param parts Partitions. + * @param topVer Topology version. + * @param locNode Local node. + */ + public IndexingQueryCacheFilter(GridCacheAffinityManager aff, Set parts, + AffinityTopologyVersion topVer, ClusterNode locNode) { + this.aff = aff; + this.parts = parts; + this.topVer = topVer; + this.locNode = locNode; + } + + /** + * Apply filter. + * + * @param key Key. + * @return {@code True} if passed. + */ + public boolean apply(Object key) { + int part = aff.partition(key); + + if (parts == null) + return aff.primaryByPartition(locNode, part, topVer); + else + return parts.contains(part); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilter.java b/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilter.java index 74d349a459348..b0d969384f0e0 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilter.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilter.java @@ -17,7 +17,6 @@ package org.apache.ignite.spi.indexing; -import org.apache.ignite.lang.IgniteBiPredicate; import org.jetbrains.annotations.Nullable; /** @@ -30,14 +29,5 @@ public interface IndexingQueryFilter { * @param cacheName Cache name. * @return Predicate or {@code null} if no filtering is needed. */ - @Nullable public IgniteBiPredicate forCache(String cacheName); - - /** - * Is the value required for filtering logic? - * If false then null instead of value will be passed - * to IgniteBiPredicate returned by {@link #forCache(String)} method. - * - * @return true if value is required for filtering, false otherwise. 
- */ - public boolean isValueRequired(); + @Nullable public IndexingQueryCacheFilter forCache(String cacheName); } \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilterImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilterImpl.java new file mode 100644 index 0000000000000..53dcbf634aaf4 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryFilterImpl.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.spi.indexing; + +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.GridCacheAdapter; +import org.apache.ignite.internal.util.typedef.F; +import org.jetbrains.annotations.Nullable; + +import java.util.HashSet; + +/** + * Indexing query filter. + */ +public class IndexingQueryFilterImpl implements IndexingQueryFilter { + /** Kernal context. */ + private final GridKernalContext ctx; + + /** Topology version. */ + private final AffinityTopologyVersion topVer; + + /** Partitions. 
*/ + private final HashSet parts; + + /** + * Constructor. + * + * @param ctx Kernal context. + * @param topVer Topology version. + * @param partsArr Partitions array. + */ + public IndexingQueryFilterImpl(GridKernalContext ctx, @Nullable AffinityTopologyVersion topVer, + @Nullable int[] partsArr) { + this.ctx = ctx; + + this.topVer = topVer != null ? topVer : AffinityTopologyVersion.NONE; + + if (F.isEmpty(partsArr)) + parts = null; + else { + parts = new HashSet<>(); + + for (int part : partsArr) + parts.add(part); + } + } + + /** {@inheritDoc} */ + @Nullable @Override public IndexingQueryCacheFilter forCache(String cacheName) { + final GridCacheAdapter cache = ctx.cache().internalCache(cacheName); + + // REPLICATED -> nothing to filter (explicit partitions are not supported). + if (cache.context().isReplicated()) + return null; + + // No backups and explicit partitions -> nothing to filter. + if (cache.configuration().getBackups() == 0 && parts == null) + return null; + + return new IndexingQueryCacheFilter(cache.context().affinity(), parts, topVer, + ctx.discovery().localNode()); + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Cursor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Cursor.java index de3111d078375..e09108d7a01db 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Cursor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Cursor.java @@ -17,14 +17,15 @@ package org.apache.ignite.internal.processors.query.h2; -import org.apache.ignite.*; -import org.apache.ignite.internal.processors.query.h2.opt.*; -import org.apache.ignite.internal.util.lang.*; -import org.apache.ignite.internal.util.typedef.internal.*; -import org.apache.ignite.lang.*; -import org.h2.index.*; -import org.h2.message.*; -import org.h2.result.*; +import org.apache.ignite.IgniteCheckedException; +import 
org.apache.ignite.internal.processors.query.h2.opt.GridH2Row; +import org.apache.ignite.internal.util.lang.GridCursor; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.spi.indexing.IndexingQueryCacheFilter; +import org.h2.index.Cursor; +import org.h2.message.DbException; +import org.h2.result.Row; +import org.h2.result.SearchRow; /** * Cursor. @@ -34,7 +35,7 @@ public class H2Cursor implements Cursor { private final GridCursor cursor; /** */ - private final IgniteBiPredicate filter; + private final IndexingQueryCacheFilter filter; /** */ private final long time = U.currentTimeMillis(); @@ -43,7 +44,7 @@ public class H2Cursor implements Cursor { * @param cursor Cursor. * @param filter Filter. */ - public H2Cursor(GridCursor cursor, IgniteBiPredicate filter) { + public H2Cursor(GridCursor cursor, IndexingQueryCacheFilter filter) { assert cursor != null; this.cursor = cursor; @@ -85,12 +86,10 @@ public H2Cursor(GridCursor cursor) { return true; Object key = row.getValue(0).getObject(); - Object val = row.getValue(1).getObject(); assert key != null; - assert val != null; - if (filter.apply(key, val)) + if (filter.apply(key)) return true; } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index fddd2e8744b09..c172e65eb6748 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -65,8 +65,6 @@ import org.apache.ignite.internal.processors.cache.CacheObject; import org.apache.ignite.internal.processors.cache.CacheObjectUtils; import org.apache.ignite.internal.processors.cache.CacheObjectValueContext; -import org.apache.ignite.internal.processors.cache.GridCacheAdapter; -import 
org.apache.ignite.internal.processors.cache.GridCacheAffinityManager; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheEntryEx; import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException; @@ -130,7 +128,6 @@ import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiClosure; -import org.apache.ignite.lang.IgniteBiPredicate; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteFuture; import org.apache.ignite.lang.IgniteInClosure; @@ -139,6 +136,7 @@ import org.apache.ignite.plugin.extensions.communication.Message; import org.apache.ignite.resources.LoggerResource; import org.apache.ignite.spi.indexing.IndexingQueryFilter; +import org.apache.ignite.spi.indexing.IndexingQueryFilterImpl; import org.h2.api.ErrorCode; import org.h2.api.JavaObjectSerializer; import org.h2.command.Prepared; @@ -2375,62 +2373,7 @@ public void clearCachedQueries() { /** {@inheritDoc} */ @Override public IndexingQueryFilter backupFilter(@Nullable final AffinityTopologyVersion topVer, @Nullable final int[] parts) { - final AffinityTopologyVersion topVer0 = topVer != null ? topVer : AffinityTopologyVersion.NONE; - - return new IndexingQueryFilter() { - @Nullable @Override public IgniteBiPredicate forCache(String cacheName) { - final GridCacheAdapter cache = ctx.cache().internalCache(cacheName); - - if (cache.context().isReplicated()) - return null; - - final GridCacheAffinityManager aff = cache.context().affinity(); - - if (parts != null) { - if (parts.length < 64) { // Fast scan for small arrays. - return new IgniteBiPredicate() { - @Override public boolean apply(K k, V v) { - int p = aff.partition(k); - - for (int p0 : parts) { - if (p0 == p) - return true; - - if (p0 > p) // Array is sorted. 
- return false; - } - - return false; - } - }; - } - - return new IgniteBiPredicate() { - @Override public boolean apply(K k, V v) { - int p = aff.partition(k); - - return Arrays.binarySearch(parts, p) >= 0; - } - }; - } - - final ClusterNode locNode = ctx.discovery().localNode(); - - return new IgniteBiPredicate() { - @Override public boolean apply(K k, V v) { - return aff.primaryByKey(locNode, k, topVer0); - } - }; - } - - @Override public boolean isValueRequired() { - return false; - } - - @Override public String toString() { - return "IndexingQueryFilter [ver=" + topVer + ']'; - } - }; + return new IndexingQueryFilterImpl(ctx, topVer, parts); } /** diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java index b32bfb8f8d49e..6691485420df0 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java @@ -31,8 +31,8 @@ import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.util.lang.GridCursor; -import org.apache.ignite.lang.IgniteBiPredicate; import org.apache.ignite.spi.indexing.IndexingQueryFilter; +import org.apache.ignite.spi.indexing.IndexingQueryCacheFilter; import org.h2.engine.Session; import org.h2.index.Cursor; import org.h2.index.IndexType; @@ -85,7 +85,7 @@ public H2PkHashIndex( /** {@inheritDoc} */ @Override public Cursor find(Session ses, final SearchRow lower, final SearchRow upper) { IndexingQueryFilter f = threadLocalFilter(); - IgniteBiPredicate p = null; + IndexingQueryCacheFilter p = null; if (f != null) { String cacheName = getTable().cacheName(); @@ -179,13 +179,13 @@ private class H2Cursor 
implements Cursor { final GridCursor cursor; /** */ - final IgniteBiPredicate filter; + final IndexingQueryCacheFilter filter; /** * @param cursor Cursor. * @param filter Filter. */ - private H2Cursor(GridCursor cursor, IgniteBiPredicate filter) { + private H2Cursor(GridCursor cursor, IndexingQueryCacheFilter filter) { assert cursor != null; this.cursor = cursor; @@ -222,17 +222,7 @@ private H2Cursor(GridCursor cursor, IgniteBiPredicate getAvailableInlineColumns(IndexColumn[] cols) { @Override public Cursor find(Session ses, SearchRow lower, SearchRow upper) { try { IndexingQueryFilter f = threadLocalFilter(); - IgniteBiPredicate p = null; + IndexingQueryCacheFilter p = null; if (f != null) { String cacheName = getTable().cacheName(); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java index 048192a3407ba..92b7d108da414 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java @@ -17,12 +17,6 @@ package org.apache.ignite.internal.processors.query.h2.opt; -import java.util.*; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.Future; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; -import javax.cache.CacheException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteInterruptedException; import org.apache.ignite.IgniteLogger; @@ -41,17 +35,18 @@ import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2RowRangeBounds; import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2ValueMessage; import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2ValueMessageFactory; -import 
org.apache.ignite.internal.util.*; -import org.apache.ignite.internal.util.lang.*; +import org.apache.ignite.internal.util.GridSpinBusyLock; +import org.apache.ignite.internal.util.IgniteTree; +import org.apache.ignite.internal.util.lang.GridCursor; import org.apache.ignite.internal.util.typedef.CIX2; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; -import org.apache.ignite.lang.IgniteBiPredicate; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.logger.NullLogger; import org.apache.ignite.plugin.extensions.communication.Message; import org.apache.ignite.spi.indexing.IndexingQueryFilter; +import org.apache.ignite.spi.indexing.IndexingQueryCacheFilter; import org.h2.engine.Session; import org.h2.index.BaseIndex; import org.h2.index.Cursor; @@ -68,13 +63,29 @@ import org.h2.value.ValueNull; import org.jetbrains.annotations.Nullable; +import javax.cache.CacheException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.UUID; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; + import static java.util.Collections.emptyIterator; import static java.util.Collections.singletonList; import static org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode.LOCAL_ONLY; import static org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode.OFF; -import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.KEY_COL; -import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.VAL_COL; import 
static org.apache.ignite.internal.processors.query.h2.opt.GridH2CollocationModel.buildCollocationModel; +import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.KEY_COL; import static org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryType.MAP; import static org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryType.PREPARE; import static org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2IndexRangeResponse.STATUS_ERROR; @@ -1574,15 +1585,13 @@ protected GridCursor doFind0( protected static class FilteringCursor implements GridCursor { /** */ private final GridCursor cursor; + /** */ - private final IgniteBiPredicate fltr; + private final IndexingQueryCacheFilter fltr; /** */ private final long time; - /** Is value required for filtering predicate? */ - private final boolean isValRequired; - /** */ private GridH2Row next; @@ -1595,19 +1604,8 @@ protected static class FilteringCursor implements GridCursor { protected FilteringCursor(GridCursor cursor, long time, IndexingQueryFilter qryFilter, String cacheName) { this.cursor = cursor; - this.time = time; - - if (qryFilter != null) { - this.fltr = qryFilter.forCache(cacheName); - - this.isValRequired = qryFilter.isValueRequired(); - } - else { - this.fltr = null; - - this.isValRequired = false; - } + this.fltr = qryFilter != null ? qryFilter.forCache(cacheName) : null; } /** @@ -1623,12 +1621,8 @@ protected boolean accept(GridH2Row row) { return true; Object key = row.getValue(KEY_COL).getObject(); - Object val = isValRequired ? 
row.getValue(VAL_COL).getObject() : null; - - assert key != null; - assert !isValRequired || val != null; - return fltr.apply(key, val); + return fltr.apply(key); } /** {@inheritDoc} */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneIndex.java index f8d3ef2f66a92..b5d2456612df5 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneIndex.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneIndex.java @@ -32,9 +32,9 @@ import org.apache.ignite.internal.util.lang.GridCloseableIterator; import org.apache.ignite.internal.util.offheap.unsafe.GridUnsafeMemory; import org.apache.ignite.internal.util.typedef.internal.U; -import org.apache.ignite.lang.IgniteBiPredicate; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.spi.indexing.IndexingQueryFilter; +import org.apache.ignite.spi.indexing.IndexingQueryCacheFilter; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -290,7 +290,7 @@ public GridCloseableIterator> query(String qry, throw new IgniteCheckedException(e); } - IgniteBiPredicate fltr = null; + IndexingQueryCacheFilter fltr = null; if (filters != null) fltr = filters.forCache(cacheName); @@ -321,7 +321,7 @@ private class It extends GridCloseableIteratorAdapter> private final ScoreDoc[] docs; /** */ - private final IgniteBiPredicate filters; + private final IndexingQueryCacheFilter filters; /** */ private int idx; @@ -341,7 +341,7 @@ private class It extends GridCloseableIteratorAdapter> * @param filters Filters over result. * @throws IgniteCheckedException if failed. 
*/ - private It(IndexReader reader, IndexSearcher searcher, ScoreDoc[] docs, IgniteBiPredicate filters) + private It(IndexReader reader, IndexSearcher searcher, ScoreDoc[] docs, IndexingQueryCacheFilter filters) throws IgniteCheckedException { this.reader = reader; this.searcher = searcher; @@ -353,17 +353,6 @@ private It(IndexReader reader, IndexSearcher searcher, ScoreDoc[] docs, IgniteBi findNext(); } - /** - * Filters key using predicates. - * - * @param key Key. - * @param val Value. - * @return {@code True} if key passes filter. - */ - private boolean filter(K key, V val) { - return filters == null || filters.apply(key, val); - } - /** * @param bytes Bytes. * @param ldr Class loader. @@ -404,15 +393,15 @@ private void findNext() throws IgniteCheckedException { K k = unmarshall(doc.getBinaryValue(KEY_FIELD_NAME).bytes, ldr); + if (filters != null && !filters.apply(k)) + continue; + V v = type.valueClass() == String.class ? (V)doc.get(VAL_STR_FIELD_NAME) : this.unmarshall(doc.getBinaryValue(VAL_FIELD_NAME).bytes, ldr); assert v != null; - if (!filter(k, v)) - continue; - curr = new IgniteBiTuple<>(k, v); break; From 2b59a241de3935a338842b8fc3221aedc8e11e1d Mon Sep 17 00:00:00 2001 From: devozerov Date: Mon, 16 Oct 2017 10:33:36 +0300 Subject: [PATCH 032/243] IGNITE-6631: Minor improvements to GridH2KeyValueRowOnheap. This closes #2855. 
--- .../processors/query/h2/H2RowDescriptor.java | 431 ------------------ .../query/h2/H2TableDescriptor.java | 2 +- .../processors/query/h2/H2TableEngine.java | 5 +- .../internal/processors/query/h2/H2Utils.java | 3 +- .../processors/query/h2/IgniteH2Indexing.java | 9 +- .../query/h2/database/H2PkHashIndex.java | 7 +- .../query/h2/database/H2RowFactory.java | 6 +- .../query/h2/opt/GridH2KeyValueRowOnheap.java | 147 +++--- .../query/h2/opt/GridH2RowDescriptor.java | 412 +++++++++++++++-- .../processors/query/h2/opt/GridH2Table.java | 28 +- 10 files changed, 473 insertions(+), 577 deletions(-) delete mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2RowDescriptor.java diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2RowDescriptor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2RowDescriptor.java deleted file mode 100644 index 8fb81ba652e83..0000000000000 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2RowDescriptor.java +++ /dev/null @@ -1,431 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.internal.processors.query.h2; - -import java.math.BigDecimal; -import java.sql.Date; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.Arrays; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.internal.processors.cache.CacheObject; -import org.apache.ignite.internal.processors.cache.GridCacheContext; -import org.apache.ignite.internal.processors.cache.KeyCacheObject; -import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; -import org.apache.ignite.internal.processors.query.GridQueryProperty; -import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowFactory; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2ValueCacheObject; -import org.h2.message.DbException; -import org.h2.result.SearchRow; -import org.h2.result.SimpleRow; -import org.h2.util.LocalDateTimeUtils; -import org.h2.value.DataType; -import org.h2.value.Value; -import org.h2.value.ValueArray; -import org.h2.value.ValueBoolean; -import org.h2.value.ValueByte; -import org.h2.value.ValueBytes; -import org.h2.value.ValueDate; -import org.h2.value.ValueDecimal; -import org.h2.value.ValueDouble; -import org.h2.value.ValueFloat; -import org.h2.value.ValueGeometry; -import org.h2.value.ValueInt; -import org.h2.value.ValueJavaObject; -import org.h2.value.ValueLong; -import org.h2.value.ValueNull; -import org.h2.value.ValueShort; -import org.h2.value.ValueString; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; -import org.h2.value.ValueUuid; -import 
org.jetbrains.annotations.Nullable; - -import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.DEFAULT_COLUMNS_COUNT; -import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.KEY_COL; -import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.VAL_COL; -import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.VER_COL; - -/** - * Row descriptor. - */ -public class H2RowDescriptor implements GridH2RowDescriptor { - /** Indexing SPI. */ - private final IgniteH2Indexing idx; - - /** Table descriptor. */ - private final H2TableDescriptor tbl; - - /** */ - private final GridQueryTypeDescriptor type; - - /** */ - private volatile String[] fields; - - /** */ - private volatile int[] fieldTypes; - - /** */ - private final int keyType; - - /** */ - private final int valType; - - /** */ - private volatile GridQueryProperty[] props; - - /** Id of user-defined key column */ - private volatile int keyAliasColId; - - /** Id of user-defined value column */ - private volatile int valAliasColId; - - /** - * Constructor. - * - * @param idx Indexing. - * @param tbl Table. - * @param type Type descriptor. - */ - H2RowDescriptor(IgniteH2Indexing idx, H2TableDescriptor tbl, GridQueryTypeDescriptor type) { - assert type != null; - - this.idx = idx; - this.tbl = tbl; - this.type = type; - - keyType = DataType.getTypeFromClass(type.keyClass()); - valType = DataType.getTypeFromClass(type.valueClass()); - - refreshMetadataFromTypeDescriptor(); - } - - /** - * Update metadata of this row descriptor according to current state of type descriptor. 
- */ - @SuppressWarnings("WeakerAccess") - public final void refreshMetadataFromTypeDescriptor() { - Map> allFields = new LinkedHashMap<>(); - - allFields.putAll(type.fields()); - - fields = allFields.keySet().toArray(new String[allFields.size()]); - - fieldTypes = new int[fields.length]; - - Class[] classes = allFields.values().toArray(new Class[fields.length]); - - for (int i = 0; i < fieldTypes.length; i++) - fieldTypes[i] = DataType.getTypeFromClass(classes[i]); - - props = new GridQueryProperty[fields.length]; - - for (int i = 0; i < fields.length; i++) { - GridQueryProperty p = type.property(fields[i]); - - assert p != null : fields[i]; - - props[i] = p; - } - - List fieldsList = Arrays.asList(fields); - - keyAliasColId = - (type.keyFieldName() != null) ? DEFAULT_COLUMNS_COUNT + fieldsList.indexOf(type.keyFieldAlias()) : -1; - - valAliasColId = - (type.valueFieldName() != null) ? DEFAULT_COLUMNS_COUNT + fieldsList.indexOf(type.valueFieldAlias()) : -1; - } - - /** {@inheritDoc} */ - @Override public IgniteH2Indexing indexing() { - return idx; - } - - /** {@inheritDoc} */ - @Override public GridQueryTypeDescriptor type() { - return type; - } - - /** {@inheritDoc} */ - @Override public GridCacheContext context() { - return tbl.cache(); - } - - /** {@inheritDoc} */ - @SuppressWarnings("ConstantConditions") - @Override public Value wrap(Object obj, int type) throws IgniteCheckedException { - assert obj != null; - - if (obj instanceof CacheObject) { // Handle cache object. 
- CacheObject co = (CacheObject)obj; - - if (type == Value.JAVA_OBJECT) - return new GridH2ValueCacheObject(co, idx.objectContext()); - - obj = co.value(idx.objectContext(), false); - } - - switch (type) { - case Value.BOOLEAN: - return ValueBoolean.get((Boolean)obj); - case Value.BYTE: - return ValueByte.get((Byte)obj); - case Value.SHORT: - return ValueShort.get((Short)obj); - case Value.INT: - return ValueInt.get((Integer)obj); - case Value.FLOAT: - return ValueFloat.get((Float)obj); - case Value.LONG: - return ValueLong.get((Long)obj); - case Value.DOUBLE: - return ValueDouble.get((Double)obj); - case Value.UUID: - UUID uuid = (UUID)obj; - return ValueUuid.get(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits()); - case Value.DATE: - if (LocalDateTimeUtils.isLocalDate(obj.getClass())) - return LocalDateTimeUtils.localDateToDateValue(obj); - - return ValueDate.get((Date)obj); - - case Value.TIME: - if (LocalDateTimeUtils.isLocalTime(obj.getClass())) - return LocalDateTimeUtils.localTimeToTimeValue(obj); - - return ValueTime.get((Time)obj); - - case Value.TIMESTAMP: - if (obj instanceof java.util.Date && !(obj instanceof Timestamp)) - obj = new Timestamp(((java.util.Date)obj).getTime()); - - if (LocalDateTimeUtils.isLocalDateTime(obj.getClass())) - return LocalDateTimeUtils.localDateTimeToValue(obj); - - return ValueTimestamp.get((Timestamp)obj); - - case Value.DECIMAL: - return ValueDecimal.get((BigDecimal)obj); - case Value.STRING: - return ValueString.get(obj.toString()); - case Value.BYTES: - return ValueBytes.get((byte[])obj); - case Value.JAVA_OBJECT: - return ValueJavaObject.getNoCopy(obj, null, null); - case Value.ARRAY: - Object[] arr = (Object[])obj; - - Value[] valArr = new Value[arr.length]; - - for (int i = 0; i < arr.length; i++) { - Object o = arr[i]; - - valArr[i] = o == null ? 
ValueNull.INSTANCE : wrap(o, DataType.getTypeFromClass(o.getClass())); - } - - return ValueArray.get(valArr); - - case Value.GEOMETRY: - return ValueGeometry.getFromGeometry(obj); - } - - throw new IgniteCheckedException("Failed to wrap value[type=" + type + ", value=" + obj + "]"); - } - - /** {@inheritDoc} */ - @Override public GridH2Row createRow(KeyCacheObject key, int partId, @Nullable CacheObject val, - GridCacheVersion ver, long expirationTime) throws IgniteCheckedException { - GridH2Row row; - - try { - if (val == null) // Only can happen for remove operation, can create simple search row. - row = GridH2RowFactory.create(wrap(key, keyType)); - else - row = new GridH2KeyValueRowOnheap(this, key, keyType, val, valType, ver, expirationTime); - } - catch (ClassCastException e) { - throw new IgniteCheckedException("Failed to convert key to SQL type. " + - "Please make sure that you always store each value type with the same key type " + - "or configure key type as common super class for all actual keys for this value type.", e); - } - - row.ver = ver; - - row.key = key; - row.val = val; - row.partId = partId; - - return row; - } - - /** {@inheritDoc} */ - @Override public int valueType() { - return valType; - } - - /** {@inheritDoc} */ - @Override public int fieldsCount() { - return fields.length; - } - - /** {@inheritDoc} */ - @Override public int fieldType(int col) { - return fieldTypes[col]; - } - - /** {@inheritDoc} */ - @Override public Object columnValue(Object key, Object val, int col) { - try { - return props[col].value(key, val); - } - catch (IgniteCheckedException e) { - throw DbException.convert(e); - } - } - - /** {@inheritDoc} */ - @Override public void setColumnValue(Object key, Object val, Object colVal, int col) { - try { - props[col].setValue(key, val, colVal); - } - catch (IgniteCheckedException e) { - throw DbException.convert(e); - } - } - - /** {@inheritDoc} */ - @Override public boolean isColumnKeyProperty(int col) { - return 
props[col].key(); - } - - /** {@inheritDoc} */ - @Override public boolean isKeyColumn(int colId) { - assert colId >= 0; - return colId == KEY_COL || colId == keyAliasColId; - } - - /** {@inheritDoc} */ - @Override public boolean isValueColumn(int colId) { - assert colId >= 0; - return colId == VAL_COL || colId == valAliasColId; - } - - /** {@inheritDoc} */ - @SuppressWarnings("RedundantIfStatement") - @Override public boolean isKeyValueOrVersionColumn(int colId) { - assert colId >= 0; - - if (colId < DEFAULT_COLUMNS_COUNT) - return true; - - if (colId == keyAliasColId) - return true; - - if (colId == valAliasColId) - return true; - - return false; - } - - /** {@inheritDoc} */ - @Override public boolean checkKeyIndexCondition(int masks[], int mask) { - assert masks != null; - assert masks.length > 0; - - if (keyAliasColId < 0) - return (masks[KEY_COL] & mask) != 0; - else - return (masks[KEY_COL] & mask) != 0 || (masks[keyAliasColId] & mask) != 0; - } - - /** {@inheritDoc} */ - @Override public void initValueCache(Value valCache[], Value key, Value val, Value ver) { - assert valCache != null; - assert valCache.length > 0; - - valCache[KEY_COL] = key; - valCache[VAL_COL] = val; - valCache[VER_COL] = ver; - - if (keyAliasColId > 0) - valCache[keyAliasColId] = key; - - if (valAliasColId > 0) - valCache[valAliasColId] = val; - } - - /** {@inheritDoc} */ - @Override public SearchRow prepareProxyIndexRow(SearchRow row) { - if (row == null) - return null; - - Value[] data = new Value[row.getColumnCount()]; - for (int idx = 0; idx < data.length; idx++) - data[idx] = row.getValue(idx); - - copyAliasColumnData(data, KEY_COL, keyAliasColId); - copyAliasColumnData(data, VAL_COL, valAliasColId); - - return new SimpleRow(data); - } - - /** - * Copies data between original and alias columns - * - * @param data Array of values. - * @param colId Original column id. - * @param aliasColId Alias column id. 
- */ - private void copyAliasColumnData(Value[] data, int colId, int aliasColId) { - if (aliasColId <= 0) - return; - - if (data[aliasColId] == null && data[colId] != null) - data[aliasColId] = data[colId]; - - if (data[colId] == null && data[aliasColId] != null) - data[colId] = data[aliasColId]; - } - - /** {@inheritDoc} */ - @Override public int getAlternativeColumnId(int colId) { - if (keyAliasColId > 0) { - if (colId == KEY_COL) - return keyAliasColId; - else if (colId == keyAliasColId) - return KEY_COL; - } - if (valAliasColId > 0) { - if (colId == VAL_COL) - return valAliasColId; - else if (colId == valAliasColId) - return VAL_COL; - } - - return colId; - } -} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2TableDescriptor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2TableDescriptor.java index 391b00274fdb7..899bdda0b457a 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2TableDescriptor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2TableDescriptor.java @@ -141,7 +141,7 @@ String typeName() { /** * @return Cache context. 
*/ - GridCacheContext cache() { + public GridCacheContext cache() { return cctx; } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2TableEngine.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2TableEngine.java index d3e9560340b57..c05aaf6189526 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2TableEngine.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2TableEngine.java @@ -21,6 +21,7 @@ import java.sql.SQLException; import java.sql.Statement; import org.apache.ignite.internal.processors.query.h2.database.H2RowFactory; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.h2.api.TableEngine; import org.h2.command.ddl.CreateTableData; @@ -31,7 +32,7 @@ */ public class H2TableEngine implements TableEngine { /** */ - private static H2RowDescriptor rowDesc0; + private static GridH2RowDescriptor rowDesc0; /** */ private static H2RowFactory rowFactory0; @@ -53,7 +54,7 @@ public class H2TableEngine implements TableEngine { * @throws SQLException If failed. * @return Created table. 
*/ - public static synchronized GridH2Table createTable(Connection conn, String sql, H2RowDescriptor rowDesc, + public static synchronized GridH2Table createTable(Connection conn, String sql, GridH2RowDescriptor rowDesc, H2RowFactory rowFactory, H2TableDescriptor tblDesc) throws SQLException { rowDesc0 = rowDesc; diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java index 157e1ba7ac8ae..cfbb7bb08c2d2 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java @@ -247,8 +247,7 @@ public static void setupConnection(Connection conn, boolean distributedJoins, bo * @return Converted object. * @throws IgniteCheckedException if failed. */ - public static Object convert(Object val, GridH2RowDescriptor desc, int type) - throws IgniteCheckedException { + public static Object convert(Object val, GridH2RowDescriptor desc, int type) throws IgniteCheckedException { if (val == null) return null; diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index c172e65eb6748..dd35723f9cb21 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -675,9 +675,7 @@ private void addInitialUserIndex(String schemaName, H2TableDescriptor desc, Grid if (expTime == 0L) expTime = Long.MAX_VALUE; - GridH2Row row = rowDesc.createRow(key, part, val, ver, expTime); - - row.link(link); + GridH2Row row = rowDesc.createRow(key, part, val, ver, expTime, link); h2Idx.put(row); } @@ -1775,7 +1773,7 @@ 
private void createTable(String schemaName, H2Schema schema, H2TableDescriptor t if (log.isDebugEnabled()) log.debug("Creating DB table with SQL: " + sql); - H2RowDescriptor rowDesc = new H2RowDescriptor(this, tbl, tbl.type()); + GridH2RowDescriptor rowDesc = new GridH2RowDescriptor(this, tbl, tbl.type()); H2RowFactory rowFactory = tbl.rowFactory(rowDesc); @@ -2450,8 +2448,7 @@ private int bindPartitionInfoParameter(CacheQueryPartitionInfo partInfo, Object[ assert partInfo != null; assert partInfo.partition() < 0; - GridH2RowDescriptor desc = dataTable(schema(partInfo.cacheName()), - partInfo.tableName()).rowDescriptor(); + GridH2RowDescriptor desc = dataTable(schema(partInfo.cacheName()), partInfo.tableName()).rowDescriptor(); Object param = H2Utils.convert(params[partInfo.paramIdx()], desc, partInfo.dataType()); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java index 6691485420df0..891e59f86a4df 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java @@ -197,11 +197,8 @@ private H2Cursor(GridCursor cursor, IndexingQueryCacheFi try { CacheDataRow dataRow = cursor.get(); - GridH2Row row = tbl.rowDescriptor().createRow(dataRow.key(), dataRow.partition(), dataRow.value(), dataRow.version(), 0); - - row.link(dataRow.link()); - - return row; + return tbl.rowDescriptor().createRow(dataRow.key(), dataRow.partition(), dataRow.value(), + dataRow.version(), 0, dataRow.link()); } catch (IgniteCheckedException e) { throw DbException.convert(e); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2RowFactory.java 
b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2RowFactory.java index 2e57ca34af6b1..92ecd3d148e73 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2RowFactory.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2RowFactory.java @@ -22,8 +22,8 @@ import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row; import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row; /** * Data store for H2 rows. @@ -65,9 +65,7 @@ public GridH2Row getRow(long link) throws IgniteCheckedException { try { row = rowDesc.createRow(rowBuilder.key(), - PageIdUtils.partId(link), rowBuilder.value(), rowBuilder.version(), rowBuilder.expireTime()); - - row.link = link; + PageIdUtils.partId(link), rowBuilder.value(), rowBuilder.version(), rowBuilder.expireTime(), link); } catch (IgniteCheckedException e) { throw new IgniteException(e); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyValueRowOnheap.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyValueRowOnheap.java index 390015bf6f15d..63b4606eb3314 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyValueRowOnheap.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyValueRowOnheap.java @@ -61,7 +61,7 @@ public class GridH2KeyValueRowOnheap extends GridH2Row { private Value[] valCache; /** */ - private Value version; + private Value ver; /** * Constructor. 
@@ -79,13 +79,13 @@ public GridH2KeyValueRowOnheap(GridH2RowDescriptor desc, Object key, int keyType this.desc = desc; this.expirationTime = expirationTime; - setValue(KEY_COL, desc.wrap(key, keyType)); + this.key = desc.wrap(key, keyType); - if (val != null) // We remove by key only, so value can be null here. - setValue(VAL_COL, desc.wrap(val, valType)); + if (val != null) + this.val = desc.wrap(val, valType); if (ver != null) - setValue(VER_COL, desc.wrap(ver, Value.JAVA_OBJECT)); + this.ver = desc.wrap(ver, Value.JAVA_OBJECT); } /** {@inheritDoc} */ @@ -103,59 +103,39 @@ public GridH2KeyValueRowOnheap(GridH2RowDescriptor desc, Object key, int keyType return DEFAULT_COLUMNS_COUNT + desc.fieldsCount(); } - /** - * @param col Column index. - * @return Value if exists. - */ - protected final Value peekValue(int col) { - if (col == KEY_COL) - return key; - - if (col == VAL_COL) - return val; - - assert col == VER_COL; - - return version; - } - /** {@inheritDoc} */ @Override public Value getValue(int col) { - Value[] vCache = valCache; - - if (vCache != null) { - Value v = vCache[col]; + switch (col) { + case KEY_COL: + return key; - if (v != null) - return v; - } + case VAL_COL: + return val; - Value v; + case VER_COL: + return ver; - if (desc.isValueColumn(col)) { - v = peekValue(VAL_COL); + default: + if (desc.isKeyAliasColumn(col)) + return key; + else if (desc.isValueAliasColumn(col)) + return val; - return v; + return getValue0(col - DEFAULT_COLUMNS_COUNT); } - else if (desc.isKeyColumn(col)) { - v = peekValue(KEY_COL); + } - assert v != null; + /** + * Get real column value. + * + * @param col Adjusted column index (without default columns). + * @return Value. 
+ */ + private Value getValue0(int col) { + Value v = getCached(col); + if (v != null) return v; - } - else if (col == VER_COL) - return version; - - col -= DEFAULT_COLUMNS_COUNT; - - assert col >= 0; - - Value key = getValue(KEY_COL); - Value val = getValue(VAL_COL); - - assert key != null; - assert val != null; Object res = desc.columnValue(key.getObject(), val.getObject(), col); @@ -170,21 +150,44 @@ else if (col == VER_COL) } } - if (vCache != null) - vCache[col + DEFAULT_COLUMNS_COUNT] = v; + setCached(col, v); return v; } /** - * @param valCache Value cache. + * Prepare values cache. */ - public void valuesCache(Value[] valCache) { - if (valCache != null) { - desc.initValueCache(valCache, key, val, version); - } + public void prepareValuesCache() { + this.valCache = new Value[desc.fieldsCount()]; + } + + /** + * Clear values cache. + */ + public void clearValuesCache() { + this.valCache = null; + } + + /** + * Get cached value (if any). + * + * @param colIdx Column index. + * @return Value. + */ + private Value getCached(int colIdx) { + return valCache != null ? valCache[colIdx] : null; + } - this.valCache = valCache; + /** + * Set cache value. + * + * @param colIdx Column index. + * @param val Value. + */ + private void setCached(int colIdx, Value val) { + if (valCache != null) + valCache[colIdx] = val; } /** {@inheritDoc} */ @@ -193,13 +196,13 @@ public void valuesCache(Value[] valCache) { sb.a(Integer.toHexString(System.identityHashCode(this))); - Value v = peekValue(KEY_COL); + Value v = key; sb.a("[ key: ").a(v == null ? "nil" : v.getString()); - v = peekValue(VAL_COL); + v = val; sb.a(", val: ").a(v == null ? "nil" : v.getString()); - v = peekValue(VER_COL); + v = ver; sb.a(", ver: ").a(v == null ? 
"nil" : v.getString()); sb.a(" ][ "); @@ -223,54 +226,46 @@ public void valuesCache(Value[] valCache) { /** {@inheritDoc} */ @Override public void setKeyAndVersion(SearchRow old) { - throw new IllegalStateException(); + throw new UnsupportedOperationException(); } /** {@inheritDoc} */ @Override public void setKey(long key) { - throw new IllegalStateException(); + throw new UnsupportedOperationException(); } /** {@inheritDoc} */ @Override public Row getCopy() { - throw new IllegalStateException(); + throw new UnsupportedOperationException(); } /** {@inheritDoc} */ @Override public void setDeleted(boolean deleted) { - throw new IllegalStateException(); + throw new UnsupportedOperationException(); } /** {@inheritDoc} */ @Override public long getKey() { - throw new IllegalStateException(); + throw new UnsupportedOperationException(); } /** {@inheritDoc} */ @Override public void setSessionId(int sesId) { - throw new IllegalStateException(); + throw new UnsupportedOperationException(); } /** {@inheritDoc} */ @Override public void setVersion(int ver) { - throw new IllegalStateException(); + throw new UnsupportedOperationException(); } /** {@inheritDoc} */ @Override public void setValue(int idx, Value v) { - if (desc.isValueColumn(idx)) - val = v; - else if (idx == VER_COL) - version = v; - else { - assert desc.isKeyColumn(idx) : idx + " " + v; - - key = v; - } + throw new UnsupportedOperationException(); } /** {@inheritDoc} */ @Override public final int hashCode() { - throw new IllegalStateException(); + throw new UnsupportedOperationException(); } } \ No newline at end of file diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowDescriptor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowDescriptor.java index 1f6ff88f8b161..081805e5a92ee 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowDescriptor.java +++ 
b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowDescriptor.java @@ -17,41 +17,259 @@ package org.apache.ignite.internal.processors.query.h2.opt; +import java.math.BigDecimal; +import java.sql.Date; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.processors.cache.CacheObject; +import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; -import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.query.GridQueryProperty; import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; +import org.apache.ignite.internal.processors.query.h2.H2TableDescriptor; import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import org.h2.message.DbException; import org.h2.result.SearchRow; +import org.h2.result.SimpleRow; +import org.h2.util.LocalDateTimeUtils; +import org.h2.value.DataType; import org.h2.value.Value; +import org.h2.value.ValueArray; +import org.h2.value.ValueBoolean; +import org.h2.value.ValueByte; +import org.h2.value.ValueBytes; +import org.h2.value.ValueDate; +import org.h2.value.ValueDecimal; +import org.h2.value.ValueDouble; +import org.h2.value.ValueFloat; +import org.h2.value.ValueGeometry; +import org.h2.value.ValueInt; +import org.h2.value.ValueJavaObject; +import org.h2.value.ValueLong; +import org.h2.value.ValueNull; +import org.h2.value.ValueShort; +import org.h2.value.ValueString; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimestamp; +import org.h2.value.ValueUuid; import org.jetbrains.annotations.Nullable; +import static 
org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.DEFAULT_COLUMNS_COUNT; +import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.KEY_COL; +import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.VAL_COL; + /** * Row descriptor. */ -public interface GridH2RowDescriptor { +public class GridH2RowDescriptor { + /** Indexing SPI. */ + private final IgniteH2Indexing idx; + + /** Table descriptor. */ + private final H2TableDescriptor tbl; + + /** */ + private final GridQueryTypeDescriptor type; + + /** */ + private volatile String[] fields; + + /** */ + private volatile int[] fieldTypes; + + /** */ + private final int keyType; + + /** */ + private final int valType; + + /** */ + private volatile GridQueryProperty[] props; + + /** Id of user-defined key column */ + private volatile int keyAliasColId; + + /** Id of user-defined value column */ + private volatile int valAliasColId; + + /** + * Constructor. + * + * @param idx Indexing. + * @param tbl Table. + * @param type Type descriptor. + */ + public GridH2RowDescriptor(IgniteH2Indexing idx, H2TableDescriptor tbl, GridQueryTypeDescriptor type) { + assert type != null; + + this.idx = idx; + this.tbl = tbl; + this.type = type; + + keyType = DataType.getTypeFromClass(type.keyClass()); + valType = DataType.getTypeFromClass(type.valueClass()); + + refreshMetadataFromTypeDescriptor(); + } + + /** + * Update metadata of this row descriptor according to current state of type descriptor. 
+ */ + @SuppressWarnings("WeakerAccess") + public final void refreshMetadataFromTypeDescriptor() { + Map> allFields = new LinkedHashMap<>(); + + allFields.putAll(type.fields()); + + fields = allFields.keySet().toArray(new String[allFields.size()]); + + fieldTypes = new int[fields.length]; + + Class[] classes = allFields.values().toArray(new Class[fields.length]); + + for (int i = 0; i < fieldTypes.length; i++) + fieldTypes[i] = DataType.getTypeFromClass(classes[i]); + + props = new GridQueryProperty[fields.length]; + + for (int i = 0; i < fields.length; i++) { + GridQueryProperty p = type.property(fields[i]); + + assert p != null : fields[i]; + + props[i] = p; + } + + List fieldsList = Arrays.asList(fields); + + keyAliasColId = + (type.keyFieldName() != null) ? DEFAULT_COLUMNS_COUNT + fieldsList.indexOf(type.keyFieldAlias()) : -1; + + valAliasColId = + (type.valueFieldName() != null) ? DEFAULT_COLUMNS_COUNT + fieldsList.indexOf(type.valueFieldAlias()) : -1; + } + /** * Gets indexing. * * @return indexing. */ - public IgniteH2Indexing indexing(); + public IgniteH2Indexing indexing() { + return idx; + } /** * Gets type descriptor. * * @return Type descriptor. */ - public GridQueryTypeDescriptor type(); + public GridQueryTypeDescriptor type() { + return type; + } /** * Gets cache context for this row descriptor. * * @return Cache context. */ - public GridCacheContext context(); + public GridCacheContext context() { + return tbl.cache(); + } + + /** + * Wraps object to respective {@link Value}. + * + * @param obj Object. + * @param type Value type. + * @return Value. + * @throws IgniteCheckedException If failed. + */ + @SuppressWarnings("ConstantConditions") + public Value wrap(Object obj, int type) throws IgniteCheckedException { + assert obj != null; + + if (obj instanceof CacheObject) { // Handle cache object. 
+ CacheObject co = (CacheObject)obj; + + if (type == Value.JAVA_OBJECT) + return new GridH2ValueCacheObject(co, idx.objectContext()); + + obj = co.value(idx.objectContext(), false); + } + + switch (type) { + case Value.BOOLEAN: + return ValueBoolean.get((Boolean)obj); + case Value.BYTE: + return ValueByte.get((Byte)obj); + case Value.SHORT: + return ValueShort.get((Short)obj); + case Value.INT: + return ValueInt.get((Integer)obj); + case Value.FLOAT: + return ValueFloat.get((Float)obj); + case Value.LONG: + return ValueLong.get((Long)obj); + case Value.DOUBLE: + return ValueDouble.get((Double)obj); + case Value.UUID: + UUID uuid = (UUID)obj; + return ValueUuid.get(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits()); + case Value.DATE: + if (LocalDateTimeUtils.isLocalDate(obj.getClass())) + return LocalDateTimeUtils.localDateToDateValue(obj); + + return ValueDate.get((Date)obj); + + case Value.TIME: + if (LocalDateTimeUtils.isLocalTime(obj.getClass())) + return LocalDateTimeUtils.localTimeToTimeValue(obj); + + return ValueTime.get((Time)obj); + + case Value.TIMESTAMP: + if (obj instanceof java.util.Date && !(obj instanceof Timestamp)) + obj = new Timestamp(((java.util.Date)obj).getTime()); + + if (LocalDateTimeUtils.isLocalDateTime(obj.getClass())) + return LocalDateTimeUtils.localDateTimeToValue(obj); + + return ValueTimestamp.get((Timestamp)obj); + + case Value.DECIMAL: + return ValueDecimal.get((BigDecimal)obj); + case Value.STRING: + return ValueString.get(obj.toString()); + case Value.BYTES: + return ValueBytes.get((byte[])obj); + case Value.JAVA_OBJECT: + return ValueJavaObject.getNoCopy(obj, null, null); + case Value.ARRAY: + Object[] arr = (Object[])obj; + + Value[] valArr = new Value[arr.length]; + + for (int i = 0; i < arr.length; i++) { + Object o = arr[i]; + + valArr[i] = o == null ? 
ValueNull.INSTANCE : wrap(o, DataType.getTypeFromClass(o.getClass())); + } + + return ValueArray.get(valArr); + + case Value.GEOMETRY: + return ValueGeometry.getFromGeometry(obj); + } + + throw new IgniteCheckedException("Failed to wrap value[type=" + type + ", value=" + obj + "]"); + } /** * Creates new row. @@ -63,18 +281,46 @@ public interface GridH2RowDescriptor { * @return Row. * @throws IgniteCheckedException If failed. */ - public GridH2Row createRow(KeyCacheObject key, int part, @Nullable CacheObject val, GridCacheVersion ver, - long expirationTime) throws IgniteCheckedException; + public GridH2Row createRow(KeyCacheObject key, int partId, @Nullable CacheObject val, GridCacheVersion ver, + long expirationTime, long link) throws IgniteCheckedException { + GridH2Row row; + + try { + if (val == null) // Only can happen for remove operation, can create simple search row. + row = GridH2RowFactory.create(wrap(key, keyType)); + else + row = new GridH2KeyValueRowOnheap(this, key, keyType, val, valType, ver, expirationTime); + } + catch (ClassCastException e) { + throw new IgniteCheckedException("Failed to convert key to SQL type. " + + "Please make sure that you always store each value type with the same key type " + + "or configure key type as common super class for all actual keys for this value type.", e); + } + + row.ver = ver; + + row.key = key; + row.val = val; + row.partId = partId; + + row.link(link); + + return row; + } /** * @return Value type. */ - public int valueType(); + public int valueType() { + return valType; + } /** * @return Total fields count. */ - public int fieldsCount(); + public int fieldsCount() { + return fields.length; + } /** * Gets value type for column index. @@ -82,7 +328,9 @@ public GridH2Row createRow(KeyCacheObject key, int part, @Nullable CacheObject v * @param col Column index. * @return Value type. 
*/ - public int fieldType(int col); + public int fieldType(int col) { + return fieldTypes[col]; + } /** * Gets column value by column index. @@ -92,7 +340,14 @@ public GridH2Row createRow(KeyCacheObject key, int part, @Nullable CacheObject v * @param col Column index. * @return Column value. */ - public Object columnValue(Object key, Object val, int col); + public Object columnValue(Object key, Object val, int col) { + try { + return props[col].value(key, val); + } + catch (IgniteCheckedException e) { + throw DbException.convert(e); + } + } /** * Gets column value by column index. @@ -102,7 +357,14 @@ public GridH2Row createRow(KeyCacheObject key, int part, @Nullable CacheObject v * @param colVal Value to set to column. * @param col Column index. */ - public void setColumnValue(Object key, Object val, Object colVal, int col); + public void setColumnValue(Object key, Object val, Object colVal, int col) { + try { + props[col].setValue(key, val, colVal); + } + catch (IgniteCheckedException e) { + throw DbException.convert(e); + } + } /** * Determine whether a column corresponds to a property of key or to one of value. @@ -110,25 +372,31 @@ public GridH2Row createRow(KeyCacheObject key, int part, @Nullable CacheObject v * @param col Column index. * @return {@code true} if given column corresponds to a key property, {@code false} otherwise */ - public boolean isColumnKeyProperty(int col); + public boolean isColumnKeyProperty(int col) { + return props[col].key(); + } /** - * Wraps object to respective {@link Value}. + * Checks if provided column id matches key column or key alias. * - * @param o Object. - * @param type Value type. - * @return Value. - * @throws IgniteCheckedException If failed. + * @param colId Column id. + * @return Result. 
*/ - public Value wrap(Object o, int type) throws IgniteCheckedException; + public boolean isKeyColumn(int colId) { + assert colId >= 0; + return colId == KEY_COL || colId == keyAliasColId; + } /** - * Checks if provided column id matches key column or key alias. + * Checks if provided column id matches key alias column. * * @param colId Column id. * @return Result. */ - public boolean isKeyColumn(int colId); + public boolean isKeyAliasColumn(int colId) { + assert colId >= 0; + return colId == keyAliasColId; + } /** * Checks if provided column id matches value column or alias. @@ -136,7 +404,21 @@ public GridH2Row createRow(KeyCacheObject key, int part, @Nullable CacheObject v * @param colId Column id. * @return Result. */ - public boolean isValueColumn(int colId); + public boolean isValueColumn(int colId) { + assert colId >= 0; + return colId == VAL_COL || colId == valAliasColId; + } + + /** + * Checks if provided column id matches value alias column. + * + * @param colId Column id. + * @return Result. + */ + public boolean isValueAliasColumn(int colId) { + assert colId >= 0; + return colId == valAliasColId; + } /** * Checks if provided column id matches key, key alias, @@ -145,7 +427,21 @@ public GridH2Row createRow(KeyCacheObject key, int part, @Nullable CacheObject v * @param colId Column id. * @return Result. */ - public boolean isKeyValueOrVersionColumn(int colId); + @SuppressWarnings("RedundantIfStatement") + public boolean isKeyValueOrVersionColumn(int colId) { + assert colId >= 0; + + if (colId < DEFAULT_COLUMNS_COUNT) + return true; + + if (colId == keyAliasColId) + return true; + + if (colId == valAliasColId) + return true; + + return false; + } /** * Checks if provided index condition is allowed for key column or key alias column. @@ -154,17 +450,15 @@ public GridH2Row createRow(KeyCacheObject key, int part, @Nullable CacheObject v * @param mask Index Condition to check. * @return Result. 
*/ - public boolean checkKeyIndexCondition(int masks[], int mask); + public boolean checkKeyIndexCondition(int masks[], int mask) { + assert masks != null; + assert masks.length > 0; - /** - * Initializes value cache with key, val and version. - * - * @param valCache Value cache. - * @param key Key. - * @param value Value. - * @param version Version. - */ - public void initValueCache(Value valCache[], Value key, Value value, Value version); + if (keyAliasColId < 0) + return (masks[KEY_COL] & mask) != 0; + else + return (masks[KEY_COL] & mask) != 0 || (masks[keyAliasColId] & mask) != 0; + } /** * Clones provided row and copies values of alias key and val columns @@ -173,7 +467,38 @@ public GridH2Row createRow(KeyCacheObject key, int part, @Nullable CacheObject v * @param row Source row. * @return Result. */ - public SearchRow prepareProxyIndexRow(SearchRow row); + public SearchRow prepareProxyIndexRow(SearchRow row) { + if (row == null) + return null; + + Value[] data = new Value[row.getColumnCount()]; + + for (int idx = 0; idx < data.length; idx++) + data[idx] = row.getValue(idx); + + copyAliasColumnData(data, KEY_COL, keyAliasColId); + copyAliasColumnData(data, VAL_COL, valAliasColId); + + return new SimpleRow(data); + } + + /** + * Copies data between original and alias columns + * + * @param data Array of values. + * @param colId Original column id. + * @param aliasColId Alias column id. + */ + private void copyAliasColumnData(Value[] data, int colId, int aliasColId) { + if (aliasColId <= 0) + return; + + if (data[aliasColId] == null && data[colId] != null) + data[aliasColId] = data[colId]; + + if (data[colId] == null && data[aliasColId] != null) + data[colId] = data[aliasColId]; + } /** * Gets alternative column id that may substitute the given column id. @@ -186,5 +511,20 @@ public GridH2Row createRow(KeyCacheObject key, int part, @Nullable CacheObject v * @param colId Column id. * @return Result. 
*/ - public int getAlternativeColumnId(int colId); -} \ No newline at end of file + public int getAlternativeColumnId(int colId) { + if (keyAliasColId > 0) { + if (colId == KEY_COL) + return keyAliasColId; + else if (colId == keyAliasColId) + return KEY_COL; + } + if (valAliasColId > 0) { + if (colId == VAL_COL) + return valAliasColId; + else if (colId == valAliasColId) + return VAL_COL; + } + + return colId; + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java index 79eed12dbf595..d20b56bf769d7 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java @@ -34,7 +34,6 @@ import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.QueryField; -import org.apache.ignite.internal.processors.query.h2.H2RowDescriptor; import org.apache.ignite.internal.processors.query.h2.database.H2RowFactory; import org.apache.ignite.internal.processors.query.h2.database.H2TreeIndex; import org.apache.ignite.internal.util.typedef.F; @@ -55,7 +54,6 @@ import org.h2.table.TableBase; import org.h2.table.TableType; import org.h2.value.DataType; -import org.h2.value.Value; import org.jetbrains.annotations.Nullable; import org.jsr166.ConcurrentHashMap8; import org.jsr166.LongAdder8; @@ -71,7 +69,7 @@ public class GridH2Table extends TableBase { private final GridCacheContext cctx; /** */ - private final H2RowDescriptor desc; + private final GridH2RowDescriptor desc; /** */ private volatile ArrayList idxs; @@ -124,7 +122,7 @@ public class GridH2Table extends TableBase { * @param idxsFactory Indexes factory. * @param cctx Cache context. 
*/ - public GridH2Table(CreateTableData createTblData, H2RowDescriptor desc, H2RowFactory rowFactory, + public GridH2Table(CreateTableData createTblData, GridH2RowDescriptor desc, H2RowFactory rowFactory, GridH2SystemIndexFactory idxsFactory, GridCacheContext cctx) { super(createTblData); @@ -408,19 +406,21 @@ public boolean update(KeyCacheObject key, throws IgniteCheckedException { assert desc != null; - GridH2Row row = desc.createRow(key, partId, val, ver, expirationTime); + GridH2Row row = desc.createRow(key, partId, val, ver, expirationTime, link); - row.link = link; + if (rmv) + return doUpdate(row, true); + else { + GridH2KeyValueRowOnheap row0 = (GridH2KeyValueRowOnheap)row; - if (!rmv) - ((GridH2KeyValueRowOnheap)row).valuesCache(new Value[getColumns().length]); + row0.prepareValuesCache(); - try { - return doUpdate(row, rmv); - } - finally { - if (!rmv) - ((GridH2KeyValueRowOnheap)row).valuesCache(null); + try { + return doUpdate(row, false); + } + finally { + row0.clearValuesCache(); + } } } From 98438c954c5f9a08634cf3132361268456397864 Mon Sep 17 00:00:00 2001 From: devozerov Date: Mon, 16 Oct 2017 12:38:54 +0300 Subject: [PATCH 033/243] IGNITE-6632: SQL: simplified GridH2Row inheritance tree. This closes #2856. 
--- .../processors/query/h2/IgniteH2Indexing.java | 4 +- .../query/h2/database/H2RowFactory.java | 2 +- .../query/h2/database/io/H2ExtrasInnerIO.java | 4 +- .../query/h2/database/io/H2ExtrasLeafIO.java | 4 +- .../query/h2/database/io/H2InnerIO.java | 4 +- .../query/h2/database/io/H2LeafIO.java | 4 +- .../query/h2/opt/GridH2KeyRowOnheap.java | 59 +++ .../query/h2/opt/GridH2KeyValueRowOnheap.java | 35 -- .../query/h2/opt/GridH2MetaTable.java | 13 +- ...actory.java => GridH2PlainRowFactory.java} | 37 +- .../processors/query/h2/opt/GridH2Row.java | 122 ++---- .../query/h2/opt/GridH2RowDescriptor.java | 12 +- .../query/h2/opt/GridH2SearchRowAdapter.java | 103 +++++ .../processors/query/h2/opt/GridH2Table.java | 2 +- .../h2/twostep/GridMergeIndexSorted.java | 4 +- .../h2/twostep/GridMergeIndexUnsorted.java | 4 +- .../query/h2/opt/GridH2TableSelfTest.java | 369 ------------------ 17 files changed, 231 insertions(+), 551 deletions(-) create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyRowOnheap.java rename modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/{GridH2RowFactory.java => GridH2PlainRowFactory.java} (82%) create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SearchRowAdapter.java delete mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2TableSelfTest.java diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index dd35723f9cb21..0fdc2e48b9a9e 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -106,7 +106,7 @@ import 
org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row; import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowFactory; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2PlainRowFactory; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuerySplitter; @@ -199,7 +199,7 @@ public class IgniteH2Indexing implements GridQueryIndexing { private static final String DB_OPTIONS = ";LOCK_MODE=3;MULTI_THREADED=1;DB_CLOSE_ON_EXIT=FALSE" + ";DEFAULT_LOCK_TIMEOUT=10000;FUNCTIONS_IN_SCHEMA=true;OPTIMIZE_REUSE_RESULTS=0;QUERY_CACHE_SIZE=0" + ";RECOMPILE_ALWAYS=1;MAX_OPERATION_MEMORY=0;NESTED_JOINS=0;BATCH_JOINS=1" + - ";ROW_FACTORY=\"" + GridH2RowFactory.class.getName() + "\"" + + ";ROW_FACTORY=\"" + GridH2PlainRowFactory.class.getName() + "\"" + ";DEFAULT_TABLE_ENGINE=" + GridH2DefaultTableEngine.class.getName(); // Uncomment this setting to get debug output from H2 to sysout. 
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2RowFactory.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2RowFactory.java index 92ecd3d148e73..7116fe71e4c13 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2RowFactory.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2RowFactory.java @@ -71,7 +71,7 @@ public GridH2Row getRow(long link) throws IgniteCheckedException { throw new IgniteException(e); } - assert row.ver != null; + assert row.version() != null; return row; } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasInnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasInnerIO.java index 7d41617f73545..b8877e9471011 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasInnerIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasInnerIO.java @@ -81,7 +81,7 @@ private H2ExtrasInnerIO(short type, int ver, int payloadSize) { @Override public void storeByOffset(long pageAddr, int off, SearchRow row) { GridH2Row row0 = (GridH2Row)row; - assert row0.link != 0 : row0; + assert row0.link() != 0 : row0; List inlineIdxs = InlineIndexHelper.getCurrentInlineIndexes(); @@ -101,7 +101,7 @@ private H2ExtrasInnerIO(short type, int ver, int payloadSize) { fieldOff += size; } - PageUtils.putLong(pageAddr, off + payloadSize, row0.link); + PageUtils.putLong(pageAddr, off + payloadSize, row0.link()); } /** {@inheritDoc} */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java 
index 3fe72b78da784..6161f8dee2063 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java @@ -81,7 +81,7 @@ private H2ExtrasLeafIO(short type, int ver, int payloadSize) { @Override public void storeByOffset(long pageAddr, int off, SearchRow row) { GridH2Row row0 = (GridH2Row)row; - assert row0.link != 0; + assert row0.link() != 0; List inlineIdxs = InlineIndexHelper.getCurrentInlineIndexes(); @@ -100,7 +100,7 @@ private H2ExtrasLeafIO(short type, int ver, int payloadSize) { fieldOff += size; } - PageUtils.putLong(pageAddr, off + payloadSize, row0.link); + PageUtils.putLong(pageAddr, off + payloadSize, row0.link()); } /** {@inheritDoc} */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2InnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2InnerIO.java index 4d7b3a2f03bdb..a1f1ce91e0194 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2InnerIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2InnerIO.java @@ -47,9 +47,9 @@ private H2InnerIO(int ver) { @Override public void storeByOffset(long pageAddr, int off, SearchRow row) { GridH2Row row0 = (GridH2Row)row; - assert row0.link != 0; + assert row0.link() != 0; - PageUtils.putLong(pageAddr, off, row0.link); + PageUtils.putLong(pageAddr, off, row0.link()); } /** {@inheritDoc} */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java index f292fc10b0ad9..85dcf501e0e57 100644 --- 
a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java @@ -47,9 +47,9 @@ protected H2LeafIO(int ver) { @Override public void storeByOffset(long pageAddr, int off, SearchRow row) { GridH2Row row0 = (GridH2Row)row; - assert row0.link != 0; + assert row0.link() != 0; - PageUtils.putLong(pageAddr, off, row0.link); + PageUtils.putLong(pageAddr, off, row0.link()); } /** {@inheritDoc} */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyRowOnheap.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyRowOnheap.java new file mode 100644 index 0000000000000..a0716c9ccee40 --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyRowOnheap.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query.h2.opt; + +import org.h2.value.Value; + +/** + * Heap-based key-only row for remove operations. 
+ */ +public class GridH2KeyRowOnheap extends GridH2Row { + /** */ + private Value key; + + /** + * @param key Key. + */ + public GridH2KeyRowOnheap(Value key) { + this.key = key; + } + + /** {@inheritDoc} */ + @Override public int getColumnCount() { + return 1; + } + + /** {@inheritDoc} */ + @Override public Value getValue(int idx) { + assert idx == 0 : idx; + + return key; + } + + /** {@inheritDoc} */ + @Override public void setValue(int idx, Value v) { + assert idx == 0 : idx; + + key = v; + } + + /** {@inheritDoc} */ + @Override public long expireTime() { + return 0; + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyValueRowOnheap.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyValueRowOnheap.java index 63b4606eb3314..ad93fecb6682a 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyValueRowOnheap.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyValueRowOnheap.java @@ -88,11 +88,6 @@ public GridH2KeyValueRowOnheap(GridH2RowDescriptor desc, Object key, int keyType this.ver = desc.wrap(ver, Value.JAVA_OBJECT); } - /** {@inheritDoc} */ - @Override public Value[] getValueList() { - throw new UnsupportedOperationException(); - } - /** {@inheritDoc} */ @Override public long expireTime() { return expirationTime; @@ -224,41 +219,11 @@ private void setCached(int colIdx, Value val) { return sb.toString(); } - /** {@inheritDoc} */ - @Override public void setKeyAndVersion(SearchRow old) { - throw new UnsupportedOperationException(); - } - /** {@inheritDoc} */ @Override public void setKey(long key) { throw new UnsupportedOperationException(); } - /** {@inheritDoc} */ - @Override public Row getCopy() { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public void setDeleted(boolean deleted) { - throw new 
UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public long getKey() { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public void setSessionId(int sesId) { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public void setVersion(int ver) { - throw new UnsupportedOperationException(); - } - /** {@inheritDoc} */ @Override public void setValue(int idx, Value v) { throw new UnsupportedOperationException(); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2MetaTable.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2MetaTable.java index d23515b686653..5e09a86b9b305 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2MetaTable.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2MetaTable.java @@ -86,7 +86,7 @@ public GridH2MetaTable(CreateTableData data) { /** {@inheritDoc} */ @Override public SearchRow getTemplateSimpleRow(boolean singleColumn) { if (singleColumn) - return GridH2RowFactory.create((Value)null); + return GridH2PlainRowFactory.create((Value)null); return new MetaRow(); } @@ -219,7 +219,7 @@ public GridH2MetaTable(CreateTableData data) { /** * Get value row. 
*/ - private static class MetaRow extends GridH2Row { + private static class MetaRow extends GridH2SearchRowAdapter { /** */ private Value v0; @@ -284,11 +284,6 @@ private static class MetaRow extends GridH2Row { throw new IllegalStateException("Index: " + idx); } } - - /** {@inheritDoc} */ - @Override public long expireTime() { - return 0; - } } /** @@ -296,7 +291,7 @@ private static class MetaRow extends GridH2Row { */ private static class MetaIndex extends BaseIndex { /** */ - private final ConcurrentMap rows = new ConcurrentHashMap8<>(); + private final ConcurrentMap rows = new ConcurrentHashMap8<>(); /** {@inheritDoc} */ @Override public void checkRename() { @@ -322,7 +317,7 @@ private static ValueInt id(SearchRow row) { /** {@inheritDoc} */ @Override public void add(Session session, Row row) { - rows.put(id(row), (GridH2Row)row); + rows.put(id(row), row); } /** {@inheritDoc} */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowFactory.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2PlainRowFactory.java similarity index 82% rename from modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowFactory.java rename to modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2PlainRowFactory.java index d33917f4307ff..fd8a613247baf 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowFactory.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2PlainRowFactory.java @@ -19,35 +19,27 @@ import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.S; +import org.h2.result.Row; import org.h2.result.RowFactory; import org.h2.value.Value; /** * Row factory. 
*/ -public class GridH2RowFactory extends RowFactory { +public class GridH2PlainRowFactory extends RowFactory { /** * @param v Value. * @return Row. */ - public static GridH2Row create(Value v) { + public static Row create(Value v) { return new RowKey(v); } - /** - * @param v1 Value 1. - * @param v2 Value 2. - * @return Row. - */ - public static GridH2Row create(Value v1, Value v2) { - return new RowPair(v1, v2); - } - /** * @param data Values. * @return Row. */ - public static GridH2Row create(Value... data) { + public static Row create(Value... data) { switch (data.length) { case 0: throw new IllegalStateException("Zero columns row."); @@ -64,14 +56,14 @@ public static GridH2Row create(Value... data) { } /** {@inheritDoc} */ - @Override public GridH2Row createRow(Value[] data, int memory) { + @Override public Row createRow(Value[] data, int memory) { return create(data); } /** * Single value row. */ - private static final class RowKey extends GridH2Row { + private static final class RowKey extends GridH2SearchRowAdapter { /** */ private Value key; @@ -100,15 +92,15 @@ public RowKey(Value key) { } /** {@inheritDoc} */ - @Override public long expireTime() { - return 0; + @Override public String toString() { + return S.toString(RowKey.class, this); } } /** * Row of two values. */ - private static final class RowPair extends GridH2Row { + private static final class RowPair extends GridH2SearchRowAdapter { /** */ private Value v1; @@ -146,15 +138,15 @@ private RowPair(Value v1, Value v2) { } /** {@inheritDoc} */ - @Override public long expireTime() { - return 0; + @Override public String toString() { + return S.toString(RowPair.class, this); } } /** * Simple array based row. 
*/ - private static final class RowSimple extends GridH2Row { + private static final class RowSimple extends GridH2SearchRowAdapter { /** */ @GridToStringInclude private Value[] vals; @@ -185,10 +177,5 @@ private RowSimple(Value[] vals) { @Override public String toString() { return S.toString(RowSimple.class, this); } - - /** {@inheritDoc} */ - @Override public long expireTime() { - return 0; - } } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Row.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Row.java index fdeb009ac8c0a..4cb603baf170b 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Row.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Row.java @@ -21,29 +21,25 @@ import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; -import org.h2.result.Row; -import org.h2.result.SearchRow; -import org.h2.store.Data; -import org.h2.value.Value; /** * Row with locking support needed for unique key conflicts resolution. */ -public abstract class GridH2Row implements SearchRow, CacheDataRow, Row { - /** */ - public long link; // TODO remove +public abstract class GridH2Row extends GridH2SearchRowAdapter implements CacheDataRow { + /** Link. */ + private long link; - /** */ - public KeyCacheObject key; // TODO remove + /** Key. */ + private KeyCacheObject key; - /** */ - public CacheObject val; // TODO remove + /** Value. */ + private CacheObject val; - /** */ - public GridCacheVersion ver; // TODO remove + /** Version. */ + private GridCacheVersion ver; - /** */ - public int partId; // TODO remove + /** Partition. 
*/ + private int partId; /** {@inheritDoc} */ @Override public KeyCacheObject key() { @@ -60,16 +56,37 @@ public abstract class GridH2Row implements SearchRow, CacheDataRow, Row { return val; } + /** + * @param val Value. + */ + public void value(CacheObject val) { + this.val = val; + } + /** {@inheritDoc} */ @Override public GridCacheVersion version() { return ver; } + /** + * @param ver Version. + */ + public void version(GridCacheVersion ver) { + this.ver = ver; + } + /** {@inheritDoc} */ @Override public int partition() { return partId; } + /** + * @param partId Partition. + */ + public void partition(int partId) { + this.partId = partId; + } + /** {@inheritDoc} */ @Override public long link() { return link; @@ -80,81 +97,6 @@ public abstract class GridH2Row implements SearchRow, CacheDataRow, Row { this.link = link; } - /** {@inheritDoc} */ - @Override public Row getCopy() { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public void setVersion(int version) { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public int getByteCount(Data dummy) { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public boolean isEmpty() { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public void setDeleted(boolean deleted) { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public void setSessionId(int sessionId) { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public int getSessionId() { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public void commit() { - // No-op. 
- } - - /** {@inheritDoc} */ - @Override public boolean isDeleted() { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public void setKeyAndVersion(SearchRow old) { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public int getVersion() { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public void setKey(long key) { - // No-op, may be set in H2 INFORMATION_SCHEMA. - } - - /** {@inheritDoc} */ - @Override public long getKey() { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public int getMemory() { - throw new UnsupportedOperationException(); - } - - /** {@inheritDoc} */ - @Override public Value[] getValueList() { - throw new UnsupportedOperationException(); - } - /** {@inheritDoc} */ @Override public int hash() { throw new UnsupportedOperationException(); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowDescriptor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowDescriptor.java index 081805e5a92ee..503e487252306 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowDescriptor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowDescriptor.java @@ -287,7 +287,7 @@ public GridH2Row createRow(KeyCacheObject key, int partId, @Nullable CacheObject try { if (val == null) // Only can happen for remove operation, can create simple search row. 
- row = GridH2RowFactory.create(wrap(key, keyType)); + row = new GridH2KeyRowOnheap(wrap(key, keyType)); else row = new GridH2KeyValueRowOnheap(this, key, keyType, val, valType, ver, expirationTime); } @@ -297,12 +297,10 @@ public GridH2Row createRow(KeyCacheObject key, int partId, @Nullable CacheObject "or configure key type as common super class for all actual keys for this value type.", e); } - row.ver = ver; - - row.key = key; - row.val = val; - row.partId = partId; - + row.version(ver); + row.key(key); + row.value(val); + row.partition(partId); row.link(link); return row; diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SearchRowAdapter.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SearchRowAdapter.java new file mode 100644 index 0000000000000..24a90b3115131 --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SearchRowAdapter.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.query.h2.opt; + +import org.h2.result.Row; +import org.h2.result.SearchRow; +import org.h2.store.Data; +import org.h2.value.Value; + +/** + * Dummy H2 search row adadpter. + */ +public abstract class GridH2SearchRowAdapter implements Row { + /** {@inheritDoc} */ + @Override public void setKeyAndVersion(SearchRow old) { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Override public int getVersion() { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Override public void setKey(long key) { + // No-op, may be set in H2 INFORMATION_SCHEMA. + } + + /** {@inheritDoc} */ + @Override public long getKey() { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Override public int getMemory() { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Override public Row getCopy() { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Override public void setVersion(int version) { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Override public int getByteCount(Data dummy) { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Override public boolean isEmpty() { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Override public void setDeleted(boolean deleted) { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Override public void setSessionId(int sessionId) { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Override public int getSessionId() { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Override public void commit() { + // No-op. 
+ } + + /** {@inheritDoc} */ + @Override public boolean isDeleted() { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Override public Value[] getValueList() { + throw new UnsupportedOperationException(); + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java index d20b56bf769d7..add248864313e 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java @@ -463,7 +463,7 @@ boolean doUpdate(final GridH2Row row, boolean del) throws IgniteCheckedException GridH2IndexBase pk = pk(); if (!del) { - assert rowFactory == null || row.link != 0 : row; + assert rowFactory == null || row.link() != 0 : row; GridH2Row old = pk.put(row); // Put to PK. diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexSorted.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexSorted.java index 54c8dd40e1586..0dc8354a4413b 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexSorted.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexSorted.java @@ -33,7 +33,7 @@ import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Cursor; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowFactory; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2PlainRowFactory; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; import org.h2.engine.Session; 
@@ -368,7 +368,7 @@ private boolean next() { if (!iter.hasNext()) return false; - cur = GridH2RowFactory.create(iter.next()); + cur = GridH2PlainRowFactory.create(iter.next()); return true; } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexUnsorted.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexUnsorted.java index c53b58fe49672..487d386f35d9f 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexUnsorted.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexUnsorted.java @@ -27,7 +27,7 @@ import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Cursor; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowFactory; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2PlainRowFactory; import org.h2.engine.Session; import org.h2.index.Cursor; import org.h2.index.IndexType; @@ -139,7 +139,7 @@ private GridMergeIndexUnsorted(GridKernalContext ctx) { } @Override public Row next() { - return GridH2RowFactory.create(iter.next()); + return GridH2PlainRowFactory.create(iter.next()); } @Override public void remove() { diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2TableSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2TableSelfTest.java deleted file mode 100644 index a1a64e8f5d5ea..0000000000000 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2TableSelfTest.java +++ /dev/null @@ -1,369 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.processors.query.h2.opt; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicInteger; -import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; -import org.h2.value.ValueLong; -import org.h2.value.ValueString; -import org.h2.value.ValueTimestamp; -import org.h2.value.ValueUuid; - -/** - * Tests H2 Table. - */ -@SuppressWarnings({"TypeMayBeWeakened", "FieldAccessedSynchronizedAndUnsynchronized"}) -public class GridH2TableSelfTest extends GridCommonAbstractTest { - /** */ - private static final long MAX_X = 2000; - - /** */ - private static final String DB_URL = "jdbc:h2:mem:gg_table_engine;MULTI_THREADED=1;OPTIMIZE_REUSE_RESULTS=0;" + - "QUERY_CACHE_SIZE=0;RECOMPILE_ALWAYS=1"; - - /** */ - private static final String CREATE_TABLE_SQL = "CREATE TABLE T(ID UUID, T TIMESTAMP, STR VARCHAR, X BIGINT)"; - - /** */ - private static final String PK_NAME = "__GG_PK_"; - - /** Hash. 
*/ - private static final String HASH = "__GG_HASH"; - - /** */ - private static final String STR_IDX_NAME = "__GG_IDX_"; - - /** */ - private static final String NON_UNIQUE_IDX_NAME = "__GG_IDX_"; - - /** */ - private static final String SCAN_IDX_NAME = GridH2PrimaryScanIndex.SCAN_INDEX_NAME_SUFFIX; - - /** */ - private Connection conn; - - /** */ - private GridH2Table tbl; - - /** {@inheritDoc} */ - @Override protected void beforeTest() throws Exception { - // TODO: IGNITE-4994: Restore mock. -// Driver.load(); -// -// conn = DriverManager.getConnection(DB_URL); -// -// tbl = GridH2Table.Engine.createTable(conn, CREATE_TABLE_SQL, null, new GridH2Table.IndexesFactory() { -// @Override public void onTableCreated(GridH2Table tbl) { -// // No-op. -// } -// -// @Override public H2RowFactory createRowFactory(GridH2Table tbl) { -// return null; -// } -// -// @Override public ArrayList createIndexes(GridH2Table tbl) { -// ArrayList idxs = new ArrayList<>(); -// -// IndexColumn id = tbl.indexColumn(0, SortOrder.ASCENDING); -// IndexColumn t = tbl.indexColumn(1, SortOrder.ASCENDING); -// IndexColumn str = tbl.indexColumn(2, SortOrder.DESCENDING); -// IndexColumn x = tbl.indexColumn(3, SortOrder.DESCENDING); -// -// idxs.add(new H2PkHashIndex(null, tbl, HASH, F.asList(id))); -// idxs.add(new GridH2TreeIndex(PK_NAME, tbl, true, F.asList(id))); -// idxs.add(new GridH2TreeIndex(NON_UNIQUE_IDX_NAME, tbl, false, F.asList(x, t, id))); -// idxs.add(new GridH2TreeIndex(STR_IDX_NAME, tbl, false, F.asList(str, id))); -// -// return idxs; -// } -// }, null); - } - - /** {@inheritDoc} */ - @Override protected void afterTest() throws Exception { - conn.close(); - - conn = null; - tbl = null; - } - - /** - * @param id Id. - * @param t Timestamp. - * @param str String. - * @param x X. - * @return New row. 
- */ - private GridH2Row row(UUID id, long t, String str, long x) { - return GridH2RowFactory.create( - ValueUuid.get(id.getMostSignificantBits(), id.getLeastSignificantBits()), - ValueTimestamp.get(new Timestamp(t)), - ValueString.get(str), - ValueLong.get(x)); - } - - - /** - * Simple table test. - * - * @throws Exception If failed. - */ - public void testTable() throws Exception { - // Test insert. - long x = MAX_X; - - Random rnd = new Random(); - - while(x-- > 0) { - UUID id = UUID.randomUUID(); - - GridH2Row row = row(id, System.currentTimeMillis(), rnd.nextBoolean() ? id.toString() : - UUID.randomUUID().toString(), rnd.nextInt(100)); - - tbl.doUpdate(row, false); - } - - assertEquals(MAX_X, tbl.getRowCountApproximation()); - assertEquals(MAX_X, tbl.getRowCount(null)); - - for (GridH2IndexBase idx : tbl.indexes()) { - assertEquals(MAX_X, idx.getRowCountApproximation()); - assertEquals(MAX_X, idx.getRowCount(null)); - } - - // Check unique index. - UUID id = UUID.randomUUID(); - UUID id2 = UUID.randomUUID(); - - assertTrue(tbl.doUpdate(row(id, System.currentTimeMillis(), id.toString(), rnd.nextInt(100)), false)); - assertTrue(tbl.doUpdate(row(id2, System.currentTimeMillis(), id2.toString(), rnd.nextInt(100)), false)); - - // Check index selection. 
- checkQueryPlan(conn, "SELECT * FROM T", SCAN_IDX_NAME); - - checkQueryPlan(conn, "SELECT * FROM T WHERE ID IS NULL", PK_NAME); - checkQueryPlan(conn, "SELECT * FROM T WHERE ID = RANDOM_UUID()", PK_NAME); - checkQueryPlan(conn, "SELECT * FROM T WHERE ID > RANDOM_UUID()", PK_NAME); - checkQueryPlan(conn, "SELECT * FROM T ORDER BY ID", PK_NAME); - - checkQueryPlan(conn, "SELECT * FROM T WHERE STR IS NULL", STR_IDX_NAME); - checkQueryPlan(conn, "SELECT * FROM T WHERE STR = 'aaaa'", STR_IDX_NAME); - checkQueryPlan(conn, "SELECT * FROM T WHERE STR > 'aaaa'", STR_IDX_NAME); - checkQueryPlan(conn, "SELECT * FROM T ORDER BY STR DESC", STR_IDX_NAME); - - checkQueryPlan(conn, "SELECT * FROM T WHERE X IS NULL", NON_UNIQUE_IDX_NAME); - checkQueryPlan(conn, "SELECT * FROM T WHERE X = 10000", NON_UNIQUE_IDX_NAME); - checkQueryPlan(conn, "SELECT * FROM T WHERE X > 10000", NON_UNIQUE_IDX_NAME); - checkQueryPlan(conn, "SELECT * FROM T ORDER BY X DESC", NON_UNIQUE_IDX_NAME); - checkQueryPlan(conn, "SELECT * FROM T ORDER BY X DESC, T", NON_UNIQUE_IDX_NAME); - - checkQueryPlan(conn, "SELECT * FROM T ORDER BY T, X DESC", SCAN_IDX_NAME); - - // Simple queries. - - Statement s = conn.createStatement(); - - ResultSet rs = s.executeQuery("select id from t where x between 0 and 100"); - - int i = 0; - while (rs.next()) - i++; - - assertEquals(MAX_X + 2, i); - - // ----- - - rs = s.executeQuery("select id from t where t is not null"); - - i = 0; - while (rs.next()) - i++; - - assertEquals(MAX_X + 2, i); - - // ---- - - int cnt = 10 + rnd.nextInt(25); - - long t = System.currentTimeMillis(); - - for (i = 0; i < cnt; i++) { - id = UUID.randomUUID(); - - assertTrue(tbl.doUpdate(row(id, t, id.toString(), 51), false)); - } - - rs = s.executeQuery("select x, id from t where x = 51 limit " + cnt); - - i = 0; - - while (rs.next()) { - assertEquals(51, rs.getInt(1)); - - i++; - } - - assertEquals(cnt, i); - } - - /** - * @throws Exception If failed. 
- */ - public void testRangeQuery() throws Exception { - int rows = 3000; - int xs = 37; - - long t = System.currentTimeMillis(); - - Random rnd = new Random(); - - for (int i = 0 ; i < rows; i++) { - UUID id = UUID.randomUUID(); - - GridH2Row row = row(id, t++, id.toString(), rnd.nextInt(xs)); - - assertTrue(tbl.doUpdate(row, false)); - } - - PreparedStatement ps = conn.prepareStatement("select count(*) from t where x = ?"); - - int cnt = 0; - - for (int x = 0; x < xs; x++) { - ps.setInt(1, x); - - ResultSet rs = ps.executeQuery(); - - assertTrue(rs.next()); - - cnt += rs.getInt(1); - } - - assertEquals(rows, cnt); - } - - /** - * @throws Exception If failed. - */ - public void testDataLoss() throws Exception { - final int threads = 37; - final int iterations = 15000; - - final AtomicInteger cntr = new AtomicInteger(); - - final UUID[] ids = new UUID[threads * iterations]; - - for (int i = 0; i < ids.length; i++) - ids[i] = UUID.randomUUID(); - - final long t = System.currentTimeMillis(); - - final AtomicInteger deleted = new AtomicInteger(); - - multithreaded(new Callable() { - @Override public Void call() throws Exception { - Random rnd = new Random(); - - int offset = cntr.getAndIncrement() * iterations; - - synchronized (ids[offset]) { - for (int i = 0; i < iterations; i++) { - UUID id = ids[offset + i]; - - int x = rnd.nextInt(50); - - GridH2Row row = row(id, t, id.toString(), x); - - assertTrue(tbl.doUpdate(row, false)); - } - } - - offset = (offset + iterations) % ids.length; - - synchronized (ids[offset]) { - for (int i = 0; i < iterations; i += 2) { - UUID id = ids[offset + i]; - - int x = rnd.nextInt(50); - - GridH2Row row = row(id, t, id.toString(), x); - - if (tbl.doUpdate(row, true)) - deleted.incrementAndGet(); - } - } - - return null; - } - }, threads); - - assertTrue(deleted.get() > 0); - - PreparedStatement p = conn.prepareStatement("select count(*) from t where id = ?"); - - for (int i = 1; i < ids.length; i += 2) { - p.setObject(1, ids[i]); - - 
ResultSet rs = p.executeQuery(); - - assertTrue(rs.next()); - - assertEquals(1, rs.getInt(1)); - } - - Statement s = conn.createStatement(); - - ResultSet rs = s.executeQuery("select count(*) from t"); - - assertTrue(rs.next()); - - assertEquals(ids.length - deleted.get(), rs.getInt(1)); - } - - - /** - * Check query plan to correctly select index. - * - * @param conn Connection. - * @param sql Select. - * @param search Search token in result. - * @throws SQLException If failed. - */ - private void checkQueryPlan(Connection conn, String sql, String search) throws SQLException { - - try (Statement s = conn.createStatement()) { - try (ResultSet r = s.executeQuery("EXPLAIN ANALYZE " + sql)) { - assertTrue(r.next()); - - String plan = r.getString(1); - - assertTrue("Execution plan for '" + sql + "' query should contain '" + search + "'", - plan.contains(search)); - } - } - } -} \ No newline at end of file From 95b7ab518dd3c3db6fcc5142c2ee85da2516c2b6 Mon Sep 17 00:00:00 2001 From: devozerov Date: Mon, 16 Oct 2017 13:37:11 +0300 Subject: [PATCH 034/243] IGNITE-6634: Removed IgniteDistributedJoinTestSuite. It's tests are distributed between "Query" and "Query 2" suites. This closes #2857. 
--- ...butedJoinPartitionedAndReplicatedTest.java | 2 + .../IgniteCacheQuerySelfTestSuite.java | 17 ++++++ .../IgniteCacheQuerySelfTestSuite2.java | 6 ++ .../IgniteCacheQuerySelfTestSuite3.java | 3 - .../IgniteDistributedJoinTestSuite.java | 55 ------------------- 5 files changed, 25 insertions(+), 58 deletions(-) delete mode 100644 modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteDistributedJoinTestSuite.java diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheDistributedJoinPartitionedAndReplicatedTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheDistributedJoinPartitionedAndReplicatedTest.java index 5e906afb1b348..eb568dfcf209e 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheDistributedJoinPartitionedAndReplicatedTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheDistributedJoinPartitionedAndReplicatedTest.java @@ -194,6 +194,8 @@ public void testJoin1() throws Exception { * @throws Exception If failed. 
*/ public void testJoin2() throws Exception { + fail("https://issues.apache.org/jira/browse/IGNITE-5956"); + join(true, PARTITIONED, REPLICATED, PARTITIONED); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java index 83b4689678f5b..0b1a753d17173 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java @@ -46,6 +46,12 @@ import org.apache.ignite.internal.processors.cache.IgniteBinaryWrappedObjectFieldsQuerySelfTest; import org.apache.ignite.internal.processors.cache.IgniteCacheCollocatedQuerySelfTest; import org.apache.ignite.internal.processors.cache.IgniteCacheDeleteSqlQuerySelfTest; +import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinCollocatedAndNotTest; +import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinCustomAffinityMapper; +import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinNoIndexTest; +import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinPartitionedAndReplicatedTest; +import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinQueryConditionsTest; +import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinTest; import org.apache.ignite.internal.processors.cache.IgniteCacheDuplicateEntityConfigurationSelfTest; import org.apache.ignite.internal.processors.cache.IgniteCacheFieldsQueryNoDataSelfTest; import org.apache.ignite.internal.processors.cache.IgniteCacheFullTextQueryNodeJoiningSelfTest; @@ -123,6 +129,7 @@ import org.apache.ignite.internal.processors.cache.query.IndexingSpiQuerySelfTest; import org.apache.ignite.internal.processors.cache.query.IndexingSpiQueryTxSelfTest; import 
org.apache.ignite.internal.processors.client.ClientConnectorConfigurationValidationSelfTest; +import org.apache.ignite.internal.processors.query.IgniteSqlDistributedJoinSelfTest; import org.apache.ignite.internal.processors.query.IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest; import org.apache.ignite.internal.processors.query.IgniteSqlParameterizedQueryTest; import org.apache.ignite.internal.processors.query.h2.IgniteSqlBigIntegerKeyTest; @@ -325,6 +332,16 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(IgniteCacheDistributedQueryCancelSelfTest.class); suite.addTestSuite(IgniteCacheLocalQueryCancelOrTimeoutSelfTest.class); + // Distributed joins. + suite.addTestSuite(H2CompareBigQueryDistributedJoinsTest.class); + suite.addTestSuite(IgniteCacheDistributedJoinCollocatedAndNotTest.class); + suite.addTestSuite(IgniteCacheDistributedJoinCustomAffinityMapper.class); + suite.addTestSuite(IgniteCacheDistributedJoinNoIndexTest.class); + suite.addTestSuite(IgniteCacheDistributedJoinPartitionedAndReplicatedTest.class); + suite.addTestSuite(IgniteCacheDistributedJoinQueryConditionsTest.class); + suite.addTestSuite(IgniteCacheDistributedJoinTest.class); + suite.addTestSuite(IgniteSqlDistributedJoinSelfTest.class); + // Other. 
suite.addTestSuite(CacheIteratorScanQueryTest.class); suite.addTestSuite(CacheQueryNewClientSelfTest.class); diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java index f84ddc2f3a827..91e447881b88e 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java @@ -28,8 +28,10 @@ import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheClientQueryReplicatedNodeRestartSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheDistributedQueryStopOnCancelOrTimeoutSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryNodeFailTest; +import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryNodeRestartDistributedJoinSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryNodeRestartSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryNodeRestartSelfTest2; +import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryStopOnCancelOrTimeoutDistributedJoinSelfTest; import org.apache.ignite.internal.processors.cache.index.DynamicColumnsConcurrentAtomicPartitionedSelfTest; import org.apache.ignite.internal.processors.cache.index.DynamicColumnsConcurrentAtomicReplicatedSelfTest; import org.apache.ignite.internal.processors.cache.index.DynamicColumnsConcurrentTransactionalPartitionedSelfTest; @@ -67,6 +69,10 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(DynamicColumnsConcurrentAtomicReplicatedSelfTest.class); suite.addTestSuite(DynamicColumnsConcurrentTransactionalReplicatedSelfTest.class); + // Distributed joins. 
+ suite.addTestSuite(IgniteCacheQueryNodeRestartDistributedJoinSelfTest.class); + suite.addTestSuite(IgniteCacheQueryStopOnCancelOrTimeoutDistributedJoinSelfTest.class); + // Other tests. suite.addTestSuite(IgniteCacheQueryMultiThreadedSelfTest.class); diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite3.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite3.java index e523cf32db469..4ea8bca19e4e9 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite3.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite3.java @@ -37,7 +37,6 @@ import org.apache.ignite.internal.processors.cache.query.continuous.CacheKeepBinaryIterationStoreEnabledTest; import org.apache.ignite.internal.processors.cache.query.continuous.ClientReconnectContinuousQueryTest; import org.apache.ignite.internal.processors.cache.query.continuous.ContinuousQueryPeerClassLoadingTest; -import org.apache.ignite.internal.processors.cache.query.continuous.ClientReconnectContinuousQueryTest; import org.apache.ignite.internal.processors.cache.query.continuous.ContinuousQueryRemoteFilterMissingInClassPathSelfTest; import org.apache.ignite.internal.processors.cache.query.continuous.GridCacheContinuousQueryAtomicNearEnabledSelfTest; import org.apache.ignite.internal.processors.cache.query.continuous.GridCacheContinuousQueryAtomicP2PDisabledSelfTest; @@ -125,8 +124,6 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(CacheContinuousQueryConcurrentPartitionUpdateTest.class); suite.addTestSuite(CacheContinuousQueryEventBufferTest.class); - suite.addTest(IgniteDistributedJoinTestSuite.suite()); - return suite; } } diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteDistributedJoinTestSuite.java 
b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteDistributedJoinTestSuite.java deleted file mode 100644 index cf6704181ed19..0000000000000 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteDistributedJoinTestSuite.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.testsuites; - -import junit.framework.TestSuite; -import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinCollocatedAndNotTest; -import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinCustomAffinityMapper; -import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinNoIndexTest; -import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinPartitionedAndReplicatedTest; -import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinQueryConditionsTest; -import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinTest; -import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryNodeRestartDistributedJoinSelfTest; -import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryStopOnCancelOrTimeoutDistributedJoinSelfTest; -import org.apache.ignite.internal.processors.query.IgniteSqlDistributedJoinSelfTest; -import org.apache.ignite.internal.processors.query.h2.sql.H2CompareBigQueryDistributedJoinsTest; - -/** - * - */ -public class IgniteDistributedJoinTestSuite extends TestSuite { - /** - * @return Suite. 
- */ - public static TestSuite suite() { - TestSuite suite = new TestSuite("Distributed Joins Test Suite."); - - suite.addTestSuite(H2CompareBigQueryDistributedJoinsTest.class); - suite.addTestSuite(IgniteCacheDistributedJoinCollocatedAndNotTest.class); - suite.addTestSuite(IgniteCacheDistributedJoinCustomAffinityMapper.class); - suite.addTestSuite(IgniteCacheDistributedJoinNoIndexTest.class); - suite.addTestSuite(IgniteCacheDistributedJoinPartitionedAndReplicatedTest.class); - suite.addTestSuite(IgniteCacheDistributedJoinQueryConditionsTest.class); - suite.addTestSuite(IgniteCacheDistributedJoinTest.class); - suite.addTestSuite(IgniteCacheQueryNodeRestartDistributedJoinSelfTest.class); - suite.addTestSuite(IgniteCacheQueryStopOnCancelOrTimeoutDistributedJoinSelfTest.class); - suite.addTestSuite(IgniteSqlDistributedJoinSelfTest.class); - - return suite; - } -} From 9b81994bb17dbcd8134e22fe57ecd31bd6bd246d Mon Sep 17 00:00:00 2001 From: Alexander Paschenko Date: Mon, 16 Oct 2017 16:41:36 +0300 Subject: [PATCH 035/243] IGNITE-6637: SQL: add statements cache clear on cache destroy. This closes #2860. 
--- .../processors/query/h2/IgniteH2Indexing.java | 6 ++- .../cache/index/H2DynamicTableSelfTest.java | 43 +++++++++++++++++++ 2 files changed, 47 insertions(+), 2 deletions(-) diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index fddd2e8744b09..9515023425b94 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -59,7 +59,6 @@ import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.GridTopic; import org.apache.ignite.internal.IgniteInternalFuture; -import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheEntryImpl; import org.apache.ignite.internal.processors.cache.CacheObject; @@ -81,6 +80,7 @@ import org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.cache.query.QueryTable; +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.processors.query.CacheQueryObjectValueContext; import org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator; @@ -2346,6 +2346,8 @@ private boolean isDefaultSchema(String schemaName) { } } + stmtCache.clear(); + for (H2TableDescriptor tbl : rmvTbls) { for (Index idx : tbl.table().getIndexes()) idx.close(null); @@ -2368,7 +2370,7 @@ private boolean isDefaultSchema(String schemaName) { /** * Remove all cached queries from cached two-steps 
queries. */ - public void clearCachedQueries() { + private void clearCachedQueries() { twoStepCache.clear(); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java index b108bb3f71c10..c56db84cbbb5d 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java @@ -48,6 +48,7 @@ import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.binary.BinaryMarshaller; import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; +import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.processors.query.GridQueryProperty; import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; @@ -996,6 +997,39 @@ public void testTableAndIndexRecreate() { execute("drop table \"PUBLIC\".t"); } + /** + * @throws Exception If test failed. + */ + public void testQueryLocalWithRecreate() throws Exception { + execute("CREATE TABLE A(id int primary key, name varchar, surname varchar) WITH \"cache_name=cache," + + "template=replicated\""); + + // In order for local queries to work, let's use non client node. 
+ IgniteInternalCache cache = grid(0).cachex("cache"); + + assertNotNull(cache); + + executeLocal(cache.context(), "INSERT INTO A(id, name, surname) values (1, 'X', 'Y')"); + + assertEqualsCollections(Collections.singletonList(Arrays.asList(1, "X", "Y")), + executeLocal(cache.context(), "SELECT id, name, surname FROM A")); + + execute("DROP TABLE A"); + + execute("CREATE TABLE A(id int primary key, name varchar, surname varchar) WITH \"cache_name=cache\""); + + cache = grid(0).cachex("cache"); + + assertNotNull(cache); + + try { + executeLocal(cache.context(), "INSERT INTO A(id, name, surname) values (1, 'X', 'Y')"); + } + finally { + execute("DROP TABLE A"); + } + } + /** * Test that it's impossible to create tables with same name regardless of key/value wrapping settings. */ @@ -1442,6 +1476,15 @@ private List> execute(Ignite node, String sql) { return queryProcessor(node).querySqlFieldsNoCache(new SqlFieldsQuery(sql).setSchema("PUBLIC"), true).getAll(); } + /** + * Execute DDL statement on given node. + * + * @param sql Statement. + */ + private List> executeLocal(GridCacheContext cctx, String sql) { + return queryProcessor(cctx.grid()).querySqlFields(cctx, new SqlFieldsQuery(sql).setLocal(true), true).getAll(); + } + /** * @return Client node. */ From fccf2f71405a41a40cdbe775f8b48caa02e167d6 Mon Sep 17 00:00:00 2001 From: devozerov Date: Tue, 17 Oct 2017 11:20:20 +0300 Subject: [PATCH 036/243] Added missing license header to IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest. 
--- ...teSqlSkipReducerOnUpdateDmlFlagSelfTest.java | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest.java index e5efc06ef09e0..f31cb24dfd99b 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.ignite.internal.processors.query; import java.util.ArrayList; From ae793a537d44a715d257f091b912444c7b33bbca Mon Sep 17 00:00:00 2001 From: oleg-ostanin Date: Tue, 17 Oct 2017 15:18:39 +0300 Subject: [PATCH 037/243] IGNITE-6648 ML javadoc is missing in 2.2 binary release --- parent/pom.xml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/parent/pom.xml b/parent/pom.xml index b133b1e0792ae..cb335d1232214 100644 --- a/parent/pom.xml +++ b/parent/pom.xml @@ -460,6 +460,10 @@ Ignite Development Utils org.apache.ignite.development.utils* + + Ignite ML + org.apache.ignite.ml* +
      Date: Tue, 17 Oct 2017 14:45:42 +0300 Subject: [PATCH 038/243] IGNITE-6627 .NET: Fix serialization of enums within generic collections * Fix EnumEqualityComparer serialization * Fix enum arrays serialization * Fix empty objects missing metadata This closes #2864 --- .../Apache.Ignite.Core.Tests.csproj | 2 + .../Serializable/GenericCollectionsTest.cs | 112 ++++++++++++++++++ .../Client/Cache/CacheTest.cs | 76 ++++++++++++ .../Client/Cache/EmptyObject.cs | 54 +++++++++ .../Impl/Binary/BinarySystemHandlers.cs | 16 +-- .../Impl/Binary/BinaryWriter.cs | 7 ++ .../Impl/Binary/SerializableSerializer.cs | 11 +- .../Structure/BinaryStructureTracker.cs | 12 +- 8 files changed, 274 insertions(+), 16 deletions(-) create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/Serializable/GenericCollectionsTest.cs create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/EmptyObject.cs diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj index ec85ca2516f13..7ec75af650460 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj @@ -78,6 +78,7 @@ + @@ -94,6 +95,7 @@ + diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/Serializable/GenericCollectionsTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/Serializable/GenericCollectionsTest.cs new file mode 100644 index 0000000000000..cfbe824d95480 --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/Serializable/GenericCollectionsTest.cs @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Tests.Binary.Serializable +{ + using System.Collections.Generic; + using NUnit.Framework; + + /// + /// Tests Generic collections serializtion/deserialization scenarios. + /// + public class GenericCollectionsTest + { + /// + /// Tests Dictionary. + /// + [Test] + public void TestDictionary() + { + TestCollection(new Dictionary {{1, 1}, {2, 2}}); + TestCollection(new Dictionary {{ByteEnum.One, 1}, {ByteEnum.Two, 2}}); + TestCollection(new Dictionary {{IntEnum.One, 1}, {IntEnum.Two, 2}}); + } + + /// + /// Tests SortedDictionary. + /// + [Test] + public void TestSortedDictionary() + { + TestCollection(new SortedDictionary {{1, 1}, {2, 2}}); + TestCollection(new SortedDictionary {{ByteEnum.One, 1}, {ByteEnum.Two, 2}}); + TestCollection(new SortedDictionary {{IntEnum.One, 1}, {IntEnum.Two, 2}}); + } + + /// + /// Tests List. + /// + [Test] + public void TestList() + { + TestCollection(new List {1, 2}); + TestCollection(new List {ByteEnum.One, ByteEnum.Two}); + TestCollection(new List {IntEnum.One, IntEnum.Two}); + } + + /// + /// Tests LinkedList. + /// + [Test] + public void TestLinkedList() + { + TestCollection(new LinkedList(new List { 1, 2 })); + TestCollection(new LinkedList(new List {ByteEnum.One, ByteEnum.Two})); + TestCollection(new LinkedList(new List {IntEnum.One, IntEnum.Two})); + } + + /// + /// Tests HashSet. 
+ /// + [Test] + public void TestHashSet() + { + TestCollection(new HashSet {1, 2}); + TestCollection(new HashSet {ByteEnum.One, ByteEnum.Two}); + TestCollection(new HashSet {IntEnum.One, IntEnum.Two}); + } + + /// + /// Tests SortedSet. + /// + [Test] + public void TestSortedSet() + { + TestCollection(new SortedSet {1, 2}); + TestCollection(new SortedSet {ByteEnum.One, ByteEnum.Two}); + TestCollection(new SortedSet {IntEnum.One, IntEnum.Two}); + } + + private static void TestCollection(ICollection collection) + { + var res = TestUtils.SerializeDeserialize(collection); + Assert.AreEqual(collection, res); + } + + private enum ByteEnum : byte + { + One = 1, + Two = 2, + } + + private enum IntEnum + { + One = 1, + Two = 2, + } + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTest.cs index a659d5f7f6879..bd90a08ae7e31 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/CacheTest.cs @@ -68,6 +68,22 @@ public void TestPutGetPrimitives() } } + /// + /// Tests the cache put / get for Empty object type. + /// + [Test] + public void TestPutGetEmptyObject() + { + using (var client = GetClient()) + { + var serverCache = GetCache(); + var clientCache = client.GetCache(CacheName); + + serverCache.Put(1, new EmptyObject()); + Assert.IsNotNull(clientCache.Get(1)); + } + } + /// /// Tests the cache put / get with user data types. /// @@ -116,6 +132,60 @@ public void TestPutGetUserObjects([Values(true, false)] bool compactFooter) } } + /// + /// Tests the cache put / get for Dictionary with Enum keys. 
+ /// + [Test] + public void TestPutGetDictionary([Values(true, false)] bool compactFooter) + { + var cfg = GetClientConfiguration(); + + cfg.BinaryConfiguration = new BinaryConfiguration + { + CompactFooter = compactFooter + }; + + using (var client = Ignition.StartClient(cfg)) + { + var dict = new Dictionary { { ByteEnum.One, 1 }, { ByteEnum.Two, 2 } }; + + var serverCache = GetCache>(); + var clientCache = client.GetCache>(CacheName); + + serverCache.Put(1, dict); + var res = clientCache.Get(1); + + Assert.AreEqual(dict, res); + } + } + + /// + /// Tests the cache put / get for HashSet with Enum keys. + /// + [Test] + public void TestPutGetHashSet([Values(true, false)] bool compactFooter) + { + var cfg = GetClientConfiguration(); + + cfg.BinaryConfiguration = new BinaryConfiguration + { + CompactFooter = compactFooter + }; + + using (var client = Ignition.StartClient(cfg)) + { + var hashSet = new HashSet { ByteEnum.One, ByteEnum.Two }; + + var serverCache = GetCache>(); + var clientCache = client.GetCache>(CacheName); + + serverCache.Put(1, hashSet); + var res = clientCache.Get(1); + + Assert.AreEqual(hashSet, res); + } + } + /// /// Tests the TryGet method. /// @@ -780,5 +850,11 @@ private class Container { public Container Inner; } + + public enum ByteEnum : byte + { + One = 1, + Two = 2, + } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/EmptyObject.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/EmptyObject.cs new file mode 100644 index 0000000000000..47db939921acf --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/EmptyObject.cs @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Tests.Client.Cache +{ + using System; + using System.Runtime.Serialization; + using NUnit.Framework; + + /// + /// Object with no fields. + /// + [Serializable] + public class EmptyObject : ISerializable + { + /// + /// Initializes a new instance of the EmptyObject class. + /// + public EmptyObject() + { + // No-op. + } + + /// + /// Initializes a new instance of the EmptyObject class. + /// + private EmptyObject(SerializationInfo info, StreamingContext context) + { + Assert.AreEqual(StreamingContextStates.All, context.State); + Assert.IsNull(context.Context); + } + + /** */ + public void GetObjectData(SerializationInfo info, StreamingContext context) + { + Assert.AreEqual(StreamingContextStates.All, context.State); + Assert.IsNull(context.Context); + } + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinarySystemHandlers.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinarySystemHandlers.cs index f55a11ff1ecb5..3f16bc0dcb3f9 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinarySystemHandlers.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinarySystemHandlers.cs @@ -110,9 +110,9 @@ static BinarySystemHandlers() // 13. Arbitrary dictionary. ReadHandlers[BinaryTypeId.Dictionary] = new BinarySystemReader(ReadDictionary); - - // 14. Enum. 
- ReadHandlers[BinaryTypeId.ArrayEnum] = new BinarySystemReader(ReadEnumArray); + + // 14. Enum. Should be read as Array, see WriteEnumArray implementation. + ReadHandlers[BinaryTypeId.ArrayEnum] = new BinarySystemReader(ReadArray); } /// @@ -473,16 +473,6 @@ private static void WriteBinaryEnum(BinaryWriter ctx, BinaryEnum obj) ctx.WriteInt(binEnum.EnumValue); } - /** - * Read enum array. - */ - private static object ReadEnumArray(BinaryReader ctx, Type type) - { - var elemType = type.GetElementType() ?? typeof(object); - - return BinaryUtils.ReadTypedArray(ctx, true, elemType); - } - /// /// Reads the array. /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryWriter.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryWriter.cs index f59f17c12488d..b98ad5f50d06c 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryWriter.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryWriter.cs @@ -1493,6 +1493,13 @@ internal void SaveMetadata(IBinaryTypeDescriptor desc, IDictionary(1) diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/SerializableSerializer.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/SerializableSerializer.cs index e660cff57204a..80f267a870fc7 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/SerializableSerializer.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/SerializableSerializer.cs @@ -304,9 +304,16 @@ private static Type GetCustomType(SerializationInfo serInfo, ISerializable seria { return new TypeResolver().ResolveType(serInfo.FullTypeName, serInfo.AssemblyName); } - - if (serInfo.ObjectType != serializable.GetType()) + + if (serInfo.ObjectType != serializable.GetType() && + typeof(ISerializable).IsAssignableFrom(serInfo.ObjectType)) { + // serInfo.ObjectType should be ISerializable. 
There is a known case for generic collections: + // serializable is EnumEqualityComparer : ISerializable + // and serInfo.ObjectType is ObjectEqualityComparer (does not implement ISerializable interface). + // Please read a possible explanation here: + // http://dotnetstudio.blogspot.ru/2012/06/net-35-to-net-40-enum.html + return serInfo.ObjectType; } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Structure/BinaryStructureTracker.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Structure/BinaryStructureTracker.cs index 8f44e007e2abe..3517342be28cf 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Structure/BinaryStructureTracker.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Structure/BinaryStructureTracker.cs @@ -110,11 +110,21 @@ public void UpdateWriterStructure(BinaryWriter writer) var fields = metaHnd.OnObjectWriteFinished(); - // A new schema may be added, but no new fields. + // A new schema may be added, but no new fields. // In this case, we should still call SaveMetadata even if fields are null writer.SaveMetadata(_desc, fields); } } + else + { + // Special case when the object is with no properties. + // Save meta to Marshaller. + writer.Marshaller.GetBinaryTypeHandler(_desc); + + // Save meta to cluster. 
+ writer.SaveMetadata(_desc, null); + return; + } } /// From 425b65f2df9f02c6f15af4122a87e6dbaf6a1f95 Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Wed, 18 Oct 2017 10:59:53 +0300 Subject: [PATCH 039/243] IGNITE-6595 Cleanup entries after index rebuild --- .../internal/processors/query/h2/IgniteH2Indexing.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 9515023425b94..8129607b58088 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -1942,8 +1942,10 @@ private void cleanupStatementCache() { KeyCacheObject key = keyIter.next(); while (true) { + GridCacheEntryEx entry = null; + try { - GridCacheEntryEx entry = cctx.isNear() ? + entry = cctx.isNear() ? cctx.near().dht().entryEx(key) : cctx.cache().entryEx(key); entry.ensureIndexed(); @@ -1956,6 +1958,9 @@ private void cleanupStatementCache() { catch (GridDhtInvalidPartitionException ignore) { break; } + finally { + entry.context().evicts().touch(entry, AffinityTopologyVersion.NONE); + } } } finally { From 3ba374c319ac7048a05871692060e2f143d6acdf Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Sat, 7 Oct 2017 00:11:37 +0700 Subject: [PATCH 040/243] IGNITE-6463 Web Console: Fixed output of big numbers in SQL query results. 
(cherry picked from commit 35589a7) --- modules/web-console/backend/package.json | 4 +++- .../frontend/app/modules/agent/decompress.worker.js | 3 ++- modules/web-console/frontend/package.json | 7 +++++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/modules/web-console/backend/package.json b/modules/web-console/backend/package.json index 837f41c7e1495..07af45f22d49b 100644 --- a/modules/web-console/backend/package.json +++ b/modules/web-console/backend/package.json @@ -17,7 +17,9 @@ } ], "license": "Apache-2.0", - "keywords": "grid", + "keywords": [ + "Apache Ignite Web console" + ], "homepage": "https://ignite.apache.org/", "engines": { "npm": "^3.x.x", diff --git a/modules/web-console/frontend/app/modules/agent/decompress.worker.js b/modules/web-console/frontend/app/modules/agent/decompress.worker.js index d8e176d59b89e..2fd294da5f1e1 100644 --- a/modules/web-console/frontend/app/modules/agent/decompress.worker.js +++ b/modules/web-console/frontend/app/modules/agent/decompress.worker.js @@ -17,6 +17,7 @@ import _ from 'lodash'; import pako from 'pako'; +import bigIntJSON from 'json-bigint'; /** This worker decode & decompress BASE64/Zipped data and parse to JSON. 
*/ // eslint-disable-next-line no-undef @@ -27,7 +28,7 @@ onmessage = function(e) { const unzipped = pako.inflate(binaryString, {to: 'string'}); - const res = JSON.parse(unzipped); + const res = bigIntJSON({storeAsString: true}).parse(unzipped); postMessage(_.get(res, 'result', res)); }; diff --git a/modules/web-console/frontend/package.json b/modules/web-console/frontend/package.json index 2083640b60df5..82c3eeafb4513 100644 --- a/modules/web-console/frontend/package.json +++ b/modules/web-console/frontend/package.json @@ -18,7 +18,9 @@ } ], "license": "Apache-2.0", - "keywords": "grid", + "keywords": [ + "Apache Ignite Web console" + ], "homepage": "https://ignite.apache.org/", "engines": { "npm": "3.x.x", @@ -47,7 +49,7 @@ "angular-touch": "1.5.11", "angular-translate": "2.15.2", "angular-tree-control": "0.2.28", - "angular-ui-grid": "4.0.6", + "angular-ui-grid": "4.0.7", "@uirouter/angularjs": "1.0.5", "babel-core": "6.25.0", "babel-eslint": "7.2.3", @@ -76,6 +78,7 @@ "html-webpack-plugin": "2.29.0", "jquery": "3.2.1", "json-loader": "0.5.7", + "json-bigint": "0.2.3", "jszip": "3.1.4", "lodash": "4.17.4", "node-sass": "4.5.3", From b67feb0f175bfbd6ffbefe82a8d693c8ab7d4213 Mon Sep 17 00:00:00 2001 From: Vasiliy Sisko Date: Mon, 9 Oct 2017 17:55:23 +0700 Subject: [PATCH 041/243] IGNITE-5767 Web console: Use byte array type instead of java.lang.Object for binary JDBC types. 
(cherry picked from commit 3184437) --- .../frontend/app/helpers/jade/mixins.pug | 9 +++------ .../generator/ConfigurationGenerator.js | 6 +++--- .../states/configuration/caches/store.pug | 4 ++-- .../states/configuration/clusters/attributes.pug | 4 ++-- .../clusters/collision/job-stealing.pug | 4 ++-- .../states/configuration/domains/general.pug | 2 +- .../states/configuration/domains/query.pug | 8 ++++---- .../frontend/app/services/JavaTypes.service.js | 15 +++++++++++++++ .../frontend/app/services/LegacyUtils.service.js | 16 ++++++++++++---- .../frontend/controllers/domains-controller.js | 7 +++++-- 10 files changed, 49 insertions(+), 26 deletions(-) diff --git a/modules/web-console/frontend/app/helpers/jade/mixins.pug b/modules/web-console/frontend/app/helpers/jade/mixins.pug index 9ccbde2f7f7ab..3e390778ed21a 100644 --- a/modules/web-console/frontend/app/helpers/jade/mixins.pug +++ b/modules/web-console/frontend/app/helpers/jade/mixins.pug @@ -572,7 +572,7 @@ mixin btn-remove-cond(cond, click, tip) i.tipField.fa.fa-remove(ng-show=cond ng-click=click bs-tooltip=tip data-trigger='hover') //- LEGACY mixin for LEGACY pair values tables. 
-mixin table-pair-edit(tbl, prefix, keyPlaceholder, valPlaceholder, keyJavaBuiltInTypes, valueJavaBuiltInTypes, focusId, index, divider) +mixin table-pair-edit(tbl, prefix, keyPlaceholder, valPlaceholder, valueJavaBuiltInClasses, focusId, index, divider) -var keyModel = `${tbl}.${prefix}Key` -var valModel = `${tbl}.${prefix}Value` @@ -582,10 +582,7 @@ mixin table-pair-edit(tbl, prefix, keyPlaceholder, valPlaceholder, keyJavaBuiltI .col-xs-6.col-sm-6.col-md-6 .fieldSep !{divider} .input-tip - if keyJavaBuiltInTypes - input.form-control(id=keyFocusId ignite-on-enter-focus-move=valFocusId type='text' ng-model=keyModel placeholder=keyPlaceholder bs-typeahead container='body' ignite-retain-selection data-min-length='1' bs-options='javaClass for javaClass in javaBuiltInClasses' ignite-on-escape='tableReset(false)') - else - input.form-control(id=keyFocusId ignite-on-enter-focus-move=valFocusId type='text' ng-model=keyModel placeholder=keyPlaceholder ignite-on-escape='tableReset(false)') + input.form-control(id=keyFocusId ignite-on-enter-focus-move=valFocusId type='text' ng-model=keyModel placeholder=keyPlaceholder ignite-on-escape='tableReset(false)') .col-xs-6.col-sm-6.col-md-6 -var btnVisible = 'tablePairSaveVisible(' + tbl + ', ' + index + ')' -var btnSave = 'tablePairSave(tablePairValid, backupItem, ' + tbl + ', ' + index + ')' @@ -593,7 +590,7 @@ mixin table-pair-edit(tbl, prefix, keyPlaceholder, valPlaceholder, keyJavaBuiltI +btn-save(btnVisible, btnSave) .input-tip - if valueJavaBuiltInTypes + if valueJavaBuiltInClasses input.form-control(id=valFocusId type='text' ng-model=valModel placeholder=valPlaceholder bs-typeahead container='body' ignite-retain-selection data-min-length='1' bs-options='javaClass for javaClass in javaBuiltInClasses' ignite-on-enter=btnVisibleAndSave ignite-on-escape='tableReset(false)') else input.form-control(id=valFocusId type='text' ng-model=valModel placeholder=valPlaceholder ignite-on-enter=btnVisibleAndSave 
ignite-on-escape='tableReset(false)') diff --git a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js index 9d7887ae47761..75ee47cd14810 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js @@ -1681,7 +1681,7 @@ export default class IgniteConfigurationGenerator { static domainModelQuery(domain, available, cfg = this.domainConfigurationBean(domain)) { if (cfg.valueOf('queryMetadata') === 'Configuration') { const fields = _.filter(_.map(domain.fields, - (e) => ({name: e.name, className: javaTypes.fullClassName(e.className)})), (field) => { + (e) => ({name: e.name, className: javaTypes.stringClassName(e.className)})), (field) => { return field.name !== domain.keyFieldName && field.name !== domain.valueFieldName; }); @@ -1695,10 +1695,10 @@ export default class IgniteConfigurationGenerator { const valFieldName = cfg.valueOf('valueFieldName'); if (keyFieldName) - fields.push({name: keyFieldName, className: javaTypes.fullClassName(domain.keyType)}); + fields.push({name: keyFieldName, className: javaTypes.stringClassName(domain.keyType)}); if (valFieldName) - fields.push({name: valFieldName, className: javaTypes.fullClassName(domain.valueType)}); + fields.push({name: valFieldName, className: javaTypes.stringClassName(domain.valueType)}); } cfg.collectionProperty('keyFields', 'keyFields', domain.queryKeyFields, 'java.lang.String', 'java.util.HashSet') diff --git a/modules/web-console/frontend/app/modules/states/configuration/caches/store.pug b/modules/web-console/frontend/app/modules/states/configuration/caches/store.pug index d6dcbbe92a2b0..0c983a2c65b35 100644 --- a/modules/web-console/frontend/app/modules/states/configuration/caches/store.pug +++ 
b/modules/web-console/frontend/app/modules/states/configuration/caches/store.pug @@ -194,11 +194,11 @@ mixin hibernateField(name, model, items, valid, save, newItem) a.labelFormField(ng-click='tableStartEdit(backupItem, hibernatePropsTbl, $index)') {{item.name}} = {{item.value}} +btn-remove('tableRemove(backupItem, hibernatePropsTbl, $index)', '"Remove Property"') td.col-sm-12(ng-if='tableEditing(hibernatePropsTbl, $index)') - +table-pair-edit('hibernatePropsTbl', 'cur', 'Property name', 'Property value', false, false, '{{::hibernatePropsTbl.focusId + $index}}', '$index', '=') + +table-pair-edit('hibernatePropsTbl', 'cur', 'Property name', 'Property value', false, '{{::hibernatePropsTbl.focusId + $index}}', '$index', '=') tfoot(ng-show='tableNewItemActive(hibernatePropsTbl)') tr td.col-sm-12 - +table-pair-edit('hibernatePropsTbl', 'new', 'Property name', 'Property value', false, false, '{{::hibernatePropsTbl.focusId + $index}}', '-1', '=') + +table-pair-edit('hibernatePropsTbl', 'new', 'Property name', 'Property value', false, '{{::hibernatePropsTbl.focusId + $index}}', '-1', '=') .settings-row diff --git a/modules/web-console/frontend/app/modules/states/configuration/clusters/attributes.pug b/modules/web-console/frontend/app/modules/states/configuration/clusters/attributes.pug index cac122b6b00ef..beb07396cf6cc 100644 --- a/modules/web-console/frontend/app/modules/states/configuration/clusters/attributes.pug +++ b/modules/web-console/frontend/app/modules/states/configuration/clusters/attributes.pug @@ -48,10 +48,10 @@ include /app/helpers/jade/mixins a.labelFormField(ng-click='tableStartEdit(backupItem, attributesTbl, $index)') {{item.name}} = {{item.value}} +btn-remove('tableRemove(backupItem, attributesTbl, $index)', '"Remove attribute"') td.col-sm-12(ng-show='tableEditing(attributesTbl, $index)') - +table-pair-edit('attributesTbl', 'cur', 'Attribute name', 'Attribute value', false, false, '{{::attributesTbl.focusId + $index}}', '$index', '=') + 
+table-pair-edit('attributesTbl', 'cur', 'Attribute name', 'Attribute value', false, '{{::attributesTbl.focusId + $index}}', '$index', '=') tfoot(ng-show='tableNewItemActive(attributesTbl)') tr td.col-sm-12 - +table-pair-edit('attributesTbl', 'new', 'Attribute name', 'Attribute value', false, false, '{{::attributesTbl.focusId + $index}}', '-1', '=') + +table-pair-edit('attributesTbl', 'new', 'Attribute name', 'Attribute value', false, '{{::attributesTbl.focusId + $index}}', '-1', '=') .col-sm-6 +preview-xml-java(model, 'clusterUserAttributes') diff --git a/modules/web-console/frontend/app/modules/states/configuration/clusters/collision/job-stealing.pug b/modules/web-console/frontend/app/modules/states/configuration/clusters/collision/job-stealing.pug index d10a02eb56e23..eeb6114d2ca1e 100644 --- a/modules/web-console/frontend/app/modules/states/configuration/clusters/collision/job-stealing.pug +++ b/modules/web-console/frontend/app/modules/states/configuration/clusters/collision/job-stealing.pug @@ -56,8 +56,8 @@ div a.labelFormField(ng-click='tableStartEdit(backupItem, stealingAttributesTbl, $index)') {{item.name}} = {{item.value}} +btn-remove('tableRemove(backupItem, stealingAttributesTbl, $index)', '"Remove attribute"') td.col-sm-12(ng-show='tableEditing(stealingAttributesTbl, $index)') - +table-pair-edit('stealingAttributesTbl', 'cur', 'Attribute name', 'Attribute value', false, false, '{{::stealingAttributesTbl.focusId + $index}}', '$index', '=') + +table-pair-edit('stealingAttributesTbl', 'cur', 'Attribute name', 'Attribute value', false, '{{::stealingAttributesTbl.focusId + $index}}', '$index', '=') tfoot(ng-show='tableNewItemActive(stealingAttributesTbl)') tr td.col-sm-12 - +table-pair-edit('stealingAttributesTbl', 'new', 'Attribute name', 'Attribute value', false, false, '{{::stealingAttributesTbl.focusId + $index}}', '-1', '=') + +table-pair-edit('stealingAttributesTbl', 'new', 'Attribute name', 'Attribute value', false, '{{::stealingAttributesTbl.focusId 
+ $index}}', '-1', '=') diff --git a/modules/web-console/frontend/app/modules/states/configuration/domains/general.pug b/modules/web-console/frontend/app/modules/states/configuration/domains/general.pug index f6f4e7293137f..7c8de9a4d917b 100644 --- a/modules/web-console/frontend/app/modules/states/configuration/domains/general.pug +++ b/modules/web-console/frontend/app/modules/states/configuration/domains/general.pug @@ -44,7 +44,7 @@ include /app/helpers/jade/mixins
    • Configuration via QueryEntity class
    • \ ') .settings-row - +java-class-typeahead('Key type:', `${model}.keyType`, '"keyType"', 'javaBuiltInClasses', 'true', 'true', '{{ ' + generatePojo + ' ? "Full class name for Key" : "Key type name" }}', 'Key class used to store key in cache', generatePojo) + +java-class-typeahead('Key type:', `${model}.keyType`, '"keyType"', 'javaBuiltInClassesBase', 'true', 'true', '{{ ' + generatePojo + ' ? "Full class name for Key" : "Key type name" }}', 'Key class used to store key in cache', generatePojo) .settings-row +java-class-autofocus-placholder('Value type:', `${model}.valueType`, '"valueType"', 'true', 'true', 'false', '{{ ' + generatePojo +' ? "Enter fully qualified class name" : "Value type name" }}', 'Value class used to store value in cache', generatePojo) diff --git a/modules/web-console/frontend/app/modules/states/configuration/domains/query.pug b/modules/web-console/frontend/app/modules/states/configuration/domains/query.pug index e8eceacc507dc..b4b5abe98ee33 100644 --- a/modules/web-console/frontend/app/modules/states/configuration/domains/query.pug +++ b/modules/web-console/frontend/app/modules/states/configuration/domains/query.pug @@ -95,11 +95,11 @@ mixin table-index-item-edit(prefix, index, sortAvailable, idAddition) a.labelFormField(ng-click='tableStartEdit(backupItem, queryFieldsTbl, $index)') {{item.name}} / {{item.className}} +btn-remove('tableRemove(backupItem, queryFieldsTbl, $index)', '"Remove path"') td.col-sm-12(ng-show='tableEditing(queryFieldsTbl, $index)') - +table-pair-edit('queryFieldsTbl', 'cur', 'Field name', 'Field full class name', false, true, '{{::queryFieldsTbl.focusId + $index}}', '$index', '/') + +table-pair-edit('queryFieldsTbl', 'cur', 'Field name', 'Field full class name', true, '{{::queryFieldsTbl.focusId + $index}}', '$index', '/') tfoot(ng-show='tableNewItemActive(queryFieldsTbl)') tr td.col-sm-12 - +table-pair-edit('queryFieldsTbl', 'new', 'Field name', 'Field full class name', false, true, '{{::queryFieldsTbl.focusId + 
$index}}', '-1', '/') + +table-pair-edit('queryFieldsTbl', 'new', 'Field name', 'Field full class name', true, '{{::queryFieldsTbl.focusId + $index}}', '-1', '/') .settings-row +ignite-form-field-dropdown('Key fields:', queryKeyFields, '"queryKeyFields"', false, false, true, 'Select key fields', 'Configure available fields', 'fields(\'cur\', ' + queryKeyFields + ')', @@ -125,11 +125,11 @@ mixin table-index-item-edit(prefix, index, sortAvailable, idAddition) a.labelFormField(ng-click='tableStartEdit(backupItem, aliasesTbl, $index)') {{item.field}} → {{item.alias}} +btn-remove('tableRemove(backupItem, aliasesTbl, $index)', '"Remove alias"') td.col-sm-12(ng-show='tableEditing(aliasesTbl, $index)') - +table-pair-edit('aliasesTbl', 'cur', 'Field name', 'Field Alias', false, false, '{{::aliasesTbl.focusId + $index}}', '$index', '→') + +table-pair-edit('aliasesTbl', 'cur', 'Field name', 'Field Alias', false, '{{::aliasesTbl.focusId + $index}}', '$index', '→') tfoot(ng-show='tableNewItemActive(aliasesTbl)') tr td.col-sm-12 - +table-pair-edit('aliasesTbl', 'new', 'Field name', 'Field Alias', false, false, '{{::aliasesTbl.focusId + $index}}', '-1', '→') + +table-pair-edit('aliasesTbl', 'new', 'Field name', 'Field Alias', false, '{{::aliasesTbl.focusId + $index}}', '-1', '→') .settings-row(ng-init='indexesTbl={type: "table-indexes", model: "indexes", focusId: "IndexName", ui: "table-indexes"}') +ignite-form-group(ng-model=queryIndexes ng-form=queryIndexesForm) ignite-form-field-label diff --git a/modules/web-console/frontend/app/services/JavaTypes.service.js b/modules/web-console/frontend/app/services/JavaTypes.service.js index 944fea5aa893a..dff73a4d5dc97 100644 --- a/modules/web-console/frontend/app/services/JavaTypes.service.js +++ b/modules/web-console/frontend/app/services/JavaTypes.service.js @@ -36,6 +36,9 @@ const VALID_PACKAGE = /^(([a-zA-Z_$][a-zA-Z0-9_$]*)\.)*([a-zA-Z_$][a-zA-Z0-9_$]* // Regular expression to check UUID string representation. 
const VALID_UUID = /^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/im; +// Extended list of Java built-in class names. +const JAVA_CLASS_STRINGS = JAVA_CLASSES.slice(); + /** * Utility service for various check on java types. */ @@ -45,6 +48,8 @@ export default class JavaTypes { constructor(clusterDflts, cacheDflts, igfsDflts) { this.enumClasses = _.uniq(this._enumClassesAcc(_.merge(clusterDflts, cacheDflts, igfsDflts), [])); this.shortEnumClasses = _.map(this.enumClasses, (cls) => this.shortClassName(cls)); + + JAVA_CLASS_STRINGS.push({short: 'byte[]', full: 'byte[]', stringValue: '[B'}); } /** @@ -94,6 +99,16 @@ export default class JavaTypes { return type ? type.full : clsName; } + /** + * @param clsName Class name to check. + * @returns {String} Full class name string presentation for java build-in types or source class otherwise. + */ + stringClassName(clsName) { + const type = _.find(JAVA_CLASS_STRINGS, (clazz) => clsName === clazz.short); + + return type ? type.stringValue || type.full : clsName; + } + /** * Extract class name from full class name. * diff --git a/modules/web-console/frontend/app/services/LegacyUtils.service.js b/modules/web-console/frontend/app/services/LegacyUtils.service.js index e7c064bcc4505..be593b052b1c4 100644 --- a/modules/web-console/frontend/app/services/LegacyUtils.service.js +++ b/modules/web-console/frontend/app/services/LegacyUtils.service.js @@ -50,6 +50,7 @@ export default ['IgniteLegacyUtils', ['IgniteErrorPopover', (ErrorPopover) => { 'boolean', 'Boolean', 'byte', + 'byte[]', 'Byte', 'Date', 'double', @@ -88,13 +89,15 @@ export default ['IgniteLegacyUtils', ['IgniteErrorPopover', (ErrorPopover) => { /** * @param clsName Class name to check. + * @param additionalClasses List of classes to check as builtin. * @returns {Boolean} 'true' if given class name is a java build-in type. 
*/ - function isJavaBuiltInClass(clsName) { + function isJavaBuiltInClass(clsName, additionalClasses) { if (isEmptyString(clsName)) return false; - return _.includes(javaBuiltInClasses, clsName) || _.includes(javaBuiltInFullNameClasses, clsName); + return _.includes(javaBuiltInClasses, clsName) || _.includes(javaBuiltInFullNameClasses, clsName) + || (_.isArray(additionalClasses) && _.includes(additionalClasses, clsName)); } const SUPPORTED_JDBC_TYPES = [ @@ -323,8 +326,13 @@ export default ['IgniteLegacyUtils', ['IgniteErrorPopover', (ErrorPopover) => { if (!allowBuiltInClass && isJavaBuiltInClass(ident)) return !stopEdit && ErrorPopover.show(elemId, msg + ' should not be the Java build-in class!', panels, panelId); - if (len < 2 && !isJavaBuiltInClass(ident) && !packageOnly) - return !stopEdit && ErrorPopover.show(elemId, msg + ' does not have package specified!', panels, panelId); + if (len < 2) { + if (isJavaBuiltInClass(ident, allowBuiltInClass)) + return true; + + if (!packageOnly) + return !stopEdit && ErrorPopover.show(elemId, msg + ' does not have package specified!', panels, panelId); + } for (let i = 0; i < parts.length; i++) { const part = parts[i]; diff --git a/modules/web-console/frontend/controllers/domains-controller.js b/modules/web-console/frontend/controllers/domains-controller.js index 840086ea86f40..646f8e5dca481 100644 --- a/modules/web-console/frontend/controllers/domains-controller.js +++ b/modules/web-console/frontend/controllers/domains-controller.js @@ -81,7 +81,10 @@ export default ['$rootScope', '$scope', '$http', '$state', '$filter', '$timeout' return !item.empty && (!item._id || _.find($scope.displayedRows, {_id: item._id})); }; - $scope.javaBuiltInClasses = LegacyUtils.javaBuiltInClasses; + $scope.javaBuiltInClassesBase = LegacyUtils.javaBuiltInClasses; + $scope.javaBuiltInClasses = $scope.javaBuiltInClassesBase.slice(); + $scope.javaBuiltInClasses.splice(3, 0, 'byte[]'); + $scope.compactJavaName = FormUtils.compactJavaName; 
$scope.widthIsSufficient = FormUtils.widthIsSufficient; $scope.saveBtnTipText = FormUtils.saveBtnTipText; @@ -1600,7 +1603,7 @@ export default ['$rootScope', '$scope', '$http', '$state', '$filter', '$timeout' return !stopEdit && ErrorPopover.show(LegacyTable.tableFieldId(index, pairField.idPrefix + pairField.id), 'Field with such ' + pairField.dupObjName + ' already exists!', $scope.ui, 'query'); } - if (pairField.classValidation && !LegacyUtils.isValidJavaClass(pairField.msg, pairValue.value, true, LegacyTable.tableFieldId(index, 'Value' + pairField.id), false, $scope.ui, 'query', stopEdit)) { + if (pairField.classValidation && !LegacyUtils.isValidJavaClass(pairField.msg, pairValue.value, ['byte[]'], LegacyTable.tableFieldId(index, 'Value' + pairField.id), false, $scope.ui, 'query', stopEdit)) { if (stopEdit) return false; From 8e1560322b87d79b3d3250832a3969ac4032d6fc Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Sat, 7 Oct 2017 01:10:08 +0700 Subject: [PATCH 042/243] IGNITE-6574 Remove pending requests in case STATUS_AUTH_FAILURE && credentials == null. 
(cherry picked from commit 85261a3) --- .../client/impl/connection/GridClientNioTcpConnection.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientNioTcpConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientNioTcpConnection.java index f72a009139c09..3bedd5fbc6d29 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientNioTcpConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientNioTcpConnection.java @@ -575,6 +575,8 @@ private void handleClientResponse(TcpClientFuture fut, GridClientResponse resp) "(client has no credentials) [clientId=" + clientId + ", srvAddr=" + serverAddress() + ", errMsg=" + resp.errorMessage() +']')); + removePending(resp.requestId()); + return; } From 22100d4f2d11677fd583246cbec7169947c0873a Mon Sep 17 00:00:00 2001 From: Oleg Ostanin Date: Wed, 18 Oct 2017 14:14:41 +0300 Subject: [PATCH 043/243] IGNITE-5608: Added sqlline utility to the build. This closes #2861. 
--- assembly/dependencies-sqlline.xml | 65 +++++++++++++++++ modules/sqlline/bin/ignitesql.bat | 112 ++++++++++++++++++++++++++++++ modules/sqlline/bin/ignitesql.sh | 54 ++++++++++++++ modules/sqlline/pom.xml | 74 ++++++++++++++++++++ pom.xml | 17 +++++ 5 files changed, 322 insertions(+) create mode 100644 assembly/dependencies-sqlline.xml create mode 100644 modules/sqlline/bin/ignitesql.bat create mode 100644 modules/sqlline/bin/ignitesql.sh create mode 100644 modules/sqlline/pom.xml diff --git a/assembly/dependencies-sqlline.xml b/assembly/dependencies-sqlline.xml new file mode 100644 index 0000000000000..f8953a17f9aa3 --- /dev/null +++ b/assembly/dependencies-sqlline.xml @@ -0,0 +1,65 @@ + + + + + + dependencies-sqlline + + + dir + + + false + + + + + org.apache.ignite:ignite-sqlline + + + false + + + target + include/sqlline + + *.jar + + + *-tests.jar + *-javadoc.jar + *-sources.jar + + + + target/libs + include/sqlline + + + + bin + / + + + + + + diff --git a/modules/sqlline/bin/ignitesql.bat b/modules/sqlline/bin/ignitesql.bat new file mode 100644 index 0000000000000..828e93a1217fb --- /dev/null +++ b/modules/sqlline/bin/ignitesql.bat @@ -0,0 +1,112 @@ +:: +:: Licensed to the Apache Software Foundation (ASF) under one or more +:: contributor license agreements. See the NOTICE file distributed with +:: this work for additional information regarding copyright ownership. +:: The ASF licenses this file to You under the Apache License, Version 2.0 +:: (the "License"); you may not use this file except in compliance with +:: the License. You may obtain a copy of the License at +:: +:: http://www.apache.org/licenses/LICENSE-2.0 +:: +:: Unless required by applicable law or agreed to in writing, software +:: distributed under the License is distributed on an "AS IS" BASIS, +:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +:: See the License for the specific language governing permissions and +:: limitations under the License. 
+:: + +:: +:: Ignite database connector. +:: + +@echo off +Setlocal EnableDelayedExpansion + +if "%OS%" == "Windows_NT" setlocal + +:: Check JAVA_HOME. +if defined JAVA_HOME goto checkJdk + echo %0, ERROR: + echo JAVA_HOME environment variable is not found. + echo Please point JAVA_HOME variable to location of JDK 1.7 or JDK 1.8. + echo You can also download latest JDK at http://java.com/download. +goto error_finish + +:checkJdk +:: Check that JDK is where it should be. +if exist "%JAVA_HOME%\bin\java.exe" goto checkJdkVersion + echo %0, ERROR: + echo JAVA is not found in JAVA_HOME=%JAVA_HOME%. + echo Please point JAVA_HOME variable to installation of JDK 1.7 or JDK 1.8. + echo You can also download latest JDK at http://java.com/download. +goto error_finish + +:checkJdkVersion +"%JAVA_HOME%\bin\java.exe" -version 2>&1 | findstr "1\.[78]\." > nul +if %ERRORLEVEL% equ 0 goto checkIgniteHome1 + echo %0, ERROR: + echo The version of JAVA installed in %JAVA_HOME% is incorrect. + echo Please point JAVA_HOME variable to installation of JDK 1.7 or JDK 1.8. + echo You can also download latest JDK at http://java.com/download. +goto error_finish + +:: Check IGNITE_HOME. +:checkIgniteHome1 +if defined IGNITE_HOME goto checkIgniteHome2 + pushd "%~dp0"/.. + set IGNITE_HOME=%CD% + popd + +:checkIgniteHome2 +:: Strip double quotes from IGNITE_HOME +set IGNITE_HOME=%IGNITE_HOME:"=% + +:: remove all trailing slashes from IGNITE_HOME. +if %IGNITE_HOME:~-1,1% == \ goto removeTrailingSlash +if %IGNITE_HOME:~-1,1% == / goto removeTrailingSlash +goto checkIgniteHome3 + +:removeTrailingSlash +set IGNITE_HOME=%IGNITE_HOME:~0,-1% +goto checkIgniteHome2 + +:checkIgniteHome3 +if exist "%IGNITE_HOME%\config" goto checkIgniteHome4 + echo %0, ERROR: Ignite installation folder is not found or IGNITE_HOME environment variable is not valid. + echo Please create IGNITE_HOME environment variable pointing to location of + echo Ignite installation folder. 
+ goto error_finish + +:checkIgniteHome4 + +:: +:: Set SCRIPTS_HOME - base path to scripts. +:: +set SCRIPTS_HOME=%IGNITE_HOME%\bin + +:: Remove trailing spaces +for /l %%a in (1,1,31) do if /i "%SCRIPTS_HOME:~-1%" == " " set SCRIPTS_HOME=%SCRIPTS_HOME:~0,-1% + +if /i "%SCRIPTS_HOME%\" == "%~dp0" goto setProgName + echo %0, WARN: IGNITE_HOME environment variable may be pointing to wrong folder: %IGNITE_HOME% + +:setProgName +:: +:: Set program name. +:: +set PROG_NAME=ignitesql.bat +if "%OS%" == "Windows_NT" set PROG_NAME=%~nx0% + +:run + +:: +:: Set IGNITE_LIBS +:: +call "%SCRIPTS_HOME%\include\setenv.bat" + +set CP=%IGNITE_LIBS% +set CP=%CP%;%IGNITE_HOME%\bin\include\sqlline\* + +"%JAVA_HOME%\bin\java.exe" -cp "%CP%" sqlline.SqlLine -d org.apache.ignite.IgniteJdbcThinDriver %* + +:error_finish \ No newline at end of file diff --git a/modules/sqlline/bin/ignitesql.sh b/modules/sqlline/bin/ignitesql.sh new file mode 100644 index 0000000000000..5745aea489a1c --- /dev/null +++ b/modules/sqlline/bin/ignitesql.sh @@ -0,0 +1,54 @@ +#!/bin/bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# +# Ignite database connector. +# + +# +# Import common functions. 
+# +if [ "${IGNITE_HOME}" = "" ]; + then IGNITE_HOME_TMP="$(dirname "$(cd "$(dirname "$0")"; "pwd")")"; + else IGNITE_HOME_TMP=${IGNITE_HOME}; +fi + +# +# Set SCRIPTS_HOME - base path to scripts. +# +SCRIPTS_HOME="${IGNITE_HOME_TMP}/bin" + +source "${SCRIPTS_HOME}"/include/functions.sh + +# +# Discover IGNITE_HOME environment variable. +# +setIgniteHome + +# +# Set IGNITE_LIBS. +# +. "${SCRIPTS_HOME}"/include/setenv.sh + +JDBCLINK="jdbc:ignite:thin://${HOST_AND_PORT}${SCHEMA_DELIMITER}${SCHEMA}${PARAMS}" + +CP="${IGNITE_LIBS}" + +CP="${CP}${SEP}${IGNITE_HOME_TMP}/bin/include/sqlline/*" + +java -cp ${CP} sqlline.SqlLine -d org.apache.ignite.IgniteJdbcThinDriver $@ \ No newline at end of file diff --git a/modules/sqlline/pom.xml b/modules/sqlline/pom.xml new file mode 100644 index 0000000000000..bcb71ec536ed6 --- /dev/null +++ b/modules/sqlline/pom.xml @@ -0,0 +1,74 @@ + + + + + + + + + apache-ignite + org.apache.ignite + 2.3.0-SNAPSHOT + ../../pom.xml + + 4.0.0 + + ignite-sqlline + jar + + sqlline + http://maven.apache.org + + + UTF-8 + + + + + sqlline + sqlline + 1.3.0 + + + + + + + maven-dependency-plugin + + + copy-libs + test-compile + + copy-dependencies + + + org.apache.ignite + target/libs + runtime + false + + + + + + + diff --git a/pom.xml b/pom.xml index d60863f9da179..befbf4c7e1911 100644 --- a/pom.xml +++ b/pom.xml @@ -95,6 +95,7 @@ modules/kubernetes modules/zeromq modules/rocketmq + modules/sqlline @@ -469,6 +470,22 @@ + + dependencies-sqlline + validate + + single + + + + assembly/dependencies-sqlline.xml + + target/release-package-${ignite.edition} + bin + false + + + scala-scripts validate From 5df25fc8adf01a8e4999563f3f31b79c195801d4 Mon Sep 17 00:00:00 2001 From: devozerov Date: Wed, 18 Oct 2017 15:03:28 +0300 Subject: [PATCH 044/243] IGNITE-6662: SQL: fixed affinity key field name resolution during both parsig and table creation. This closes #2875. 
--- .../internal/processors/query/h2/opt/GridH2Table.java | 6 +++++- .../processors/query/h2/sql/GridSqlQueryParser.java | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java index 79eed12dbf595..0b3462e33f79d 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java @@ -141,8 +141,12 @@ public GridH2Table(CreateTableData createTblData, H2RowDescriptor desc, H2RowFac int affKeyColId = -1; if (affKey != null) { - if (doesColumnExist(affKey)) + if (doesColumnExist(affKey)) { affKeyColId = getColumn(affKey).getColumnId(); + + if (desc.isKeyColumn(affKeyColId)) + affKeyColId = KEY_COL; + } else affinityColExists = false; } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java index bf72200c45b02..280fb2d420154 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java @@ -1104,7 +1104,7 @@ private GridSqlCreateTable parseCreateTable(CreateTable createTbl) { if (res.affinityKey() == null) { LinkedHashSet pkCols0 = res.primaryKeyColumns(); - if (!F.isEmpty(pkCols0) && pkCols0.size() == 1) + if (!F.isEmpty(pkCols0) && pkCols0.size() == 1 && wrapKey0) res.affinityKey(pkCols0.iterator().next()); } From ad01f9b099d0bf92537378859ad6d5a52de57748 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Thu, 19 Oct 2017 09:43:20 +0700 Subject: [PATCH 045/243] IGNITE-6647 Web 
Console: Implemented support of schema migration scripts. (cherry picked from commit c65399c) --- modules/web-console/DEVNOTES.txt | 6 +++ modules/web-console/backend/index.js | 53 ++++++++++++++----- .../web-console/backend/migrations/README.txt | 4 ++ modules/web-console/backend/package.json | 1 + 4 files changed, 52 insertions(+), 12 deletions(-) create mode 100644 modules/web-console/backend/migrations/README.txt diff --git a/modules/web-console/DEVNOTES.txt b/modules/web-console/DEVNOTES.txt index 85ec9587fd24f..aa8702e04c53c 100644 --- a/modules/web-console/DEVNOTES.txt +++ b/modules/web-console/DEVNOTES.txt @@ -27,3 +27,9 @@ How to run console in development mode: If needed run "npm install --no-optional" (if dependencies changed) and start webpack in development mode "npm run dev". 4. In browser open: http://localhost:9000 + +How to migrate model: + +1. Model will be upgraded on first start. +2. To downgrade model execute in terminal following command: "./node_modules/.bin/migrate down -d ". + Example: "./node_modules/.bin/migrate down add_index -d mongodb://localhost/console". diff --git a/modules/web-console/backend/index.js b/modules/web-console/backend/index.js index f6ba4398e3cc9..06a38f8f5f70f 100644 --- a/modules/web-console/backend/index.js +++ b/modules/web-console/backend/index.js @@ -17,13 +17,15 @@ 'use strict'; +const _ = require('lodash'); const fs = require('fs'); const path = require('path'); const http = require('http'); const https = require('https'); +const MigrateMongoose = require('migrate-mongoose'); const igniteModules = process.env.IGNITE_MODULES ? 
- path.join(path.normalize(process.env.IGNITE_MODULES), 'backend') : './ignite_modules'; + path.join(path.normalize(process.env.IGNITE_MODULES), 'backend') : path.join(__dirname, 'ignite_modules'); let injector; @@ -35,7 +37,7 @@ try { injector = require(igniteModulesInjector); } catch (ignore) { - injector = require(path.join(__dirname, './injector')); + injector = require(path.join(__dirname, 'injector')); } /** @@ -62,15 +64,6 @@ const _onError = (addr, error) => { } }; -/** - * Event listener for HTTP server "listening" event. - */ -const _onListening = (addr) => { - const bind = typeof addr === 'string' ? 'pipe ' + addr : 'port ' + addr.port; - - console.log('Start listening on ' + bind); -}; - /** * @param settings * @param {ApiServer} apiSrv @@ -98,7 +91,43 @@ const init = ([settings, apiSrv, agentsHnd, browsersHnd]) => { process.send('running'); }; -Promise.all([injector('settings'), injector('api-server'), injector('agents-handler'), injector('browsers-handler')]) +/** + * Run mongo model migration. + * + * @param dbConnectionUri Mongo connection url. + * @param group Migrations group. + * @param migrationsPath Migrations path. 
+ */ +const migrate = (dbConnectionUri, group, migrationsPath) => { + const migrator = new MigrateMongoose({ + migrationsPath, + dbConnectionUri, + autosync: true + }); + + console.log(`Running ${group} migrations...`); + + return migrator.run('up') + .then(() => console.log(`All ${group} migrations finished successfully.`)) + .catch((err) => { + const msg = _.get(err, 'message'); + + if (_.startsWith(msg, 'There are no migrations to run') || _.startsWith(msg, 'There are no pending migrations.')) { + console.log(`There are no ${group} migrations to run.`); + + return; + } + + throw err; + }); +}; + +injector('settings') + .then(({mongoUrl}) => { + return migrate(mongoUrl, 'Ignite', path.join(__dirname, 'migrations')) + .then(() => migrate(mongoUrl, 'Ignite Modules', path.join(igniteModules, 'migrations'))); + }) + .then(() => Promise.all([injector('settings'), injector('api-server'), injector('agents-handler'), injector('browsers-handler')])) .then(init) .catch((err) => { console.error(err); diff --git a/modules/web-console/backend/migrations/README.txt b/modules/web-console/backend/migrations/README.txt new file mode 100644 index 0000000000000..e907fad18bb20 --- /dev/null +++ b/modules/web-console/backend/migrations/README.txt @@ -0,0 +1,4 @@ +Ignite Web Console +====================================== + +This folder contains scripts for model migration. 
diff --git a/modules/web-console/backend/package.json b/modules/web-console/backend/package.json index 07af45f22d49b..29aa7341e3688 100644 --- a/modules/web-console/backend/package.json +++ b/modules/web-console/backend/package.json @@ -40,6 +40,7 @@ "glob": "7.1.2", "jszip": "3.1.3", "lodash": "4.17.4", + "migrate-mongoose": "3.2.2", "mongoose": "4.11.4", "morgan": "1.8.2", "nconf": "0.8.4", From 0c66344bc752dac98b256dd140fcab95d1662862 Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Thu, 19 Oct 2017 12:36:39 +0300 Subject: [PATCH 046/243] IGNITE-6627 .NET: Fix repeated known metadata updates This closes #2876 --- .../BasicSerializableObjectsTest.cs | 3 +- .../Impl/Binary/BinaryFullTypeDescriptor.cs | 19 ++- .../Binary/BinarySurrogateTypeDescriptor.cs | 8 +- .../Impl/Binary/IBinaryTypeDescriptor.cs | 6 +- .../Impl/Binary/Structure/BinaryStructure.cs | 147 +++++++++--------- .../Structure/BinaryStructureTracker.cs | 16 +- 6 files changed, 97 insertions(+), 102 deletions(-) diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/Serializable/BasicSerializableObjectsTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/Serializable/BasicSerializableObjectsTest.cs index e9b557695d00d..846cd4c53fd19 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/Serializable/BasicSerializableObjectsTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Binary/Serializable/BasicSerializableObjectsTest.cs @@ -44,12 +44,13 @@ public void TestEmptyObject() public void TestEmptyObjectOnline() { using (var ignite = Ignition.Start(TestUtils.GetTestConfiguration())) + using (var ignite2 = Ignition.Start(TestUtils.GetTestConfiguration(name: "1"))) { var cache = ignite.CreateCache("c"); cache[1] = new EmptyObject(); - var res = cache[1]; + var res = ignite2.GetCache("c")[1]; Assert.IsNotNull(res); } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryFullTypeDescriptor.cs 
b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryFullTypeDescriptor.cs index e38b5ba0fc865..50c8c275b4fc2 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryFullTypeDescriptor.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryFullTypeDescriptor.cs @@ -55,7 +55,7 @@ internal class BinaryFullTypeDescriptor : IBinaryTypeDescriptor private readonly string _affKeyFieldName; /** Type structure. */ - private volatile BinaryStructure _writerTypeStruct = BinaryStructure.CreateEmpty(); + private volatile BinaryStructure _writerTypeStruct; /** Type structure. */ private volatile BinaryStructure _readerTypeStructure = BinaryStructure.CreateEmpty(); @@ -230,22 +230,27 @@ public BinaryStructure ReaderTypeStructure } /** */ - public void UpdateWriteStructure(BinaryStructure exp, int pathIdx, - IList updates) + public void UpdateWriteStructure(int pathIdx, IList updates) { lock (this) { - _writerTypeStruct = _writerTypeStruct.Merge(exp, pathIdx, updates); + if (_writerTypeStruct == null) + { + // Null struct serves as an indication of a binary type that has never been sent to the cluster, + // which is important for types without any fields. 
+ _writerTypeStruct = BinaryStructure.CreateEmpty(); + } + + _writerTypeStruct = _writerTypeStruct.Merge(pathIdx, updates); } } /** */ - public void UpdateReadStructure(BinaryStructure exp, int pathIdx, - IList updates) + public void UpdateReadStructure(int pathIdx, IList updates) { lock (this) { - _readerTypeStructure = _readerTypeStructure.Merge(exp, pathIdx, updates); + _readerTypeStructure = _readerTypeStructure.Merge(pathIdx, updates); } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinarySurrogateTypeDescriptor.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinarySurrogateTypeDescriptor.cs index 737c7c452d326..6fece77805562 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinarySurrogateTypeDescriptor.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinarySurrogateTypeDescriptor.cs @@ -134,20 +134,20 @@ public BinaryStructure ReaderTypeStructure } /** */ - public void UpdateWriteStructure(BinaryStructure exp, int pathIdx, IList updates) + public void UpdateWriteStructure(int pathIdx, IList updates) { lock (this) { - _writerTypeStruct = _writerTypeStruct.Merge(exp, pathIdx, updates); + _writerTypeStruct = _writerTypeStruct.Merge(pathIdx, updates); } } /** */ - public void UpdateReadStructure(BinaryStructure exp, int pathIdx, IList updates) + public void UpdateReadStructure(int pathIdx, IList updates) { lock (this) { - _readerTypeStructure = _readerTypeStructure.Merge(exp, pathIdx, updates); + _readerTypeStructure = _readerTypeStructure.Merge(pathIdx, updates); } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/IBinaryTypeDescriptor.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/IBinaryTypeDescriptor.cs index 4bd7e73777051..840fc0859d7ee 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/IBinaryTypeDescriptor.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/IBinaryTypeDescriptor.cs @@ -90,18 +90,16 @@ internal 
interface IBinaryTypeDescriptor /// /// Update write type structure. /// - /// Expected type structure. /// Path index. /// Recorded updates. - void UpdateWriteStructure(BinaryStructure exp, int pathIdx, IList updates); + void UpdateWriteStructure(int pathIdx, IList updates); /// /// Update read type structure. /// - /// Expected type structure. /// Path index. /// Recorded updates. - void UpdateReadStructure(BinaryStructure exp, int pathIdx, IList updates); + void UpdateReadStructure(int pathIdx, IList updates); /// /// Gets the schema. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Structure/BinaryStructure.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Structure/BinaryStructure.cs index 92c841c2d5b55..908059abdce77 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Structure/BinaryStructure.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Structure/BinaryStructure.cs @@ -118,14 +118,12 @@ public int GetFieldId(string fieldName, byte fieldType, ref int pathIdx, int act /// /// Merge updates into a new type structure. /// - /// Expected type structure to apply updates to /// Path index. /// Updates. /// New type structure with updates. - public BinaryStructure Merge(BinaryStructure exp, int pathIdx, - IList updates) + public BinaryStructure Merge(int pathIdx, IList updates) { - if (updates.Count == 0) + if (updates == null || updates.Count == 0) return this; // Algorithm ensures that updates are applied to the same type structure, @@ -137,102 +135,97 @@ public int GetFieldId(string fieldName, byte fieldType, ref int pathIdx, int act // Note that field types are merged anyway to avoid metadata clashes. BinaryStructure res = MergeFieldTypes(updates); + BinaryStructureUpdate firstUpdate = updates[0]; - if (ReferenceEquals(exp, this)) + if (firstUpdate.Index == 0) { - BinaryStructureUpdate firstUpdate = updates[0]; + // Special case: the very first structure update. Simply attach all updates. 
+ Debug.Assert(_paths.Length == 1); + Debug.Assert(_paths[0].Length == 0); + Debug.Assert(pathIdx == 0); - if (firstUpdate.Index == 0) - { - // Special case: the very first structure update. Simply attach all updates. - Debug.Assert(_paths.Length == 1); - Debug.Assert(_paths[0].Length == 0); - Debug.Assert(pathIdx == 0); - - var newPaths = CopyPaths(updates.Count, 0); + var newPaths = CopyPaths(updates.Count, 0); - ApplyUpdatesToPath(newPaths[0], updates); - - res = new BinaryStructure(newPaths, _jumps, res._fieldTypes); - } - else - { - // Get entry where updates should start. - BinaryStructureEntry[] path = _paths[pathIdx]; + ApplyUpdatesToPath(newPaths[0], updates); - BinaryStructureEntry startEntry = default(BinaryStructureEntry); - - if (firstUpdate.Index < path.Length) - startEntry = path[firstUpdate.Index]; + res = new BinaryStructure(newPaths, _jumps, res._fieldTypes); + } + else + { + // Get entry where updates should start. + BinaryStructureEntry[] path = _paths[pathIdx]; - if (startEntry.IsEmpty) - { - // We are on the empty/non-existent entry. Continue the path without branching. - var newPaths = CopyPaths(firstUpdate.Index + updates.Count, 0); + BinaryStructureEntry startEntry = default(BinaryStructureEntry); - ApplyUpdatesToPath(newPaths[pathIdx], updates); + if (firstUpdate.Index < path.Length) + startEntry = path[firstUpdate.Index]; - res = new BinaryStructure(newPaths, _jumps, res._fieldTypes); - } - else if (startEntry.IsJumpTable) - { - // We are on the jump table. Add a new path and record it in the jump table. - - // 1. Prepare new structures. - var newPaths = CopyPaths(firstUpdate.Index + updates.Count, 1); - var newJumps = CopyJumps(0); + if (startEntry.IsEmpty) + { + // We are on the empty/non-existent entry. Continue the path without branching. + var newPaths = CopyPaths(firstUpdate.Index + updates.Count, 0); - // New path will be the last one. 
- int newPathIdx = newPaths.Length - 1; + ApplyUpdatesToPath(newPaths[pathIdx], updates); - // Apply updates to the new path. - ApplyUpdatesToPath(newPaths[newPathIdx], updates); + res = new BinaryStructure(newPaths, _jumps, res._fieldTypes); + } + else if (startEntry.IsJumpTable) + { + // We are on the jump table. Add a new path and record it in the jump table. - // Add the jump to the table. - newJumps[startEntry.Id] = - newJumps[startEntry.Id].CopyAndAdd(firstUpdate.FieldName, newPathIdx); + // 1. Prepare new structures. + var newPaths = CopyPaths(firstUpdate.Index + updates.Count, 1); + var newJumps = CopyJumps(0); - res = new BinaryStructure(newPaths, newJumps, res._fieldTypes); - } - else - { - // We are on existing entry. Need to create a new jump table here and two new paths. + // New path will be the last one. + int newPathIdx = newPaths.Length - 1; - // 1. Prepaare new structures. - var newPaths = CopyPaths(firstUpdate.Index + updates.Count, 2); - var newJumps = CopyJumps(1); + // Apply updates to the new path. + ApplyUpdatesToPath(newPaths[newPathIdx], updates); - // Old path will be moved here. - int oldPathIdx = newPaths.Length - 2; + // Add the jump to the table. + newJumps[startEntry.Id] = + newJumps[startEntry.Id].CopyAndAdd(firstUpdate.FieldName, newPathIdx); - // New path will reside here. - int newPathIdx = newPaths.Length - 1; + res = new BinaryStructure(newPaths, newJumps, res._fieldTypes); + } + else + { + // We are on existing entry. Need to create a new jump table here and two new paths. - // Create new jump table. - int newJumpIdx = newJumps.Length - 1; + // 1. Prepaare new structures. + var newPaths = CopyPaths(firstUpdate.Index + updates.Count, 2); + var newJumps = CopyJumps(1); - newJumps[newJumpIdx] = new BinaryStructureJumpTable(startEntry.Name, oldPathIdx, - firstUpdate.FieldName, newPathIdx); + // Old path will be moved here. 
+ int oldPathIdx = newPaths.Length - 2; - // Re-create old path in two steps: move old path to the new place, then clean the old path. - for (int i = firstUpdate.Index; i < path.Length; i++) - { - newPaths[oldPathIdx][i] = newPaths[pathIdx][i]; + // New path will reside here. + int newPathIdx = newPaths.Length - 1; - if (i == firstUpdate.Index) - // Inject jump table ... - newPaths[pathIdx][i] = new BinaryStructureEntry(newJumpIdx); - else - // ... or just reset. - newPaths[pathIdx][i] = new BinaryStructureEntry(); - } + // Create new jump table. + int newJumpIdx = newJumps.Length - 1; - // Apply updates to the new path. - ApplyUpdatesToPath(newPaths[newPaths.Length - 1], updates); + newJumps[newJumpIdx] = new BinaryStructureJumpTable(startEntry.Name, oldPathIdx, + firstUpdate.FieldName, newPathIdx); - res = new BinaryStructure(newPaths, newJumps, res._fieldTypes); + // Re-create old path in two steps: move old path to the new place, then clean the old path. + for (int i = firstUpdate.Index; i < path.Length; i++) + { + newPaths[oldPathIdx][i] = newPaths[pathIdx][i]; + + if (i == firstUpdate.Index) + // Inject jump table ... + newPaths[pathIdx][i] = new BinaryStructureEntry(newJumpIdx); + else + // ... or just reset. + newPaths[pathIdx][i] = new BinaryStructureEntry(); } + // Apply updates to the new path. 
+ ApplyUpdatesToPath(newPaths[newPaths.Length - 1], updates); + + res = new BinaryStructure(newPaths, newJumps, res._fieldTypes); } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Structure/BinaryStructureTracker.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Structure/BinaryStructureTracker.cs index 3517342be28cf..ee2e7e17047df 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Structure/BinaryStructureTracker.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/Structure/BinaryStructureTracker.cs @@ -68,7 +68,7 @@ public int GetFieldId(string fieldName, byte fieldTypeId = 0) { _curStructAction++; - if (_curStructUpdates == null) + if (_curStructUpdates == null && _portStruct != null) { var fieldId = _portStruct.GetFieldId(fieldName, fieldTypeId, ref _curStructPath, _curStructAction); @@ -86,7 +86,7 @@ public int GetFieldId(string fieldName, byte fieldTypeId = 0) public void UpdateReaderStructure() { if (_curStructUpdates != null) - _desc.UpdateReadStructure(_desc.ReaderTypeStructure, _curStructPath, _curStructUpdates); + _desc.UpdateReadStructure(_curStructPath, _curStructUpdates); } /// @@ -97,7 +97,7 @@ public void UpdateWriterStructure(BinaryWriter writer) { if (_curStructUpdates != null) { - _desc.UpdateWriteStructure(_desc.WriterTypeStructure, _curStructPath, _curStructUpdates); + _desc.UpdateWriteStructure(_curStructPath, _curStructUpdates); var marsh = writer.Marshaller; @@ -115,15 +115,13 @@ public void UpdateWriterStructure(BinaryWriter writer) writer.SaveMetadata(_desc, fields); } } - else + else if (_desc.WriterTypeStructure == null) { - // Special case when the object is with no properties. - // Save meta to Marshaller. + // Empty object (no fields). + // Null WriterTypeStructure indicates that meta has never been sent for this type. writer.Marshaller.GetBinaryTypeHandler(_desc); - - // Save meta to cluster. 
writer.SaveMetadata(_desc, null); - return; + _desc.UpdateWriteStructure(_curStructPath, null); } } From 9c4411af5f6b6bf7686e52d91daa6b82e089d57a Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Thu, 19 Oct 2017 18:42:12 +0300 Subject: [PATCH 047/243] IGNITE-6675 .NET: Fix ignored IgniteConfiguration.IgniteHome This closes #2886 --- .../DeploymentTest.cs | 33 ++----------------- .../IgniteConfigurationTest.cs | 3 ++ .../IgniteManagerTest.cs | 10 +++++- .../Apache.Ignite.Core/IgniteConfiguration.cs | 1 + 4 files changed, 15 insertions(+), 32 deletions(-) diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/DeploymentTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/DeploymentTest.cs index cb97076b14688..1d80c60f1a264 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/DeploymentTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/DeploymentTest.cs @@ -21,6 +21,7 @@ namespace Apache.Ignite.Core.Tests using System.IO; using System.Linq; using Apache.Ignite.Core.Compute; + using Apache.Ignite.Core.Impl; using Apache.Ignite.Core.Impl.Common; using Apache.Ignite.Core.Resource; using Apache.Ignite.Core.Tests.Process; @@ -38,7 +39,7 @@ public class DeploymentTest public void TestCustomDeployment() { // Create temp folder - var folder = GetTempFolder(); + var folder = IgniteUtils.GetTempDirectoryName(); // Copy jars var home = IgniteHome.Resolve(null); @@ -139,36 +140,6 @@ private static void VerifyNodeStarted(string exePath) } } - /// - /// Gets the temporary folder. 
- /// - private static string GetTempFolder() - { - const string prefix = "ig-test-"; - var temp = Path.GetTempPath(); - - for (int i = 0; i < int.MaxValue; i++) - { - { - try - { - var path = Path.Combine(temp, prefix + i); - - if (Directory.Exists(path)) - Directory.Delete(path, true); - - return Directory.CreateDirectory(path).FullName; - } - catch (Exception) - { - // Ignore - } - } - } - - throw new InvalidOperationException(); - } - #pragma warning disable 649 /// /// Function that returns process path. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs index 3fd47728d0ce1..cde216b507064 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs @@ -36,6 +36,7 @@ namespace Apache.Ignite.Core.Tests using Apache.Ignite.Core.Discovery.Tcp.Static; using Apache.Ignite.Core.Events; using Apache.Ignite.Core.Impl; + using Apache.Ignite.Core.Impl.Common; using Apache.Ignite.Core.PersistentStore; using Apache.Ignite.Core.Tests.Plugin; using Apache.Ignite.Core.Transactions; @@ -131,6 +132,7 @@ public void TestAllConfigurationProperties() Assert.AreEqual(ip.Endpoints, resIp.Endpoints.Take(2).Select(x => x.Trim('/')).ToArray()); Assert.AreEqual(cfg.IgniteInstanceName, resCfg.IgniteInstanceName); + Assert.AreEqual(cfg.IgniteHome, resCfg.IgniteHome); Assert.AreEqual(cfg.IncludedEventTypes, resCfg.IncludedEventTypes); Assert.AreEqual(cfg.MetricsExpireTime, resCfg.MetricsExpireTime); Assert.AreEqual(cfg.MetricsHistorySize, resCfg.MetricsHistorySize); @@ -682,6 +684,7 @@ private static IgniteConfiguration GetCustomConfig() TopologyHistorySize = 1234567 }, IgniteInstanceName = "gridName1", + IgniteHome = IgniteHome.Resolve(null), IncludedEventTypes = EventType.DiscoveryAll, MetricsExpireTime = TimeSpan.FromMinutes(7), MetricsHistorySize = 125, diff 
--git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteManagerTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteManagerTest.cs index 2b73da96e38c7..c019f0c644bef 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteManagerTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteManagerTest.cs @@ -19,6 +19,7 @@ namespace Apache.Ignite.Core.Tests { using System; using System.IO; + using Apache.Ignite.Core.Common; using Apache.Ignite.Core.Impl.Common; using NUnit.Framework; @@ -39,7 +40,14 @@ public void TestIgniteHome() try { - Assert.IsTrue(Directory.Exists(IgniteHome.Resolve(null))); + var home = IgniteHome.Resolve(null); + Assert.IsTrue(Directory.Exists(home)); + + // Invalid home. + var cfg = new IgniteConfiguration {IgniteHome = @"c:\foo\bar"}; + var ex = Assert.Throws(() => IgniteHome.Resolve(new IgniteConfiguration(cfg))); + Assert.AreEqual(string.Format( + "IgniteConfiguration.IgniteHome is not valid: '{0}'", cfg.IgniteHome), ex.Message); } finally { diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs index a7a5ff48bbf48..b0fe0dfc363dc 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs @@ -734,6 +734,7 @@ private void CopyLocalProperties(IgniteConfiguration cfg) } SpringConfigUrl = cfg.SpringConfigUrl; + IgniteHome = cfg.IgniteHome; JvmClasspath = cfg.JvmClasspath; JvmOptions = cfg.JvmOptions; Assemblies = cfg.Assemblies; From 5dfeb916036984d3d8e12ad6d2d43e17a19f25ba Mon Sep 17 00:00:00 2001 From: tledkov-gridgain Date: Thu, 19 Oct 2017 22:20:18 +0300 Subject: [PATCH 048/243] IGNITE-6529: JDBC: fixed not-null column metadata. This closes #2884. 
--- .../internal/jdbc2/JdbcMetadataSelfTest.java | 12 ++ .../jdbc/thin/JdbcThinMetadataSelfTest.java | 14 +++ .../jdbc/thin/JdbcThinDatabaseMetadata.java | 89 ++++++++------- .../internal/jdbc2/JdbcDatabaseMetadata.java | 104 +++++++++++++----- 4 files changed, 149 insertions(+), 70 deletions(-) diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcMetadataSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcMetadataSelfTest.java index bdc6644bb64c5..1e5db0a35532b 100755 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcMetadataSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcMetadataSelfTest.java @@ -220,6 +220,8 @@ public void testGetColumns() throws Exception { assertNotNull(rs); + assertEquals(24, rs.getMetaData().getColumnCount()); + Collection names = new ArrayList<>(2); names.add("NAME"); @@ -237,14 +239,20 @@ public void testGetColumns() throws Exception { assertEquals(VARCHAR, rs.getInt("DATA_TYPE")); assertEquals("VARCHAR", rs.getString("TYPE_NAME")); assertEquals(0, rs.getInt("NULLABLE")); + assertEquals(0, rs.getInt(11)); // nullable column by index + assertEquals("NO", rs.getString("IS_NULLABLE")); } else if ("AGE".equals(name)) { assertEquals(INTEGER, rs.getInt("DATA_TYPE")); assertEquals("INTEGER", rs.getString("TYPE_NAME")); assertEquals(0, rs.getInt("NULLABLE")); + assertEquals(0, rs.getInt(11)); // nullable column by index + assertEquals("NO", rs.getString("IS_NULLABLE")); } else if ("ORGID".equals(name)) { assertEquals(INTEGER, rs.getInt("DATA_TYPE")); assertEquals("INTEGER", rs.getString("TYPE_NAME")); assertEquals(1, rs.getInt("NULLABLE")); + assertEquals(1, rs.getInt(11)); // nullable column by index + assertEquals("YES", rs.getString("IS_NULLABLE")); } cnt++; @@ -271,10 +279,14 @@ public void testGetColumns() throws Exception { assertEquals(INTEGER, rs.getInt("DATA_TYPE")); assertEquals("INTEGER", rs.getString("TYPE_NAME")); 
assertEquals(0, rs.getInt("NULLABLE")); + assertEquals(0, rs.getInt(11)); // nullable column by index + assertEquals("NO", rs.getString("IS_NULLABLE")); } else if ("name".equals(name)) { assertEquals(VARCHAR, rs.getInt("DATA_TYPE")); assertEquals("VARCHAR", rs.getString("TYPE_NAME")); assertEquals(1, rs.getInt("NULLABLE")); + assertEquals(1, rs.getInt(11)); // nullable column by index + assertEquals("YES", rs.getString("IS_NULLABLE")); } cnt++; diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java index 4e1ae4d59e71b..2fd40d1805d2a 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java @@ -260,6 +260,10 @@ public void testGetColumns() throws Exception { ResultSet rs = meta.getColumns("", "pers", "PERSON", "%"); + ResultSetMetaData rsMeta = rs.getMetaData(); + + assert rsMeta.getColumnCount() == 24 : "Invalid columns count: " + rsMeta.getColumnCount(); + assert rs != null; Collection names = new ArrayList<>(2); @@ -279,24 +283,34 @@ public void testGetColumns() throws Exception { assert rs.getInt("DATA_TYPE") == VARCHAR; assert "VARCHAR".equals(rs.getString("TYPE_NAME")); assert rs.getInt("NULLABLE") == 0; + assert rs.getInt(11) == 0; // nullable column by index + assert rs.getString("IS_NULLABLE").equals("NO"); } else if ("ORGID".equals(name)) { assert rs.getInt("DATA_TYPE") == INTEGER; assert "INTEGER".equals(rs.getString("TYPE_NAME")); assert rs.getInt("NULLABLE") == 1; + assert rs.getInt(11) == 1; // nullable column by index + assert rs.getString("IS_NULLABLE").equals("YES"); } else if ("AGE".equals(name)) { assert rs.getInt("DATA_TYPE") == INTEGER; assert "INTEGER".equals(rs.getString("TYPE_NAME")); assert rs.getInt("NULLABLE") == 0; + assert rs.getInt(11) == 0; // nullable column by index 
+ assert rs.getString("IS_NULLABLE").equals("NO"); } else if ("_KEY".equals(name)) { assert rs.getInt("DATA_TYPE") == OTHER; assert "OTHER".equals(rs.getString("TYPE_NAME")); assert rs.getInt("NULLABLE") == 0; + assert rs.getInt(11) == 0; // nullable column by index + assert rs.getString("IS_NULLABLE").equals("NO"); } else if ("_VAL".equals(name)) { assert rs.getInt("DATA_TYPE") == OTHER; assert "OTHER".equals(rs.getString("TYPE_NAME")); assert rs.getInt("NULLABLE") == 0; + assert rs.getInt(11) == 0; // nullable column by index + assert rs.getString("IS_NULLABLE").equals("NO"); } cnt++; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java index 8b26900d9b9ef..cfc3b689b5cd7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java @@ -793,26 +793,31 @@ private List tableRow(JdbcTableMeta tblMeta) { conn.ensureNotClosed(); final List meta = Arrays.asList( - new JdbcColumnMeta(null, null, "TABLE_CAT", String.class), - new JdbcColumnMeta(null, null, "TABLE_SCHEM", String.class), - new JdbcColumnMeta(null, null, "TABLE_NAME", String.class), - new JdbcColumnMeta(null, null, "COLUMN_NAME", String.class), - new JdbcColumnMeta(null, null, "DATA_TYPE", Short.class), - new JdbcColumnMeta(null, null, "TYPE_NAME", String.class), - new JdbcColumnMeta(null, null, "COLUMN_SIZE", Integer.class), - new JdbcColumnMeta(null, null, "DECIMAL_DIGITS", Integer.class), - new JdbcColumnMeta(null, null, "NUM_PREC_RADIX", Short.class), - new JdbcColumnMeta(null, null, "NULLABLE", Short.class), - new JdbcColumnMeta(null, null, "REMARKS", String.class), - new JdbcColumnMeta(null, null, "COLUMN_DEF", String.class), - new JdbcColumnMeta(null, null, "CHAR_OCTET_LENGTH", Integer.class), - new 
JdbcColumnMeta(null, null, "ORDINAL_POSITION", Integer.class), - new JdbcColumnMeta(null, null, "IS_NULLABLE", String.class), - new JdbcColumnMeta(null, null, "SCOPE_CATLOG", String.class), - new JdbcColumnMeta(null, null, "SCOPE_SCHEMA", String.class), - new JdbcColumnMeta(null, null, "SCOPE_TABLE", String.class), - new JdbcColumnMeta(null, null, "SOURCE_DATA_TYPE", Short.class), - new JdbcColumnMeta(null, null, "IS_AUTOINCREMENT", String.class)); + new JdbcColumnMeta(null, null, "TABLE_CAT", String.class), // 1 + new JdbcColumnMeta(null, null, "TABLE_SCHEM", String.class), // 2 + new JdbcColumnMeta(null, null, "TABLE_NAME", String.class), // 3 + new JdbcColumnMeta(null, null, "COLUMN_NAME", String.class), // 4 + new JdbcColumnMeta(null, null, "DATA_TYPE", Short.class), // 5 + new JdbcColumnMeta(null, null, "TYPE_NAME", String.class), // 6 + new JdbcColumnMeta(null, null, "COLUMN_SIZE", Integer.class), // 7 + new JdbcColumnMeta(null, null, "BUFFER_LENGTH ", Integer.class), // 8 + new JdbcColumnMeta(null, null, "DECIMAL_DIGITS", Integer.class), // 9 + new JdbcColumnMeta(null, null, "NUM_PREC_RADIX", Short.class), // 10 + new JdbcColumnMeta(null, null, "NULLABLE", Short.class), // 11 + new JdbcColumnMeta(null, null, "REMARKS", String.class), // 12 + new JdbcColumnMeta(null, null, "COLUMN_DEF", String.class), // 13 + new JdbcColumnMeta(null, null, "SQL_DATA_TYPE", Integer.class), // 14 + new JdbcColumnMeta(null, null, "SQL_DATETIME_SUB", Integer.class), // 15 + new JdbcColumnMeta(null, null, "CHAR_OCTET_LENGTH", Integer.class), // 16 + new JdbcColumnMeta(null, null, "ORDINAL_POSITION", Integer.class), // 17 + new JdbcColumnMeta(null, null, "IS_NULLABLE", String.class), // 18 + new JdbcColumnMeta(null, null, "SCOPE_CATLOG", String.class), // 19 + new JdbcColumnMeta(null, null, "SCOPE_SCHEMA", String.class), // 20 + new JdbcColumnMeta(null, null, "SCOPE_TABLE", String.class), // 21 + new JdbcColumnMeta(null, null, "SOURCE_DATA_TYPE", Short.class), // 22 + new 
JdbcColumnMeta(null, null, "IS_AUTOINCREMENT", String.class), // 23 + new JdbcColumnMeta(null, null, "IS_GENERATEDCOLUMN ", String.class) // 24 + ); if (!validCatalogPattern(catalog)) return new JdbcThinResultSet(Collections.>emptyList(), meta); @@ -835,26 +840,30 @@ private List tableRow(JdbcTableMeta tblMeta) { private List columnRow(JdbcColumnMeta colMeta, int pos) { List row = new ArrayList<>(20); - row.add((String)null); - row.add(colMeta.schemaName()); - row.add(colMeta.tableName()); - row.add(colMeta.columnName()); - row.add(colMeta.dataType()); - row.add(colMeta.dataTypeName()); - row.add((Integer)null); - row.add((Integer)null); - row.add(10); - row.add(colMeta.isNullable() ? 1 : 0); - row.add((String)null); - row.add((String)null); - row.add(Integer.MAX_VALUE); - row.add(pos); - row.add("YES"); - row.add((String)null); - row.add((String)null); - row.add((String)null); - row.add((Short)null); - row.add("NO"); + row.add((String)null); // 1. TABLE_CAT + row.add(colMeta.schemaName()); // 2. TABLE_SCHEM + row.add(colMeta.tableName()); // 3. TABLE_NAME + row.add(colMeta.columnName()); // 4. COLUMN_NAME + row.add(colMeta.dataType()); // 5. DATA_TYPE + row.add(colMeta.dataTypeName()); // 6. TYPE_NAME + row.add((Integer)null); // 7. COLUMN_SIZE + row.add((Integer)null); // 8. BUFFER_LENGTH + row.add((Integer)null); // 9. DECIMAL_DIGITS + row.add(10); // 10. NUM_PREC_RADIX + row.add(colMeta.isNullable() ? columnNullable : columnNoNulls); // 11. NULLABLE + row.add((String)null); // 12. REMARKS + row.add((String)null); // 13. COLUMN_DEF + row.add(colMeta.dataType()); // 14. SQL_DATA_TYPE + row.add((Integer)null); // 15. SQL_DATETIME_SUB + row.add(Integer.MAX_VALUE); // 16. CHAR_OCTET_LENGTH + row.add(pos); // 17. ORDINAL_POSITION + row.add(colMeta.isNullable() ? "YES" : "NO"); // 18. IS_NULLABLE + row.add((String)null); // 19. SCOPE_CATALOG + row.add((String)null); // 20. SCOPE_SCHEMA + row.add((String)null); // 21. SCOPE_TABLE + row.add((Short)null); // 22. 
SOURCE_DATA_TYPE + row.add("NO"); // 23. IS_AUTOINCREMENT + row.add("NO"); // 23. IS_GENERATEDCOLUMN return row; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcDatabaseMetadata.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcDatabaseMetadata.java index 2fe24bb1def15..eb55e4f073f75 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcDatabaseMetadata.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcDatabaseMetadata.java @@ -815,16 +815,56 @@ private List tableRow(String schema, String tbl) { return new JdbcResultSet(true, null, conn.createStatement0(), Collections.emptyList(), - Arrays.asList("TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME", "DATA_TYPE", - "TYPE_NAME", "COLUMN_SIZE", "DECIMAL_DIGITS", "NUM_PREC_RADIX", "NULLABLE", - "REMARKS", "COLUMN_DEF", "CHAR_OCTET_LENGTH", "ORDINAL_POSITION", "IS_NULLABLE", - "SCOPE_CATLOG", "SCOPE_SCHEMA", "SCOPE_TABLE", "SOURCE_DATA_TYPE", "IS_AUTOINCREMENT"), - Arrays.asList(String.class.getName(), String.class.getName(), String.class.getName(), - String.class.getName(), Integer.class.getName(), String.class.getName(), Integer.class.getName(), - Integer.class.getName(), Integer.class.getName(), Integer.class.getName(), String.class.getName(), - String.class.getName(), Integer.class.getName(), Integer.class.getName(), String.class.getName(), - String.class.getName(), String.class.getName(), String.class.getName(), Short.class.getName(), - String.class.getName()), + Arrays.asList( + "TABLE_CAT", // 1 + "TABLE_SCHEM", // 2 + "TABLE_NAME", // 3 + "COLUMN_NAME", // 4 + "DATA_TYPE", // 5 + "TYPE_NAME", // 6 + "COLUMN_SIZE", // 7 + "BUFFER_LENGTH", // 8 + "DECIMAL_DIGITS", // 9 + "NUM_PREC_RADIX", // 10 + "NULLABLE", // 11 + "REMARKS", // 12 + "COLUMN_DEF", // 13 + "SQL_DATA_TYPE", // 14 + "SQL_DATETIME_SUB", // 15 + "CHAR_OCTET_LENGTH", // 16 + "ORDINAL_POSITION", // 17 + "IS_NULLABLE", // 18 + "SCOPE_CATLOG", // 19 + 
"SCOPE_SCHEMA", // 20 + "SCOPE_TABLE", // 21 + "SOURCE_DATA_TYPE", // 22 + "IS_AUTOINCREMENT", // 23 + "IS_GENERATEDCOLUMN"), // 23 + Arrays.asList( + String.class.getName(), // 1 + String.class.getName(), // 2 + String.class.getName(), // 3 + String.class.getName(), // 4 + Integer.class.getName(), // 5 + String.class.getName(), // 6 + Integer.class.getName(), // 7 + Integer.class.getName(), // 8 + Integer.class.getName(), // 9 + Integer.class.getName(), // 10 + Integer.class.getName(), // 11 + String.class.getName(), // 12 + String.class.getName(), // 13 + Integer.class.getName(), // 14 + Integer.class.getName(), // 15 + Integer.class.getName(), // 16 + Integer.class.getName(), // 17 + String.class.getName(), // 18 + String.class.getName(), // 19 + String.class.getName(), // 20 + String.class.getName(), // 21 + Short.class.getName(), // 22 + String.class.getName(), // 23 + String.class.getName()), // 24 rows, true ); } @@ -843,26 +883,30 @@ private List columnRow(String schema, String tbl, String col, int type, boolean nullable, int pos) { List row = new ArrayList<>(20); - row.add(null); - row.add(schema); - row.add(tbl); - row.add(col); - row.add(type); - row.add(typeName); - row.add(null); - row.add(null); - row.add(10); - row.add(nullable ? columnNullable : columnNoNulls); - row.add(null); - row.add(null); - row.add(Integer.MAX_VALUE); - row.add(pos); - row.add("YES"); - row.add(null); - row.add(null); - row.add(null); - row.add(null); - row.add("NO"); + row.add(null); // 1. TABLE_CAT + row.add(schema); // 2. TABLE_SCHEM + row.add(tbl); // 3. TABLE_NAME + row.add(col); // 4. COLUMN_NAME + row.add(type); // 5. DATA_TYPE + row.add(typeName); // 6. TYPE_NAME + row.add(null); // 7. COLUMN_SIZE + row.add(null); // 8. BUFFER_LENGTH + row.add(null); // 9. DECIMAL_DIGITS + row.add(10); // 10. NUM_PREC_RADIX + row.add(nullable ? columnNullable : columnNoNulls); // 11. NULLABLE + row.add(null); // 12. REMARKS + row.add(null); // 13. COLUMN_DEF + row.add(type); // 14. 
SQL_DATA_TYPE + row.add(null); // 15. SQL_DATETIME_SUB + row.add(Integer.MAX_VALUE); // 16. CHAR_OCTET_LENGTH + row.add(pos); // 17. ORDINAL_POSITION + row.add(nullable ? "YES" : "NO"); // 18. IS_NULLABLE + row.add(null); // 19. SCOPE_CATALOG + row.add(null); // 20. SCOPE_SCHEMA + row.add(null); // 21. SCOPE_TABLE + row.add(null); // 22. SOURCE_DATA_TYPE + row.add("NO"); // 23. IS_AUTOINCREMENT + row.add("NO"); // 24. IS_GENERATEDCOLUMN return row; } From 008d87057734953b4e30059841a14eb2fbc3ddb7 Mon Sep 17 00:00:00 2001 From: devozerov Date: Thu, 19 Oct 2017 22:30:31 +0300 Subject: [PATCH 049/243] IGNITE-6684: Renamed "ignitesql.sh|bat" to "sqlline.sh|bat". --- modules/sqlline/bin/{ignitesql.bat => sqlline.bat} | 2 +- modules/sqlline/bin/{ignitesql.sh => sqlline.sh} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename modules/sqlline/bin/{ignitesql.bat => sqlline.bat} (98%) rename modules/sqlline/bin/{ignitesql.sh => sqlline.sh} (100%) diff --git a/modules/sqlline/bin/ignitesql.bat b/modules/sqlline/bin/sqlline.bat similarity index 98% rename from modules/sqlline/bin/ignitesql.bat rename to modules/sqlline/bin/sqlline.bat index 828e93a1217fb..fc6c14927d834 100644 --- a/modules/sqlline/bin/ignitesql.bat +++ b/modules/sqlline/bin/sqlline.bat @@ -94,7 +94,7 @@ if /i "%SCRIPTS_HOME%\" == "%~dp0" goto setProgName :: :: Set program name. :: -set PROG_NAME=ignitesql.bat +set PROG_NAME=sqlline.bat if "%OS%" == "Windows_NT" set PROG_NAME=%~nx0% :run diff --git a/modules/sqlline/bin/ignitesql.sh b/modules/sqlline/bin/sqlline.sh similarity index 100% rename from modules/sqlline/bin/ignitesql.sh rename to modules/sqlline/bin/sqlline.sh From 1b8abd214ed2afcd3fd1f6a4c71a19d6fe1a4b01 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Fri, 20 Oct 2017 11:23:23 +0700 Subject: [PATCH 050/243] IGNITE-6647 Added missing Mongo injector. 
(cherry picked from commit 173ecef) --- modules/web-console/backend/index.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/web-console/backend/index.js b/modules/web-console/backend/index.js index 06a38f8f5f70f..84547892da091 100644 --- a/modules/web-console/backend/index.js +++ b/modules/web-console/backend/index.js @@ -122,8 +122,8 @@ const migrate = (dbConnectionUri, group, migrationsPath) => { }); }; -injector('settings') - .then(({mongoUrl}) => { +Promise.all([injector('settings'), injector('mongo')]) + .then(([{mongoUrl}]) => { return migrate(mongoUrl, 'Ignite', path.join(__dirname, 'migrations')) .then(() => migrate(mongoUrl, 'Ignite Modules', path.join(igniteModules, 'migrations'))); }) From 745677631d260cb51cb601ae38af8528aa5d5c66 Mon Sep 17 00:00:00 2001 From: Ivan Rakov Date: Fri, 20 Oct 2017 10:29:57 +0300 Subject: [PATCH 051/243] IGNITE-6030 Allow enabling persistence per data region --- ...-policies.xml => example-data-regions.xml} | 40 +- .../example-persistent-store.xml | 14 +- ...esExample.java => DataRegionsExample.java} | 55 +- .../examples/CacheExamplesSelfTest.java | 6 +- .../jmh/tree/BPlusTreeBenchmark.java | 8 +- .../DummyPersistenceCompatibilityTest.java | 8 +- .../org/apache/ignite/DataRegionMetrics.java | 119 +++ .../ignite/DataRegionMetricsAdapter.java | 106 +++ .../org/apache/ignite/DataStorageMetrics.java | 114 +++ .../ignite/DataStorageMetricsAdapter.java | 101 ++ .../main/java/org/apache/ignite/Ignite.java | 48 +- .../apache/ignite/IgniteSystemProperties.java | 4 +- .../java/org/apache/ignite/MemoryMetrics.java | 11 +- .../org/apache/ignite/PersistenceMetrics.java | 4 +- .../configuration/CacheConfiguration.java | 36 +- .../configuration/DataPageEvictionMode.java | 8 +- .../DataRegionConfiguration.java | 406 ++++++++ .../DataStorageConfiguration.java | 882 ++++++++++++++++++ .../configuration/IgniteConfiguration.java | 46 +- .../configuration/MemoryConfiguration.java | 9 +- 
.../MemoryPolicyConfiguration.java | 32 +- .../PersistentStoreConfiguration.java | 4 +- .../org/apache/ignite/igfs/IgfsMetrics.java | 4 +- .../apache/ignite/internal/IgniteKernal.java | 77 +- .../ignite/internal/IgniteNodeAttributes.java | 4 + .../apache/ignite/internal/IgnitionEx.java | 132 ++- .../internal/MarshallerContextImpl.java | 3 +- .../discovery/GridDiscoveryManager.java | 31 +- .../pagemem/impl/PageMemoryNoStoreImpl.java | 32 +- .../cache/CacheAffinitySharedManager.java | 4 +- .../processors/cache/CacheGroupContext.java | 29 +- .../processors/cache/CacheGroupData.java | 17 +- .../cache/CacheGroupDescriptor.java | 19 +- .../processors/cache/ClusterCachesInfo.java | 13 +- .../processors/cache/GridCacheAdapter.java | 3 +- .../processors/cache/GridCacheContext.java | 8 +- .../processors/cache/GridCacheMapEntry.java | 10 +- .../processors/cache/GridCacheProcessor.java | 55 +- .../processors/cache/GridCacheUtils.java | 55 ++ .../cache/IgniteCacheOffheapManagerImpl.java | 6 +- .../cache/binary/BinaryMetadataFileStore.java | 7 +- .../GridDistributedTxRemoteAdapter.java | 5 +- .../dht/GridDhtLocalPartition.java | 2 +- .../dht/atomic/GridDhtAtomicCache.java | 3 +- .../distributed/near/GridNearGetRequest.java | 2 +- .../distributed/near/GridNearLockRequest.java | 2 +- .../distributed/near/GridNearTxLocal.java | 2 +- .../near/GridNearTxPrepareRequest.java | 2 +- .../local/atomic/GridLocalAtomicCache.java | 2 +- .../persistence/CacheDataRowAdapter.java | 2 +- .../{MemoryPolicy.java => DataRegion.java} | 24 +- ...csImpl.java => DataRegionMetricsImpl.java} | 20 +- ....java => DataRegionMetricsMXBeanImpl.java} | 32 +- ...ot.java => DataRegionMetricsSnapshot.java} | 6 +- ...sImpl.java => DataStorageMetricsImpl.java} | 6 +- ...t.java => DataStorageMetricsSnapshot.java} | 8 +- .../GridCacheDatabaseSharedManager.java | 166 ++-- .../persistence/GridCacheOffheapManager.java | 22 +- .../IgniteCacheDatabaseSharedManager.java | 538 +++++------ .../cache/persistence/RowStore.java 
| 38 +- .../evict/FairFifoPageEvictionTracker.java | 6 +- .../evict/PageAbstractEvictionTracker.java | 6 +- .../evict/Random2LruPageEvictionTracker.java | 8 +- .../evict/RandomLruPageEvictionTracker.java | 8 +- .../persistence/file/AsyncFileIOFactory.java | 28 +- .../cache/persistence/file/FilePageStore.java | 6 +- .../file/FilePageStoreManager.java | 12 +- .../persistence/file/FilePageStoreV2.java | 4 +- .../file/FileVersionCheckingFactory.java | 6 +- .../filename/PdsConsistentIdProcessor.java | 11 +- .../persistence/freelist/FreeListImpl.java | 12 +- .../cache/persistence/freelist/PagesList.java | 7 +- .../persistence/pagemem/PageMemoryImpl.java | 8 +- .../wal/FileWriteAheadLogManager.java | 77 +- .../wal/reader/IgniteWalIteratorFactory.java | 12 +- .../reader/StandaloneGridKernalContext.java | 11 +- .../cache/ratemetrics/HitRateMetrics.java | 2 +- .../transactions/IgniteTxLocalAdapter.java | 2 +- .../processors/cache/tree/CacheDataTree.java | 4 +- .../cache/tree/PendingEntriesTree.java | 2 +- .../cluster/GridClusterStateProcessor.java | 3 +- .../processors/igfs/IgfsDataManager.java | 4 +- .../processors/query/GridQueryProcessor.java | 3 +- .../visor/cache/VisorCacheConfiguration.java | 8 +- .../visor/cache/VisorMemoryMetrics.java | 6 +- .../node/VisorDataRegionConfiguration.java | 225 +++++ .../node/VisorDataStorageConfiguration.java | 453 +++++++++ .../visor/node/VisorGridConfiguration.java | 29 +- .../visor/node/VisorMemoryConfiguration.java | 26 +- .../node/VisorMemoryPolicyConfiguration.java | 10 +- .../visor/node/VisorNodeDataCollectorJob.java | 6 +- .../node/VisorNodeDataCollectorJobResult.java | 4 +- .../VisorNodeDataCollectorTaskResult.java | 4 +- .../visor/node/VisorPersistenceMetrics.java | 6 +- .../VisorPersistentStoreConfiguration.java | 22 +- .../mxbean/DataRegionMetricsMXBean.java | 139 +++ .../mxbean/DataStorageMetricsMXBean.java | 121 +++ .../ignite/mxbean/MemoryMetricsMXBean.java | 2 + .../mxbean/PersistenceMetricsMXBean.java | 2 + 
.../resources/META-INF/classnames.properties | 6 +- .../core/src/test/config/examples.properties | 2 +- .../ignite/cache/LargeEntryUpdateTest.java | 6 +- .../internal/ClusterNodeMetricsSelfTest.java | 8 +- .../IgniteSlowClientDetectionSelfTest.java | 6 +- .../impl/PageMemoryNoLoadSelfTest.java | 8 +- .../cache/CacheClientStoreSelfTest.java | 4 +- .../cache/CacheConfigurationLeakTest.java | 15 +- ... => CacheDataRegionConfigurationTest.java} | 48 +- .../cache/CacheStopAndDestroySelfTest.java | 10 +- ...rageConfigurationConsistencySelfTest.java} | 10 +- .../IgniteClusterActivateDeactivateTest.java | 32 +- ...ActivateDeactivateTestWithPersistence.java | 30 +- .../MemoryPolicyConfigValidationTest.java | 24 +- ...finityCoordinatorDynamicStartStopTest.java | 15 +- .../distributed/Cache64kPartitionsTest.java | 14 +- .../CacheLateAffinityAssignmentTest.java | 9 +- .../distributed/CacheStartOnJoinTest.java | 9 +- .../paged/PageEvictionAbstractTest.java | 25 +- .../IgniteCacheLargeValueExpireTest.java | 6 +- ... 
=> IgniteDataStorageMetricsSelfTest.java} | 76 +- ...PdsBinaryMetadataOnClusterRestartTest.java | 10 +- ...IgnitePdsCacheRebalancingAbstractTest.java | 25 +- .../IgnitePdsClientNearCachePutGetTest.java | 6 +- .../IgnitePdsContinuousRestartTest.java | 28 +- .../IgnitePdsDynamicCacheTest.java | 28 +- ...IgnitePdsExchangeDuringCheckpointTest.java | 41 +- ...rshallerMappingRestoreOnNodeStartTest.java | 6 +- .../IgnitePdsMultiNodePutGetRestartTest.java | 24 +- .../persistence/IgnitePdsPageSizesTest.java | 29 +- ...itePdsRecoveryAfterFileCorruptionTest.java | 39 +- .../IgnitePdsRemoveDuringRebalancingTest.java | 35 +- ...itePdsSingleNodePutGetPersistenceTest.java | 6 +- ...tePersistenceSequentialCheckpointTest.java | 6 +- .../IgnitePersistentStoreCacheGroupsTest.java | 16 +- ...nitePersistentStoreDataStructuresTest.java | 23 +- .../MemoryPolicyInitializationTest.java | 22 +- .../db/IgnitePdsCacheRestoreTest.java | 45 +- .../IgnitePdsMultiNodePutGetRestartTest.java | 23 +- ...sPageEvictionDuringPartitionClearTest.java | 29 +- .../db/IgnitePdsPageEvictionTest.java | 30 +- ...PdsRebalancingOnNotStableTopologyTest.java | 28 +- .../db/IgnitePdsTransactionsHangTest.java | 26 +- .../db/IgnitePdsWholeClusterRestartTest.java | 26 +- ...ultPageSizeBackwardsCompatibilityTest.java | 21 +- .../file/IgnitePdsCacheIntegrationTest.java | 29 +- ...pointSimulationWithRealCpDisabledTest.java | 25 +- .../db/file/IgnitePdsEvictionTest.java | 25 +- .../file/IgnitePdsNoActualWalHistoryTest.java | 22 +- .../file/IgnitePdsThreadInterruptionTest.java | 51 +- .../IgniteUidAsConsistentIdMigrationTest.java | 28 +- .../db/wal/IgnitePdsWalTlbTest.java | 30 +- .../db/wal/IgniteWalFlushFailoverTest.java | 29 +- .../wal/IgniteWalHistoryReservationsTest.java | 26 +- .../db/wal/IgniteWalRecoveryPPCTest.java | 321 +++++++ .../IgniteWalRecoverySeveralRestartsTest.java | 29 +- .../db/wal/IgniteWalRecoveryTest.java | 33 +- .../wal/IgniteWalSerializerVersionTest.java | 8 +- 
.../wal/WalRecoveryTxLogicalRecordsTest.java | 18 +- .../db/wal/reader/IgniteWalReaderTest.java | 50 +- .../db/wal/reader/MockWalIteratorFactory.java | 14 +- .../pagemem/BPlusTreePageMemoryImplTest.java | 6 +- .../BPlusTreeReuseListPageMemoryImplTest.java | 6 +- .../MetadataStoragePageMemoryImplTest.java | 6 +- .../pagemem/PageMemoryImplNoLoadTest.java | 6 +- .../pagemem/PageMemoryImplTest.java | 6 +- .../PagesWriteThrottleSandboxTest.java | 40 +- .../pagemem/PagesWriteThrottleSmokeTest.java | 42 +- .../AbstractNodeJoinTemplate.java | 8 +- .../IgniteChangeGlobalStateAbstractTest.java | 31 +- .../IgniteChangeGlobalStateServiceTest.java | 2 + .../IgniteStandByClusterTest.java | 12 +- .../extended/GridActivateExtensionTest.java | 34 +- ...iteAbstractStandByClientReconnectTest.java | 9 +- ...teCacheContinuousQueryBackupQueueTest.java | 6 +- .../transactions/TxDeadlockCauseTest.java | 15 +- .../TxPessimisticDeadlockDetectionTest.java | 21 +- .../database/BPlusTreeSelfTest.java | 8 +- ...st.java => DataRegionMetricsSelfTest.java} | 22 +- .../database/FreeListImplSelfTest.java | 16 +- .../database/IgniteDbAbstractTest.java | 10 +- .../IgniteDbDynamicCacheSelfTest.java | 18 +- .../IgniteDbMemoryLeakAbstractTest.java | 14 +- .../database/MetadataStorageSelfTest.java | 8 +- .../SwapPathConstructionSelfTest.java | 28 +- .../processors/igfs/IgfsIgniteMock.java | 25 +- .../processors/igfs/IgfsSizeSelfTest.java | 12 +- .../testframework/junits/IgniteMock.java | 25 +- .../junits/multijvm/IgniteProcessProxy.java | 25 +- .../testsuites/IgniteBasicTestSuite.java | 4 +- .../testsuites/IgniteCacheTestSuite.java | 4 +- .../testsuites/IgniteCacheTestSuite2.java | 4 +- .../testsuites/IgnitePdsTestSuite2.java | 4 +- .../query/h2/database/H2TreeIndex.java | 2 +- ...NodeWithIndexingPutGetPersistenceTest.java | 6 +- ...ributedPartitionQueryAbstractSelfTest.java | 8 +- .../IgniteCacheQueryNodeRestartSelfTest2.java | 8 +- .../index/DynamicColumnsAbstractTest.java | 16 +- 
.../index/DynamicIndexAbstractSelfTest.java | 16 +- .../cache/index/LongIndexNameTest.java | 4 +- ...bSingleNodeWithIndexingWalRestoreTest.java | 11 +- ...eQueryWithMultipleClassesPerCacheTest.java | 8 +- .../IgnitePersistentStoreSchemaLoadTest.java | 13 +- .../query/IgniteSqlNotNullConstraintTest.java | 4 +- .../h2/database/InlineIndexHelperTest.java | 20 +- .../IgnitePdsWithIndexingCoreTestSuite.java | 3 + .../IgniteConfigurationTest.cs | 2 - .../http/jetty/GridJettyObjectMapper.java | 3 + .../org/apache/ignite/IgniteSpringBean.java | 27 +- .../top/VisorActivationCommandSpec.scala | 13 +- modules/web-console/backend/app/mongo.js | 55 ++ .../page-configure-basic/controller.js | 10 +- .../generator/AbstractTransformer.js | 5 + .../generator/ConfigurationGenerator.js | 108 ++- .../generator/defaults/Cluster.service.js | 40 + .../configuration/clusters/data-storage.pug | 255 +++++ .../states/configuration/clusters/memory.pug | 4 +- .../configuration/clusters/persistence.pug | 4 +- .../frontend/app/services/Clusters.js | 6 + .../frontend/app/services/Version.service.js | 6 +- .../controllers/clusters-controller.js | 69 +- .../views/configuration/clusters.tpl.pug | 8 +- .../yardstick/IgniteBenchmarkArguments.java | 8 +- .../apache/ignite/yardstick/IgniteNode.java | 15 +- 223 files changed, 5730 insertions(+), 1934 deletions(-) rename examples/config/{example-memory-policies.xml => example-data-regions.xml} (77%) rename examples/src/main/java/org/apache/ignite/examples/datagrid/{MemoryPoliciesExample.java => DataRegionsExample.java} (67%) create mode 100644 modules/core/src/main/java/org/apache/ignite/DataRegionMetrics.java create mode 100644 modules/core/src/main/java/org/apache/ignite/DataRegionMetricsAdapter.java create mode 100644 modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java create mode 100644 modules/core/src/main/java/org/apache/ignite/DataStorageMetricsAdapter.java create mode 100644 
modules/core/src/main/java/org/apache/ignite/configuration/DataRegionConfiguration.java create mode 100644 modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java rename modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/{MemoryPolicy.java => DataRegion.java} (75%) rename modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/{MemoryMetricsImpl.java => DataRegionMetricsImpl.java} (91%) rename modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/{MemoryMetricsMXBeanImpl.java => DataRegionMetricsMXBeanImpl.java} (77%) rename modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/{MemoryMetricsSnapshot.java => DataRegionMetricsSnapshot.java} (94%) rename modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/{PersistenceMetricsImpl.java => DataStorageMetricsImpl.java} (97%) rename modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/{PersistenceMetricsSnapshot.java => DataStorageMetricsSnapshot.java} (94%) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataRegionConfiguration.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataStorageConfiguration.java create mode 100644 modules/core/src/main/java/org/apache/ignite/mxbean/DataRegionMetricsMXBean.java create mode 100644 modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java rename modules/core/src/test/java/org/apache/ignite/internal/processors/cache/{CacheMemoryPolicyConfigurationTest.java => CacheDataRegionConfigurationTest.java} (73%) rename modules/core/src/test/java/org/apache/ignite/internal/processors/cache/{GridMemoryConfigurationConsistencySelfTest.java => GridDataStorageConfigurationConsistencySelfTest.java} (86%) rename 
modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/{IgnitePersistenceMetricsSelfTest.java => IgniteDataStorageMetricsSelfTest.java} (74%) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryPPCTest.java rename modules/core/src/test/java/org/apache/ignite/internal/processors/database/{MemoryMetricsSelfTest.java => DataRegionMetricsSelfTest.java} (93%) create mode 100644 modules/web-console/frontend/app/modules/states/configuration/clusters/data-storage.pug diff --git a/examples/config/example-memory-policies.xml b/examples/config/example-data-regions.xml similarity index 77% rename from examples/config/example-memory-policies.xml rename to examples/config/example-data-regions.xml index 122300f734332..4ce71efd69bdd 100644 --- a/examples/config/example-memory-policies.xml +++ b/examples/config/example-data-regions.xml @@ -18,7 +18,7 @@ --> - - - - + + + + + + + + + + - - + + - - - - - - - - + @@ -68,14 +66,14 @@ This memory region is backed by a memory-mapped file which names is passed via 'swapFilePath' parameter. 
--> - + - + diff --git a/examples/config/persistentstore/example-persistent-store.xml b/examples/config/persistentstore/example-persistent-store.xml index 79138b085e3de..85580e47b7669 100644 --- a/examples/config/persistentstore/example-persistent-store.xml +++ b/examples/config/persistentstore/example-persistent-store.xml @@ -23,13 +23,13 @@ http://www.springframework.org/schema/beans/spring-beans.xsd"> - - - - - - - + + + + + + + diff --git a/examples/src/main/java/org/apache/ignite/examples/datagrid/MemoryPoliciesExample.java b/examples/src/main/java/org/apache/ignite/examples/datagrid/DataRegionsExample.java similarity index 67% rename from examples/src/main/java/org/apache/ignite/examples/datagrid/MemoryPoliciesExample.java rename to examples/src/main/java/org/apache/ignite/examples/datagrid/DataRegionsExample.java index 045f88b81e33f..5675602f627cb 100644 --- a/examples/src/main/java/org/apache/ignite/examples/datagrid/MemoryPoliciesExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/datagrid/DataRegionsExample.java @@ -24,30 +24,30 @@ import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.examples.ExampleNodeStartup; /** * This example demonstrates how to tweak particular settings of Apache Ignite page memory using - * {@link MemoryConfiguration} and set up several memory policies for different caches with - * {@link MemoryPolicyConfiguration}. + * {@link DataStorageConfiguration} and set up several data regions for different caches with + * {@link DataRegionConfiguration}. *

      * Additional remote nodes can be started with special configuration file which - * enables P2P class loading: {@code 'ignite.{sh|bat} example-memory-policies.xml'}. + * enables P2P class loading: {@code 'ignite.{sh|bat} example-data-regions.xml'}. *

      * Alternatively you can run {@link ExampleNodeStartup} in another JVM which passing - * {@code examples/config/example-memory-policies.xml} configuration to it. + * {@code examples/config/example-data-regions.xml} configuration to it. */ -public class MemoryPoliciesExample { - /** Name of the default memory policy defined in 'example-memory-policies.xml'. */ - public static final String POLICY_DEFAULT = "Default_Region"; +public class DataRegionsExample { + /** Name of the default data region defined in 'example-data-regions.xml'. */ + public static final String REGION_DEFAULT = "Default_Region"; - /** Name of the memory policy that creates a memory region limited by 40 MB with eviction enabled */ - public static final String POLICY_40MB_EVICTION = "40MB_Region_Eviction"; + /** Name of the data region that creates a memory region limited by 40 MB with eviction enabled */ + public static final String REGION_40MB_EVICTION = "40MB_Region_Eviction"; - /** Name of the memory policy that creates a memory region mapped to a memory-mapped file. */ - public static final String POLICY_30MB_MEMORY_MAPPED_FILE = "30MB_Region_Swapping"; + /** Name of the data region that creates a memory region mapped to a memory-mapped file. */ + public static final String REGION_30MB_MEMORY_MAPPED_FILE = "30MB_Region_Swapping"; /** * Executes example. @@ -56,52 +56,51 @@ public class MemoryPoliciesExample { * @throws IgniteException If example execution failed. 
*/ public static void main(String[] args) throws IgniteException { - try (Ignite ignite = Ignition.start("examples/config/example-memory-policies.xml")) { + try (Ignite ignite = Ignition.start("examples/config/example-data-regions.xml")) { System.out.println(); - System.out.println(">>> Memory policies example started."); + System.out.println(">>> Data regions example started."); - /** + /* * Preparing configurations for 2 caches that will be bound to the memory region defined by - * '10MB_Region_Eviction' memory policy from 'example-memory-policies.xml' configuration. + * '10MB_Region_Eviction' data region from 'example-data-regions.xml' configuration. */ CacheConfiguration firstCacheCfg = new CacheConfiguration<>("firstCache"); - firstCacheCfg.setMemoryPolicyName(POLICY_40MB_EVICTION); + firstCacheCfg.setDataRegionName(REGION_40MB_EVICTION); firstCacheCfg.setCacheMode(CacheMode.PARTITIONED); firstCacheCfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL); CacheConfiguration secondCacheCfg = new CacheConfiguration<>("secondCache"); - secondCacheCfg.setMemoryPolicyName(POLICY_40MB_EVICTION); + secondCacheCfg.setDataRegionName(REGION_40MB_EVICTION); secondCacheCfg.setCacheMode(CacheMode.REPLICATED); secondCacheCfg.setAtomicityMode(CacheAtomicityMode.ATOMIC); IgniteCache firstCache = ignite.createCache(firstCacheCfg); IgniteCache secondCache = ignite.createCache(secondCacheCfg); - System.out.println(">>> Started two caches bound to '" + POLICY_40MB_EVICTION + "' memory region."); + System.out.println(">>> Started two caches bound to '" + REGION_40MB_EVICTION + "' memory region."); - /** + /* * Preparing a configuration for a cache that will be bound to the memory region defined by - * '5MB_Region_Swapping' memory policy from 'example-memory-policies.xml' configuration. + * '5MB_Region_Swapping' data region from 'example-data-regions.xml' configuration. 
*/ CacheConfiguration thirdCacheCfg = new CacheConfiguration<>("thirdCache"); - thirdCacheCfg.setMemoryPolicyName(POLICY_30MB_MEMORY_MAPPED_FILE); + thirdCacheCfg.setDataRegionName(REGION_30MB_MEMORY_MAPPED_FILE); IgniteCache thirdCache = ignite.createCache(thirdCacheCfg); - System.out.println(">>> Started a cache bound to '" + POLICY_30MB_MEMORY_MAPPED_FILE + "' memory region."); + System.out.println(">>> Started a cache bound to '" + REGION_30MB_MEMORY_MAPPED_FILE + "' memory region."); - - /** + /* * Preparing a configuration for a cache that will be bound to the default memory region defined by - * default 'Default_Region' memory policy from 'example-memory-policies.xml' configuration. + * default 'Default_Region' data region from 'example-data-regions.xml' configuration. */ CacheConfiguration fourthCacheCfg = new CacheConfiguration<>("fourthCache"); IgniteCache fourthCache = ignite.createCache(fourthCacheCfg); - System.out.println(">>> Started a cache bound to '" + POLICY_DEFAULT + "' memory region."); + System.out.println(">>> Started a cache bound to '" + REGION_DEFAULT + "' memory region."); System.out.println(">>> Destroying caches..."); diff --git a/examples/src/test/java/org/apache/ignite/examples/CacheExamplesSelfTest.java b/examples/src/test/java/org/apache/ignite/examples/CacheExamplesSelfTest.java index 30f0763f686dc..c42c91a200f6b 100644 --- a/examples/src/test/java/org/apache/ignite/examples/CacheExamplesSelfTest.java +++ b/examples/src/test/java/org/apache/ignite/examples/CacheExamplesSelfTest.java @@ -27,7 +27,7 @@ import org.apache.ignite.examples.datagrid.CacheQueryDmlExample; import org.apache.ignite.examples.datagrid.CacheQueryExample; import org.apache.ignite.examples.datagrid.CacheTransactionExample; -import org.apache.ignite.examples.datagrid.MemoryPoliciesExample; +import org.apache.ignite.examples.datagrid.DataRegionsExample; import org.apache.ignite.examples.datagrid.starschema.CacheStarSchemaExample; import 
org.apache.ignite.examples.datagrid.store.CacheLoadOnlyStoreExample; import org.apache.ignite.examples.datastructures.IgniteAtomicLongExample; @@ -195,7 +195,7 @@ public void testCacheLoadOnlyStoreExample() throws Exception { /** * @throws Exception If failed. */ - public void testMemoryPolicyExample() throws Exception { - MemoryPoliciesExample.main(EMPTY_ARGS); + public void testDataRegionExample() throws Exception { + DataRegionsExample.main(EMPTY_ARGS); } } diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java index 94abc86a64e61..cef00eedd11c5 100644 --- a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java @@ -21,7 +21,7 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicLong; import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.benchmarks.jmh.JmhAbstractBenchmark; import org.apache.ignite.internal.benchmarks.jmh.runner.JmhIdeBenchmarkRunner; import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; @@ -30,7 +30,7 @@ import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.PageUtils; import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; import 
org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO; @@ -210,14 +210,14 @@ private PageMemory createPageMemory() throws Exception { for (int i = 0; i < sizes.length; i++) sizes[i] = 1024 * MB / CPUS; - MemoryPolicyConfiguration plcCfg = new MemoryPolicyConfiguration().setMaxSize(1024 * MB); + DataRegionConfiguration plcCfg = new DataRegionConfiguration().setMaxSize(1024 * MB); PageMemory pageMem = new PageMemoryNoStoreImpl(new JavaLogger(), new UnsafeMemoryProvider(new JavaLogger()), null, PAGE_SIZE, plcCfg, - new MemoryMetricsImpl(plcCfg), + new DataRegionMetricsImpl(plcCfg), false); pageMem.start(); diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java index f548939099a19..655da522b16fe 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java @@ -22,6 +22,8 @@ import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.internal.IgniteEx; @@ -40,7 +42,11 @@ public class DummyPersistenceCompatibilityTest extends IgnitePersistenceCompatib cfg.setPeerClassLoadingEnabled(false); - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new 
DataRegionConfiguration().setPersistenceEnabled(true)); + + cfg.setDataStorageConfiguration(memCfg); return cfg; } diff --git a/modules/core/src/main/java/org/apache/ignite/DataRegionMetrics.java b/modules/core/src/main/java/org/apache/ignite/DataRegionMetrics.java new file mode 100644 index 0000000000000..86b91f44d83d9 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/DataRegionMetrics.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite; + +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.mxbean.DataRegionMetricsMXBean; + +/** + * This interface provides page memory related metrics of a specific Apache Ignite node. The overall page memory + * architecture is covered in {@link DataStorageConfiguration}. + *

      + * Since there are can be several memory regions configured with {@link DataRegionConfiguration} on an individual + * Apache Ignite node, the metrics for every region will be collected and obtained separately. + *

      + * There are two ways to get the metrics of an Apache Ignite node. + *

        + *
      1. + * First, a collection of the metrics can be obtained through {@link Ignite#dataRegionMetrics()} method. Note that + * the method returns data region metrics snapshots rather than just in time memory state. + *
      2. + *
      3. + * Second, all {@link DataRegionMetrics} of a local Apache Ignite node are visible through JMX interface. Refer to + * {@link DataRegionMetricsMXBean} for more details. + *
      4. + *
      + *

      + *

      + * Data region metrics collection is not a free operation and might affect performance of an application. This is the reason + * why the metrics are turned off by default. To enable the collection you can use both + * {@link DataRegionConfiguration#setMetricsEnabled(boolean)} configuration property or + * {@link DataRegionMetricsMXBean#enableMetrics()} method of a respective JMX bean. + */ +public interface DataRegionMetrics { + /** + * A name of a memory region the metrics are collected for. + * + * @return Name of the memory region. + */ + public String getName(); + + /** + * Gets a total number of allocated pages related to the data region. When persistence is disabled, this + * metric shows the total number of pages in memory. When persistence is enabled, this metric shows the + * total number of pages in memory and on disk. + * + * @return Total number of allocated pages. + */ + public long getTotalAllocatedPages(); + + /** + * Gets pages allocation rate of a memory region. + * + * @return Number of allocated pages per second. + */ + public float getAllocationRate(); + + /** + * Gets eviction rate of a given memory region. + * + * @return Number of evicted pages per second. + */ + public float getEvictionRate(); + + /** + * Gets percentage of pages that are fully occupied by large entries that go beyond page size. The large entities + * are split into fragments in a way so that each fragment can fit into a single page. + * + * @return Percentage of pages fully occupied by large entities. + */ + public float getLargeEntriesPagesPercentage(); + + /** + * Gets the percentage of space that is still free and can be filled in. + * + * @return The percentage of space that is still free and can be filled in. + */ + public float getPagesFillFactor(); + + /** + * Gets the number of dirty pages (pages which contents is different from the current persistent storage state). + * This metric is enabled only for Ignite nodes with enabled persistence. 
+ * + * @return Current number of dirty pages. + */ + public long getDirtyPages(); + + /** + * Gets rate (pages per second) at which pages get replaced with other pages from persistent storage. + * The rate effectively represents the rate at which pages get 'evicted' in favor of newly needed pages. + * This metric is enabled only for Ignite nodes with enabled persistence. + * + * @return Pages per second replace rate. + */ + public float getPagesReplaceRate(); + + /** + * Gets total number of pages currently loaded to the RAM. When persistence is disabled, this metric is equal + * to {@link #getTotalAllocatedPages()}. + * + * @return Total number of pages loaded to RAM. + */ + public long getPhysicalMemoryPages(); +} diff --git a/modules/core/src/main/java/org/apache/ignite/DataRegionMetricsAdapter.java b/modules/core/src/main/java/org/apache/ignite/DataRegionMetricsAdapter.java new file mode 100644 index 0000000000000..dcf2049c6c6e2 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/DataRegionMetricsAdapter.java @@ -0,0 +1,106 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ +package org.apache.ignite; + +import java.util.ArrayList; +import java.util.Collection; + +/** + * Converter class from {@link DataRegionMetrics} to legacy {@link MemoryMetrics}. + */ +public class DataRegionMetricsAdapter implements MemoryMetrics { + /** Delegate. */ + private final DataRegionMetrics delegate; + + /** + * @param delegate Delegate. + */ + private DataRegionMetricsAdapter(DataRegionMetrics delegate) { + this.delegate = delegate; + } + + /** + * Converts collection of {@link DataRegionMetrics} into collection of legacy {@link MemoryMetrics}. + * + * @param dataRegionMetrics Data region metrics collection. + */ + public static Collection collectionOf(Collection dataRegionMetrics) { + if (dataRegionMetrics == null) + return null; + + Collection res = new ArrayList<>(); + + for (DataRegionMetrics d : dataRegionMetrics) + res.add(new DataRegionMetricsAdapter(d)); + + return res; + } + + /** + * @param delegate DataRegionMetrics. + * @return Wrapped {@link DataRegionMetrics} that implements {@link MemoryMetrics}. + * Null value is not wrapped and returned as is. + */ + public static DataRegionMetricsAdapter valueOf(DataRegionMetrics delegate) { + return delegate == null ? 
null : new DataRegionMetricsAdapter(delegate); + } + + /** {@inheritDoc} */ + @Override public String getName() { + return delegate.getName(); + } + + /** {@inheritDoc} */ + @Override public long getTotalAllocatedPages() { + return delegate.getTotalAllocatedPages(); + } + + /** {@inheritDoc} */ + @Override public float getAllocationRate() { + return delegate.getAllocationRate(); + } + + /** {@inheritDoc} */ + @Override public float getEvictionRate() { + return delegate.getEvictionRate(); + } + + /** {@inheritDoc} */ + @Override public float getLargeEntriesPagesPercentage() { + return delegate.getLargeEntriesPagesPercentage(); + } + + /** {@inheritDoc} */ + @Override public float getPagesFillFactor() { + return delegate.getPagesFillFactor(); + } + + /** {@inheritDoc} */ + @Override public long getDirtyPages() { + return delegate.getDirtyPages(); + } + + /** {@inheritDoc} */ + @Override public float getPagesReplaceRate() { + return delegate.getPagesReplaceRate(); + } + + /** {@inheritDoc} */ + @Override public long getPhysicalMemoryPages() { + return delegate.getPhysicalMemoryPages(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java b/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java new file mode 100644 index 0000000000000..87095f6ee9618 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite; + +import org.apache.ignite.configuration.DataStorageConfiguration; + +/** + * Data storage metrics are used to obtain statistics on persistent store and whole data storage. + */ +public interface DataStorageMetrics { + /** + * Gets the average number of WAL records per second written during the last time interval. + *

      + * The length of time interval is configured via {@link DataStorageConfiguration#setMetricsRateTimeInterval(long)} + * configuration property. + * The number of subintervals is configured via {@link DataStorageConfiguration#setMetricsSubIntervalCount(int)} + * configuration property. + */ + public float getWalLoggingRate(); + + /** + * Gets the average number of bytes per second written during the last time interval. + * The length of time interval is configured via {@link DataStorageConfiguration#setMetricsRateTimeInterval(long)} + * configuration property. + * The number of subintervals is configured via {@link DataStorageConfiguration#setMetricsSubIntervalCount(int)} + * configuration property. + */ + public float getWalWritingRate(); + + /** + * Gets the current number of WAL segments in the WAL archive. + */ + public int getWalArchiveSegments(); + + /** + * Gets the average WAL fsync duration in microseconds over the last time interval. + *

      + * The length of time interval is configured via {@link DataStorageConfiguration#setMetricsRateTimeInterval(long)} + * configuration property. + * The number of subintervals is configured via {@link DataStorageConfiguration#setMetricsSubIntervalCount(int)} + * configuration property. + */ + public float getWalFsyncTimeAverage(); + + /** + * Gets the duration of the last checkpoint in milliseconds. + * + * @return Total checkpoint duration in milliseconds. + */ + public long getLastCheckpointingDuration(); + + /** + * Gets the duration of last checkpoint lock wait in milliseconds. + * + * @return Checkpoint lock wait time in milliseconds. + */ + public long getLastCheckpointLockWaitDuration(); + + /** + * Gets the duration of last checkpoint mark phase in milliseconds. + * + * @return Checkpoint mark duration in milliseconds. + */ + public long getLastCheckpointMarkDuration(); + + /** + * Gets the duration of last checkpoint pages write phase in milliseconds. + * + * @return Checkpoint pages write phase in milliseconds. + */ + public long getLastCheckpointPagesWriteDuration(); + + /** + * Gets the duration of the sync phase of the last checkpoint in milliseconds. + * + * @return Checkpoint fsync time in milliseconds. + */ + public long getLastCheckpointFsyncDuration(); + + /** + * Gets the total number of pages written during the last checkpoint. + * + * @return Total number of pages written during the last checkpoint. + */ + public long getLastCheckpointTotalPagesNumber(); + + /** + * Gets the number of data pages written during the last checkpoint. + * + * @return Total number of data pages written during the last checkpoint. + */ + public long getLastCheckpointDataPagesNumber(); + + /** + * Gets the number of pages copied to a temporary checkpoint buffer during the last checkpoint. + * + * @return Total number of pages copied to a temporary checkpoint buffer during the last checkpoint.
+ */ + public long getLastCheckpointCopiedOnWritePagesNumber(); +} diff --git a/modules/core/src/main/java/org/apache/ignite/DataStorageMetricsAdapter.java b/modules/core/src/main/java/org/apache/ignite/DataStorageMetricsAdapter.java new file mode 100644 index 0000000000000..6bb4b7e47b635 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/DataStorageMetricsAdapter.java @@ -0,0 +1,101 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +package org.apache.ignite; + +/** + * Converter class from {@link DataStorageMetrics} to legacy {@link PersistenceMetrics}. + */ +public class DataStorageMetricsAdapter implements PersistenceMetrics { + /** Delegate. */ + private final DataStorageMetrics delegate; + + /** + * @param delegate Delegate. + */ + private DataStorageMetricsAdapter(DataStorageMetrics delegate) { + this.delegate = delegate; + } + + /** + * @param delegate DataStorageMetrics. + * @return Wrapped {@link DataStorageMetrics} that implements {@link PersistenceMetrics}. + * Null value is not wrapped and returned as is. + */ + public static DataStorageMetricsAdapter valueOf(DataStorageMetrics delegate) { + return delegate == null ? 
null : new DataStorageMetricsAdapter(delegate); + } + + /** {@inheritDoc} */ + @Override public float getWalLoggingRate() { + return delegate.getWalLoggingRate(); + } + + /** {@inheritDoc} */ + @Override public float getWalWritingRate() { + return delegate.getWalWritingRate(); + } + + /** {@inheritDoc} */ + @Override public int getWalArchiveSegments() { + return delegate.getWalArchiveSegments(); + } + + /** {@inheritDoc} */ + @Override public float getWalFsyncTimeAverage() { + return delegate.getWalFsyncTimeAverage(); + } + + /** {@inheritDoc} */ + @Override public long getLastCheckpointingDuration() { + return delegate.getLastCheckpointingDuration(); + } + + /** {@inheritDoc} */ + @Override public long getLastCheckpointLockWaitDuration() { + return delegate.getLastCheckpointLockWaitDuration(); + } + + /** {@inheritDoc} */ + @Override public long getLastCheckpointMarkDuration() { + return delegate.getLastCheckpointMarkDuration(); + } + + /** {@inheritDoc} */ + @Override public long getLastCheckpointPagesWriteDuration() { + return delegate.getLastCheckpointPagesWriteDuration(); + } + + /** {@inheritDoc} */ + @Override public long getLastCheckpointFsyncDuration() { + return delegate.getLastCheckpointFsyncDuration(); + } + + /** {@inheritDoc} */ + @Override public long getLastCheckpointTotalPagesNumber() { + return delegate.getLastCheckpointTotalPagesNumber(); + } + + /** {@inheritDoc} */ + @Override public long getLastCheckpointDataPagesNumber() { + return delegate.getLastCheckpointDataPagesNumber(); + } + + /** {@inheritDoc} */ + @Override public long getLastCheckpointCopiedOnWritePagesNumber() { + return delegate.getLastCheckpointCopiedOnWritePagesNumber(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/Ignite.java b/modules/core/src/main/java/org/apache/ignite/Ignite.java index 866c3131ff545..c8de43b45c12a 100644 --- a/modules/core/src/main/java/org/apache/ignite/Ignite.java +++ b/modules/core/src/main/java/org/apache/ignite/Ignite.java @@ -27,9 
+27,9 @@ import org.apache.ignite.configuration.AtomicConfiguration; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.CollectionConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.lang.IgniteProductVersion; @@ -676,30 +676,50 @@ public IgniteQueue queue(String name, int cap, @Nullable CollectionConfig public void resetLostPartitions(Collection cacheNames); /** - * Returns a collection of {@link MemoryMetrics} that reflects page memory usage on this Apache Ignite node + * @return Collection of {@link MemoryMetrics} snapshots. + * @deprecated Use {@link #dataRegionMetrics()} instead. + */ + @Deprecated + public Collection memoryMetrics(); + + /** + * @return {@link MemoryMetrics} snapshot or {@code null} if no memory region is configured under specified name. + * @deprecated Use {@link #dataRegionMetrics(String)} instead. + */ + @Deprecated + @Nullable public MemoryMetrics memoryMetrics(String memPlcName); + + /** + * @return {@link PersistenceMetrics} snapshot. + * @deprecated Use {@link #dataStorageMetrics()} instead. + */ + @Deprecated + public PersistenceMetrics persistentStoreMetrics(); + + /** + * Returns a collection of {@link DataRegionMetrics} that reflects page memory usage on this Apache Ignite node * instance. * Returns the collection that contains the latest snapshots for each memory region - * configured with {@link MemoryPolicyConfiguration configuration} on this Ignite node instance. + * configured with {@link DataRegionConfiguration configuration} on this Ignite node instance. 
* - * @return Collection of {@link MemoryMetrics} snapshots. + * @return Collection of {@link DataRegionMetrics} snapshots. */ - public Collection memoryMetrics(); + public Collection dataRegionMetrics(); /** - * Returns the latest {@link MemoryMetrics} snapshot for the memory region of the given name. + * Returns the latest {@link DataRegionMetrics} snapshot for the memory region of the given name. * * To get the metrics for the default memory region use - * {@link MemoryConfiguration#DFLT_MEM_PLC_DEFAULT_NAME} as the name + * {@link DataStorageConfiguration#DFLT_DATA_REG_DEFAULT_NAME} as the name * or a custom name if the default memory region has been renamed. * - * @param memPlcName Name of memory region configured with {@link MemoryPolicyConfiguration config}. - * @return {@link MemoryMetrics} snapshot or {@code null} if no memory region is configured under specified name. + * @param memPlcName Name of memory region configured with {@link DataRegionConfiguration config}. + * @return {@link DataRegionMetrics} snapshot or {@code null} if no memory region is configured under specified name. */ - @Nullable public MemoryMetrics memoryMetrics(String memPlcName); + @Nullable public DataRegionMetrics dataRegionMetrics(String memPlcName); /** - * - * @return {@link PersistenceMetrics} snapshot. + * @return {@link DataStorageMetrics} snapshot. 
*/ - public PersistenceMetrics persistentStoreMetrics(); + public DataStorageMetrics dataStorageMetrics(); } diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java index d7b4de95c6ef7..d7d4443867587 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java @@ -24,7 +24,7 @@ import java.util.Properties; import javax.net.ssl.HostnameVerifier; import org.apache.ignite.cluster.ClusterGroup; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.marshaller.optimized.OptimizedMarshaller; import org.apache.ignite.lang.IgnitePredicate; import org.jetbrains.annotations.Nullable; @@ -727,7 +727,7 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_WAL_LOG_TX_RECORDS = "IGNITE_WAL_LOG_TX_RECORDS"; - /** If this property is set, {@link PersistentStoreConfiguration#writeThrottlingEnabled} will be overridden to true + /** If this property is set, {@link DataStorageConfiguration#writeThrottlingEnabled} will be overridden to true * independent of initial value in configuration. 
*/ public static final String IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED = "IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED"; diff --git a/modules/core/src/main/java/org/apache/ignite/MemoryMetrics.java b/modules/core/src/main/java/org/apache/ignite/MemoryMetrics.java index c709777b9193c..e0c22eda3a59e 100644 --- a/modules/core/src/main/java/org/apache/ignite/MemoryMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/MemoryMetrics.java @@ -19,7 +19,7 @@ import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.mxbean.MemoryMetricsMXBean; +import org.apache.ignite.mxbean.DataRegionMetricsMXBean; /** * This interface provides page memory related metrics of a specific Apache Ignite node. The overall page memory @@ -31,12 +31,12 @@ * There are two ways to get the metrics of an Apache Ignite node. *

        *
      1. - * First, a collection of the metrics can be obtained through {@link Ignite#memoryMetrics()} method. Note that + * First, a collection of the metrics can be obtained through {@link Ignite#dataRegionMetrics()} ()} method. Note that * the method returns memory metrics snapshots rather than just in time memory state. *
      2. *
      3. * Second, all {@link MemoryMetrics} of a local Apache Ignite node are visible through JMX interface. Refer to - * {@link MemoryMetricsMXBean} for more details. + * {@link DataRegionMetricsMXBean} for more details. *
      4. *
      *

      @@ -44,8 +44,11 @@ * Memory metrics collection is not a free operation and might affect performance of an application. This is the reason * why the metrics are turned off by default. To enable the collection you can use both * {@link MemoryPolicyConfiguration#setMetricsEnabled(boolean)} configuration property or - * {@link MemoryMetricsMXBean#enableMetrics()} method of a respective JMX bean. + * {@link DataRegionMetricsMXBean#enableMetrics()} method of a respective JMX bean. + * + * @deprecated Use {@link DataRegionMetrics} instead. */ +@Deprecated public interface MemoryMetrics { /** * A name of a memory region the metrics are collected for. diff --git a/modules/core/src/main/java/org/apache/ignite/PersistenceMetrics.java b/modules/core/src/main/java/org/apache/ignite/PersistenceMetrics.java index f3f763c22e51b..3b96b11ed8527 100644 --- a/modules/core/src/main/java/org/apache/ignite/PersistenceMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/PersistenceMetrics.java @@ -17,13 +17,13 @@ package org.apache.ignite; import org.apache.ignite.configuration.PersistentStoreConfiguration; -import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; /** * Persistence metrics used to obtain statistics on persistence. * - * Use {@link IgniteCacheDatabaseSharedManager#persistentStoreMetrics()} to obtain persistent metrics. + * @deprecated Use {@link DataStorageMetrics} instead. */ +@Deprecated public interface PersistenceMetrics { /** * Gets the average number of WAL records per second written during the last time interval. 
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java index 6c43d13bf8065..37a067760b8db 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java @@ -187,7 +187,7 @@ public class CacheConfiguration extends MutableConfiguration { /** Cache group name. */ private String grpName; - /** Name of {@link MemoryPolicyConfiguration} for this cache */ + /** Name of {@link DataRegionConfiguration} for this cache */ private String memPlcName; /** Threshold for concurrent loading of keys from {@link CacheStore}. */ @@ -407,7 +407,7 @@ public CacheConfiguration(CompleteConfiguration cfg) { loadPrevVal = cc.isLoadPreviousValue(); longQryWarnTimeout = cc.getLongQueryWarningTimeout(); maxConcurrentAsyncOps = cc.getMaxConcurrentAsyncOperations(); - memPlcName = cc.getMemoryPolicyName(); + memPlcName = cc.getDataRegionName(); name = cc.getName(); nearCfg = cc.getNearConfiguration(); nodeFilter = cc.getNodeFilter(); @@ -453,7 +453,7 @@ public CacheConfiguration(CompleteConfiguration cfg) { * Since underlying cache is shared, the following configuration properties should be the same within group: * {@link #setAffinity(AffinityFunction)}, {@link #setNodeFilter(IgnitePredicate)}, {@link #cacheMode}, * {@link #setTopologyValidator(TopologyValidator)}, {@link #setPartitionLossPolicy(PartitionLossPolicy)}, - * {@link #setMemoryPolicyName(String)}. + * {@link #setDataRegionName(String)}. * * Grouping caches reduces overall overhead, since internal data structures are shared. 
* @@ -472,7 +472,7 @@ public String getGroupName() { * Since underlying cache is shared, the following configuration properties should be the same within group: * {@link #setAffinity(AffinityFunction)}, {@link #setNodeFilter(IgnitePredicate)}, {@link #cacheMode}, * {@link #setTopologyValidator(TopologyValidator)}, {@link #setPartitionLossPolicy(PartitionLossPolicy)}, - * {@link #setMemoryPolicyName(String)}. + * {@link #setDataRegionName(String)}. * * Grouping caches reduces overall overhead, since internal data structures are shared. * @@ -509,27 +509,43 @@ public CacheConfiguration setName(String name) { } /** - * @return {@link MemoryPolicyConfiguration} name. + * @return {@link DataRegionConfiguration} name. */ + @Nullable public String getDataRegionName() { + return memPlcName; + } + + /** + * @deprecated Use {@link #getDataRegionName()} (String)} instead. + */ + @Deprecated public String getMemoryPolicyName() { return memPlcName; } /** - * Sets a name of {@link MemoryPolicyConfiguration} for this cache. + * Sets a name of {@link DataRegionConfiguration} for this cache. * - * @param memPlcName MemoryPolicyConfiguration name. Can be null (default MemoryPolicyConfiguration will be used) + * @param dataRegionName DataRegionConfiguration name. Can be null (default DataRegionConfiguration will be used) * but should not be empty. * @return {@code this} for chaining. */ - public CacheConfiguration setMemoryPolicyName(String memPlcName) { - A.ensure(memPlcName == null || !memPlcName.isEmpty(), "Name cannot be empty."); + public CacheConfiguration setDataRegionName(@Nullable String dataRegionName) { + A.ensure(dataRegionName == null || !dataRegionName.isEmpty(), "Name cannot be empty."); - this.memPlcName = memPlcName; + this.memPlcName = dataRegionName; return this; } + /** + * @deprecated Use {@link #setDataRegionName(String)} instead. 
+ */ + @Deprecated + public CacheConfiguration setMemoryPolicyName(String memPlcName) { + return setDataRegionName(memPlcName); + } + /** * Gets cache eviction policy. By default, returns {@code null} * which means that evictions are disabled for cache. diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DataPageEvictionMode.java b/modules/core/src/main/java/org/apache/ignite/configuration/DataPageEvictionMode.java index f61e870ee8131..2b4ee787df6f1 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/DataPageEvictionMode.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/DataPageEvictionMode.java @@ -21,7 +21,7 @@ /** * Defines memory page eviction algorithm. A mode is set for a specific - * {@link MemoryPolicyConfiguration}. Only data pages, that store key-value entries, are eligible for eviction. The + * {@link DataRegionConfiguration}. Only data pages, that store key-value entries, are eligible for eviction. The * other types of pages, like index or meta pages, are not evictable. */ public enum DataPageEvictionMode { @@ -31,11 +31,11 @@ public enum DataPageEvictionMode { /** * Random-LRU algorithm. *
        - *
      • Once a memory region defined by a memory policy is configured, an off-heap array is allocated to track + *
      • Once a memory region defined by a data region is configured, an off-heap array is allocated to track * last usage timestamp for every individual data page. The size of the array is calculated this way - size = - * ({@link MemoryPolicyConfiguration#getMaxSize()} / {@link MemoryConfiguration#pageSize})
      • + * ({@link DataRegionConfiguration#getMaxSize()} / {@link DataStorageConfiguration#pageSize}) *
      • When a data page is accessed, its timestamp gets updated in the tracking array. The page index in the - * tracking array is calculated this way - index = (pageAddress / {@link MemoryPolicyConfiguration#getMaxSize()}
      • + * tracking array is calculated this way - index = (pageAddress / {@link DataRegionConfiguration#getMaxSize()} *
      • When it's required to evict some pages, the algorithm randomly chooses 5 indexes from the tracking array and * evicts a page with the latest timestamp. If some of the indexes point to non-data pages (index or system pages) * then the algorithm picks other pages.
      • diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DataRegionConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/DataRegionConfiguration.java new file mode 100644 index 0000000000000..50edf5cb4d916 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/configuration/DataRegionConfiguration.java @@ -0,0 +1,406 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.configuration; + +import java.io.Serializable; +import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; +import org.apache.ignite.mxbean.DataRegionMetricsMXBean; + +import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_DATA_REG_DEFAULT_NAME; + +/** + * This class allows defining custom data regions' configurations with various parameters for Apache Ignite + * page memory (see {@link DataStorageConfiguration}. For each configured data region Apache Ignite instantiates + * respective memory regions with different parameters like maximum size, eviction policy, swapping options, + * persistent mode flag, etc. 
+ * An Apache Ignite cache can be mapped to a particular region using + * {@link CacheConfiguration#setDataRegionName(String)} method. + *

        Sample configuration below shows how to configure several data regions:

        + *
        + *     {@code
        + *     
        + *         
        + *             
        + *                 
        + *                     
        + *                     
        + *                     
        + *                 
        + *             
        + *
        + *             
        + *
        + *             
        + *                 
        + *                      
        + *                          
        + *                          
        + *                          
        + *                      
        + *
        + *                      
        + *                          
        + *                          
        + *                          
        + *                          
        + *                      
        + *                  
        + *              
        + *     }
        + * 
        + */ +public final class DataRegionConfiguration implements Serializable { + /** */ + private static final long serialVersionUID = 0L; + + /** Default metrics enabled flag. */ + public static final boolean DFLT_METRICS_ENABLED = false; + + /** Default amount of sub intervals to calculate {@link DataRegionMetrics#getAllocationRate()} metric. */ + public static final int DFLT_SUB_INTERVALS = 5; + + /** Default length of interval over which {@link DataRegionMetrics#getAllocationRate()} metric is calculated. */ + public static final int DFLT_RATE_TIME_INTERVAL_MILLIS = 60_000; + + /** Data region name. */ + private String name = DFLT_DATA_REG_DEFAULT_NAME; + + /** Data region maximum size in memory. */ + private long maxSize = DataStorageConfiguration.DFLT_DATA_REGION_MAX_SIZE; + + /** Data region start size. */ + private long initSize = Math.min( + DataStorageConfiguration.DFLT_DATA_REGION_MAX_SIZE, DataStorageConfiguration.DFLT_DATA_REGION_INITIAL_SIZE); + + /** An optional path to a memory mapped files directory for this data region. */ + private String swapPath; + + /** An algorithm for memory pages eviction. */ + private DataPageEvictionMode pageEvictionMode = DataPageEvictionMode.DISABLED; + + /** + * A threshold for memory pages eviction initiation. For instance, if the threshold is 0.9 it means that the page + * memory will start the eviction only after 90% data region is occupied. + */ + private double evictionThreshold = 0.9; + + /** Minimum number of empty pages in reuse lists. */ + private int emptyPagesPoolSize = 100; + + /** + * Flag to enable the memory metrics collection for this data region. + */ + private boolean metricsEnabled = DFLT_METRICS_ENABLED; + + /** Number of sub-intervals the whole {@link #setMetricsRateTimeInterval(long)} will be split into to calculate + * {@link DataRegionMetrics#getAllocationRate()} and {@link DataRegionMetrics#getEvictionRate()} rates (5 by default). + *

        + * Setting it to a bigger value will result in more precise calculation and smaller drops of + * {@link DataRegionMetrics#getAllocationRate()} metric when next sub-interval has to be recycled but introduces bigger + * calculation overhead. */ + private int metricsSubIntervalCount = DFLT_SUB_INTERVALS; + + /** + * Time interval (in milliseconds) for {@link DataRegionMetrics#getAllocationRate()} + * and {@link DataRegionMetrics#getEvictionRate()} monitoring purposes. + *

        + * For instance, after setting the interval to 60_000 milliseconds, subsequent calls to {@link DataRegionMetrics#getAllocationRate()} + * will return average allocation rate (pages per second) for the last minute. + */ + private long metricsRateTimeInterval = DFLT_RATE_TIME_INTERVAL_MILLIS; + + /** + * Flag to enable Ignite Native Persistence. + */ + private boolean persistenceEnabled = false; + + /** + * Gets data region name. + * + * @return Data region name. + */ + public String getName() { + return name; + } + + /** + * Sets data region name. The name must be non empty and must not be equal to the reserved 'sysMemPlc' one. + * + * If not specified, {@link DataStorageConfiguration#DFLT_DATA_REG_DEFAULT_NAME} value is used. + * + * @param name Data region name. + * @return {@code this} for chaining. + */ + public DataRegionConfiguration setName(String name) { + this.name = name; + + return this; + } + + /** + * Maximum memory region size defined by this data region. If the whole data can not fit into the memory region + * an out of memory exception will be thrown. + * + * @return Size in bytes. + */ + public long getMaxSize() { + return maxSize; + } + + /** + * Sets maximum memory region size defined by this data region. The total size should not be less than 10 MB + * due to the internal data structures overhead. + * + * @param maxSize Maximum data region size in bytes. + * @return {@code this} for chaining. + */ + public DataRegionConfiguration setMaxSize(long maxSize) { + this.maxSize = maxSize; + + return this; + } + + /** + * Gets initial memory region size defined by this data region. When the used memory size exceeds this value, + * new chunks of memory will be allocated. + * + * @return Data region start size. + */ + public long getInitialSize() { + return initSize; + } + + /** + * Sets initial memory region size defined by this data region. When the used memory size exceeds this value, + * new chunks of memory will be allocated. 
+ * + * @param initSize Data region initial size. + * @return {@code this} for chaining. + */ + public DataRegionConfiguration setInitialSize(long initSize) { + this.initSize = initSize; + + return this; + } + + /** + * A path to the memory-mapped files the memory region defined by this data region will be mapped to. Having + * the path set, allows relying on swapping capabilities of an underlying operating system for the memory region. + * + * @return A path to the memory-mapped files or {@code null} if this feature is not used for the memory region + * defined by this data region. + */ + public String getSwapPath() { + return swapPath; + } + + /** + * Sets a path to the memory-mapped files. + * + * @param swapFilePath A Path to the memory mapped file. + * @return {@code this} for chaining. + */ + public DataRegionConfiguration setSwapPath(String swapFilePath) { + this.swapPath = swapFilePath; + + return this; + } + + /** + * Gets memory pages eviction mode. If {@link DataPageEvictionMode#DISABLED} is used (default) then an out of + * memory exception will be thrown if the memory region usage, defined by this data region, goes beyond its + * capacity which is {@link #getMaxSize()}. + * + * @return Memory pages eviction algorithm. {@link DataPageEvictionMode#DISABLED} used by default. + */ + public DataPageEvictionMode getPageEvictionMode() { + return pageEvictionMode; + } + + /** + * Sets memory pages eviction mode. + * + * @param evictionMode Eviction mode. + * @return {@code this} for chaining. + */ + public DataRegionConfiguration setPageEvictionMode(DataPageEvictionMode evictionMode) { + pageEvictionMode = evictionMode; + + return this; + } + + /** + * Gets a threshold for memory pages eviction initiation. For instance, if the threshold is 0.9 it means that the + * page memory will start the eviction only after 90% of the data region is occupied. + * + * @return Memory pages eviction threshold. 
+ */ + public double getEvictionThreshold() { + return evictionThreshold; + } + + /** + * Sets memory pages eviction threshold. + * + * @param evictionThreshold Eviction threshold. + * @return {@code this} for chaining. + */ + public DataRegionConfiguration setEvictionThreshold(double evictionThreshold) { + this.evictionThreshold = evictionThreshold; + + return this; + } + + /** + * Specifies the minimal number of empty pages to be present in reuse lists for this data region. + * This parameter ensures that Ignite will be able to successfully evict old data entries when the size of + * (key, value) pair is slightly larger than page size / 2. + * Increase this parameter if cache can contain very big entries (total size of pages in this pool should be enough + * to contain largest cache entry). + * Increase this parameter if {@link IgniteOutOfMemoryException} occurred with enabled page eviction. + * + * @return Minimum number of empty pages in reuse list. + */ + public int getEmptyPagesPoolSize() { + return emptyPagesPoolSize; + } + + /** + * Specifies the minimal number of empty pages to be present in reuse lists for this data region. + * This parameter ensures that Ignite will be able to successfully evict old data entries when the size of + * (key, value) pair is slightly larger than page size / 2. + * Increase this parameter if cache can contain very big entries (total size of pages in this pool should be enough + * to contain largest cache entry). + * Increase this parameter if {@link IgniteOutOfMemoryException} occurred with enabled page eviction. + * + * @param emptyPagesPoolSize Empty pages pool size. + * @return {@code this} for chaining. + */ + public DataRegionConfiguration setEmptyPagesPoolSize(int emptyPagesPoolSize) { + this.emptyPagesPoolSize = emptyPagesPoolSize; + + return this; + } + + /** + * Gets whether memory metrics are enabled by default on node startup. 
Memory metrics can be enabled and disabled + * at runtime via memory metrics {@link DataRegionMetricsMXBean MX bean}. + * + * @return Metrics enabled flag. + */ + public boolean isMetricsEnabled() { + return metricsEnabled; + } + + /** + * Sets memory metrics enabled flag. If this flag is {@code true}, metrics will be enabled on node startup. + * Memory metrics can be enabled and disabled at runtime via memory metrics {@link DataRegionMetricsMXBean MX bean}. + * + * @param metricsEnabled Metrics enabled flag. + * @return {@code this} for chaining. + */ + public DataRegionConfiguration setMetricsEnabled(boolean metricsEnabled) { + this.metricsEnabled = metricsEnabled; + + return this; + } + + /** + * Gets whether persistence is enabled for this data region. All caches residing in this region will be persistent. + * + * @return Persistence enabled flag. + */ + public boolean isPersistenceEnabled() { + return persistenceEnabled; + } + + /** + * Sets persistence enabled flag. + * + * @param persistenceEnabled Persistence enabled flag. + * @return {@code this} for chaining. + */ + public DataRegionConfiguration setPersistenceEnabled(boolean persistenceEnabled) { + this.persistenceEnabled = persistenceEnabled; + + return this; + } + + /** + * Gets time interval for {@link DataRegionMetrics#getAllocationRate()} + * and {@link DataRegionMetrics#getEvictionRate()} monitoring purposes. + *
        + * <p>
        + * For instance, after setting the interval to 60_000 milliseconds, + * subsequent calls to {@link DataRegionMetrics#getAllocationRate()} + * will return average allocation rate (pages per second) for the last minute. + * + * @return Time interval over which allocation rate is calculated. + */ + public long getMetricsRateTimeInterval() { + return metricsRateTimeInterval; + } + + /** + * Sets time interval for {@link DataRegionMetrics#getAllocationRate()} + * and {@link DataRegionMetrics#getEvictionRate()} monitoring purposes. + *
        + * <p>
        + * For instance, after setting the interval to 60 seconds, + * subsequent calls to {@link DataRegionMetrics#getAllocationRate()} + * will return average allocation rate (pages per second) for the last minute. + * + * @param metricsRateTimeInterval Time interval used for allocation and eviction rates calculations. + * @return {@code this} for chaining. + */ + public DataRegionConfiguration setMetricsRateTimeInterval(long metricsRateTimeInterval) { + this.metricsRateTimeInterval = metricsRateTimeInterval; + + return this; + } + + /** + * Gets a number of sub-intervals the whole {@link #setMetricsRateTimeInterval(long)} + * will be split into to calculate {@link DataRegionMetrics#getAllocationRate()} + * and {@link DataRegionMetrics#getEvictionRate()} rates (5 by default). + *
        + * <p>
        + * Setting it to a bigger value will result in more precise calculation and smaller drops of + * {@link DataRegionMetrics#getAllocationRate()} metric when next sub-interval has to be recycled but introduces bigger + * calculation overhead. + * + * @return number of sub intervals. + */ + public int getMetricsSubIntervalCount() { + return metricsSubIntervalCount; + } + + /** + * Sets a number of sub-intervals the whole {@link #setMetricsRateTimeInterval(long)} will be split into to calculate + * {@link DataRegionMetrics#getAllocationRate()} and {@link DataRegionMetrics#getEvictionRate()} rates (5 by default). + *
        + * <p>
        + * Setting it to a bigger value will result in more precise calculation and smaller drops of + * {@link DataRegionMetrics#getAllocationRate()} metric when next sub-interval has to be recycled but introduces bigger + * calculation overhead. + * + * @param metricsSubIntervalCnt A number of sub-intervals. + * @return {@code this} for chaining. + */ + public DataRegionConfiguration setMetricsSubIntervalCount(int metricsSubIntervalCnt) { + this.metricsSubIntervalCount = metricsSubIntervalCnt; + + return this; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java new file mode 100644 index 0000000000000..bd314abcdb314 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java @@ -0,0 +1,882 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.configuration; + +import java.io.Serializable; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.internal.processors.cache.persistence.file.AsyncFileIOFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory; +import org.apache.ignite.internal.util.typedef.internal.A; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * A durable memory configuration for an Apache Ignite node. The durable memory is a manageable off-heap based memory + * architecture that divides all expandable data regions into pages of fixed size + * (see {@link DataStorageConfiguration#getPageSize()}). An individual page can store one or many cache key-value entries + * that allows reusing the memory in the most efficient way and avoid memory fragmentation issues. + *
        + * <p>
        + * By default, the durable memory allocates a single expandable data region with default settings. All the caches that + * will be configured in an application will be mapped to this data region by default, thus, all the cache data will + * reside in that data region. Parameters of default data region can be changed by setting + * {@link DataStorageConfiguration#setDefaultDataRegionConfiguration(DataRegionConfiguration)}. + * Other data regions (except default) can be configured with + * {@link DataStorageConfiguration#setDataRegionConfigurations(DataRegionConfiguration...)}. + *
        + * <p>
        + * Data region can be used in memory-only mode, or in persistent mode, when memory is used as a caching layer for disk. + * Persistence for data region can be turned on with {@link DataRegionConfiguration#setPersistenceEnabled(boolean)} + * flag. To learn more about data regions refer to {@link DataRegionConfiguration} documentation. + *
        + * <p>
        + * Sample configuration below shows how to make a 5 GB data region the default one for Apache Ignite:
        + * <p>
        + * <pre>
        + *     {@code
        + *     <property name="dataStorageConfiguration">
        + *         <bean class="org.apache.ignite.configuration.DataStorageConfiguration">
        + *             <property name="defaultDataRegionConfiguration">
        + *
        + *                 <bean class="org.apache.ignite.configuration.DataRegionConfiguration">
        + *                     <property name="name" value="Default_Region"/>
        + *                     <property name="maxSize" value="#{5L * 1024 * 1024 * 1024}"/>
        + *                 </bean>
        + *             </property>
        + *         </bean>
        + *     </property>
        + *     }
        + * </pre>
        + */ +public class DataStorageConfiguration implements Serializable { + /** */ + private static final long serialVersionUID = 0L; + + /** Default data region start size (256 MB). */ + @SuppressWarnings("UnnecessaryBoxing") + public static final long DFLT_DATA_REGION_INITIAL_SIZE = 256L * 1024 * 1024; + + /** Fraction of available memory to allocate for default DataRegion. */ + private static final double DFLT_DATA_REGION_FRACTION = 0.2; + + /** Default data region's size is 20% of physical memory available on current machine. */ + public static final long DFLT_DATA_REGION_MAX_SIZE = Math.max( + (long)(DFLT_DATA_REGION_FRACTION * U.getTotalMemoryAvailable()), + DFLT_DATA_REGION_INITIAL_SIZE); + + /** Default initial size of a memory chunk for the system cache (40 MB). */ + private static final long DFLT_SYS_CACHE_INIT_SIZE = 40 * 1024 * 1024; + + /** Default max size of a memory chunk for the system cache (100 MB). */ + private static final long DFLT_SYS_CACHE_MAX_SIZE = 100 * 1024 * 1024; + + /** Default memory page size. */ + public static final int DFLT_PAGE_SIZE = 4 * 1024; + + /** This name is assigned to default Dataregion if no user-defined default MemPlc is specified */ + public static final String DFLT_DATA_REG_DEFAULT_NAME = "default"; + + /** */ + public static final int DFLT_CHECKPOINT_FREQ = 180000; + + /** Lock default wait time, 10 sec. */ + public static final int DFLT_LOCK_WAIT_TIME = 10 * 1000; + + /** */ + public static final boolean DFLT_METRICS_ENABLED = false; + + /** Default amount of sub intervals to calculate rate-based metric. */ + public static final int DFLT_SUB_INTERVALS = 5; + + /** Default length of interval over which rate-based metric is calculated. */ + public static final int DFLT_RATE_TIME_INTERVAL_MILLIS = 60_000; + + /** Default number of checkpoint threads. */ + public static final int DFLT_CHECKPOINT_THREADS = 4; + + /** Default checkpoint write order. 
*/ + public static final CheckpointWriteOrder DFLT_CHECKPOINT_WRITE_ORDER = CheckpointWriteOrder.SEQUENTIAL; + + /** Default number of checkpoints to be kept in WAL after checkpoint is finished */ + public static final int DFLT_WAL_HISTORY_SIZE = 20; + + /** */ + public static final int DFLT_WAL_SEGMENTS = 10; + + /** Default WAL file segment size, 64MBytes */ + public static final int DFLT_WAL_SEGMENT_SIZE = 64 * 1024 * 1024; + + /** Default wal mode. */ + public static final WALMode DFLT_WAL_MODE = WALMode.DEFAULT; + + /** Default thread local buffer size. */ + public static final int DFLT_TLB_SIZE = 128 * 1024; + + /** Default Wal flush frequency. */ + public static final int DFLT_WAL_FLUSH_FREQ = 2000; + + /** Default wal fsync delay. */ + public static final int DFLT_WAL_FSYNC_DELAY = 1000; + + /** Default wal record iterator buffer size. */ + public static final int DFLT_WAL_RECORD_ITERATOR_BUFFER_SIZE = 64 * 1024 * 1024; + + /** Default wal always write full pages. */ + public static final boolean DFLT_WAL_ALWAYS_WRITE_FULL_PAGES = false; + + /** Default wal directory. */ + public static final String DFLT_WAL_PATH = "db/wal"; + + /** Default wal archive directory. */ + public static final String DFLT_WAL_ARCHIVE_PATH = "db/wal/archive"; + + /** Default write throttling enabled. */ + public static final boolean DFLT_WRITE_THROTTLING_ENABLED = false; + + /** Size of a memory chunk reserved for system cache initially. */ + private long sysRegionInitSize = DFLT_SYS_CACHE_INIT_SIZE; + + /** Maximum size of system cache. */ + private long sysCacheMaxSize = DFLT_SYS_CACHE_MAX_SIZE; + + /** Memory page size. */ + private int pageSize; + + /** Concurrency level. */ + private int concLvl; + + /** Configuration of default data region. */ + private DataRegionConfiguration dfltDataRegConf = new DataRegionConfiguration(); + + /** Data regions. */ + private DataRegionConfiguration[] dataRegions; + + /** Directory where index and partition files are stored. 
*/ + private String storagePath; + + /** Checkpoint frequency. */ + private long checkpointFreq = DFLT_CHECKPOINT_FREQ; + + /** Lock wait time, in milliseconds. */ + private long lockWaitTime = DFLT_LOCK_WAIT_TIME; + + /** */ + private long checkpointPageBufSize; + + /** */ + private int checkpointThreads = DFLT_CHECKPOINT_THREADS; + + /** Checkpoint write order. */ + private CheckpointWriteOrder checkpointWriteOrder = DFLT_CHECKPOINT_WRITE_ORDER; + + /** Number of checkpoints to keep */ + private int walHistSize = DFLT_WAL_HISTORY_SIZE; + + /** Number of work WAL segments. */ + private int walSegments = DFLT_WAL_SEGMENTS; + + /** Size of one WAL segment in bytes. 64 Mb is used by default. Maximum value is 2Gb */ + private int walSegmentSize = DFLT_WAL_SEGMENT_SIZE; + + /** Directory where WAL is stored (work directory) */ + private String walPath = DFLT_WAL_PATH; + + /** WAL archive path. */ + private String walArchivePath = DFLT_WAL_ARCHIVE_PATH; + + /** Metrics enabled flag. */ + private boolean metricsEnabled = DFLT_METRICS_ENABLED; + + /** Wal mode. */ + private WALMode walMode = DFLT_WAL_MODE; + + /** WAl thread local buffer size. */ + private int walTlbSize = DFLT_TLB_SIZE; + + /** Wal flush frequency in milliseconds. */ + private long walFlushFreq = DFLT_WAL_FLUSH_FREQ; + + /** Wal fsync delay. */ + private long walFsyncDelay = DFLT_WAL_FSYNC_DELAY; + + /** Wal record iterator buffer size. */ + private int walRecordIterBuffSize = DFLT_WAL_RECORD_ITERATOR_BUFFER_SIZE; + + /** Always write full pages. */ + private boolean alwaysWriteFullPages = DFLT_WAL_ALWAYS_WRITE_FULL_PAGES; + + /** Factory to provide I/O interface for files */ + private FileIOFactory fileIOFactory = + IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_USE_ASYNC_FILE_IO_FACTORY, false) ? 
+ new AsyncFileIOFactory() : new RandomAccessFileIOFactory(); + + /** + * Number of sub-intervals the whole {@link #setMetricsRateTimeInterval(long)} will be split into to calculate + * rate-based metrics. + *
        + * <p>
        + * Setting it to a bigger value will result in more precise calculation and smaller drops of + * rate-based metrics when next sub-interval has to be recycled but introduces bigger + * calculation overhead. + */ + private int metricsSubIntervalCount = DFLT_SUB_INTERVALS; + + /** Time interval (in milliseconds) for rate-based metrics. */ + private long metricsRateTimeInterval = DFLT_RATE_TIME_INTERVAL_MILLIS; + + /** + * Time interval (in milliseconds) for running auto archiving for incompletely WAL segment + */ + private long walAutoArchiveAfterInactivity = -1; + + /** + * If true, threads that generate dirty pages too fast during ongoing checkpoint will be throttled. + */ + private boolean writeThrottlingEnabled = DFLT_WRITE_THROTTLING_ENABLED; + + /** + * Initial size of a data region reserved for system cache. + * + * @return Size in bytes. + */ + public long getSystemRegionInitialSize() { + return sysRegionInitSize; + } + + /** + * Sets initial size of a data region reserved for system cache. + * + * Default value is {@link #DFLT_SYS_CACHE_INIT_SIZE} + * + * @param sysRegionInitSize Size in bytes. + * + * @return {@code this} for chaining. + */ + public DataStorageConfiguration setSystemRegionInitialSize(long sysRegionInitSize) { + A.ensure(sysCacheMaxSize > 0, "System region initial size can not be less zero."); + + this.sysRegionInitSize = sysRegionInitSize; + + return this; + } + + /** + * Maximum data region size reserved for system cache. + * + * @return Size in bytes. + */ + public long getSystemRegionMaxSize() { + return sysCacheMaxSize; + } + + /** + * Sets maximum data region size reserved for system cache. The total size should not be less than 10 MB + * due to internal data structures overhead. + * + * @param sysCacheMaxSize Maximum size in bytes for system cache data region. + * + * @return {@code this} for chaining. 
+ */ + public DataStorageConfiguration setSystemRegionMaxSize(long sysCacheMaxSize) { + A.ensure(sysCacheMaxSize > 0, "System cache max size can not be less zero."); + + this.sysCacheMaxSize = sysCacheMaxSize; + + return this; + } + + /** + * The page memory consists of one or more expandable data regions defined by {@link DataRegionConfiguration}. + * Every data region is split on pages of fixed size that store actual cache entries. + * + * @return Page size in bytes. + */ + public int getPageSize() { + return pageSize; + } + + /** + * Changes the page size. + * + * @param pageSize Page size in bytes. If value is not set (or zero), {@link #DFLT_PAGE_SIZE} will be used. + */ + public DataStorageConfiguration setPageSize(int pageSize) { + if (pageSize != 0) { + A.ensure(pageSize >= 1024 && pageSize <= 16 * 1024, "Page size must be between 1kB and 16kB."); + A.ensure(U.isPow2(pageSize), "Page size must be a power of 2."); + } + + this.pageSize = pageSize; + + return this; + } + + /** + * Gets an array of all data regions configured. Apache Ignite will instantiate a dedicated data region per + * region. An Apache Ignite cache can be mapped to a specific region with + * {@link CacheConfiguration#setDataRegionName(String)} method. + * + * @return Array of configured data regions. + */ + public DataRegionConfiguration[] getDataRegionConfigurations() { + return dataRegions; + } + + /** + * Sets data regions configurations. + * + * @param dataRegionConfigurations Data regions configurations. + */ + public DataStorageConfiguration setDataRegionConfigurations(DataRegionConfiguration... dataRegionConfigurations) { + this.dataRegions = dataRegionConfigurations; + + return this; + } + + /** + * Returns the number of concurrent segments in Ignite internal page mapping tables. By default equals + * to the number of available CPUs. + * + * @return Mapping table concurrency level. 
+ */ + public int getConcurrencyLevel() { + return concLvl; + } + + /** + * Sets the number of concurrent segments in Ignite internal page mapping tables. + * + * @param concLvl Mapping table concurrency level. + */ + public DataStorageConfiguration setConcurrencyLevel(int concLvl) { + this.concLvl = concLvl; + + return this; + } + + /** + * @return Configuration of default data region. All cache groups will reside in this data region by default. + * For assigning a custom data region to cache group, use {@link CacheConfiguration#setDataRegionName(String)}. + */ + public DataRegionConfiguration getDefaultDataRegionConfiguration() { + return dfltDataRegConf; + } + + /** + * Overrides configuration of default data region which is created automatically. + * @param dfltDataRegConf Default data region configuration. + */ + public DataStorageConfiguration setDefaultDataRegionConfiguration(DataRegionConfiguration dfltDataRegConf) { + this.dfltDataRegConf = dfltDataRegConf; + + return this; + } + + /** + * Returns a path the root directory where the Persistent Store will persist data and indexes. + */ + public String getStoragePath() { + return storagePath; + } + + /** + * Sets a path to the root directory where the Persistent Store will persist data and indexes. + * By default the Persistent Store's files are located under Ignite work directory. + * + * @param persistenceStorePath Persistence store path. + */ + public DataStorageConfiguration setStoragePath(String persistenceStorePath) { + this.storagePath = persistenceStorePath; + + return this; + } + + /** + * Gets checkpoint frequency. + * + * @return checkpoint frequency in milliseconds. + */ + public long getCheckpointFrequency() { + return checkpointFreq <= 0 ? DFLT_CHECKPOINT_FREQ : checkpointFreq; + } + + /** + * Sets the checkpoint frequency which is a minimal interval when the dirty pages will be written + * to the Persistent Store. If the rate is high, checkpoint will be triggered more frequently. 
+ * + * @param checkpointFreq checkpoint frequency in milliseconds. + * @return {@code this} for chaining. + */ + public DataStorageConfiguration setCheckpointFrequency(long checkpointFreq) { + this.checkpointFreq = checkpointFreq; + + return this; + } + + /** + * Gets amount of memory allocated for a checkpoint temporary buffer. + * + * @return Checkpoint page buffer size in bytes or {@code 0} for Ignite + * to choose the buffer size automatically. + */ + public long getCheckpointPageBufferSize() { + return checkpointPageBufSize; + } + + /** + * Sets amount of memory allocated for the checkpoint temporary buffer. The buffer is used to create temporary + * copies of pages that are being written to disk and being update in parallel while the checkpoint is in + * progress. + * + * @param checkpointPageBufSize Checkpoint page buffer size in bytes or {@code 0} for Ignite to + * choose the buffer size automatically. + * @return {@code this} for chaining. + */ + public DataStorageConfiguration setCheckpointPageBufferSize(long checkpointPageBufSize) { + this.checkpointPageBufSize = checkpointPageBufSize; + + return this; + } + + + /** + * Gets a number of threads to use for the checkpoint purposes. + * + * @return Number of checkpoint threads. + */ + public int getCheckpointThreads() { + return checkpointThreads; + } + + /** + * Sets a number of threads to use for the checkpoint purposes. + * + * @param checkpointThreads Number of checkpoint threads. Four threads are used by default. + * @return {@code this} for chaining. + */ + public DataStorageConfiguration setCheckpointThreads(int checkpointThreads) { + this.checkpointThreads = checkpointThreads; + + return this; + } + + /** + * Timeout in milliseconds to wait when acquiring persistence store lock file before failing the local node. + * + * @return Lock wait time in milliseconds. 
+ */ + public long getLockWaitTime() { + return lockWaitTime; + } + + /** + * Timeout in milliseconds to wait when acquiring persistence store lock file before failing the local node. + * + * @param lockWaitTime Lock wait time in milliseconds. + * @return {@code this} for chaining. + */ + public DataStorageConfiguration setLockWaitTime(long lockWaitTime) { + this.lockWaitTime = lockWaitTime; + + return this; + } + + /** + * Gets a total number of checkpoints to keep in the WAL history. + * + * @return Number of checkpoints to keep in WAL after a checkpoint is finished. + */ + public int getWalHistorySize() { + return walHistSize <= 0 ? DFLT_WAL_HISTORY_SIZE : walHistSize; + } + + /** + * Sets a total number of checkpoints to keep in the WAL history. + * + * @param walHistSize Number of checkpoints to keep after a checkpoint is finished. + * @return {@code this} for chaining. + */ + public DataStorageConfiguration setWalHistorySize(int walHistSize) { + this.walHistSize = walHistSize; + + return this; + } + + /** + * Gets a number of WAL segments to work with. + * + * @return Number of work WAL segments. + */ + public int getWalSegments() { + return walSegments <= 0 ? DFLT_WAL_SEGMENTS : walSegments; + } + + /** + * Sets a number of WAL segments to work with. For performance reasons, + * the whole WAL is split into files of fixed length called segments. + * + * @param walSegments Number of WAL segments. + * @return {@code this} for chaining. + */ + public DataStorageConfiguration setWalSegments(int walSegments) { + this.walSegments = walSegments; + + return this; + } + + /** + * Gets size of a WAL segment in bytes. + * + * @return WAL segment size. + */ + public int getWalSegmentSize() { + return walSegmentSize <= 0 ? DFLT_WAL_SEGMENT_SIZE : walSegmentSize; + } + + /** + * Sets size of a WAL segment. + * + * @param walSegmentSize WAL segment size. 64 MB is used by default. Maximum value is 2Gb + * @return {@code this} for chaining. 
+ */ + public DataStorageConfiguration setWalSegmentSize(int walSegmentSize) { + this.walSegmentSize = walSegmentSize; + + return this; + } + + /** + * Gets a path to the directory where WAL is stored. + * + * @return WAL persistence path, absolute or relative to Ignite work directory. + */ + public String getWalPath() { + return walPath; + } + + /** + * Sets a path to the directory where WAL is stored. If this path is relative, it will be resolved + * relatively to Ignite work directory. + * + * @param walStorePath WAL persistence path, absolute or relative to Ignite work directory. + * @return {@code this} for chaining. + */ + public DataStorageConfiguration setWalPath(String walStorePath) { + this.walPath = walStorePath; + + return this; + } + + /** + * Gets a path to the WAL archive directory. + * + * @return WAL archive directory. + */ + public String getWalArchivePath() { + return walArchivePath; + } + + /** + * Sets a path for the WAL archive directory. Every WAL segment will be fully copied to this directory before + * it can be reused for WAL purposes. + * + * @param walArchivePath WAL archive directory. + * @return {@code this} for chaining. + */ + public DataStorageConfiguration setWalArchivePath(String walArchivePath) { + this.walArchivePath = walArchivePath; + + return this; + } + + /** + * Gets flag indicating whether persistence metrics collection is enabled. + * Default value is {@link #DFLT_METRICS_ENABLED}. + * + * @return Metrics enabled flag. + */ + public boolean isMetricsEnabled() { + return metricsEnabled; + } + + /** + * Sets flag indicating whether persistence metrics collection is enabled. + * + * @param metricsEnabled Metrics enabled flag. + */ + public DataStorageConfiguration setMetricsEnabled(boolean metricsEnabled) { + this.metricsEnabled = metricsEnabled; + + return this; + } + + /** + * Gets flag indicating whether write throttling is enabled. 
+ */ + public boolean isWriteThrottlingEnabled() { + return writeThrottlingEnabled; + } + + /** + * Sets flag indicating whether write throttling is enabled. + * + * @param writeThrottlingEnabled Write throttling enabled flag. + */ + public DataStorageConfiguration setWriteThrottlingEnabled(boolean writeThrottlingEnabled) { + this.writeThrottlingEnabled = writeThrottlingEnabled; + + return this; + } + + /** + * Gets the length of the time interval for rate-based metrics. This interval defines a window over which + * hits will be tracked. Default value is {@link #DFLT_RATE_TIME_INTERVAL_MILLIS}. + * + * @return Time interval in milliseconds. + */ + public long getMetricsRateTimeInterval() { + return metricsRateTimeInterval; + } + + /** + * Sets the length of the time interval for rate-based metrics. This interval defines a window over which + * hits will be tracked. + * + * @param metricsRateTimeInterval Time interval in milliseconds. + */ + public DataStorageConfiguration setMetricsRateTimeInterval(long metricsRateTimeInterval) { + this.metricsRateTimeInterval = metricsRateTimeInterval; + + return this; + } + + /** + * Gets the number of sub-intervals to split the {@link #getMetricsRateTimeInterval()} into to track the update history. + * Default value is {@link #DFLT_SUB_INTERVALS}. + * + * @return The number of sub-intervals for history tracking. + */ + public int getMetricsSubIntervalCount() { + return metricsSubIntervalCount; + } + + /** + * Sets the number of sub-intervals to split the {@link #getMetricsRateTimeInterval()} into to track the update history. + * + * @param metricsSubIntervalCnt The number of sub-intervals for history tracking. + */ + public DataStorageConfiguration setMetricsSubIntervalCount(int metricsSubIntervalCnt) { + this.metricsSubIntervalCount = metricsSubIntervalCnt; + + return this; + } + + /** + * Property that defines behavior of wal fsync. + * Different type provides different guarantees for consistency. 
See {@link WALMode} for details. + * + * @return WAL mode. + */ + public WALMode getWalMode() { + return walMode == null ? DFLT_WAL_MODE : walMode; + } + + /** + * Sets property that defines behavior of wal fsync. + * Different type provides different guarantees for consistency. See {@link WALMode} for details. + * + * @param walMode Wal mode. + */ + public DataStorageConfiguration setWalMode(WALMode walMode) { + this.walMode = walMode; + + return this; + } + + /** + * Property for size of thread local buffer. + * Each thread which write to wal have thread local buffer for serialize recode before write in wal. + * + * @return Thread local buffer size (in bytes). + */ + public int getWalThreadLocalBufferSize() { + return walTlbSize <= 0 ? DFLT_TLB_SIZE : walTlbSize; + } + + /** + * Sets size of thread local buffer. + * Each thread which write to wal have thread local buffer for serialize recode before write in wal. + * + * @param walTlbSize Thread local buffer size (in bytes). + */ + public DataStorageConfiguration setWalThreadLocalBufferSize(int walTlbSize) { + this.walTlbSize = walTlbSize; + + return this; + } + + /** + * This property define how often WAL will be fsync-ed in {@code BACKGROUND} mode. Ignored for + * all other WAL modes. + * + * @return WAL flush frequency, in milliseconds. + */ + public long getWalFlushFrequency() { + return walFlushFreq; + } + + /** + * This property define how often WAL will be fsync-ed in {@code BACKGROUND} mode. Ignored for + * all other WAL modes. + * + * @param walFlushFreq WAL flush frequency, in milliseconds. + */ + public DataStorageConfiguration setWalFlushFrequency(long walFlushFreq) { + this.walFlushFreq = walFlushFreq; + + return this; + } + + /** + * Property that allows to trade latency for throughput in {@link WALMode#DEFAULT} mode. + * It limits minimum time interval between WAL fsyncs. 
First thread that initiates WAL fsync will wait for + * this number of nanoseconds, another threads will just wait fsync of first thread (similar to CyclicBarrier). + * Total throughput should increase under load as total WAL fsync rate will be limited. + */ + public long getWalFsyncDelayNanos() { + return walFsyncDelay <= 0 ? DFLT_WAL_FSYNC_DELAY : walFsyncDelay; + } + + /** + * Sets property that allows to trade latency for throughput in {@link WALMode#DEFAULT} mode. + * It limits minimum time interval between WAL fsyncs. First thread that initiates WAL fsync will wait for + * this number of nanoseconds, another threads will just wait fsync of first thread (similar to CyclicBarrier). + * Total throughput should increase under load as total WAL fsync rate will be limited. + * + * @param walFsyncDelayNanos Wal fsync delay, in nanoseconds. + */ + public DataStorageConfiguration setWalFsyncDelayNanos(long walFsyncDelayNanos) { + walFsyncDelay = walFsyncDelayNanos; + + return this; + } + + /** + * Property define how many bytes iterator read from + * disk (for one reading), during go ahead wal. + * + * @return Record iterator buffer size. + */ + public int getWalRecordIteratorBufferSize() { + return walRecordIterBuffSize <= 0 ? DFLT_WAL_RECORD_ITERATOR_BUFFER_SIZE : walRecordIterBuffSize; + } + + /** + * Sets property defining how many bytes iterator read from + * disk (for one reading), during go ahead wal. + * + * @param walRecordIterBuffSize Wal record iterator buffer size. + */ + public DataStorageConfiguration setWalRecordIteratorBufferSize(int walRecordIterBuffSize) { + this.walRecordIterBuffSize = walRecordIterBuffSize; + + return this; + } + + /** + * Gets flag that enforces writing full page to WAL on every change (instead of delta record). + * Can be used for debugging purposes: every version of page will be present in WAL. + * Note that WAL will take several times more space in this mode. 
+ */ + public boolean isAlwaysWriteFullPages() { + return alwaysWriteFullPages; + } + + /** + * Sets flag that enforces writing full page to WAL on every change (instead of delta record). + * Can be used for debugging purposes: every version of page will be present in WAL. + * Note that WAL will take several times more space in this mode. + * + * @param alwaysWriteFullPages Always write full pages flag. + */ + public DataStorageConfiguration setAlwaysWriteFullPages(boolean alwaysWriteFullPages) { + this.alwaysWriteFullPages = alwaysWriteFullPages; + + return this; + } + + /** + * Factory to provide implementation of FileIO interface + * which is used for any file read/write operations + * + * @return File I/O factory + */ + public FileIOFactory getFileIOFactory() { + return fileIOFactory; + } + + /** + * Sets factory to provide implementation of FileIO interface + * which is used for any file read/write operations + * + * @param fileIOFactory File I/O factory + */ + public DataStorageConfiguration setFileIOFactory(FileIOFactory fileIOFactory) { + this.fileIOFactory = fileIOFactory; + + return this; + } + + /** + * Note: setting this value with {@link WALMode#DEFAULT} may generate file size overhead for WAL segments in case + * grid is used rarely. + * + * @param walAutoArchiveAfterInactivity time in millis to run auto archiving segment (even if incomplete) after last + * record logging.
        Positive value enables incomplete segment archiving after timeout (inactivity).
        Zero or + * negative value disables auto archiving. + * @return current configuration instance for chaining + */ + public DataStorageConfiguration setWalAutoArchiveAfterInactivity(long walAutoArchiveAfterInactivity) { + this.walAutoArchiveAfterInactivity = walAutoArchiveAfterInactivity; + + return this; + } + + /** + * @return time in millis to run auto archiving WAL segment (even if incomplete) after last record log + */ + public long getWalAutoArchiveAfterInactivity() { + return walAutoArchiveAfterInactivity; + } + + /** + * This property defines order of writing pages to disk storage during checkpoint. + * + * @return Checkpoint write order. + */ + public CheckpointWriteOrder getCheckpointWriteOrder() { + return checkpointWriteOrder; + } + + /** + * This property defines order of writing pages to disk storage during checkpoint. + * + * @param checkpointWriteOrder Checkpoint write order. + */ + public DataStorageConfiguration setCheckpointWriteOrder(CheckpointWriteOrder checkpointWriteOrder) { + this.checkpointWriteOrder = checkpointWriteOrder; + + return this; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java index a79d4360d5185..fc1fb6b4fe378 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java @@ -457,11 +457,16 @@ public class IgniteConfiguration { private ExecutorConfiguration[] execCfgs; /** Page memory configuration. */ + @Deprecated private MemoryConfiguration memCfg; /** Persistence store configuration. */ + @Deprecated private PersistentStoreConfiguration pstCfg; + /** Page memory configuration. */ + private DataStorageConfiguration dsCfg; + /** Active on start flag. 
*/ private boolean activeOnStart = DFLT_ACTIVE_ON_START; @@ -510,6 +515,7 @@ public IgniteConfiguration(IgniteConfiguration cfg) { allResolversPassReq = cfg.isAllSegmentationResolversPassRequired(); atomicCfg = cfg.getAtomicConfiguration(); binaryCfg = cfg.getBinaryConfiguration(); + dsCfg = cfg.getDataStorageConfiguration(); memCfg = cfg.getMemoryConfiguration(); pstCfg = cfg.getPersistentStoreConfiguration(); cacheCfg = cfg.getCacheConfiguration(); @@ -2157,6 +2163,29 @@ public IgniteConfiguration setBinaryConfiguration(BinaryConfiguration binaryCfg) * * @return Memory configuration. */ + public DataStorageConfiguration getDataStorageConfiguration() { + return dsCfg; + } + + /** + * Sets durable memory configuration. + * + * @param dsCfg Data storage configuration. + * @return {@code this} for chaining. + */ + public IgniteConfiguration setDataStorageConfiguration(DataStorageConfiguration dsCfg) { + this.dsCfg = dsCfg; + + return this; + } + + /** + * Gets page memory configuration. + * + * @return Memory configuration. + * @deprecated Use {@link DataStorageConfiguration} instead. + */ + @Deprecated public MemoryConfiguration getMemoryConfiguration() { return memCfg; } @@ -2166,7 +2195,9 @@ public MemoryConfiguration getMemoryConfiguration() { * * @param memCfg Memory configuration. * @return {@code this} for chaining. + * @deprecated Use {@link DataStorageConfiguration} instead. */ + @Deprecated public IgniteConfiguration setMemoryConfiguration(MemoryConfiguration memCfg) { this.memCfg = memCfg; @@ -2177,14 +2208,20 @@ public IgniteConfiguration setMemoryConfiguration(MemoryConfiguration memCfg) { * Gets persistence configuration used by Apache Ignite Persistent Store. * * @return Persistence configuration. + * + * @deprecated Part of old API. Use {@link DataStorageConfiguration} for configuring persistence instead. 
*/ + @Deprecated public PersistentStoreConfiguration getPersistentStoreConfiguration() { return pstCfg; } /** - * @return Flag {@code true} if persistent enable, {@code false} if disable. + * @return Flag {@code true} if persistence is enabled, {@code false} if disabled. + * + * @deprecated Part of legacy configuration API. Doesn't work if new configuration API is used. */ + @Deprecated public boolean isPersistentStoreEnabled() { return pstCfg != null; } @@ -2194,7 +2231,10 @@ public boolean isPersistentStoreEnabled() { * * @param pstCfg Persistence configuration. * @return {@code this} for chaining. + * + * @deprecated Part of old API. Use {@link DataStorageConfiguration} for configuring persistence instead. */ + @Deprecated public IgniteConfiguration setPersistentStoreConfiguration(PersistentStoreConfiguration pstCfg) { this.pstCfg = pstCfg; @@ -2208,7 +2248,7 @@ public IgniteConfiguration setPersistentStoreConfiguration(PersistentStoreConfig *

        * Default value is {@link #DFLT_ACTIVE_ON_START}. *

        - * This flag is ignored when {@link PersistentStoreConfiguration} is present: + * This flag is ignored when {@link DataStorageConfiguration} is present: * cluster is always inactive on start when Ignite Persistence is enabled. * * @return Active on start flag value. @@ -2221,7 +2261,7 @@ public boolean isActiveOnStart() { * Sets flag indicating whether the cluster will be active on start. This value should be the same on all * nodes in the cluster. *

        - * This flag is ignored when {@link PersistentStoreConfiguration} is present: + * This flag is ignored when {@link DataStorageConfiguration} is present: * cluster is always inactive on start when Ignite Persistence is enabled. * * @param activeOnStart Active on start flag value. diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/MemoryConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/MemoryConfiguration.java index 9ba26c8d64c62..c3d4e744ed7ea 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/MemoryConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/MemoryConfiguration.java @@ -57,7 +57,10 @@ * * } * + * + * @deprecated Use {@link DataStorageConfiguration} instead. */ +@Deprecated public class MemoryConfiguration implements Serializable { /** */ private static final long serialVersionUID = 0L; @@ -66,7 +69,7 @@ public class MemoryConfiguration implements Serializable { @SuppressWarnings("UnnecessaryBoxing") public static final long DFLT_MEMORY_POLICY_INITIAL_SIZE = 256L * 1024 * 1024; - /** Fraction of available memory to allocate for default MemoryPolicy. */ + /** Fraction of available memory to allocate for default DataRegion. */ private static final double DFLT_MEMORY_POLICY_FRACTION = 0.2; /** Default memory policy's size is 20% of physical memory available on current machine. */ @@ -83,7 +86,7 @@ public class MemoryConfiguration implements Serializable { /** Default memory page size. */ public static final int DFLT_PAGE_SIZE = 4 * 1024; - /** This name is assigned to default MemoryPolicy if no user-defined default MemPlc is specified */ + /** This name is assigned to default DataRegion if no user-defined default MemPlc is specified */ public static final String DFLT_MEM_PLC_DEFAULT_NAME = "default"; /** Size of a memory chunk reserved for system cache initially. 
*/ @@ -101,7 +104,7 @@ public class MemoryConfiguration implements Serializable { /** A name of the memory policy that defines the default memory region. */ private String dfltMemPlcName = DFLT_MEM_PLC_DEFAULT_NAME; - /** Size of memory (in bytes) to use for default MemoryPolicy. */ + /** Size of memory (in bytes) to use for default DataRegion. */ private long dfltMemPlcSize = DFLT_MEMORY_POLICY_MAX_SIZE; /** Memory policies. */ diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/MemoryPolicyConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/MemoryPolicyConfiguration.java index dff8b2b2ec490..efe7ae2abae23 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/MemoryPolicyConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/MemoryPolicyConfiguration.java @@ -19,7 +19,7 @@ import java.io.Serializable; import org.apache.ignite.MemoryMetrics; import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; -import org.apache.ignite.mxbean.MemoryMetricsMXBean; +import org.apache.ignite.mxbean.DataRegionMetricsMXBean; import static org.apache.ignite.configuration.MemoryConfiguration.DFLT_MEM_PLC_DEFAULT_NAME; @@ -60,7 +60,10 @@ * * } * + * + * @deprecated Use {@link DataRegionConfiguration} instead. */ +@Deprecated public final class MemoryPolicyConfiguration implements Serializable { /** */ private static final long serialVersionUID = 0L; @@ -120,6 +123,11 @@ public final class MemoryPolicyConfiguration implements Serializable { */ private long rateTimeInterval = DFLT_RATE_TIME_INTERVAL_MILLIS; + /** + * Flag to enable Ignite Native Persistence. + */ + private boolean persistenceEnabled = true; + /** * Gets memory policy name. * @@ -290,7 +298,7 @@ public MemoryPolicyConfiguration setEmptyPagesPoolSize(int emptyPagesPoolSize) { /** * Gets whether memory metrics are enabled by default on node startup. 
Memory metrics can be enabled and disabled - * at runtime via memory metrics {@link MemoryMetricsMXBean MX bean}. + * at runtime via memory metrics {@link DataRegionMetricsMXBean MX bean}. * * @return Metrics enabled flag. */ @@ -300,7 +308,7 @@ public boolean isMetricsEnabled() { /** * Sets memory metrics enabled flag. If this flag is {@code true}, metrics will be enabled on node startup. - * Memory metrics can be enabled and disabled at runtime via memory metrics {@link MemoryMetricsMXBean MX bean}. + * Memory metrics can be enabled and disabled at runtime via memory metrics {@link DataRegionMetricsMXBean MX bean}. * * @param metricsEnabled Metrics enabled flag. * @return {@code this} for chaining. @@ -311,6 +319,24 @@ public MemoryPolicyConfiguration setMetricsEnabled(boolean metricsEnabled) { return this; } + /** + * Gets whether Ignite Native Persistence is enabled for this memory policy. + * + * @return Persistence enabled flag. + */ + public boolean isPersistenceEnabled() { + return persistenceEnabled; + } + + /** + * Sets persistence enabled flag. + * + * @param persistenceEnabled Persistence enabled flag. + */ + public void setPersistenceEnabled(boolean persistenceEnabled) { + this.persistenceEnabled = persistenceEnabled; + } + /** * Gets time interval for {@link MemoryMetrics#getAllocationRate()} * and {@link MemoryMetrics#getEvictionRate()} monitoring purposes. diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/PersistentStoreConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/PersistentStoreConfiguration.java index c44e92d6621cf..c41721a5f9450 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/PersistentStoreConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/PersistentStoreConfiguration.java @@ -25,7 +25,9 @@ /** * Configures Apache Ignite Persistent store. + * @deprecated Use {@link DataStorageConfiguration} instead. 
*/ +@Deprecated public class PersistentStoreConfiguration implements Serializable { /** */ private static final long serialVersionUID = 0L; @@ -144,7 +146,7 @@ public class PersistentStoreConfiguration implements Serializable { /** Factory to provide I/O interface for files */ private FileIOFactory fileIOFactory = IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_USE_ASYNC_FILE_IO_FACTORY, false) ? - new AsyncFileIOFactory() : new RandomAccessFileIOFactory(); + new AsyncFileIOFactory() : new RandomAccessFileIOFactory(); /** * Number of sub-intervals the whole {@link #setRateTimeInterval(long)} will be split into to calculate diff --git a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java index 7bd27fe3cd9fd..794a262a8bee7 100644 --- a/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/igfs/IgfsMetrics.java @@ -17,6 +17,8 @@ package org.apache.ignite.igfs; +import org.apache.ignite.configuration.DataRegionConfiguration; + /** * {@code IGFS} metrics snapshot for the file system. Note, that some metrics are global and * some are local (i.e. per each node). @@ -33,7 +35,7 @@ public interface IgfsMetrics { /** * Gets maximum amount of data that can be stored on local node. This metrics is related to - * to the {@link org.apache.ignite.configuration.MemoryPolicyConfiguration#getMaxSize()} of the IGFS data cache. + * to the {@link DataRegionConfiguration#getMaxSize()} of the IGFS data cache. * * @return Maximum IGFS local space size. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java index 759bf643af94c..8a71e1a15b0f4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java @@ -49,6 +49,10 @@ import javax.cache.CacheException; import javax.management.JMException; import javax.management.ObjectName; +import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.DataRegionMetricsAdapter; +import org.apache.ignite.DataStorageMetrics; +import org.apache.ignite.DataStorageMetricsAdapter; import org.apache.ignite.IgniteAtomicLong; import org.apache.ignite.IgniteAtomicReference; import org.apache.ignite.IgniteAtomicSequence; @@ -85,6 +89,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.CollectionConfiguration; import org.apache.ignite.configuration.ConnectorConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; @@ -114,7 +119,7 @@ import org.apache.ignite.internal.processors.cache.IgniteCacheProxy; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl; -import org.apache.ignite.internal.processors.cache.persistence.MemoryPolicy; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.processors.closure.GridClosureProcessor; @@ -176,6 +181,7 @@ import 
org.apache.ignite.lifecycle.LifecycleBean; import org.apache.ignite.lifecycle.LifecycleEventType; import org.apache.ignite.marshaller.MarshallerExclusions; +import org.apache.ignite.marshaller.jdk.JdkMarshaller; import org.apache.ignite.mxbean.ClusterLocalNodeMetricsMXBean; import org.apache.ignite.mxbean.IgniteMXBean; import org.apache.ignite.mxbean.StripedExecutorMXBean; @@ -214,6 +220,7 @@ import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_CLIENT_MODE; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_CONSISTENCY_CHECK_SKIPPED; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_DAEMON; +import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_DATA_STORAGE_CONFIG; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_DATA_STREAMER_POOL_SIZE; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_DEPLOYMENT_MODE; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IGNITE_INSTANCE_NAME; @@ -231,10 +238,10 @@ import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_MARSHALLER_USE_DFLT_SUID; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_MEMORY_CONFIG; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_NODE_CONSISTENT_ID; +import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_OFFHEAP_SIZE; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_PEER_CLASSLOADING; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_PHY_RAM; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_PREFIX; -import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_OFFHEAP_SIZE; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_RESTART_ENABLED; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_REST_PORT_RANGE; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_SPI_CLASS; @@ -1245,10 +1252,10 @@ private long checkPoolStarvation( int 
loadedPages = 0; - Collection policies = ctx.cache().context().database().memoryPolicies(); + Collection policies = ctx.cache().context().database().dataRegions(); if (!F.isEmpty(policies)) { - for (MemoryPolicy memPlc : policies) + for (DataRegion memPlc : policies) loadedPages += memPlc.pageMemory().loadedPages(); } @@ -1424,7 +1431,7 @@ private void checkPhysicalRam() { if (total > safeToUse) { U.quietAndWarn(log, "Nodes started on local machine require more than 80% of physical RAM what can " + - "lead to significant slowdown due to swapping (please decrease JVM heap size, memory policy " + + "lead to significant slowdown due to swapping (please decrease JVM heap size, data region " + "size or checkpoint buffer size) [required=" + (total >> 20) + "MB, available=" + (ram >> 20) + "MB]"); } @@ -1604,8 +1611,8 @@ private void fillNodeAttributes(boolean notifyEnabled) throws IgniteCheckedExcep if (cfg.getConnectorConfiguration() != null) add(ATTR_REST_PORT_RANGE, cfg.getConnectorConfiguration().getPortRange()); - // Save database configuration. - add(ATTR_MEMORY_CONFIG, cfg.getMemoryConfiguration()); + // Save data storage configuration. + addDataStorageConfigurationAttributes(); // Save transactions configuration. add(ATTR_TX_CONFIG, cfg.getTransactionConfiguration()); @@ -1632,6 +1639,25 @@ private void fillNodeAttributes(boolean notifyEnabled) throws IgniteCheckedExcep } } + /** + * + */ + private void addDataStorageConfigurationAttributes() throws IgniteCheckedException { + MemoryConfiguration memCfg = cfg.getMemoryConfiguration(); + + // Save legacy memory configuration if it's present. + if (memCfg != null) { + // Page size initialization is suspended, see IgniteCacheDatabaseSharedManager#checkPageSize. + // We should copy initialized value from new configuration. + memCfg.setPageSize(cfg.getDataStorageConfiguration().getPageSize()); + + add(ATTR_MEMORY_CONFIG, memCfg); + } + + // Save data storage configuration. 
+ add(ATTR_DATA_STORAGE_CONFIG, new JdkMarshaller().marshal(cfg.getDataStorageConfiguration())); + } + /** * Add SPI version and class attributes into node attributes. * @@ -2509,14 +2535,14 @@ private void ackRebalanceConfiguration() throws IgniteCheckedException { * */ private void ackMemoryConfiguration() { - MemoryConfiguration memCfg = cfg.getMemoryConfiguration(); + DataStorageConfiguration memCfg = cfg.getDataStorageConfiguration(); if (memCfg == null) return; - U.log(log, "System cache's MemoryPolicy size is configured to " + - (memCfg.getSystemCacheInitialSize() / (1024 * 1024)) + " MB. " + - "Use MemoryConfiguration.systemCacheMemorySize property to change the setting."); + U.log(log, "System cache's DataRegion size is configured to " + + (memCfg.getSystemRegionInitialSize() / (1024 * 1024)) + " MB. " + + "Use DataStorageConfiguration.systemCacheMemorySize property to change the setting."); } /** @@ -2535,12 +2561,12 @@ private void ackCacheConfiguration() { for (CacheConfiguration c : cacheCfgs) { String cacheName = U.maskName(c.getName()); - String memPlcName = c.getMemoryPolicyName(); + String memPlcName = c.getDataRegionName(); if (CU.isSystemCache(cacheName)) memPlcName = "sysMemPlc"; - else if (memPlcName == null && cfg.getMemoryConfiguration() != null) - memPlcName = cfg.getMemoryConfiguration().getDefaultMemoryPolicyName(); + else if (memPlcName == null && cfg.getDataStorageConfiguration() != null) + memPlcName = cfg.getDataStorageConfiguration().getDefaultDataRegionConfiguration().getName(); if (!memPlcNamesMapping.containsKey(memPlcName)) memPlcNamesMapping.put(memPlcName, new ArrayList()); @@ -2551,7 +2577,7 @@ else if (memPlcName == null && cfg.getMemoryConfiguration() != null) } for (Map.Entry> e : memPlcNamesMapping.entrySet()) { - sb.a("in '").a(e.getKey()).a("' memoryPolicy: ["); + sb.a("in '").a(e.getKey()).a("' dataRegion: ["); for (String s : e.getValue()) sb.a("'").a(s).a("', "); @@ -3509,7 +3535,7 @@ public IgniteInternalFuture 
getOrCreateCacheAsync(String cacheName, boolean c } /** {@inheritDoc} */ - @Override public Collection memoryMetrics() { + @Override public Collection dataRegionMetrics() { guard(); try { @@ -3521,7 +3547,7 @@ public IgniteInternalFuture getOrCreateCacheAsync(String cacheName, boolean c } /** {@inheritDoc} */ - @Nullable @Override public MemoryMetrics memoryMetrics(String memPlcName) { + @Nullable @Override public DataRegionMetrics dataRegionMetrics(String memPlcName) { guard(); try { @@ -3533,7 +3559,7 @@ public IgniteInternalFuture getOrCreateCacheAsync(String cacheName, boolean c } /** {@inheritDoc} */ - @Override public PersistenceMetrics persistentStoreMetrics() { + @Override public DataStorageMetrics dataStorageMetrics() { guard(); try { @@ -3544,6 +3570,21 @@ public IgniteInternalFuture getOrCreateCacheAsync(String cacheName, boolean c } } + /** {@inheritDoc} */ + @Override public Collection memoryMetrics() { + return DataRegionMetricsAdapter.collectionOf(dataRegionMetrics()); + } + + /** {@inheritDoc} */ + @Nullable @Override public MemoryMetrics memoryMetrics(String memPlcName) { + return DataRegionMetricsAdapter.valueOf(dataRegionMetrics(memPlcName)); + } + + /** {@inheritDoc} */ + @Override public PersistenceMetrics persistentStoreMetrics() { + return DataStorageMetricsAdapter.valueOf(dataStorageMetrics()); + } + /** {@inheritDoc} */ @Nullable @Override public IgniteAtomicSequence atomicSequence(String name, long initVal, boolean create) { return atomicSequence(name, null, initVal, create); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java index 024f3395ad5fd..277ed79d9401f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java @@ -184,8 +184,12 @@ public final class IgniteNodeAttributes { public static final 
String ATTR_DATA_STREAMER_POOL_SIZE = ATTR_PREFIX + ".data.streamer.pool.size"; /** Memory configuration. */ + @Deprecated public static final String ATTR_MEMORY_CONFIG = ATTR_PREFIX + ".memory"; + /** Data storage configuration. */ + public static final String ATTR_DATA_STORAGE_CONFIG = ATTR_PREFIX + ".data.storage.config"; + /** * Enforces singleton. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java index 07a5c43d7a80c..36257e274dc5e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java @@ -57,11 +57,15 @@ import org.apache.ignite.compute.ComputeJob; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.ConnectorConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.DeploymentMode; import org.apache.ignite.configuration.ExecutorConfiguration; import org.apache.ignite.configuration.FileSystemConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.TransactionConfiguration; import org.apache.ignite.internal.binary.BinaryMarshaller; import org.apache.ignite.internal.managers.communication.GridIoPolicy; @@ -123,6 +127,8 @@ import static org.apache.ignite.cache.CacheRebalanceMode.SYNC; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; import static org.apache.ignite.configuration.IgniteConfiguration.DFLT_THREAD_KEEP_ALIVE_TIME; +import static 
org.apache.ignite.configuration.MemoryConfiguration.DFLT_MEMORY_POLICY_MAX_SIZE; +import static org.apache.ignite.configuration.MemoryConfiguration.DFLT_MEM_PLC_DEFAULT_NAME; import static org.apache.ignite.internal.IgniteComponentType.SPRING; import static org.apache.ignite.plugin.segmentation.SegmentationPolicy.RESTART_JVM; @@ -2183,15 +2189,27 @@ private IgniteConfiguration initializeConfiguration(IgniteConfiguration cfg) myCfg.setExecutorConfiguration(clone); } - if (!myCfg.isClientMode() && myCfg.getMemoryConfiguration() == null) { - MemoryConfiguration memCfg = new MemoryConfiguration(); + initializeDataStorageConfiguration(myCfg); - memCfg.setConcurrencyLevel(Runtime.getRuntime().availableProcessors() * 4); + return myCfg; + } - myCfg.setMemoryConfiguration(memCfg); + /** + * @param cfg Ignite configuration. + */ + private void initializeDataStorageConfiguration(IgniteConfiguration cfg) throws IgniteCheckedException { + if (cfg.getDataStorageConfiguration() != null && + (cfg.getMemoryConfiguration() != null || cfg.getPersistentStoreConfiguration() != null)) { + throw new IgniteCheckedException("Data storage can be configured with either legacy " + + "(MemoryConfiguration, PersistentStoreConfiguration) or new (DataStorageConfiguration) classes, " + + "but not both."); } - return myCfg; + if (cfg.getMemoryConfiguration() != null || cfg.getPersistentStoreConfiguration() != null) + convertLegacyDataStorageConfigurationToNew(cfg); + + if (!cfg.isClientMode() && cfg.getDataStorageConfiguration() == null) + cfg.setDataStorageConfiguration(new DataStorageConfiguration()); } /** @@ -2755,4 +2773,108 @@ public void setCounter(int cnt) { } } } + + /** + * @param cfg Ignite Configuration with legacy data storage configuration. 
+ */ + private static void convertLegacyDataStorageConfigurationToNew( + IgniteConfiguration cfg) throws IgniteCheckedException { + boolean persistenceEnabled = cfg.getPersistentStoreConfiguration() != null; + + DataStorageConfiguration dsCfg = new DataStorageConfiguration(); + + MemoryConfiguration memCfg = cfg.getMemoryConfiguration() != null ? + cfg.getMemoryConfiguration() : new MemoryConfiguration(); + + dsCfg.setConcurrencyLevel(memCfg.getConcurrencyLevel()); + dsCfg.setPageSize(memCfg.getPageSize()); + dsCfg.setSystemRegionInitialSize(memCfg.getSystemCacheInitialSize()); + dsCfg.setSystemRegionMaxSize(memCfg.getSystemCacheMaxSize()); + + List optionalDataRegions = new ArrayList<>(); + + boolean customDfltPlc = false; + + if (memCfg.getMemoryPolicies() != null) { + for (MemoryPolicyConfiguration mpc : memCfg.getMemoryPolicies()) { + DataRegionConfiguration region = new DataRegionConfiguration(); + + region.setPersistenceEnabled(persistenceEnabled); + + if (mpc.getInitialSize() != 0L) + region.setInitialSize(mpc.getInitialSize()); + + region.setEmptyPagesPoolSize(mpc.getEmptyPagesPoolSize()); + region.setEvictionThreshold(mpc.getEvictionThreshold()); + region.setMaxSize(mpc.getMaxSize()); + region.setName(mpc.getName()); + region.setPageEvictionMode(mpc.getPageEvictionMode()); + region.setMetricsRateTimeInterval(mpc.getRateTimeInterval()); + region.setMetricsSubIntervalCount(mpc.getSubIntervals()); + region.setSwapPath(mpc.getSwapFilePath()); + region.setMetricsEnabled(mpc.isMetricsEnabled()); + + if (mpc.getName() == null) { + throw new IgniteCheckedException(new IllegalArgumentException( + "User-defined MemoryPolicyConfiguration must have non-null and non-empty name.")); + } + + if (mpc.getName().equals(memCfg.getDefaultMemoryPolicyName())) { + customDfltPlc = true; + + dsCfg.setDefaultDataRegionConfiguration(region); + } else + optionalDataRegions.add(region); + } + } + + if (!optionalDataRegions.isEmpty()) + 
dsCfg.setDataRegionConfigurations(optionalDataRegions.toArray(new DataRegionConfiguration[optionalDataRegions.size()])); + + if (!customDfltPlc) { + if (!DFLT_MEM_PLC_DEFAULT_NAME.equals(memCfg.getDefaultMemoryPolicyName())) { + throw new IgniteCheckedException(new IllegalArgumentException("User-defined default MemoryPolicy " + + "name must be presented among configured MemoryPolices: " + memCfg.getDefaultMemoryPolicyName())); + } + + dsCfg.setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setMaxSize(memCfg.getDefaultMemoryPolicySize()) + .setName(memCfg.getDefaultMemoryPolicyName()) + .setPersistenceEnabled(persistenceEnabled)); + } else { + if (memCfg.getDefaultMemoryPolicySize() != DFLT_MEMORY_POLICY_MAX_SIZE) + throw new IgniteCheckedException(new IllegalArgumentException("User-defined MemoryPolicy " + + "configuration and defaultMemoryPolicySize properties are set at the same time.")); + } + + if (persistenceEnabled) { + PersistentStoreConfiguration psCfg = cfg.getPersistentStoreConfiguration(); + + dsCfg.setCheckpointFrequency(psCfg.getCheckpointingFrequency()); + dsCfg.setCheckpointPageBufferSize(psCfg.getCheckpointingPageBufferSize()); + dsCfg.setCheckpointThreads(psCfg.getCheckpointingThreads()); + dsCfg.setCheckpointWriteOrder(psCfg.getCheckpointWriteOrder()); + dsCfg.setFileIOFactory(psCfg.getFileIOFactory()); + dsCfg.setLockWaitTime(psCfg.getLockWaitTime()); + dsCfg.setStoragePath(psCfg.getPersistentStorePath()); + dsCfg.setMetricsRateTimeInterval(psCfg.getRateTimeInterval()); + dsCfg.setMetricsSubIntervalCount(psCfg.getSubIntervals()); + dsCfg.setWalThreadLocalBufferSize(psCfg.getTlbSize()); + dsCfg.setWalArchivePath(psCfg.getWalArchivePath()); + dsCfg.setWalAutoArchiveAfterInactivity(psCfg.getWalAutoArchiveAfterInactivity()); + dsCfg.setWalFlushFrequency(psCfg.getWalFlushFrequency()); + dsCfg.setWalFsyncDelayNanos(psCfg.getWalFsyncDelayNanos()); + dsCfg.setWalHistorySize(psCfg.getWalHistorySize()); + 
dsCfg.setWalMode(psCfg.getWalMode()); + dsCfg.setWalRecordIteratorBufferSize(psCfg.getWalRecordIteratorBufferSize()); + dsCfg.setWalSegments(psCfg.getWalSegments()); + dsCfg.setWalSegmentSize(psCfg.getWalSegmentSize()); + dsCfg.setWalPath(psCfg.getWalStorePath()); + dsCfg.setAlwaysWriteFullPages(psCfg.isAlwaysWriteFullPages()); + dsCfg.setMetricsEnabled(psCfg.isMetricsEnabled()); + dsCfg.setWriteThrottlingEnabled(psCfg.isWriteThrottlingEnabled()); + } + + cfg.setDataStorageConfiguration(dsCfg); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java index f57bda710da17..1e5c3708d7422 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java @@ -48,6 +48,7 @@ import org.apache.ignite.internal.processors.marshaller.MarshallerMappingTransport; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.marshaller.MarshallerContext; import org.apache.ignite.plugin.PluginProvider; @@ -506,7 +507,7 @@ public void onMarshallerProcessorStarted( closProc = ctx.closure(); clientNode = ctx.clientNode(); - if (ctx.config().isPersistentStoreEnabled()) + if (CU.isPersistenceEnabled(ctx.config())) fileStore.restoreMappings(this); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java index 14485d2625f27..a3b157d75a1a6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java @@ -53,8 +53,8 @@ import org.apache.ignite.cluster.ClusterMetrics; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.events.Event; import org.apache.ignite.internal.ClusterMetricsSnapshot; @@ -1533,32 +1533,21 @@ private long requiredOffheap() { if(ctx.config().isClientMode()) return 0; - MemoryConfiguration memCfg = ctx.config().getMemoryConfiguration(); + DataStorageConfiguration memCfg = ctx.config().getDataStorageConfiguration(); assert memCfg != null; - long res = memCfg.getSystemCacheMaxSize(); + long res = memCfg.getSystemRegionMaxSize(); // Add memory policies. - MemoryPolicyConfiguration[] memPlcCfgs = memCfg.getMemoryPolicies(); + DataRegionConfiguration[] dataRegions = memCfg.getDataRegionConfigurations(); - if (memPlcCfgs != null) { - String dfltMemPlcName = memCfg.getDefaultMemoryPolicyName(); - - boolean customDflt = false; - - for (MemoryPolicyConfiguration memPlcCfg : memPlcCfgs) { - if(F.eq(dfltMemPlcName, memPlcCfg.getName())) - customDflt = true; - - res += memPlcCfg.getMaxSize(); - } - - if(!customDflt) - res += memCfg.getDefaultMemoryPolicySize(); + if (dataRegions != null) { + for (DataRegionConfiguration dataReg : dataRegions) + res += dataReg.getMaxSize(); } - else - res += memCfg.getDefaultMemoryPolicySize(); + + res += memCfg.getDefaultDataRegionConfiguration().getMaxSize(); // Add persistence (if any). 
res += GridCacheDatabaseSharedManager.checkpointBufferSize(ctx.config()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java index 8f146dc0bc1b5..6ba68c28e71a6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java @@ -27,14 +27,14 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.IgniteSystemProperties; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.mem.DirectMemoryProvider; import org.apache.ignite.internal.mem.DirectMemoryRegion; import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.util.GridUnsafe; import org.apache.ignite.internal.util.IgniteUtils; @@ -125,11 +125,11 @@ public class PageMemoryNoStoreImpl implements PageMemory { /** Direct memory allocator. */ private final DirectMemoryProvider directMemoryProvider; - /** Name of MemoryPolicy this PageMemory is associated with. */ - private final MemoryPolicyConfiguration memoryPolicyCfg; + /** Name of DataRegion this PageMemory is associated with. */ + private final DataRegionConfiguration dataRegionCfg; /** Object to collect memory usage metrics. 
*/ - private final MemoryMetricsImpl memMetrics; + private final DataRegionMetricsImpl memMetrics; /** */ private AtomicLong freePageListHead = new AtomicLong(INVALID_REL_PTR); @@ -163,7 +163,7 @@ public class PageMemoryNoStoreImpl implements PageMemory { * @param directMemoryProvider Memory allocator to use. * @param sharedCtx Cache shared context. * @param pageSize Page size. - * @param memPlcCfg Memory Policy configuration. + * @param dataRegionCfg Data region configuration. * @param memMetrics Memory Metrics. * @param trackAcquiredPages If {@code true} tracks number of allocated pages (for tests purpose only). */ @@ -172,8 +172,8 @@ public PageMemoryNoStoreImpl( DirectMemoryProvider directMemoryProvider, GridCacheSharedContext sharedCtx, int pageSize, - MemoryPolicyConfiguration memPlcCfg, - MemoryMetricsImpl memMetrics, + DataRegionConfiguration dataRegionCfg, + DataRegionMetricsImpl memMetrics, boolean trackAcquiredPages ) { assert log != null || sharedCtx != null; @@ -183,21 +183,21 @@ public PageMemoryNoStoreImpl( this.directMemoryProvider = directMemoryProvider; this.trackAcquiredPages = trackAcquiredPages; this.memMetrics = memMetrics; - memoryPolicyCfg = memPlcCfg; + this.dataRegionCfg = dataRegionCfg; sysPageSize = pageSize + PAGE_OVERHEAD; assert sysPageSize % 8 == 0 : sysPageSize; - totalPages = (int)(memPlcCfg.getMaxSize() / sysPageSize); + totalPages = (int)(dataRegionCfg.getMaxSize() / sysPageSize); rwLock = new OffheapReadWriteLock(lockConcLvl); } /** {@inheritDoc} */ @Override public void start() throws IgniteException { - long startSize = memoryPolicyCfg.getInitialSize(); - long maxSize = memoryPolicyCfg.getMaxSize(); + long startSize = dataRegionCfg.getInitialSize(); + long maxSize = dataRegionCfg.getMaxSize(); long[] chunks = new long[SEG_CNT]; @@ -290,9 +290,9 @@ public PageMemoryNoStoreImpl( if (relPtr == INVALID_REL_PTR) throw new IgniteOutOfMemoryException("Not enough memory allocated " + - "(consider increasing memory policy size or 
enabling evictions) " + - "[policyName=" + memoryPolicyCfg.getName() + - ", size=" + U.readableSize(memoryPolicyCfg.getMaxSize(), true) + "]" + "(consider increasing data region size or enabling evictions) " + + "[policyName=" + dataRegionCfg.getName() + + ", size=" + U.readableSize(dataRegionCfg.getMaxSize(), true) + "]" ); assert (relPtr & ~PageIdUtils.PAGE_IDX_MASK) == 0 : U.hexLong(relPtr & ~PageIdUtils.PAGE_IDX_MASK); @@ -615,7 +615,7 @@ private synchronized Segment addSegment(Segment[] oldRef) { if (oldRef != null) { if (log.isInfoEnabled()) - log.info("Allocated next memory segment [plcName=" + memoryPolicyCfg.getName() + + log.info("Allocated next memory segment [plcName=" + dataRegionCfg.getName() + ", chunkSize=" + U.readableSize(region.size(), true) + ']'); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java index a413adea04665..eaaa24d19ba61 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java @@ -2592,7 +2592,9 @@ void clear() { * @param sql SQL flag. 
*/ private void saveCacheConfiguration(CacheConfiguration cfg, boolean sql) { - if (cctx.pageStore() != null && cctx.database().persistenceEnabled() && !cctx.kernalContext().clientNode()) { + if (cctx.pageStore() != null && cctx.database().persistenceEnabled() && + CU.isPersistentCache(cfg, cctx.gridConfig().getDataStorageConfiguration()) && + !cctx.kernalContext().clientNode()) { try { StoredCacheData data = new StoredCacheData(cfg); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java index 5e5e02e74c4e0..18acacf8bc5d6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java @@ -42,8 +42,8 @@ import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopologyImpl; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPreloader; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager; -import org.apache.ignite.internal.processors.cache.persistence.MemoryPolicy; import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.query.continuous.CounterSkipContext; @@ -126,7 +126,7 @@ public class CacheGroupContext { private GridCachePreloader preldr; /** */ - private final MemoryPolicy memPlc; + private final DataRegion dataRegion; /** */ private final CacheObjectContext cacheObjCtx; @@ -150,7 +150,7 @@ public class CacheGroupContext { * @param cacheType Cache type. 
* @param ccfg Cache configuration. * @param affNode Affinity node flag. - * @param memPlc Memory policy. + * @param dataRegion data region. * @param cacheObjCtx Cache object context. * @param freeList Free list. * @param reuseList Reuse list. @@ -163,13 +163,13 @@ public class CacheGroupContext { CacheType cacheType, CacheConfiguration ccfg, boolean affNode, - MemoryPolicy memPlc, + DataRegion dataRegion, CacheObjectContext cacheObjCtx, FreeList freeList, ReuseList reuseList, AffinityTopologyVersion locStartVer) { assert ccfg != null; - assert memPlc != null || !affNode; + assert dataRegion != null || !affNode; assert grpId != 0 : "Invalid group ID [cache=" + ccfg.getName() + ", grpName=" + ccfg.getGroupName() + ']'; this.grpId = grpId; @@ -177,7 +177,7 @@ public class CacheGroupContext { this.ctx = ctx; this.ccfg = ccfg; this.affNode = affNode; - this.memPlc = memPlc; + this.dataRegion = dataRegion; this.cacheObjCtx = cacheObjCtx; this.freeList = freeList; this.reuseList = reuseList; @@ -188,7 +188,7 @@ public class CacheGroupContext { depEnabled = ctx.kernalContext().deploy().enabled() && !ctx.kernalContext().cacheObjects().isBinaryEnabled(ccfg); - storeCacheId = affNode && memPlc.config().getPageEvictionMode() != DataPageEvictionMode.DISABLED; + storeCacheId = affNode && dataRegion.config().getPageEvictionMode() != DataPageEvictionMode.DISABLED; log = ctx.kernalContext().log(getClass()); @@ -523,10 +523,10 @@ public GridCacheSharedContext shared() { } /** - * @return Memory policy. + * @return data region. 
*/ - public MemoryPolicy memoryPolicy() { - return memPlc; + public DataRegion dataRegion() { + return dataRegion; } /** @@ -862,7 +862,7 @@ public void start() throws IgniteCheckedException { else preldr = new GridCachePreloaderAdapter(this); - if (ctx.kernalContext().config().getPersistentStoreConfiguration() != null) { + if (persistenceEnabled()) { try { offheapMgr = new GridCacheOffheapManager(); } @@ -878,6 +878,13 @@ public void start() throws IgniteCheckedException { ctx.affinity().onCacheGroupCreated(this); } + /** + * @return Persistence enabled flag. + */ + public boolean persistenceEnabled() { + return dataRegion != null && dataRegion.config().isPersistenceEnabled(); + } + /** * @param nodeId Node ID. * @param req Request. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupData.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupData.java index 99b7b1e321a7a..617db567d8b0c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupData.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupData.java @@ -59,14 +59,18 @@ public class CacheGroupData implements Serializable { /** */ private long flags; + /** Persistence enabled flag. */ + private final boolean persistenceEnabled; + /** * @param cacheCfg Cache configuration. * @param grpName Group name. - * @param grpId Group ID. + * @param grpId Group ID. * @param rcvdFrom Node ID cache group received from. * @param startTopVer Start version for dynamically started group. * @param deploymentId Deployment ID. * @param caches Cache group caches. + * @param persistenceEnabled Persistence enabled flag. 
*/ CacheGroupData( CacheConfiguration cacheCfg, @@ -76,7 +80,8 @@ public class CacheGroupData implements Serializable { @Nullable AffinityTopologyVersion startTopVer, IgniteUuid deploymentId, Map caches, - long flags) { + long flags, + boolean persistenceEnabled) { assert cacheCfg != null; assert grpId != 0 : cacheCfg.getName(); assert deploymentId != null : cacheCfg.getName(); @@ -89,6 +94,7 @@ public class CacheGroupData implements Serializable { this.deploymentId = deploymentId; this.caches = caches; this.flags = flags; + this.persistenceEnabled = persistenceEnabled; } /** @@ -140,6 +146,13 @@ Map caches() { return caches; } + /** + * @return Persistence enabled flag. + */ + public boolean persistenceEnabled() { + return persistenceEnabled; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(CacheGroupData.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupDescriptor.java index 20301a60b0ee2..86e330e47a6dd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupDescriptor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupDescriptor.java @@ -58,14 +58,18 @@ public class CacheGroupDescriptor { /** */ private AffinityTopologyVersion rcvdFromVer; + /** Persistence enabled flag. */ + private final boolean persistenceEnabled; + /** * @param cacheCfg Cache configuration. * @param grpName Group name. - * @param grpId Group ID. + * @param grpId Group ID. * @param rcvdFrom Node ID cache group received from. * @param startTopVer Start version for dynamically started group. * @param deploymentId Deployment ID. * @param caches Cache group caches. + * @param persistenceEnabled Persistence enabled flag. 
*/ CacheGroupDescriptor( CacheConfiguration cacheCfg, @@ -74,7 +78,8 @@ public class CacheGroupDescriptor { UUID rcvdFrom, @Nullable AffinityTopologyVersion startTopVer, IgniteUuid deploymentId, - Map caches) { + Map caches, + boolean persistenceEnabled) { assert cacheCfg != null; assert grpId != 0; @@ -85,6 +90,7 @@ public class CacheGroupDescriptor { this.deploymentId = deploymentId; this.cacheCfg = new CacheConfiguration<>(cacheCfg); this.caches = caches; + this.persistenceEnabled = persistenceEnabled; } /** @@ -202,7 +208,7 @@ void receivedFromStartVersion(AffinityTopologyVersion rcvdFromVer) { * @param otherDesc CacheGroup descriptor that must be merged with this one. */ void mergeWith(CacheGroupDescriptor otherDesc) { - assert otherDesc != null && otherDesc.config() != null: otherDesc; + assert otherDesc != null && otherDesc.config() != null : otherDesc; CacheConfiguration otherCfg = otherDesc.config(); @@ -221,6 +227,13 @@ void mergeWith(CacheGroupDescriptor otherDesc) { return startTopVer; } + /** + * @return Persistence enabled flag. 
+ */ + public boolean persistenceEnabled() { + return persistenceEnabled; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(CacheGroupDescriptor.class, this, "cacheName", cacheCfg.getName()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java index b4cc9c550eec1..8382821237aa3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java @@ -912,7 +912,8 @@ private CacheNodeCommonDiscoveryData collectCommonDiscoveryData() { grpDesc.startTopologyVersion(), grpDesc.deploymentId(), grpDesc.caches(), - 0); + 0, + grpDesc.persistenceEnabled()); cacheGrps.put(grpDesc.groupId(), grpData); } @@ -990,7 +991,8 @@ public void onGridDataReceived(DiscoveryDataBag.GridDiscoveryData data) { grpData.receivedFrom(), grpData.startTopologyVersion(), grpData.deploymentId(), - grpData.caches()); + grpData.caches(), + grpData.persistenceEnabled()); if (locCacheGrps.containsKey(grpDesc.groupId())) { CacheGroupDescriptor locGrpCfg = locCacheGrps.get(grpDesc.groupId()); @@ -1508,7 +1510,8 @@ private CacheGroupDescriptor registerCacheGroup( rcvdFrom, curTopVer != null ? 
curTopVer.nextMinorVersion() : null, deploymentId, - caches); + caches, + CU.isPersistentCache(startedCacheCfg, ctx.config().getDataStorageConfiguration())); CacheGroupDescriptor old = registeredCacheGrps.put(grpId, grpDesc); @@ -1560,8 +1563,8 @@ private void validateCacheGroupConfiguration(CacheConfiguration cfg, CacheConfig CU.validateCacheGroupsAttributesMismatch(log, cfg, startCfg, "nodeFilter", "Node filter", attr1.nodeFilterClassName(), attr2.nodeFilterClassName(), true); - CU.validateCacheGroupsAttributesMismatch(log, cfg, startCfg, "memoryPolicyName", "Memory policy", - cfg.getMemoryPolicyName(), startCfg.getMemoryPolicyName(), true); + CU.validateCacheGroupsAttributesMismatch(log, cfg, startCfg, "dataRegionName", "Data region", + cfg.getDataRegionName(), startCfg.getDataRegionName(), true); CU.validateCacheGroupsAttributesMismatch(log, cfg, startCfg, "topologyValidator", "Topology validator", attr1.topologyValidatorClassName(), attr2.topologyValidatorClassName(), true); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java index 6d9f0d32c1ab8..92e40b01fb176 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java @@ -58,7 +58,6 @@ import org.apache.ignite.cache.CachePeekMode; import org.apache.ignite.cache.affinity.Affinity; import org.apache.ignite.cluster.ClusterGroup; -import org.apache.ignite.cluster.ClusterGroupEmptyException; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.cluster.ClusterTopologyException; import org.apache.ignite.compute.ComputeJob; @@ -2024,7 +2023,7 @@ else if (storeEnabled) GridCacheEntryEx entry = null; try { - ctx.shared().database().ensureFreeSpace(ctx.memoryPolicy()); + 
ctx.shared().database().ensureFreeSpace(ctx.dataRegion()); entry = entryEx(key); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java index 120007fd5e142..34d3c97137c0d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java @@ -71,7 +71,7 @@ import org.apache.ignite.internal.processors.cache.dr.GridCacheDrManager; import org.apache.ignite.internal.processors.cache.jta.CacheJtaManagerAdapter; import org.apache.ignite.internal.processors.cache.local.GridLocalCache; -import org.apache.ignite.internal.processors.cache.persistence.MemoryPolicy; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager; import org.apache.ignite.internal.processors.cache.query.continuous.CacheContinuousQueryManager; import org.apache.ignite.internal.processors.cache.store.CacheStoreManager; @@ -736,10 +736,10 @@ public String name() { } /** - * @return Memory policy. + * @return Data region. 
*/ - public MemoryPolicy memoryPolicy() { - return grp.memoryPolicy(); + public DataRegion dataRegion() { + return grp.dataRegion(); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java index 54b8dc35a57eb..0bdade6510113 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java @@ -48,7 +48,7 @@ import org.apache.ignite.internal.processors.cache.extras.GridCacheTtlEntryExtras; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter; -import org.apache.ignite.internal.processors.cache.persistence.MemoryPolicy; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.query.continuous.CacheContinuousQueryListener; import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry; @@ -2543,7 +2543,7 @@ protected final boolean hasValueUnlocked() { boolean update; - boolean walEnabled = !cctx.isNear() && cctx.shared().wal() != null; + boolean walEnabled = !cctx.isNear() && cctx.group().persistenceEnabled(); if (cctx.shared().database().persistenceEnabled()) { unswap(false); @@ -3204,7 +3204,7 @@ protected void logUpdate(GridCacheOperation op, CacheObject val, GridCacheVersio assert cctx.atomic(); try { - if (cctx.shared().wal() != null) + if (cctx.group().persistenceEnabled()) cctx.shared().wal().log(new DataRecord(new DataEntry( cctx.cacheId(), key, @@ -3326,13 +3326,13 @@ protected void removeValue() throws IgniteCheckedException { } /** - * Evicts necessary number of data pages if per-page eviction is 
configured in current {@link MemoryPolicy}. + * Evicts necessary number of data pages if per-page eviction is configured in current {@link DataRegion}. */ private void ensureFreeSpace() throws IgniteCheckedException { // Deadlock alert: evicting data page causes removing (and locking) all entries on the page one by one. assert !Thread.holdsLock(this); - cctx.shared().database().ensureFreeSpace(cctx.memoryPolicy()); + cctx.shared().database().ensureFreeSpace(cctx.dataRegion()); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java index f3759e0db0dc4..ad8f74a71bd57 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java @@ -49,6 +49,7 @@ import org.apache.ignite.cache.store.CacheStoreSessionListener; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.DeploymentMode; import org.apache.ignite.configuration.FileSystemConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; @@ -88,7 +89,7 @@ import org.apache.ignite.internal.processors.cache.local.atomic.GridLocalAtomicCache; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; -import org.apache.ignite.internal.processors.cache.persistence.MemoryPolicy; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList; import 
org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager; @@ -139,6 +140,7 @@ import org.apache.ignite.lifecycle.LifecycleAware; import org.apache.ignite.marshaller.Marshaller; import org.apache.ignite.marshaller.MarshallerUtils; +import org.apache.ignite.marshaller.jdk.JdkMarshaller; import org.apache.ignite.mxbean.IgniteMBeanAware; import org.apache.ignite.spi.IgniteNodeValidationResult; import org.apache.ignite.spi.discovery.DiscoveryDataBag; @@ -350,7 +352,7 @@ else if (task instanceof ClientCacheUpdateTimeout) { * @return {@code true} if cache is starting on client node and this node is affinity node for the cache. */ private boolean storesLocallyOnClient(IgniteConfiguration c, CacheConfiguration cc) { - if (c.isClientMode() && c.getMemoryConfiguration() == null) { + if (c.isClientMode() && c.getDataStorageConfiguration() == null) { if (cc.getCacheMode() == LOCAL) return true; @@ -385,8 +387,8 @@ private void validate(IgniteConfiguration c, } if (storesLocallyOnClient(c, cc)) - throw new IgniteCheckedException("MemoryPolicy for client caches must be explicitly configured " + - "on client node startup. Use MemoryConfiguration to configure MemoryPolicy."); + throw new IgniteCheckedException("DataRegion for client caches must be explicitly configured " + + "on client node startup. 
Use DataStorageConfiguration to configure DataRegion."); if (cc.getCacheMode() == LOCAL && !cc.getAffinity().getClass().equals(LocalAffinityFunction.class)) U.warn(log, "AffinityFunction configuration parameter will be ignored for local cache [cacheName=" + @@ -679,8 +681,8 @@ private void addCacheOnJoin(CacheConfiguration cfg, boolean sql, CacheType cacheType = cacheType(cacheName); - if (cacheType != CacheType.USER && cfg.getMemoryPolicyName() == null) - cfg.setMemoryPolicyName(sharedCtx.database().systemMemoryPolicyName()); + if (cacheType != CacheType.USER && cfg.getDataRegionName() == null) + cfg.setDataRegionName(sharedCtx.database().systemDateRegionName()); if (!cacheType.userCache()) stopSeq.addLast(cacheName); @@ -1112,12 +1114,10 @@ private void startCache(GridCacheAdapter cache, QuerySchema schema) throws cacheCtx.onStarted(); - String memPlcName = cfg.getMemoryPolicyName(); - - if (memPlcName == null - && ctx.config().getMemoryConfiguration() != null) - memPlcName = ctx.config().getMemoryConfiguration().getDefaultMemoryPolicyName(); + String memPlcName = cfg.getDataRegionName(); + if (memPlcName == null && ctx.config().getDataStorageConfiguration() != null) + memPlcName = ctx.config().getDataStorageConfiguration().getDefaultDataRegionConfiguration().getName(); if (log.isInfoEnabled()) { log.info("Started cache [name=" + cfg.getName() + @@ -1841,9 +1841,9 @@ private CacheGroupContext startCacheGroup( throws IgniteCheckedException { CacheConfiguration cfg = new CacheConfiguration(desc.config()); - String memPlcName = cfg.getMemoryPolicyName(); + String memPlcName = cfg.getDataRegionName(); - MemoryPolicy memPlc = sharedCtx.database().memoryPolicy(memPlcName); + DataRegion memPlc = sharedCtx.database().dataRegion(memPlcName); FreeList freeList = sharedCtx.database().freeList(memPlcName); ReuseList reuseList = sharedCtx.database().reuseList(memPlcName); @@ -2188,7 +2188,7 @@ private GridCacheSharedContext createSharedContext(GridKernalContext kernalCtx, 
IgnitePageStoreManager pageStoreMgr = null; IgniteWriteAheadLogManager walMgr = null; - if (ctx.config().isPersistentStoreEnabled() && !ctx.clientNode()) { + if (CU.isPersistenceEnabled(ctx.config()) && !ctx.clientNode()) { if (ctx.clientNode()) { U.warn(log, "Persistent Store is not supported on client nodes (Persistent Store's" + " configuration will be ignored)."); @@ -3062,15 +3062,32 @@ private void checkMemoryConfiguration(ClusterNode rmt) throws IgniteCheckedExcep if (ctx.config().isClientMode() || locNode.isDaemon() || rmt.isClient() || rmt.isDaemon()) return; - MemoryConfiguration memCfg = rmt.attribute(IgniteNodeAttributes.ATTR_MEMORY_CONFIG); + DataStorageConfiguration dsCfg = null; + + Object dsCfgBytes = rmt.attribute(IgniteNodeAttributes.ATTR_DATA_STORAGE_CONFIG); + + if (dsCfgBytes instanceof byte[]) + dsCfg = new JdkMarshaller().unmarshal((byte[])dsCfgBytes, U.resolveClassLoader(ctx.config())); + + if (dsCfg == null) { + // Try to use legacy memory configuration. + MemoryConfiguration memCfg = rmt.attribute(IgniteNodeAttributes.ATTR_MEMORY_CONFIG); + + if (memCfg != null) { + dsCfg = new DataStorageConfiguration(); + + // All properties that are used in validation should be converted here. 
+ dsCfg.setPageSize(memCfg.getPageSize()); + } + } - if (memCfg != null) { - MemoryConfiguration locMemCfg = ctx.config().getMemoryConfiguration(); + if (dsCfg != null) { + DataStorageConfiguration locDsCfg = ctx.config().getDataStorageConfiguration(); - if (memCfg.getPageSize() != locMemCfg.getPageSize()) { + if (dsCfg.getPageSize() != locDsCfg.getPageSize()) { throw new IgniteCheckedException("Memory configuration mismatch (fix configuration or set -D" + IGNITE_SKIP_CONFIGURATION_CONSISTENCY_CHECK + "=true system property) [rmtNodeId=" + rmt.id() + - ", locPageSize = " + locMemCfg.getPageSize() + ", rmtPageSize = " + memCfg.getPageSize() + "]"); + ", locPageSize = " + locDsCfg.getPageSize() + ", rmtPageSize = " + dsCfg.getPageSize() + "]"); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java index 4f76875067d1f..26e22543be012 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java @@ -53,6 +53,8 @@ import org.apache.ignite.cache.store.CacheStoreSessionListener; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.TransactionConfiguration; import org.apache.ignite.internal.GridKernalContext; @@ -1673,4 +1675,57 @@ else if (cfg.getCacheMode() == REPLICATED) { cfg.clearQueryEntities().setQueryEntities(normalEntities); } } + + /** + * Checks if cache configuration belongs to persistent cache. + * + * @param ccfg Cache configuration. + * @param dsCfg Data storage config. 
+ */ + public static boolean isPersistentCache(CacheConfiguration ccfg, DataStorageConfiguration dsCfg) { + if (dsCfg == null) + return false; + + String regName = ccfg.getDataRegionName(); + + if (regName == null || regName.equals(dsCfg.getDefaultDataRegionConfiguration().getName())) + return dsCfg.getDefaultDataRegionConfiguration().isPersistenceEnabled(); + + if (dsCfg.getDataRegionConfigurations() != null) { + for (DataRegionConfiguration drConf : dsCfg.getDataRegionConfigurations()) { + if (regName.equals(drConf.getName())) + return drConf.isPersistenceEnabled(); + } + } + + return false; + } + + /** + * @return {@code true} if persistence is enabled for at least one data region, {@code false} if not. + */ + public static boolean isPersistenceEnabled(IgniteConfiguration cfg) { + if (cfg.getDataStorageConfiguration() == null) + return false; + + DataRegionConfiguration dfltReg = cfg.getDataStorageConfiguration().getDefaultDataRegionConfiguration(); + + if (dfltReg == null) + return false; + + if (dfltReg.isPersistenceEnabled()) + return true; + + DataRegionConfiguration[] regCfgs = cfg.getDataStorageConfiguration().getDataRegionConfigurations(); + + if (regCfgs == null) + return false; + + for (DataRegionConfiguration regCfg : regCfgs) { + if (regCfg.isPersistenceEnabled()) + return true; + } + + return false; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java index ba6f7d0b2661b..56879560384f7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java @@ -148,7 +148,7 @@ public void onCacheStarted(GridCacheContext cctx) throws IgniteCheckedException{ pendingEntries = new PendingEntriesTree( grp, name, - 
grp.memoryPolicy().pageMemory(), + grp.dataRegion().pageMemory(), rootPage, grp.reuseList(), true); @@ -794,7 +794,7 @@ private long allocateForTree() throws IgniteCheckedException { long pageId; if (reuseList == null || (pageId = reuseList.takeRecycledPage()) == 0L) - pageId = grp.memoryPolicy().pageMemory().allocatePage(grp.groupId(), INDEX_PARTITION, FLAG_IDX); + pageId = grp.dataRegion().pageMemory().allocatePage(grp.groupId(), INDEX_PARTITION, FLAG_IDX); return pageId; } @@ -1469,7 +1469,7 @@ private void finishRemove(GridCacheContext cctx, KeyCacheObject key, @Nullable C if (row != null) { row.key(key); - grp.memoryPolicy().evictionTracker().touchPage(row.link()); + grp.dataRegion().evictionTracker().touchPage(row.link()); } return row; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java index 420cde5dffc0f..19514c0e3acf4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java @@ -24,6 +24,7 @@ import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.binary.BinaryMetadata; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.Nullable; @@ -62,7 +63,7 @@ class BinaryMetadataFileStore { this.ctx = ctx; this.log = log; - if (!ctx.config().isPersistentStoreEnabled()) + if (!CU.isPersistenceEnabled(ctx.config())) return; if (binaryMetadataFileStoreDir != null) @@ -85,7 +86,7 @@ class BinaryMetadataFileStore { * @param binMeta Binary metadata to be written to disk. 
*/ void saveMetadata(BinaryMetadata binMeta) { - if (!ctx.config().isPersistentStoreEnabled()) + if (!CU.isPersistenceEnabled(ctx.config())) return; try { @@ -107,7 +108,7 @@ void saveMetadata(BinaryMetadata binMeta) { * Restores metadata on startup of {@link CacheObjectBinaryProcessorImpl} but before starting discovery. */ void restoreMetadata() { - if (!ctx.config().isPersistentStoreEnabled()) + if (!CU.isPersistenceEnabled(ctx.config())) return; for (File file : workDir.listFiles()) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java index e5bcc46c1fec9..7a10c10ca38b5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java @@ -556,7 +556,8 @@ else if (conflictCtx.isMerge()) { GridCacheVersion dhtVer = cached.isNear() ? 
writeVersion() : null; - if (!near() && cctx.wal() != null && op != NOOP && op != RELOAD && op != READ) { + if (!near() && cacheCtx.group().persistenceEnabled() && + op != NOOP && op != RELOAD && op != READ) { if (dataEntries == null) dataEntries = new ArrayList<>(entries.size()); @@ -741,7 +742,7 @@ else if (op == READ) { } } - if (!near() && cctx.wal() != null) + if (!near() && !F.isEmpty(dataEntries) && cctx.wal() != null) cctx.wal().log(new DataRecord(dataEntries)); if (ptr != null) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java index f0e0d47a27565..17d1e49bd1881 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java @@ -553,7 +553,7 @@ public void restoreState(GridDhtPartitionState stateToRestore) { * @return {@code true} if cas succeeds. 
*/ private boolean casState(long state, GridDhtPartitionState toState) { - if (ctx.database().persistenceEnabled()) { + if (ctx.database().persistenceEnabled() && grp.dataRegion().config().isPersistenceEnabled()) { synchronized (this) { boolean update = this.state.compareAndSet(state, setPartState(state, toState)); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java index 30614a39609ae..5095f45679bd7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java @@ -61,7 +61,6 @@ import org.apache.ignite.internal.processors.cache.GridCacheUpdateAtomicResult; import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy; import org.apache.ignite.internal.processors.cache.KeyCacheObject; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry; @@ -1699,7 +1698,7 @@ private void updateAllAsyncInternal0( ctx.shared().database().checkpointReadLock(); try { - ctx.shared().database().ensureFreeSpace(ctx.memoryPolicy()); + ctx.shared().database().ensureFreeSpace(ctx.dataRegion()); // If batch store update is enabled, we need to lock all entries. // First, need to acquire locks on cache entries, then check filter. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetRequest.java index 1bffac4d11ae3..dcb167d81a892 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetRequest.java @@ -47,7 +47,7 @@ import org.jetbrains.annotations.NotNull; /** - * Get request. Responsible for obtaining entry from primary node. 'Near' means 'Primary' here, not 'Near Cache'. + * Get request. Responsible for obtaining entry from primary node. 'Near' means 'Initiating node' here, not 'Near Cache'. */ public class GridNearGetRequest extends GridCacheIdMessage implements GridCacheDeployable, GridCacheVersionable { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockRequest.java index b48693db4294b..f736cae61848c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockRequest.java @@ -40,7 +40,7 @@ import org.jetbrains.annotations.Nullable; /** - * Near cache lock request. + * Near cache lock request to primary node. 'Near' means 'Initiating node' here, not 'Near Cache'. 
*/ public class GridNearLockRequest extends GridDistributedLockRequest { /** */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java index e73f34ba8c8a6..085f0b76a09e7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java @@ -2682,7 +2682,7 @@ private IgniteInternalFuture localCacheLoadMissing( GridCacheEntryEx entry = cacheCtx.cache().entryEx(key, topVer); try { - cacheCtx.shared().database().ensureFreeSpace(cacheCtx.memoryPolicy()); + cacheCtx.shared().database().ensureFreeSpace(cacheCtx.dataRegion()); EntryGetResult verVal = entry.versionedValue(cacheVal, ver, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareRequest.java index e352c8733dfe7..063eb27beb709 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareRequest.java @@ -36,7 +36,7 @@ import org.jetbrains.annotations.Nullable; /** - * Near transaction prepare request. + * Near transaction prepare request to primary node. 'Near' means 'Initiating node' here, not 'Near Cache'. 
*/ public class GridNearTxPrepareRequest extends GridDistributedTxPrepareRequest { /** */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java index 40d1fac36a98e..599a58c173177 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java @@ -830,7 +830,7 @@ private Object updateAllInternal(GridCacheOperation op, CacheEntryPredicate[] filters = CU.filterArray(filter); - ctx.shared().database().ensureFreeSpace(ctx.memoryPolicy()); + ctx.shared().database().ensureFreeSpace(ctx.dataRegion()); if (writeThrough && keys.size() > 1) { return updateWithBatch(op, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java index 4d75475a8ed21..0fd8323523152 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java @@ -98,7 +98,7 @@ public CacheDataRowAdapter(KeyCacheObject key, CacheObject val, GridCacheVersion * @throws IgniteCheckedException If failed. 
*/ public final void initFromLink(CacheGroupContext grp, RowData rowData) throws IgniteCheckedException { - initFromLink(grp, grp.shared(), grp.memoryPolicy().pageMemory(), rowData); + initFromLink(grp, grp.shared(), grp.dataRegion().pageMemory(), rowData); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicy.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegion.java similarity index 75% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicy.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegion.java index 4059c1290310b..0b0bf2bbabf28 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicy.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegion.java @@ -16,36 +16,36 @@ */ package org.apache.ignite.internal.processors.cache.persistence; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.processors.cache.persistence.evict.PageEvictionTracker; /** - * Memory policy provides access to objects configured with {@link MemoryPolicyConfiguration} configuration. + * Data region provides access to objects configured with {@link DataRegionConfiguration} configuration. */ -public class MemoryPolicy { +public class DataRegion { /** */ private final PageMemory pageMem; /** */ - private final MemoryMetricsImpl memMetrics; + private final DataRegionMetricsImpl memMetrics; /** */ - private final MemoryPolicyConfiguration cfg; + private final DataRegionConfiguration cfg; /** */ private final PageEvictionTracker evictionTracker; /** * @param pageMem PageMemory instance. 
- * @param memMetrics MemoryMetrics instance. - * @param cfg Configuration of given MemoryPolicy. + * @param memMetrics DataRegionMetrics instance. + * @param cfg Configuration of given DataRegion. * @param evictionTracker Eviction tracker. */ - public MemoryPolicy( + public DataRegion( PageMemory pageMem, - MemoryPolicyConfiguration cfg, - MemoryMetricsImpl memMetrics, + DataRegionConfiguration cfg, + DataRegionMetricsImpl memMetrics, PageEvictionTracker evictionTracker ) { this.pageMem = pageMem; @@ -64,14 +64,14 @@ public PageMemory pageMemory() { /** * @return Config. */ - public MemoryPolicyConfiguration config() { + public DataRegionConfiguration config() { return cfg; } /** * @return Memory Metrics. */ - public MemoryMetricsImpl memoryMetrics() { + public DataRegionMetricsImpl memoryMetrics() { return memMetrics; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/MemoryMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsImpl.java similarity index 91% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/MemoryMetricsImpl.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsImpl.java index 32618744a0643..1d570f90df6f9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/MemoryMetricsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsImpl.java @@ -16,8 +16,8 @@ */ package org.apache.ignite.internal.processors.cache.persistence; -import org.apache.ignite.MemoryMetrics; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.pagemem.PageMemory; import 
org.apache.ignite.internal.processors.cache.ratemetrics.HitRateMetrics; import org.apache.ignite.internal.util.typedef.internal.U; @@ -28,7 +28,7 @@ /** * */ -public class MemoryMetricsImpl implements MemoryMetrics { +public class DataRegionMetricsImpl implements DataRegionMetrics { /** */ private final IgniteOutClosure fillFactorProvider; @@ -59,7 +59,7 @@ public class MemoryMetricsImpl implements MemoryMetrics { private volatile HitRateMetrics pageReplaceRate = new HitRateMetrics(60_000, 5); /** */ - private final MemoryPolicyConfiguration memPlcCfg; + private final DataRegionConfiguration memPlcCfg; /** */ private PageMemory pageMem; @@ -68,24 +68,24 @@ public class MemoryMetricsImpl implements MemoryMetrics { private volatile long rateTimeInterval; /** - * @param memPlcCfg MemoryPolicyConfiguration. + * @param memPlcCfg DataRegionConfiguration. */ - public MemoryMetricsImpl(MemoryPolicyConfiguration memPlcCfg) { + public DataRegionMetricsImpl(DataRegionConfiguration memPlcCfg) { this(memPlcCfg, null); } /** - * @param memPlcCfg MemoryPolicyConfiguration. + * @param memPlcCfg DataRegionConfiguration. 
*/ - public MemoryMetricsImpl(MemoryPolicyConfiguration memPlcCfg, @Nullable IgniteOutClosure fillFactorProvider) { + public DataRegionMetricsImpl(DataRegionConfiguration memPlcCfg, @Nullable IgniteOutClosure fillFactorProvider) { this.memPlcCfg = memPlcCfg; this.fillFactorProvider = fillFactorProvider; metricsEnabled = memPlcCfg.isMetricsEnabled(); - rateTimeInterval = memPlcCfg.getRateTimeInterval(); + rateTimeInterval = memPlcCfg.getMetricsRateTimeInterval(); - subInts = memPlcCfg.getSubIntervals(); + subInts = memPlcCfg.getMetricsSubIntervalCount(); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/MemoryMetricsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsMXBeanImpl.java similarity index 77% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/MemoryMetricsMXBeanImpl.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsMXBeanImpl.java index 392f83f4b5fb6..141d0dc8f0d3b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/MemoryMetricsMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsMXBeanImpl.java @@ -16,29 +16,29 @@ */ package org.apache.ignite.internal.processors.cache.persistence; -import org.apache.ignite.MemoryMetrics; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.mxbean.MemoryMetricsMXBean; +import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.mxbean.DataRegionMetricsMXBean; /** - * MBean to expose {@link MemoryMetrics} through JMX interface. + * MBean to expose {@link DataRegionMetrics} through JMX interface. 
*/ -class MemoryMetricsMXBeanImpl implements MemoryMetricsMXBean { +class DataRegionMetricsMXBeanImpl implements DataRegionMetricsMXBean { /** */ - private final MemoryMetricsImpl memMetrics; + private final DataRegionMetricsImpl memMetrics; /** */ - private final MemoryPolicyConfiguration memPlcCfg; + private final DataRegionConfiguration dataRegCfg; /** - * @param memMetrics MemoryMetrics instance to expose through JMX interface. - * @param memPlcCfg configuration of memory policy this MX Bean is created for. + * @param memMetrics DataRegionMetrics instance to expose through JMX interface. + * @param dataRegCfg Configuration of data region this MX Bean is created for. */ - MemoryMetricsMXBeanImpl(MemoryMetricsImpl memMetrics, - MemoryPolicyConfiguration memPlcCfg + DataRegionMetricsMXBeanImpl(DataRegionMetricsImpl memMetrics, + DataRegionConfiguration dataRegCfg ) { this.memMetrics = memMetrics; - this.memPlcCfg = memPlcCfg; + this.dataRegCfg = dataRegCfg; } /** {@inheritDoc} */ @@ -116,16 +116,16 @@ class MemoryMetricsMXBeanImpl implements MemoryMetricsMXBean { /** {@inheritDoc} */ @Override public int getInitialSize() { - return (int) (memPlcCfg.getInitialSize() / (1024 * 1024)); + return (int) (dataRegCfg.getInitialSize() / (1024 * 1024)); } /** {@inheritDoc} */ @Override public int getMaxSize() { - return (int) (memPlcCfg.getMaxSize() / (1024 * 1024)); + return (int) (dataRegCfg.getMaxSize() / (1024 * 1024)); } /** {@inheritDoc} */ - @Override public String getSwapFilePath() { - return memPlcCfg.getSwapFilePath(); + @Override public String getSwapPath() { + return dataRegCfg.getSwapPath(); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/MemoryMetricsSnapshot.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsSnapshot.java similarity index 94% rename from 
modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/MemoryMetricsSnapshot.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsSnapshot.java index 4e7f90a1464a5..c39fdb0d9a10d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/MemoryMetricsSnapshot.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsSnapshot.java @@ -17,12 +17,12 @@ package org.apache.ignite.internal.processors.cache.persistence; -import org.apache.ignite.MemoryMetrics; +import org.apache.ignite.DataRegionMetrics; /** * */ -public class MemoryMetricsSnapshot implements MemoryMetrics { +public class DataRegionMetricsSnapshot implements DataRegionMetrics { /** */ private String name; @@ -53,7 +53,7 @@ public class MemoryMetricsSnapshot implements MemoryMetrics { /** * @param metrics Metrics instance to take a copy. */ - public MemoryMetricsSnapshot(MemoryMetrics metrics) { + public DataRegionMetricsSnapshot(DataRegionMetrics metrics) { name = metrics.getName(); totalAllocatedPages = metrics.getTotalAllocatedPages(); allocationRate = metrics.getAllocationRate(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/PersistenceMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java similarity index 97% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/PersistenceMetricsImpl.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java index 7952937ec9b68..16707aaaf2f44 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/PersistenceMetricsImpl.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java @@ -18,12 +18,12 @@ import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.ratemetrics.HitRateMetrics; -import org.apache.ignite.mxbean.PersistenceMetricsMXBean; +import org.apache.ignite.mxbean.DataStorageMetricsMXBean; /** * */ -public class PersistenceMetricsImpl implements PersistenceMetricsMXBean { +public class DataStorageMetricsImpl implements DataStorageMetricsMXBean { /** */ private volatile HitRateMetrics walLoggingRate; @@ -77,7 +77,7 @@ public class PersistenceMetricsImpl implements PersistenceMetricsMXBean { * @param rateTimeInterval Rate time interval. * @param subInts Number of sub-intervals. */ - public PersistenceMetricsImpl( + public DataStorageMetricsImpl( boolean metricsEnabled, long rateTimeInterval, int subInts diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/PersistenceMetricsSnapshot.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java similarity index 94% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/PersistenceMetricsSnapshot.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java index 0de995032d911..484138710129f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/PersistenceMetricsSnapshot.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java @@ -16,13 +16,13 @@ */ package org.apache.ignite.internal.processors.cache.persistence; -import org.apache.ignite.PersistenceMetrics; +import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.internal.util.typedef.internal.S; /** * */ -public class 
PersistenceMetricsSnapshot implements PersistenceMetrics { +public class DataStorageMetricsSnapshot implements DataStorageMetrics { /** */ private float walLoggingRate; @@ -62,7 +62,7 @@ public class PersistenceMetricsSnapshot implements PersistenceMetrics { /** * @param metrics Metrics. */ - public PersistenceMetricsSnapshot(PersistenceMetrics metrics) { + public DataStorageMetricsSnapshot(DataStorageMetrics metrics) { walLoggingRate = metrics.getWalLoggingRate(); walWritingRate = metrics.getWalWritingRate(); walArchiveSegments = metrics.getWalArchiveSegments(); @@ -139,6 +139,6 @@ public PersistenceMetricsSnapshot(PersistenceMetrics metrics) { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(PersistenceMetricsSnapshot.class, this); + return S.toString(DataStorageMetricsSnapshot.class, this); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index a56b8f45e828c..596b7b25b4be7 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -57,18 +57,17 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; import javax.management.ObjectName; +import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.IgniteSystemProperties; -import org.apache.ignite.PersistenceMetrics; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CheckpointWriteOrder; import org.apache.ignite.configuration.DataPageEvictionMode; +import 
org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.events.EventType; import org.apache.ignite.internal.GridKernalContext; @@ -139,7 +138,7 @@ import org.apache.ignite.internal.util.worker.GridWorker; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteOutClosure; -import org.apache.ignite.mxbean.PersistenceMetricsMXBean; +import org.apache.ignite.mxbean.DataStorageMetricsMXBean; import org.apache.ignite.thread.IgniteThread; import org.apache.ignite.thread.IgniteThreadPoolExecutor; import org.jetbrains.annotations.NotNull; @@ -229,7 +228,7 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan }; /** */ - private static final String MBEAN_NAME = "PersistenceMetrics"; + private static final String MBEAN_NAME = "DataStorageMetrics"; /** */ private static final String MBEAN_GROUP = "Persistent Store"; @@ -262,7 +261,7 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan private volatile boolean printCheckpointStats = true; /** Database configuration. 
*/ - private final PersistentStoreConfiguration persistenceCfg; + private final DataStorageConfiguration persistenceCfg; /** */ private final Collection lsnrs = new CopyOnWriteArrayList<>(); @@ -301,7 +300,7 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan private IgniteCacheSnapshotManager snapshotMgr; /** */ - private PersistenceMetricsImpl persStoreMetrics; + private DataStorageMetricsImpl persStoreMetrics; /** */ private ObjectName persistenceMetricsMbeanName; @@ -318,18 +317,18 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan public GridCacheDatabaseSharedManager(GridKernalContext ctx) { IgniteConfiguration cfg = ctx.config(); - persistenceCfg = cfg.getPersistentStoreConfiguration(); + persistenceCfg = cfg.getDataStorageConfiguration(); - assert persistenceCfg != null : "PageStore should not be created if persistence is disabled."; + assert persistenceCfg != null; - checkpointFreq = persistenceCfg.getCheckpointingFrequency(); + checkpointFreq = persistenceCfg.getCheckpointFrequency(); lockWaitTime = persistenceCfg.getLockWaitTime(); - persStoreMetrics = new PersistenceMetricsImpl( + persStoreMetrics = new DataStorageMetricsImpl( persistenceCfg.isMetricsEnabled(), - persistenceCfg.getRateTimeInterval(), - persistenceCfg.getSubIntervals() + persistenceCfg.getMetricsRateTimeInterval(), + persistenceCfg.getMetricsSubIntervalCount() ); } @@ -400,12 +399,12 @@ public IgniteInternalFuture enableCheckpoints(boolean enable) { * */ private void initDataBase() { - if (persistenceCfg.getCheckpointingThreads() > 1) + if (persistenceCfg.getCheckpointThreads() > 1) asyncRunner = new IgniteThreadPoolExecutor( "checkpoint-runner", cctx.igniteInstanceName(), - persistenceCfg.getCheckpointingThreads(), - persistenceCfg.getCheckpointingThreads(), + persistenceCfg.getCheckpointThreads(), + persistenceCfg.getCheckpointThreads(), 30_000, new LinkedBlockingQueue() ); @@ -420,26 +419,26 @@ private void 
initDataBase() { * @return Checkpoint buffer size. */ public static long checkpointBufferSize(IgniteConfiguration cfg) { - PersistentStoreConfiguration persistenceCfg = cfg.getPersistentStoreConfiguration(); + DataStorageConfiguration persistenceCfg = cfg.getDataStorageConfiguration(); if (persistenceCfg == null) return 0L; - long res = persistenceCfg.getCheckpointingPageBufferSize(); + long res = persistenceCfg.getCheckpointPageBufferSize(); if (res == 0L) { res = DFLT_CHECKPOINTING_PAGE_BUFFER_SIZE; - MemoryConfiguration memCfg = cfg.getMemoryConfiguration(); + DataStorageConfiguration memCfg = cfg.getDataStorageConfiguration(); assert memCfg != null; - long totalSize = memCfg.getSystemCacheMaxSize(); + long totalSize = memCfg.getSystemRegionMaxSize(); - if (memCfg.getMemoryPolicies() == null) - totalSize += MemoryConfiguration.DFLT_MEMORY_POLICY_MAX_SIZE; + if (memCfg.getDataRegionConfigurations() == null) + totalSize += DataStorageConfiguration.DFLT_DATA_REGION_MAX_SIZE; else { - for (MemoryPolicyConfiguration memPlc : memCfg.getMemoryPolicies()) { + for (DataRegionConfiguration memPlc : memCfg.getDataRegionConfigurations()) { if (Long.MAX_VALUE - memPlc.getMaxSize() > totalSize) totalSize += memPlc.getMaxSize(); else { @@ -464,11 +463,6 @@ public static long checkpointBufferSize(IgniteConfiguration cfg) { return res; } - /** {@inheritDoc} */ - @Override protected void initPageMemoryDataStructures(MemoryConfiguration dbCfg) throws IgniteCheckedException { - // No-op. 
- } - /** {@inheritDoc} */ @Override public void onActivate(GridKernalContext ctx) throws IgniteCheckedException { if (log.isDebugEnabled()) @@ -522,7 +516,7 @@ private void registrateMetricsMBean() throws IgniteCheckedException { MBEAN_GROUP, MBEAN_NAME, persStoreMetrics, - PersistenceMetricsMXBean.class); + DataStorageMetricsMXBean.class); } catch (Throwable e) { throw new IgniteCheckedException("Failed to register " + MBEAN_NAME + " MBean.", e); @@ -549,14 +543,19 @@ private void unRegistrateMetricsMBean() { } /** {@inheritDoc} */ - @Override protected IgniteOutClosure fillFactorProvider(final String memPlcName) { + @Override protected IgniteOutClosure fillFactorProvider(final DataRegionConfiguration dataRegCfg) { + if (!dataRegCfg.isPersistenceEnabled()) + return super.fillFactorProvider(dataRegCfg); + + final String dataRegName = dataRegCfg.getName(); + return new IgniteOutClosure() { @Override public Float apply() { long loadSize = 0L; long totalSize = 0L; for (CacheGroupContext grpCtx : cctx.cache().cacheGroups()) { - if (!grpCtx.memoryPolicy().config().getName().equals(memPlcName)) + if (!grpCtx.dataRegion().config().getName().equals(dataRegName)) continue; assert grpCtx.offheap() instanceof GridCacheOffheapManager; @@ -678,10 +677,13 @@ private long[] calculateFragmentSizes(int concLvl, long cacheSize, long chpBufSi /** {@inheritDoc} */ @Override protected PageMemory createPageMemory( DirectMemoryProvider memProvider, - MemoryConfiguration memCfg, - MemoryPolicyConfiguration plcCfg, - MemoryMetricsImpl memMetrics + DataStorageConfiguration memCfg, + DataRegionConfiguration plcCfg, + DataRegionMetricsImpl memMetrics ) { + if (!plcCfg.isPersistenceEnabled()) + return super.createPageMemory(memProvider, memCfg, plcCfg, memMetrics); + memMetrics.persistenceEnabled(true); long cacheSize = plcCfg.getMaxSize(); @@ -741,15 +743,18 @@ private long[] calculateFragmentSizes(int concLvl, long cacheSize, long chpBufSi } /** {@inheritDoc} */ - @Override protected void 
checkPolicyEvictionProperties(MemoryPolicyConfiguration plcCfg, MemoryConfiguration dbCfg) + @Override protected void checkRegionEvictionProperties(DataRegionConfiguration regCfg, DataStorageConfiguration dbCfg) throws IgniteCheckedException { - if (plcCfg.getPageEvictionMode() != DataPageEvictionMode.DISABLED) - U.warn(log, "Page eviction mode for [" + plcCfg.getName() + "] memory region is ignored " + + if (!regCfg.isPersistenceEnabled()) + super.checkRegionEvictionProperties(regCfg, dbCfg); + + if (regCfg.getPageEvictionMode() != DataPageEvictionMode.DISABLED) + U.warn(log, "Page eviction mode for [" + regCfg.getName() + "] memory region is ignored " + "because Ignite Native Persistence is enabled"); } /** {@inheritDoc} */ - @Override protected void checkPageSize(MemoryConfiguration memCfg) { + @Override protected void checkPageSize(DataStorageConfiguration memCfg) { if (memCfg.getPageSize() == 0) { try { assert cctx.pageStore() instanceof FilePageStoreManager : @@ -767,10 +772,10 @@ private long[] calculateFragmentSizes(int concLvl, long cacheSize, long chpBufSi catch (IgniteCheckedException | IOException | IllegalArgumentException e) { U.quietAndWarn(log, "Attempt to resolve pageSize from store files failed: " + e.getMessage()); - U.quietAndWarn(log, "Default page size will be used: " + MemoryConfiguration.DFLT_PAGE_SIZE + " bytes"); + U.quietAndWarn(log, "Default page size will be used: " + DataStorageConfiguration.DFLT_PAGE_SIZE + " bytes"); } - memCfg.setPageSize(MemoryConfiguration.DFLT_PAGE_SIZE); + memCfg.setPageSize(DataStorageConfiguration.DFLT_PAGE_SIZE); } } @@ -800,7 +805,7 @@ private int resolvePageSizeFromPartitionFile(Path partFile) throws IOException, int pageSize = hdr.getInt(); if (pageSize == 2048) { - U.quietAndWarn(log, "You are currently using persistent store with 2K pages (MemoryConfiguration#" + + U.quietAndWarn(log, "You are currently using persistent store with 2K pages (DataStorageConfiguration#" + "pageSize). 
If you use SSD disk, consider migrating to 4K pages for better IO performance."); } @@ -923,9 +928,12 @@ private void shutdownCheckpointer(boolean cancel) { for (IgniteBiTuple tup : stoppedGrps) { CacheGroupContext gctx = tup.get1(); + if (!gctx.persistenceEnabled()) + continue; + snapshotMgr.onCacheGroupStop(gctx); - PageMemoryEx pageMem = (PageMemoryEx)gctx.memoryPolicy().pageMemory(); + PageMemoryEx pageMem = (PageMemoryEx)gctx.dataRegion().pageMemory(); Collection grpIds = destroyed.get(pageMem); @@ -1024,12 +1032,15 @@ private void shutdownCheckpointer(boolean cancel) { * @return {@code true} if all PageMemory instances are safe to update. */ private boolean safeToUpdatePageMemories() { - Collection memPlcs = context().database().memoryPolicies(); + Collection memPlcs = context().database().dataRegions(); if (memPlcs == null) return true; - for (MemoryPolicy memPlc : memPlcs) { + for (DataRegion memPlc : memPlcs) { + if (!memPlc.config().isPersistenceEnabled()) + continue; + PageMemoryEx pageMemEx = (PageMemoryEx)memPlc.pageMemory(); if (!pageMemEx.safeToUpdate()) @@ -1049,11 +1060,14 @@ private boolean safeToUpdatePageMemories() { checkpointLock.readLock().unlock(); if (checkpointer != null) { - Collection memPlcs = context().database().memoryPolicies(); + Collection dataRegs = context().database().dataRegions(); + + if (dataRegs != null) { + for (DataRegion dataReg : dataRegs) { + if (!dataReg.config().isPersistenceEnabled()) + continue; - if (memPlcs != null) { - for (MemoryPolicy memPlc : memPlcs) { - PageMemoryEx mem = (PageMemoryEx)memPlc.pageMemory(); + PageMemoryEx mem = (PageMemoryEx)dataReg.pageMemory(); if (mem != null && !mem.safeToUpdate()) { checkpointer.wakeupForCheckpoint(0, "too many dirty pages"); @@ -1595,7 +1609,7 @@ else if (!F.eq(cpRec.checkpointId(), status.cpEndId)) * * @param grpId Cache group id. * @return PageMemoryEx instance. 
- * @throws IgniteCheckedException if no MemoryPolicy is configured for a name obtained from cache descriptor. + * @throws IgniteCheckedException if no DataRegion is configured for a name obtained from cache descriptor. */ private PageMemoryEx getPageMemoryForCacheGroup(int grpId) throws IgniteCheckedException { // TODO IGNITE-5075: cache descriptor can be removed. @@ -1606,9 +1620,9 @@ private PageMemoryEx getPageMemoryForCacheGroup(int grpId) throws IgniteCheckedE if (desc == null) throw new IgniteCheckedException("Failed to find cache group descriptor [grpId=" + grpId + ']'); - String memPlcName = desc.config().getMemoryPolicyName(); + String memPlcName = desc.config().getDataRegionName(); - return (PageMemoryEx)sharedCtx.database().memoryPolicy(memPlcName).pageMemory(); + return (PageMemoryEx)sharedCtx.database().dataRegion(memPlcName).pageMemory(); } /** @@ -1687,9 +1701,12 @@ private void restorePartitionState( continue; } + if (!grp.dataRegion().config().isPersistenceEnabled()) + continue; + int grpId = grp.groupId(); - PageMemoryEx pageMem = (PageMemoryEx)grp.memoryPolicy().pageMemory(); + PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); for (int i = 0; i < grp.affinity().partitions(); i++) { if (storeMgr.exists(grpId, i)) { @@ -1822,14 +1839,17 @@ private void finalizeCheckpointOnRecovery(long cpTs, UUID cpId, WALPointer walPt long start = System.currentTimeMillis(); - Collection memPolicies = context().database().memoryPolicies(); + Collection memPolicies = context().database().dataRegions(); List>> cpEntities = new ArrayList<>(memPolicies.size()); - for (MemoryPolicy memPlc : memPolicies) { - PageMemoryEx pageMem = (PageMemoryEx)memPlc.pageMemory(); - cpEntities.add(new IgniteBiTuple>(pageMem, - (pageMem).beginCheckpoint())); + for (DataRegion memPlc : memPolicies) { + if (memPlc.config().isPersistenceEnabled()) { + PageMemoryEx pageMem = (PageMemoryEx)memPlc.pageMemory(); + + cpEntities.add(new IgniteBiTuple>( + pageMem, 
(pageMem).beginCheckpoint())); + } } tmpWriteBuf.order(ByteOrder.nativeOrder()); @@ -2423,11 +2443,14 @@ private boolean hasPageForWrite(Collection>, Integer> beginAllCheckpoints() { - Collection> res = new ArrayList(memoryPolicies().size()); + Collection> res = new ArrayList(dataRegions().size()); int pagesNum = 0; - for (MemoryPolicy memPlc : memoryPolicies()) { + for (DataRegion memPlc : dataRegions()) { + if (!memPlc.config().isPersistenceEnabled()) + continue; + GridMultiCollectionWrapper nextCpPagesCol = ((PageMemoryEx)memPlc.pageMemory()).beginCheckpoint(); pagesNum += nextCpPagesCol.size(); @@ -2443,8 +2466,12 @@ private IgniteBiTuple>, Intege */ private void markCheckpointEnd(Checkpoint chp) throws IgniteCheckedException { synchronized (this) { - for (MemoryPolicy memPlc : memoryPolicies()) + for (DataRegion memPlc : dataRegions()) { + if (!memPlc.config().isPersistenceEnabled()) + continue; + ((PageMemoryEx)memPlc.pageMemory()).finishCheckpoint(); + } if (chp.hasDelta()) writeCheckpointEntry( @@ -2492,8 +2519,8 @@ public void shutdownNow() { /** * Reorders list of checkpoint pages and splits them into needed number of sublists according to - * {@link PersistentStoreConfiguration#getCheckpointingThreads()} and - * {@link PersistentStoreConfiguration#getCheckpointWriteOrder()}. + * {@link DataStorageConfiguration#getCheckpointThreads()} and + * {@link DataStorageConfiguration#getCheckpointWriteOrder()}. * * @param cpPagesTuple Checkpoint pages tuple. */ @@ -2520,7 +2547,7 @@ private GridMultiCollectionWrapper splitAndSortCpPagesIfNeeded( }); } - int cpThreads = persistenceCfg.getCheckpointingThreads(); + int cpThreads = persistenceCfg.getCheckpointThreads(); int pagesSubLists = cpThreads == 1 ? 1 : cpThreads * 4; // Splitting pages to (threads * 4) subtasks. If any thread will be faster, it will help slower threads. 
@@ -2603,7 +2630,10 @@ private WriteCheckpointPages( if (grp == null) continue; - PageMemoryEx pageMem = (PageMemoryEx)grp.memoryPolicy().pageMemory(); + if (!grp.dataRegion().config().isPersistenceEnabled()) + continue; + + PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); Integer tag = pageMem.getForCheckpoint( fullId, tmpWriteBuf, persStoreMetrics.metricsEnabled() ? tracker : null); @@ -3310,14 +3340,14 @@ private String lockPath() { } /** {@inheritDoc} */ - @Override public PersistenceMetrics persistentStoreMetrics() { - return new PersistenceMetricsSnapshot(persStoreMetrics); + @Override public DataStorageMetrics persistentStoreMetrics() { + return new DataStorageMetricsSnapshot(persStoreMetrics); } /** * */ - public PersistenceMetricsImpl persistentStoreMetricsImpl() { + public DataStorageMetricsImpl persistentStoreMetricsImpl() { return persStoreMetrics; } } \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java index 5c91a4f8467cb..6ed62f86b1d44 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java @@ -93,14 +93,14 @@ public class GridCacheOffheapManager extends IgniteCacheOffheapManagerImpl imple reuseList = new ReuseListImpl(grp.groupId(), grp.cacheOrGroupName(), - grp.memoryPolicy().pageMemory(), + grp.dataRegion().pageMemory(), ctx.wal(), reuseListRoot.pageId().pageId(), reuseListRoot.isAllocated()); RootPage metastoreRoot = metas.treeRoot; - metaStore = new MetadataStorage(grp.memoryPolicy().pageMemory(), + metaStore = new MetadataStorage(grp.dataRegion().pageMemory(), ctx.wal(), globalRemoveId(), grp.groupId(), @@ -126,7 +126,7 @@ public 
class GridCacheOffheapManager extends IgniteCacheOffheapManagerImpl imple pendingEntries = new PendingEntriesTree( grp, name, - grp.memoryPolicy().pageMemory(), + grp.dataRegion().pageMemory(), pendingRootPage.pageId().pageId(), reuseList, pendingRootPage.isAllocated() @@ -148,7 +148,7 @@ public class GridCacheOffheapManager extends IgniteCacheOffheapManagerImpl imple /** {@inheritDoc} */ @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException { - assert grp.memoryPolicy().pageMemory() instanceof PageMemoryEx; + assert grp.dataRegion().pageMemory() instanceof PageMemoryEx; reuseList.saveMetadata(); @@ -185,7 +185,7 @@ private boolean saveStoreMetadata(CacheDataStore store, Context ctx, boolean sav int size = store.fullSize(); long rmvId = globalRemoveId().get(); - PageMemoryEx pageMem = (PageMemoryEx)grp.memoryPolicy().pageMemory(); + PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); IgniteWriteAheadLogManager wal = this.ctx.wal(); if (size > 0 || updCntr > 0) { @@ -437,7 +437,7 @@ private static void addPartition( saveStoreMetadata(store, null, false, true); - PageMemoryEx pageMemory = (PageMemoryEx)grp.memoryPolicy().pageMemory(); + PageMemoryEx pageMemory = (PageMemoryEx)grp.dataRegion().pageMemory(); int tag = pageMemory.invalidate(grp.groupId(), p); @@ -511,7 +511,7 @@ private static void addPartition( * @throws IgniteCheckedException If failed. 
*/ private Metas getOrAllocateCacheMetas() throws IgniteCheckedException { - PageMemoryEx pageMem = (PageMemoryEx)grp.memoryPolicy().pageMemory(); + PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); IgniteWriteAheadLogManager wal = ctx.wal(); int grpId = grp.groupId(); @@ -915,8 +915,8 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException freeList = new FreeListImpl( grp.groupId(), grp.cacheOrGroupName() + "-" + partId, - grp.memoryPolicy().memoryMetrics(), - grp.memoryPolicy(), + grp.dataRegion().memoryMetrics(), + grp.dataRegion(), null, ctx.wal(), reuseRoot.pageId().pageId(), @@ -942,7 +942,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException } }; - PageMemoryEx pageMem = (PageMemoryEx)grp.memoryPolicy().pageMemory(); + PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); delegate0 = new CacheDataStoreImpl(partId, name, rowStore, dataTree); @@ -1036,7 +1036,7 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException * @return Partition metas. 
*/ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { - PageMemoryEx pageMem = (PageMemoryEx)grp.memoryPolicy().pageMemory(); + PageMemoryEx pageMem = (PageMemoryEx)grp.dataRegion().pageMemory(); IgniteWriteAheadLogManager wal = ctx.wal(); int grpId = grp.groupId(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index d7682f0b9a5a6..da598d14486dc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -25,14 +25,14 @@ import java.util.List; import java.util.Map; import java.util.Set; +import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; -import org.apache.ignite.MemoryMetrics; -import org.apache.ignite.PersistenceMetrics; import org.apache.ignite.configuration.DataPageEvictionMode; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.mem.DirectMemoryProvider; @@ -57,24 +57,24 @@ import org.apache.ignite.internal.processors.cluster.IgniteChangeGlobalStateSupport; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T2; +import 
org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteOutClosure; -import org.apache.ignite.mxbean.MemoryMetricsMXBean; +import org.apache.ignite.mxbean.DataRegionMetricsMXBean; import org.jetbrains.annotations.Nullable; -import static org.apache.ignite.configuration.MemoryConfiguration.DFLT_MEMORY_POLICY_INITIAL_SIZE; -import static org.apache.ignite.configuration.MemoryConfiguration.DFLT_MEM_PLC_DEFAULT_NAME; -import static org.apache.ignite.configuration.MemoryConfiguration.DFLT_PAGE_SIZE; +import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_DATA_REG_DEFAULT_NAME; +import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_PAGE_SIZE; /** * */ public class IgniteCacheDatabaseSharedManager extends GridCacheSharedManagerAdapter implements IgniteChangeGlobalStateSupport, CheckpointLockStateChecker { - /** MemoryPolicyConfiguration name reserved for internal caches. */ - static final String SYSTEM_MEMORY_POLICY_NAME = "sysMemPlc"; + /** DataRegionConfiguration name reserved for internal caches. 
*/ + static final String SYSTEM_DATA_REGION_NAME = "sysMemPlc"; /** Minimum size of memory chunk */ private static final long MIN_PAGE_MEMORY_SIZE = 10 * 1024 * 1024; @@ -83,16 +83,16 @@ public class IgniteCacheDatabaseSharedManager extends GridCacheSharedManagerAdap private static final long MAX_PAGE_MEMORY_INIT_SIZE_32_BIT = 2L * 1024 * 1024 * 1024; /** */ - protected Map memPlcMap; + protected Map dataRegionMap; /** */ - protected Map memMetricsMap; + protected Map memMetricsMap; /** */ - protected MemoryPolicy dfltMemPlc; + protected DataRegion dfltDataRegion; /** */ - private Map freeListMap; + protected Map freeListMap; /** */ private FreeListImpl dfltFreeList; @@ -102,10 +102,10 @@ public class IgniteCacheDatabaseSharedManager extends GridCacheSharedManagerAdap /** {@inheritDoc} */ @Override protected void start0() throws IgniteCheckedException { - if (cctx.kernalContext().clientNode() && cctx.kernalContext().config().getMemoryConfiguration() == null) + if (cctx.kernalContext().clientNode() && cctx.kernalContext().config().getDataStorageConfiguration() == null) return; - MemoryConfiguration memCfg = cctx.kernalContext().config().getMemoryConfiguration(); + DataStorageConfiguration memCfg = cctx.kernalContext().config().getDataStorageConfiguration(); assert memCfg != null; @@ -115,7 +115,7 @@ public class IgniteCacheDatabaseSharedManager extends GridCacheSharedManagerAdap } /** - * Registers MBeans for all MemoryMetrics configured in this instance. + * Registers MBeans for all DataRegionMetrics configured in this instance. 
*/ private void registerMetricsMBeans() { if(U.IGNITE_MBEANS_DISABLED) @@ -123,21 +123,21 @@ private void registerMetricsMBeans() { IgniteConfiguration cfg = cctx.gridConfig(); - for (MemoryMetrics memMetrics : memMetricsMap.values()) { - MemoryPolicyConfiguration memPlcCfg = memPlcMap.get(memMetrics.getName()).config(); + for (DataRegionMetrics memMetrics : memMetricsMap.values()) { + DataRegionConfiguration memPlcCfg = dataRegionMap.get(memMetrics.getName()).config(); - registerMetricsMBean((MemoryMetricsImpl)memMetrics, memPlcCfg, cfg); + registerMetricsMBean((DataRegionMetricsImpl)memMetrics, memPlcCfg, cfg); } } /** * @param memMetrics Memory metrics. - * @param memPlcCfg Memory policy configuration. + * @param dataRegionCfg Data region configuration. * @param cfg Ignite configuration. */ private void registerMetricsMBean( - MemoryMetricsImpl memMetrics, - MemoryPolicyConfiguration memPlcCfg, + DataRegionMetricsImpl memMetrics, + DataRegionConfiguration dataRegionCfg, IgniteConfiguration cfg ) { assert !U.IGNITE_MBEANS_DISABLED; @@ -146,13 +146,13 @@ private void registerMetricsMBean( U.registerMBean( cfg.getMBeanServer(), cfg.getIgniteInstanceName(), - "MemoryMetrics", - memPlcCfg.getName(), - new MemoryMetricsMXBeanImpl(memMetrics, memPlcCfg), - MemoryMetricsMXBean.class); + "DataRegionMetrics", + dataRegionCfg.getName(), + new DataRegionMetricsMXBeanImpl(memMetrics, dataRegionCfg), + DataRegionMetricsMXBean.class); } catch (Throwable e) { - U.error(log, "Failed to register MBean for MemoryMetrics with name: '" + memMetrics.getName() + "'", e); + U.error(log, "Failed to register MBean for DataRegionMetrics with name: '" + memMetrics.getName() + "'", e); } } @@ -160,22 +160,24 @@ private void registerMetricsMBean( * @param dbCfg Database config. * @throws IgniteCheckedException If failed. 
*/ - protected void initPageMemoryDataStructures(MemoryConfiguration dbCfg) throws IgniteCheckedException { - freeListMap = U.newHashMap(memPlcMap.size()); + protected void initPageMemoryDataStructures(DataStorageConfiguration dbCfg) throws IgniteCheckedException { + freeListMap = U.newHashMap(dataRegionMap.size()); - String dfltMemPlcName = dbCfg.getDefaultMemoryPolicyName(); + String dfltMemPlcName = dbCfg.getDefaultDataRegionConfiguration().getName(); - for (MemoryPolicy memPlc : memPlcMap.values()) { - MemoryPolicyConfiguration memPlcCfg = memPlc.config(); + for (DataRegion memPlc : dataRegionMap.values()) { + DataRegionConfiguration memPlcCfg = memPlc.config(); - MemoryMetricsImpl memMetrics = (MemoryMetricsImpl) memMetricsMap.get(memPlcCfg.getName()); + DataRegionMetricsImpl memMetrics = (DataRegionMetricsImpl) memMetricsMap.get(memPlcCfg.getName()); + + boolean persistenceEnabled = memPlcCfg.isPersistenceEnabled(); FreeListImpl freeList = new FreeListImpl(0, cctx.igniteInstanceName(), memMetrics, memPlc, null, - cctx.wal(), + persistenceEnabled ? cctx.wal() : null, 0L, true); @@ -196,7 +198,7 @@ public int pageSize() { * */ private void startMemoryPolicies() { - for (MemoryPolicy memPlc : memPlcMap.values()) { + for (DataRegion memPlc : dataRegionMap.values()) { memPlc.pageMemory().start(); memPlc.evictionTracker().start(); @@ -207,102 +209,81 @@ private void startMemoryPolicies() { * @param memCfg Database config. * @throws IgniteCheckedException If failed to initialize swap path. 
*/ - protected void initPageMemoryPolicies(MemoryConfiguration memCfg) throws IgniteCheckedException { - MemoryPolicyConfiguration[] memPlcsCfgs = memCfg.getMemoryPolicies(); - - if (memPlcsCfgs == null) { - //reserve place for default and system memory policies - memPlcMap = U.newHashMap(2); - memMetricsMap = U.newHashMap(2); - - addMemoryPolicy( - memCfg, - memCfg.createDefaultPolicyConfig(), - DFLT_MEM_PLC_DEFAULT_NAME - ); - - U.warn(log, "No user-defined default MemoryPolicy found; system default of 1GB size will be used."); - } - else { - String dfltMemPlcName = memCfg.getDefaultMemoryPolicyName(); - - if (DFLT_MEM_PLC_DEFAULT_NAME.equals(dfltMemPlcName) && !hasCustomDefaultMemoryPolicy(memPlcsCfgs)) { - //reserve additional place for default and system memory policies - memPlcMap = U.newHashMap(memPlcsCfgs.length + 2); - memMetricsMap = U.newHashMap(memPlcsCfgs.length + 2); + protected void initDataRegions(DataStorageConfiguration memCfg) throws IgniteCheckedException { + DataRegionConfiguration[] dataRegionCfgs = memCfg.getDataRegionConfigurations(); - addMemoryPolicy( - memCfg, - memCfg.createDefaultPolicyConfig(), - DFLT_MEM_PLC_DEFAULT_NAME - ); + int dataRegions = dataRegionCfgs == null ? 
0 : dataRegionCfgs.length; - U.warn(log, "No user-defined default MemoryPolicy found; system default of 1GB size will be used."); - } - else { - //reserve additional space for system memory policy only - memPlcMap = U.newHashMap(memPlcsCfgs.length + 1); - memMetricsMap = U.newHashMap(memPlcsCfgs.length + 1); - } + dataRegionMap = U.newHashMap(2 + dataRegions); + memMetricsMap = U.newHashMap(2 + dataRegions); - for (MemoryPolicyConfiguration memPlcCfg : memPlcsCfgs) - addMemoryPolicy(memCfg, memPlcCfg, memPlcCfg.getName()); + if (dataRegionCfgs != null) { + for (DataRegionConfiguration dataRegionCfg : dataRegionCfgs) + addDataRegion(memCfg, dataRegionCfg, dataRegionCfg.getName()); } - addMemoryPolicy( + addDataRegion( + memCfg, + memCfg.getDefaultDataRegionConfiguration(), + memCfg.getDefaultDataRegionConfiguration().getName() + ); + + addDataRegion( memCfg, - createSystemMemoryPolicy( - memCfg.getSystemCacheInitialSize(), - memCfg.getSystemCacheMaxSize() + createSystemDataRegion( + memCfg.getSystemRegionInitialSize(), + memCfg.getSystemRegionMaxSize() ), - SYSTEM_MEMORY_POLICY_NAME + SYSTEM_DATA_REGION_NAME ); } /** - * @param memCfg Database config. - * @param memPlcCfg Memory policy config. - * @param memPlcName Memory policy name. + * @param dataStorageCfg Database config. + * @param dataRegionCfg Data region config. + * @param dataRegionName Data region name. * @throws IgniteCheckedException If failed to initialize swap path. 
*/ - private void addMemoryPolicy( - MemoryConfiguration memCfg, - MemoryPolicyConfiguration memPlcCfg, - String memPlcName + private void addDataRegion( + DataStorageConfiguration dataStorageCfg, + DataRegionConfiguration dataRegionCfg, + String dataRegionName ) throws IgniteCheckedException { - String dfltMemPlcName = memCfg.getDefaultMemoryPolicyName(); + String dfltMemPlcName = dataStorageCfg.getDefaultDataRegionConfiguration().getName(); if (dfltMemPlcName == null) - dfltMemPlcName = DFLT_MEM_PLC_DEFAULT_NAME; + dfltMemPlcName = DFLT_DATA_REG_DEFAULT_NAME; - MemoryMetricsImpl memMetrics = new MemoryMetricsImpl(memPlcCfg, fillFactorProvider(memPlcName)); + DataRegionMetricsImpl memMetrics = new DataRegionMetricsImpl(dataRegionCfg, fillFactorProvider(dataRegionCfg)); - MemoryPolicy memPlc = initMemory(memCfg, memPlcCfg, memMetrics); + DataRegion memPlc = initMemory(dataStorageCfg, dataRegionCfg, memMetrics); - memPlcMap.put(memPlcName, memPlc); + dataRegionMap.put(dataRegionName, memPlc); - memMetricsMap.put(memPlcName, memMetrics); + memMetricsMap.put(dataRegionName, memMetrics); - if (memPlcName.equals(dfltMemPlcName)) - dfltMemPlc = memPlc; - else if (memPlcName.equals(DFLT_MEM_PLC_DEFAULT_NAME)) - U.warn(log, "Memory Policy with name 'default' isn't used as a default. " + + if (dataRegionName.equals(dfltMemPlcName)) + dfltDataRegion = memPlc; + else if (dataRegionName.equals(DFLT_DATA_REG_DEFAULT_NAME)) + U.warn(log, "Data Region with name 'default' isn't used as a default. " + "Please check Memory Policies configuration."); } /** - * Closure that can be used to compute fill factor for provided memory policy. + * Closure that can be used to compute fill factor for provided data region. * - * @param memPlcName Memory policy name. + * @param dataRegCfg Data region configuration. * @return Closure. 
*/ - protected IgniteOutClosure fillFactorProvider(final String memPlcName) { + protected IgniteOutClosure fillFactorProvider(final DataRegionConfiguration dataRegCfg) { + final String dataRegName = dataRegCfg.getName(); + return new IgniteOutClosure() { private FreeListImpl freeList; @Override public Float apply() { if (freeList == null) { - FreeListImpl freeList0 = freeListMap.get(memPlcName); + FreeListImpl freeList0 = freeListMap.get(dataRegName); if (freeList0 == null) return (float) 0; @@ -321,11 +302,11 @@ protected IgniteOutClosure fillFactorProvider(final String memPlcName) { } /** - * @param memPlcsCfgs User-defined memory policy configurations. + * @param memPlcsCfgs User-defined data region configurations. */ - private boolean hasCustomDefaultMemoryPolicy(MemoryPolicyConfiguration[] memPlcsCfgs) { - for (MemoryPolicyConfiguration memPlcsCfg : memPlcsCfgs) { - if (DFLT_MEM_PLC_DEFAULT_NAME.equals(memPlcsCfg.getName())) + private boolean hasCustomDefaultDataRegion(DataRegionConfiguration[] memPlcsCfgs) { + for (DataRegionConfiguration memPlcsCfg : memPlcsCfgs) { + if (DFLT_DATA_REG_DEFAULT_NAME.equals(memPlcsCfg.getName())) return true; } @@ -336,12 +317,12 @@ private boolean hasCustomDefaultMemoryPolicy(MemoryPolicyConfiguration[] memPlcs * @param sysCacheInitSize Initial size of PageMemory to be created for system cache. * @param sysCacheMaxSize Maximum size of PageMemory to be created for system cache. * - * @return {@link MemoryPolicyConfiguration configuration} of MemoryPolicy for system cache. + * @return {@link DataRegionConfiguration configuration} of DataRegion for system cache. 
*/ - private MemoryPolicyConfiguration createSystemMemoryPolicy(long sysCacheInitSize, long sysCacheMaxSize) { - MemoryPolicyConfiguration res = new MemoryPolicyConfiguration(); + private DataRegionConfiguration createSystemDataRegion(long sysCacheInitSize, long sysCacheMaxSize) { + DataRegionConfiguration res = new DataRegionConfiguration(); - res.setName(SYSTEM_MEMORY_POLICY_NAME); + res.setName(SYSTEM_DATA_REGION_NAME); res.setInitialSize(sysCacheInitSize); res.setMaxSize(sysCacheMaxSize); @@ -351,71 +332,76 @@ private MemoryPolicyConfiguration createSystemMemoryPolicy(long sysCacheInitSize /** * @param memCfg configuration to validate. */ - private void validateConfiguration(MemoryConfiguration memCfg) throws IgniteCheckedException { + private void validateConfiguration(DataStorageConfiguration memCfg) throws IgniteCheckedException { checkPageSize(memCfg); - MemoryPolicyConfiguration[] plcCfgs = memCfg.getMemoryPolicies(); + DataRegionConfiguration[] regCfgs = memCfg.getDataRegionConfigurations(); - Set plcNames = (plcCfgs != null) ? U.newHashSet(plcCfgs.length) : new HashSet(0); + Set regNames = (regCfgs != null) ? U.newHashSet(regCfgs.length) : new HashSet(0); - checkSystemMemoryPolicySizeConfiguration( - memCfg.getSystemCacheInitialSize(), - memCfg.getSystemCacheMaxSize() + checkSystemDataRegionSizeConfiguration( + memCfg.getSystemRegionInitialSize(), + memCfg.getSystemRegionMaxSize() ); - if (plcCfgs != null) { - for (MemoryPolicyConfiguration plcCfg : plcCfgs) { - assert plcCfg != null; + if (regCfgs != null) { + for (DataRegionConfiguration regCfg : regCfgs) + checkDataRegionConfiguration(memCfg, regNames, regCfg); + } + + checkDataRegionConfiguration(memCfg, regNames, memCfg.getDefaultDataRegionConfiguration()); + } - checkPolicyName(plcCfg.getName(), plcNames); + /** + * @param memCfg Mem config. + * @param regNames Region names. + * @param regCfg Reg config. 
+ */ + private void checkDataRegionConfiguration(DataStorageConfiguration memCfg, Set regNames, + DataRegionConfiguration regCfg) throws IgniteCheckedException { + assert regCfg != null; - checkPolicySize(plcCfg); + checkDataRegionName(regCfg.getName(), regNames); - checkMetricsProperties(plcCfg); + checkDataRegionSize(regCfg); - checkPolicyEvictionProperties(plcCfg, memCfg); - } - } + checkMetricsProperties(regCfg); - checkDefaultPolicyConfiguration( - memCfg.getDefaultMemoryPolicyName(), - memCfg.getDefaultMemoryPolicySize(), - plcNames - ); + checkRegionEvictionProperties(regCfg, memCfg); } /** * @param memCfg Memory config. */ - protected void checkPageSize(MemoryConfiguration memCfg) { + protected void checkPageSize(DataStorageConfiguration memCfg) { if (memCfg.getPageSize() == 0) memCfg.setPageSize(DFLT_PAGE_SIZE); } /** - * @param plcCfg Memory policy config. + * @param regCfg data region config. * * @throws IgniteCheckedException if validation of memory metrics properties fails. 
*/ - private static void checkMetricsProperties(MemoryPolicyConfiguration plcCfg) throws IgniteCheckedException { - if (plcCfg.getRateTimeInterval() <= 0) + private static void checkMetricsProperties(DataRegionConfiguration regCfg) throws IgniteCheckedException { + if (regCfg.getMetricsRateTimeInterval() <= 0) throw new IgniteCheckedException("Rate time interval must be greater than zero " + - "(use MemoryPolicyConfiguration.rateTimeInterval property to adjust the interval) " + - "[name=" + plcCfg.getName() + - ", rateTimeInterval=" + plcCfg.getRateTimeInterval() + "]" + "(use DataRegionConfiguration.rateTimeInterval property to adjust the interval) " + + "[name=" + regCfg.getName() + + ", rateTimeInterval=" + regCfg.getMetricsRateTimeInterval() + "]" ); - if (plcCfg.getSubIntervals() <= 0) + if (regCfg.getMetricsSubIntervalCount() <= 0) throw new IgniteCheckedException("Sub intervals must be greater than zero " + - "(use MemoryPolicyConfiguration.subIntervals property to adjust the sub intervals) " + - "[name=" + plcCfg.getName() + - ", subIntervals=" + plcCfg.getSubIntervals() + "]" + "(use DataRegionConfiguration.subIntervals property to adjust the sub intervals) " + + "[name=" + regCfg.getName() + + ", subIntervals=" + regCfg.getMetricsSubIntervalCount() + "]" ); - if (plcCfg.getRateTimeInterval() < 1_000) + if (regCfg.getMetricsRateTimeInterval() < 1_000) throw new IgniteCheckedException("Rate time interval must be longer that 1 second (1_000 milliseconds) " + - "(use MemoryPolicyConfiguration.rateTimeInterval property to adjust the interval) " + - "[name=" + plcCfg.getName() + - ", rateTimeInterval=" + plcCfg.getRateTimeInterval() + "]"); + "(use DataRegionConfiguration.rateTimeInterval property to adjust the interval) " + + "[name=" + regCfg.getName() + + ", rateTimeInterval=" + regCfg.getMetricsRateTimeInterval() + "]"); } /** @@ -424,19 +410,19 @@ private static void checkMetricsProperties(MemoryPolicyConfiguration plcCfg) thr * * @throws 
IgniteCheckedException In case of validation violation. */ - private static void checkSystemMemoryPolicySizeConfiguration( + private static void checkSystemDataRegionSizeConfiguration( long sysCacheInitSize, long sysCacheMaxSize ) throws IgniteCheckedException { if (sysCacheInitSize < MIN_PAGE_MEMORY_SIZE) throw new IgniteCheckedException("Initial size for system cache must have size more than 10MB (use " + - "MemoryConfiguration.systemCacheInitialSize property to set correct size in bytes) " + + "DataStorageConfiguration.systemCacheInitialSize property to set correct size in bytes) " + "[size=" + U.readableSize(sysCacheInitSize, true) + ']' ); if (U.jvm32Bit() && sysCacheInitSize > MAX_PAGE_MEMORY_INIT_SIZE_32_BIT) throw new IgniteCheckedException("Initial size for system cache exceeds 2GB on 32-bit JVM (use " + - "MemoryPolicyConfiguration.systemCacheInitialSize property to set correct size in bytes " + + "DataRegionConfiguration.systemCacheInitialSize property to set correct size in bytes " + "or use 64-bit JVM) [size=" + U.readableSize(sysCacheInitSize, true) + ']' ); @@ -444,138 +430,90 @@ private static void checkSystemMemoryPolicySizeConfiguration( throw new IgniteCheckedException("MaxSize of system cache must not be smaller than " + "initialSize [initSize=" + U.readableSize(sysCacheInitSize, true) + ", maxSize=" + U.readableSize(sysCacheMaxSize, true) + "]. " + - "Use MemoryConfiguration.systemCacheInitialSize/MemoryConfiguration.systemCacheMaxSize " + + "Use DataStorageConfiguration.systemCacheInitialSize/DataStorageConfiguration.systemCacheMaxSize " + "properties to set correct sizes in bytes." ); } /** - * @param dfltPlcName Default MemoryPolicy name. - * @param dfltPlcSize Default size of MemoryPolicy overridden by user (equals to -1 if wasn't specified by user). - * @param plcNames All MemoryPolicy names. - * @throws IgniteCheckedException In case of validation violation. 
- */ - private static void checkDefaultPolicyConfiguration( - String dfltPlcName, - long dfltPlcSize, - Collection plcNames - ) throws IgniteCheckedException { - if (dfltPlcSize != MemoryConfiguration.DFLT_MEMORY_POLICY_MAX_SIZE) { - if (!F.eq(dfltPlcName, MemoryConfiguration.DFLT_MEM_PLC_DEFAULT_NAME)) - throw new IgniteCheckedException("User-defined MemoryPolicy configuration " + - "and defaultMemoryPolicySize properties are set at the same time. " + - "Delete either MemoryConfiguration.defaultMemoryPolicySize property " + - "or user-defined default MemoryPolicy configuration"); - - if (dfltPlcSize < MIN_PAGE_MEMORY_SIZE) - throw new IgniteCheckedException("User-defined default MemoryPolicy size is less than 1MB. " + - "Use MemoryConfiguration.defaultMemoryPolicySize property to set correct size."); - - if (U.jvm32Bit() && dfltPlcSize > MAX_PAGE_MEMORY_INIT_SIZE_32_BIT) - throw new IgniteCheckedException("User-defined default MemoryPolicy size exceeds 2GB on 32-bit JVM " + - "(use MemoryConfiguration.defaultMemoryPolicySize property to set correct size in bytes " + - "or use 64-bit JVM) [size=" + U.readableSize(dfltPlcSize, true) + ']' - ); - } - - if (!DFLT_MEM_PLC_DEFAULT_NAME.equals(dfltPlcName)) { - if (dfltPlcName.isEmpty()) - throw new IgniteCheckedException("User-defined default MemoryPolicy name must be non-empty"); - - if (!plcNames.contains(dfltPlcName)) - throw new IgniteCheckedException("User-defined default MemoryPolicy name " + - "must be presented among configured MemoryPolices: " + dfltPlcName); - } - } - - /** - * @param plcCfg MemoryPolicyConfiguration to validate. + * @param regCfg DataRegionConfiguration to validate. * @throws IgniteCheckedException If config is invalid. 
*/ - private void checkPolicySize(MemoryPolicyConfiguration plcCfg) throws IgniteCheckedException { - boolean dfltInitSize = false; - - if (plcCfg.getInitialSize() == 0) { - plcCfg.setInitialSize(DFLT_MEMORY_POLICY_INITIAL_SIZE); - - dfltInitSize = true; - } - - if (plcCfg.getInitialSize() < MIN_PAGE_MEMORY_SIZE) - throw new IgniteCheckedException("MemoryPolicy must have size more than 10MB (use " + - "MemoryPolicyConfiguration.initialSize property to set correct size in bytes) " + - "[name=" + plcCfg.getName() + ", size=" + U.readableSize(plcCfg.getInitialSize(), true) + "]" + private void checkDataRegionSize(DataRegionConfiguration regCfg) throws IgniteCheckedException { + if (regCfg.getInitialSize() < MIN_PAGE_MEMORY_SIZE || regCfg.getMaxSize() < MIN_PAGE_MEMORY_SIZE) + throw new IgniteCheckedException("DataRegion must have size more than 10MB (use " + + "DataRegionConfiguration.initialSize and .maxSize properties to set correct size in bytes) " + + "[name=" + regCfg.getName() + ", initialSize=" + U.readableSize(regCfg.getInitialSize(), true) + + ", maxSize=" + U.readableSize(regCfg.getMaxSize(), true) + "]" ); - if (plcCfg.getMaxSize() < plcCfg.getInitialSize()) { - // If initial size was not set, use the max size. 
- if (dfltInitSize) { - plcCfg.setInitialSize(plcCfg.getMaxSize()); - - LT.warn(log, "MemoryPolicy maxSize=" + U.readableSize(plcCfg.getMaxSize(), true) + - " is smaller than defaultInitialSize=" + - U.readableSize(MemoryConfiguration.DFLT_MEMORY_POLICY_INITIAL_SIZE, true) + - ", setting initialSize to " + U.readableSize(plcCfg.getMaxSize(), true)); - } - else { - throw new IgniteCheckedException("MemoryPolicy maxSize must not be smaller than " + - "initialSize [name=" + plcCfg.getName() + - ", initSize=" + U.readableSize(plcCfg.getInitialSize(), true) + - ", maxSize=" + U.readableSize(plcCfg.getMaxSize(), true) + ']'); + if (regCfg.getMaxSize() < regCfg.getInitialSize()) { + if (regCfg.getInitialSize() != Math.min(DataStorageConfiguration.DFLT_DATA_REGION_MAX_SIZE, + DataStorageConfiguration.DFLT_DATA_REGION_INITIAL_SIZE)) { + throw new IgniteCheckedException("DataRegion maxSize must not be smaller than initialSize" + + "[name=" + regCfg.getName() + ", initialSize=" + U.readableSize(regCfg.getInitialSize(), true) + + ", maxSize=" + U.readableSize(regCfg.getMaxSize(), true) + "]"); } + + regCfg.setInitialSize(regCfg.getMaxSize()); + + LT.warn(log, "DataRegion maxSize=" + U.readableSize(regCfg.getMaxSize(), true) + + " is smaller than defaultInitialSize=" + + U.readableSize(DataStorageConfiguration.DFLT_DATA_REGION_INITIAL_SIZE, true) + + ", setting initialSize to " + U.readableSize(regCfg.getMaxSize(), true)); } - if (U.jvm32Bit() && plcCfg.getInitialSize() > MAX_PAGE_MEMORY_INIT_SIZE_32_BIT) - throw new IgniteCheckedException("MemoryPolicy initialSize exceeds 2GB on 32-bit JVM (use " + - "MemoryPolicyConfiguration.initialSize property to set correct size in bytes or use 64-bit JVM) " + - "[name=" + plcCfg.getName() + - ", size=" + U.readableSize(plcCfg.getInitialSize(), true) + "]"); + if (U.jvm32Bit() && regCfg.getInitialSize() > MAX_PAGE_MEMORY_INIT_SIZE_32_BIT) + throw new IgniteCheckedException("DataRegion initialSize exceeds 2GB on 32-bit JVM (use " + + 
"DataRegionConfiguration.initialSize property to set correct size in bytes or use 64-bit JVM) " + + "[name=" + regCfg.getName() + + ", size=" + U.readableSize(regCfg.getInitialSize(), true) + "]"); } /** - * @param plcCfg MemoryPolicyConfiguration to validate. + * @param regCfg DataRegionConfiguration to validate. * @param dbCfg Memory configuration. * @throws IgniteCheckedException If config is invalid. */ - protected void checkPolicyEvictionProperties(MemoryPolicyConfiguration plcCfg, MemoryConfiguration dbCfg) + protected void checkRegionEvictionProperties(DataRegionConfiguration regCfg, DataStorageConfiguration dbCfg) throws IgniteCheckedException { - if (plcCfg.getPageEvictionMode() == DataPageEvictionMode.DISABLED) + if (regCfg.getPageEvictionMode() == DataPageEvictionMode.DISABLED) return; - if (plcCfg.getEvictionThreshold() < 0.5 || plcCfg.getEvictionThreshold() > 0.999) { + if (regCfg.getEvictionThreshold() < 0.5 || regCfg.getEvictionThreshold() > 0.999) { throw new IgniteCheckedException("Page eviction threshold must be between 0.5 and 0.999: " + - plcCfg.getName()); + regCfg.getName()); } - if (plcCfg.getEmptyPagesPoolSize() <= 10) - throw new IgniteCheckedException("Evicted pages pool size should be greater than 10: " + plcCfg.getName()); + if (regCfg.getEmptyPagesPoolSize() <= 10) + throw new IgniteCheckedException("Evicted pages pool size should be greater than 10: " + regCfg.getName()); - long maxPoolSize = plcCfg.getMaxSize() / dbCfg.getPageSize() / 10; + long maxPoolSize = regCfg.getMaxSize() / dbCfg.getPageSize() / 10; - if (plcCfg.getEmptyPagesPoolSize() >= maxPoolSize) { + if (regCfg.getEmptyPagesPoolSize() >= maxPoolSize) { throw new IgniteCheckedException("Evicted pages pool size should be lesser than " + maxPoolSize + - ": " + plcCfg.getName()); + ": " + regCfg.getName()); } } /** - * @param plcName MemoryPolicy name to validate. + * @param regName DataRegion name to validate. * @param observedNames Names of MemoryPolicies observed before. 
* @throws IgniteCheckedException If config is invalid. */ - private static void checkPolicyName(String plcName, Collection observedNames) + private static void checkDataRegionName(String regName, Collection observedNames) throws IgniteCheckedException { - if (plcName == null || plcName.isEmpty()) - throw new IgniteCheckedException("User-defined MemoryPolicyConfiguration must have non-null and " + + if (regName == null || regName.isEmpty()) + throw new IgniteCheckedException("User-defined DataRegionConfiguration must have non-null and " + "non-empty name."); - if (observedNames.contains(plcName)) - throw new IgniteCheckedException("Two MemoryPolicies have the same name: " + plcName); + if (observedNames.contains(regName)) + throw new IgniteCheckedException("Two MemoryPolicies have the same name: " + regName); - if (SYSTEM_MEMORY_POLICY_NAME.equals(plcName)) - throw new IgniteCheckedException("'sysMemPlc' policy name is reserved for internal use."); + if (SYSTEM_DATA_REGION_NAME.equals(regName)) + throw new IgniteCheckedException("'" + SYSTEM_DATA_REGION_NAME + "' policy name is reserved for internal use."); - observedNames.add(plcName); + observedNames.add(regName); } /** @@ -589,22 +527,22 @@ public void dumpStatistics(IgniteLogger log) { } /** - * @return collection of all configured {@link MemoryPolicy policies}. + * @return collection of all configured {@link DataRegion policies}. */ - public Collection memoryPolicies() { - return memPlcMap != null ? memPlcMap.values() : null; + public Collection dataRegions() { + return dataRegionMap != null ? dataRegionMap.values() : null; } /** - * @return MemoryMetrics for all MemoryPolicies configured in Ignite instance. + * @return DataRegionMetrics for all MemoryPolicies configured in Ignite instance. */ - public Collection memoryMetrics() { + public Collection memoryMetrics() { if (!F.isEmpty(memMetricsMap)) { // Intentionally return a collection copy to make it explicitly serializable. 
- Collection res = new ArrayList<>(memMetricsMap.size()); + Collection res = new ArrayList<>(memMetricsMap.size()); - for (MemoryMetrics metrics : memMetricsMap.values()) - res.add(new MemoryMetricsSnapshot(metrics)); + for (DataRegionMetrics metrics : memMetricsMap.values()) + res.add(new DataRegionMetricsSnapshot(metrics)); return res; } @@ -613,9 +551,9 @@ public Collection memoryMetrics() { } /** - * @return PersistenceMetrics if persistence is enabled or {@code null} otherwise. + * @return DataStorageMetrics if persistence is enabled or {@code null} otherwise. */ - public PersistenceMetrics persistentStoreMetrics() { + public DataStorageMetrics persistentStoreMetrics() { return null; } @@ -628,46 +566,46 @@ public void readCheckpointAndRestoreMemory(List cachesTo } /** - * @param memPlcName Name of {@link MemoryPolicy} to obtain {@link MemoryMetrics} for. - * @return {@link MemoryMetrics} snapshot for specified {@link MemoryPolicy} or {@code null} if - * no {@link MemoryPolicy} is configured for specified name. + * @param memPlcName Name of {@link DataRegion} to obtain {@link DataRegionMetrics} for. + * @return {@link DataRegionMetrics} snapshot for specified {@link DataRegion} or {@code null} if + * no {@link DataRegion} is configured for specified name. */ - @Nullable public MemoryMetrics memoryMetrics(String memPlcName) { + @Nullable public DataRegionMetrics memoryMetrics(String memPlcName) { if (!F.isEmpty(memMetricsMap)) { - MemoryMetrics memMetrics = memMetricsMap.get(memPlcName); + DataRegionMetrics memMetrics = memMetricsMap.get(memPlcName); if (memMetrics == null) return null; else - return new MemoryMetricsSnapshot(memMetrics); + return new DataRegionMetricsSnapshot(memMetrics); } else return null; } /** - * @param memPlcName Memory policy name. - * @return {@link MemoryPolicy} instance associated with a given {@link MemoryPolicyConfiguration}. - * @throws IgniteCheckedException in case of request for unknown MemoryPolicy. 
+ * @param memPlcName data region name. + * @return {@link DataRegion} instance associated with a given {@link DataRegionConfiguration}. + * @throws IgniteCheckedException in case of request for unknown DataRegion. */ - public MemoryPolicy memoryPolicy(String memPlcName) throws IgniteCheckedException { + public DataRegion dataRegion(String memPlcName) throws IgniteCheckedException { if (memPlcName == null) - return dfltMemPlc; + return dfltDataRegion; - if (memPlcMap == null) + if (dataRegionMap == null) return null; - MemoryPolicy plc; + DataRegion plc; - if ((plc = memPlcMap.get(memPlcName)) == null) - throw new IgniteCheckedException("Requested MemoryPolicy is not configured: " + memPlcName); + if ((plc = dataRegionMap.get(memPlcName)) == null) + throw new IgniteCheckedException("Requested DataRegion is not configured: " + memPlcName); return plc; } /** - * @param memPlcName MemoryPolicyConfiguration name. - * @return {@link FreeList} instance associated with a given {@link MemoryPolicyConfiguration}. + * @param memPlcName DataRegionConfiguration name. + * @return {@link FreeList} instance associated with a given {@link DataRegionConfiguration}. */ public FreeList freeList(String memPlcName) { if (memPlcName == null) @@ -677,8 +615,8 @@ public FreeList freeList(String memPlcName) { } /** - * @param memPlcName MemoryPolicyConfiguration name. - * @return {@link ReuseList} instance associated with a given {@link MemoryPolicyConfiguration}. + * @param memPlcName DataRegionConfiguration name. + * @return {@link ReuseList} instance associated with a given {@link DataRegionConfiguration}. 
*/ public ReuseList reuseList(String memPlcName) { if (memPlcName == null) @@ -689,8 +627,8 @@ public ReuseList reuseList(String memPlcName) { /** {@inheritDoc} */ @Override protected void stop0(boolean cancel) { - if (memPlcMap != null) { - for (MemoryPolicy memPlc : memPlcMap.values()) { + if (dataRegionMap != null) { + for (DataRegion memPlc : dataRegionMap.values()) { memPlc.pageMemory().stop(); memPlc.evictionTracker().stop(); @@ -698,9 +636,9 @@ public ReuseList reuseList(String memPlcName) { unregisterMBean(memPlc.memoryMetrics().getName()); } - memPlcMap.clear(); + dataRegionMap.clear(); - memPlcMap = null; + dataRegionMap = null; } } @@ -718,7 +656,7 @@ private void unregisterMBean(String name) { cfg.getMBeanServer().unregisterMBean( U.makeMBeanName( cfg.getIgniteInstanceName(), - "MemoryMetrics", name + "DataRegionMetrics", name )); } catch (Throwable e) { @@ -848,13 +786,13 @@ public void releaseHistoryForPreloading() { /** * See {@link GridCacheMapEntry#ensureFreeSpace()} * - * @param memPlc Memory policy. + * @param memPlc data region. */ - public void ensureFreeSpace(MemoryPolicy memPlc) throws IgniteCheckedException { + public void ensureFreeSpace(DataRegion memPlc) throws IgniteCheckedException { if (memPlc == null) return; - MemoryPolicyConfiguration plcCfg = memPlc.config(); + DataRegionConfiguration plcCfg = memPlc.config(); if (plcCfg.getPageEvictionMode() == DataPageEvictionMode.DISABLED) return; @@ -884,16 +822,16 @@ public void ensureFreeSpace(MemoryPolicy memPlc) throws IgniteCheckedException { /** * @param memCfg memory configuration with common parameters. - * @param plcCfg memory policy with PageMemory specific parameters. - * @param memMetrics {@link MemoryMetrics} object to collect memory usage metrics. - * @return Memory policy instance. + * @param plcCfg data region with PageMemory specific parameters. + * @param memMetrics {@link DataRegionMetrics} object to collect memory usage metrics. + * @return data region instance. 
* * @throws IgniteCheckedException If failed to initialize swap path. */ - private MemoryPolicy initMemory( - MemoryConfiguration memCfg, - MemoryPolicyConfiguration plcCfg, - MemoryMetricsImpl memMetrics + private DataRegion initMemory( + DataStorageConfiguration memCfg, + DataRegionConfiguration plcCfg, + DataRegionMetricsImpl memMetrics ) throws IgniteCheckedException { File allocPath = buildAllocPath(plcCfg); @@ -905,15 +843,15 @@ private MemoryPolicy initMemory( PageMemory pageMem = createPageMemory(memProvider, memCfg, plcCfg, memMetrics); - return new MemoryPolicy(pageMem, plcCfg, memMetrics, createPageEvictionTracker(plcCfg, pageMem)); + return new DataRegion(pageMem, plcCfg, memMetrics, createPageEvictionTracker(plcCfg, pageMem)); } /** - * @param plc Memory Policy Configuration. + * @param plc data region Configuration. * @param pageMem Page memory. */ - private PageEvictionTracker createPageEvictionTracker(MemoryPolicyConfiguration plc, PageMemory pageMem) { - if (plc.getPageEvictionMode() == DataPageEvictionMode.DISABLED || cctx.gridConfig().isPersistentStoreEnabled()) + private PageEvictionTracker createPageEvictionTracker(DataRegionConfiguration plc, PageMemory pageMem) { + if (plc.getPageEvictionMode() == DataPageEvictionMode.DISABLED || CU.isPersistenceEnabled(cctx.gridConfig())) return new NoOpPageEvictionTracker(); assert pageMem instanceof PageMemoryNoStoreImpl : pageMem.getClass(); @@ -936,12 +874,12 @@ private PageEvictionTracker createPageEvictionTracker(MemoryPolicyConfiguration /** * Builds allocation path for memory mapped file to be used with PageMemory. * - * @param plc MemoryPolicyConfiguration. + * @param plc DataRegionConfiguration. * * @throws IgniteCheckedException If resolving swap directory fails. 
*/ - @Nullable private File buildAllocPath(MemoryPolicyConfiguration plc) throws IgniteCheckedException { - String path = plc.getSwapFilePath(); + @Nullable private File buildAllocPath(DataRegionConfiguration plc) throws IgniteCheckedException { + String path = plc.getSwapPath(); if (path == null) return null; @@ -962,15 +900,15 @@ private PageEvictionTracker createPageEvictionTracker(MemoryPolicyConfiguration * * @param memProvider Memory provider. * @param memCfg Memory configuartion. - * @param memPlcCfg Memory policy configuration. - * @param memMetrics MemoryMetrics to collect memory usage metrics. + * @param memPlcCfg data region configuration. + * @param memMetrics DataRegionMetrics to collect memory usage metrics. * @return PageMemory instance. */ protected PageMemory createPageMemory( DirectMemoryProvider memProvider, - MemoryConfiguration memCfg, - MemoryPolicyConfiguration memPlcCfg, - MemoryMetricsImpl memMetrics + DataStorageConfiguration memCfg, + DataRegionConfiguration memPlcCfg, + DataRegionMetricsImpl memMetrics ) { memMetrics.persistenceEnabled(false); @@ -1003,14 +941,14 @@ protected File buildPath(String path, String consId) throws IgniteCheckedExcepti /** {@inheritDoc} */ @Override public void onActivate(GridKernalContext kctx) throws IgniteCheckedException { - if (cctx.kernalContext().clientNode() && cctx.kernalContext().config().getMemoryConfiguration() == null) + if (cctx.kernalContext().clientNode() && cctx.kernalContext().config().getDataStorageConfiguration() == null) return; - MemoryConfiguration memCfg = cctx.kernalContext().config().getMemoryConfiguration(); + DataStorageConfiguration memCfg = cctx.kernalContext().config().getDataStorageConfiguration(); assert memCfg != null; - initPageMemoryPolicies(memCfg); + initDataRegions(memCfg); registerMetricsMBeans(); @@ -1025,10 +963,10 @@ protected File buildPath(String path, String consId) throws IgniteCheckedExcepti } /** - * @return Name of MemoryPolicyConfiguration for internal caches. 
+ * @return Name of DataRegionConfiguration for internal caches. */ - public String systemMemoryPolicyName() { - return SYSTEM_MEMORY_POLICY_NAME; + public String systemDateRegionName() { + return SYSTEM_DATA_REGION_NAME; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java index 9cc5c626df139..20510216c9100 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java @@ -40,7 +40,8 @@ public class RowStore { /** */ protected final CacheObjectContext coctx; - + /** */ + private final boolean persistenceEnabled; /** * @param grp Cache group. @@ -54,7 +55,9 @@ public RowStore(CacheGroupContext grp, FreeList freeList) { ctx = grp.shared(); coctx = grp.cacheObjectContext(); - pageMem = grp.memoryPolicy().pageMemory(); + pageMem = grp.dataRegion().pageMemory(); + + persistenceEnabled = grp.dataRegion().config().isPersistenceEnabled(); } /** @@ -63,13 +66,18 @@ public RowStore(CacheGroupContext grp, FreeList freeList) { */ public void removeRow(long link) throws IgniteCheckedException { assert link != 0; - ctx.database().checkpointReadLock(); - try { + if (!persistenceEnabled) freeList.removeDataRowByLink(link); - } - finally { - ctx.database().checkpointReadUnlock(); + else { + ctx.database().checkpointReadLock(); + + try { + freeList.removeDataRowByLink(link); + } + finally { + ctx.database().checkpointReadUnlock(); + } } } @@ -78,13 +86,17 @@ public void removeRow(long link) throws IgniteCheckedException { * @throws IgniteCheckedException If failed. 
*/ public void addRow(CacheDataRow row) throws IgniteCheckedException { - ctx.database().checkpointReadLock(); - - try { + if (!persistenceEnabled) freeList.insertDataRow(row); - } - finally { - ctx.database().checkpointReadUnlock(); + else { + ctx.database().checkpointReadLock(); + + try { + freeList.insertDataRow(row); + } + finally { + ctx.database().checkpointReadUnlock(); + } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/FairFifoPageEvictionTracker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/FairFifoPageEvictionTracker.java index f5c7c8a9b5b82..8a3d5b01001de 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/FairFifoPageEvictionTracker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/FairFifoPageEvictionTracker.java @@ -20,7 +20,7 @@ import java.util.LinkedList; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; @@ -34,12 +34,12 @@ public class FairFifoPageEvictionTracker extends PageAbstractEvictionTracker { /** * @param pageMem Page memory. - * @param plcCfg Memory policy configuration. + * @param plcCfg Data region configuration. * @param sharedCtx Shared context. 
*/ public FairFifoPageEvictionTracker( PageMemoryNoStoreImpl pageMem, - MemoryPolicyConfiguration plcCfg, + DataRegionConfiguration plcCfg, GridCacheSharedContext sharedCtx) { super(pageMem, plcCfg, sharedCtx); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/PageAbstractEvictionTracker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/PageAbstractEvictionTracker.java index a524d5ef74c45..5142c59136de5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/PageAbstractEvictionTracker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/PageAbstractEvictionTracker.java @@ -18,7 +18,7 @@ import java.util.List; import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; import org.apache.ignite.internal.processors.cache.GridCacheContext; @@ -54,12 +54,12 @@ public abstract class PageAbstractEvictionTracker implements PageEvictionTracker /** * @param pageMem Page memory. - * @param plcCfg Memory policy configuration. + * @param plcCfg Data region configuration. * @param sharedCtx Shared context. 
*/ PageAbstractEvictionTracker( PageMemoryNoStoreImpl pageMem, - MemoryPolicyConfiguration plcCfg, + DataRegionConfiguration plcCfg, GridCacheSharedContext sharedCtx ) { this.pageMem = pageMem; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/Random2LruPageEvictionTracker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/Random2LruPageEvictionTracker.java index 00f1b16d11db0..4d421915764ae 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/Random2LruPageEvictionTracker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/Random2LruPageEvictionTracker.java @@ -20,8 +20,8 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; @@ -55,12 +55,12 @@ public class Random2LruPageEvictionTracker extends PageAbstractEvictionTracker { */ public Random2LruPageEvictionTracker( PageMemoryNoStoreImpl pageMem, - MemoryPolicyConfiguration plcCfg, + DataRegionConfiguration plcCfg, GridCacheSharedContext sharedCtx ) { super(pageMem, plcCfg, sharedCtx); - MemoryConfiguration memCfg = sharedCtx.kernalContext().config().getMemoryConfiguration(); + DataStorageConfiguration memCfg = sharedCtx.kernalContext().config().getDataStorageConfiguration(); assert plcCfg.getMaxSize() / memCfg.getPageSize() < Integer.MAX_VALUE; diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/RandomLruPageEvictionTracker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/RandomLruPageEvictionTracker.java index 035a91a93be43..ed6d2d4519660 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/RandomLruPageEvictionTracker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/evict/RandomLruPageEvictionTracker.java @@ -21,8 +21,8 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; @@ -57,12 +57,12 @@ public class RandomLruPageEvictionTracker extends PageAbstractEvictionTracker { */ public RandomLruPageEvictionTracker( PageMemory pageMem, - MemoryPolicyConfiguration plcCfg, + DataRegionConfiguration plcCfg, GridCacheSharedContext sharedCtx ) { super((PageMemoryNoStoreImpl)pageMem, plcCfg, sharedCtx); - MemoryConfiguration memCfg = sharedCtx.kernalContext().config().getMemoryConfiguration(); + DataStorageConfiguration memCfg = sharedCtx.kernalContext().config().getDataStorageConfiguration(); assert plcCfg.getMaxSize() / memCfg.getPageSize() < Integer.MAX_VALUE; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/AsyncFileIOFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/AsyncFileIOFactory.java index 0fb30520b94ac..104697e810e58 
100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/AsyncFileIOFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/AsyncFileIOFactory.java @@ -33,20 +33,34 @@ public class AsyncFileIOFactory implements FileIOFactory { /** */ private static final long serialVersionUID = 0L; + /** Thread local channel future holder. */ + private transient volatile ThreadLocal holder = initHolder(); + /** {@inheritDoc} */ @Override public FileIO create(File file) throws IOException { return create(file, CREATE, READ, WRITE); } - /** */ - private ThreadLocal holder = new ThreadLocal() { - @Override protected AsyncFileIO.ChannelOpFuture initialValue() { - return new AsyncFileIO.ChannelOpFuture(); - } - }; - /** {@inheritDoc} */ @Override public FileIO create(File file, OpenOption... modes) throws IOException { + if (holder == null) { + synchronized (this) { + if (holder == null) + holder = initHolder(); + } + } + return new AsyncFileIO(file, holder, modes); } + + /** + * Initializes thread local channel future holder. 
+ */ + private ThreadLocal initHolder() { + return new ThreadLocal() { + @Override protected AsyncFileIO.ChannelOpFuture initialValue() { + return new AsyncFileIO.ChannelOpFuture(); + } + }; + } } \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java index 0547dbc26a4b7..408240cab3ab2 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java @@ -27,7 +27,7 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteSystemProperties; -import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; @@ -60,7 +60,7 @@ public class FilePageStore implements PageStore { private final byte type; /** Database configuration. */ - protected final MemoryConfiguration dbCfg; + protected final DataStorageConfiguration dbCfg; /** Factory to provide I/O interfaces for read/write operations with files */ private final FileIOFactory ioFactory; @@ -92,7 +92,7 @@ public class FilePageStore implements PageStore { /** * @param file File. 
*/ - public FilePageStore(byte type, File file, FileIOFactory factory, MemoryConfiguration cfg) { + public FilePageStore(byte type, File file, FileIOFactory factory, DataStorageConfiguration cfg) { this.type = type; cfgFile = file; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java index ed821275bf3e2..aadcee60626d9 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java @@ -36,7 +36,7 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; @@ -92,7 +92,7 @@ public class FilePageStoreManager extends GridCacheSharedManagerAdapter implemen private final IgniteConfiguration igniteCfg; /** */ - private PersistentStoreConfiguration pstCfg; + private DataStorageConfiguration dsCfg; /** Absolute directory for file page store. Includes consistent id based folder. 
*/ private File storeWorkDir; @@ -109,11 +109,11 @@ public class FilePageStoreManager extends GridCacheSharedManagerAdapter implemen public FilePageStoreManager(GridKernalContext ctx) { igniteCfg = ctx.config(); - PersistentStoreConfiguration pstCfg = igniteCfg.getPersistentStoreConfiguration(); + DataStorageConfiguration dsCfg = igniteCfg.getDataStorageConfiguration(); - assert pstCfg != null : "WAL should not be created if persistence is disabled."; + assert dsCfg != null; - this.pstCfg = pstCfg; + this.dsCfg = dsCfg; } /** {@inheritDoc} */ @@ -352,7 +352,7 @@ private CacheStoreHolder initForCache(CacheGroupDescriptor grpDesc, CacheConfigu grpsWithoutIdx.add(grpDesc.groupId()); FileVersionCheckingFactory pageStoreFactory = new FileVersionCheckingFactory( - pstCfg.getFileIOFactory(), igniteCfg.getMemoryConfiguration()); + dsCfg.getFileIOFactory(), igniteCfg.getDataStorageConfiguration()); FilePageStore idxStore = pageStoreFactory.createPageStore(PageMemory.FLAG_IDX, idxFile); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java index 5d044ec6b60e5..c2e2d36566b71 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java @@ -17,7 +17,7 @@ package org.apache.ignite.internal.processors.cache.persistence.file; import java.io.File; -import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; /** * @@ -35,7 +35,7 @@ public class FilePageStoreV2 extends FilePageStore { * @param factory Factory. * @param cfg Config. 
*/ - public FilePageStoreV2(byte type, File file, FileIOFactory factory, MemoryConfiguration cfg) { + public FilePageStoreV2(byte type, File file, FileIOFactory factory, DataStorageConfiguration cfg) { super(type, file, factory, cfg); hdrSize = cfg.getPageSize(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java index 40870dce4b63a..bab2cf07fe65d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java @@ -22,7 +22,7 @@ import java.nio.ByteBuffer; import java.nio.ByteOrder; import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; /** * Checks version in files if it's present on the disk, creates store with latest version otherwise. @@ -38,14 +38,14 @@ public class FileVersionCheckingFactory implements FilePageStoreFactory { private final FileIOFactory fileIOFactory; /** Memory configuration. */ - private final MemoryConfiguration memCfg; + private final DataStorageConfiguration memCfg; /** * @param fileIOFactory File io factory. * @param memCfg Memory configuration. 
*/ public FileVersionCheckingFactory( - FileIOFactory fileIOFactory, MemoryConfiguration memCfg) { + FileIOFactory fileIOFactory, DataStorageConfiguration memCfg) { this.fileIOFactory = fileIOFactory; this.memCfg = memCfg; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java index c73a952f500ee..e7a7e63566102 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java @@ -31,12 +31,13 @@ import java.util.regex.Pattern; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.processors.GridProcessorAdapter; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.util.typedef.internal.A; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.NotNull; @@ -157,7 +158,7 @@ private PdsFolderSettings prepareNewSettings() throws IgniteCheckedException { //here deprecated method is used to get compatible version of consistentId final Serializable consistentId = ctx.discovery().consistentId(); - if (!cfg.isPersistentStoreEnabled()) + if (!CU.isPersistenceEnabled(cfg)) return compatibleResolve(pstStoreBasePath, consistentId); if 
(getBoolean(IGNITE_DATA_STORAGE_FOLDER_BY_CONSISTENT_ID, false)) @@ -442,12 +443,12 @@ private GridCacheDatabaseSharedManager.FileLockHolder tryLock(File dbStoreDirWit * @throws IgniteCheckedException if I/O failed. */ @Nullable private File resolvePersistentStoreBasePath() throws IgniteCheckedException { - final PersistentStoreConfiguration pstCfg = cfg.getPersistentStoreConfiguration(); + final DataStorageConfiguration dsCfg = cfg.getDataStorageConfiguration(); - if (pstCfg == null) + if (dsCfg == null) return null; - final String pstPath = pstCfg.getPersistentStorePath(); + final String pstPath = dsCfg.getStoragePath(); return U.resolveWorkDirectory( cfg.getWorkDirectory(), diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeListImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeListImpl.java index 3eb62ae937953..6a87d3e84da6b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeListImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeListImpl.java @@ -31,8 +31,8 @@ import org.apache.ignite.internal.processors.cache.CacheObject; import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; -import org.apache.ignite.internal.processors.cache.persistence.MemoryPolicy; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.evict.PageEvictionTracker; import org.apache.ignite.internal.processors.cache.persistence.tree.io.CacheVersionIO; import 
org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO; @@ -81,7 +81,7 @@ public class FreeListImpl extends PagesList implements FreeList, ReuseList { private final PageHandler updateRow = new UpdateRowHandler(); /** */ - private final MemoryMetricsImpl memMetrics; + private final DataRegionMetricsImpl memMetrics; /** */ private final PageEvictionTracker evictionTracker; @@ -313,7 +313,7 @@ private final class RemoveRowHandler extends PageHandler { * @param cacheId Cache ID. * @param name Name (for debug purpose). * @param memMetrics Memory metrics. - * @param memPlc Memory policy. + * @param memPlc Data region. * @param reuseList Reuse list or {@code null} if this free list will be a reuse list for itself. * @param wal Write ahead log manager. * @param metaPageId Metadata page ID. @@ -323,8 +323,8 @@ private final class RemoveRowHandler extends PageHandler { public FreeListImpl( int cacheId, String name, - MemoryMetricsImpl memMetrics, - MemoryPolicy memPlc, + DataRegionMetricsImpl memMetrics, + DataRegion memPlc, ReuseList reuseList, IgniteWriteAheadLogManager wal, long metaPageId, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java index 8a540a03b49e3..b113c627c7ec6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java @@ -180,8 +180,6 @@ protected final void init(long metaPageId, boolean initNew) throws IgniteChecked assert nextId != pageId : "Loop detected [next=" + U.hexLong(nextId) + ", cur=" + U.hexLong(pageId) + ']'; - - } finally { readUnlock(pageId, page, pageAddr); @@ -354,9 +352,8 @@ public void saveMetadata() throws IgniteCheckedException { * @param pageId Page ID. 
* @param page Page absolute pointer. * @param pageAddr Page address. - * @throws IgniteCheckedException If failed. */ - private void releaseAndClose(long pageId, long page, long pageAddr) throws IgniteCheckedException { + private void releaseAndClose(long pageId, long page, long pageAddr) { if (page != 0L) { try { // No special WAL record because we most likely changed the whole page. @@ -924,7 +921,7 @@ private boolean putReuseBag( * @param bucket Bucket index. * @return Page for take. */ - private Stripe getPageForTake(int bucket) throws IgniteCheckedException { + private Stripe getPageForTake(int bucket) { Stripe[] tails = getBucket(bucket); if (tails == null || bucketsSize[bucket].get() == 0) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java index 95b81ad15cc07..8c64e0e22b171 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java @@ -58,7 +58,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.TrackingPageIO; import org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException; @@ -235,7 +235,7 @@ public class PageMemoryImpl 
implements PageMemoryEx { private long[] sizes; /** */ - private MemoryMetricsImpl memMetrics; + private DataRegionMetricsImpl memMetrics; /** */ private volatile boolean closed; @@ -256,7 +256,7 @@ public PageMemoryImpl( GridInClosure3X flushDirtyPage, GridInClosure3X changeTracker, CheckpointLockStateChecker stateChecker, - MemoryMetricsImpl memMetrics, + DataRegionMetricsImpl memMetrics, boolean throttleEnabled ) { assert sharedCtx != null; @@ -1817,7 +1817,7 @@ private long evictPage() throws IgniteCheckedException { pageEvictWarned = true; U.warn(log, "Page evictions started, this will affect storage performance (consider increasing " + - "MemoryConfiguration#setPageCacheSize)."); + "DataStorageConfiguration#setPageCacheSize)."); } final ThreadLocalRandom rnd = ThreadLocalRandom.current(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java index 6a75dd2322b09..53fe77e3a9933 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java @@ -43,8 +43,8 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.events.EventType; import org.apache.ignite.events.WalSegmentArchivedEvent; @@ -61,7 +61,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import 
org.apache.ignite.internal.processors.cache.GridCacheSharedManagerAdapter; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; -import org.apache.ignite.internal.processors.cache.persistence.PersistenceMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataStorageMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; @@ -76,7 +76,6 @@ import org.apache.ignite.internal.util.GridUnsafe; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.G; -import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; @@ -146,7 +145,7 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl private final long fsyncDelay; /** */ - private final PersistentStoreConfiguration psCfg; + private final DataStorageConfiguration dsCfg; /** Events service */ private final GridEventStorageManager evt; @@ -155,7 +154,7 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl private IgniteConfiguration igCfg; /** Persistence metrics tracker. */ - private PersistenceMetricsImpl metrics; + private DataStorageMetricsImpl metrics; /** */ private File walWorkDir; @@ -209,7 +208,7 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl /** * Positive (non-0) value indicates WAL can be archived even if not complete
        - * See {@link PersistentStoreConfiguration#setWalAutoArchiveAfterInactivity(long)}
        + * See {@link DataStorageConfiguration#setWalAutoArchiveAfterInactivity(long)}
        */ private final long walAutoArchiveAfterInactivity; @@ -239,20 +238,20 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl public FileWriteAheadLogManager(@NotNull final GridKernalContext ctx) { igCfg = ctx.config(); - PersistentStoreConfiguration psCfg = igCfg.getPersistentStoreConfiguration(); + DataStorageConfiguration dsCfg = igCfg.getDataStorageConfiguration(); - assert psCfg != null : "WAL should not be created if persistence is disabled."; + assert dsCfg != null; - this.psCfg = psCfg; + this.dsCfg = dsCfg; - maxWalSegmentSize = psCfg.getWalSegmentSize(); - mode = psCfg.getWalMode(); - tlbSize = psCfg.getTlbSize(); - flushFreq = psCfg.getWalFlushFrequency(); - fsyncDelay = psCfg.getWalFsyncDelayNanos(); - alwaysWriteFullPages = psCfg.isAlwaysWriteFullPages(); - ioFactory = psCfg.getFileIOFactory(); - walAutoArchiveAfterInactivity = psCfg.getWalAutoArchiveAfterInactivity(); + maxWalSegmentSize = dsCfg.getWalSegmentSize(); + mode = dsCfg.getWalMode(); + tlbSize = dsCfg.getWalThreadLocalBufferSize(); + flushFreq = dsCfg.getWalFlushFrequency(); + fsyncDelay = dsCfg.getWalFsyncDelayNanos(); + alwaysWriteFullPages = dsCfg.isAlwaysWriteFullPages(); + ioFactory = dsCfg.getFileIOFactory(); + walAutoArchiveAfterInactivity = dsCfg.getWalAutoArchiveAfterInactivity(); evt = ctx.event(); } @@ -264,15 +263,15 @@ public FileWriteAheadLogManager(@NotNull final GridKernalContext ctx) { checkWalConfiguration(); walWorkDir = initDirectory( - psCfg.getWalStorePath(), - PersistentStoreConfiguration.DFLT_WAL_STORE_PATH, + dsCfg.getWalPath(), + DataStorageConfiguration.DFLT_WAL_PATH, resolveFolders.folderName(), "write ahead log work directory" ); walArchiveDir = initDirectory( - psCfg.getWalArchivePath(), - PersistentStoreConfiguration.DFLT_WAL_ARCHIVE_PATH, + dsCfg.getWalArchivePath(), + DataStorageConfiguration.DFLT_WAL_ARCHIVE_PATH, resolveFolders.folderName(), "write ahead log archive directory" ); @@ -305,11 +304,11 @@ public 
FileWriteAheadLogManager(@NotNull final GridKernalContext ctx) { * @throws IgniteCheckedException if WAL store path is configured and archive path isn't (or vice versa) */ private void checkWalConfiguration() throws IgniteCheckedException { - if (psCfg.getWalStorePath() == null ^ psCfg.getWalArchivePath() == null) { + if (dsCfg.getWalPath() == null ^ dsCfg.getWalArchivePath() == null) { throw new IgniteCheckedException( "Properties should be either both specified or both null " + - "[walStorePath = " + psCfg.getWalStorePath() + - ", walArchivePath = " + psCfg.getWalArchivePath() + "]" + "[walStorePath = " + dsCfg.getWalPath() + + ", walArchivePath = " + dsCfg.getWalArchivePath() + "]" ); } } @@ -568,7 +567,7 @@ private void checkWalRolloverRequiredDuringInactivityPeriod() { walArchiveDir, (FileWALPointer)start, end, - psCfg, + dsCfg, serializer, ioFactory, archiver, @@ -814,7 +813,7 @@ private FileWriteHandle rollOver(FileWriteHandle cur) throws StorageException, I private FileWriteHandle restoreWriteHandle(FileWALPointer lastReadPtr) throws IgniteCheckedException { long absIdx = lastReadPtr == null ? 0 : lastReadPtr.index(); - long segNo = absIdx % psCfg.getWalSegments(); + long segNo = absIdx % dsCfg.getWalSegments(); File curFile = new File(walWorkDir, FileDescriptor.fileName(segNo)); @@ -927,9 +926,9 @@ private void checkOrPrepareFiles() throws IgniteCheckedException { File[] allFiles = walWorkDir.listFiles(WAL_SEGMENT_FILE_FILTER); - if (allFiles.length != 0 && allFiles.length > psCfg.getWalSegments()) + if (allFiles.length != 0 && allFiles.length > dsCfg.getWalSegments()) throw new IgniteCheckedException("Failed to initialize wal (work directory contains " + - "incorrect number of segments) [cur=" + allFiles.length + ", expected=" + psCfg.getWalSegments() + ']'); + "incorrect number of segments) [cur=" + allFiles.length + ", expected=" + dsCfg.getWalSegments() + ']'); // Allocate the first segment synchronously. 
All other segments will be allocated by archiver in background. if (allFiles.length == 0) { @@ -951,7 +950,7 @@ private void formatFile(File file) throws IgniteCheckedException { log.debug("Formatting file [exists=" + file.exists() + ", file=" + file.getAbsolutePath() + ']'); try (FileIO fileIO = ioFactory.create(file, CREATE, READ, WRITE)) { - int left = psCfg.getWalSegmentSize(); + int left = dsCfg.getWalSegmentSize(); if (mode == WALMode.DEFAULT) { while (left > 0) { @@ -1010,7 +1009,7 @@ private File pollNextFile(long curIdx) throws IgniteCheckedException { // Signal to archiver that we are done with the segment and it can be archived. long absNextIdx = archiver.nextAbsoluteSegmentIndex(curIdx); - long segmentIdx = absNextIdx % psCfg.getWalSegments(); + long segmentIdx = absNextIdx % dsCfg.getWalSegments(); return new File(walWorkDir, FileDescriptor.fileName(segmentIdx)); } @@ -1078,7 +1077,7 @@ private void checkEnvironment() throws StorageException { /** * File archiver operates on absolute segment indexes. For any given absolute segment index N we can calculate - * the work WAL segment: S(N) = N % psCfg.walSegments. + * the work WAL segment: S(N) = N % dsCfg.walSegments. * When a work segment is finished, it is given to the archiver. If the absolute index of last archived segment * is denoted by A and the absolute index of next segment we want to write is denoted by W, then we can allow * write to S(W) if W - A <= walSegments.
        @@ -1289,7 +1288,7 @@ private long nextAbsoluteSegmentIndex(long curIdx) throws IgniteCheckedException // Notify archiver thread. notifyAll(); - while (curAbsWalIdx - lastAbsArchivedIdx > psCfg.getWalSegments() && cleanException == null) + while (curAbsWalIdx - lastAbsArchivedIdx > dsCfg.getWalSegments() && cleanException == null) wait(); return curAbsWalIdx; @@ -1359,7 +1358,7 @@ private void releaseWorkSegment(long absIdx) { * @param absIdx Absolute index to archive. */ private SegmentArchiveResult archiveSegment(long absIdx) throws IgniteCheckedException { - long segIdx = absIdx % psCfg.getWalSegments(); + long segIdx = absIdx % dsCfg.getWalSegments(); File origFile = new File(walWorkDir, FileDescriptor.fileName(segIdx)); @@ -1420,7 +1419,7 @@ private void allocateRemainingFiles() throws IgniteCheckedException { } /** - * Validate files depending on {@link PersistentStoreConfiguration#getWalSegments()} and create if need. + * Validate files depending on {@link DataStorageConfiguration#getWalSegments()} and create if need. * Check end when exit condition return false or all files are passed. * * @param startWith Start with. @@ -1429,14 +1428,14 @@ private void allocateRemainingFiles() throws IgniteCheckedException { * @throws IgniteCheckedException if validation or create file fail. 
*/ private void checkFiles(int startWith, boolean create, IgnitePredicate p) throws IgniteCheckedException { - for (int i = startWith; i < psCfg.getWalSegments() && (p == null || (p != null && p.apply(i))); i++) { + for (int i = startWith; i < dsCfg.getWalSegments() && (p == null || (p != null && p.apply(i))); i++) { File checkFile = new File(walWorkDir, FileDescriptor.fileName(i)); if (checkFile.exists()) { if (checkFile.isDirectory()) throw new IgniteCheckedException("Failed to initialize WAL log segment (a directory with " + "the same name already exists): " + checkFile.getAbsolutePath()); - else if (checkFile.length() != psCfg.getWalSegmentSize() && mode == WALMode.DEFAULT) + else if (checkFile.length() != dsCfg.getWalSegmentSize() && mode == WALMode.DEFAULT) throw new IgniteCheckedException("Failed to initialize WAL log segment " + "(WAL segment size change is not supported):" + checkFile.getAbsolutePath()); } @@ -1761,7 +1760,7 @@ private class FileWriteHandle extends FileHandle { /** Condition activated each time writeBuffer() completes. Used to wait previously flushed write to complete */ private final Condition writeComplete = lock.newCondition(); - /** Condition for timed wait of several threads, see {@link PersistentStoreConfiguration#getWalFsyncDelayNanos()} */ + /** Condition for timed wait of several threads, see {@link DataStorageConfiguration#getWalFsyncDelayNanos()} */ private final Condition fsync = lock.newCondition(); /** @@ -2481,7 +2480,7 @@ private static class RecordsIterator extends AbstractWalRecordsIterator { private final FileArchiver archiver; /** */ - private final PersistentStoreConfiguration psCfg; + private final DataStorageConfiguration psCfg; /** Optional start pointer. 
*/ @Nullable @@ -2509,7 +2508,7 @@ private RecordsIterator( File walArchiveDir, @Nullable FileWALPointer start, @Nullable FileWALPointer end, - PersistentStoreConfiguration psCfg, + DataStorageConfiguration psCfg, @NotNull RecordSerializer serializer, FileIOFactory ioFactory, FileArchiver archiver, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/IgniteWalIteratorFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/IgniteWalIteratorFactory.java index 0fb8adf9bd29b..962c4ca135962 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/IgniteWalIteratorFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/IgniteWalIteratorFactory.java @@ -20,8 +20,8 @@ import java.io.File; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.pagemem.wal.WALIterator; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; @@ -84,8 +84,8 @@ public IgniteWalIteratorFactory( this.binaryMetadataFileStoreDir = binaryMetadataFileStoreDir; this.marshallerMappingFileStoreDir = marshallerMappingFileStoreDir; this.keepBinary = keepBinary; - this.ioFactory = new PersistentStoreConfiguration().getFileIOFactory(); - new MemoryConfiguration().setPageSize(pageSize); // just for validate + this.ioFactory = new DataStorageConfiguration().getFileIOFactory(); + new DataStorageConfiguration().setPageSize(pageSize); // just for validate } /** @@ -122,7 +122,7 @@ public 
IgniteWalIteratorFactory(@NotNull final IgniteLogger log, @NotNull final this.log = log; this.pageSize = pageSize; this.ioFactory = ioFactory; - new MemoryConfiguration().setPageSize(pageSize); // just for validate + new DataStorageConfiguration().setPageSize(pageSize); // just for validate } /** @@ -134,7 +134,7 @@ public IgniteWalIteratorFactory(@NotNull final IgniteLogger log, @NotNull final * according its boundaries. */ public IgniteWalIteratorFactory(@NotNull final IgniteLogger log, int pageSize) { - this(log, new PersistentStoreConfiguration().getFileIOFactory(), pageSize); + this(log, new DataStorageConfiguration().getFileIOFactory(), pageSize); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java index e23476625380e..c0c3650fb0fe1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java @@ -27,8 +27,9 @@ import java.util.concurrent.ExecutorService; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.internal.GridComponent; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.GridKernalGateway; @@ -177,8 +178,12 @@ private IgniteConfiguration prepareIgniteConfiguration() { final Marshaller marshaller = new BinaryMarshaller(); cfg.setMarshaller(marshaller); - 
PersistentStoreConfiguration pstCfg = new PersistentStoreConfiguration(); - cfg.setPersistentStoreConfiguration(pstCfg); + final DataStorageConfiguration pstCfg = new DataStorageConfiguration(); + final DataRegionConfiguration regCfg = new DataRegionConfiguration(); + regCfg.setPersistenceEnabled(true); + pstCfg.setDefaultDataRegionConfiguration(regCfg); + + cfg.setDataStorageConfiguration(pstCfg); marshaller.setContext(marshallerCtx); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ratemetrics/HitRateMetrics.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ratemetrics/HitRateMetrics.java index 824bc7a9f50bf..9c096a64c5921 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ratemetrics/HitRateMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ratemetrics/HitRateMetrics.java @@ -28,7 +28,7 @@ * *

        Implementation is nonblocking and protected from hits loss. * Maximum relative error is 1/{@link #size}. - * 2^56 - 1 hits per interval can be accumulated without numeric overflow. + * 2^55 - 1 hits per interval can be accumulated without numeric overflow. */ public class HitRateMetrics { /** Bits that store actual hit count. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java index 143e5cbc4e381..7da4898627a90 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java @@ -648,7 +648,7 @@ else if (conflictCtx.isUseNew()) { if (dhtVer == null) dhtVer = explicitVer != null ? explicitVer : writeVersion(); - if (cctx.wal() != null && !writeEntries().isEmpty() + if (cacheCtx.group().persistenceEnabled() && !writeEntries().isEmpty() && op != NOOP && op != RELOAD && op != READ) ptr = cctx.wal().log(new DataRecord(new DataEntry( cacheCtx.cacheId(), diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java index 36306cbaed89b..afa3fd7a4bf6b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java @@ -64,8 +64,8 @@ public CacheDataTree( ) throws IgniteCheckedException { super(name, grp.groupId(), - grp.memoryPolicy().pageMemory(), - grp.shared().wal(), + grp.dataRegion().pageMemory(), + grp.dataRegion().config().isPersistenceEnabled() ? 
grp.shared().wal() : null, grp.offheap().globalRemoveId(), metaPageId, reuseList, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java index fad3a50a7ed53..a6ec6e7b38ee7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java @@ -55,7 +55,7 @@ public PendingEntriesTree( super(name, grp.groupId(), pageMem, - grp.shared().wal(), + grp.dataRegion().config().isPersistenceEnabled() ? grp.shared().wal() : null, grp.offheap().globalRemoveId(), metaPageId, reuseList, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java index ff42ff64c0d27..3cd0451c37e69 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java @@ -53,6 +53,7 @@ import org.apache.ignite.internal.util.typedef.CI1; import org.apache.ignite.internal.util.typedef.CI2; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteCallable; @@ -142,7 +143,7 @@ public boolean publicApiActiveState() { /** {@inheritDoc} */ @Override public void start() throws IgniteCheckedException { // Start first node as inactive if persistence is enabled. 
- boolean activeOnStart = !ctx.config().isPersistentStoreEnabled() && ctx.config().isActiveOnStart(); + boolean activeOnStart = !CU.isPersistenceEnabled(ctx.config()) && ctx.config().isActiveOnStart(); globalState = DiscoveryDataClusterState.createState(activeOnStart); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsDataManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsDataManager.java index e73fa6cd67a54..12765df25006b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsDataManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsDataManager.java @@ -65,7 +65,7 @@ import org.apache.ignite.internal.managers.communication.GridMessageListener; import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; -import org.apache.ignite.internal.processors.cache.persistence.MemoryPolicy; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal; import org.apache.ignite.internal.processors.datastreamer.DataStreamerCacheUpdaters; import org.apache.ignite.internal.processors.igfs.data.IgfsDataPutProcessor; @@ -243,7 +243,7 @@ public long spaceSize() { * @return Maximum number of bytes for IGFS data cache. */ public long maxSpaceSize() { - MemoryPolicy plc = dataCachePrj.context().memoryPolicy(); + DataRegion plc = dataCachePrj.context().dataRegion(); long size = plc != null ? 
plc.config().getMaxSize() : 0; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java index 3a1cdb7cc16a8..c0a169bd9d352 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java @@ -2555,7 +2555,8 @@ private void processStatusMessage(SchemaOperationStatusMessage msg) { private void saveCacheConfiguration(DynamicCacheDescriptor desc) { GridCacheSharedContext cctx = ctx.cache().context(); - if (cctx.pageStore() != null && cctx.database().persistenceEnabled() && !cctx.kernalContext().clientNode()) { + if (cctx.pageStore() != null && cctx.database().persistenceEnabled() && !cctx.kernalContext().clientNode() && + CU.isPersistentCache(desc.cacheConfiguration(), cctx.gridConfig().getDataStorageConfiguration())) { CacheConfiguration cfg = desc.cacheConfiguration(); try { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheConfiguration.java index 6fe056c59164b..f2fd195e2190e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheConfiguration.java @@ -26,7 +26,7 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cache.PartitionLossPolicy; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.typedef.internal.S; import 
org.apache.ignite.internal.util.typedef.internal.U; @@ -142,7 +142,7 @@ public class VisorCacheConfiguration extends VisorDataTransferObject { /** */ private boolean loadPrevVal; - /** Name of {@link MemoryPolicyConfiguration} for this cache */ + /** Name of {@link DataRegionConfiguration} for this cache */ private String memPlcName; /** Maximum inline size for sql indexes. */ @@ -219,7 +219,7 @@ public VisorCacheConfiguration(IgniteEx ignite, CacheConfiguration ccfg, IgniteU evictFilter = compactClass(ccfg.getEvictionFilter()); lsnrConfigurations = compactIterable(ccfg.getCacheEntryListenerConfigurations()); loadPrevVal = ccfg.isLoadPreviousValue(); - memPlcName = ccfg.getMemoryPolicyName(); + memPlcName = ccfg.getDataRegionName(); sqlIdxMaxInlineSize = ccfg.getSqlIndexMaxInlineSize(); nodeFilter = compactClass(ccfg.getNodeFilter()); qryDetailMetricsSz = ccfg.getQueryDetailMetricsSize(); @@ -460,7 +460,7 @@ public boolean isLoadPreviousValue() { } /** - * @return {@link MemoryPolicyConfiguration} name. + * @return {@link DataRegionConfiguration} name. 
*/ public String getMemoryPolicyName() { return memPlcName; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorMemoryMetrics.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorMemoryMetrics.java index c6cdd5c419007..37fed5abe3c1c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorMemoryMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorMemoryMetrics.java @@ -20,13 +20,13 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; -import org.apache.ignite.MemoryMetrics; +import org.apache.ignite.DataRegionMetrics; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.VisorDataTransferObject; /** - * Data transfer object for {@link MemoryMetrics} + * Data transfer object for {@link DataRegionMetrics} */ public class VisorMemoryMetrics extends VisorDataTransferObject { /** */ @@ -69,7 +69,7 @@ public VisorMemoryMetrics() { /** * @param m Metrics instance to create DTO. */ - public VisorMemoryMetrics(MemoryMetrics m) { + public VisorMemoryMetrics(DataRegionMetrics m) { name = m.getName(); totalAllocatedPages = m.getTotalAllocatedPages(); allocationRate = m.getAllocationRate(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataRegionConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataRegionConfiguration.java new file mode 100644 index 0000000000000..394e2940865df --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataRegionConfiguration.java @@ -0,0 +1,225 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.node; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.ArrayList; +import java.util.List; +import org.apache.ignite.configuration.DataPageEvictionMode; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorDataTransferObject; + +/** + * Data transfer object for data region configuration. + */ +public class VisorDataRegionConfiguration extends VisorDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Unique name of DataRegion. */ + private String name; + + /** Initial memory region size defined by this memory policy. */ + private long initSize; + + /** Maximum memory region size defined by this memory policy. */ + private long maxSize; + + /** Path for memory mapped file. */ + private String swapPath; + + /** An algorithm for memory pages eviction. */ + private DataPageEvictionMode pageEvictionMode; + + /** A threshold for memory pages eviction initiation. */ + private double evictionThreshold; + + /** Minimum number of empty pages in reuse lists. */ + private int emptyPagesPoolSize; + + /** Enable memory metrics collection for this data region. 
*/ + private boolean metricsEnabled; + + /** Number of sub-intervals. */ + private int metricsSubIntervalCount; + + /** Time interval over which allocation rate is calculated. */ + private long metricsRateTimeInterval; + + /** Enable Ignite Native Persistence. */ + private boolean persistenceEnabled; + + /** + * Default constructor. + */ + public VisorDataRegionConfiguration() { + // No-op. + } + + /** + * Constructor. + * + * @param plc Data region configuration. + */ + public VisorDataRegionConfiguration(DataRegionConfiguration plc) { + assert plc != null; + + name = plc.getName(); + initSize = plc.getInitialSize(); + maxSize = plc.getMaxSize(); + swapPath = plc.getSwapPath(); + pageEvictionMode = plc.getPageEvictionMode(); + evictionThreshold = plc.getEvictionThreshold(); + emptyPagesPoolSize = plc.getEmptyPagesPoolSize(); + metricsEnabled = plc.isMetricsEnabled(); + metricsSubIntervalCount = plc.getMetricsSubIntervalCount(); + metricsRateTimeInterval = plc.getMetricsRateTimeInterval(); + persistenceEnabled = plc.isPersistenceEnabled(); + } + + /** + * @param regCfgs Array of data region configurations. + * @return Collection of DTO objects. + */ + public static List from(DataRegionConfiguration[] regCfgs) { + List res = new ArrayList<>(); + + if (regCfgs != null) { + for (DataRegionConfiguration plc: regCfgs) + res.add(new VisorDataRegionConfiguration(plc)); + } + + return res; + } + + /** + * @return Unique name of DataRegion. + */ + public String getName() { + return name; + } + + /** + * @return Maximum memory region size defined by this memory policy. + */ + public long getMaxSize() { + return maxSize; + } + + /** + * @return Initial memory region size defined by this memory policy. + */ + public long getInitialSize() { + return initSize; + } + + /** + * @return Path for memory mapped file. + */ + public String getSwapPath() { + return swapPath; + } + + /** + * @return Memory pages eviction algorithm. {@link DataPageEvictionMode#DISABLED} used by default. 
+ */ + public DataPageEvictionMode getPageEvictionMode() { + return pageEvictionMode; + } + + /** + * @return Memory pages eviction threshold. + */ + public double getEvictionThreshold() { + return evictionThreshold; + } + + /** + * @return Minimum number of empty pages in reuse list. + */ + public int getEmptyPagesPoolSize() { + return emptyPagesPoolSize; + } + + /** + * @return Metrics enabled flag. + */ + public boolean isMetricsEnabled() { + return metricsEnabled; + } + + /** + * @return Number of sub intervals. + */ + public int getMetricsSubIntervalCount() { + return metricsSubIntervalCount; + } + + /** + * @return Time interval over which allocation rate is calculated. + */ + public long getMetricsRateTimeInterval() { + return metricsRateTimeInterval; + } + + /** + * @return Persistence enabled flag. + */ + public boolean isPersistenceEnabled() { + return persistenceEnabled; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeString(out, name); + out.writeLong(initSize); + out.writeLong(maxSize); + U.writeString(out, swapPath); + U.writeEnum(out, pageEvictionMode); + out.writeDouble(evictionThreshold); + out.writeInt(emptyPagesPoolSize); + out.writeBoolean(metricsEnabled); + out.writeInt(metricsSubIntervalCount); + out.writeLong(metricsRateTimeInterval); + out.writeBoolean(persistenceEnabled); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + name = U.readString(in); + initSize = in.readLong(); + maxSize = in.readLong(); + swapPath = U.readString(in); + pageEvictionMode = DataPageEvictionMode.fromOrdinal(in.readByte()); + evictionThreshold = in.readDouble(); + emptyPagesPoolSize = in.readInt(); + metricsEnabled = in.readBoolean(); + metricsSubIntervalCount = in.readInt(); + metricsRateTimeInterval = in.readLong(); + persistenceEnabled = in.readBoolean(); + } + + /** {@inheritDoc} */ + 
@Override public String toString() { + return S.toString(VisorDataRegionConfiguration.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataStorageConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataStorageConfiguration.java new file mode 100644 index 0000000000000..78bf1c504a910 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataStorageConfiguration.java @@ -0,0 +1,453 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.visor.node; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.List; +import org.apache.ignite.configuration.CheckpointWriteOrder; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.WALMode; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorDataTransferObject; + +import static org.apache.ignite.internal.visor.util.VisorTaskUtils.compactClass; + +/** + * Data transfer object for data store configuration. + */ +public class VisorDataStorageConfiguration extends VisorDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Size of a memory chunk reserved for system cache initially. */ + private long sysRegionInitSize; + + /** Size of memory for system cache. */ + private long sysRegionMaxSize; + + /** Page size. */ + private int pageSize; + + /** Concurrency level. */ + private int concLvl; + + /** Configuration of default data region. */ + private VisorDataRegionConfiguration dfltDataRegCfg; + + /** Memory policies. */ + private List dataRegCfgs; + + /** */ + private String storagePath; + + /** Checkpointing frequency. */ + private long checkpointFreq; + + /** Lock wait time. */ + private long lockWaitTime; + + /** */ + private long checkpointPageBufSize; + + /** */ + private int checkpointThreads; + + /** Checkpoint write order. */ + private CheckpointWriteOrder checkpointWriteOrder; + + /** */ + private int walHistSize; + + /** Number of work WAL segments. */ + private int walSegments; + + /** Number of WAL segments to keep. */ + private int walSegmentSize; + + /** WAL persistence path. */ + private String walPath; + + /** WAL archive path. 
*/ + private String walArchivePath; + + /** Metrics enabled flag. */ + private boolean metricsEnabled; + + /** Wal mode. */ + private WALMode walMode; + + /** WAl thread local buffer size. */ + private int walTlbSize; + + /** Wal flush frequency. */ + private long walFlushFreq; + + /** Wal fsync delay in nanoseconds. */ + private long walFsyncDelay; + + /** Wal record iterator buffer size. */ + private int walRecordIterBuffSize; + + /** Always write full pages. */ + private boolean alwaysWriteFullPages; + + /** Factory to provide I/O interface for files */ + private String fileIOFactory; + + /** Number of sub-intervals. */ + private int metricsSubIntervalCount; + + /** Time interval (in milliseconds) for rate-based metrics. */ + private long metricsRateTimeInterval; + + /** Time interval (in milliseconds) for running auto archiving for incompletely WAL segment */ + private long walAutoArchiveAfterInactivity; + + /** If true, threads that generate dirty pages too fast during ongoing checkpoint will be throttled. */ + private boolean writeThrottlingEnabled; + + /** + * Default constructor. + */ + public VisorDataStorageConfiguration() { + // No-op. + } + + /** + * Constructor. + * + * @param cfg Data storage configuration. 
+ */ + public VisorDataStorageConfiguration(DataStorageConfiguration cfg) { + assert cfg != null; + + sysRegionInitSize = cfg.getSystemRegionInitialSize(); + sysRegionMaxSize = cfg.getSystemRegionMaxSize(); + pageSize = cfg.getPageSize(); + concLvl = cfg.getConcurrencyLevel(); + + DataRegionConfiguration dfltRegion = cfg.getDefaultDataRegionConfiguration(); + + if (dfltRegion != null) + dfltDataRegCfg = new VisorDataRegionConfiguration(dfltRegion); + + dataRegCfgs = VisorDataRegionConfiguration.from(cfg.getDataRegionConfigurations()); + + storagePath = cfg.getStoragePath(); + checkpointFreq = cfg.getCheckpointFrequency(); + lockWaitTime = cfg.getLockWaitTime(); + checkpointPageBufSize = cfg.getCheckpointPageBufferSize(); + checkpointThreads = cfg.getCheckpointThreads(); + checkpointWriteOrder = cfg.getCheckpointWriteOrder(); + walHistSize = cfg.getWalHistorySize(); + walSegments = cfg.getWalSegments(); + walSegmentSize = cfg.getWalSegmentSize(); + walPath = cfg.getWalPath(); + walArchivePath = cfg.getWalArchivePath(); + metricsEnabled = cfg.isMetricsEnabled(); + walMode = cfg.getWalMode(); + walTlbSize = cfg.getWalThreadLocalBufferSize(); + walFlushFreq = cfg.getWalFlushFrequency(); + walFsyncDelay = cfg.getWalFsyncDelayNanos(); + walRecordIterBuffSize = cfg.getWalRecordIteratorBufferSize(); + alwaysWriteFullPages = cfg.isAlwaysWriteFullPages(); + fileIOFactory = compactClass(cfg.getFileIOFactory()); + metricsSubIntervalCount = cfg.getMetricsSubIntervalCount(); + metricsRateTimeInterval = cfg.getMetricsRateTimeInterval(); + walAutoArchiveAfterInactivity = cfg.getWalAutoArchiveAfterInactivity(); + writeThrottlingEnabled = cfg.isWriteThrottlingEnabled(); + } + + /** + * @return Initial size in bytes. + */ + public long getSystemRegionInitialSize() { + return sysRegionInitSize; + } + + /** + * @return Maximum in bytes. + */ + public long getSystemRegionMaxSize() { + return sysRegionMaxSize; + } + + /** + * @return Page size in bytes. 
+ */ + public int getPageSize() { + return pageSize; + } + + /** + * @return Mapping table concurrency level. + */ + public int getConcurrencyLevel() { + return concLvl; + } + + /** + * @return Configuration of default data region. + */ + public VisorDataRegionConfiguration getDefaultDataRegionConfiguration() { + return dfltDataRegCfg; + } + + /** + * @return Array of configured data regions. + */ + public List getDataRegionConfigurations() { + return dataRegCfgs; + } + + /** + * @return Path the root directory where the Persistent Store will persist data and indexes. + */ + public String getStoragePath() { + return storagePath; + } + + /** + * @return Checkpointing frequency in milliseconds. + */ + public long getCheckpointFrequency() { + return checkpointFreq; + } + + /** + * @return Checkpointing page buffer size in bytes. + */ + public long getCheckpointPageBufferSize() { + return checkpointPageBufSize; + } + + /** + * @return Number of checkpointing threads. + */ + public int getCheckpointThreads() { + return checkpointThreads; + } + + /** + * @return Checkpoint write order. + */ + public CheckpointWriteOrder getCheckpointWriteOrder() { + return checkpointWriteOrder; + } + + /** + * @return Time for wait. + */ + public long getLockWaitTime() { + return lockWaitTime; + } + + /** + * @return Number of WAL segments to keep after a checkpoint is finished. + */ + public int getWalHistorySize() { + return walHistSize; + } + + /** + * @return Number of work WAL segments. + */ + public int getWalSegments() { + return walSegments; + } + + /** + * @return WAL segment size. + */ + public int getWalSegmentSize() { + return walSegmentSize; + } + + /** + * @return WAL persistence path, absolute or relative to Ignite work directory. + */ + public String getWalPath() { + return walPath; + } + + /** + * @return WAL archive directory. + */ + public String getWalArchivePath() { + return walArchivePath; + } + + /** + * @return Metrics enabled flag. 
+ */ + public boolean isMetricsEnabled() { + return metricsEnabled; + } + + /** + * @return Time interval in milliseconds. + */ + public long getMetricsRateTimeInterval() { + return metricsRateTimeInterval; + } + + /** + * @return The number of sub-intervals for history tracking. + */ + public int getMetricsSubIntervalCount() { + return metricsSubIntervalCount; + } + + /** + * @return WAL mode. + */ + public WALMode getWalMode() { + return walMode; + } + + /** + * @return Thread local buffer size. + */ + public int getWalThreadLocalBufferSize() { + return walTlbSize; + } + + /** + * @return Flush frequency. + */ + public long getWalFlushFrequency() { + return walFlushFreq; + } + + /** + * @return Gets the fsync delay, in nanoseconds. + */ + public long getWalFsyncDelayNanos() { + return walFsyncDelay; + } + + /** + * @return Record iterator buffer size. + */ + public int getWalRecordIteratorBufferSize() { + return walRecordIterBuffSize; + } + + /** + * @return Flag indicating whether full pages should be always written. + */ + public boolean isAlwaysWriteFullPages() { + return alwaysWriteFullPages; + } + + /** + * @return File I/O factory class name. + */ + public String getFileIOFactory() { + return fileIOFactory; + } + + /** + * @return Time in millis. + */ + public long getWalAutoArchiveAfterInactivity() { + return walAutoArchiveAfterInactivity; + } + + /** + * @return Flag indicating whether write throttling is enabled. 
+ */ + public boolean isWriteThrottlingEnabled() { + return writeThrottlingEnabled; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeLong(sysRegionInitSize); + out.writeLong(sysRegionMaxSize); + out.writeInt(pageSize); + out.writeInt(concLvl); + out.writeObject(dfltDataRegCfg); + U.writeCollection(out, dataRegCfgs); + U.writeString(out, storagePath); + out.writeLong(checkpointFreq); + out.writeLong(lockWaitTime); + out.writeLong(checkpointPageBufSize); + out.writeInt(checkpointThreads); + U.writeEnum(out, checkpointWriteOrder); + out.writeInt(walHistSize); + out.writeInt(walSegments); + out.writeInt(walSegmentSize); + U.writeString(out, walPath); + U.writeString(out, walArchivePath); + out.writeBoolean(metricsEnabled); + U.writeEnum(out, walMode); + out.writeInt(walTlbSize); + out.writeLong(walFlushFreq); + out.writeLong(walFsyncDelay); + out.writeInt(walRecordIterBuffSize); + out.writeBoolean(alwaysWriteFullPages); + U.writeString(out, fileIOFactory); + out.writeInt(metricsSubIntervalCount); + out.writeLong(metricsRateTimeInterval); + out.writeLong(walAutoArchiveAfterInactivity); + out.writeBoolean(writeThrottlingEnabled); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + sysRegionInitSize = in.readLong(); + sysRegionMaxSize = in.readLong(); + pageSize = in.readInt(); + concLvl = in.readInt(); + dfltDataRegCfg = (VisorDataRegionConfiguration)in.readObject(); + dataRegCfgs = U.readList(in); + storagePath = U.readString(in); + checkpointFreq = in.readLong(); + lockWaitTime = in.readLong(); + checkpointPageBufSize = in.readLong(); + checkpointThreads = in.readInt(); + checkpointWriteOrder = CheckpointWriteOrder.fromOrdinal(in.readByte()); + walHistSize = in.readInt(); + walSegments = in.readInt(); + walSegmentSize = in.readInt(); + walPath = U.readString(in); + walArchivePath = 
U.readString(in); + metricsEnabled = in.readBoolean(); + walMode = WALMode.fromOrdinal(in.readByte()); + walTlbSize = in.readInt(); + walFlushFreq = in.readLong(); + walFsyncDelay = in.readLong(); + walRecordIterBuffSize = in.readInt(); + alwaysWriteFullPages = in.readBoolean(); + fileIOFactory = U.readString(in); + metricsSubIntervalCount = in.readInt(); + metricsRateTimeInterval = in.readLong(); + walAutoArchiveAfterInactivity = in.readLong(); + writeThrottlingEnabled = in.readBoolean(); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(VisorDataStorageConfiguration.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorGridConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorGridConfiguration.java index a716a766ce246..99cce40e2a80f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorGridConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorGridConfiguration.java @@ -119,6 +119,9 @@ public class VisorGridConfiguration extends VisorDataTransferObject { /** List of service configurations. */ private List srvcCfgs; + /** Configuration of data storage. */ + private VisorDataStorageConfiguration dataStorage; + /** * Default constructor. 
*/ @@ -153,11 +156,11 @@ public VisorGridConfiguration(IgniteEx ignite) { atomic = new VisorAtomicConfiguration(c.getAtomicConfiguration()); txCfg = new VisorTransactionConfiguration(c.getTransactionConfiguration()); - if (c.getMemoryConfiguration() != null) - memCfg = new VisorMemoryConfiguration(c.getMemoryConfiguration()); + if (c.getDataStorageConfiguration() != null) + memCfg = null; - if (c.getPersistentStoreConfiguration() != null) - psCfg = new VisorPersistentStoreConfiguration(c.getPersistentStoreConfiguration()); + if (c.getDataStorageConfiguration() != null) + psCfg = null; storeSesLsnrs = compactArray(c.getCacheStoreSessionListenerFactories()); warmupClos = compactClass(c.getWarmupClosure()); @@ -180,6 +183,8 @@ public VisorGridConfiguration(IgniteEx ignite) { sqlConnCfg = new VisorSqlConnectorConfiguration(scc); srvcCfgs = VisorServiceConfiguration.list(c.getServiceConfiguration()); + + dataStorage = new VisorDataStorageConfiguration(c.getDataStorageConfiguration()); } /** @@ -357,6 +362,18 @@ public List getServiceConfigurations() { return srvcCfgs; } + /** + * @return Configuration of data storage. 
+ */ + public VisorDataStorageConfiguration getDataStorageConfiguration() { + return dataStorage; + } + + /** {@inheritDoc} */ + @Override public byte getProtocolVersion() { + return V2; + } + /** {@inheritDoc} */ @Override protected void writeExternalData(ObjectOutput out) throws IOException { out.writeObject(basic); @@ -384,6 +401,7 @@ public List getServiceConfigurations() { out.writeObject(hadoopCfg); out.writeObject(sqlConnCfg); U.writeCollection(out, srvcCfgs); + out.writeObject(dataStorage); } /** {@inheritDoc} */ @@ -413,6 +431,9 @@ public List getServiceConfigurations() { hadoopCfg = (VisorHadoopConfiguration)in.readObject(); sqlConnCfg = (VisorSqlConnectorConfiguration) in.readObject(); srvcCfgs = U.readList(in); + + if (protoVer == V2) + dataStorage = (VisorDataStorageConfiguration)in.readObject(); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorMemoryConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorMemoryConfiguration.java index ccb23ac7f39a8..6708f9a8711f3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorMemoryConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorMemoryConfiguration.java @@ -22,8 +22,8 @@ import java.io.ObjectOutput; import java.util.ArrayList; import java.util.List; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; @@ -48,10 +48,10 @@ public class VisorMemoryConfiguration extends VisorDataTransferObject { /** Concurrency level. 
*/ private int concLvl; - /** Name of MemoryPolicy to be used as default. */ + /** Name of DataRegion to be used as default. */ private String dfltMemPlcName; - /** Size of memory (in bytes) to use for default MemoryPolicy. */ + /** Size of memory (in bytes) to use for default DataRegion. */ private long dfltMemPlcSize; /** Memory policies. */ @@ -69,22 +69,22 @@ public VisorMemoryConfiguration() { * * @param memCfg Memory configuration. */ - public VisorMemoryConfiguration(MemoryConfiguration memCfg) { + public VisorMemoryConfiguration(DataStorageConfiguration memCfg) { assert memCfg != null; - sysCacheInitSize = memCfg.getSystemCacheInitialSize(); - sysCacheMaxSize = memCfg.getSystemCacheMaxSize(); + sysCacheInitSize = memCfg.getSystemRegionInitialSize(); + sysCacheMaxSize = memCfg.getSystemRegionMaxSize(); pageSize = memCfg.getPageSize(); concLvl = memCfg.getConcurrencyLevel(); - dfltMemPlcName = memCfg.getDefaultMemoryPolicyName(); - dfltMemPlcSize = memCfg.getDefaultMemoryPolicySize(); +// dfltMemPlcName = memCfg.getDefaultDataRegionName(); + //dfltMemPlcSize = memCfg.getDefaultDataRegionSize(); - MemoryPolicyConfiguration[] plcs = memCfg.getMemoryPolicies(); + DataRegionConfiguration[] plcs = memCfg.getDataRegionConfigurations(); if (!F.isEmpty(plcs)) { memPlcs = new ArrayList<>(plcs.length); - for (MemoryPolicyConfiguration plc : plcs) + for (DataRegionConfiguration plc : plcs) memPlcs.add(new VisorMemoryPolicyConfiguration(plc)); } } @@ -118,7 +118,7 @@ public int getPageSize() { } /** - * @return Name of MemoryPolicy to be used as default. + * @return Name of DataRegion to be used as default. */ public String getDefaultMemoryPolicyName() { return dfltMemPlcName; @@ -132,7 +132,7 @@ public long getDefaultMemoryPolicySize() { } /** - * @return Collection of MemoryPolicyConfiguration objects. + * @return Collection of DataRegionConfiguration objects. 
*/ public List getMemoryPolicies() { return memPlcs; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorMemoryPolicyConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorMemoryPolicyConfiguration.java index bed4c4bb11f4e..92159a811db69 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorMemoryPolicyConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorMemoryPolicyConfiguration.java @@ -21,7 +21,7 @@ import java.io.ObjectInput; import java.io.ObjectOutput; import org.apache.ignite.configuration.DataPageEvictionMode; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.VisorDataTransferObject; @@ -33,7 +33,7 @@ public class VisorMemoryPolicyConfiguration extends VisorDataTransferObject { /** */ private static final long serialVersionUID = 0L; - /** Unique name of MemoryPolicy. */ + /** Unique name of DataRegion. */ private String name; /** Maximum memory region size defined by this memory policy. */ @@ -69,20 +69,20 @@ public VisorMemoryPolicyConfiguration() { * * @param plc Memory policy configuration. */ - public VisorMemoryPolicyConfiguration(MemoryPolicyConfiguration plc) { + public VisorMemoryPolicyConfiguration(DataRegionConfiguration plc) { assert plc != null; name = plc.getName(); maxSize = plc.getMaxSize(); initSize = plc.getInitialSize(); - swapFilePath = plc.getSwapFilePath(); + swapFilePath = plc.getSwapPath(); pageEvictionMode = plc.getPageEvictionMode(); evictionThreshold = plc.getEvictionThreshold(); emptyPagesPoolSize = plc.getEmptyPagesPoolSize(); } /** - * Unique name of MemoryPolicy. + * Unique name of DataRegion. 
*/ public String getName() { return name; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java index 3fd7b0d9eb1bd..99d113278b605 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java @@ -21,7 +21,7 @@ import java.util.List; import java.util.concurrent.ConcurrentMap; import org.apache.ignite.IgniteFileSystem; -import org.apache.ignite.MemoryMetrics; +import org.apache.ignite.DataRegionMetrics; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.FileSystemConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; @@ -157,7 +157,7 @@ protected void memoryMetrics(VisorNodeDataCollectorJobResult res) { try { List memoryMetrics = res.getMemoryMetrics(); - for (MemoryMetrics m : ignite.memoryMetrics()) + for (DataRegionMetrics m : ignite.dataRegionMetrics()) memoryMetrics.add(new VisorMemoryMetrics(m)); } catch (Exception e) { @@ -257,7 +257,7 @@ protected void igfs(VisorNodeDataCollectorJobResult res) { */ protected void persistenceMetrics(VisorNodeDataCollectorJobResult res) { try { - res.setPersistenceMetrics(new VisorPersistenceMetrics(ignite.persistentStoreMetrics())); + res.setPersistenceMetrics(new VisorPersistenceMetrics(ignite.dataStorageMetrics())); } catch (Exception e) { res.setPersistenceMetricsEx(new VisorExceptionWrapper(e)); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJobResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJobResult.java index 90ecf6e68d97f..0612c5ea6f221 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJobResult.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJobResult.java @@ -54,7 +54,7 @@ public class VisorNodeDataCollectorJobResult extends VisorDataTransferObject { /** Exception while collecting node events. */ private VisorExceptionWrapper evtsEx; - /** Node memory metrics. */ + /** Node data region metrics. */ private List memoryMetrics = new ArrayList<>(); /** Exception while collecting memory metrics. */ @@ -161,7 +161,7 @@ public void setEventsEx(VisorExceptionWrapper evtsEx) { } /** - * @return Collected memory metrics. + * @return Collected data region metrics. */ public List getMemoryMetrics() { return memoryMetrics; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskResult.java index 6e10b84bc33ab..ace964cdc75b5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskResult.java @@ -67,7 +67,7 @@ public class VisorNodeDataCollectorTaskResult extends VisorDataTransferObject { /** Exceptions caught during collecting events from nodes. */ private Map evtsEx = new HashMap<>(); - /** All memory metrics collected from nodes. */ + /** All data region metrics collected from nodes. */ private Map> memoryMetrics = new HashMap<>(); /** Exceptions caught during collecting memory metrics from nodes. */ @@ -188,7 +188,7 @@ public Map getEventsEx() { } /** - * @return All memory metrics collected from nodes. + * @return All data region metrics collected from nodes. 
*/ public Map> getMemoryMetrics() { return memoryMetrics; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java index c83816151e300..165855c0d5237 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java @@ -19,12 +19,12 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; -import org.apache.ignite.PersistenceMetrics; +import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.visor.VisorDataTransferObject; /** - * DTO object for {@link PersistenceMetrics}. + * DTO object for {@link DataStorageMetrics}. */ public class VisorPersistenceMetrics extends VisorDataTransferObject { /** */ @@ -76,7 +76,7 @@ public VisorPersistenceMetrics() { /** * @param metrics Persistence metrics. 
*/ - public VisorPersistenceMetrics(PersistenceMetrics metrics) { + public VisorPersistenceMetrics(DataStorageMetrics metrics) { walLoggingRate = metrics.getWalLoggingRate(); walWritingRate = metrics.getWalWritingRate(); walArchiveSegments = metrics.getWalArchiveSegments(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistentStoreConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistentStoreConfiguration.java index 128f43a189d91..f9d7a64478c88 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistentStoreConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistentStoreConfiguration.java @@ -19,14 +19,14 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.VisorDataTransferObject; /** - * DTO object for {@link PersistentStoreConfiguration}. + * DTO object for {@link DataStorageConfiguration}. */ public class VisorPersistentStoreConfiguration extends VisorDataTransferObject { /** */ @@ -99,26 +99,26 @@ public VisorPersistentStoreConfiguration() { /** * @param cfg Persistent store configuration. 
*/ - public VisorPersistentStoreConfiguration(PersistentStoreConfiguration cfg) { - persistenceStorePath = cfg.getPersistentStorePath(); - checkpointingFreq = cfg.getCheckpointingFrequency(); + public VisorPersistentStoreConfiguration(DataStorageConfiguration cfg) { + persistenceStorePath = cfg.getStoragePath(); + checkpointingFreq = cfg.getCheckpointFrequency(); lockWaitTime = cfg.getLockWaitTime(); - checkpointingPageBufSize = cfg.getCheckpointingPageBufferSize(); - checkpointingThreads = cfg.getCheckpointingThreads(); + checkpointingPageBufSize = cfg.getCheckpointPageBufferSize(); + checkpointingThreads = cfg.getCheckpointThreads(); walHistSize = cfg.getWalHistorySize(); walSegments = cfg.getWalSegments(); walSegmentSize = cfg.getWalSegmentSize(); - walStorePath = cfg.getWalStorePath(); + walStorePath = cfg.getWalPath(); walArchivePath = cfg.getWalArchivePath(); metricsEnabled = cfg.isMetricsEnabled(); walMode = cfg.getWalMode(); - tlbSize = cfg.getTlbSize(); + tlbSize = cfg.getWalThreadLocalBufferSize(); walFlushFreq = cfg.getWalFlushFrequency(); walFsyncDelay = cfg.getWalFsyncDelayNanos(); walRecordIterBuffSize = cfg.getWalRecordIteratorBufferSize(); alwaysWriteFullPages = cfg.isAlwaysWriteFullPages(); - subIntervals = cfg.getSubIntervals(); - rateTimeInterval = cfg.getRateTimeInterval(); + subIntervals = cfg.getMetricsSubIntervalCount(); + rateTimeInterval = cfg.getMetricsRateTimeInterval(); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/mxbean/DataRegionMetricsMXBean.java b/modules/core/src/main/java/org/apache/ignite/mxbean/DataRegionMetricsMXBean.java new file mode 100644 index 0000000000000..eeed496296874 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/mxbean/DataRegionMetricsMXBean.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.mxbean; + +import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.configuration.DataRegionConfiguration; + +/** + * This interface defines a JMX view on {@link DataRegionMetrics}. + */ +@MXBeanDescription("MBean that provides access to DataRegionMetrics of a local Apache Ignite node.") +public interface DataRegionMetricsMXBean extends DataRegionMetrics { + /** {@inheritDoc} */ + @MXBeanDescription("A name of a memory region the metrics are collected for.") + @Override public String getName(); + + /** + * Gets initial memory region size defined by its {@link DataRegionConfiguration}. + * + * @return Initial size in MB. + */ + @MXBeanDescription("Initial memory region size defined by its data region.") + public int getInitialSize(); + + /** + * Maximum memory region size defined by its {@link DataRegionConfiguration}. + * + * @return Maximum size in MB. + */ + @MXBeanDescription("Maximum memory region size defined by its data region.") + public int getMaxSize(); + + /** + * A path to the memory-mapped files the memory region defined by {@link DataRegionConfiguration} will be + * mapped to. + * + * @return Path to the memory-mapped files. 
+ */ + @MXBeanDescription("Path to the memory-mapped files.") + public String getSwapPath(); + + /** {@inheritDoc} */ + @MXBeanDescription("Total number of allocated pages.") + @Override public long getTotalAllocatedPages(); + + /** {@inheritDoc} */ + @MXBeanDescription("Allocation rate (pages per second) averaged across rateTimeInternal.") + @Override public float getAllocationRate(); + + /** {@inheritDoc} */ + @MXBeanDescription("Eviction rate (pages per second).") + @Override public float getEvictionRate(); + + /** {@inheritDoc} */ + @MXBeanDescription("Percentage of pages that are fully occupied by large entries that go beyond page size.") + @Override public float getLargeEntriesPagesPercentage(); + + /** {@inheritDoc} */ + @MXBeanDescription("Percentage of space that is still free and can be filled in.") + @Override public float getPagesFillFactor(); + + /** {@inheritDoc} */ + @MXBeanDescription("Number of pages in memory not yet synchronized with persistent storage.") + @Override public long getDirtyPages(); + + /** {@inheritDoc} */ + @MXBeanDescription("Rate at which pages in memory are replaced with pages from persistent storage (pages per second).") + @Override public float getPagesReplaceRate(); + + /** {@inheritDoc} */ + @MXBeanDescription("Number of pages residing in physical RAM.") + @Override public long getPhysicalMemoryPages(); + + /** + * Enables memory metrics collection on an Apache Ignite node. + */ + @MXBeanDescription("Enables memory metrics collection on an Apache Ignite node.") + public void enableMetrics(); + + /** + * Disables memory metrics collection on an Apache Ignite node. + */ + @MXBeanDescription("Disables memory metrics collection on an Apache Ignite node.") + public void disableMetrics(); + + /** + * Sets time interval for {@link #getAllocationRate()} and {@link #getEvictionRate()} monitoring purposes. + *

        + * For instance, after setting the interval to 60 seconds, subsequent calls to {@link #getAllocationRate()} + * will return average allocation rate (pages per second) for the last minute. + * + * @param rateTimeInterval Time interval (in milliseconds) used for allocation and eviction rates calculations. + */ + @MXBeanDescription( + "Sets time interval for pages allocation and eviction monitoring purposes." + ) + @MXBeanParametersNames( + "rateTimeInterval" + ) + @MXBeanParametersDescriptions( + "Time interval (in milliseconds) to set." + ) + public void rateTimeInterval(long rateTimeInterval); + + /** + * Sets a number of sub-intervals the whole {@link #rateTimeInterval(long)} will be split into to calculate + * {@link #getAllocationRate()} and {@link #getEvictionRate()} rates (5 by default). + *

        + * Setting it to a bigger value will result in more precise calculation and smaller drops of + * {@link #getAllocationRate()} metric when next sub-interval has to be recycled but introduces bigger + * calculation overhead. + * + * @param subInts A number of sub-intervals. + */ + @MXBeanDescription( + "Sets a number of sub-intervals to calculate allocation and eviction rates metrics." + ) + @MXBeanParametersNames( + "subInts" + ) + @MXBeanParametersDescriptions( + "Number of subintervals to set." + ) + public void subIntervals(int subInts); +} diff --git a/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java b/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java new file mode 100644 index 0000000000000..f0fb631e6d52e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.mxbean; + +import org.apache.ignite.DataStorageMetrics; +import org.apache.ignite.configuration.DataStorageConfiguration; + +/** + * An MX bean allowing to monitor and tune persistence metrics. 
+ */ +public interface DataStorageMetricsMXBean extends DataStorageMetrics { + /** {@inheritDoc} */ + @MXBeanDescription("Average number of WAL records per second written during the last time interval.") + @Override float getWalLoggingRate(); + + /** {@inheritDoc} */ + @MXBeanDescription("Average number of bytes per second written during the last time interval.") + @Override float getWalWritingRate(); + + /** {@inheritDoc} */ + @MXBeanDescription("Current number of WAL segments in the WAL archive.") + @Override int getWalArchiveSegments(); + + /** {@inheritDoc} */ + @MXBeanDescription("Average WAL fsync duration in microseconds over the last time interval.") + @Override float getWalFsyncTimeAverage(); + + /** {@inheritDoc} */ + @MXBeanDescription("Duration of the last checkpoint in milliseconds.") + @Override long getLastCheckpointingDuration(); + + /** {@inheritDoc} */ + @MXBeanDescription("Duration of the checkpoint lock wait in milliseconds.") + @Override long getLastCheckpointLockWaitDuration(); + + /** {@inheritDoc} */ + @MXBeanDescription("Duration of the checkpoint mark in milliseconds.") + @Override long getLastCheckpointMarkDuration(); + + /** {@inheritDoc} */ + @MXBeanDescription("Duration of the checkpoint pages write in milliseconds.") + @Override long getLastCheckpointPagesWriteDuration(); + + /** {@inheritDoc} */ + @MXBeanDescription("Duration of the sync phase of the last checkpoint in milliseconds.") + @Override long getLastCheckpointFsyncDuration(); + + /** {@inheritDoc} */ + @MXBeanDescription("Total number of pages written during the last checkpoint.") + @Override long getLastCheckpointTotalPagesNumber(); + + /** {@inheritDoc} */ + @MXBeanDescription("Total number of data pages written during the last checkpoint.") + @Override long getLastCheckpointDataPagesNumber(); + + /** {@inheritDoc} */ + @MXBeanDescription("Number of pages copied to a temporary checkpoint buffer during the last checkpoint.") + @Override long 
getLastCheckpointCopiedOnWritePagesNumber(); + + /** + * Enables persistence metrics collection on an Apache Ignite node. + */ + @MXBeanDescription("Enables persistence metrics collection on an Apache Ignite node.") + public void enableMetrics(); + + /** + * Disables persistence metrics collection on an Apache Ignite node. + */ + @MXBeanDescription("Disables persistence metrics collection on an Apache Ignite node.") + public void disableMetrics(); + + /** + * Sets time interval for rate-based metrics. Identical to setting + * {@link DataStorageConfiguration#setMetricsRateTimeInterval(long)} configuration property. + * + * @param rateTimeInterval Time interval (in milliseconds) used for allocation and eviction rates calculations. + */ + @MXBeanDescription( + "Sets time interval for pages allocation and eviction monitoring purposes." + ) + @MXBeanParametersNames( + "rateTimeInterval" + ) + @MXBeanParametersDescriptions( + "Time interval (in milliseconds) to set." + ) + public void rateTimeInterval(long rateTimeInterval); + + /** + * Sets a number of sub-intervals the whole {@link #rateTimeInterval(long)} will be split into to calculate + * rate-based metrics. Identical to setting {@link DataStorageConfiguration#setMetricsSubIntervalCount(int)} configuration + * property. + * + * @param subInts A number of sub-intervals. + */ + @MXBeanDescription( + "Sets a number of sub-intervals to calculate allocation and eviction rates metrics." + ) + @MXBeanParametersNames( + "subInts" + ) + @MXBeanParametersDescriptions( + "Number of subintervals to set." 
+ ) + public void subIntervals(int subInts); +} diff --git a/modules/core/src/main/java/org/apache/ignite/mxbean/MemoryMetricsMXBean.java b/modules/core/src/main/java/org/apache/ignite/mxbean/MemoryMetricsMXBean.java index 4d6c96a2353c5..e54753601e50b 100644 --- a/modules/core/src/main/java/org/apache/ignite/mxbean/MemoryMetricsMXBean.java +++ b/modules/core/src/main/java/org/apache/ignite/mxbean/MemoryMetricsMXBean.java @@ -21,8 +21,10 @@ /** * This interface defines a JMX view on {@link MemoryMetrics}. + * @deprecated Part of old API. Metrics are accessible through {@link DataRegionMetricsMXBean}. */ @MXBeanDescription("MBean that provides access to MemoryMetrics of a local Apache Ignite node.") +@Deprecated public interface MemoryMetricsMXBean extends MemoryMetrics { /** {@inheritDoc} */ @MXBeanDescription("A name of a memory region the metrics are collected for.") diff --git a/modules/core/src/main/java/org/apache/ignite/mxbean/PersistenceMetricsMXBean.java b/modules/core/src/main/java/org/apache/ignite/mxbean/PersistenceMetricsMXBean.java index 40c2235e373e1..0c16640f9061f 100644 --- a/modules/core/src/main/java/org/apache/ignite/mxbean/PersistenceMetricsMXBean.java +++ b/modules/core/src/main/java/org/apache/ignite/mxbean/PersistenceMetricsMXBean.java @@ -22,7 +22,9 @@ /** * An MX bean allowing to monitor and tune persistence metrics. + * @deprecated Part of old API. Metrics are accessible through {@link DataStorageMetricsMXBean}. 
*/ +@Deprecated public interface PersistenceMetricsMXBean extends PersistenceMetrics { /** {@inheritDoc} */ @MXBeanDescription("Average number of WAL records per second written during the last time interval.") diff --git a/modules/core/src/main/resources/META-INF/classnames.properties b/modules/core/src/main/resources/META-INF/classnames.properties index 2f795dfd8de0f..f3fc074e1e850 100644 --- a/modules/core/src/main/resources/META-INF/classnames.properties +++ b/modules/core/src/main/resources/META-INF/classnames.properties @@ -137,10 +137,10 @@ org.apache.ignite.configuration.CollectionConfiguration org.apache.ignite.configuration.DataPageEvictionMode org.apache.ignite.configuration.DeploymentMode org.apache.ignite.configuration.IgniteReflectionFactory -org.apache.ignite.configuration.MemoryConfiguration -org.apache.ignite.configuration.MemoryPolicyConfiguration +org.apache.ignite.configuration.DataStorageConfiguration +org.apache.ignite.configuration.DataRegionConfiguration org.apache.ignite.configuration.NearCacheConfiguration -org.apache.ignite.configuration.PersistentStoreConfiguration +org.apache.ignite.configuration.DataStorageConfiguration org.apache.ignite.configuration.TopologyValidator org.apache.ignite.configuration.TransactionConfiguration org.apache.ignite.configuration.WALMode diff --git a/modules/core/src/test/config/examples.properties b/modules/core/src/test/config/examples.properties index ea0d8edf3cb97..2144533b03700 100644 --- a/modules/core/src/test/config/examples.properties +++ b/modules/core/src/test/config/examples.properties @@ -22,4 +22,4 @@ ScalarCacheExample=examples/config/example-ignite.xml ScalarCacheQueryExample=examples/config/example-ignite.xml ScalarCountGraphTrianglesExample=examples/config/example-ignite.xml ScalarPopularNumbersRealTimeExample=examples/config/example-ignite.xml -MemoryPolicyExample=examples/config/example-memory-policies.xml \ No newline at end of file 
+DataRegionExample=examples/config/example-data-regions.xml \ No newline at end of file diff --git a/modules/core/src/test/java/org/apache/ignite/cache/LargeEntryUpdateTest.java b/modules/core/src/test/java/org/apache/ignite/cache/LargeEntryUpdateTest.java index be92761f6a250..008da71b6679a 100644 --- a/modules/core/src/test/java/org/apache/ignite/cache/LargeEntryUpdateTest.java +++ b/modules/core/src/test/java/org/apache/ignite/cache/LargeEntryUpdateTest.java @@ -25,8 +25,8 @@ import org.apache.ignite.IgniteCompute; import org.apache.ignite.IgniteException; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.lang.IgniteFuture; import org.apache.ignite.lang.IgniteRunnable; import org.apache.ignite.resources.IgniteInstanceResource; @@ -71,11 +71,11 @@ public class LargeEntryUpdateTest extends GridCommonAbstractTest { cfg.setPublicThreadPoolSize(THREAD_COUNT); - MemoryConfiguration mem = new MemoryConfiguration(); + DataStorageConfiguration mem = new DataStorageConfiguration(); mem.setPageSize(PAGE_SIZE); - cfg.setMemoryConfiguration(mem); + cfg.setDataStorageConfiguration(mem); CacheConfiguration[] ccfgs = new CacheConfiguration[CACHE_COUNT]; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/ClusterNodeMetricsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/ClusterNodeMetricsSelfTest.java index f842440b68346..1352c37e0dff2 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/ClusterNodeMetricsSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/ClusterNodeMetricsSelfTest.java @@ -29,7 +29,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.events.Event; -import 
org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.task.GridInternal; import org.apache.ignite.internal.util.lang.GridAbsPredicate; import org.apache.ignite.lang.IgnitePredicate; @@ -111,7 +111,7 @@ public void testAllocatedMemory() throws Exception { final IgniteCache cache = ignite.getOrCreateCache(CACHE_NAME); - MemoryMetricsImpl memMetrics = getDefaultMemoryPolicyMetrics(ignite); + DataRegionMetricsImpl memMetrics = getDefaultMemoryPolicyMetrics(ignite); memMetrics.enableMetrics(); @@ -128,8 +128,8 @@ public void testAllocatedMemory() throws Exception { /** * @param ignite Ignite instance. */ - private MemoryMetricsImpl getDefaultMemoryPolicyMetrics(IgniteEx ignite) throws IgniteCheckedException { - return ignite.context().cache().context().database().memoryPolicy(null).memoryMetrics(); + private DataRegionMetricsImpl getDefaultMemoryPolicyMetrics(IgniteEx ignite) throws IgniteCheckedException { + return ignite.context().cache().context().database().dataRegion(null).memoryMetrics(); } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/IgniteSlowClientDetectionSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/IgniteSlowClientDetectionSelfTest.java index 3d6f116d4faac..9a923f3689d90 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/IgniteSlowClientDetectionSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/IgniteSlowClientDetectionSelfTest.java @@ -25,7 +25,7 @@ import org.apache.ignite.Ignition; import org.apache.ignite.cache.query.ContinuousQuery; import org.apache.ignite.cluster.ClusterNode; -import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.events.DiscoveryEvent; import 
org.apache.ignite.events.Event; @@ -81,10 +81,10 @@ private int nodeCount() { cfg.setCommunicationSpi(commSpi); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration dbCfg = new DataStorageConfiguration(); dbCfg.setPageSize(16 * 1024); - cfg.setMemoryConfiguration(dbCfg); + cfg.setDataStorageConfiguration(dbCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoLoadSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoLoadSelfTest.java index 5bef37207beb0..3b9e3933ed980 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoLoadSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoLoadSelfTest.java @@ -23,7 +23,7 @@ import java.util.HashSet; import java.util.List; import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.mem.DirectMemoryProvider; import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; import org.apache.ignite.internal.mem.file.MappedFileMemoryProvider; @@ -32,7 +32,7 @@ import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.PageUtils; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; @@ -309,7 +309,7 @@ public void testPageIdRotation() throws Exception { protected PageMemory memory() throws Exception { File memDir = 
U.resolveWorkDirectory(U.defaultWorkDirectory(), "pagemem", false); - MemoryPolicyConfiguration plcCfg = new MemoryPolicyConfiguration() + DataRegionConfiguration plcCfg = new DataRegionConfiguration() .setMaxSize(MAX_MEMORY_SIZE).setInitialSize(MAX_MEMORY_SIZE); DirectMemoryProvider provider = new MappedFileMemoryProvider(log(), memDir); @@ -320,7 +320,7 @@ protected PageMemory memory() throws Exception { null, PAGE_SIZE, plcCfg, - new MemoryMetricsImpl(plcCfg), + new DataRegionMetricsImpl(plcCfg), true); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheClientStoreSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheClientStoreSelfTest.java index 3a418f04ce51a..87037918306e0 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheClientStoreSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheClientStoreSelfTest.java @@ -31,8 +31,8 @@ import org.apache.ignite.cache.store.CacheStore; import org.apache.ignite.cache.store.CacheStoreAdapter; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.lang.IgniteBiInClosure; @@ -75,7 +75,7 @@ public class CacheClientStoreSelfTest extends GridCommonAbstractTest { cfg.setClientMode(client); if (client) - cfg.setMemoryConfiguration(new MemoryConfiguration()); + cfg.setDataStorageConfiguration(new DataStorageConfiguration()); CacheConfiguration cc = new CacheConfiguration(DEFAULT_CACHE_NAME); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheConfigurationLeakTest.java 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheConfigurationLeakTest.java index bf94d16ab59f3..6b0386729e738 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheConfigurationLeakTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheConfigurationLeakTest.java @@ -21,9 +21,9 @@ import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.eviction.lru.LruEvictionPolicy; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; @@ -43,17 +43,16 @@ public CacheConfigurationLeakTest() { @Override protected IgniteConfiguration getConfiguration() throws Exception { IgniteConfiguration cfg = super.getConfiguration(); - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration(); - MemoryPolicyConfiguration plc = new MemoryPolicyConfiguration(); + DataRegionConfiguration plc = new DataRegionConfiguration(); plc.setName("dfltPlc"); - plc.setMaxSize(MemoryConfiguration.DFLT_MEMORY_POLICY_MAX_SIZE * 10); + plc.setMaxSize(DataStorageConfiguration.DFLT_DATA_REGION_MAX_SIZE * 10); - memCfg.setDefaultMemoryPolicyName("dfltPlc"); - memCfg.setMemoryPolicies(plc); + memCfg.setDefaultDataRegionConfiguration(plc); - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); return cfg; } diff --git 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheMemoryPolicyConfigurationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheDataRegionConfigurationTest.java similarity index 73% rename from modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheMemoryPolicyConfigurationTest.java rename to modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheDataRegionConfigurationTest.java index 0fb9c08409951..775aaa80fa467 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheMemoryPolicyConfigurationTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheDataRegionConfigurationTest.java @@ -19,9 +19,9 @@ import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; @@ -29,12 +29,12 @@ /** * */ -public class CacheMemoryPolicyConfigurationTest extends GridCommonAbstractTest { +public class CacheDataRegionConfigurationTest extends GridCommonAbstractTest { /** */ private volatile CacheConfiguration ccfg; /** */ - private volatile MemoryConfiguration memCfg; + private volatile DataStorageConfiguration memCfg; /** */ private static final long DFLT_MEM_PLC_SIZE = 10 * 1024 * 1024; @@ -47,7 +47,7 @@ public class CacheMemoryPolicyConfigurationTest extends GridCommonAbstractTest { IgniteConfiguration cfg = 
super.getConfiguration(gridName); if (memCfg != null) - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); if (ccfg != null) cfg.setCacheConfiguration(ccfg); @@ -61,12 +61,12 @@ public class CacheMemoryPolicyConfigurationTest extends GridCommonAbstractTest { } /** - * Verifies that proper exception is thrown when MemoryPolicy is misconfigured for cache. + * Verifies that proper exception is thrown when DataRegion is misconfigured for cache. */ - public void testMissingMemoryPolicy() throws Exception { + public void testMissingDataRegion() throws Exception { ccfg = new CacheConfiguration(DEFAULT_CACHE_NAME); - ccfg.setMemoryPolicyName("nonExistingMemPlc"); + ccfg.setDataRegionName("nonExistingMemPlc"); try { startGrid(0); @@ -74,31 +74,31 @@ public void testMissingMemoryPolicy() throws Exception { catch (IgniteCheckedException e) { String msg = e.getMessage(); - assertTrue("Not expected exception was thrown: " + e, msg.contains("Requested MemoryPolicy is not configured")); + assertTrue("Not expected exception was thrown: " + e, msg.contains("Requested DataRegion is not configured")); return; } - fail("Expected exception was not thrown: missing MemoryPolicy"); + fail("Expected exception was not thrown: missing DataRegion"); } /** - * Verifies that {@link IgniteOutOfMemoryException} is thrown when cache is configured with too small MemoryPolicy. + * Verifies that {@link IgniteOutOfMemoryException} is thrown when cache is configured with too small DataRegion. 
*/ - public void testTooSmallMemoryPolicy() throws Exception { - memCfg = new MemoryConfiguration(); + public void testTooSmallDataRegion() throws Exception { + memCfg = new DataStorageConfiguration(); - MemoryPolicyConfiguration dfltPlcCfg = new MemoryPolicyConfiguration(); + DataRegionConfiguration dfltPlcCfg = new DataRegionConfiguration(); dfltPlcCfg.setName("dfltPlc"); dfltPlcCfg.setInitialSize(10 * 1024 * 1024); dfltPlcCfg.setMaxSize(10 * 1024 * 1024); - MemoryPolicyConfiguration bigPlcCfg = new MemoryPolicyConfiguration(); + DataRegionConfiguration bigPlcCfg = new DataRegionConfiguration(); bigPlcCfg.setName("bigPlc"); bigPlcCfg.setMaxSize(1024 * 1024 * 1024); - memCfg.setMemoryPolicies(dfltPlcCfg, bigPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltPlc"); + memCfg.setDataRegionConfigurations(bigPlcCfg); + memCfg.setDefaultDataRegionConfiguration(dfltPlcCfg); ccfg = new CacheConfiguration(DEFAULT_CACHE_NAME); @@ -140,22 +140,22 @@ public void testTooSmallMemoryPolicy() throws Exception { * Verifies that with enough memory allocated adding values to cache doesn't cause any exceptions. 
*/ public void testProperlySizedMemoryPolicy() throws Exception { - memCfg = new MemoryConfiguration(); + memCfg = new DataStorageConfiguration(); - MemoryPolicyConfiguration dfltPlcCfg = new MemoryPolicyConfiguration(); + DataRegionConfiguration dfltPlcCfg = new DataRegionConfiguration(); dfltPlcCfg.setName("dfltPlc"); dfltPlcCfg.setInitialSize(DFLT_MEM_PLC_SIZE); dfltPlcCfg.setMaxSize(DFLT_MEM_PLC_SIZE); - MemoryPolicyConfiguration bigPlcCfg = new MemoryPolicyConfiguration(); + DataRegionConfiguration bigPlcCfg = new DataRegionConfiguration(); bigPlcCfg.setName("bigPlc"); bigPlcCfg.setMaxSize(BIG_MEM_PLC_SIZE); - memCfg.setMemoryPolicies(dfltPlcCfg, bigPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltPlc"); + memCfg.setDataRegionConfigurations(bigPlcCfg); + memCfg.setDefaultDataRegionConfiguration(dfltPlcCfg); ccfg = new CacheConfiguration(DEFAULT_CACHE_NAME); - ccfg.setMemoryPolicyName("bigPlc"); + ccfg.setDataRegionName("bigPlc"); IgniteEx ignite0 = startGrid(0); @@ -166,7 +166,7 @@ public void testProperlySizedMemoryPolicy() throws Exception { cache.put(i, "abc"); } catch (Exception e) { - fail("With properly sized MemoryPolicy no exceptions are expected to be thrown."); + fail("With properly sized DataRegion no exceptions are expected to be thrown."); } } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheStopAndDestroySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheStopAndDestroySelfTest.java index c53bc4bb0767c..5eb8292a3e43f 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheStopAndDestroySelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheStopAndDestroySelfTest.java @@ -27,8 +27,8 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import 
org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.managers.communication.GridIoMessage; @@ -72,7 +72,7 @@ public class CacheStopAndDestroySelfTest extends GridCommonAbstractTest { private static String CACHE_NAME_LOC = "cache_local"; /** Memory configuration to be used on client nodes with local caches. */ - private static MemoryConfiguration memCfg; + private static DataStorageConfiguration memCfg; /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { @@ -97,7 +97,7 @@ protected int gridCount() { if (getTestIgniteInstanceName(2).equals(igniteInstanceName)) { iCfg.setClientMode(true); - iCfg.setMemoryConfiguration(memCfg); + iCfg.setDataStorageConfiguration(memCfg); } ((TcpDiscoverySpi)iCfg.getDiscoverySpi()).setIpFinder(ipFinder); @@ -670,7 +670,7 @@ public void testNearCloseWithTry() throws Exception { * @throws Exception If failed. */ public void testLocalClose() throws Exception { - memCfg = new MemoryConfiguration(); + memCfg = new DataStorageConfiguration(); startGridsMultiThreaded(gridCount()); @@ -721,7 +721,7 @@ public void testLocalClose() throws Exception { * @throws Exception If failed. 
*/ public void testLocalCloseWithTry() throws Exception { - memCfg = new MemoryConfiguration(); + memCfg = new DataStorageConfiguration(); startGridsMultiThreaded(gridCount()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridMemoryConfigurationConsistencySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridDataStorageConfigurationConsistencySelfTest.java similarity index 86% rename from modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridMemoryConfigurationConsistencySelfTest.java rename to modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridDataStorageConfigurationConsistencySelfTest.java index bc71e336ae594..3c728f7898045 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridMemoryConfigurationConsistencySelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridDataStorageConfigurationConsistencySelfTest.java @@ -19,8 +19,8 @@ import java.util.concurrent.Callable; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; @@ -30,7 +30,7 @@ /** * Tests a check of memory configuration consistency. */ -public class GridMemoryConfigurationConsistencySelfTest extends GridCommonAbstractTest { +public class GridDataStorageConfigurationConsistencySelfTest extends GridCommonAbstractTest { /** IP finder. 
*/ private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); @@ -43,12 +43,12 @@ public class GridMemoryConfigurationConsistencySelfTest extends GridCommonAbstra cfg.setDiscoverySpi(discoSpi); - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration(); // Nodes will have different page size. - memCfg.setPageSize(MemoryConfiguration.DFLT_PAGE_SIZE * (1 + getTestIgniteInstanceIndex(gridName))); + memCfg.setPageSize(DataStorageConfiguration.DFLT_PAGE_SIZE * (1 + getTestIgniteInstanceIndex(gridName))); - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClusterActivateDeactivateTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClusterActivateDeactivateTest.java index 4c9ad271fe96f..1827c659a654f 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClusterActivateDeactivateTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClusterActivateDeactivateTest.java @@ -29,9 +29,9 @@ import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteClientReconnectAbstractTest; import org.apache.ignite.internal.IgniteInternalFuture; @@ -67,6 +67,9 @@ public class IgniteClusterActivateDeactivateTest extends GridCommonAbstractTest /** */ static final 
String CACHE_NAME_PREFIX = "cache-"; + /** Non-persistent data region name. */ + private static final String NO_PERSISTENCE_REGION = "no-persistence-region"; + /** */ boolean client; @@ -116,19 +119,21 @@ else if (testDiscoSpi) ccfgs = null; } - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration(); memCfg.setPageSize(1024); - memCfg.setDefaultMemoryPolicySize(10 * 1024 * 1024); + memCfg.setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setMaxSize(10 * 1024 * 1024) + .setPersistenceEnabled(persistenceEnabled())); - cfg.setMemoryConfiguration(memCfg); + memCfg.setDataRegionConfigurations(new DataRegionConfiguration() + .setMaxSize(10 * 1024 * 1024) + .setName(NO_PERSISTENCE_REGION) + .setPersistenceEnabled(false)); - if (persistenceEnabled()) { - PersistentStoreConfiguration pCfg = new PersistentStoreConfiguration(); + if (persistenceEnabled()) + memCfg.setWalMode(WALMode.LOG_ONLY); - pCfg.setWalMode(WALMode.LOG_ONLY); - - cfg.setPersistentStoreConfiguration(pCfg); - } + cfg.setDataStorageConfiguration(memCfg); if (testSpi) { TestRecordingCommunicationSpi spi = new TestRecordingCommunicationSpi(); @@ -1236,12 +1241,15 @@ final CacheConfiguration[] cacheConfigurations1() { * @return Cache configurations. 
*/ final CacheConfiguration[] cacheConfigurations2() { - CacheConfiguration[] ccfgs = new CacheConfiguration[4]; + CacheConfiguration[] ccfgs = new CacheConfiguration[5]; ccfgs[0] = cacheConfiguration(CACHE_NAME_PREFIX + 0, ATOMIC); ccfgs[1] = cacheConfiguration(CACHE_NAME_PREFIX + 1, TRANSACTIONAL); ccfgs[2] = cacheConfiguration(CACHE_NAME_PREFIX + 2, ATOMIC); ccfgs[3] = cacheConfiguration(CACHE_NAME_PREFIX + 3, TRANSACTIONAL); + ccfgs[4] = cacheConfiguration(CACHE_NAME_PREFIX + 4, TRANSACTIONAL); + + ccfgs[4].setDataRegionName(NO_PERSISTENCE_REGION); return ccfgs; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClusterActivateDeactivateTestWithPersistence.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClusterActivateDeactivateTestWithPersistence.java index 4a19aa8a94da1..624595215ae10 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClusterActivateDeactivateTestWithPersistence.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClusterActivateDeactivateTestWithPersistence.java @@ -23,6 +23,9 @@ import org.apache.ignite.Ignite; import org.apache.ignite.IgniteException; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.testframework.GridTestUtils; /** @@ -73,7 +76,7 @@ public void testActivateCachesRestore_5_Servers() throws Exception { * @throws Exception If failed. 
*/ public void testActivateCachesRestore_5_Servers_WithNewCaches() throws Exception { - activateCachesRestore(5, false); + activateCachesRestore(5, true); } /** @@ -120,8 +123,9 @@ private void activateCachesRestore(int srvs, boolean withNewCaches) throws Excep checkCache(ignite(i), CACHE_NAME_PREFIX + c, true); } - for (CacheConfiguration ccfg : cacheConfigurations1()) - checkCacheData(cacheData, ccfg.getName()); + DataStorageConfiguration dsCfg = srv.configuration().getDataStorageConfiguration(); + + checkCachesData(cacheData, dsCfg); checkCaches(srvs, CACHES); @@ -152,8 +156,24 @@ private void activateCachesRestore(int srvs, boolean withNewCaches) throws Excep checkCache(ignite(i), CACHE_NAME_PREFIX + c, true); } - for (CacheConfiguration ccfg : cacheConfigurations1()) - checkCacheData(cacheData, ccfg.getName()); + checkCachesData(cacheData, dsCfg); + } + + /** + * Checks that persistent caches are present with actual data and volatile caches are missing. + * + * @param cacheData Cache data. + * @param dsCfg DataStorageConfiguration. 
+ */ + private void checkCachesData(Map cacheData, DataStorageConfiguration dsCfg) { + for (CacheConfiguration ccfg : cacheConfigurations1()) { + if (CU.isPersistentCache(ccfg, dsCfg)) + checkCacheData(cacheData, ccfg.getName()); + else { + for (Ignite node : G.allGrids()) + assertTrue(node.cache(ccfg.getName()) == null || node.cache(ccfg.getName()).size() == 0); + } + } } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/MemoryPolicyConfigValidationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/MemoryPolicyConfigValidationTest.java index 1d8174b1de619..d0130cfe20fd3 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/MemoryPolicyConfigValidationTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/MemoryPolicyConfigValidationTest.java @@ -187,7 +187,7 @@ private MemoryPolicyConfiguration[] createTooSmallMemoryCfg() { private MemoryPolicyConfiguration[] createPlcWithReservedNameMisuseCfg() { MemoryPolicyConfiguration[] res = new MemoryPolicyConfiguration[1]; - res[0] = createMemoryPolicy("sysMemPlc", 1024 * 1024, 1024 * 1024); + res[0] = createMemoryPolicy("sysMemPlc", 10 * 1024 * 1024, 10 * 1024 * 1024); return res; } @@ -354,36 +354,34 @@ private void doTest(ValidationViolationType violationType) throws Exception { */ private enum ValidationViolationType { /** */ - NAMES_CONFLICT("Two MemoryPolicies have the same name: "), + NAMES_CONFLICT("have the same name"), /** */ - SYSTEM_MEMORY_POLICY_NAME_MISUSE("'sysMemPlc' policy name is reserved for internal use."), + SYSTEM_MEMORY_POLICY_NAME_MISUSE("name is reserved for internal use"), /** */ - TOO_SMALL_MEMORY_SIZE("MemoryPolicy must have size more than 10MB "), + TOO_SMALL_MEMORY_SIZE("must have size more than 10MB"), /** */ - NULL_NAME_ON_USER_DEFINED_POLICY("User-defined MemoryPolicyConfiguration must have non-null and non-empty name."), + 
NULL_NAME_ON_USER_DEFINED_POLICY("must have non-null and non-empty name"), /** */ - MISSING_USER_DEFINED_DEFAULT("User-defined default MemoryPolicy name must be presented among configured MemoryPolices: "), + MISSING_USER_DEFINED_DEFAULT("name must be presented among configured"), /** */ - DEFAULT_SIZE_IS_DEFINED_TWICE("User-defined MemoryPolicy configuration and defaultMemoryPolicySize properties are set at the same time."), + DEFAULT_SIZE_IS_DEFINED_TWICE("properties are set at the same time."), /** */ - TOO_SMALL_USER_DEFINED_DFLT_MEM_PLC_SIZE("User-defined default MemoryPolicy size is less than 1MB."), + TOO_SMALL_USER_DEFINED_DFLT_MEM_PLC_SIZE("must have size more than 10MB"), /** */ - MAX_SIZE_IS_SMALLER_THAN_INITIAL_SIZE("MemoryPolicy maxSize must not be smaller than initialSize"), + MAX_SIZE_IS_SMALLER_THAN_INITIAL_SIZE("must not be smaller than initialSize"), /** Case when rateTimeInterval property of MemoryPolicyConfiguration is less than or equals zero. */ - LTE_ZERO_RATE_TIME_INTERVAL("Rate time interval must be greater than zero " + - "(use MemoryPolicyConfiguration.rateTimeInterval property to adjust the interval)"), + LTE_ZERO_RATE_TIME_INTERVAL("Rate time interval must be greater than zero"), /** Case when subIntervals property of MemoryPolicyConfiguration is less than or equals zero. */ - LTE_ZERO_SUB_INTERVALS("Sub intervals must be greater than zero " + - "(use MemoryPolicyConfiguration.subIntervals property to adjust the sub intervals)"); + LTE_ZERO_SUB_INTERVALS("Sub intervals must be greater than zero"); /** * @param violationMsg Violation message. 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/NonAffinityCoordinatorDynamicStartStopTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/NonAffinityCoordinatorDynamicStartStopTest.java index d0659417b7504..0f50efc98e35f 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/NonAffinityCoordinatorDynamicStartStopTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/NonAffinityCoordinatorDynamicStartStopTest.java @@ -23,8 +23,8 @@ import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; @@ -59,15 +59,10 @@ public class NonAffinityCoordinatorDynamicStartStopTest extends GridCommonAbstra TcpDiscoverySpi discoverySpi = (TcpDiscoverySpi)cfg.getDiscoverySpi(); discoverySpi.setIpFinder(ipFinder); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration().setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(200 * 1024 * 1024)); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - memPlcCfg.setMaxSize(200 * 1000 * 1000); - - memPlcCfg.setName("dfltMemPlc"); - - dbCfg.setMemoryPolicies(memPlcCfg); - dbCfg.setDefaultMemoryPolicyName("dfltMemPlc"); + cfg.setDataStorageConfiguration(memCfg); if (gridName.contains(DUMMY_GRID_NAME)) cfg.setUserAttributes(F.asMap(TEST_ATTRIBUTE, false)); diff --git 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/Cache64kPartitionsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/Cache64kPartitionsTest.java index fe139bae6c447..7d9df2628be20 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/Cache64kPartitionsTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/Cache64kPartitionsTest.java @@ -19,8 +19,10 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; @@ -43,8 +45,14 @@ public class Cache64kPartitionsTest extends GridCommonAbstractTest { cfg.setActiveOnStart(false); - if (persistenceEnabled) - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); + if (persistenceEnabled) { + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY); + + cfg.setDataStorageConfiguration(memCfg); + } return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheLateAffinityAssignmentTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheLateAffinityAssignmentTest.java index 7f2a5d089d53a..ab07611792c7a 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheLateAffinityAssignmentTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheLateAffinityAssignmentTest.java @@ -48,8 +48,9 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.GridNodeOrderComparator; @@ -182,11 +183,11 @@ public class CacheLateAffinityAssignmentTest extends GridCommonAbstractTest { discoSpi.setJoinTimeout(30_000); } - MemoryConfiguration cfg1 = new MemoryConfiguration(); + DataStorageConfiguration cfg1 = new DataStorageConfiguration(); - cfg1.setDefaultMemoryPolicySize(150 * 1024 * 1024L); + cfg1.setDefaultDataRegionConfiguration(new DataRegionConfiguration().setMaxSize(150 * 1024 * 1024L)); - cfg.setMemoryConfiguration(cfg1); + cfg.setDataStorageConfiguration(cfg1); cfg.setClientMode(client); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheStartOnJoinTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheStartOnJoinTest.java index 88df6079c37a1..44d8b443d2843 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheStartOnJoinTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheStartOnJoinTest.java @@ -32,8 +32,9 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import 
org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.internal.IgniteKernal; import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; @@ -96,11 +97,11 @@ public class CacheStartOnJoinTest extends GridCommonAbstractTest { cfg.setDiscoverySpi(testSpi); - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration(); memCfg.setPageSize(1024); - memCfg.setDefaultMemoryPolicySize(50 * 1024 * 1024); + memCfg.setDefaultDataRegionConfiguration(new DataRegionConfiguration().setMaxSize(50 * 1024 * 1024)); - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); cfg.setClientMode(client); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/paged/PageEvictionAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/paged/PageEvictionAbstractTest.java index bda7940005ae4..072ca7fc0d3bb 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/paged/PageEvictionAbstractTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/paged/PageEvictionAbstractTest.java @@ -22,9 +22,9 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.DataPageEvictionMode; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import 
org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; @@ -63,10 +63,14 @@ public class PageEvictionAbstractTest extends GridCommonAbstractTest { * @return Configuration with given eviction mode set. */ static IgniteConfiguration setEvictionMode(DataPageEvictionMode mode, IgniteConfiguration configuration) { - MemoryPolicyConfiguration[] policies = configuration.getMemoryConfiguration().getMemoryPolicies(); + DataRegionConfiguration[] policies = configuration.getDataStorageConfiguration().getDataRegionConfigurations(); - for (MemoryPolicyConfiguration plcCfg : policies) - plcCfg.setPageEvictionMode(mode); + if (policies != null) { + for (DataRegionConfiguration plcCfg : policies) + plcCfg.setPageEvictionMode(mode); + } + + configuration.getDataStorageConfiguration().getDefaultDataRegionConfiguration().setPageEvictionMode(mode); return configuration; } @@ -84,9 +88,9 @@ protected boolean nearEnabled() { ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration dbCfg = new DataStorageConfiguration(); - MemoryPolicyConfiguration plc = new MemoryPolicyConfiguration(); + DataRegionConfiguration plc = new DataRegionConfiguration(); // This will test additional segment allocation. 
plc.setInitialSize(SIZE / 2); @@ -95,11 +99,10 @@ protected boolean nearEnabled() { plc.setEvictionThreshold(EVICTION_THRESHOLD); plc.setName(DEFAULT_POLICY_NAME); - dbCfg.setMemoryPolicies(plc); + dbCfg.setDefaultDataRegionConfiguration(plc); dbCfg.setPageSize(PAGE_SIZE); - dbCfg.setDefaultMemoryPolicyName(DEFAULT_POLICY_NAME); - cfg.setMemoryConfiguration(dbCfg); + cfg.setDataStorageConfiguration(dbCfg); return cfg; } @@ -124,7 +127,7 @@ protected CacheConfiguration cacheConfig( .setAffinity(new RendezvousAffinityFunction(false, 32)) .setCacheMode(cacheMode) .setAtomicityMode(atomicityMode) - .setMemoryPolicyName(memoryPlcName) + .setDataRegionName(memoryPlcName) .setWriteSynchronizationMode(writeSynchronizationMode); if (cacheMode == CacheMode.PARTITIONED) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/expiry/IgniteCacheLargeValueExpireTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/expiry/IgniteCacheLargeValueExpireTest.java index 71d809af32d02..7e0b1d75c5ff9 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/expiry/IgniteCacheLargeValueExpireTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/expiry/IgniteCacheLargeValueExpireTest.java @@ -27,7 +27,7 @@ import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; @@ -51,10 +51,10 @@ public class IgniteCacheLargeValueExpireTest extends GridCommonAbstractTest { ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration 
dbCfg = new DataStorageConfiguration(); dbCfg.setPageSize(1024); - cfg.setMemoryConfiguration(dbCfg); + cfg.setDataStorageConfiguration(dbCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistenceMetricsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgniteDataStorageMetricsSelfTest.java similarity index 74% rename from modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistenceMetricsSelfTest.java rename to modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgniteDataStorageMetricsSelfTest.java index cfa170627c34a..93fa24ef5f036 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistenceMetricsSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgniteDataStorageMetricsSelfTest.java @@ -19,18 +19,17 @@ import java.io.Serializable; import java.util.Objects; +import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.IgniteCache; -import org.apache.ignite.MemoryMetrics; -import org.apache.ignite.PersistenceMetrics; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; 
import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.tostring.GridToStringInclude; @@ -49,7 +48,7 @@ /** * */ -public class IgnitePersistenceMetricsSelfTest extends GridCommonAbstractTest { +public class IgniteDataStorageMetricsSelfTest extends GridCommonAbstractTest { /** */ private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); @@ -74,28 +73,29 @@ public class IgnitePersistenceMetricsSelfTest extends GridCommonAbstractTest { cfg.setConsistentId(gridName); - MemoryConfiguration memCfg = new MemoryConfiguration(); - memCfg.setPageSize(1024); - - memCfg.setDefaultMemoryPolicyName("dflt-plc"); - - MemoryPolicyConfiguration memPlc = new MemoryPolicyConfiguration(); - memPlc.setName("dflt-plc"); - memPlc.setMaxSize(10 * 1024 * 1024); - memPlc.setMetricsEnabled(true); - - memCfg.setMemoryPolicies(memPlc); - - cfg.setMemoryConfiguration(memCfg); - - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration() - .setMetricsEnabled(true).setWalMode(WALMode.LOG_ONLY)); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setMaxSize(10 * 1024 * 1024) + .setPersistenceEnabled(true) + .setMetricsEnabled(true) + .setName("dflt-plc")) + .setDataRegionConfigurations(new DataRegionConfiguration() + .setMaxSize(10 * 1024 * 1024) + .setPersistenceEnabled(false) + .setMetricsEnabled(true) + .setName("no-persistence")) + .setWalMode(WALMode.LOG_ONLY) + .setPageSize(1024) + .setMetricsEnabled(true); + + cfg.setDataStorageConfiguration(memCfg); cfg.setBinaryConfiguration(new BinaryConfiguration().setCompactFooter(false)); ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER); - cfg.setCacheConfiguration(cacheConfiguration(GROUP1, "cache", PARTITIONED, ATOMIC, 1)); + cfg.setCacheConfiguration(cacheConfiguration(GROUP1, "cache", PARTITIONED, ATOMIC, 1, null), + cacheConfiguration(null, "cache-np", PARTITIONED, ATOMIC, 1, 
"no-persistence")); return cfg; } @@ -122,7 +122,8 @@ private CacheConfiguration cacheConfiguration( String name, CacheMode cacheMode, CacheAtomicityMode atomicityMode, - int backups + int backups, + String dataRegName ) { CacheConfiguration ccfg = new CacheConfiguration(); @@ -132,6 +133,7 @@ private CacheConfiguration cacheConfiguration( ccfg.setBackups(backups); ccfg.setCacheMode(cacheMode); ccfg.setWriteSynchronizationMode(FULL_SYNC); + ccfg.setDataRegionName(dataRegName); return ccfg; } @@ -150,25 +152,35 @@ public void testPersistenceMetrics() throws Exception { for (int i = 0; i < 10; i++) cache.put(i, new Person("first-" + i, "last-" + i)); - { - MemoryMetrics memMetrics = ig.memoryMetrics("dflt-plc"); + IgniteCache cacheNp = ig.cache("cache-np"); + + for (int i = 0; i < 10; i++) + cacheNp.put(i, new Person("first-" + i, "last-" + i)); + + DataRegionMetrics memMetrics = ig.dataRegionMetrics("dflt-plc"); + + assertNotNull(memMetrics); + assertTrue(memMetrics.getDirtyPages() > 0); + assertTrue(memMetrics.getPagesFillFactor() > 0); + + memMetrics = ig.dataRegionMetrics("no-persistence"); - assertNotNull(memMetrics); - assertTrue(memMetrics.getDirtyPages() > 0); - } + assertNotNull(memMetrics); + assertTrue(memMetrics.getTotalAllocatedPages() > 0); + assertTrue(memMetrics.getPagesFillFactor() > 0); ig.context().cache().context().database().waitForCheckpoint("test"); - GridTestUtils.waitForCondition(new PAX() { + assertTrue(GridTestUtils.waitForCondition(new PAX() { @Override public boolean applyx() { - PersistenceMetrics pMetrics = ig.persistentStoreMetrics(); + DataStorageMetrics pMetrics = ig.dataStorageMetrics(); assertNotNull(pMetrics); return pMetrics.getLastCheckpointTotalPagesNumber() != 0 && pMetrics.getLastCheckpointDataPagesNumber() != 0; } - }, 5_000); + }, 10_000)); } finally { stopAllGrids(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsBinaryMetadataOnClusterRestartTest.java 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsBinaryMetadataOnClusterRestartTest.java index cc3820b6e469d..50d7e7eb0abfc 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsBinaryMetadataOnClusterRestartTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsBinaryMetadataOnClusterRestartTest.java @@ -31,8 +31,9 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; @@ -63,9 +64,12 @@ public class IgnitePdsBinaryMetadataOnClusterRestartTest extends GridCommonAbstr cfg.setClientMode(clientMode); - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() + cfg.setDataStorageConfiguration( + new DataStorageConfiguration() .setWalMode(WALMode.LOG_ONLY) + .setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setPersistenceEnabled(true) + .setMaxSize(100 * 1024 * 1024)) ); BinaryConfiguration bCfg = new BinaryConfiguration(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCacheRebalancingAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCacheRebalancingAbstractTest.java index 705156fa43546..8e43e93ceff7c 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCacheRebalancingAbstractTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCacheRebalancingAbstractTest.java @@ -37,10 +37,9 @@ import org.apache.ignite.cache.QueryIndex; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; @@ -113,27 +112,23 @@ public abstract class IgnitePdsCacheRebalancingAbstractTest extends GridCommonAb cfg.setCacheConfiguration(ccfg1, ccfg2, ccfg3); } - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration(); memCfg.setConcurrencyLevel(Runtime.getRuntime().availableProcessors() * 4); memCfg.setPageSize(1024); + memCfg.setWalMode(WALMode.LOG_ONLY); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); + DataRegionConfiguration memPlcCfg = new DataRegionConfiguration(); - memPlcCfg.setName("dfltMemPlc"); + memPlcCfg.setName("dfltDataRegion"); memPlcCfg.setMaxSize(150 * 1024 * 1024); memPlcCfg.setInitialSize(100 * 1024 * 1024); - memPlcCfg.setSwapFilePath("work/swap"); + memPlcCfg.setSwapPath("work/swap"); + memPlcCfg.setPersistenceEnabled(true); - memCfg.setMemoryPolicies(memPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); + memCfg.setDefaultDataRegionConfiguration(memPlcCfg); - cfg.setMemoryConfiguration(memCfg); - - 
cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setWalMode(WALMode.LOG_ONLY) - ); + cfg.setDataStorageConfiguration(memCfg); cfg.setDiscoverySpi( new TcpDiscoverySpi() diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsClientNearCachePutGetTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsClientNearCachePutGetTest.java index 2c15416ed57e5..130a91c3ae7d0 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsClientNearCachePutGetTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsClientNearCachePutGetTest.java @@ -17,8 +17,8 @@ package org.apache.ignite.internal.processors.cache.persistence; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.processors.database.IgniteDbClientNearCachePutGetTest; import org.apache.ignite.internal.util.typedef.internal.U; @@ -33,8 +33,8 @@ public class IgnitePdsClientNearCachePutGetTest extends IgniteDbClientNearCacheP @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() + cfg.setDataStorageConfiguration( + new DataStorageConfiguration() .setWalMode(WALMode.LOG_ONLY) ); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsContinuousRestartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsContinuousRestartTest.java index a363e8c857f76..27b19501de8a9 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsContinuousRestartTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsContinuousRestartTest.java @@ -32,10 +32,9 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.util.typedef.internal.U; @@ -80,18 +79,13 @@ public IgnitePdsContinuousRestartTest(boolean cancel) { @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(400 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY) + .setCheckpointFrequency(checkpointDelay); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setMaxSize(400 * 1024 * 1024); - memPlcCfg.setInitialSize(400 * 1024 * 1024); - - memCfg.setMemoryPolicies(memPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); CacheConfiguration ccfg1 = new 
CacheConfiguration(); @@ -103,12 +97,6 @@ public IgnitePdsContinuousRestartTest(boolean cancel) { cfg.setCacheConfiguration(ccfg1); - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setWalMode(WALMode.LOG_ONLY) - .setCheckpointingFrequency(checkpointDelay) - ); - return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDynamicCacheTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDynamicCacheTest.java index 0325e122c1024..7e0cf8270043e 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDynamicCacheTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsDynamicCacheTest.java @@ -28,10 +28,9 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.processors.database.IgniteDbDynamicCacheSelfTest; import org.apache.ignite.internal.util.typedef.internal.U; @@ -46,24 +45,13 @@ public class IgnitePdsDynamicCacheTest extends IgniteDbDynamicCacheSelfTest { @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new 
DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(200 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY) + .setPageSize(1024); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(200 * 1024 * 1024); - memPlcCfg.setMaxSize(200 * 1024 * 1024); - - dbCfg.setPageSize(1024); - dbCfg.setMemoryPolicies(memPlcCfg); - dbCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(dbCfg); - - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setWalMode(WALMode.LOG_ONLY) - ); + cfg.setDataStorageConfiguration(memCfg); if ("client".equals(gridName)) cfg.setClientMode(true); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsExchangeDuringCheckpointTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsExchangeDuringCheckpointTest.java index 94b8f5331729a..2586a1f9476e1 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsExchangeDuringCheckpointTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsExchangeDuringCheckpointTest.java @@ -19,10 +19,9 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import 
org.apache.ignite.internal.util.typedef.internal.U; @@ -40,6 +39,9 @@ public class IgnitePdsExchangeDuringCheckpointTest extends GridCommonAbstractTes /** */ private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + /** Non-persistent data region name. */ + private static final String NO_PERSISTENCE_REGION = "no-persistence-region"; + /** * */ @@ -88,31 +90,28 @@ public void testExchangeOnNodeJoin() throws Exception { @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - MemoryConfiguration memCfg = new MemoryConfiguration(); - - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(100 * 1024 * 1024); - memPlcCfg.setMaxSize(1000 * 1024 * 1024); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(800 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY) + .setCheckpointThreads(1) + .setCheckpointFrequency(1); - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - memCfg.setMemoryPolicies(memPlcCfg); + memCfg.setDataRegionConfigurations(new DataRegionConfiguration() + .setMaxSize(200 * 1024 * 1024) + .setName(NO_PERSISTENCE_REGION) + .setPersistenceEnabled(false)); - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); CacheConfiguration ccfg = new CacheConfiguration(DEFAULT_CACHE_NAME); - ccfg.setAffinity(new RendezvousAffinityFunction(false, 4096)); - - cfg.setCacheConfiguration(ccfg); + CacheConfiguration ccfgNp = new CacheConfiguration("nonPersistentCache"); + ccfgNp.setDataRegionName(NO_PERSISTENCE_REGION); - PersistentStoreConfiguration psiCfg = new PersistentStoreConfiguration() - .setCheckpointingThreads(1) - .setCheckpointingFrequency(1) - .setWalMode(WALMode.LOG_ONLY); + ccfg.setAffinity(new 
RendezvousAffinityFunction(false, 4096)); - cfg.setPersistentStoreConfiguration(psiCfg); + cfg.setCacheConfiguration(ccfg, ccfgNp); TcpDiscoverySpi discoSpi = new TcpDiscoverySpi(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsMarshallerMappingRestoreOnNodeStartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsMarshallerMappingRestoreOnNodeStartTest.java index 517b9ea869acc..0429d6b876e6b 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsMarshallerMappingRestoreOnNodeStartTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsMarshallerMappingRestoreOnNodeStartTest.java @@ -23,8 +23,8 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cache.affinity.AffinityKeyMapped; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; /** @@ -41,8 +41,8 @@ public class IgnitePdsMarshallerMappingRestoreOnNodeStartTest extends GridCommon cfg.setWorkDirectory(Paths.get(tmpDir, "srv" + gridIndex).toString()); - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() + cfg.setDataStorageConfiguration( + new DataStorageConfiguration() ); cfg.setCacheConfiguration(new CacheConfiguration() diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsMultiNodePutGetRestartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsMultiNodePutGetRestartTest.java index 04372510d823b..6a2c9b8a8fe94 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsMultiNodePutGetRestartTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsMultiNodePutGetRestartTest.java @@ -30,10 +30,10 @@ import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; @@ -72,18 +72,12 @@ public IgnitePdsMultiNodePutGetRestartTest() { @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(100 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(100 * 1024 * 1024); - memPlcCfg.setMaxSize(100 * 1024 * 1024); - - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - memCfg.setMemoryPolicies(memPlcCfg); - - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); CacheConfiguration ccfg = new CacheConfiguration(); @@ 
-97,8 +91,6 @@ public IgnitePdsMultiNodePutGetRestartTest() { cfg.setCacheConfiguration(ccfg); - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); - TcpDiscoverySpi discoSpi = new TcpDiscoverySpi(); discoSpi.setIpFinder(IP_FINDER); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsPageSizesTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsPageSizesTest.java index 1d6ba4b9029a7..d2ec33a63330f 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsPageSizesTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsPageSizesTest.java @@ -23,10 +23,9 @@ import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.typedef.internal.U; @@ -49,25 +48,13 @@ public class IgnitePdsPageSizesTest extends GridCommonAbstractTest { @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(100 * 1024 * 1024).setPersistenceEnabled(true)) 
+ .setWalMode(WALMode.LOG_ONLY) + .setPageSize(pageSize); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(100 * 1024 * 1024); - memPlcCfg.setMaxSize(100 * 1024 * 1024); - - memCfg.setMemoryPolicies(memPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - memCfg.setPageSize(pageSize); - - cfg.setMemoryConfiguration(memCfg); - - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setWalMode(WALMode.LOG_ONLY) - ); + cfg.setDataStorageConfiguration(memCfg); cfg.setCacheConfiguration( new CacheConfiguration(cacheName) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java index 6e2752d7e205c..936944317b694 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java @@ -26,10 +26,10 @@ import org.apache.ignite.cache.CacheRebalanceMode; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.pagemem.FullPageId; import 
org.apache.ignite.internal.pagemem.PageIdAllocator; @@ -69,7 +69,7 @@ public class IgnitePdsRecoveryAfterFileCorruptionTest extends GridCommonAbstract private final String cacheName = "cache"; /** Policy name. */ - private final String policyName = "dfltMemPlc"; + private final String policyName = "dfltDataRegion"; /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { @@ -82,24 +82,17 @@ public class IgnitePdsRecoveryAfterFileCorruptionTest extends GridCommonAbstract cfg.setCacheConfiguration(ccfg); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + .setMaxSize(1024 * 1024 * 1024) + .setPersistenceEnabled(true) + .setName(policyName)) + .setWalMode(WALMode.LOG_ONLY) + .setCheckpointFrequency(500) + .setAlwaysWriteFullPages(true); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName(policyName); - memPlcCfg.setInitialSize(1024 * 1024 * 1024); - memPlcCfg.setMaxSize(1024 * 1024 * 1024); - - dbCfg.setMemoryPolicies(memPlcCfg); - dbCfg.setDefaultMemoryPolicyName(policyName); - - cfg.setMemoryConfiguration(dbCfg); - - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setCheckpointingFrequency(500) - .setAlwaysWriteFullPages(true) - ); + cfg.setDataStorageConfiguration(memCfg); cfg.setDiscoverySpi( new TcpDiscoverySpi() @@ -147,7 +140,7 @@ public void testPageRecoveryAfterFileCorruption() throws Exception { // Disable integrated checkpoint thread. 
psMgr.enableCheckpoints(false).get(); - PageMemory mem = sharedCtx.database().memoryPolicy(policyName).pageMemory(); + PageMemory mem = sharedCtx.database().dataRegion(policyName).pageMemory(); int cacheId = sharedCtx.cache().cache(cacheName).context().cacheId(); @@ -212,7 +205,7 @@ private void checkRestore(IgniteEx ig, FullPageId[] pages) throws IgniteCheckedE dbMgr.enableCheckpoints(false).get(); - PageMemory mem = shared.database().memoryPolicy(null).pageMemory(); + PageMemory mem = shared.database().dataRegion(null).pageMemory(); for (FullPageId fullId : pages) { long page = mem.acquirePage(fullId.groupId(), fullId.pageId()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRemoveDuringRebalancingTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRemoveDuringRebalancingTest.java index 78da14d94f13f..d2c157b8c70cb 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRemoveDuringRebalancingTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRemoveDuringRebalancingTest.java @@ -25,10 +25,9 @@ import org.apache.ignite.cache.CacheRebalanceMode; import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; @@ -61,27 +60,17 @@ public class 
IgnitePdsRemoveDuringRebalancingTest extends GridCommonAbstractTest .setRebalanceMode(CacheRebalanceMode.SYNC) ); - MemoryConfiguration dbCfg = new MemoryConfiguration(); - - dbCfg.setConcurrencyLevel(Runtime.getRuntime().availableProcessors() * 4); - dbCfg.setPageSize(1024); - - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(100 * 1024 * 1024); - memPlcCfg.setMaxSize(100 * 1024 * 1024); - memPlcCfg.setSwapFilePath(DFLT_STORE_DIR); - - dbCfg.setMemoryPolicies(memPlcCfg); - dbCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(dbCfg); - - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + .setMaxSize(100 * 1024 * 1024) + .setPersistenceEnabled(true) + .setSwapPath(DFLT_STORE_DIR)) .setWalMode(WALMode.LOG_ONLY) - ); + .setPageSize(1024) + .setConcurrencyLevel(Runtime.getRuntime().availableProcessors() * 4); + + cfg.setDataStorageConfiguration(memCfg); cfg.setDiscoverySpi( new TcpDiscoverySpi() diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsSingleNodePutGetPersistenceTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsSingleNodePutGetPersistenceTest.java index 4add384b7e4e3..18e31fcbdcba2 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsSingleNodePutGetPersistenceTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsSingleNodePutGetPersistenceTest.java @@ -17,8 +17,8 @@ package org.apache.ignite.internal.processors.cache.persistence; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import 
org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.processors.database.IgniteDbSingleNodePutGetTest; import org.apache.ignite.internal.util.typedef.internal.U; @@ -33,8 +33,8 @@ public class IgnitePdsSingleNodePutGetPersistenceTest extends IgniteDbSingleNode @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() + cfg.setDataStorageConfiguration( + new DataStorageConfiguration() .setWalMode(WALMode.LOG_ONLY) ); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistenceSequentialCheckpointTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistenceSequentialCheckpointTest.java index 92950002d5f77..814ee57bfeeac 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistenceSequentialCheckpointTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistenceSequentialCheckpointTest.java @@ -18,7 +18,7 @@ import org.apache.ignite.configuration.CheckpointWriteOrder; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.WALMode; /** @@ -29,9 +29,9 @@ public class IgnitePersistenceSequentialCheckpointTest extends IgnitePersistentS @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration() + cfg.setDataStorageConfiguration(new DataStorageConfiguration() 
.setWalMode(WALMode.LOG_ONLY) - .setCheckpointingThreads(4) + .setCheckpointThreads(4) .setCheckpointWriteOrder(CheckpointWriteOrder.SEQUENTIAL)); return cfg; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistentStoreCacheGroupsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistentStoreCacheGroupsTest.java index dc6517731e226..1e7ad1b777fe4 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistentStoreCacheGroupsTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistentStoreCacheGroupsTest.java @@ -33,9 +33,9 @@ import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.processors.platform.cache.expiry.PlatformExpiryPolicy; import org.apache.ignite.internal.util.tostring.GridToStringInclude; @@ -85,13 +85,13 @@ public class IgnitePersistentStoreCacheGroupsTest extends GridCommonAbstractTest cfg.setConsistentId(gridName); - MemoryConfiguration memCfg = new MemoryConfiguration(); - memCfg.setPageSize(1024); - memCfg.setDefaultMemoryPolicySize(100 * 1024 * 1024); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(100 * 1024 * 1024).setPersistenceEnabled(true)) + .setPageSize(1024) + .setWalMode(WALMode.LOG_ONLY); - 
cfg.setMemoryConfiguration(memCfg); - - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration().setWalMode(WALMode.LOG_ONLY)); + cfg.setDataStorageConfiguration(memCfg); cfg.setBinaryConfiguration(new BinaryConfiguration().setCompactFooter(false)); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistentStoreDataStructuresTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistentStoreDataStructuresTest.java index fed87666e6b03..e9828f5d17d00 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistentStoreDataStructuresTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistentStoreDataStructuresTest.java @@ -26,10 +26,9 @@ import org.apache.ignite.IgniteSemaphore; import org.apache.ignite.IgniteSet; import org.apache.ignite.configuration.CollectionConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; @@ -50,20 +49,12 @@ public class IgnitePersistentStoreDataStructuresTest extends GridCommonAbstractT ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(ipFinder); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(200 * 1024 * 1024).setPersistenceEnabled(true)) + 
.setWalMode(WALMode.LOG_ONLY); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(200 * 1024 * 1024); - memPlcCfg.setMaxSize(200 * 1024 * 1024); - - dbCfg.setMemoryPolicies(memPlcCfg); - dbCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(dbCfg); - - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration().setWalMode(WALMode.LOG_ONLY)); + cfg.setDataStorageConfiguration(memCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java index 842f6180470a6..fb1574db962e2 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java @@ -73,7 +73,7 @@ public void testNoConfigProvided() throws Exception { IgniteEx ignite = startGrid(0); - Collection allMemPlcs = ignite.context().cache().context().database().memoryPolicies(); + Collection allMemPlcs = ignite.context().cache().context().database().dataRegions(); assertTrue(allMemPlcs.size() == 2); @@ -89,7 +89,7 @@ public void testCustomConfigNoDefault() throws Exception { IgniteEx ignite = startGrid(0); - Collection allMemPlcs = ignite.context().cache().context().database().memoryPolicies(); + Collection allMemPlcs = ignite.context().cache().context().database().dataRegions(); assertTrue(allMemPlcs.size() == 3); @@ -110,13 +110,13 @@ public void testCustomConfigOverridesDefault() throws Exception { IgniteCacheDatabaseSharedManager dbMgr = ignite.context().cache().context().database(); - Collection allMemPlcs = dbMgr.memoryPolicies(); + Collection allMemPlcs = dbMgr.dataRegions(); 
assertTrue(allMemPlcs.size() == 2); verifyDefaultAndSystemMemoryPolicies(allMemPlcs); - MemoryPolicy dfltMemPlc = U.field(dbMgr, "dfltMemPlc"); + DataRegion dfltMemPlc = U.field(dbMgr, "dfltDataRegion"); assertTrue(dfltMemPlc.config().getMaxSize() == USER_DEFAULT_MEM_PLC_SIZE); } @@ -134,13 +134,13 @@ public void testCustomConfigOverridesDefaultNameAndDeclaresDefault() throws Exce IgniteCacheDatabaseSharedManager dbMgr = ignite.context().cache().context().database(); - Collection allMemPlcs = dbMgr.memoryPolicies(); + Collection allMemPlcs = dbMgr.dataRegions(); assertTrue(allMemPlcs.size() == 3); verifyDefaultAndSystemMemoryPolicies(allMemPlcs); - MemoryPolicy dfltMemPlc = U.field(dbMgr, "dfltMemPlc"); + DataRegion dfltMemPlc = U.field(dbMgr, "dfltDataRegion"); assertTrue(dfltMemPlc.config().getMaxSize() == USER_CUSTOM_MEM_PLC_SIZE); } @@ -220,7 +220,7 @@ public void testCachesOnUserDefinedDefaultMemoryPolicy() throws Exception { private void verifyCacheMemoryPolicy(IgniteCache cache, String plcName) { GridCacheContext ctx = ((IgniteCacheProxy) cache).context(); - assertEquals(plcName, ctx.memoryPolicy().config().getName()); + assertEquals(plcName, ctx.dataRegion().config().getName()); } /** @@ -278,12 +278,12 @@ private void prepareCustomConfigWithOverridingDefaultAndCustom() { /** * @param allMemPlcs Collection of all memory policies. 
*/ - private void verifyDefaultAndSystemMemoryPolicies(Collection allMemPlcs) { + private void verifyDefaultAndSystemMemoryPolicies(Collection allMemPlcs) { assertTrue("Default memory policy is not presented", isMemoryPolicyPresented(allMemPlcs, DFLT_MEM_PLC_DEFAULT_NAME)); assertTrue("System memory policy is not presented", - isMemoryPolicyPresented(allMemPlcs, IgniteCacheDatabaseSharedManager.SYSTEM_MEMORY_POLICY_NAME)); + isMemoryPolicyPresented(allMemPlcs, IgniteCacheDatabaseSharedManager.SYSTEM_DATA_REGION_NAME)); } /** @@ -303,8 +303,8 @@ private void prepareCustomNoDefaultConfig() { * @param memPlcs Collection of memory policies. * @param nameToVerify Excepted name of memory policy. */ - private boolean isMemoryPolicyPresented(Collection memPlcs, String nameToVerify) { - for (MemoryPolicy memPlc : memPlcs) { + private boolean isMemoryPolicyPresented(Collection memPlcs, String nameToVerify) { + for (DataRegion memPlc : memPlcs) { if (nameToVerify.equals(memPlc.config().getName())) return true; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsCacheRestoreTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsCacheRestoreTest.java index 25626f4bb33c3..577cf9a27376a 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsCacheRestoreTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsCacheRestoreTest.java @@ -21,9 +21,9 @@ import java.util.List; import org.apache.ignite.IgniteCache; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import 
org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; @@ -40,6 +40,9 @@ public class IgnitePdsCacheRestoreTest extends GridCommonAbstractTest { /** */ private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + /** Non-persistent data region name. */ + private static final String NO_PERSISTENCE_REGION = "no-persistence-region"; + /** */ private CacheConfiguration[] ccfgs; @@ -55,17 +58,18 @@ public class IgnitePdsCacheRestoreTest extends GridCommonAbstractTest { ccfgs = null; } - MemoryConfiguration memCfg = new MemoryConfiguration(); - memCfg.setPageSize(1024); - memCfg.setDefaultMemoryPolicySize(10 * 1024 * 1024); - - cfg.setMemoryConfiguration(memCfg); - - PersistentStoreConfiguration pCfg = new PersistentStoreConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(10 * 1024 * 1024).setPersistenceEnabled(true)) + .setPageSize(1024) + .setWalMode(WALMode.LOG_ONLY); - pCfg.setWalMode(WALMode.LOG_ONLY); + memCfg.setDataRegionConfigurations(new DataRegionConfiguration() + .setMaxSize(10 * 1024 * 1024) + .setName(NO_PERSISTENCE_REGION) + .setPersistenceEnabled(false)); - cfg.setPersistentStoreConfiguration(pCfg); + cfg.setDataStorageConfiguration(memCfg); return cfg; } @@ -137,14 +141,22 @@ private void restoreAndNewCache(boolean createNew) throws Exception { IgniteCache cache2 = ignite(2).cache("c2"); + IgniteCache cache3 = ignite(2).cache("c3"); + for (Integer key : keys) { assertEquals(key, cache1.get(key)); assertNull(cache2.get(key)); + assertNull(cache3.get(key)); + cache2.put(key, key); assertEquals(key, cache2.get(key)); + + cache3.put(key, key); + + assertEquals(key, cache3.get(key)); } List nearKeys = nearKeys(cache1, 10, 0); @@ 
-152,6 +164,10 @@ private void restoreAndNewCache(boolean createNew) throws Exception { for (Integer key : nearKeys) { assertNull(cache1.get(key)); assertNull(cache2.get(key)); + assertNull(cache3.get(key)); + + cache3.put(key, key); + assertEquals(key, cache3.get(key)); cache2.put(key, key); assertEquals(key, cache2.get(key)); @@ -165,6 +181,8 @@ private void restoreAndNewCache(boolean createNew) throws Exception { awaitPartitionMapExchange(); for (Integer key : nearKeys) { + assertEquals(key, cache3.get(key)); + assertEquals(key, cache2.get(key)); assertEquals(key, cache1.get(key)); @@ -186,10 +204,13 @@ private CacheConfiguration[] configurations1() { * @return Configurations set 1. */ private CacheConfiguration[] configurations2() { - CacheConfiguration[] ccfgs = new CacheConfiguration[2]; + CacheConfiguration[] ccfgs = new CacheConfiguration[3]; ccfgs[0] = cacheConfiguration("c1"); ccfgs[1] = cacheConfiguration("c2"); + ccfgs[2] = cacheConfiguration("c3"); + + ccfgs[2].setDataRegionName(NO_PERSISTENCE_REGION); return ccfgs; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsMultiNodePutGetRestartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsMultiNodePutGetRestartTest.java index b8db8027ff147..615e1083c7dbb 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsMultiNodePutGetRestartTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsMultiNodePutGetRestartTest.java @@ -30,9 +30,10 @@ import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import 
org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; @@ -40,7 +41,6 @@ import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; -import org.apache.ignite.configuration.MemoryConfiguration; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; @@ -61,18 +61,12 @@ public class IgnitePdsMultiNodePutGetRestartTest extends GridCommonAbstractTest @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(100 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(100 * 1024 * 1024); - memPlcCfg.setMaxSize(100 * 1024 * 1024); - - dbCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - dbCfg.setMemoryPolicies(memPlcCfg); - - cfg.setMemoryConfiguration(dbCfg); + cfg.setDataStorageConfiguration(memCfg); CacheConfiguration ccfg = new CacheConfiguration(CACHE_NAME); @@ -86,7 +80,6 @@ public class IgnitePdsMultiNodePutGetRestartTest extends GridCommonAbstractTest cfg.setCacheConfiguration(ccfg); - cfg.setPersistentStoreConfiguration(new 
PersistentStoreConfiguration()); cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(IP_FINDER)); cfg.setMarshaller(null); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsPageEvictionDuringPartitionClearTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsPageEvictionDuringPartitionClearTest.java index c1bec350adb7d..3dfdc57393cb9 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsPageEvictionDuringPartitionClearTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsPageEvictionDuringPartitionClearTest.java @@ -25,10 +25,9 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; @@ -60,26 +59,12 @@ public class IgnitePdsPageEvictionDuringPartitionClearTest extends GridCommonAbs cfg.setCacheConfiguration(ccfg); // Intentionally set small page cache size. 
+ DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(70 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setInitialSize(70 * 1024 * 1024); - memPlcCfg.setMaxSize(70 * 1024 * 1024); - - memPlcCfg.setName("dfltMemPlc"); - - MemoryConfiguration memCfg = new MemoryConfiguration(); - - memCfg.setMemoryPolicies(memPlcCfg); - - memCfg.setDefaultMemoryPolicyName(memPlcCfg.getName()); - - cfg.setMemoryConfiguration(memCfg); - - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setWalMode(WALMode.LOG_ONLY) - ); + cfg.setDataStorageConfiguration(memCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsPageEvictionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsPageEvictionTest.java index 13cd8b4f1e8e2..47d0cb2c869da 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsPageEvictionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsPageEvictionTest.java @@ -27,10 +27,10 @@ import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.WALMode; import 
org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; @@ -58,22 +58,14 @@ public class IgnitePdsPageEvictionTest extends GridCommonAbstractTest { @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(50 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY) + .setPageSize(1024) + .setConcurrencyLevel(Runtime.getRuntime().availableProcessors() * 4); - memCfg.setConcurrencyLevel(Runtime.getRuntime().availableProcessors() * 4); - - memCfg.setPageSize(1024); - - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(50 * 1024 * 1024); - memPlcCfg.setMaxSize(50 * 1024 * 1024); - - memCfg.setMemoryPolicies(memPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); CacheConfiguration ccfg = new CacheConfiguration<>(CACHE_NAME); @@ -84,8 +76,6 @@ public class IgnitePdsPageEvictionTest extends GridCommonAbstractTest { cfg.setCacheConfiguration(ccfg); - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); - cfg.setDiscoverySpi( new TcpDiscoverySpi() .setIpFinder(IP_FINDER) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsRebalancingOnNotStableTopologyTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsRebalancingOnNotStableTopologyTest.java index 546a87ac739cd..893ecb5b317c1 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsRebalancingOnNotStableTopologyTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsRebalancingOnNotStableTopologyTest.java @@ -28,10 +28,10 @@ import org.apache.ignite.cache.PartitionLossPolicy; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.apache.ignite.testframework.junits.multijvm.IgniteProcessProxy; @@ -167,23 +167,13 @@ public void test() throws Exception { cfg.setCacheConfiguration(ccfg); - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setCheckpointingFrequency(CHECKPOINT_FREQUENCY) - ); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(200 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY) + .setCheckpointFrequency(CHECKPOINT_FREQUENCY); - MemoryConfiguration memCfg = new MemoryConfiguration(); - - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(200 * 1024 * 1024); - memPlcCfg.setMaxSize(200 * 1024 * 1024); - - memCfg.setMemoryPolicies(memPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - 
cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsTransactionsHangTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsTransactionsHangTest.java index 7e8cfac07f888..f3aee08b773fc 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsTransactionsHangTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsTransactionsHangTest.java @@ -32,10 +32,9 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.TransactionConfiguration; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager; import org.apache.ignite.internal.util.typedef.internal.U; @@ -123,26 +122,21 @@ public class IgnitePdsTransactionsHangTest extends GridCommonAbstractTest { cfg.setTransactionConfiguration(txCfg); - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setWalHistorySize(1) - .setCheckpointingFrequency(CHECKPOINT_FREQUENCY) - ); + DataRegionConfiguration memPlcCfg = new DataRegionConfiguration(); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); + memPlcCfg.setName("dfltDataRegion"); 
memPlcCfg.setInitialSize(PAGE_CACHE_SIZE * 1024 * 1024); memPlcCfg.setMaxSize(PAGE_CACHE_SIZE * 1024 * 1024); + memPlcCfg.setPersistenceEnabled(true); - MemoryConfiguration memCfg = new MemoryConfiguration(); - - memCfg.setMemoryPolicies(memPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); + DataStorageConfiguration memCfg = new DataStorageConfiguration(); + memCfg.setDefaultDataRegionConfiguration(memPlcCfg); + memCfg.setWalHistorySize(1); + memCfg.setCheckpointFrequency(CHECKPOINT_FREQUENCY); memCfg.setPageSize(PAGE_SIZE * 1024); - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsWholeClusterRestartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsWholeClusterRestartTest.java index df5bfdf8a6f20..91380f07a47cf 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsWholeClusterRestartTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsWholeClusterRestartTest.java @@ -28,10 +28,9 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.spi.checkpoint.noop.NoopCheckpointSpi; @@ -56,18 
+55,12 @@ public class IgnitePdsWholeClusterRestartTest extends GridCommonAbstractTest { @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(100 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(100 * 1024 * 1024); - memPlcCfg.setMaxSize(100 * 1024 * 1024); - - dbCfg.setMemoryPolicies(memPlcCfg); - dbCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(dbCfg); + cfg.setDataStorageConfiguration(memCfg); CacheConfiguration ccfg1 = new CacheConfiguration(); @@ -85,11 +78,6 @@ public class IgnitePdsWholeClusterRestartTest extends GridCommonAbstractTest { cfg.setCacheConfiguration(ccfg1); - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setWalMode(WALMode.LOG_ONLY) - ); - cfg.setConsistentId(gridName); return cfg; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/DefaultPageSizeBackwardsCompatibilityTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/DefaultPageSizeBackwardsCompatibilityTest.java index e577886ad46b8..9e01f7becb376 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/DefaultPageSizeBackwardsCompatibilityTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/DefaultPageSizeBackwardsCompatibilityTest.java @@ -23,10 +23,9 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode; import 
org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; @@ -58,20 +57,20 @@ public class DefaultPageSizeBackwardsCompatibilityTest extends GridCommonAbstrac TcpDiscoverySpi discoverySpi = (TcpDiscoverySpi)cfg.getDiscoverySpi(); discoverySpi.setIpFinder(IP_FINDER); - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration(); if (set2kPageSize) memCfg.setPageSize(2048); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); + DataRegionConfiguration memPlcCfg = new DataRegionConfiguration(); memPlcCfg.setMaxSize(100 * 1000 * 1000); + memPlcCfg.setName("dfltDataRegion"); + memPlcCfg.setPersistenceEnabled(true); - memPlcCfg.setName("dfltMemPlc"); + memCfg.setDefaultDataRegionConfiguration(memPlcCfg); + memCfg.setCheckpointFrequency(3_000); - memCfg.setMemoryPolicies(memPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); CacheConfiguration ccfg1 = new CacheConfiguration(); @@ -82,8 +81,6 @@ public class DefaultPageSizeBackwardsCompatibilityTest extends GridCommonAbstrac cfg.setCacheConfiguration(ccfg1); - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration().setCheckpointingFrequency(3_000)); - cfg.setConsistentId(gridName); return cfg; 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCacheIntegrationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCacheIntegrationTest.java index d36894f4194ff..7d51b461ea5af 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCacheIntegrationTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCacheIntegrationTest.java @@ -31,10 +31,9 @@ import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.cache.GridCacheAdapter; @@ -65,25 +64,13 @@ public class IgnitePdsCacheIntegrationTest extends GridCommonAbstractTest { @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(100 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY) + .setConcurrencyLevel(Runtime.getRuntime().availableProcessors() * 4); - 
dbCfg.setConcurrencyLevel(Runtime.getRuntime().availableProcessors() * 4); - - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(100 * 1024 * 1024); - memPlcCfg.setMaxSize(100 * 1024 * 1024); - - dbCfg.setMemoryPolicies(memPlcCfg); - dbCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setWalMode(WALMode.LOG_ONLY) - ); - - cfg.setMemoryConfiguration(dbCfg); + cfg.setDataStorageConfiguration(memCfg); CacheConfiguration ccfg = new CacheConfiguration(CACHE_NAME); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java index b4c32d87de10d..5ae8969358496 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java @@ -37,9 +37,9 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cache.CacheRebalanceMode; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; @@ -106,15 +106,12 @@ public class 
IgnitePdsCheckpointSimulationWithRealCpDisabledTest extends GridCom cfg.setCacheConfiguration(ccfg); - MemoryConfiguration dbCfg = new MemoryConfiguration(); - - cfg.setMemoryConfiguration(dbCfg); - - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setCheckpointingFrequency(500) + cfg.setDataStorageConfiguration( + new DataStorageConfiguration() + .setCheckpointFrequency(500) .setWalMode(WALMode.LOG_ONLY) .setAlwaysWriteFullPages(true) + .setDefaultDataRegionConfiguration(new DataRegionConfiguration().setPersistenceEnabled(true)) ); TcpDiscoverySpi discoSpi = new TcpDiscoverySpi(); @@ -163,7 +160,7 @@ public void testCheckpointSimulationMultiThreaded() throws Exception { // Otherwise we will violate page store integrity rules. ig.cache(cacheName).put(0, 0); - PageMemory mem = shared.database().memoryPolicy(null).pageMemory(); + PageMemory mem = shared.database().dataRegion(null).pageMemory(); IgniteBiTuple, WALPointer> res; @@ -192,7 +189,7 @@ public void testCheckpointSimulationMultiThreaded() throws Exception { dbMgr.enableCheckpoints(false).get(); - mem = shared.database().memoryPolicy(null).pageMemory(); + mem = shared.database().dataRegion(null).pageMemory(); verifyReads(res.get1(), mem, res.get2(), shared.wal()); } @@ -214,7 +211,7 @@ public void testGetForInitialWrite() throws Exception { // Disable integrated checkpoint thread. 
dbMgr.enableCheckpoints(false); - PageMemory mem = shared.database().memoryPolicy(null).pageMemory(); + PageMemory mem = shared.database().dataRegion(null).pageMemory(); IgniteWriteAheadLogManager wal = shared.wal(); @@ -415,7 +412,7 @@ public void testPageWalEntries() throws Exception { int cacheId = sharedCtx.cache().cache(cacheName).context().cacheId(); GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)sharedCtx.database(); - PageMemory pageMem = sharedCtx.database().memoryPolicy(null).pageMemory(); + PageMemory pageMem = sharedCtx.database().dataRegion(null).pageMemory(); IgniteWriteAheadLogManager wal = sharedCtx.wal(); db.enableCheckpoints(false).get(); @@ -520,7 +517,7 @@ public void testDirtyFlag() throws Exception { // Disable integrated checkpoint thread. dbMgr.enableCheckpoints(false); - PageMemoryEx mem = (PageMemoryEx) dbMgr.memoryPolicy(null).pageMemory(); + PageMemoryEx mem = (PageMemoryEx) dbMgr.dataRegion(null).pageMemory(); ig.context().cache().context().database().checkpointReadLock(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsEvictionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsEvictionTest.java index a9b0892cb59d1..47a4b7b7f935b 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsEvictionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsEvictionTest.java @@ -24,10 +24,10 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.DataPageEvictionMode; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import 
org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.pagemem.FullPageId; @@ -69,9 +69,7 @@ public class IgnitePdsEvictionTest extends GridCommonAbstractTest { @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { final IgniteConfiguration cfg = super.getConfiguration(gridName); - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); - - cfg.setMemoryConfiguration(createDbConfig()); + cfg.setDataStorageConfiguration(createDbConfig()); cfg.setCacheConfiguration(new CacheConfiguration<>(cacheName)); @@ -81,19 +79,20 @@ public class IgnitePdsEvictionTest extends GridCommonAbstractTest { /** * @return DB config. */ - private MemoryConfiguration createDbConfig() { - final MemoryConfiguration memCfg = new MemoryConfiguration(); + private DataStorageConfiguration createDbConfig() { + final DataStorageConfiguration memCfg = new DataStorageConfiguration(); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); + DataRegionConfiguration memPlcCfg = new DataRegionConfiguration(); memPlcCfg.setInitialSize(MEMORY_LIMIT); memPlcCfg.setMaxSize(MEMORY_LIMIT); memPlcCfg.setPageEvictionMode(DataPageEvictionMode.RANDOM_LRU); - memPlcCfg.setName("dfltMemPlc"); + memPlcCfg.setName("dfltDataRegion"); + memPlcCfg.setPersistenceEnabled(true); memCfg.setPageSize(PAGE_SIZE); memCfg.setConcurrencyLevel(NUMBER_OF_SEGMENTS); - memCfg.setMemoryPolicies(memPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); + memCfg.setDefaultDataRegionConfiguration(memPlcCfg); + memCfg.setWalMode(WALMode.LOG_ONLY); return memCfg; } @@ -290,7 +289,7 @@ private PageMemory getMemory(IgniteEx ig) throws Exception { 
final IgniteCacheDatabaseSharedManager db = sharedCtx.database(); - return db.memoryPolicy(null).pageMemory(); + return db.dataRegion(null).pageMemory(); } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsNoActualWalHistoryTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsNoActualWalHistoryTest.java index 1779fcefb143e..61f92c5796aab 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsNoActualWalHistoryTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsNoActualWalHistoryTest.java @@ -27,9 +27,9 @@ import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; @@ -59,19 +59,19 @@ public class IgnitePdsNoActualWalHistoryTest extends GridCommonAbstractTest { cfg.setCacheConfiguration(ccfg); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration dbCfg = new DataStorageConfiguration(); dbCfg.setPageSize(4 * 1024); - cfg.setMemoryConfiguration(dbCfg); + cfg.setDataStorageConfiguration(dbCfg); - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setWalSegmentSize(4 * 1024 * 1024) - .setWalHistorySize(2) - .setWalSegments(10) - 
.setWalMode(WALMode.LOG_ONLY) - ); + dbCfg.setWalSegmentSize(4 * 1024 * 1024) + .setWalHistorySize(2) + .setWalSegments(10) + .setWalMode(WALMode.LOG_ONLY) + .setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setMaxSize(100 * 1024 * 1024) + .setPersistenceEnabled(true)); cfg.setMarshaller(null); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsThreadInterruptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsThreadInterruptionTest.java index 2a007685bde24..4b55aed7e7403 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsThreadInterruptionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsThreadInterruptionTest.java @@ -17,22 +17,20 @@ package org.apache.ignite.internal.processors.cache.persistence.db.file; +import java.util.concurrent.atomic.AtomicReference; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.processors.cache.persistence.file.AsyncFileIOFactory; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.jsr166.ThreadLocalRandom8; -import java.util.concurrent.atomic.AtomicReference; - import static 
org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; /** @@ -59,46 +57,27 @@ public class IgnitePdsThreadInterruptionTest extends GridCommonAbstractTest { @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { final IgniteConfiguration cfg = super.getConfiguration(gridName); - cfg.setPersistentStoreConfiguration(storeConfiguration()); - - cfg.setMemoryConfiguration(memoryConfiguration()); + cfg.setDataStorageConfiguration(memoryConfiguration()); cfg.setCacheConfiguration(new CacheConfiguration<>(cacheName)); return cfg; } - /** - * @return Store config. - */ - private PersistentStoreConfiguration storeConfiguration() { - PersistentStoreConfiguration cfg = new PersistentStoreConfiguration(); - - cfg.setWalMode(WALMode.LOG_ONLY); - - cfg.setWalFsyncDelayNanos(0); - - cfg.setFileIOFactory(new AsyncFileIOFactory()); - - return cfg; - } - /** * @return Memory config. */ - private MemoryConfiguration memoryConfiguration() { - final MemoryConfiguration memCfg = new MemoryConfiguration(); - - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - // memPlcCfg.setPageEvictionMode(RANDOM_LRU); TODO Fix NPE on start. 
- memPlcCfg.setName("dfltMemPlc"); - - memCfg.setPageSize(PAGE_SIZE); - memCfg.setConcurrencyLevel(1); - memCfg.setMemoryPolicies(memPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - return memCfg; + private DataStorageConfiguration memoryConfiguration() { + return new DataStorageConfiguration() + .setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setName("dfltMemPlc") + .setPersistenceEnabled(true) + /*.setPageEvictionMode(DataPageEvictionMode.RANDOM_LRU) TODO: fix NPE on start */) + .setPageSize(PAGE_SIZE) + .setConcurrencyLevel(1) + .setWalMode(WALMode.LOG_ONLY) + .setWalFsyncDelayNanos(0) + .setFileIOFactory(new AsyncFileIOFactory()); } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/filename/IgniteUidAsConsistentIdMigrationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/filename/IgniteUidAsConsistentIdMigrationTest.java index fe7e4df8fb225..1f322decc24f2 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/filename/IgniteUidAsConsistentIdMigrationTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/filename/IgniteUidAsConsistentIdMigrationTest.java @@ -27,10 +27,9 @@ import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor; import org.apache.ignite.internal.util.typedef.internal.U; import 
org.apache.ignite.testframework.GridStringLogger; @@ -129,7 +128,7 @@ private void deleteWorkFiles() throws IgniteCheckedException { if (configuredConsistentId != null) cfg.setConsistentId(configuredConsistentId); - final PersistentStoreConfiguration psCfg = new PersistentStoreConfiguration(); + final DataStorageConfiguration dsCfg = new DataStorageConfiguration(); if (placeStorageInTemp) { final File tempDir = new File(System.getProperty("java.io.tmpdir")); @@ -138,19 +137,16 @@ private void deleteWorkFiles() throws IgniteCheckedException { pstWalStoreCustomPath = new File(tempDir, "WalStore"); pstWalArchCustomPath = new File(tempDir, "WalArchive"); - psCfg.setPersistentStorePath(pstStoreCustomPath.getAbsolutePath()); - psCfg.setWalStorePath(pstWalStoreCustomPath.getAbsolutePath()); - psCfg.setWalArchivePath(pstWalArchCustomPath.getAbsolutePath()); + dsCfg.setStoragePath(pstStoreCustomPath.getAbsolutePath()); + dsCfg.setWalPath(pstWalStoreCustomPath.getAbsolutePath()); + dsCfg.setWalArchivePath(pstWalArchCustomPath.getAbsolutePath()); } - cfg.setPersistentStoreConfiguration(psCfg); + dsCfg.setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setMaxSize(32 * 1024 * 1024) + .setPersistenceEnabled(true)); - final MemoryConfiguration memCfg = new MemoryConfiguration(); - final MemoryPolicyConfiguration memPolCfg = new MemoryPolicyConfiguration(); - - memPolCfg.setMaxSize(32 * 1024 * 1024); // we don't need much memory for this test - memCfg.setMemoryPolicies(memPolCfg); - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(dsCfg); if (strLog != null) cfg.setGridLogger(strLog); @@ -665,8 +661,8 @@ private void assertNodeIndexesInFolder(Integer... 
indexes) throws IgniteCheckedE */ private void assertPdsDirsDefaultExist(String subDirName) throws IgniteCheckedException { assertDirectoryExist("binary_meta", subDirName); - assertDirectoryExist(PersistentStoreConfiguration.DFLT_WAL_STORE_PATH, subDirName); - assertDirectoryExist(PersistentStoreConfiguration.DFLT_WAL_ARCHIVE_PATH, subDirName); + assertDirectoryExist(DataStorageConfiguration.DFLT_WAL_PATH, subDirName); + assertDirectoryExist(DataStorageConfiguration.DFLT_WAL_ARCHIVE_PATH, subDirName); assertDirectoryExist(PdsConsistentIdProcessor.DB_DEFAULT_FOLDER, subDirName); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgnitePdsWalTlbTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgnitePdsWalTlbTest.java index a06d587a4f944..5700eb3d7c808 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgnitePdsWalTlbTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgnitePdsWalTlbTest.java @@ -21,9 +21,9 @@ import org.apache.ignite.IgniteDataStreamer; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; @@ -52,24 +52,14 @@ public class IgnitePdsWalTlbTest extends GridCommonAbstractTest { cfg.setCacheConfiguration(ccfg); - MemoryConfiguration memCfg = new 
MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(100 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY) + .setCheckpointPageBufferSize(DFLT_CHECKPOINTING_PAGE_BUFFER_SIZE + 1) + .setWalThreadLocalBufferSize(640000000); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(100 * 1024 * 1024); - memPlcCfg.setMaxSize(100 * 1024 * 1024); - - memCfg.setMemoryPolicies(memPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(memCfg); - - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setCheckpointingPageBufferSize(DFLT_CHECKPOINTING_PAGE_BUFFER_SIZE + 1) - .setTlbSize(640000000) - ); + cfg.setDataStorageConfiguration(memCfg); TcpDiscoverySpi discoSpi = new TcpDiscoverySpi(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushFailoverTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushFailoverTest.java index 12ec6efe8ba52..af8f679218214 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushFailoverTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushFailoverTest.java @@ -24,10 +24,9 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import 
org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.GridKernalState; import org.apache.ignite.internal.IgniteEx; @@ -83,23 +82,15 @@ public class IgniteWalFlushFailoverTest extends GridCommonAbstractTest { cfg.setCacheConfiguration(cacheCfg); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration() - .setName("dfltMemPlc") - .setInitialSize(2 * 1024L * 1024L * 1024L); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(2048L * 1024 * 1024).setPersistenceEnabled(true)) + .setFileIOFactory(new FailingFileIOFactory()) + .setWalMode(WALMode.BACKGROUND) + // Setting WAL Segment size to high values forces flushing by timeout. + .setWalSegmentSize(flushByTimeout ? 500_000 : 50_000); - MemoryConfiguration memCfg = new MemoryConfiguration() - .setMemoryPolicies(memPlcCfg) - .setDefaultMemoryPolicyName(memPlcCfg.getName()); - - cfg.setMemoryConfiguration(memCfg); - - PersistentStoreConfiguration storeCfg = new PersistentStoreConfiguration() - .setFileIOFactory(new FailingFileIOFactory()) - .setWalMode(WALMode.BACKGROUND) - // Setting WAL Segment size to high values forces flushing by timeout. - .setWalSegmentSize(flushByTimeout ? 
500_000 : 50_000); - - cfg.setPersistentStoreConfiguration(storeCfg); + cfg.setDataStorageConfiguration(memCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalHistoryReservationsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalHistoryReservationsTest.java index 5d5458e783ae8..35d85d1b63b28 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalHistoryReservationsTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalHistoryReservationsTest.java @@ -27,10 +27,10 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; @@ -57,20 +57,12 @@ public class IgniteWalHistoryReservationsTest extends GridCommonAbstractTest { cfg.setClientMode(client); - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(200 * 1024 * 1024).setPersistenceEnabled(true)) + 
.setWalMode(WALMode.LOG_ONLY); - long memSize = 200L * 1024L * 1024L; - - memCfg.setMemoryPolicies( - new MemoryPolicyConfiguration() - .setInitialSize(memSize) - .setMaxSize(memSize) - .setName("dfltMemPlc") - ); - - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); CacheConfiguration ccfg1 = new CacheConfiguration(); @@ -82,8 +74,6 @@ public class IgniteWalHistoryReservationsTest extends GridCommonAbstractTest { cfg.setCacheConfiguration(ccfg1); - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); - return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryPPCTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryPPCTest.java new file mode 100644 index 0000000000000..f3c2c99f4854e --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryPPCTest.java @@ -0,0 +1,321 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence.db.wal; + +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheRebalanceMode; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.annotations.QuerySqlField; +import org.apache.ignite.configuration.BinaryConfiguration; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.WALMode; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +/** + * + */ +public class IgniteWalRecoveryPPCTest extends GridCommonAbstractTest { + /** */ + private boolean fork; + + /** */ + public static final String CACHE_NAME_1 = "cache_1"; + + /** */ + public static final String CACHE_NAME_2 = "cache_2"; + + /** */ + public static final String MEM_PLC_NO_PDS = "mem_plc_2"; + + /** */ + private int walSegmentSize; + + /** Logger only. 
*/ + private boolean logOnly; + + /** {@inheritDoc} */ + @Override protected boolean isMultiJvm() { + return fork; + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + CacheConfiguration ccfg = new CacheConfiguration<>(CACHE_NAME_1); + + ccfg.setAtomicityMode(CacheAtomicityMode.ATOMIC); + ccfg.setRebalanceMode(CacheRebalanceMode.SYNC); + ccfg.setAffinity(new RendezvousAffinityFunction(false, 32)); + + cfg.setCacheConfiguration(ccfg); + + CacheConfiguration ccfg2 = new CacheConfiguration<>(CACHE_NAME_2); + + ccfg2.setAtomicityMode(CacheAtomicityMode.ATOMIC); + ccfg2.setRebalanceMode(CacheRebalanceMode.SYNC); + ccfg2.setAffinity(new RendezvousAffinityFunction(false, 32)); + ccfg2.setDataRegionName(MEM_PLC_NO_PDS); + + cfg.setCacheConfiguration(ccfg, ccfg2); + + DataStorageConfiguration dbCfg = new DataStorageConfiguration(); + dbCfg.setPageSize(4 * 1024); + + DataRegionConfiguration memPlcCfg = new DataRegionConfiguration(); + memPlcCfg.setInitialSize(1024 * 1024 * 1024); + memPlcCfg.setMaxSize(1024 * 1024 * 1024); + memPlcCfg.setPersistenceEnabled(true); + + dbCfg.setDefaultDataRegionConfiguration(memPlcCfg); + + DataRegionConfiguration memPlcCfg2 = new DataRegionConfiguration(); + memPlcCfg2.setName(MEM_PLC_NO_PDS); + memPlcCfg2.setInitialSize(1024 * 1024 * 1024); + memPlcCfg2.setMaxSize(1024 * 1024 * 1024); + memPlcCfg2.setPersistenceEnabled(false); + + dbCfg.setDataRegionConfigurations(memPlcCfg2); + + dbCfg.setWalRecordIteratorBufferSize(1024 * 1024); + + dbCfg.setWalHistorySize(2); + + dbCfg.setWalMode(WALMode.LOG_ONLY); + + if (walSegmentSize != 0) + dbCfg.setWalSegmentSize(walSegmentSize); + + cfg.setDataStorageConfiguration(dbCfg); + + cfg.setMarshaller(null); + + BinaryConfiguration binCfg = new BinaryConfiguration(); + + binCfg.setCompactFooter(false); + + cfg.setBinaryConfiguration(binCfg); + + return cfg; + } + + /** 
{@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + stopAllGrids(); + + deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false)); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false)); + } + + /** + * @throws Exception if failed. + */ + public void testWalSimple() throws Exception { + try { + IgniteEx ignite = startGrid(1); + + ignite.active(true); + + IgniteCache cache1 = ignite.cache(CACHE_NAME_1); + IgniteCache cache2 = ignite.cache(CACHE_NAME_2); + + info(" --> step1"); + + for (int i = 0; i < 10_000; i += 2) { + cache1.put(i, new IndexedObject(i)); + cache2.put(i, new IndexedObject(i + 1)); + } + + info(" --> step2"); + + for (int i = 0; i < 10_000; i += 3) { + cache1.put(i, new IndexedObject(i * 2)); + cache2.put(i, new IndexedObject(i * 2 + 1)); + } + + info(" --> step3"); + + for (int i = 0; i < 10_000; i += 7) { + cache1.put(i, new IndexedObject(i * 3)); + cache2.put(i, new IndexedObject(i * 3 + 1)); + } + + info(" --> check1"); + + // Check. + for (int i = 0; i < 10_000; i++) { + IndexedObject o; + IndexedObject o1; + + if (i % 7 == 0) { + o = new IndexedObject(i * 3); + o1 = new IndexedObject(i * 3 + 1); + } + else if (i % 3 == 0) { + o = new IndexedObject(i * 2); + o1 = new IndexedObject(i * 2 + 1); + } + else if (i % 2 == 0) { + o = new IndexedObject(i); + o1 = new IndexedObject(i + 1); + } + else { + o = null; + o1 = null; + } + + assertEquals(o, cache1.get(i)); + assertEquals(o1, cache2.get(i)); + } + + stopGrid(1); + + ignite = startGrid(1); + + ignite.active(true); + + cache1 = ignite.cache(CACHE_NAME_1); + cache2 = ignite.cache(CACHE_NAME_2); + + info(" --> check2"); + + // Check. 
+ for (int i = 0; i < 10_000; i++) { + IndexedObject o; + + if (i % 7 == 0) + o = new IndexedObject(i * 3); + else if (i % 3 == 0) + o = new IndexedObject(i * 2); + else if (i % 2 == 0) + o = new IndexedObject(i); + else + o = null; + + assertEquals(o, cache1.get(i)); + assertEquals(null, cache2.get(i)); + } + + info(" --> ok"); + } + finally { + stopAllGrids(); + } + } + + /** + * + */ + public void testDynamicallyStartedNonPersistentCache() throws Exception { + try { + IgniteEx ignite = startGrid(1); + + ignite.active(true); + + IgniteCache dynamicPersistent = ignite.getOrCreateCache( + new CacheConfiguration() + .setAtomicityMode(CacheAtomicityMode.ATOMIC) + .setRebalanceMode(CacheRebalanceMode.SYNC) + .setName("dynamicPersistent") + .setAffinity(new RendezvousAffinityFunction(false, 32))); + + IgniteCache dynamicVolatile = ignite.getOrCreateCache( + new CacheConfiguration() + .setAtomicityMode(CacheAtomicityMode.ATOMIC) + .setRebalanceMode(CacheRebalanceMode.SYNC) + .setDataRegionName(MEM_PLC_NO_PDS) + .setName("dynamicVolatile") + .setAffinity(new RendezvousAffinityFunction(false, 32))); + + for (int i = 0; i < 10_000; i++) { + dynamicPersistent.put(i, new IndexedObject(i)); + dynamicVolatile.put(i, new IndexedObject(i + 1)); + } + + stopGrid(1); + + ignite = startGrid(1); + + ignite.active(true); + + dynamicPersistent = ignite.cache("dynamicPersistent"); + dynamicVolatile = ignite.cache("dynamicVolatile"); + + for (int i = 0; i < 10_000; i++) + assertEquals(new IndexedObject(i), dynamicPersistent.get(i)); + + assertNull(dynamicVolatile); + + } + finally { + stopAllGrids(); + } + } + + /** + * + */ + private static class IndexedObject { + /** */ + @QuerySqlField(index = true) + private int iVal; + + /** + * @param iVal Integer value. 
+ */ + private IndexedObject(int iVal) { + this.iVal = iVal; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object o) { + if (this == o) + return true; + + if (!(o instanceof IndexedObject)) + return false; + + IndexedObject that = (IndexedObject)o; + + return iVal == that.iVal; + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return iVal; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(IndexedObject.class, this); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoverySeveralRestartsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoverySeveralRestartsTest.java index 9497dc6b6f8ad..699fe81661665 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoverySeveralRestartsTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoverySeveralRestartsTest.java @@ -28,10 +28,9 @@ import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.typedef.internal.S; @@ -80,25 +79,13 @@ public class IgniteWalRecoverySeveralRestartsTest extends GridCommonAbstractTest cfg.setCacheConfiguration(ccfg); - MemoryConfiguration 
dbCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(500 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY) + .setPageSize(PAGE_SIZE); - dbCfg.setPageSize(PAGE_SIZE); - - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(500 * 1024 * 1024); - memPlcCfg.setMaxSize(500 * 1024 * 1024); - - dbCfg.setMemoryPolicies(memPlcCfg); - dbCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(dbCfg); - - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setWalMode(WALMode.LOG_ONLY) - ); + cfg.setDataStorageConfiguration(memCfg); cfg.setMarshaller(null); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java index bf8cd85f6fd5a..b3578779b5699 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java @@ -48,10 +48,9 @@ import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; 
import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; @@ -136,34 +135,30 @@ public class IgniteWalRecoveryTest extends GridCommonAbstractTest { cfg.setCacheConfiguration(ccfg); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration dbCfg = new DataStorageConfiguration(); dbCfg.setPageSize(4 * 1024); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); + DataRegionConfiguration memPlcCfg = new DataRegionConfiguration(); - memPlcCfg.setName("dfltMemPlc"); + memPlcCfg.setName("dfltDataRegion"); memPlcCfg.setInitialSize(1024 * 1024 * 1024); memPlcCfg.setMaxSize(1024 * 1024 * 1024); + memPlcCfg.setPersistenceEnabled(true); - dbCfg.setMemoryPolicies(memPlcCfg); - dbCfg.setDefaultMemoryPolicyName("dfltMemPlc"); + dbCfg.setDefaultDataRegionConfiguration(memPlcCfg); - cfg.setMemoryConfiguration(dbCfg); + dbCfg.setWalRecordIteratorBufferSize(1024 * 1024); - PersistentStoreConfiguration pCfg = new PersistentStoreConfiguration(); - - pCfg.setWalRecordIteratorBufferSize(1024 * 1024); - - pCfg.setWalHistorySize(2); + dbCfg.setWalHistorySize(2); if (logOnly) - pCfg.setWalMode(WALMode.LOG_ONLY); + dbCfg.setWalMode(WALMode.LOG_ONLY); if (walSegmentSize != 0) - pCfg.setWalSegmentSize(walSegmentSize); + dbCfg.setWalSegmentSize(walSegmentSize); - cfg.setPersistentStoreConfiguration(pCfg); + cfg.setDataStorageConfiguration(dbCfg); cfg.setMarshaller(null); @@ -976,7 +971,7 @@ else if (rec instanceof PageDeltaRecord) { delta.applyDelta(sharedCtx .database() - .memoryPolicy(null) + .dataRegion(null) .pageMemory(), ((DirectBuffer)buf1).address()); @@ -990,7 +985,7 @@ else if (rec instanceof PageDeltaRecord) { info("Done apply..."); - PageMemoryEx pageMem = (PageMemoryEx)db.memoryPolicy(null).pageMemory(); + PageMemoryEx pageMem = (PageMemoryEx)db.dataRegion(null).pageMemory(); for (Map.Entry entry : rolledPages.entrySet()) { FullPageId fullId = entry.getKey(); diff --git 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalSerializerVersionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalSerializerVersionTest.java index f31d0f9e77955..dcda829c5d029 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalSerializerVersionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalSerializerVersionTest.java @@ -18,8 +18,9 @@ package org.apache.ignite.internal.processors.cache.persistence.db.wal; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.wal.RecordSerializer; @@ -48,7 +49,10 @@ public class IgniteWalSerializerVersionTest extends GridCommonAbstractTest { ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER); - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); + cfg.setDataStorageConfiguration(new DataStorageConfiguration() + .setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setPersistenceEnabled(true) + .setMaxSize(100 * 1024 * 1024))); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalRecoveryTxLogicalRecordsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalRecoveryTxLogicalRecordsTest.java index f5d46e295cf45..10b61105d1114 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalRecoveryTxLogicalRecordsTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalRecoveryTxLogicalRecordsTest.java @@ -38,9 +38,9 @@ import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.store.PageStore; @@ -107,20 +107,20 @@ public class WalRecoveryTxLogicalRecordsTest extends GridCommonAbstractTest { else cfg.setCacheConfiguration(ccfg); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration dbCfg = new DataStorageConfiguration(); dbCfg.setPageSize(pageSize); - cfg.setMemoryConfiguration(dbCfg); + dbCfg.setWalHistorySize(WAL_HIST_SIZE); - PersistentStoreConfiguration pCfg = new PersistentStoreConfiguration(); - - pCfg.setWalHistorySize(WAL_HIST_SIZE); + dbCfg.setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setMaxSize(100 * 1024 * 1024) + .setPersistenceEnabled(true)); if (checkpointFreq != null) - pCfg.setCheckpointingFrequency(checkpointFreq); + dbCfg.setCheckpointFrequency(checkpointFreq); - cfg.setPersistentStoreConfiguration(pCfg); + cfg.setDataStorageConfiguration(dbCfg); cfg.setMarshaller(null); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java index 6db2784cb6635..4a4010ae1a1ee 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java @@ -45,10 +45,9 @@ import org.apache.ignite.cache.CacheRebalanceMode; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.events.Event; import org.apache.ignite.events.EventType; @@ -130,31 +129,20 @@ public class IgniteWalReaderTest extends GridCommonAbstractTest { cfg.setIncludeEventTypes(EventType.EVT_WAL_SEGMENT_ARCHIVED); - final MemoryConfiguration dbCfg = new MemoryConfiguration(); - - dbCfg.setPageSize(PAGE_SIZE); - - final MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - - memPlcCfg.setName("dfltMemPlc"); - memPlcCfg.setInitialSize(1024 * 1024 * 1024); - memPlcCfg.setMaxSize(1024 * 1024 * 1024); - - dbCfg.setMemoryPolicies(memPlcCfg); - dbCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(dbCfg); - - final PersistentStoreConfiguration pCfg = new PersistentStoreConfiguration(); - pCfg.setWalHistorySize(1); - pCfg.setWalSegmentSize(1024 * 1024); - pCfg.setWalSegments(WAL_SEGMENTS); - pCfg.setWalMode(customWalMode != null ? 
customWalMode : WALMode.BACKGROUND); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(1024 * 1024 * 1024).setPersistenceEnabled(true)) + .setPageSize(PAGE_SIZE) + .setWalHistorySize(1) + .setWalSegmentSize(1024 * 1024) + .setWalSegments(WAL_SEGMENTS) + .setWalMode(customWalMode != null ? customWalMode : WALMode.BACKGROUND); if (archiveIncompleteSegmentAfterInactivityMs > 0) - pCfg.setWalAutoArchiveAfterInactivity(archiveIncompleteSegmentAfterInactivityMs); + memCfg.setWalAutoArchiveAfterInactivity(archiveIncompleteSegmentAfterInactivityMs); + + cfg.setDataStorageConfiguration(memCfg); - cfg.setPersistentStoreConfiguration(pCfg); return cfg; } @@ -492,8 +480,8 @@ public void testTxFillWalAndExtractDataRecords() throws Exception { * @param factory WAL iterator factory. * @param workDir Ignite work directory. * @param subfolderName DB subfolder name based on consistent ID. - * @param expCntEntries minimum expected entries count to find. - * @param expTxCnt minimum expected transaction count to find. + * @param minCntEntries minimum expected entries count to find. + * @param minTxCnt minimum expected transaction count to find. * @param objConsumer object handler, called for each object found in logical data records. * @param dataRecordHnd data handler record * @throws IgniteCheckedException if failed. 
@@ -502,8 +490,8 @@ private void scanIterateAndCount( final IgniteWalIteratorFactory factory, final String workDir, final String subfolderName, - final int expCntEntries, - final int expTxCnt, + final int minCntEntries, + final int minTxCnt, @Nullable final BiConsumer objConsumer, @Nullable final Consumer dataRecordHnd) throws IgniteCheckedException { @@ -538,8 +526,8 @@ private void scanIterateAndCount( final int entriesWork = valuesSum(cntWork.values()); log.info("Archive directory: Tx found " + txCntObservedWork + " entries " + entriesWork); - assert entriesArch + entriesWork >= expCntEntries; - assert txCntObservedWork + txCntObservedArch >= expTxCnt; + assert entriesArch + entriesWork >= minCntEntries; + assert txCntObservedWork + txCntObservedArch >= minTxCnt; } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/MockWalIteratorFactory.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/MockWalIteratorFactory.java index 05636ebeeb576..5c9e0849e337f 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/MockWalIteratorFactory.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/MockWalIteratorFactory.java @@ -21,8 +21,8 @@ import java.io.Serializable; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; @@ -86,20 +86,20 @@ public MockWalIteratorFactory(@Nullable IgniteLogger log, * @throws IgniteCheckedException if 
IO failed */ public WALIterator iterator(File wal, File walArchive) throws IgniteCheckedException { - final PersistentStoreConfiguration persistentCfg1 = Mockito.mock(PersistentStoreConfiguration.class); + final DataStorageConfiguration persistentCfg1 = Mockito.mock(DataStorageConfiguration.class); - when(persistentCfg1.getWalStorePath()).thenReturn(wal.getAbsolutePath()); + when(persistentCfg1.getWalPath()).thenReturn(wal.getAbsolutePath()); when(persistentCfg1.getWalArchivePath()).thenReturn(walArchive.getAbsolutePath()); when(persistentCfg1.getWalSegments()).thenReturn(segments); - when(persistentCfg1.getTlbSize()).thenReturn(PersistentStoreConfiguration.DFLT_TLB_SIZE); - when(persistentCfg1.getWalRecordIteratorBufferSize()).thenReturn(PersistentStoreConfiguration.DFLT_WAL_RECORD_ITERATOR_BUFFER_SIZE); + when(persistentCfg1.getWalThreadLocalBufferSize()).thenReturn(DataStorageConfiguration.DFLT_TLB_SIZE); + when(persistentCfg1.getWalRecordIteratorBufferSize()).thenReturn(DataStorageConfiguration.DFLT_WAL_RECORD_ITERATOR_BUFFER_SIZE); - final FileIOFactory fileIOFactory = new PersistentStoreConfiguration().getFileIOFactory(); + final FileIOFactory fileIOFactory = new DataStorageConfiguration().getFileIOFactory(); when(persistentCfg1.getFileIOFactory()).thenReturn(fileIOFactory); final IgniteConfiguration cfg = Mockito.mock(IgniteConfiguration.class); - when(cfg.getPersistentStoreConfiguration()).thenReturn(persistentCfg1); + when(cfg.getDataStorageConfiguration()).thenReturn(persistentCfg1); final GridKernalContext ctx = Mockito.mock(GridKernalContext.class); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java index 56d09f8b0f399..d6bfe107e70bb 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java @@ -18,7 +18,7 @@ package org.apache.ignite.internal.processors.cache.persistence.pagemem; import java.nio.ByteBuffer; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.mem.DirectMemoryProvider; import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; import org.apache.ignite.internal.pagemem.FullPageId; @@ -26,7 +26,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.database.BPlusTreeSelfTest; import org.apache.ignite.internal.util.typedef.CIX3; import org.apache.ignite.testframework.junits.GridTestKernalContext; @@ -82,7 +82,7 @@ public class BPlusTreePageMemoryImplTest extends BPlusTreeSelfTest { return true; } }, - new MemoryMetricsImpl(new MemoryPolicyConfiguration()), + new DataRegionMetricsImpl(new DataRegionConfiguration()), false ); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java index 39183b2b76480..dabd5320d21a6 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java @@ -18,7 +18,7 @@ package org.apache.ignite.internal.processors.cache.persistence.pagemem; import java.nio.ByteBuffer; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.mem.DirectMemoryProvider; import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; import org.apache.ignite.internal.pagemem.FullPageId; @@ -26,7 +26,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.database.BPlusTreeReuseSelfTest; import org.apache.ignite.internal.util.lang.GridInClosure3X; import org.apache.ignite.internal.util.typedef.CIX3; @@ -82,7 +82,7 @@ public class BPlusTreeReuseListPageMemoryImplTest extends BPlusTreeReuseSelfTest return true; } }, - new MemoryMetricsImpl(new MemoryPolicyConfiguration()), + new DataRegionMetricsImpl(new DataRegionConfiguration()), false ); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/MetadataStoragePageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/MetadataStoragePageMemoryImplTest.java index a427c63fa1ecb..d5492abc53c5c 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/MetadataStoragePageMemoryImplTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/MetadataStoragePageMemoryImplTest.java @@ -19,7 +19,7 @@ import java.io.File; import java.nio.ByteBuffer; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.mem.DirectMemoryProvider; import org.apache.ignite.internal.mem.file.MappedFileMemoryProvider; import org.apache.ignite.internal.pagemem.FullPageId; @@ -27,7 +27,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.database.MetadataStorageSelfTest; import org.apache.ignite.internal.util.lang.GridInClosure3X; import org.apache.ignite.internal.util.typedef.CIX3; @@ -97,7 +97,7 @@ public class MetadataStoragePageMemoryImplTest extends MetadataStorageSelfTest{ return true; } }, - new MemoryMetricsImpl(new MemoryPolicyConfiguration()), + new DataRegionMetricsImpl(new DataRegionConfiguration()), false ); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java index 467ede44d1cdf..db6d321e1b75d 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java +++ 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java @@ -19,7 +19,7 @@ import java.io.File; import java.nio.ByteBuffer; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.mem.DirectMemoryProvider; import org.apache.ignite.internal.mem.file.MappedFileMemoryProvider; import org.apache.ignite.internal.pagemem.FullPageId; @@ -27,7 +27,7 @@ import org.apache.ignite.internal.pagemem.impl.PageMemoryNoLoadSelfTest; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.util.lang.GridInClosure3X; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; import org.apache.ignite.internal.util.typedef.CIX3; @@ -88,7 +88,7 @@ public class PageMemoryImplNoLoadTest extends PageMemoryNoLoadSelfTest { return true; } }, - new MemoryMetricsImpl(new MemoryPolicyConfiguration()), + new DataRegionMetricsImpl(new DataRegionConfiguration()), false ); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java index c5997fa0dfe73..92c5ad6ae857e 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java @@ -18,7 +18,7 @@ package 
org.apache.ignite.internal.processors.cache.persistence.pagemem; import java.nio.ByteBuffer; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.mem.DirectMemoryProvider; import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; @@ -27,7 +27,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.util.lang.GridInClosure3X; import org.apache.ignite.internal.util.typedef.CIX3; import org.apache.ignite.testframework.junits.GridTestKernalContext; @@ -110,7 +110,7 @@ private PageMemoryImpl createPageMemory() throws Exception { return true; } }, - new MemoryMetricsImpl(new MemoryPolicyConfiguration()), + new DataRegionMetricsImpl(new DataRegionConfiguration()), false ); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSandboxTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSandboxTest.java index e3de4937b60a6..30fb4920463ea 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSandboxTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSandboxTest.java @@ -24,15 +24,14 @@ import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteDataStreamer; 
-import org.apache.ignite.MemoryMetrics; +import org.apache.ignite.DataRegionMetrics; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; @@ -66,16 +65,18 @@ public class PagesWriteThrottleSandboxTest extends GridCommonAbstractTest { TcpDiscoverySpi discoverySpi = (TcpDiscoverySpi)cfg.getDiscoverySpi(); discoverySpi.setIpFinder(ipFinder); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration dbCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setMaxSize(4000L * 1024 * 1024) + .setName("dfltDataRegion") + .setMetricsEnabled(true) + .setPersistenceEnabled(true)) + .setWalMode(WALMode.BACKGROUND) + .setCheckpointFrequency(20_000) + .setCheckpointPageBufferSize(1000L * 1000 * 1000) + .setWriteThrottlingEnabled(true); - dbCfg.setMemoryPolicies(new MemoryPolicyConfiguration() - .setMaxSize(4000L * 1024 * 1024) - .setName("dfltMemPlc") - .setMetricsEnabled(true)); - - dbCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(dbCfg); + cfg.setDataStorageConfiguration(dbCfg); CacheConfiguration ccfg1 = new CacheConfiguration(); @@ -86,13 +87,6 @@ public class PagesWriteThrottleSandboxTest extends 
GridCommonAbstractTest { cfg.setCacheConfiguration(ccfg1); - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setWalMode(WALMode.BACKGROUND) - .setCheckpointingFrequency(20_000) - .setCheckpointingPageBufferSize(1000L * 1000 * 1000) - .setWriteThrottlingEnabled(true)); - cfg.setConsistentId(gridName); return cfg; @@ -155,8 +149,8 @@ public void testThrottle() throws Exception { while (run.get()) { long dirtyPages = 0; - for (MemoryMetrics m : ig.memoryMetrics()) - if (m.getName().equals("dfltMemPlc")) + for (DataRegionMetrics m : ig.dataRegionMetrics()) + if (m.getName().equals("dfltDataRegion")) dirtyPages = m.getDirtyPages(); long cpBufPages = 0; @@ -170,7 +164,7 @@ public void testThrottle() throws Exception { try { cpBufPages = ((PageMemoryImpl)((IgniteEx)ignite(0)).context().cache().context().database() - .memoryPolicy("dfltMemPlc").pageMemory()).checkpointBufferPagesCount(); + .dataRegion("dfltDataRegion").pageMemory()).checkpointBufferPagesCount(); } catch (IgniteCheckedException e) { e.printStackTrace(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSmokeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSmokeTest.java index 70a1df8cd85ca..ab7aab4e04a52 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSmokeTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSmokeTest.java @@ -31,10 +31,9 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import 
org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; @@ -64,7 +63,7 @@ public class PagesWriteThrottleSmokeTest extends GridCommonAbstractTest { private static final TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true); /** Slow checkpoint enabled. */ - private final AtomicBoolean slowCheckpointEnabled = new AtomicBoolean(true); + private static final AtomicBoolean slowCheckpointEnabled = new AtomicBoolean(true); /** Cache name. */ private static final String CACHE_NAME = "cache1"; @@ -76,16 +75,20 @@ public class PagesWriteThrottleSmokeTest extends GridCommonAbstractTest { TcpDiscoverySpi discoverySpi = (TcpDiscoverySpi)cfg.getDiscoverySpi(); discoverySpi.setIpFinder(ipFinder); - MemoryConfiguration dbCfg = new MemoryConfiguration(); - - dbCfg.setMemoryPolicies(new MemoryPolicyConfiguration() - .setMaxSize(400 * 1024 * 1024) - .setName("dfltMemPlc") - .setMetricsEnabled(true)); - - dbCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(dbCfg); + DataStorageConfiguration dbCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setMaxSize(400 * 1024 * 1024) + .setName("dfltDataRegion") + .setMetricsEnabled(true) + .setPersistenceEnabled(true)) + .setWalMode(WALMode.BACKGROUND) + .setCheckpointFrequency(20_000) + .setCheckpointPageBufferSize(200 * 1000 * 1000) + .setWriteThrottlingEnabled(true) + .setCheckpointThreads(1) + .setFileIOFactory(new SlowCheckpointFileIOFactory()); + + cfg.setDataStorageConfiguration(dbCfg); CacheConfiguration ccfg1 = new CacheConfiguration(); @@ -96,15 +99,6 @@ public class PagesWriteThrottleSmokeTest 
extends GridCommonAbstractTest { cfg.setCacheConfiguration(ccfg1); - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() - .setWalMode(WALMode.BACKGROUND) - .setCheckpointingFrequency(20_000) - .setCheckpointingPageBufferSize(200 * 1000 * 1000) - .setWriteThrottlingEnabled(true) - .setCheckpointingThreads(1) - .setFileIOFactory(new SlowCheckpointFileIOFactory())); - cfg.setConsistentId(gridName); return cfg; @@ -285,7 +279,7 @@ private void deleteWorkFiles() throws IgniteCheckedException { /** * Create File I/O that emulates poor checkpoint write speed. */ - private class SlowCheckpointFileIOFactory implements FileIOFactory { + private static class SlowCheckpointFileIOFactory implements FileIOFactory { /** Serial version uid. */ private static final long serialVersionUID = 0L; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/AbstractNodeJoinTemplate.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/AbstractNodeJoinTemplate.java index 436db1c07ae77..66e5b172b1612 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/AbstractNodeJoinTemplate.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/AbstractNodeJoinTemplate.java @@ -23,8 +23,9 @@ import java.util.Map; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; 
@@ -308,7 +309,10 @@ protected CacheConfiguration[] allCacheConfigurations() { /** {@inheritDoc} */ protected IgniteConfiguration persistentCfg(IgniteConfiguration cfg) throws Exception { - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); + cfg.setDataStorageConfiguration(new DataStorageConfiguration() + .setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setMaxSize(100 * 1024 * 1024) + .setPersistenceEnabled(true))); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateAbstractTest.java index 4e575cc42be46..71107d4c158c3 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateAbstractTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateAbstractTest.java @@ -23,10 +23,10 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ThreadLocalRandom; import org.apache.ignite.Ignite; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; @@ -340,28 +340,21 @@ private void stopAll(String suffix) { @Override protected IgniteConfiguration 
getConfiguration(final String gridName) throws Exception { final IgniteConfiguration cfg = super.getConfiguration(gridName); - PersistentStoreConfiguration pCfg = new PersistentStoreConfiguration(); + DataStorageConfiguration pCfg = new DataStorageConfiguration(); - pCfg.setPersistentStorePath(testName() + "/db"); + pCfg.setStoragePath(testName() + "/db"); pCfg.setWalArchivePath(testName() + "/db/wal/archive"); - pCfg.setWalStorePath(testName() + "/db/wal"); + pCfg.setWalPath(testName() + "/db/wal"); - cfg.setPersistentStoreConfiguration(pCfg); + pCfg.setPageSize(1024); + pCfg.setConcurrencyLevel(64); - final MemoryConfiguration memCfg = new MemoryConfiguration(); + pCfg.setWalMode(WALMode.LOG_ONLY); - memCfg.setPageSize(1024); - memCfg.setConcurrencyLevel(64); + pCfg.setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(200 * 1024 * 1024).setPersistenceEnabled(true)); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); - memPlcCfg.setInitialSize(200 * 1024 * 1024); - memPlcCfg.setMaxSize(200 * 1024 * 1024); - memPlcCfg.setName("dfltMemPlc"); - - memCfg.setMemoryPolicies(memPlcCfg); - memCfg.setDefaultMemoryPolicyName("dfltMemPlc"); - - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(pCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateServiceTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateServiceTest.java index 44e03573f949e..e6c9ae5976e03 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateServiceTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateServiceTest.java @@ -49,6 +49,8 @@ public class IgniteChangeGlobalStateServiceTest extends 
IgniteChangeGlobalStateA * */ public void testDeployService() throws Exception { + fail("https://issues.apache.org/jira/browse/IGNITE-6629"); + Ignite ig1P = primary(0); Ignite ig1B = backUp(0); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteStandByClusterTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteStandByClusterTest.java index 77f89bada786f..300f9f8e963b0 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteStandByClusterTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteStandByClusterTest.java @@ -28,8 +28,9 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; @@ -68,7 +69,12 @@ public class IgniteStandByClusterTest extends GridCommonAbstractTest { IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(vmIpFinder)); - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); + + cfg.setDataStorageConfiguration(new DataStorageConfiguration() + .setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setMaxSize(100 * 1024 * 1024) + .setPersistenceEnabled(true))); + cfg.setConsistentId(igniteInstanceName); return cfg; @@ -178,7 +184,7 @@ public void 
testStaticCacheStartAfterActivationWithCacheFilter() throws Exceptio for (IgniteEx ig : Arrays.asList(ig1, ig2, ig3)) { Map desc = U.field( - U.field(ig.context().cache(), "cachesInfo"), "registeredCaches"); + (Object)U.field(ig.context().cache(), "cachesInfo"), "registeredCaches"); assertEquals(4, desc.size()); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/extended/GridActivateExtensionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/extended/GridActivateExtensionTest.java index f70dd1edad51c..6ca29d84623b2 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/extended/GridActivateExtensionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/extended/GridActivateExtensionTest.java @@ -21,11 +21,11 @@ import java.util.LinkedHashMap; import java.util.Map; import org.apache.ignite.Ignite; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.cache.GridCacheAbstractFullApiSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheNearOnlyMultiNodeFullApiSelfTest; @@ -61,31 +61,21 @@ public class GridActivateExtensionTest extends GridCacheAbstractFullApiSelfTest cfg.setConsistentId("ConsId" + (condId++)); ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(primaryIpFinder); - 
PersistentStoreConfiguration pCfg = new PersistentStoreConfiguration(); + DataStorageConfiguration pCfg = new DataStorageConfiguration(); - pCfg.setPersistentStorePath(testName + "/db"); + pCfg.setStoragePath(testName + "/db"); pCfg.setWalArchivePath(testName + "/db/wal/archive"); - pCfg.setWalStorePath(testName + "/db/wal"); + pCfg.setWalPath(testName + "/db/wal"); - cfg.setPersistentStoreConfiguration(pCfg); + pCfg.setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(200 * 1024 * 1024).setPersistenceEnabled(true)); - final MemoryConfiguration memCfg = new MemoryConfiguration(); + pCfg.setWalMode(WALMode.LOG_ONLY); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); + pCfg.setPageSize(1024); + pCfg.setConcurrencyLevel(64); - memPlcCfg.setInitialSize(200 * 1024 * 1024); - memPlcCfg.setMaxSize(200 * 1024 * 1024); - - memPlcCfg.setName("dfltMemPlc"); - - memCfg.setMemoryPolicies(memPlcCfg); - - memCfg.setDefaultMemoryPolicyName(memPlcCfg.getName()); - - memCfg.setPageSize(1024); - memCfg.setConcurrencyLevel(64); - - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(pCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/reconnect/IgniteAbstractStandByClientReconnectTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/reconnect/IgniteAbstractStandByClientReconnectTest.java index 59dcce2e23e58..5552d702f6c6e 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/reconnect/IgniteAbstractStandByClientReconnectTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/reconnect/IgniteAbstractStandByClientReconnectTest.java @@ -25,8 +25,9 @@ import java.util.concurrent.CountDownLatch; import org.apache.ignite.cluster.ClusterNode; import 
org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.events.Event; import org.apache.ignite.events.EventType; import org.apache.ignite.internal.IgniteEx; @@ -109,7 +110,11 @@ public abstract class IgniteAbstractStandByClientReconnectTest extends GridCommo cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(clientIpFinder)); } - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); + cfg.setDataStorageConfiguration(new DataStorageConfiguration() + .setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setMaxSize(100 * 1024 * 1024) + .setPersistenceEnabled(true))); + cfg.setConsistentId(name); return cfg; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/IgniteCacheContinuousQueryBackupQueueTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/IgniteCacheContinuousQueryBackupQueueTest.java index 85d68d3f22667..5baa3a7f0c1f7 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/IgniteCacheContinuousQueryBackupQueueTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/IgniteCacheContinuousQueryBackupQueueTest.java @@ -33,8 +33,8 @@ import org.apache.ignite.cache.query.ContinuousQuery; import org.apache.ignite.cache.query.QueryCursor; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.internal.IgniteKernal; import 
org.apache.ignite.internal.processors.continuous.GridContinuousHandler; import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; @@ -90,10 +90,10 @@ public class IgniteCacheContinuousQueryBackupQueueTest extends GridCommonAbstrac ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(ipFinder); - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration(); memCfg.setPageSize(16 * 1024); - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); return cfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxDeadlockCauseTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxDeadlockCauseTest.java index 530009befb896..4e760bc631fdc 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxDeadlockCauseTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxDeadlockCauseTest.java @@ -21,11 +21,9 @@ import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cache.CacheAtomicityMode; -import org.apache.ignite.cache.CacheMode; import org.apache.ignite.configuration.*; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.util.typedef.CAX; -import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; @@ -63,17 +61,10 @@ public class TxDeadlockCauseTest extends GridCommonAbstractTest { cfg.setDiscoverySpi(discoSpi); } - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration().setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(100 * 1024 * 1024)); - 
MemoryPolicyConfiguration plc = new MemoryPolicyConfiguration(); - - plc.setName("dfltPlc"); - plc.setMaxSize(100L * 1024 * 1024); - - memCfg.setDefaultMemoryPolicyName("dfltPlc"); - memCfg.setMemoryPolicies(plc); - - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); CacheConfiguration ccfg0 = ccfg == null ? new CacheConfiguration(DEFAULT_CACHE_NAME) .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL) : ccfg; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxPessimisticDeadlockDetectionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxPessimisticDeadlockDetectionTest.java index 82fa52c63c72d..60f1c96f572d2 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxPessimisticDeadlockDetectionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxPessimisticDeadlockDetectionTest.java @@ -34,8 +34,8 @@ import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.IgniteKernal; @@ -89,17 +89,12 @@ public class TxPessimisticDeadlockDetectionTest extends AbstractDeadlockDetectio cfg.setDiscoverySpi(discoSpi); } - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration().setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + 
.setMaxSize(DataStorageConfiguration.DFLT_DATA_REGION_MAX_SIZE * 10) + .setName("dfltPlc")); - MemoryPolicyConfiguration plc = new MemoryPolicyConfiguration(); - - plc.setName("dfltPlc"); - plc.setMaxSize(MemoryConfiguration.DFLT_MEMORY_POLICY_MAX_SIZE * 10); - - memCfg.setDefaultMemoryPolicyName("dfltPlc"); - memCfg.setMemoryPolicies(plc); - - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); cfg.setClientMode(client); @@ -196,7 +191,7 @@ private IgniteCache createCache(CacheMode cacheMode, CacheWriteSynchronizationMo ccfg.setWriteSynchronizationMode(syncMode); if (cacheMode == LOCAL) - ccfg.setMemoryPolicyName("dfltPlc"); + ccfg.setDataRegionName("dfltPlc"); IgniteCache cache = ignite(0).createCache(ccfg); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java index 9c0d7918e20d4..7b4ca132a108b 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java @@ -33,7 +33,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.Lock; import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; import org.apache.ignite.internal.pagemem.FullPageId; @@ -42,7 +42,7 @@ import org.apache.ignite.internal.pagemem.PageUtils; import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; import org.apache.ignite.internal.processors.cache.persistence.DataStructure; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import 
org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO; @@ -1795,7 +1795,7 @@ private void checkNotRemoved(Long row) { * @return Page memory. */ protected PageMemory createPageMemory() throws Exception { - MemoryPolicyConfiguration plcCfg = new MemoryPolicyConfiguration() + DataRegionConfiguration plcCfg = new DataRegionConfiguration() .setInitialSize(1024 * MB) .setMaxSize(1024 * MB); @@ -1804,7 +1804,7 @@ protected PageMemory createPageMemory() throws Exception { null, PAGE_SIZE, plcCfg, - new MemoryMetricsImpl(plcCfg), true); + new DataRegionMetricsImpl(plcCfg), true); pageMem.start(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/MemoryMetricsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/DataRegionMetricsSelfTest.java similarity index 93% rename from modules/core/src/test/java/org/apache/ignite/internal/processors/database/MemoryMetricsSelfTest.java rename to modules/core/src/test/java/org/apache/ignite/internal/processors/database/DataRegionMetricsSelfTest.java index 7fc1035a210af..22e87b880eda3 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/MemoryMetricsSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/DataRegionMetricsSelfTest.java @@ -17,9 +17,9 @@ package org.apache.ignite.internal.processors.database; import java.util.concurrent.CountDownLatch; -import org.apache.ignite.MemoryMetrics; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.DataRegionMetrics; +import 
org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.ratemetrics.HitRateMetrics; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; @@ -28,9 +28,9 @@ /** * */ -public class MemoryMetricsSelfTest extends GridCommonAbstractTest { +public class DataRegionMetricsSelfTest extends GridCommonAbstractTest { /** */ - private MemoryMetricsImpl memMetrics; + private DataRegionMetricsImpl memMetrics; /** */ private int threadsCnt = 1; @@ -49,9 +49,9 @@ public class MemoryMetricsSelfTest extends GridCommonAbstractTest { /** {@inheritDoc} */ @Override protected void beforeTest() throws Exception { - MemoryPolicyConfiguration plcCfg = new MemoryPolicyConfiguration(); + DataRegionConfiguration plcCfg = new DataRegionConfiguration(); - memMetrics = new MemoryMetricsImpl(plcCfg); + memMetrics = new DataRegionMetricsImpl(plcCfg); memMetrics.enableMetrics(); } @@ -253,7 +253,7 @@ private static class AllocationsIncrementer implements Runnable { private final CountDownLatch startLatch; /** */ - private final MemoryMetricsImpl memMetrics; + private final DataRegionMetricsImpl memMetrics; /** */ private final int iterationsCnt; @@ -267,7 +267,7 @@ private static class AllocationsIncrementer implements Runnable { * @param iterationsCnt Iterations count. * @param delay Delay. 
*/ - private AllocationsIncrementer(CountDownLatch startLatch, MemoryMetricsImpl memMetrics, int iterationsCnt, int delay) { + private AllocationsIncrementer(CountDownLatch startLatch, DataRegionMetricsImpl memMetrics, int iterationsCnt, int delay) { this.startLatch = startLatch; this.memMetrics = memMetrics; this.iterationsCnt = iterationsCnt; @@ -305,7 +305,7 @@ private static class AllocationRateWatcher implements Runnable { private final CountDownLatch startLatch; /** */ - private final MemoryMetrics memMetrics; + private final DataRegionMetrics memMetrics; /** */ private final int delay; @@ -315,7 +315,7 @@ private static class AllocationRateWatcher implements Runnable { * @param memMetrics Mem metrics. * @param delay Delay. */ - private AllocationRateWatcher(CountDownLatch startLatch, MemoryMetrics memMetrics, int delay) { + private AllocationRateWatcher(CountDownLatch startLatch, DataRegionMetrics memMetrics, int delay) { this.startLatch = startLatch; this.memMetrics = memMetrics; this.delay = delay; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListImplSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListImplSelfTest.java index c190b1d559eb5..72a1d819981a2 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListImplSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/FreeListImplSelfTest.java @@ -29,7 +29,7 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageMemory; @@ -40,8 +40,8 @@ import 
org.apache.ignite.internal.processors.cache.CacheObjectValueContext; import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; -import org.apache.ignite.internal.processors.cache.persistence.MemoryPolicy; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.evict.NoOpPageEvictionTracker; import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList; import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeListImpl; @@ -315,13 +315,13 @@ protected void checkInsertDeleteSingleThreaded(int pageSize) throws Exception { /** * @return Page memory. */ - protected PageMemory createPageMemory(int pageSize, MemoryPolicyConfiguration plcCfg) throws Exception { + protected PageMemory createPageMemory(int pageSize, DataRegionConfiguration plcCfg) throws Exception { PageMemory pageMem = new PageMemoryNoStoreImpl(log, new UnsafeMemoryProvider(log), null, pageSize, plcCfg, - new MemoryMetricsImpl(plcCfg), + new DataRegionMetricsImpl(plcCfg), true); pageMem.start(); @@ -335,7 +335,7 @@ protected PageMemory createPageMemory(int pageSize, MemoryPolicyConfiguration pl * @throws Exception If failed. 
*/ protected FreeList createFreeList(int pageSize) throws Exception { - MemoryPolicyConfiguration plcCfg = new MemoryPolicyConfiguration() + DataRegionConfiguration plcCfg = new DataRegionConfiguration() .setInitialSize(1024 * MB) .setMaxSize(1024 * MB); @@ -343,9 +343,9 @@ protected FreeList createFreeList(int pageSize) throws Exception { long metaPageId = pageMem.allocatePage(1, 1, PageIdAllocator.FLAG_DATA); - MemoryMetricsImpl metrics = new MemoryMetricsImpl(plcCfg); + DataRegionMetricsImpl metrics = new DataRegionMetricsImpl(plcCfg); - MemoryPolicy memPlc = new MemoryPolicy(pageMem, plcCfg, metrics, new NoOpPageEvictionTracker()); + DataRegion memPlc = new DataRegion(pageMem, plcCfg, metrics, new NoOpPageEvictionTracker()); return new FreeListImpl(1, "freelist", metrics, memPlc, null, null, metaPageId, true); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbAbstractTest.java index c9e583fd55d33..1d5b624b429b7 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbAbstractTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbAbstractTest.java @@ -22,8 +22,8 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; @@ -63,7 +63,7 @@ public abstract class IgniteDbAbstractTest extends GridCommonAbstractTest { 
@Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration dbCfg = new DataStorageConfiguration(); if (client) cfg.setClientMode(true); @@ -77,7 +77,7 @@ public abstract class IgniteDbAbstractTest extends GridCommonAbstractTest { configure(dbCfg); - cfg.setMemoryConfiguration(dbCfg); + cfg.setDataStorageConfiguration(dbCfg); CacheConfiguration ccfg = new CacheConfiguration(DEFAULT_CACHE_NAME); @@ -149,9 +149,9 @@ protected void configure(IgniteConfiguration cfg){ } /** - * @param mCfg MemoryConfiguration. + * @param mCfg DataStorageConfiguration. */ - protected void configure(MemoryConfiguration mCfg){ + protected void configure(DataStorageConfiguration mCfg){ // No-op. } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbDynamicCacheSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbDynamicCacheSelfTest.java index e7454826ca82e..e5c0e8ac3e34d 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbDynamicCacheSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbDynamicCacheSelfTest.java @@ -25,9 +25,9 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; import org.apache.ignite.internal.util.typedef.internal.U; import 
org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; @@ -39,18 +39,10 @@ public class IgniteDbDynamicCacheSelfTest extends GridCommonAbstractTest { @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - MemoryConfiguration dbCfg = new MemoryConfiguration(); + DataStorageConfiguration memCfg = new DataStorageConfiguration().setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(200 * 1024 * 1024)); - MemoryPolicyConfiguration plc = new MemoryPolicyConfiguration(); - - plc.setName("dfltPlc"); - plc.setInitialSize(200 * 1024 * 1024); - plc.setMaxSize(200 * 1024 * 1024); - - dbCfg.setDefaultMemoryPolicyName("dfltPlc"); - dbCfg.setMemoryPolicies(plc); - - cfg.setMemoryConfiguration(dbCfg); + cfg.setDataStorageConfiguration(memCfg); if (gridName.equals("client")) cfg.setClientMode(true); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbMemoryLeakAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbMemoryLeakAbstractTest.java index 93e5181093032..c4e8bee951efc 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbMemoryLeakAbstractTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbMemoryLeakAbstractTest.java @@ -20,9 +20,9 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import org.apache.ignite.IgniteCache; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.IgniteEx; import 
org.apache.ignite.internal.processors.cache.IgniteCacheProxy; import org.apache.ignite.internal.processors.cache.persistence.DataStructure; @@ -34,7 +34,7 @@ */ public abstract class IgniteDbMemoryLeakAbstractTest extends IgniteDbAbstractTest { /** */ - private static final int CONCURRENCY_LEVEL = 8; + private static final int CONCURRENCY_LEVEL = 16; /** */ private static final int MIN_PAGE_CACHE_SIZE = 1048576 * CONCURRENCY_LEVEL; @@ -76,13 +76,13 @@ public abstract class IgniteDbMemoryLeakAbstractTest extends IgniteDbAbstractTes } /** {@inheritDoc} */ - @Override protected void configure(MemoryConfiguration mCfg) { + @Override protected void configure(DataStorageConfiguration mCfg) { mCfg.setConcurrencyLevel(CONCURRENCY_LEVEL); long size = (1024 * (isLargePage() ? 16 : 1) + 24) * pagesMax(); - mCfg.setDefaultMemoryPolicyName("default").setMemoryPolicies( - new MemoryPolicyConfiguration().setMaxSize(Math.max(size, MIN_PAGE_CACHE_SIZE)).setName("default")); + mCfg.setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(Math.max(size, MIN_PAGE_CACHE_SIZE)).setName("default")); } /** @@ -234,7 +234,7 @@ public void testMemoryLeak() throws Exception { * @throws Exception If failed. 
*/ protected final void check(IgniteCache cache) throws Exception { - long pagesActual = ((IgniteCacheProxy)cache).context().memoryPolicy().pageMemory().loadedPages(); + long pagesActual = ((IgniteCacheProxy)cache).context().dataRegion().pageMemory().loadedPages(); if (loadedPages > 0) { delta += pagesActual - loadedPages; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/MetadataStorageSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/MetadataStorageSelfTest.java index dcd4ce196a2cf..880e37e1fb351 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/MetadataStorageSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/MetadataStorageSelfTest.java @@ -23,12 +23,12 @@ import java.util.Random; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicLong; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.mem.DirectMemoryProvider; import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.MetadataStorage; import org.apache.ignite.internal.mem.file.MappedFileMemoryProvider; import org.apache.ignite.internal.pagemem.PageMemory; @@ -156,7 +156,7 @@ private static String randomName() { protected PageMemory memory(boolean clean) throws Exception { DirectMemoryProvider provider = new MappedFileMemoryProvider(log(), allocationPath); - MemoryPolicyConfiguration plcCfg = new MemoryPolicyConfiguration() + DataRegionConfiguration plcCfg 
= new DataRegionConfiguration() .setMaxSize(30 * 1024 * 1024).setInitialSize(30 * 1024 * 1024); return new PageMemoryNoStoreImpl( @@ -165,7 +165,7 @@ protected PageMemory memory(boolean clean) throws Exception { null, PAGE_SIZE, plcCfg, - new MemoryMetricsImpl(plcCfg), + new DataRegionMetricsImpl(plcCfg), true); } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/SwapPathConstructionSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/SwapPathConstructionSelfTest.java index 53e5daf81f96d..f22128c4d136f 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/SwapPathConstructionSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/SwapPathConstructionSelfTest.java @@ -22,23 +22,23 @@ import java.nio.file.Paths; import java.util.Map; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; -import org.apache.ignite.internal.processors.cache.persistence.MemoryPolicy; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; /** - * Test verifies correct construction of swap file path {@link MemoryPolicyConfiguration#setSwapFilePath(String)} + * Test verifies correct construction of swap file path {@link DataRegionConfiguration#setSwapPath(String)} * when absolute or 
relative paths are provided via configuration. */ public class SwapPathConstructionSelfTest extends GridCommonAbstractTest { /** */ - private MemoryConfiguration memCfg; + private DataStorageConfiguration memCfg; /** */ private static final String RELATIVE_SWAP_PATH = "relSwapPath"; @@ -50,7 +50,7 @@ public class SwapPathConstructionSelfTest extends GridCommonAbstractTest { @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); return cfg; } @@ -118,7 +118,7 @@ public void testAbsoluteSwapFilePath() throws Exception { private String extractDefaultPageMemoryAllocPath(GridKernalContext context) { IgniteCacheDatabaseSharedManager dbMgr = context.cache().context().database(); - Map memPlcMap = U.field(dbMgr, "memPlcMap"); + Map memPlcMap = U.field(dbMgr, "dataRegionMap"); PageMemory pageMem = memPlcMap.get("default").pageMemory(); @@ -128,22 +128,22 @@ private String extractDefaultPageMemoryAllocPath(GridKernalContext context) { } /** - * @param isRelativePath flag is set to {@code true} if relative path should be used for memory policy configuration. + * @param isRelativePath flag is set to {@code true} if relative path should be used for data region configuration. 
*/ - private MemoryConfiguration createMemoryConfiguration(boolean isRelativePath) { - MemoryConfiguration memCfg = new MemoryConfiguration(); + private DataStorageConfiguration createMemoryConfiguration(boolean isRelativePath) { + DataStorageConfiguration memCfg = new DataStorageConfiguration(); - MemoryPolicyConfiguration memPlcCfg = new MemoryPolicyConfiguration(); + DataRegionConfiguration memPlcCfg = new DataRegionConfiguration(); memPlcCfg.setName("default"); memPlcCfg.setMaxSize(20 * 1024 * 1024); if (isRelativePath) - memPlcCfg.setSwapFilePath(RELATIVE_SWAP_PATH); + memPlcCfg.setSwapPath(RELATIVE_SWAP_PATH); else - memPlcCfg.setSwapFilePath(Paths.get(getTmpDir(), ABSOLUTE_SWAP_PATH).toString()); + memPlcCfg.setSwapPath(Paths.get(getTmpDir(), ABSOLUTE_SWAP_PATH).toString()); - memCfg.setMemoryPolicies(memPlcCfg); + memCfg.setDefaultDataRegionConfiguration(memPlcCfg); return memCfg; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsIgniteMock.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsIgniteMock.java index 1e5fcd1074c2c..a0ce28559e847 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsIgniteMock.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsIgniteMock.java @@ -17,6 +17,9 @@ package org.apache.ignite.internal.processors.igfs; +import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.DataRegionMetricsAdapter; +import org.apache.ignite.DataStorageMetricsAdapter; import org.apache.ignite.IgniteAtomicLong; import org.apache.ignite.IgniteAtomicReference; import org.apache.ignite.IgniteAtomicSequence; @@ -38,6 +41,7 @@ import org.apache.ignite.IgniteServices; import org.apache.ignite.IgniteSet; import org.apache.ignite.IgniteTransactions; +import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.MemoryMetrics; import org.apache.ignite.PersistenceMetrics; import 
org.apache.ignite.cache.affinity.Affinity; @@ -584,26 +588,41 @@ public void rebalanceEnabled(boolean rebalanceEnabled) { } /** {@inheritDoc} */ - @Override public Collection memoryMetrics() { + @Override public Collection dataRegionMetrics() { throwUnsupported(); return null; } /** {@inheritDoc} */ - @Nullable @Override public MemoryMetrics memoryMetrics(String memPlcName) { + @Nullable @Override public DataRegionMetrics dataRegionMetrics(String memPlcName) { throwUnsupported(); return null; } /** {@inheritDoc} */ - @Override public PersistenceMetrics persistentStoreMetrics() { + @Override public DataStorageMetrics dataStorageMetrics() { throwUnsupported(); return null; } + /** {@inheritDoc} */ + @Override public Collection memoryMetrics() { + return DataRegionMetricsAdapter.collectionOf(dataRegionMetrics()); + } + + /** {@inheritDoc} */ + @Nullable @Override public MemoryMetrics memoryMetrics(String memPlcName) { + return DataRegionMetricsAdapter.valueOf(dataRegionMetrics(memPlcName)); + } + + /** {@inheritDoc} */ + @Override public PersistenceMetrics persistentStoreMetrics() { + return DataStorageMetricsAdapter.valueOf(dataStorageMetrics()); + } + /** * Throw {@link UnsupportedOperationException}. 
*/ diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsSizeSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsSizeSelfTest.java index 597efe13cfe98..0acb1d3012925 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsSizeSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsSizeSelfTest.java @@ -24,10 +24,10 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.FileSystemConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.igfs.IgfsGroupDataBlocksKeyMapper; import org.apache.ignite.igfs.IgfsInputStream; @@ -396,14 +396,14 @@ private void checkOversize() throws Exception { @Override public void apply(IgniteConfiguration cfg) { String memPlcName = "igfsDataMemPlc"; - cfg.setMemoryConfiguration(new MemoryConfiguration().setMemoryPolicies( - new MemoryPolicyConfiguration().setMaxSize(maxSize).setInitialSize(maxSize).setName(memPlcName))); + cfg.setDataStorageConfiguration(new DataStorageConfiguration().setDataRegionConfigurations( + new DataRegionConfiguration().setMaxSize(maxSize).setInitialSize(maxSize).setName(memPlcName))); FileSystemConfiguration igfsCfg = cfg.getFileSystemConfiguration()[0]; - igfsCfg.getDataCacheConfiguration().setMemoryPolicyName(memPlcName); + igfsCfg.getDataCacheConfiguration().setDataRegionName(memPlcName); - cfg.setCacheConfiguration(new 
CacheConfiguration().setName("QQQ").setMemoryPolicyName(memPlcName)); + cfg.setCacheConfiguration(new CacheConfiguration().setName("QQQ").setDataRegionName(memPlcName)); } }; diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteMock.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteMock.java index 779f0959a0c29..c08e144262925 100644 --- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteMock.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/IgniteMock.java @@ -21,6 +21,10 @@ import java.util.UUID; import java.util.concurrent.ExecutorService; import javax.management.MBeanServer; +import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.DataRegionMetricsAdapter; +import org.apache.ignite.DataStorageMetrics; +import org.apache.ignite.DataStorageMetricsAdapter; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteAtomicLong; import org.apache.ignite.IgniteAtomicReference; @@ -469,20 +473,35 @@ public IgniteMock( } /** {@inheritDoc} */ - @Override public Collection memoryMetrics() { + @Override public Collection dataRegionMetrics() { return null; } /** {@inheritDoc} */ - @Nullable @Override public MemoryMetrics memoryMetrics(String memPlcName) { + @Nullable @Override public DataRegionMetrics dataRegionMetrics(String memPlcName) { return null; } /** {@inheritDoc} */ - @Override public PersistenceMetrics persistentStoreMetrics() { + @Override public DataStorageMetrics dataStorageMetrics() { return null; } + /** {@inheritDoc} */ + @Override public Collection memoryMetrics() { + return DataRegionMetricsAdapter.collectionOf(dataRegionMetrics()); + } + + /** {@inheritDoc} */ + @Nullable @Override public MemoryMetrics memoryMetrics(String memPlcName) { + return DataRegionMetricsAdapter.valueOf(dataRegionMetrics(memPlcName)); + } + + /** {@inheritDoc} */ + @Override public PersistenceMetrics persistentStoreMetrics() { + return 
DataStorageMetricsAdapter.valueOf(dataStorageMetrics()); + } + /** * @param staticCfg Configuration. */ diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/multijvm/IgniteProcessProxy.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/multijvm/IgniteProcessProxy.java index 2f91e408d6b32..86a374a02592e 100644 --- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/multijvm/IgniteProcessProxy.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/multijvm/IgniteProcessProxy.java @@ -26,6 +26,9 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import javax.cache.CacheException; +import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.DataRegionMetricsAdapter; +import org.apache.ignite.DataStorageMetricsAdapter; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteAtomicLong; import org.apache.ignite.IgniteAtomicReference; @@ -49,6 +52,7 @@ import org.apache.ignite.IgniteServices; import org.apache.ignite.IgniteSet; import org.apache.ignite.IgniteTransactions; +import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.MemoryMetrics; import org.apache.ignite.PersistenceMetrics; import org.apache.ignite.cache.affinity.Affinity; @@ -742,20 +746,35 @@ public void rebalanceEnabled(boolean rebalanceEnabled) { } /** {@inheritDoc} */ - @Override public Collection memoryMetrics() { + @Override public Collection dataRegionMetrics() { throw new UnsupportedOperationException("Operation isn't supported yet."); } /** {@inheritDoc} */ - @Nullable @Override public MemoryMetrics memoryMetrics(String memPlcName) { + @Nullable @Override public DataRegionMetrics dataRegionMetrics(String memPlcName) { throw new UnsupportedOperationException("Operation isn't supported yet."); } /** {@inheritDoc} */ - @Override public PersistenceMetrics persistentStoreMetrics() { + @Override public DataStorageMetrics dataStorageMetrics() { throw new 
UnsupportedOperationException("Operation isn't supported yet."); } + /** {@inheritDoc} */ + @Override public Collection memoryMetrics() { + return DataRegionMetricsAdapter.collectionOf(dataRegionMetrics()); + } + + /** {@inheritDoc} */ + @Nullable @Override public MemoryMetrics memoryMetrics(String memPlcName) { + return DataRegionMetricsAdapter.valueOf(dataRegionMetrics(memPlcName)); + } + + /** {@inheritDoc} */ + @Override public PersistenceMetrics persistentStoreMetrics() { + return DataStorageMetricsAdapter.valueOf(dataStorageMetrics()); + } + /** {@inheritDoc} */ @Override public void close() throws IgniteException { if (locJvmGrid != null) { diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java index 5c4d7fd5f8420..7ca94671f483c 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java @@ -54,7 +54,7 @@ import org.apache.ignite.internal.processors.database.BPlusTreeReuseSelfTest; import org.apache.ignite.internal.processors.database.BPlusTreeSelfTest; import org.apache.ignite.internal.processors.database.FreeListImplSelfTest; -import org.apache.ignite.internal.processors.database.MemoryMetricsSelfTest; +import org.apache.ignite.internal.processors.database.DataRegionMetricsSelfTest; import org.apache.ignite.internal.processors.database.MetadataStorageSelfTest; import org.apache.ignite.internal.processors.database.SwapPathConstructionSelfTest; import org.apache.ignite.internal.processors.odbc.OdbcConfigurationValidationSelfTest; @@ -175,7 +175,7 @@ public static TestSuite suite(@Nullable final Set ignoredTests) throws Ex suite.addTestSuite(BPlusTreeReuseSelfTest.class); suite.addTestSuite(MetadataStorageSelfTest.class); suite.addTestSuite(FreeListImplSelfTest.class); - 
suite.addTestSuite(MemoryMetricsSelfTest.class); + suite.addTestSuite(DataRegionMetricsSelfTest.class); suite.addTestSuite(SwapPathConstructionSelfTest.class); suite.addTestSuite(IgniteMarshallerCacheFSRestoreTest.class); diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java index e8810bbab0761..047550dc4583c 100755 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java @@ -78,7 +78,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheSwapPreloadSelfTest; import org.apache.ignite.internal.processors.cache.GridCacheTtlManagerSelfTest; import org.apache.ignite.internal.processors.cache.GridCacheTxPartitionedLocalStoreSelfTest; -import org.apache.ignite.internal.processors.cache.GridMemoryConfigurationConsistencySelfTest; +import org.apache.ignite.internal.processors.cache.GridDataStorageConfigurationConsistencySelfTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicInvokeTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicLocalInvokeTest; import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicLocalWithStoreInvokeTest; @@ -225,7 +225,7 @@ public static TestSuite suite(Set ignoredTests) throws Exception { // suite.addTestSuite(GridCacheP2PUndeploySelfTest.class); suite.addTestSuite(GridCacheConfigurationValidationSelfTest.class); suite.addTestSuite(GridCacheConfigurationConsistencySelfTest.class); - suite.addTestSuite(GridMemoryConfigurationConsistencySelfTest.class); + suite.addTestSuite(GridDataStorageConfigurationConsistencySelfTest.class); suite.addTestSuite(GridCacheJdbcBlobStoreSelfTest.class); suite.addTestSuite(GridCacheJdbcBlobStoreMultithreadedSelfTest.class); suite.addTestSuite(JdbcTypesDefaultTransformerTest.class); diff --git 
a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java index 31ad015ad1c04..6f5b7108d49b0 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java @@ -29,7 +29,7 @@ import org.apache.ignite.internal.processors.cache.CacheEnumOperationsTest; import org.apache.ignite.internal.processors.cache.CacheExchangeMessageDuplicatedStateTest; import org.apache.ignite.internal.processors.cache.CacheGroupLocalConfigurationSelfTest; -import org.apache.ignite.internal.processors.cache.CacheMemoryPolicyConfigurationTest; +import org.apache.ignite.internal.processors.cache.CacheDataRegionConfigurationTest; import org.apache.ignite.internal.processors.cache.CacheOptimisticTransactionsWithFilterSingleServerTest; import org.apache.ignite.internal.processors.cache.CacheOptimisticTransactionsWithFilterTest; import org.apache.ignite.internal.processors.cache.CrossCacheTxNearEnabledRandomOperationsTest; @@ -262,7 +262,7 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(CacheConfigurationLeakTest.class)); suite.addTest(new TestSuite(MemoryPolicyConfigValidationTest.class)); suite.addTest(new TestSuite(MemoryPolicyInitializationTest.class)); - suite.addTest(new TestSuite(CacheMemoryPolicyConfigurationTest.class)); + suite.addTest(new TestSuite(CacheDataRegionConfigurationTest.class)); suite.addTest(new TestSuite(CacheGroupLocalConfigurationSelfTest.class)); suite.addTest(new TestSuite(CacheEnumOperationsSingleNodeTest.class)); suite.addTest(new TestSuite(CacheEnumOperationsTest.class)); diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java index d92d848bea1a7..b1e80eaabdfbc 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java @@ -18,12 +18,12 @@ package org.apache.ignite.testsuites; import junit.framework.TestSuite; +import org.apache.ignite.internal.processors.cache.persistence.IgniteDataStorageMetricsSelfTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsContinuousRestartTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsContinuousRestartTestWithSharedGroupAndIndexes; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsExchangeDuringCheckpointTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsPageSizesTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsRecoveryAfterFileCorruptionTest; -import org.apache.ignite.internal.processors.cache.persistence.IgnitePersistenceMetricsSelfTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePersistentStoreDataStructuresTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsPageEvictionDuringPartitionClearTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsRebalancingOnNotStableTopologyTest; @@ -53,7 +53,7 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(IgnitePdsPageSizesTest.class); // Metrics test. 
- suite.addTestSuite(IgnitePersistenceMetricsSelfTest.class); + suite.addTestSuite(IgniteDataStorageMetricsSelfTest.class); suite.addTestSuite(IgnitePdsTransactionsHangTest.class); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java index 5c3e1bd0fcb09..3194a4cd409d7 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java @@ -110,7 +110,7 @@ public H2TreeIndex( name, cctx.offheap().reuseListForIndex(name), cctx.groupId(), - cctx.memoryPolicy().pageMemory(), + cctx.dataRegion().pageMemory(), cctx.shared().wal(), cctx.offheap().globalRemoveId(), tbl.rowFactory(), diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgnitePdsSingleNodeWithIndexingPutGetPersistenceTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgnitePdsSingleNodeWithIndexingPutGetPersistenceTest.java index 998e1e4385af5..4a32dfdd50ef3 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgnitePdsSingleNodeWithIndexingPutGetPersistenceTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgnitePdsSingleNodeWithIndexingPutGetPersistenceTest.java @@ -18,7 +18,7 @@ package org.apache.ignite.internal.processors.cache; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.processors.database.IgniteDbSingleNodeWithIndexingPutGetTest; import org.apache.ignite.internal.util.typedef.internal.U; @@ -33,8 +33,8 @@ 
public class IgnitePdsSingleNodeWithIndexingPutGetPersistenceTest extends Ignite @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - cfg.setPersistentStoreConfiguration( - new PersistentStoreConfiguration() + cfg.setDataStorageConfiguration( + new DataStorageConfiguration() .setWalMode(WALMode.LOG_ONLY) ); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheDistributedPartitionQueryAbstractSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheDistributedPartitionQueryAbstractSelfTest.java index 0a0afb4063df2..079dcdf09dd5e 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheDistributedPartitionQueryAbstractSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheDistributedPartitionQueryAbstractSelfTest.java @@ -43,8 +43,9 @@ import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.internal.IgniteInterruptedCheckedException; import org.apache.ignite.internal.util.GridRandom; import org.apache.ignite.internal.util.typedef.F; @@ -136,9 +137,10 @@ public abstract class IgniteCacheDistributedPartitionQueryAbstractSelfTest exten @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(gridName); - MemoryConfiguration memCfg = new 
MemoryConfiguration().setDefaultMemoryPolicySize(20 * 1024 * 1024); + DataStorageConfiguration memCfg = new DataStorageConfiguration().setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(20 * 1024 * 1024)); - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); TcpDiscoverySpi spi = (TcpDiscoverySpi)cfg.getDiscoverySpi(); spi.setIpFinder(IP_FINDER); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheQueryNodeRestartSelfTest2.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheQueryNodeRestartSelfTest2.java index 943a5c8bf2c91..627b3eb34ea0d 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheQueryNodeRestartSelfTest2.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheQueryNodeRestartSelfTest2.java @@ -33,8 +33,9 @@ import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.IgniteInterruptedCheckedException; @@ -90,9 +91,10 @@ public class IgniteCacheQueryNodeRestartSelfTest2 extends GridCommonAbstractTest @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { IgniteConfiguration c = super.getConfiguration(igniteInstanceName); - MemoryConfiguration memCfg = new 
MemoryConfiguration().setDefaultMemoryPolicySize(50 * 1024 * 1024); + DataStorageConfiguration memCfg = new DataStorageConfiguration().setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(50 * 1024 * 1024)); - c.setMemoryConfiguration(memCfg); + c.setDataStorageConfiguration(memCfg); TcpDiscoverySpi disco = new TcpDiscoverySpi(); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractTest.java index b25359adbed7c..2beea8b452567 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractTest.java @@ -31,9 +31,9 @@ import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; @@ -259,16 +259,10 @@ protected IgniteConfiguration commonConfiguration(int idx) throws Exception { cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(IP_FINDER)); - MemoryConfiguration memCfg = new MemoryConfiguration() - .setDefaultMemoryPolicyName("default") - .setMemoryPolicies( - new MemoryPolicyConfiguration() - .setName("default") - .setMaxSize(128 * 1024 * 1024L) - .setInitialSize(128 * 1024 * 1024L) - ); + 
DataStorageConfiguration memCfg = new DataStorageConfiguration().setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(128 * 1024 * 1024)); - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); return optimize(cfg); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractSelfTest.java index 70197f5c4c991..a39283b0ca332 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractSelfTest.java @@ -28,9 +28,9 @@ import org.apache.ignite.cache.query.SqlQuery; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.binary.BinaryMarshaller; import org.apache.ignite.internal.util.typedef.T2; @@ -141,16 +141,10 @@ protected IgniteConfiguration commonConfiguration(int idx) throws Exception { cfg.setMarshaller(new BinaryMarshaller()); - MemoryConfiguration memCfg = new MemoryConfiguration() - .setDefaultMemoryPolicyName("default") - .setMemoryPolicies( - new MemoryPolicyConfiguration() - .setName("default") - .setMaxSize(128 * 1024 * 1024L) - .setInitialSize(128 * 1024 * 1024L) - ); + DataStorageConfiguration memCfg = new DataStorageConfiguration().setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(128 * 1024 * 
1024)); - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(memCfg); return optimize(cfg); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/LongIndexNameTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/LongIndexNameTest.java index 544eb74b4923d..ab0d520b5abdf 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/LongIndexNameTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/LongIndexNameTest.java @@ -27,7 +27,7 @@ import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.jetbrains.annotations.NotNull; @@ -44,7 +44,7 @@ public class LongIndexNameTest extends GridCommonAbstractTest { /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { return super.getConfiguration(igniteInstanceName) - .setPersistentStoreConfiguration(new PersistentStoreConfiguration()) + .setDataStorageConfiguration(new DataStorageConfiguration()) .setCacheConfiguration(new CacheConfiguration("cache") .setQueryEntities(getIndexCfg()) .setAffinity(new RendezvousAffinityFunction(false, 16))); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbSingleNodeWithIndexingWalRestoreTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbSingleNodeWithIndexingWalRestoreTest.java index 8f6afd8fe3d53..54667dfbe5aba 100644 --- 
a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbSingleNodeWithIndexingWalRestoreTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbSingleNodeWithIndexingWalRestoreTest.java @@ -27,8 +27,10 @@ import org.apache.ignite.cache.QueryIndex; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.WALMode; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; import org.apache.ignite.internal.util.typedef.F; @@ -111,7 +113,12 @@ public RegularPerson(String regName) { cfg.setCacheConfiguration(indexedCacheCfg); - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(100 * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(WALMode.LOG_ONLY); + + cfg.setDataStorageConfiguration(memCfg); cfg.setConsistentId(gridName); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreQueryWithMultipleClassesPerCacheTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreQueryWithMultipleClassesPerCacheTest.java index 5bb1eb13eb06b..c37dbdaedc9a2 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreQueryWithMultipleClassesPerCacheTest.java +++ 
b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreQueryWithMultipleClassesPerCacheTest.java @@ -27,7 +27,7 @@ import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; @@ -56,11 +56,11 @@ public class IgnitePersistentStoreQueryWithMultipleClassesPerCacheTest extends G cfg.setCacheConfiguration(cacheCfg(CACHE_NAME)); - PersistentStoreConfiguration pCfg = new PersistentStoreConfiguration(); + DataStorageConfiguration pCfg = new DataStorageConfiguration(); - pCfg.setCheckpointingFrequency(1000); + pCfg.setCheckpointFrequency(1000); - cfg.setPersistentStoreConfiguration(pCfg); + cfg.setDataStorageConfiguration(pCfg); return cfg; } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreSchemaLoadTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreSchemaLoadTest.java index a408596ca9291..14749546d9766 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreSchemaLoadTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreSchemaLoadTest.java @@ -27,8 +27,9 @@ import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import 
org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; import org.apache.ignite.internal.processors.cache.persistence.DbCheckpointListener; @@ -71,13 +72,15 @@ public class IgnitePersistentStoreSchemaLoadTest extends GridCommonAbstractTest cfg.setCacheConfiguration(cacheCfg(TMPL_NAME)); - PersistentStoreConfiguration pCfg = new PersistentStoreConfiguration(); + DataStorageConfiguration pCfg = new DataStorageConfiguration(); - pCfg.setCheckpointingFrequency(1000); + pCfg.setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setPersistenceEnabled(true) + .setMaxSize(100 * 1024 * 1024)); - cfg.setPersistentStoreConfiguration(pCfg); + pCfg.setCheckpointFrequency(1000); - cfg.setActiveOnStart(true); + cfg.setDataStorageConfiguration(pCfg); return cfg; } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java index 8deb61f33d0a2..8283003e0bf11 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java @@ -41,8 +41,8 @@ import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.cache.store.CacheStoreAdapter; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.IgniteEx; import 
org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; @@ -141,7 +141,7 @@ public class IgniteSqlNotNullConstraintTest extends GridCommonAbstractTest { c.setClientMode(true); // Not allowed to have local cache on client without memory config - c.setMemoryConfiguration(new MemoryConfiguration()); + c.setDataStorageConfiguration(new DataStorageConfiguration()); } return c; diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/database/InlineIndexHelperTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/database/InlineIndexHelperTest.java index 4a6988734093f..41dd4f18a8bd4 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/database/InlineIndexHelperTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/database/InlineIndexHelperTest.java @@ -25,12 +25,12 @@ import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; import org.apache.commons.io.Charsets; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; -import org.apache.ignite.internal.processors.cache.persistence.MemoryMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.h2.result.SortOrder; import org.h2.value.CompareMode; @@ -181,7 +181,7 @@ public void testCompareMixed2() throws Exception { * @throws Exception If failed. 
*/ private int putAndCompare(String v1, String v2, int maxSize) throws Exception { - MemoryPolicyConfiguration plcCfg = new MemoryPolicyConfiguration().setInitialSize(1024 * MB) + DataRegionConfiguration plcCfg = new DataRegionConfiguration().setInitialSize(1024 * MB) .setMaxSize(1024 * MB); PageMemory pageMem = new PageMemoryNoStoreImpl(log, @@ -189,7 +189,7 @@ private int putAndCompare(String v1, String v2, int maxSize) throws Exception { null, PAGE_SIZE, plcCfg, - new MemoryMetricsImpl(plcCfg), + new DataRegionMetricsImpl(plcCfg), false); pageMem.start(); @@ -279,7 +279,7 @@ public void testRelyOnCompareBytes() { /** */ public void testStringTruncate() throws Exception { - MemoryPolicyConfiguration plcCfg = new MemoryPolicyConfiguration().setInitialSize(1024 * MB) + DataRegionConfiguration plcCfg = new DataRegionConfiguration().setInitialSize(1024 * MB) .setMaxSize(1024 * MB); PageMemory pageMem = new PageMemoryNoStoreImpl(log(), @@ -287,7 +287,7 @@ public void testStringTruncate() throws Exception { null, PAGE_SIZE, plcCfg, - new MemoryMetricsImpl(plcCfg), + new DataRegionMetricsImpl(plcCfg), false); pageMem.start(); @@ -330,7 +330,7 @@ public void testStringTruncate() throws Exception { /** */ public void testBytes() throws Exception { - MemoryPolicyConfiguration plcCfg = new MemoryPolicyConfiguration().setInitialSize(1024 * MB) + DataRegionConfiguration plcCfg = new DataRegionConfiguration().setInitialSize(1024 * MB) .setMaxSize(1024 * MB); PageMemory pageMem = new PageMemoryNoStoreImpl(log(), @@ -338,7 +338,7 @@ public void testBytes() throws Exception { null, PAGE_SIZE, plcCfg, - new MemoryMetricsImpl(plcCfg), + new DataRegionMetricsImpl(plcCfg), false); pageMem.start(); @@ -449,7 +449,7 @@ public void testUUID() throws Exception { /** */ private void testPutGet(Value v1, Value v2, Value v3) throws Exception { - MemoryPolicyConfiguration plcCfg = new MemoryPolicyConfiguration().setInitialSize(1024 * MB) + DataRegionConfiguration plcCfg = new 
DataRegionConfiguration().setInitialSize(1024 * MB) .setMaxSize(1024 * MB); PageMemory pageMem = new PageMemoryNoStoreImpl(log(), @@ -457,7 +457,7 @@ private void testPutGet(Value v1, Value v2, Value v3) throws Exception { null, PAGE_SIZE, plcCfg, - new MemoryMetricsImpl(plcCfg), + new DataRegionMetricsImpl(plcCfg), false); pageMem.start(); diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingCoreTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingCoreTestSuite.java index cfbe2e090c2ac..114f6304e2742 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingCoreTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingCoreTestSuite.java @@ -27,6 +27,7 @@ import org.apache.ignite.internal.processors.cache.persistence.db.file.IgnitePdsCacheIntegrationTest; import org.apache.ignite.internal.processors.cache.persistence.db.file.IgnitePdsNoActualWalHistoryTest; import org.apache.ignite.internal.processors.cache.persistence.db.file.IgnitePdsThreadInterruptionTest; +import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalRecoveryPPCTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalRecoveryTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.WalRecoveryTxLogicalRecordsTest; @@ -52,6 +53,8 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(IgnitePdsAtomicCacheRebalancingTest.class); suite.addTestSuite(IgnitePdsTxCacheRebalancingTest.class); + suite.addTestSuite(IgniteWalRecoveryPPCTest.class); + suite.addTestSuite(IgnitePdsBinaryMetadataOnClusterRestartTest.class); suite.addTestSuite(IgnitePdsMarshallerMappingRestoreOnNodeStartTest.class); suite.addTestSuite(IgnitePdsThreadInterruptionTest.class); diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs 
b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs index cde216b507064..73636d1812567 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs @@ -316,7 +316,6 @@ public void TestSpringXml() Assert.IsNotNull(mem); Assert.AreEqual("dfltPlc", mem.DefaultMemoryPolicyName); - Assert.AreEqual(MemoryConfiguration.DefaultPageSize, mem.PageSize); Assert.AreEqual(MemoryConfiguration.DefaultSystemCacheInitialSize, mem.SystemCacheInitialSize); Assert.AreEqual(MemoryConfiguration.DefaultSystemCacheMaxSize, mem.SystemCacheMaxSize); @@ -324,7 +323,6 @@ public void TestSpringXml() Assert.AreEqual("dfltPlc", plc.Name); Assert.AreEqual(MemoryPolicyConfiguration.DefaultEmptyPagesPoolSize, plc.EmptyPagesPoolSize); Assert.AreEqual(MemoryPolicyConfiguration.DefaultEvictionThreshold, plc.EvictionThreshold); - Assert.AreEqual(MemoryPolicyConfiguration.DefaultInitialSize, plc.InitialSize); Assert.AreEqual(MemoryPolicyConfiguration.DefaultMaxSize, plc.MaxSize); Assert.AreEqual(MemoryPolicyConfiguration.DefaultSubIntervals, plc.SubIntervals); Assert.AreEqual(MemoryPolicyConfiguration.DefaultRateTimeInterval, plc.RateTimeInterval); diff --git a/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyObjectMapper.java b/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyObjectMapper.java index b4f89f2ae2287..00941d09ade97 100644 --- a/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyObjectMapper.java +++ b/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyObjectMapper.java @@ -24,6 +24,7 @@ import com.fasterxml.jackson.databind.JsonSerializer; import com.fasterxml.jackson.databind.ObjectMapper; import 
com.fasterxml.jackson.databind.SerializationConfig; +import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.databind.SerializerProvider; import com.fasterxml.jackson.databind.module.SimpleModule; import com.fasterxml.jackson.databind.ser.DefaultSerializerProvider; @@ -59,6 +60,8 @@ public GridJettyObjectMapper() { module.addSerializer(GridCacheSqlMetadata.class, IGNITE_SQL_METADATA_SERIALIZER); module.addSerializer(GridCacheSqlIndexMetadata.class, IGNITE_SQL_INDEX_METADATA_SERIALIZER); + configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); + registerModule(module); } diff --git a/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java b/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java index 1f80574830c5b..4cba76ed80897 100644 --- a/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java +++ b/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java @@ -273,24 +273,39 @@ public ApplicationContext getApplicationContext() throws BeansException { } /** {@inheritDoc} */ - @Override public Collection memoryMetrics() { + @Override public Collection dataRegionMetrics() { checkIgnite(); - return g.memoryMetrics(); + return g.dataRegionMetrics(); } /** {@inheritDoc} */ - @Nullable @Override public MemoryMetrics memoryMetrics(String memPlcName) { + @Nullable @Override public DataRegionMetrics dataRegionMetrics(String memPlcName) { checkIgnite(); - return g.memoryMetrics(memPlcName); + return g.dataRegionMetrics(memPlcName); } /** {@inheritDoc} */ - @Override public PersistenceMetrics persistentStoreMetrics() { + @Override public DataStorageMetrics dataStorageMetrics() { checkIgnite(); - return g.persistentStoreMetrics(); + return g.dataStorageMetrics(); + } + + /** {@inheritDoc} */ + @Override public Collection memoryMetrics() { + return DataRegionMetricsAdapter.collectionOf(dataRegionMetrics()); + } + + /** {@inheritDoc} */ + @Nullable @Override public MemoryMetrics 
memoryMetrics(String memPlcName) { + return DataRegionMetricsAdapter.valueOf(dataRegionMetrics(memPlcName)); + } + + /** {@inheritDoc} */ + @Override public PersistenceMetrics persistentStoreMetrics() { + return DataStorageMetricsAdapter.valueOf(dataStorageMetrics()); } /** {@inheritDoc} */ diff --git a/modules/visor-console/src/test/scala/org/apache/ignite/visor/commands/top/VisorActivationCommandSpec.scala b/modules/visor-console/src/test/scala/org/apache/ignite/visor/commands/top/VisorActivationCommandSpec.scala index fb2a7f439c816..a77249220577c 100644 --- a/modules/visor-console/src/test/scala/org/apache/ignite/visor/commands/top/VisorActivationCommandSpec.scala +++ b/modules/visor-console/src/test/scala/org/apache/ignite/visor/commands/top/VisorActivationCommandSpec.scala @@ -18,7 +18,7 @@ package org.apache.ignite.visor.commands.top import org.apache.ignite.Ignition -import org.apache.ignite.configuration.{IgniteConfiguration, MemoryConfiguration, PersistentStoreConfiguration} +import org.apache.ignite.configuration._ import org.apache.ignite.visor.commands.top.VisorTopologyCommand._ import org.apache.ignite.visor.{VisorRuntimeBaseSpec, visor} import VisorRuntimeBaseSpec._ @@ -30,11 +30,14 @@ class VisorActivationCommandSpec extends VisorRuntimeBaseSpec(2) { override protected def config(name: String): IgniteConfiguration = { val cfg = super.config(name) - val memCfg = new MemoryConfiguration - memCfg.setDefaultMemoryPolicySize(10 * 1024 * 1024) + val dfltReg = new DataRegionConfiguration + val dataRegCfg = new DataStorageConfiguration - cfg.setMemoryConfiguration(memCfg) - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration) + dfltReg.setMaxSize(10 * 1024 * 1024) + dfltReg.setPersistenceEnabled(true) + dataRegCfg.setDefaultDataRegionConfiguration(dfltReg) + + cfg.setDataStorageConfiguration(dataRegCfg) cfg } diff --git a/modules/web-console/backend/app/mongo.js b/modules/web-console/backend/app/mongo.js index a07f979964b3a..bfe1ae2701550 
100644 --- a/modules/web-console/backend/app/mongo.js +++ b/modules/web-console/backend/app/mongo.js @@ -960,6 +960,61 @@ module.exports.factory = function(passportMongo, settings, pluginMongo, mongoose name: String, size: Number }], + dataStorageConfiguration: { + systemRegionInitialSize: Number, + systemRegionMaxSize: Number, + pageSize: Number, + concurrencyLevel: Number, + defaultDataRegionConfiguration: { + name: String, + initialSize: Number, + maxSize: Number, + swapPath: String, + pageEvictionMode: {type: String, enum: ['DISABLED', 'RANDOM_LRU', 'RANDOM_2_LRU']}, + evictionThreshold: Number, + emptyPagesPoolSize: Number, + metricsEnabled: Boolean, + metricsSubIntervalCount: Number, + metricsRateTimeInterval: Number, + persistenceEnabled: Boolean + }, + dataRegionConfigurations: [{ + name: String, + initialSize: Number, + maxSize: Number, + swapPath: String, + pageEvictionMode: {type: String, enum: ['DISABLED', 'RANDOM_LRU', 'RANDOM_2_LRU']}, + evictionThreshold: Number, + emptyPagesPoolSize: Number, + metricsEnabled: Boolean, + metricsSubIntervalCount: Number, + metricsRateTimeInterval: Number, + persistenceEnabled: Boolean + }], + storagePath: String, + metricsEnabled: Boolean, + alwaysWriteFullPages: Boolean, + checkpointFrequency: Number, + checkpointPageBufferSize: Number, + checkpointThreads: Number, + checkpointWriteOrder: {type: String, enum: ['RANDOM', 'SEQUENTIAL']}, + walPath: String, + walArchivePath: String, + walMode: {type: String, enum: ['DEFAULT', 'LOG_ONLY', 'BACKGROUND', 'NONE']}, + walSegments: Number, + walSegmentSize: Number, + walHistorySize: Number, + walFlushFrequency: Number, + walFsyncDelayNanos: Number, + walRecordIteratorBufferSize: Number, + lockWaitTime: Number, + walThreadLocalBufferSize: Number, + metricsSubIntervalCount: Number, + metricsRateTimeInterval: Number, + fileIOFactory: {type: String, enum: ['RANDOM', 'ASYNC']}, + walAutoArchiveAfterInactivity: Number, + writeThrottlingEnabled: Boolean + }, memoryConfiguration: { 
systemCacheInitialSize: Number, systemCacheMaxSize: Number, diff --git a/modules/web-console/frontend/app/components/page-configure-basic/controller.js b/modules/web-console/frontend/app/components/page-configure-basic/controller.js index c0b56acfecd6a..cafdb20b9e428 100644 --- a/modules/web-console/frontend/app/components/page-configure-basic/controller.js +++ b/modules/web-console/frontend/app/components/page-configure-basic/controller.js @@ -57,7 +57,7 @@ export default class PageConfigureBasicController { allClusterCaches: this.getAllClusterCaches(state.configureBasic), cachesMenu: this.getCachesMenu(state.list.caches), clustersMenu: this.getClustersMenu(state.list.clusters), - defaultMemoryPolicy: this.getDefaultClusterMemoryPolicy(state.configureBasic.cluster), + defaultMemoryPolicy: this.getDefaultClusterMemoryPolicy(state.configureBasic.cluster, version), memorySizeInputVisible: this.getMemorySizeInputVisibility(version) })) .do((value) => this.applyValue(value)); @@ -125,8 +125,12 @@ export default class PageConfigureBasicController { return [...state.oldClusterCaches, ...state.newClusterCaches]; } - getDefaultClusterMemoryPolicy(cluster) { - return get(cluster, 'memoryConfiguration.memoryPolicies', []).find((p) => p.name === 'default'); + getDefaultClusterMemoryPolicy(cluster, version) { + if (this.Version.since(version.ignite, ['2.1.0', '2.3.0'])) + return get(cluster, 'memoryConfiguration.memoryPolicies', []).find((p) => p.name === 'default'); + + return get(cluster, 'dataStorageConfiguration.defaultDataRegionConfiguration') || + get(cluster, 'dataStorageConfiguration.dataRegionConfigurations', []).find((p) => p.name === 'default'); } getMemorySizeInputVisibility(version) { diff --git a/modules/web-console/frontend/app/modules/configuration/generator/AbstractTransformer.js b/modules/web-console/frontend/app/modules/configuration/generator/AbstractTransformer.js index a67f7b9dff109..f6f471c3c482e 100644 --- 
a/modules/web-console/frontend/app/modules/configuration/generator/AbstractTransformer.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/AbstractTransformer.js @@ -138,6 +138,11 @@ export default class AbstractTransformer { return this.toSection(this.generator.clusterMemory(memoryConfiguration, available)); } + // Generate memory configuration group. + static clusterDataStorageConfiguration(dataStorageCfg, available) { + return this.toSection(this.generator.clusterDataStorageConfiguration(dataStorageCfg, available)); + } + // Generate marshaller group. static clusterMisc(cluster, available) { return this.toSection(this.generator.clusterMisc(cluster, available)); diff --git a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js index 9d7887ae47761..74588f01e60a3 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js @@ -81,6 +81,11 @@ export default class IgniteConfigurationGenerator { this.clusterCollision(cluster.collision, cfg); this.clusterCommunication(cluster, cfg); this.clusterConnector(cluster.connector, cfg); + + // Since ignite 2.3 + if (available('2.3.0')) + this.clusterDataStorageConfiguration(cluster.dataStorageConfiguration, available, cfg); + this.clusterDeployment(cluster, available, cfg); this.clusterEvents(cluster, available, cfg); this.clusterFailover(cluster, available, cfg); @@ -89,19 +94,23 @@ export default class IgniteConfigurationGenerator { this.clusterLogger(cluster.logger, cfg); this.clusterMarshaller(cluster, available, cfg); - // Since ignite 2.0 - if (available('2.0.0')) + // Since ignite 2.0 and deprecated in ignite 2.3 + if (available(['2.0.0', '2.3.0'])) this.clusterMemory(cluster.memoryConfiguration, available, cfg); 
this.clusterMisc(cluster, available, cfg); this.clusterMetrics(cluster, available, cfg); this.clusterODBC(cluster.odbc, available, cfg); - this.clusterPersistence(cluster.persistenceStoreConfiguration, available, cfg); + + // Since ignite 2.1 deprecated in ignite 2.3 + if (available(['2.1.0', '2.3.0'])) + this.clusterPersistence(cluster.persistenceStoreConfiguration, available, cfg); + this.clusterQuery(cluster, available, cfg); this.clusterServiceConfiguration(cluster.serviceConfigurations, cluster.caches, cfg); this.clusterSsl(cluster, cfg); - // Removed in ignite 2.0 + // Deprecated in ignite 2.0 if (available(['1.0.0', '2.0.0'])) this.clusterSwap(cluster, cfg); @@ -1333,6 +1342,95 @@ export default class IgniteConfigurationGenerator { return cfg; } + static dataRegionConfiguration(dataRegionCfg) { + const plcBean = new Bean('org.apache.ignite.configuration.DataRegionConfiguration', 'dataRegionCfg', dataRegionCfg, clusterDflts.dataStorageConfiguration.dataRegionConfigurations); + + return plcBean.stringProperty('name') + .longProperty('initialSize') + .longProperty('maxSize') + .stringProperty('swapPath') + .enumProperty('pageEvictionMode') + .doubleProperty('evictionThreshold') + .intProperty('emptyPagesPoolSize') + .intProperty('metricsSubIntervalCount') + .longProperty('metricsRateTimeInterval') + .boolProperty('metricsEnabled') + .boolProperty('persistenceEnabled'); + } + + // Generate data storage configuration. 
+ static clusterDataStorageConfiguration(dataStorageCfg, available, cfg = this.igniteConfigurationBean()) { + if (!available('2.3.0')) + return cfg; + + const storageBean = new Bean('org.apache.ignite.configuration.DataStorageConfiguration', 'dataStorageCfg', dataStorageCfg, clusterDflts.dataStorageConfiguration); + + storageBean.intProperty('pageSize') + .intProperty('concurrencyLevel') + .intProperty('systemRegionInitialSize') + .intProperty('systemRegionMaxSize'); + + const dfltDataRegionCfg = this.dataRegionConfiguration(_.get(dataStorageCfg, 'defaultDataRegionConfiguration')); + + if (!dfltDataRegionCfg.isEmpty()) + storageBean.beanProperty('defaultDataRegionConfiguration', dfltDataRegionCfg); + + const dataRegionCfgs = []; + + _.forEach(_.get(dataStorageCfg, 'dataRegionConfigurations'), (dataRegionCfg) => { + const plcBean = this.dataRegionConfiguration(dataRegionCfg); + + if (plcBean.isEmpty()) + return; + + dataRegionCfgs.push(plcBean); + }); + + if (!_.isEmpty(dataRegionCfgs)) + storageBean.varArgProperty('dataRegionConfigurations', 'dataRegionConfigurations', dataRegionCfgs, 'org.apache.ignite.configuration.DataRegionConfiguration'); + + storageBean.stringProperty('storagePath') + .intProperty('checkpointFrequency') + .longProperty('checkpointPageBufferSize') + .intProperty('checkpointThreads') + .enumProperty('walMode') + .stringProperty('walPath') + .stringProperty('walArchivePath') + .intProperty('walSegments') + .intProperty('walSegmentSize') + .intProperty('walHistorySize') + .longProperty('walFlushFrequency') + .longProperty('walFsyncDelayNanos') + .intProperty('walRecordIteratorBufferSize') + .longProperty('lockWaitTime') + .intProperty('walThreadLocalBufferSize') + .intProperty('metricsSubIntervalCount') + .intProperty('metricsRateTimeInterval') + .longProperty('walAutoArchiveAfterInactivity') + .boolProperty('metricsEnabled') + .boolProperty('alwaysWriteFullPages') + .boolProperty('writeThrottlingEnabled'); + + const fileIOFactory = 
_.get(dataStorageCfg, 'fileIOFactory'); + + let factoryBean; + + if (fileIOFactory === 'RANDOM') + factoryBean = new Bean('org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory', 'rndFileIoFactory', {}); + else if (fileIOFactory === 'ASYNC') + factoryBean = new Bean('org.apache.ignite.internal.processors.cache.persistence.file.AsyncFileIOFactory', 'asyncFileIoFactory', {}); + + if (factoryBean) + storageBean.beanProperty('fileIOFactory', factoryBean); + + if (storageBean.isEmpty()) + return cfg; + + cfg.beanProperty('dataStorageConfiguration', storageBean); + + return cfg; + } + // Generate miscellaneous configuration. static clusterMisc(cluster, available, cfg = this.igniteConfigurationBean(cluster)) { cfg.stringProperty('workDirectory'); @@ -1473,7 +1571,7 @@ export default class IgniteConfigurationGenerator { // Generate cluster query group. static clusterPersistence(persistence, available, cfg = this.igniteConfigurationBean()) { - if (!available('2.1.0') || _.get(persistence, 'enabled') !== true) + if (!available(['2.1.0', '2.3.0']) || _.get(persistence, 'enabled') !== true) return cfg; const bean = new Bean('org.apache.ignite.configuration.PersistentStoreConfiguration', 'PersistenceCfg', diff --git a/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cluster.service.js b/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cluster.service.js index 1c7fc44eee5f7..0e786d935160d 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cluster.service.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cluster.service.js @@ -311,6 +311,46 @@ const DFLT_CLUSTER = { rateTimeInterval: 60000 } }, + dataStorageConfiguration: { + systemCacheInitialSize: 41943040, + systemCacheMaxSize: 104857600, + pageSize: 2048, + storagePath: 'db', + dataRegionConfigurations: { + name: 'default', + initialSize: 268435456, + pageEvictionMode: { + 
clsName: 'org.apache.ignite.configuration.DataPageEvictionMode', + value: 'DISABLED' + }, + evictionThreshold: 0.9, + emptyPagesPoolSize: 100, + metricsEnabled: false, + metricsSubIntervalCount: 5, + metricsRateTimeInterval: 60000 + }, + metricsEnabled: false, + alwaysWriteFullPages: false, + checkpointFrequency: 180000, + checkpointPageBufferSize: 268435456, + checkpointThreads: 4, + walMode: { + clsName: 'org.apache.ignite.configuration.WALMode', + value: 'DEFAULT' + }, + walPath: 'db/wal', + walArchivePath: 'db/wal/archive', + walSegments: 10, + walSegmentSize: 67108864, + walHistorySize: 20, + walFlushFrequency: 2000, + walFsyncDelayNanos: 1000, + walRecordIteratorBufferSize: 67108864, + lockWaitTime: 10000, + walThreadLocalBufferSize: 131072, + metricsSubIntervalCount: 5, + metricsRateTimeInterval: 60000 + }, utilityCacheKeepAliveTime: 60000, hadoopConfiguration: { mapReducePlanner: { diff --git a/modules/web-console/frontend/app/modules/states/configuration/clusters/data-storage.pug b/modules/web-console/frontend/app/modules/states/configuration/clusters/data-storage.pug new file mode 100644 index 0000000000000..9c2dca1013ee4 --- /dev/null +++ b/modules/web-console/frontend/app/modules/states/configuration/clusters/data-storage.pug @@ -0,0 +1,255 @@ +//- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + +include /app/helpers/jade/mixins + +-var form = 'dataStorageConfiguration' +-var model = 'backupItem.dataStorageConfiguration' +-var dfltRegionModel = model + '.defaultDataRegionConfiguration' +-var dataRegionConfigurations = model + '.dataRegionConfigurations' + +.panel.panel-default(ng-show='$ctrl.available("2.3.0")' ng-form=form novalidate) + .panel-heading(bs-collapse-toggle ng-click=`ui.loadPanel('${form}')`) + ignite-form-panel-chevron + label Data storage configuration + ignite-form-field-tooltip.tipLabel + | Page memory is a manageable off-heap based memory architecture that is split into pages of fixed size#[br] + | #[a(href="https://apacheignite.readme.io/docs/distributed-persistent-store" target="_blank") More info] + ignite-form-revert + .panel-collapse(role='tabpanel' bs-collapse-target id=`${form}`) + .panel-body(ng-if=`$ctrl.available("2.3.0") && ui.isPanelLoaded('${form}')`) + .col-sm-6 + .settings-row + +number-min-max('Page size:', model + '.pageSize', '"DataStorageConfigurationPageSize"', + 'true', '2048', '1024', '16384', 'Every memory region is split on pages of fixed size') + .settings-row + +number('Concurrency level:', model + '.concurrencyLevel', '"DataStorageConfigurationConcurrencyLevel"', + 'true', 'availableProcessors', '2', 'The number of concurrent segments in Ignite internal page mapping tables') + .settings-row + +ignite-form-group + ignite-form-field-label + | System region + ignite-form-group-tooltip + | System region properties + .group-content + .details-row + +number('Initial size:', model + '.systemRegionInitialSize', '"DataStorageSystemRegionInitialSize"', + 'true', '41943040', '10485760', 'Initial size of a data region reserved for system cache') + .details-row + +number('Maximum size:', model + '.systemRegionMaxSize', '"DataStorageSystemRegionMaxSize"', + 'true', '104857600', '10485760', 'Maximum data region size 
reserved for system cache') + .settings-row + +ignite-form-group + ignite-form-field-label + | Data regions + ignite-form-group-tooltip + | Data region configurations + .group-content + .details-row + +ignite-form-group + ignite-form-field-label + | Default data region + ignite-form-group-tooltip + | Default data region properties + .group-content + .details-row + +text('Name:', dfltRegionModel + '.name', '"DfltRegionName" + $index', 'false', 'default', 'Default data region name') + .details-row + +number('Initial size:', dfltRegionModel + '.initialSize', '"DfltRegionInitialSize" + $index', + 'true', '268435456', '10485760', 'Default data region initial size') + .details-row + +number('Maximum size:', dfltRegionModel + '.maxSize', '"DfltRegionMaxSize" + $index', + 'true', '0.2 * totalMemoryAvailable', '10485760', 'Default data region maximum size') + .details-row + +text('Swap file path:', dfltRegionModel + '.swapPath', '"DfltRegionSwapFilePath" + $index', 'false', + 'Input swap file path', 'An optional path to a memory mapped file for default data region') + .details-row + +dropdown('Eviction mode:', dfltRegionModel + '.pageEvictionMode', '"DfltRegionPageEvictionMode"', 'true', 'DISABLED', + '[\ + {value: "DISABLED", label: "DISABLED"},\ + {value: "RANDOM_LRU", label: "RANDOM_LRU"},\ + {value: "RANDOM_2_LRU", label: "RANDOM_2_LRU"}\ + ]', + 'An algorithm for memory pages eviction\ +

          \ +
        • DISABLED - Eviction is disabled
        • \ +
        • RANDOM_LRU - Once a memory region defined by a memory policy is configured, an off-heap array is allocated to track last usage timestamp for every individual data page
        • \ +
        • RANDOM_2_LRU - Differs from Random-LRU only in a way that two latest access timestamps are stored for every data page
        • \ +
        ') + .details-row + +number-min-max-step('Eviction threshold:', dfltRegionModel + '.evictionThreshold', '"DfltRegionEvictionThreshold" + $index', + 'true', '0.9', '0.5', '0.999', '0.05', 'A threshold for memory pages eviction initiation') + .details-row + +number('Empty pages pool size:', dfltRegionModel + '.emptyPagesPoolSize', '"DfltRegionEmptyPagesPoolSize" + $index', + 'true', '100', '11', 'The minimal number of empty pages to be present in reuse lists for default data region') + .details-row + +number('Metrics sub interval count:', dfltRegionModel + '.metricsSubIntervalCount', '"DfltRegionSubIntervals" + $index', + 'true', '5', '1', 'A number of sub-intervals the whole rate time interval will be split into to calculate allocation and eviction rates') + .details-row + +number('Metrics rate time interval:', dfltRegionModel + '.metricsRateTimeInterval', '"DfltRegionRateTimeInterval" + $index', + 'true', '60000', '1000', 'Time interval for allocation rate and eviction rate monitoring purposes') + .details-row + +checkbox('Metrics enabled', dfltRegionModel + '.metricsEnabled', '"DfltRegionMetricsEnabled" + $index', + 'Whether memory metrics are enabled by default on node startup') + .details-row + +checkbox('Persistence enabled', dfltRegionModel + '.persistenceEnabled', '"DfltRegionPersistenceEnabled" + $index', + 'Enable Ignite Native Persistence') + .details-row(ng-init='dataRegionTbl={type: "dataRegions", model: "dataRegionConfigurations", focusId: "name", ui: "data-region-table"}') + +ignite-form-group() + ignite-form-field-label + | Configured data regions + ignite-form-group-tooltip + | List of configured data regions + ignite-form-group-add(ng-click='tableNewItem(dataRegionTbl)') + | Add data region configuration + .group-content-empty(ng-if=`!(${dataRegionConfigurations} && ${dataRegionConfigurations}.length > 0)`) + | Not defined + .group-content(ng-show=`${dataRegionConfigurations} && ${dataRegionConfigurations}.length > 0` ng-repeat=`model in 
${dataRegionConfigurations} track by $index`) + hr(ng-if='$index != 0') + .settings-row + +text-enabled-autofocus('Name:', 'model.name', '"DataRegionName" + $index', 'true', 'false', 'default', 'Data region name') + +table-remove-button(dataRegionConfigurations, 'Remove memory configuration') + .settings-row + +number('Initial size:', 'model.initialSize', '"DataRegionInitialSize" + $index', + 'true', '268435456', '10485760', 'Initial memory region size defined by this data region') + .settings-row + +number('Maximum size:', 'model.maxSize', '"DataRegionMaxSize" + $index', + 'true', '0.2 * totalMemoryAvailable', '10485760', 'Maximum memory region size defined by this data region') + .settings-row + +text('Swap file path:', 'model.swapPath', '"DataRegionSwapPath" + $index', 'false', + 'Input swap file path', 'An optional path to a memory mapped file for this data region') + .settings-row + +dropdown('Eviction mode:', 'model.pageEvictionMode', '"DataRegionPageEvictionMode"', 'true', 'DISABLED', + '[\ + {value: "DISABLED", label: "DISABLED"},\ + {value: "RANDOM_LRU", label: "RANDOM_LRU"},\ + {value: "RANDOM_2_LRU", label: "RANDOM_2_LRU"}\ + ]', + 'An algorithm for memory pages eviction\ +
          \ +
        • DISABLED - Eviction is disabled
        • \ +
        • RANDOM_LRU - Once a memory region defined by a memory policy is configured, an off-heap array is allocated to track last usage timestamp for every individual data page
        • \ +
        • RANDOM_2_LRU - Differs from Random-LRU only in a way that two latest access timestamps are stored for every data page
        • \ +
        ') + .settings-row + +number-min-max-step('Eviction threshold:', 'model.evictionThreshold', '"DataRegionEvictionThreshold" + $index', + 'true', '0.9', '0.5', '0.999', '0.05', 'A threshold for memory pages eviction initiation') + .settings-row + +number('Empty pages pool size:', 'model.emptyPagesPoolSize', '"DataRegionEmptyPagesPoolSize" + $index', + 'true', '100', '11', 'The minimal number of empty pages to be present in reuse lists for this data region') + .settings-row + +number('Metrics sub interval count:', 'model.metricsSubIntervalCount', '"DataRegionSubIntervals" + $index', + 'true', '5', '1', 'A number of sub-intervals the whole rate time interval will be split into to calculate allocation and eviction rates') + .settings-row + +number('Metrics rate time interval:', 'model.metricsRateTimeInterval', '"DataRegionRateTimeInterval" + $index', + 'true', '60000', '1000', 'Time interval for allocation rate and eviction rate monitoring purposes') + .settings-row + +checkbox('Metrics enabled', 'model.metricsEnabled', '"DataRegionMetricsEnabled" + $index', + 'Whether memory metrics are enabled by default on node startup') + .settings-row + +checkbox('Persistence enabled', 'model.persistenceEnabled', '"DataRegionPersistenceEnabled" + $index', + 'Enable Ignite Native Persistence') + .settings-row + +text-enabled('Storage path:', `${model}.storagePath`, '"DataStoragePath"', 'true', 'false', 'db', + 'Directory where index and partition files are stored') + .settings-row + +number('Checkpoint frequency:', `${model}.checkpointFrequency`, '"DataStorageCheckpointFrequency"', 'true', '180000', '1', + 'Frequency which is a minimal interval when the dirty pages will be written to the Persistent Store') + .settings-row + +number('Checkpoint page buffer size:', `${model}.checkpointPageBufferSize`, '"DataStorageCheckpointPageBufferSize"', 'true', '268435456', '0', + 'Amount of memory allocated for a checkpointing temporary buffer') + .settings-row + +number('Checkpoint 
threads:', `${model}.checkpointThreads`, '"DataStorageCheckpointThreads"', 'true', '4', '1', 'A number of threads to use for the checkpoint purposes') + .settings-row + +dropdown('Checkpoint write order:', `${model}.checkpointWriteOrder`, '"DataStorageCheckpointWriteOrder"', 'true', 'SEQUENTIAL', + '[\ + {value: "RANDOM", label: "RANDOM"},\ + {value: "SEQUENTIAL", label: "SEQUENTIAL"}\ + ]', + 'Order of writing pages to disk storage during checkpoint.\ +
          \ +
        • RANDOM - Pages are written in order provided by checkpoint pages collection iterator
        • \ +
        • SEQUENTIAL - All checkpoint pages are collected into single list and sorted by page index
        • \ +
        ') + .settings-row + +dropdown('WAL mode:', `${model}.walMode`, '"DataStorageWalMode"', 'true', 'DEFAULT', + '[\ + {value: "DEFAULT", label: "DEFAULT"},\ + {value: "LOG_ONLY", label: "LOG_ONLY"},\ + {value: "BACKGROUND", label: "BACKGROUND"},\ + {value: "NONE", label: "NONE"}\ + ]', + 'Type define behavior wal fsync.\ +
          \ +
        • DEFAULT - full-sync disk writes
        • \ +
        • LOG_ONLY - flushes application buffers
        • \ +
        • BACKGROUND - does not force application's buffer flush
        • \ +
        • NONE - WAL is disabled
        • \ +
        ') + .settings-row + +text-enabled('WAL path:', `${model}.walPath`, '"DataStorageWalPath"', 'true', 'false', 'db/wal', 'A path to the directory where WAL is stored') + .settings-row + +text-enabled('WAL archive path:', `${model}.walArchivePath`, '"DataStorageWalArchivePath"', 'true', 'false', 'db/wal/archive', 'A path to the WAL archive directory') + .settings-row + +number('WAL segments:', `${model}.walSegments`, '"DataStorageWalSegments"', 'true', '10', '1', 'A number of WAL segments to work with') + .settings-row + +number('WAL segment size:', `${model}.walSegmentSize`, '"DataStorageWalSegmentSize"', 'true', '67108864', '0', 'Size of a WAL segment') + .settings-row + +number('WAL history size:', `${model}.walHistorySize`, '"DataStorageWalHistorySize"', 'true', '20', '1', 'A total number of checkpoints to keep in the WAL history') + .settings-row + +number('WAL flush frequency:', `${model}.walFlushFrequency`, '"DataStorageWalFlushFrequency"', 'true', '2000', '1', + 'How often will be fsync, in milliseconds. In background mode, exist thread which do fsync by timeout') + .settings-row + +number('WAL fsync delay:', `${model}.walFsyncDelayNanos`, '"DataStorageWalFsyncDelay"', 'true', '1000', '1', 'WAL fsync delay, in nanoseconds') + .settings-row + +number('WAL record iterator buffer size:', `${model}.walRecordIteratorBufferSize`, '"DataStorageWalRecordIteratorBufferSize"', 'true', '67108864', '1', + 'How many bytes iterator read from disk(for one reading), during go ahead WAL') + .settings-row + +number('Lock wait time:', `${model}.lockWaitTime`, '"DataStorageLockWaitTime"', 'true', '10000', '1', + 'Time out in milliseconds, while wait and try get file lock for start persist manager') + .settings-row + +number('WAL thread local buffer size:', `${model}.walThreadLocalBufferSize`, '"DataStorageWalThreadLocalBufferSize"', 'true', '131072', '1', + 'Define size thread local buffer. 
Each thread which write to WAL have thread local buffer for serialize recode before write in WAL') + .settings-row + +number('Metrics sub interval count:', `${model}.metricsSubIntervalCount`, '"DataStorageMetricsSubIntervalCount"', 'true', '5', '1', + 'Number of sub - intervals the whole rate time interval will be split into to calculate rate - based metrics') + .settings-row + +number('Metrics rate time interval:', `${model}.metricsRateTimeInterval`, '"DataStorageMetricsRateTimeInterval"', 'true', '60000', '1000', + 'The length of the time interval for rate - based metrics. This interval defines a window over which hits will be tracked') + .settings-row + +dropdown('File IO factory:', `${model}.fileIOFactory`, '"DataStorageFileIOFactory"', 'true', 'Default', + '[\ + {value: "RANDOM", label: "RANDOM"},\ + {value: "ASYNC", label: "ASYNC"},\ + {value: null, label: "Default"},\ + ]', + 'Order of writing pages to disk storage during checkpoint.\ +
          \ +
        • RANDOM - Pages are written and read using standard blocking RandomAccessFileIOFactory
        • \ +
        • ASYNC - Pages are written and read using asynchronous AsyncFileIOFactory
        • \ +
        ') + .settings-row + +number('WAL auto archive after inactivity:', `${model}.walAutoArchiveAfterInactivity`, '"DataStorageWalAutoArchiveAfterInactivity"', 'true', '-1', '-1', + 'Time in millis to run auto archiving segment after last record logging') + .settings-row + +checkbox-enabled('Metrics enabled', `${model}.metricsEnabled`, '"DataStorageMetricsEnabled"', 'true', 'Flag indicating whether persistence metrics collection is enabled') + .settings-row + +checkbox-enabled('Always write full pages', `${model}.alwaysWriteFullPages`, '"DataStorageAlwaysWriteFullPages"', 'true', 'Flag indicating whether always write full pages') + .settings-row + +checkbox('Write throttling enabled', `${model}.writeThrottlingEnabled`, '"DataStorageWriteThrottlingEnabled"', + 'Throttle threads that generate dirty pages too fast during ongoing checkpoint') + .col-sm-6 + +preview-xml-java(model, 'clusterDataStorageConfiguration') diff --git a/modules/web-console/frontend/app/modules/states/configuration/clusters/memory.pug b/modules/web-console/frontend/app/modules/states/configuration/clusters/memory.pug index e22afe284182d..705ba9154cfc2 100644 --- a/modules/web-console/frontend/app/modules/states/configuration/clusters/memory.pug +++ b/modules/web-console/frontend/app/modules/states/configuration/clusters/memory.pug @@ -20,7 +20,7 @@ include /app/helpers/jade/mixins -var model = 'backupItem.memoryConfiguration' -var memoryPolicies = model + '.memoryPolicies' -.panel.panel-default(ng-show='$ctrl.available("2.0.0")' ng-form=form novalidate) +.panel.panel-default(ng-show='$ctrl.available(["2.0.0", "2.3.0"])' ng-form=form novalidate) .panel-heading(bs-collapse-toggle ng-click=`ui.loadPanel('${form}')`) ignite-form-panel-chevron label Memory configuration @@ -29,7 +29,7 @@ include /app/helpers/jade/mixins | #[a(href="https://apacheignite.readme.io/docs/durable-memory" target="_blank") More info] ignite-form-revert .panel-collapse(role='tabpanel' bs-collapse-target id=`${form}`) - 
.panel-body(ng-if=`$ctrl.available("2.0.0") && ui.isPanelLoaded('${form}')`) + .panel-body(ng-if=`$ctrl.available(["2.0.0", "2.3.0"]) && ui.isPanelLoaded('${form}')`) .col-sm-6 .settings-row +number-min-max('Page size:', model + '.pageSize', '"MemoryConfigurationPageSize"', diff --git a/modules/web-console/frontend/app/modules/states/configuration/clusters/persistence.pug b/modules/web-console/frontend/app/modules/states/configuration/clusters/persistence.pug index 50a569b22de18..fcc170e98d73c 100644 --- a/modules/web-console/frontend/app/modules/states/configuration/clusters/persistence.pug +++ b/modules/web-console/frontend/app/modules/states/configuration/clusters/persistence.pug @@ -20,7 +20,7 @@ include /app/helpers/jade/mixins -var model = 'backupItem.persistenceStoreConfiguration' -var enabled = model + '.enabled' -.panel.panel-default(ng-show='$ctrl.available("2.1.0")' ng-form=form novalidate) +.panel.panel-default(ng-show='$ctrl.available(["2.1.0", "2.3.0"])' ng-form=form novalidate) .panel-heading(bs-collapse-toggle ng-click=`ui.loadPanel('${form}')`) ignite-form-panel-chevron label Persistence store @@ -29,7 +29,7 @@ include /app/helpers/jade/mixins //- TODO IGNITE-5415 Add link to documentation. 
ignite-form-revert .panel-collapse(role='tabpanel' bs-collapse-target id=`${form}`) - .panel-body(ng-if=`$ctrl.available("2.1.0") && ui.isPanelLoaded('${form}')`) + .panel-body(ng-if=`$ctrl.available(["2.1.0", "2.3.0"]) && ui.isPanelLoaded('${form}')`) .col-sm-6 .settings-row +checkbox('Enabled', enabled, '"PersistenceEnabled"', 'Flag indicating whether to configure persistent configuration') diff --git a/modules/web-console/frontend/app/services/Clusters.js b/modules/web-console/frontend/app/services/Clusters.js index 4e38a5875a5d3..dd2f59894f51a 100644 --- a/modules/web-console/frontend/app/services/Clusters.js +++ b/modules/web-console/frontend/app/services/Clusters.js @@ -60,6 +60,12 @@ export default class Clusters { }, swapSpaceSpi: {}, transactionConfiguration: {}, + dataStorageConfiguration: { + defaultDataRegionConfiguration: { + name: 'default' + }, + dataRegionConfigurations: [] + }, memoryConfiguration: { memoryPolicies: [{ name: 'default', diff --git a/modules/web-console/frontend/app/services/Version.service.js b/modules/web-console/frontend/app/services/Version.service.js index beb27fa0a3340..8b67eb7d5a206 100644 --- a/modules/web-console/frontend/app/services/Version.service.js +++ b/modules/web-console/frontend/app/services/Version.service.js @@ -77,7 +77,11 @@ export default class IgniteVersion { this.supportedVersions = [ { - label: 'Ignite 2.x', + label: 'Ignite 2.3', + ignite: '2.3.0' + }, + { + label: 'Ignite 2.1', ignite: '2.2.0' }, { diff --git a/modules/web-console/frontend/controllers/clusters-controller.js b/modules/web-console/frontend/controllers/clusters-controller.js index 8340b4d43d40c..2485fa69e9397 100644 --- a/modules/web-console/frontend/controllers/clusters-controller.js +++ b/modules/web-console/frontend/controllers/clusters-controller.js @@ -176,6 +176,8 @@ export default ['$rootScope', '$scope', '$http', '$state', '$timeout', 'IgniteLe } else if (field.type === 'memoryPolicies') 
$scope.backupItem.memoryConfiguration.memoryPolicies.push({}); + else if (field.type === 'dataRegions') + $scope.backupItem.dataStorageConfiguration.dataRegionConfigurations.push({}); else if (field.type === 'serviceConfigurations') $scope.backupItem.serviceConfigurations.push({}); else if (field.type === 'executorConfigurations') @@ -329,6 +331,9 @@ export default ['$rootScope', '$scope', '$http', '$state', '$timeout', 'IgniteLe if (!cluster.memoryConfiguration) cluster.memoryConfiguration = { memoryPolicies: [] }; + if (!cluster.dataStorageConfiguration) + cluster.dataStorageConfiguration = { dataRegionConfigurations: [] }; + if (!cluster.hadoopConfiguration) cluster.hadoopConfiguration = { nativeLibraryNames: [] }; @@ -712,6 +717,53 @@ export default ['$rootScope', '$scope', '$http', '$state', '$timeout', 'IgniteLe })); } + function checkDataStorageConfiguration(item) { + const dataStorage = item.dataStorageConfiguration; + + if ((dataStorage.systemRegionMaxSize || 104857600) < (dataStorage.systemRegionInitialSize || 41943040)) + return ErrorPopover.show('DataStorageSystemRegionMaxSize', 'System data region maximum size should be greater than initial size', $scope.ui, 'dataStorageConfiguration'); + + const pageSize = dataStorage.pageSize; + + if (pageSize > 0 && (pageSize & (pageSize - 1) !== 0)) { + ErrorPopover.show('DataStorageConfigurationPageSize', 'Page size must be power of 2', $scope.ui, 'dataStorageConfiguration'); + + return false; + } + + return _.isNil(_.find(dataStorage.dataRegionConfigurations, (curPlc, curIx) => { + if (curPlc.name === 'sysMemPlc') { + ErrorPopover.show('DfltRegionPolicyName' + curIx, '"sysMemPlc" policy name is reserved for internal use', $scope.ui, 'dataStorageConfiguration'); + + return true; + } + + if (_.find(dataStorage.dataRegionConfigurations, (plc, ix) => curIx > ix && (curPlc.name || 'default') === (plc.name || 'default'))) { + ErrorPopover.show('DfltRegionPolicyName' + curIx, 'Data region with that name is already 
configured', $scope.ui, 'dataStorageConfiguration'); + + return true; + } + + if (curPlc.maxSize && curPlc.maxSize < (curPlc.initialSize || 268435456)) { + ErrorPopover.show('DfltRegionPolicyMaxSize' + curIx, 'Maximum size should be greater than initial size', $scope.ui, 'dataStorageConfiguration'); + + return true; + } + + if (curPlc.maxSize) { + const maxPoolSize = Math.floor(curPlc.maxSize / (dataStorage.pageSize || 2048) / 10); + + if (maxPoolSize < (curPlc.emptyPagesPoolSize || 100)) { + ErrorPopover.show('DfltRegionPolicyEmptyPagesPoolSize' + curIx, 'Evicted pages pool size should be lesser than ' + maxPoolSize, $scope.ui, 'dataStorageConfiguration'); + + return true; + } + } + + return false; + })); + } + function checkODBC(item) { if (_.get(item, 'odbc.odbcEnabled') && _.get(item, 'marshaller.kind')) return ErrorPopover.show('odbcEnabledInput', 'ODBC can only be used with BinaryMarshaller', $scope.ui, 'odbcConfiguration'); @@ -786,7 +838,7 @@ export default ['$rootScope', '$scope', '$http', '$state', '$timeout', 'IgniteLe } // Check cluster logical consistency. - function validate(item) { + this.validate = (item) => { ErrorPopover.hide(); if (LegacyUtils.isEmptyString(item.name)) @@ -813,13 +865,16 @@ export default ['$rootScope', '$scope', '$http', '$state', '$timeout', 'IgniteLe if (!checkCommunicationConfiguration(item)) return false; + if (!this.available('2.3.0') && !checkDataStorageConfiguration(item)) + return false; + if (!checkDiscoveryConfiguration(item)) return false; if (!checkLoadBalancingConfiguration(item)) return false; - if (!checkMemoryConfiguration(item)) + if (this.available(['2.0.0', '2.3.0']) && !checkMemoryConfiguration(item)) return false; if (!checkODBC(item)) @@ -838,7 +893,7 @@ export default ['$rootScope', '$scope', '$http', '$state', '$timeout', 'IgniteLe return false; return true; - } + }; // Save cluster in database. 
function save(item) { @@ -882,7 +937,7 @@ export default ['$rootScope', '$scope', '$http', '$state', '$timeout', 'IgniteLe } // Save cluster. - $scope.saveItem = function() { + $scope.saveItem = () => { const item = $scope.backupItem; const swapConfigured = item.swapSpaceSpi && item.swapSpaceSpi.kind; @@ -890,7 +945,7 @@ export default ['$rootScope', '$scope', '$http', '$state', '$timeout', 'IgniteLe if (!swapConfigured && _.find(clusterCaches(item), (cache) => cache.swapEnabled)) _.merge(item, {swapSpaceSpi: {kind: 'FileSwapSpaceSpi'}}); - if (validate(item)) + if (this.validate(item)) save(item); }; @@ -899,8 +954,8 @@ export default ['$rootScope', '$scope', '$http', '$state', '$timeout', 'IgniteLe } // Clone cluster with new name. - $scope.cloneItem = function() { - if (validate($scope.backupItem)) { + $scope.cloneItem = () => { + if (this.validate($scope.backupItem)) { Input.clone($scope.backupItem.name, _clusterNames()).then((newName) => { const item = angular.copy($scope.backupItem); diff --git a/modules/web-console/frontend/views/configuration/clusters.tpl.pug b/modules/web-console/frontend/views/configuration/clusters.tpl.pug index f0d88b7d1012c..26a1da13bd4fb 100644 --- a/modules/web-console/frontend/views/configuration/clusters.tpl.pug +++ b/modules/web-console/frontend/views/configuration/clusters.tpl.pug @@ -49,6 +49,10 @@ include /app/helpers/jade/mixins include /app/modules/states/configuration/clusters/communication include /app/modules/states/configuration/clusters/connector include /app/modules/states/configuration/clusters/deployment + + //- Since ignite 2.3 + include /app/modules/states/configuration/clusters/data-storage + include /app/modules/states/configuration/clusters/discovery include /app/modules/states/configuration/clusters/events include /app/modules/states/configuration/clusters/failover @@ -58,7 +62,7 @@ include /app/helpers/jade/mixins include /app/modules/states/configuration/clusters/logger include 
/app/modules/states/configuration/clusters/marshaller - //- Since ignite 2.0 + //- Since ignite 2.0, deprecated in ignite 2.3 include /app/modules/states/configuration/clusters/memory include /app/modules/states/configuration/clusters/misc @@ -67,7 +71,7 @@ include /app/helpers/jade/mixins //- Deprecated in ignite 2.1 include /app/modules/states/configuration/clusters/odbc - //- Since ignite 2.1 + //- Since ignite 2.1, deprecated in ignite 2.3 include /app/modules/states/configuration/clusters/persistence include /app/modules/states/configuration/clusters/sql-connector diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteBenchmarkArguments.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteBenchmarkArguments.java index 594fa1f1b5907..ba96b6c6684d7 100644 --- a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteBenchmarkArguments.java +++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteBenchmarkArguments.java @@ -20,8 +20,8 @@ import com.beust.jcommander.Parameter; import org.apache.ignite.IgniteDataStreamer; import org.apache.ignite.cache.CacheWriteSynchronizationMode; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.util.tostring.GridToStringBuilder; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.transactions.TransactionConcurrency; @@ -206,7 +206,7 @@ public class IgniteBenchmarkArguments { /** */ @Parameter(names = {"-ps", "--pageSize"}, description = "Page size") - private int pageSize = MemoryConfiguration.DFLT_PAGE_SIZE; + private int pageSize = DataStorageConfiguration.DFLT_PAGE_SIZE; /** */ @Parameter(names = {"-sl", "--stringLength"}, description = "Test string length") @@ -253,7 
+253,7 @@ public class IgniteBenchmarkArguments { private int streamerBufSize = IgniteDataStreamer.DFLT_PER_NODE_BUFFER_SIZE; /** - * @return {@code True} if need set {@link PersistentStoreConfiguration}. + * @return {@code True} if need set {@link DataStorageConfiguration}. */ public boolean persistentStoreEnabled() { return persistentStoreEnabled; diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java index 35fa9490b200f..9770fa367283a 100644 --- a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java +++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java @@ -28,10 +28,9 @@ import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.ConnectorConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.configuration.TransactionConfiguration; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.typedef.internal.U; @@ -156,24 +155,24 @@ public IgniteNode(boolean clientMode, Ignite ignite) { c.setCommunicationSpi(commSpi); - if (args.getPageSize() != MemoryConfiguration.DFLT_PAGE_SIZE) { - MemoryConfiguration memCfg = c.getMemoryConfiguration(); + if (args.getPageSize() != DataStorageConfiguration.DFLT_PAGE_SIZE) { + DataStorageConfiguration memCfg = c.getDataStorageConfiguration(); if (memCfg == null) { - memCfg = new MemoryConfiguration(); + memCfg = new DataStorageConfiguration(); - c.setMemoryConfiguration(memCfg); + c.setDataStorageConfiguration(memCfg); } 
memCfg.setPageSize(args.getPageSize()); } if (args.persistentStoreEnabled()) { - PersistentStoreConfiguration pcCfg = new PersistentStoreConfiguration(); + DataStorageConfiguration pcCfg = new DataStorageConfiguration(); c.setBinaryConfiguration(new BinaryConfiguration().setCompactFooter(false)); - c.setPersistentStoreConfiguration(pcCfg); + c.setDataStorageConfiguration(pcCfg); } ignite = IgniteSpring.start(c, appCtx); From 6c58b4ac7c4527d583de49c4d8b250436273294c Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Fri, 20 Oct 2017 13:43:44 +0300 Subject: [PATCH 052/243] IGNITE-6030 Fixed misspelled metric --- .../src/main/java/org/apache/ignite/DataStorageMetrics.java | 2 +- .../java/org/apache/ignite/DataStorageMetricsAdapter.java | 2 +- .../processors/cache/persistence/DataStorageMetricsImpl.java | 2 +- .../cache/persistence/DataStorageMetricsSnapshot.java | 4 ++-- .../ignite/internal/visor/node/VisorPersistenceMetrics.java | 2 +- .../org/apache/ignite/mxbean/DataStorageMetricsMXBean.java | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java b/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java index 87095f6ee9618..e26bb1fe5b225 100644 --- a/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java @@ -61,7 +61,7 @@ public interface DataStorageMetrics { * * @return Total checkpoint duration in milliseconds. */ - public long getLastCheckpointingDuration(); + public long getLastCheckpointDuration(); /** * Gets the duration of last checkpoint lock wait in milliseconds. 
diff --git a/modules/core/src/main/java/org/apache/ignite/DataStorageMetricsAdapter.java b/modules/core/src/main/java/org/apache/ignite/DataStorageMetricsAdapter.java index 6bb4b7e47b635..431e264dcb2ac 100644 --- a/modules/core/src/main/java/org/apache/ignite/DataStorageMetricsAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/DataStorageMetricsAdapter.java @@ -61,7 +61,7 @@ public static DataStorageMetricsAdapter valueOf(DataStorageMetrics delegate) { /** {@inheritDoc} */ @Override public long getLastCheckpointingDuration() { - return delegate.getLastCheckpointingDuration(); + return delegate.getLastCheckpointDuration(); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java index 16707aaaf2f44..6d196dc0534f5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsImpl.java @@ -127,7 +127,7 @@ public DataStorageMetricsImpl( } /** {@inheritDoc} */ - @Override public long getLastCheckpointingDuration() { + @Override public long getLastCheckpointDuration() { if (!metricsEnabled) return 0; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java index 484138710129f..5bbb0e1086c4f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStorageMetricsSnapshot.java @@ -67,7 +67,7 @@ public DataStorageMetricsSnapshot(DataStorageMetrics 
metrics) { walWritingRate = metrics.getWalWritingRate(); walArchiveSegments = metrics.getWalArchiveSegments(); walFsyncTimeAvg = metrics.getWalFsyncTimeAverage(); - lastCpDuration = metrics.getLastCheckpointingDuration(); + lastCpDuration = metrics.getLastCheckpointDuration(); lastCpLockWaitDuration = metrics.getLastCheckpointLockWaitDuration(); lastCpMmarkDuration = metrics.getLastCheckpointMarkDuration(); lastCpPagesWriteDuration = metrics.getLastCheckpointPagesWriteDuration(); @@ -98,7 +98,7 @@ public DataStorageMetricsSnapshot(DataStorageMetrics metrics) { } /** {@inheritDoc} */ - @Override public long getLastCheckpointingDuration() { + @Override public long getLastCheckpointDuration() { return lastCpDuration; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java index 165855c0d5237..d7aed5ff4d622 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java @@ -81,7 +81,7 @@ public VisorPersistenceMetrics(DataStorageMetrics metrics) { walWritingRate = metrics.getWalWritingRate(); walArchiveSegments = metrics.getWalArchiveSegments(); walFsyncTimeAvg = metrics.getWalFsyncTimeAverage(); - lastCpDuration = metrics.getLastCheckpointingDuration(); + lastCpDuration = metrics.getLastCheckpointDuration(); lastCpLockWaitDuration = metrics.getLastCheckpointLockWaitDuration(); lastCpMmarkDuration = metrics.getLastCheckpointMarkDuration(); lastCpPagesWriteDuration = metrics.getLastCheckpointPagesWriteDuration(); diff --git a/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java b/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java index f0fb631e6d52e..40410cb58e492 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java +++ b/modules/core/src/main/java/org/apache/ignite/mxbean/DataStorageMetricsMXBean.java @@ -42,7 +42,7 @@ public interface DataStorageMetricsMXBean extends DataStorageMetrics { /** {@inheritDoc} */ @MXBeanDescription("Duration of the last checkpoint in milliseconds.") - @Override long getLastCheckpointingDuration(); + @Override long getLastCheckpointDuration(); /** {@inheritDoc} */ @MXBeanDescription("Duration of the checkpoint lock wait in milliseconds.") From 8ee033fdc50b11c6913e1b6ddc100c28f6bf4341 Mon Sep 17 00:00:00 2001 From: Pavel Tupitsyn Date: Fri, 20 Oct 2017 14:38:11 +0300 Subject: [PATCH 053/243] IGNITE-6515 .NET: Enable persistence on per-cache basis This closes #2891 --- .../cluster/PlatformClusterGroup.java | 90 ++++ .../utils/PlatformConfigurationUtils.java | 174 ++++++- .../Apache.Ignite.Core.Tests.csproj | 5 +- .../Cache/CacheConfigurationTest.cs | 6 + .../Cache/DataRegionMetricsTest.cs | 153 ++++++ .../Cache/DataStorageMetricsTest.cs | 107 ++++ .../Cache/MemoryMetricsTest.cs | 1 + .../Cache/PersistenceTest.cs | 235 +++++++++ ...Test.cs => PersistentStoreTestObsolete.cs} | 5 +- .../Config/full-config.xml | 18 + .../Config/spring-test.xml | 18 +- .../IgniteConfigurationSerializerTest.cs | 135 ++++- .../IgniteConfigurationTest.cs | 227 ++++----- .../Apache.Ignite.Core.csproj | 10 + .../Cache/Configuration/CacheConfiguration.cs | 21 +- .../Configuration/DataPageEvictionMode.cs | 3 + .../Configuration/MemoryConfiguration.cs | 5 + .../MemoryPolicyConfiguration.cs | 3 + .../Cache/IMemoryMetrics.cs | 4 + .../Configuration/CheckpointWriteOrder.cs | 37 ++ .../Configuration/DataPageEvictionMode.cs | 59 +++ .../Configuration/DataRegionConfiguration.cs | 213 ++++++++ .../Configuration/DataStorageConfiguration.cs | 466 ++++++++++++++++++ .../Configuration/WalMode.cs | 45 ++ .../Apache.Ignite.Core/IDataRegionMetrics.cs | 55 +++ .../Apache.Ignite.Core/IDataStorageMetrics.cs | 87 
++++ .../dotnet/Apache.Ignite.Core/IIgnite.cs | 33 ++ .../Apache.Ignite.Core/IgniteConfiguration.cs | 40 +- .../IgniteConfigurationSection.xsd | 273 +++++++++- .../Impl/Cache/MemoryMetrics.cs | 2 + .../Impl/Cluster/ClusterGroupImpl.cs | 53 ++ .../IgniteConfigurationXmlSerializer.cs | 11 +- .../Impl/DataRegionMetrics.cs | 61 +++ .../Impl/DataStorageMetrics.cs | 87 ++++ .../dotnet/Apache.Ignite.Core/Impl/Ignite.cs | 22 + .../PersistentStore/PersistentStoreMetrics.cs | 2 + .../PersistentStore/CheckpointWriteOrder.cs | 3 + .../IPersistentStoreMetrics.cs | 2 + .../PersistentStoreConfiguration.cs | 4 + .../PersistentStore/WalMode.cs | 3 + 40 files changed, 2614 insertions(+), 164 deletions(-) create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataRegionMetricsTest.cs create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataStorageMetricsTest.cs create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/PersistenceTest.cs rename modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/{PersistentStoreTest.cs => PersistentStoreTestObsolete.cs} (96%) create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/Configuration/CheckpointWriteOrder.cs create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataPageEvictionMode.cs create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataRegionConfiguration.cs create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataStorageConfiguration.cs create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/Configuration/WalMode.cs create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/IDataRegionMetrics.cs create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/IDataStorageMetrics.cs create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/Impl/DataRegionMetrics.cs create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core/Impl/DataStorageMetrics.cs diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cluster/PlatformClusterGroup.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cluster/PlatformClusterGroup.java index 7c1c03e2214c4..ef382d61e0abf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cluster/PlatformClusterGroup.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cluster/PlatformClusterGroup.java @@ -21,6 +21,8 @@ import java.util.Collection; import java.util.UUID; +import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteCluster; @@ -142,6 +144,14 @@ public class PlatformClusterGroup extends PlatformAbstractTarget { /** */ private static final int OP_GET_SERVICES = 34; + /** */ + private static final int OP_DATA_REGION_METRICS = 35; + + /** */ + private static final int OP_DATA_REGION_METRICS_BY_NAME = 36; + + /** */ + private static final int OP_DATA_STORAGE_METRICS = 37; /** Projection. 
*/ private final ClusterGroupEx prj; @@ -187,6 +197,26 @@ public PlatformClusterGroup(PlatformContext platformCtx, ClusterGroupEx prj) { break; } + case OP_DATA_STORAGE_METRICS: { + DataStorageMetrics metrics = prj.ignite().dataStorageMetrics(); + + writeDataStorageMetrics(writer, metrics); + + break; + } + + case OP_DATA_REGION_METRICS: { + Collection metrics = prj.ignite().dataRegionMetrics(); + + writer.writeInt(metrics.size()); + + for (DataRegionMetrics m : metrics) { + writeDataRegionMetrics(writer, m); + } + + break; + } + default: super.processOutStream(type, writer); } @@ -287,6 +317,22 @@ public PlatformClusterGroup(PlatformContext platformCtx, ClusterGroupEx prj) { break; } + case OP_DATA_REGION_METRICS_BY_NAME: { + String name = reader.readString(); + + DataRegionMetrics metrics = platformCtx.kernalContext().grid().dataRegionMetrics(name); + + if (metrics != null) { + writer.writeBoolean(true); + writeDataRegionMetrics(writer, metrics); + } + else { + writer.writeBoolean(false); + } + + break; + } + default: super.processInStreamOutStream(type, reader, writer); } @@ -479,6 +525,7 @@ private Collection topology(long topVer) { * @param writer Writer. * @param metrics Metrics. */ + @SuppressWarnings("deprecation") private static void writeMemoryMetrics(BinaryRawWriter writer, MemoryMetrics metrics) { assert writer != null; assert metrics != null; @@ -491,12 +538,31 @@ private static void writeMemoryMetrics(BinaryRawWriter writer, MemoryMetrics met writer.writeFloat(metrics.getPagesFillFactor()); } + /** + * Writes the data region metrics. + * + * @param writer Writer. + * @param metrics Metrics. 
+ */ + private static void writeDataRegionMetrics(BinaryRawWriter writer, DataRegionMetrics metrics) { + assert writer != null; + assert metrics != null; + + writer.writeString(metrics.getName()); + writer.writeLong(metrics.getTotalAllocatedPages()); + writer.writeFloat(metrics.getAllocationRate()); + writer.writeFloat(metrics.getEvictionRate()); + writer.writeFloat(metrics.getLargeEntriesPagesPercentage()); + writer.writeFloat(metrics.getPagesFillFactor()); + } + /** * Writes persistent store metrics. * * @param writer Writer. * @param metrics Metrics */ + @SuppressWarnings("deprecation") private void writePersistentStoreMetrics(BinaryRawWriter writer, PersistenceMetrics metrics) { assert writer != null; assert metrics != null; @@ -514,4 +580,28 @@ private void writePersistentStoreMetrics(BinaryRawWriter writer, PersistenceMetr writer.writeLong(metrics.getLastCheckpointDataPagesNumber()); writer.writeLong(metrics.getLastCheckpointCopiedOnWritePagesNumber()); } + + /** + * Writes data storage metrics. + * + * @param writer Writer. 
+ * @param metrics Metrics + */ + private void writeDataStorageMetrics(BinaryRawWriter writer, DataStorageMetrics metrics) { + assert writer != null; + assert metrics != null; + + writer.writeFloat(metrics.getWalLoggingRate()); + writer.writeFloat(metrics.getWalWritingRate()); + writer.writeInt(metrics.getWalArchiveSegments()); + writer.writeFloat(metrics.getWalFsyncTimeAverage()); + writer.writeLong(metrics.getLastCheckpointDuration()); + writer.writeLong(metrics.getLastCheckpointLockWaitDuration()); + writer.writeLong(metrics.getLastCheckpointMarkDuration()); + writer.writeLong(metrics.getLastCheckpointPagesWriteDuration()); + writer.writeLong(metrics.getLastCheckpointFsyncDuration()); + writer.writeLong(metrics.getLastCheckpointTotalPagesNumber()); + writer.writeLong(metrics.getLastCheckpointDataPagesNumber()); + writer.writeLong(metrics.getLastCheckpointCopiedOnWritePagesNumber()); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java index 66160fbb782fb..10a1f27af0628 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java @@ -57,6 +57,8 @@ import org.apache.ignite.configuration.CheckpointWriteOrder; import org.apache.ignite.configuration.ClientConnectorConfiguration; import org.apache.ignite.configuration.DataPageEvictionMode; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.configuration.MemoryPolicyConfiguration; @@ -181,10 +183,11 @@ public static 
CacheConfiguration readCacheConfiguration(BinaryRawReaderEx in) { ccfg.setWriteThrough(in.readBoolean()); ccfg.setStatisticsEnabled(in.readBoolean()); - String memoryPolicyName = in.readString(); + String dataRegionName = in.readString(); - if (memoryPolicyName != null) - ccfg.setMemoryPolicyName(memoryPolicyName); + if (dataRegionName != null) + //noinspection deprecation + ccfg.setMemoryPolicyName(dataRegionName); ccfg.setPartitionLossPolicy(PartitionLossPolicy.fromOrdinal((byte)in.readInt())); ccfg.setGroupName(in.readString()); @@ -714,6 +717,9 @@ public static void readIgniteConfiguration(BinaryRawReaderEx in, IgniteConfigura if (in.readBoolean()) cfg.setPersistentStoreConfiguration(readPersistentStoreConfiguration(in)); + if (in.readBoolean()) + cfg.setDataStorageConfiguration(readDataStorageConfiguration(in)); + readPluginConfiguration(cfg, in); readLocalEventListeners(cfg, in); @@ -870,6 +876,7 @@ public static void writeCacheConfiguration(BinaryRawWriter writer, CacheConfigur writer.writeBoolean(ccfg.isReadThrough()); writer.writeBoolean(ccfg.isWriteThrough()); writer.writeBoolean(ccfg.isStatisticsEnabled()); + //noinspection deprecation writer.writeString(ccfg.getMemoryPolicyName()); writer.writeInt(ccfg.getPartitionLossPolicy().ordinal()); writer.writeString(ccfg.getGroupName()); @@ -1191,6 +1198,8 @@ else if (evtStorageSpi instanceof MemoryEventStorageSpi) { writePersistentStoreConfiguration(w, cfg.getPersistentStoreConfiguration()); + writeDataStorageConfiguration(w, cfg.getDataStorageConfiguration()); + w.writeString(cfg.getIgniteHome()); w.writeLong(ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getInit()); @@ -1400,6 +1409,7 @@ private static PlatformCachePluginConfigurationClosure cachePluginConfiguration( * @param in Reader * @return Config. 
*/ + @SuppressWarnings("deprecation") private static MemoryConfiguration readMemoryConfiguration(BinaryRawReader in) { MemoryConfiguration res = new MemoryConfiguration(); @@ -1443,6 +1453,7 @@ private static MemoryConfiguration readMemoryConfiguration(BinaryRawReader in) { * @param w Writer. * @param cfg Config. */ + @SuppressWarnings("deprecation") private static void writeMemoryConfiguration(BinaryRawWriter w, MemoryConfiguration cfg) { if (cfg == null) { w.writeBoolean(false); @@ -1572,6 +1583,7 @@ private static void writeClientConnectorConfiguration(BinaryRawWriter w, ClientC * @param in Reader. * @return Config. */ + @SuppressWarnings("deprecation") private static PersistentStoreConfiguration readPersistentStoreConfiguration(BinaryRawReader in) { return new PersistentStoreConfiguration() .setPersistentStorePath(in.readString()) @@ -1597,11 +1609,65 @@ private static PersistentStoreConfiguration readPersistentStoreConfiguration(Bin .setWriteThrottlingEnabled(in.readBoolean()); } + /** + * Reads the data storage configuration. + * + * @param in Reader. + * @return Config. 
+ */ + private static DataStorageConfiguration readDataStorageConfiguration(BinaryRawReader in) { + DataStorageConfiguration res = new DataStorageConfiguration() + .setStoragePath(in.readString()) + .setCheckpointFrequency(in.readLong()) + .setCheckpointPageBufferSize(in.readLong()) + .setCheckpointThreads(in.readInt()) + .setLockWaitTime((int) in.readLong()) + .setWalHistorySize(in.readInt()) + .setWalSegments(in.readInt()) + .setWalSegmentSize(in.readInt()) + .setWalPath(in.readString()) + .setWalArchivePath(in.readString()) + .setWalMode(WALMode.fromOrdinal(in.readInt())) + .setWalThreadLocalBufferSize(in.readInt()) + .setWalFlushFrequency((int) in.readLong()) + .setWalFsyncDelayNanos(in.readLong()) + .setWalRecordIteratorBufferSize(in.readInt()) + .setAlwaysWriteFullPages(in.readBoolean()) + .setMetricsEnabled(in.readBoolean()) + .setMetricsSubIntervalCount(in.readInt()) + .setMetricsRateTimeInterval(in.readLong()) + .setCheckpointWriteOrder(CheckpointWriteOrder.fromOrdinal(in.readInt())) + .setWriteThrottlingEnabled(in.readBoolean()) + .setSystemRegionInitialSize(in.readLong()) + .setSystemRegionMaxSize(in.readLong()) + .setPageSize(in.readInt()) + .setConcurrencyLevel(in.readInt()); + + int cnt = in.readInt(); + + if (cnt > 0) { + DataRegionConfiguration[] regs = new DataRegionConfiguration[cnt]; + + for (int i = 0; i < cnt; i++) { + regs[i] = readDataRegionConfiguration(in); + } + + res.setDataRegionConfigurations(regs); + } + + if (in.readBoolean()) { + res.setDefaultDataRegionConfiguration(readDataRegionConfiguration(in)); + } + + return res; + } + /** * Writes the persistent store configuration. * * @param w Writer. */ + @SuppressWarnings("deprecation") private static void writePersistentStoreConfiguration(BinaryRawWriter w, PersistentStoreConfiguration cfg) { assert w != null; @@ -1635,6 +1701,108 @@ private static void writePersistentStoreConfiguration(BinaryRawWriter w, Persist } } + /** + * Writes the data storage configuration. 
+ * + * @param w Writer. + */ + private static void writeDataStorageConfiguration(BinaryRawWriter w, DataStorageConfiguration cfg) { + assert w != null; + + if (cfg != null) { + w.writeBoolean(true); + + w.writeString(cfg.getStoragePath()); + w.writeLong(cfg.getCheckpointFrequency()); + w.writeLong(cfg.getCheckpointPageBufferSize()); + w.writeInt(cfg.getCheckpointThreads()); + w.writeLong(cfg.getLockWaitTime()); + w.writeInt(cfg.getWalHistorySize()); + w.writeInt(cfg.getWalSegments()); + w.writeInt(cfg.getWalSegmentSize()); + w.writeString(cfg.getWalPath()); + w.writeString(cfg.getWalArchivePath()); + w.writeInt(cfg.getWalMode().ordinal()); + w.writeInt(cfg.getWalThreadLocalBufferSize()); + w.writeLong(cfg.getWalFlushFrequency()); + w.writeLong(cfg.getWalFsyncDelayNanos()); + w.writeInt(cfg.getWalRecordIteratorBufferSize()); + w.writeBoolean(cfg.isAlwaysWriteFullPages()); + w.writeBoolean(cfg.isMetricsEnabled()); + w.writeInt(cfg.getMetricsSubIntervalCount()); + w.writeLong(cfg.getMetricsRateTimeInterval()); + w.writeInt(cfg.getCheckpointWriteOrder().ordinal()); + w.writeBoolean(cfg.isWriteThrottlingEnabled()); + w.writeLong(cfg.getSystemRegionInitialSize()); + w.writeLong(cfg.getSystemRegionMaxSize()); + w.writeInt(cfg.getPageSize()); + w.writeInt(cfg.getConcurrencyLevel()); + + if (cfg.getDataRegionConfigurations() != null) { + w.writeInt(cfg.getDataRegionConfigurations().length); + + for (DataRegionConfiguration d : cfg.getDataRegionConfigurations()) { + writeDataRegionConfiguration(w, d); + } + } else { + w.writeInt(0); + } + + if (cfg.getDefaultDataRegionConfiguration() != null) { + w.writeBoolean(true); + writeDataRegionConfiguration(w, cfg.getDefaultDataRegionConfiguration()); + } else { + w.writeBoolean(false); + } + } else { + w.writeBoolean(false); + } + } + + /** + * Writes the data region configuration. + * + * @param w Writer. 
+ */ + private static void writeDataRegionConfiguration(BinaryRawWriter w, DataRegionConfiguration cfg) { + assert w != null; + assert cfg != null; + + w.writeString(cfg.getName()); + w.writeBoolean(cfg.isPersistenceEnabled()); + w.writeLong(cfg.getInitialSize()); + w.writeLong(cfg.getMaxSize()); + w.writeString(cfg.getSwapPath()); + w.writeInt(cfg.getPageEvictionMode().ordinal()); + w.writeDouble(cfg.getEvictionThreshold()); + w.writeInt(cfg.getEmptyPagesPoolSize()); + w.writeBoolean(cfg.isMetricsEnabled()); + w.writeInt(cfg.getMetricsSubIntervalCount()); + w.writeLong(cfg.getMetricsRateTimeInterval()); + } + + /** + * Reads the data region configuration. + * + * @param r Reader. + */ + private static DataRegionConfiguration readDataRegionConfiguration(BinaryRawReader r) { + assert r != null; + + return new DataRegionConfiguration() + .setName(r.readString()) + .setPersistenceEnabled(r.readBoolean()) + .setInitialSize(r.readLong()) + .setMaxSize(r.readLong()) + .setSwapPath(r.readString()) + .setPageEvictionMode(DataPageEvictionMode.fromOrdinal(r.readInt())) + .setEvictionThreshold(r.readDouble()) + .setEmptyPagesPoolSize(r.readInt()) + .setMetricsEnabled(r.readBoolean()) + .setMetricsSubIntervalCount(r.readInt()) + .setMetricsRateTimeInterval(r.readLong()); + } + /** * Reads the plugin configuration. 
* diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj index ec85ca2516f13..e5e6da99fa9c7 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj @@ -78,7 +78,10 @@ - + + + + diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheConfigurationTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheConfigurationTest.cs index ddf669df04b89..4f13172914f53 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheConfigurationTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheConfigurationTest.cs @@ -71,6 +71,7 @@ public void FixtureSetUp() }, IgniteInstanceName = CacheName, BinaryConfiguration = new BinaryConfiguration(typeof(Entity)), +#pragma warning disable 618 MemoryConfiguration = new MemoryConfiguration { MemoryPolicies = new[] @@ -83,6 +84,7 @@ public void FixtureSetUp() } } }, +#pragma warning restore 618 SpringConfigUrl = "Config\\cache-default.xml" }; @@ -297,7 +299,9 @@ private static void AssertConfigsAreEqual(CacheConfiguration x, CacheConfigurati Assert.AreEqual(x.WriteBehindFlushFrequency, y.WriteBehindFlushFrequency); Assert.AreEqual(x.WriteBehindFlushSize, y.WriteBehindFlushSize); Assert.AreEqual(x.EnableStatistics, y.EnableStatistics); +#pragma warning disable 618 Assert.AreEqual(x.MemoryPolicyName, y.MemoryPolicyName); +#pragma warning restore 618 Assert.AreEqual(x.PartitionLossPolicy, y.PartitionLossPolicy); Assert.AreEqual(x.WriteBehindCoalescing, y.WriteBehindCoalescing); Assert.AreEqual(x.GroupName, y.GroupName); @@ -626,7 +630,9 @@ private static CacheConfiguration GetCustomCacheConfiguration(string name = null }, ExpiryPolicyFactory = new ExpiryFactory(), EnableStatistics = true, +#pragma warning disable 618 
MemoryPolicyName = "myMemPolicy", +#pragma warning restore 618 PartitionLossPolicy = PartitionLossPolicy.ReadOnlySafe, PluginConfigurations = new[] { new MyPluginConfiguration() }, SqlIndexMaxInlineSize = 10000 diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataRegionMetricsTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataRegionMetricsTest.cs new file mode 100644 index 0000000000000..dd1cf5375d3b5 --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataRegionMetricsTest.cs @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Tests.Cache +{ + using System.Linq; + using Apache.Ignite.Core.Cache.Configuration; + using Apache.Ignite.Core.Configuration; + using NUnit.Framework; + + /// + /// Data region metrics test. + /// + public class DataRegionMetricsTest + { + /** */ + private const string RegionWithMetrics = "regWithMetrics"; + + /** */ + private const string RegionNoMetrics = "regNoMetrics"; + + /// + /// Tests the memory metrics. + /// + [Test] + public void TestMemoryMetrics() + { + var ignite = StartIgniteWithTwoDataRegions(); + + // Verify metrics. 
+ var metrics = ignite.GetDataRegionMetrics().OrderBy(x => x.Name).ToArray(); + Assert.AreEqual(3, metrics.Length); // two defined plus system. + + var emptyMetrics = metrics[0]; + Assert.AreEqual(RegionNoMetrics, emptyMetrics.Name); + AssertMetricsAreEmpty(emptyMetrics); + + var memMetrics = metrics[1]; + Assert.AreEqual(RegionWithMetrics, memMetrics.Name); + Assert.Greater(memMetrics.AllocationRate, 0); + Assert.AreEqual(0, memMetrics.EvictionRate); + Assert.AreEqual(0, memMetrics.LargeEntriesPagesPercentage); + Assert.Greater(memMetrics.PageFillFactor, 0); + Assert.Greater(memMetrics.TotalAllocatedPages, 1000); + + var sysMetrics = metrics[2]; + Assert.AreEqual("sysMemPlc", sysMetrics.Name); + AssertMetricsAreEmpty(sysMetrics); + + // Metrics by name. + emptyMetrics = ignite.GetDataRegionMetrics(RegionNoMetrics); + Assert.AreEqual(RegionNoMetrics, emptyMetrics.Name); + AssertMetricsAreEmpty(emptyMetrics); + + memMetrics = ignite.GetDataRegionMetrics(RegionWithMetrics); + Assert.AreEqual(RegionWithMetrics, memMetrics.Name); + Assert.Greater(memMetrics.AllocationRate, 0); + Assert.AreEqual(0, memMetrics.EvictionRate); + Assert.AreEqual(0, memMetrics.LargeEntriesPagesPercentage); + Assert.Greater(memMetrics.PageFillFactor, 0); + Assert.Greater(memMetrics.TotalAllocatedPages, 1000); + + sysMetrics = ignite.GetDataRegionMetrics("sysMemPlc"); + Assert.AreEqual("sysMemPlc", sysMetrics.Name); + AssertMetricsAreEmpty(sysMetrics); + + // Invalid name. + Assert.IsNull(ignite.GetDataRegionMetrics("boo")); + } + + /// + /// Asserts that metrics are empty. + /// + private static void AssertMetricsAreEmpty(IDataRegionMetrics metrics) + { + Assert.AreEqual(0, metrics.AllocationRate); + Assert.AreEqual(0, metrics.EvictionRate); + Assert.AreEqual(0, metrics.LargeEntriesPagesPercentage); + Assert.AreEqual(0, metrics.PageFillFactor); + Assert.AreEqual(0, metrics.TotalAllocatedPages); + } + + /// + /// Starts the ignite with two policies. 
+ /// + private static IIgnite StartIgniteWithTwoDataRegions() + { + var cfg = new IgniteConfiguration(TestUtils.GetTestConfiguration()) + { + DataStorageConfiguration = new DataStorageConfiguration() + { + DefaultDataRegionConfiguration = new DataRegionConfiguration + { + Name = RegionWithMetrics, + MetricsEnabled = true + }, + DataRegionConfigurations = new[] + { + new DataRegionConfiguration + { + Name = RegionNoMetrics, + MetricsEnabled = false + } + } + } + }; + + var ignite = Ignition.Start(cfg); + + // Create caches and do some things with them. + var cacheNoMetrics = ignite.CreateCache(new CacheConfiguration("cacheNoMetrics") + { + DataRegionName = RegionNoMetrics + }); + + cacheNoMetrics.Put(1, 1); + cacheNoMetrics.Get(1); + + var cacheWithMetrics = ignite.CreateCache(new CacheConfiguration("cacheWithMetrics") + { + DataRegionName = RegionWithMetrics + }); + + cacheWithMetrics.Put(1, 1); + cacheWithMetrics.Get(1); + + return ignite; + } + + /// + /// Tears down the test. + /// + [TearDown] + public void TearDown() + { + Ignition.StopAll(true); + } + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataStorageMetricsTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataStorageMetricsTest.cs new file mode 100644 index 0000000000000..b24c20b401431 --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataStorageMetricsTest.cs @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Tests.Cache +{ + using System; + using System.IO; + using System.Linq; + using Apache.Ignite.Core.Configuration; + using Apache.Ignite.Core.Impl; + using NUnit.Framework; + + /// + /// Tests . + /// + public class DataStorageMetricsTest + { + /** Temp dir for WAL. */ + private readonly string _tempDir = IgniteUtils.GetTempDirectoryName(); + + /// + /// Tests the data storage metrics. + /// + [Test] + public void TestDataStorageMetrics() + { + var cfg = new IgniteConfiguration(TestUtils.GetTestConfiguration()) + { + DataStorageConfiguration = new DataStorageConfiguration + { + CheckpointFrequency = TimeSpan.FromSeconds(1), + MetricsEnabled = true, + WalMode = WalMode.LogOnly, + DefaultDataRegionConfiguration = new DataRegionConfiguration + { + PersistenceEnabled = true, + Name = "foobar" + } + }, + WorkDirectory = _tempDir + }; + + using (var ignite = Ignition.Start(cfg)) + { + ignite.SetActive(true); + + var cache = ignite.CreateCache("c"); + + cache.PutAll(Enumerable.Range(1, 10) + .ToDictionary(x => x, x => (object) new {Name = x.ToString(), Id = x})); + + // Wait for checkpoint and metrics update and verify. 
+ IDataStorageMetrics metrics = null; + + Assert.IsTrue(TestUtils.WaitForCondition(() => + { + // ReSharper disable once AccessToDisposedClosure + metrics = ignite.GetDataStorageMetrics(); + + return metrics.LastCheckpointTotalPagesNumber > 0; + }, 10000)); + + Assert.IsNotNull(metrics); + + Assert.AreEqual(0, metrics.WalArchiveSegments); + Assert.AreEqual(0, metrics.WalFsyncTimeAverage); + + Assert.AreEqual(77, metrics.LastCheckpointTotalPagesNumber); + Assert.AreEqual(10, metrics.LastCheckpointDataPagesNumber); + Assert.AreEqual(0, metrics.LastCheckpointCopiedOnWritePagesNumber); + Assert.AreEqual(TimeSpan.Zero, metrics.LastCheckpointLockWaitDuration); + + Assert.Greater(metrics.LastCheckpointPagesWriteDuration, TimeSpan.Zero); + Assert.Greater(metrics.LastCheckpointMarkDuration, TimeSpan.Zero); + Assert.Greater(metrics.LastCheckpointDuration, TimeSpan.Zero); + Assert.Greater(metrics.LastCheckpointFsyncDuration, TimeSpan.Zero); + + Assert.Greater(metrics.LastCheckpointDuration, metrics.LastCheckpointMarkDuration); + Assert.Greater(metrics.LastCheckpointDuration, metrics.LastCheckpointPagesWriteDuration); + Assert.Greater(metrics.LastCheckpointDuration, metrics.LastCheckpointFsyncDuration); + } + } + + /// + /// Tears down the test. + /// + [TearDown] + public void TearDown() + { + Directory.Delete(_tempDir, true); + } + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/MemoryMetricsTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/MemoryMetricsTest.cs index 1aad8234e1123..7ccee94b5fd68 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/MemoryMetricsTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/MemoryMetricsTest.cs @@ -15,6 +15,7 @@ * limitations under the License. 
*/ +#pragma warning disable 618 namespace Apache.Ignite.Core.Tests.Cache { using System.Linq; diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/PersistenceTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/PersistenceTest.cs new file mode 100644 index 0000000000000..b2e4d05ca9d99 --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/PersistenceTest.cs @@ -0,0 +1,235 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Tests.Cache +{ + using System; + using System.IO; + using Apache.Ignite.Core.Cache.Configuration; + using Apache.Ignite.Core.Common; + using Apache.Ignite.Core.Configuration; + using Apache.Ignite.Core.Impl; + using NUnit.Framework; + using DataPageEvictionMode = Apache.Ignite.Core.Configuration.DataPageEvictionMode; + + /// + /// Tests disk persistence. + /// + public class PersistenceTest + { + /** Temp dir for WAL. */ + private readonly string _tempDir = IgniteUtils.GetTempDirectoryName(); + + /// + /// Tears down the test. 
+ /// + [TearDown] + public void TearDown() + { + Ignition.StopAll(true); + + if (Directory.Exists(_tempDir)) + { + Directory.Delete(_tempDir, true); + } + } + + /// + /// Tests that cache data survives node restart. + /// + [Test] + public void TestCacheDataSurvivesNodeRestart() + { + var cfg = new IgniteConfiguration(TestUtils.GetTestConfiguration()) + { + DataStorageConfiguration = new DataStorageConfiguration + { + StoragePath = Path.Combine(_tempDir, "Store"), + WalPath = Path.Combine(_tempDir, "WalStore"), + WalArchivePath = Path.Combine(_tempDir, "WalArchive"), + MetricsEnabled = true, + DefaultDataRegionConfiguration = new DataRegionConfiguration + { + PageEvictionMode = DataPageEvictionMode.Disabled, + Name = DataStorageConfiguration.DefaultDataRegionName, + PersistenceEnabled = true + }, + DataRegionConfigurations = new[] + { + new DataRegionConfiguration + { + Name = "volatileRegion", + PersistenceEnabled = false + } + } + } + }; + + const string cacheName = "persistentCache"; + const string volatileCacheName = "volatileCache"; + + // Start Ignite, put data, stop. + using (var ignite = Ignition.Start(cfg)) + { + ignite.SetActive(true); + + // Create cache with default region (persistence enabled), add data. + var cache = ignite.CreateCache(cacheName); + cache[1] = 1; + + // Check some metrics. + CheckDataStorageMetrics(ignite); + + // Create cache with non-persistent region. + var volatileCache = ignite.CreateCache(new CacheConfiguration + { + Name = volatileCacheName, + DataRegionName = "volatileRegion" + }); + volatileCache[2] = 2; + } + + // Verify directories. + Assert.IsTrue(Directory.Exists(cfg.DataStorageConfiguration.StoragePath)); + Assert.IsTrue(Directory.Exists(cfg.DataStorageConfiguration.WalPath)); + Assert.IsTrue(Directory.Exists(cfg.DataStorageConfiguration.WalArchivePath)); + + // Start Ignite, verify data survival. 
+ using (var ignite = Ignition.Start(cfg)) + { + ignite.SetActive(true); + + // Persistent cache already exists and contains data. + var cache = ignite.GetCache(cacheName); + Assert.AreEqual(1, cache[1]); + + // Non-persistent cache does not exist. + var ex = Assert.Throws(() => ignite.GetCache(volatileCacheName)); + Assert.AreEqual("Cache doesn't exist: volatileCache", ex.Message); + } + + // Delete store directory. + Directory.Delete(_tempDir, true); + + // Start Ignite, verify data loss. + using (var ignite = Ignition.Start(cfg)) + { + ignite.SetActive(true); + + Assert.IsFalse(ignite.GetCacheNames().Contains(cacheName)); + } + } + + /// + /// Checks the data storage metrics. + /// + private static void CheckDataStorageMetrics(IIgnite ignite) + { + // Check metrics. + var metrics = ignite.GetDataStorageMetrics(); + Assert.Greater(metrics.WalLoggingRate, 0); + Assert.Greater(metrics.WalWritingRate, 0); + Assert.Greater(metrics.WalFsyncTimeAverage, 0); + } + + /// + /// Tests the grid activation with persistence (inactive by default). + /// + [Test] + public void TestGridActivationWithPersistence() + { + var cfg = new IgniteConfiguration(TestUtils.GetTestConfiguration()) + { + DataStorageConfiguration = new DataStorageConfiguration + { + DefaultDataRegionConfiguration = new DataRegionConfiguration + { + PersistenceEnabled = true, + Name = "foo" + } + } + }; + + // Default config, inactive by default (IsActiveOnStart is ignored when persistence is enabled). + using (var ignite = Ignition.Start(cfg)) + { + CheckIsActive(ignite, false); + + ignite.SetActive(true); + CheckIsActive(ignite, true); + + ignite.SetActive(false); + CheckIsActive(ignite, false); + } + } + + /// + /// Tests the grid activation without persistence (active by default). 
+ /// + [Test] + public void TestGridActivationNoPersistence() + { + var cfg = TestUtils.GetTestConfiguration(); + Assert.IsTrue(cfg.IsActiveOnStart); + + using (var ignite = Ignition.Start(cfg)) + { + CheckIsActive(ignite, true); + + ignite.SetActive(false); + CheckIsActive(ignite, false); + + ignite.SetActive(true); + CheckIsActive(ignite, true); + } + + cfg.IsActiveOnStart = false; + + using (var ignite = Ignition.Start(cfg)) + { + CheckIsActive(ignite, false); + + ignite.SetActive(true); + CheckIsActive(ignite, true); + + ignite.SetActive(false); + CheckIsActive(ignite, false); + } + } + + /// + /// Checks active state. + /// + private static void CheckIsActive(IIgnite ignite, bool isActive) + { + Assert.AreEqual(isActive, ignite.IsActive()); + + if (isActive) + { + var cache = ignite.GetOrCreateCache("default"); + cache[1] = 1; + Assert.AreEqual(1, cache[1]); + } + else + { + var ex = Assert.Throws(() => ignite.GetOrCreateCache("default")); + Assert.AreEqual("Can not perform the operation because the cluster is inactive.", + ex.Message.Substring(0, 62)); + } + } + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/PersistentStoreTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/PersistentStoreTestObsolete.cs similarity index 96% rename from modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/PersistentStoreTest.cs rename to modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/PersistentStoreTestObsolete.cs index a592859d996b1..a6b9b3bcfdc61 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/PersistentStoreTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/PersistentStoreTestObsolete.cs @@ -15,6 +15,7 @@ * limitations under the License. */ +#pragma warning disable 618 // Obsolete. namespace Apache.Ignite.Core.Tests.Cache { using System.IO; @@ -24,9 +25,9 @@ namespace Apache.Ignite.Core.Tests.Cache using NUnit.Framework; /// - /// Tests the persistent store. 
+ /// Tests the persistent store. Uses the obsolete API. See for the actual API. /// - public class PersistentStoreTest + public class PersistentStoreTestObsolete { /** Temp dir for WAL. */ private readonly string _tempDir = IgniteUtils.GetTempDirectoryName(); diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml index 229d42ec98aca..1e17752c7da3f 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml @@ -131,4 +131,22 @@ + + + + + + \ No newline at end of file diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/spring-test.xml b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/spring-test.xml index 31fa3b339e070..145fb01762521 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/spring-test.xml +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/spring-test.xml @@ -43,22 +43,8 @@ - - - - - - - - - - - - - - - - + + diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs index edecccce28583..72c73e4f0d134 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs @@ -17,6 +17,7 @@ // ReSharper disable UnusedAutoPropertyAccessor.Global // ReSharper disable MemberCanBePrivate.Global +#pragma warning disable 618 namespace Apache.Ignite.Core.Tests { using System; @@ -55,6 +56,9 @@ namespace Apache.Ignite.Core.Tests using Apache.Ignite.Core.Transactions; using Apache.Ignite.NLog; using NUnit.Framework; + using CheckpointWriteOrder = Apache.Ignite.Core.PersistentStore.CheckpointWriteOrder; + using DataPageEvictionMode = 
Apache.Ignite.Core.Cache.Configuration.DataPageEvictionMode; + using WalMode = Apache.Ignite.Core.PersistentStore.WalMode; /// /// Tests serialization. @@ -213,7 +217,6 @@ public void TestPredefinedXml() Assert.AreEqual(PeerAssemblyLoadingMode.CurrentAppDomain, cfg.PeerAssemblyLoadingMode); -#pragma warning disable 618 // Obsolete var sql = cfg.SqlConnectorConfiguration; Assert.IsNotNull(sql); Assert.AreEqual("bar", sql.Host); @@ -224,7 +227,6 @@ public void TestPredefinedXml() Assert.IsTrue(sql.TcpNoDelay); Assert.AreEqual(14, sql.MaxOpenCursorsPerConnection); Assert.AreEqual(15, sql.ThreadPoolSize); -#pragma warning restore 618 var client = cfg.ClientConnectorConfiguration; Assert.IsNotNull(client); @@ -269,6 +271,56 @@ public void TestPredefinedXml() Assert.AreEqual("Apache.Ignite.Core.Tests.EventsTestLocalListeners+Listener`1" + "[Apache.Ignite.Core.Events.CacheRebalancingEvent]", rebalListener.Listener.GetType().ToString()); + + var ds = cfg.DataStorageConfiguration; + Assert.IsFalse(ds.AlwaysWriteFullPages); + Assert.AreEqual(TimeSpan.FromSeconds(1), ds.CheckpointFrequency); + Assert.AreEqual(2, ds.CheckpointPageBufferSize); + Assert.AreEqual(3, ds.CheckpointThreads); + Assert.AreEqual(4, ds.ConcurrencyLevel); + Assert.AreEqual(TimeSpan.FromSeconds(5), ds.LockWaitTime); + Assert.IsTrue(ds.MetricsEnabled); + Assert.AreEqual(6, ds.PageSize); + Assert.AreEqual("cde", ds.StoragePath); + Assert.AreEqual(TimeSpan.FromSeconds(7), ds.MetricsRateTimeInterval); + Assert.AreEqual(8, ds.MetricsSubIntervalCount); + Assert.AreEqual(9, ds.SystemRegionInitialSize); + Assert.AreEqual(10, ds.SystemRegionMaxSize); + Assert.AreEqual(11, ds.WalThreadLocalBufferSize); + Assert.AreEqual("abc", ds.WalArchivePath); + Assert.AreEqual(TimeSpan.FromSeconds(12), ds.WalFlushFrequency); + Assert.AreEqual(13, ds.WalFsyncDelayNanos); + Assert.AreEqual(14, ds.WalHistorySize); + Assert.AreEqual(Core.Configuration.WalMode.Background, ds.WalMode); + Assert.AreEqual(15, 
ds.WalRecordIteratorBufferSize); + Assert.AreEqual(16, ds.WalSegments); + Assert.AreEqual(17, ds.WalSegmentSize); + Assert.AreEqual("wal-store", ds.WalPath); + Assert.IsTrue(ds.WriteThrottlingEnabled); + + var dr = ds.DataRegionConfigurations.Single(); + Assert.AreEqual(1, dr.EmptyPagesPoolSize); + Assert.AreEqual(2, dr.EvictionThreshold); + Assert.AreEqual(3, dr.InitialSize); + Assert.AreEqual(4, dr.MaxSize); + Assert.AreEqual("reg2", dr.Name); + Assert.AreEqual(Core.Configuration.DataPageEvictionMode.RandomLru, dr.PageEvictionMode); + Assert.AreEqual(TimeSpan.FromSeconds(1), dr.MetricsRateTimeInterval); + Assert.AreEqual(5, dr.MetricsSubIntervalCount); + Assert.AreEqual("swap", dr.SwapPath); + Assert.IsTrue(dr.MetricsEnabled); + + dr = ds.DefaultDataRegionConfiguration; + Assert.AreEqual(2, dr.EmptyPagesPoolSize); + Assert.AreEqual(3, dr.EvictionThreshold); + Assert.AreEqual(4, dr.InitialSize); + Assert.AreEqual(5, dr.MaxSize); + Assert.AreEqual("reg1", dr.Name); + Assert.AreEqual(Core.Configuration.DataPageEvictionMode.Disabled, dr.PageEvictionMode); + Assert.AreEqual(TimeSpan.FromSeconds(3), dr.MetricsRateTimeInterval); + Assert.AreEqual(6, dr.MetricsSubIntervalCount); + Assert.AreEqual("swap2", dr.SwapPath); + Assert.IsFalse(dr.MetricsEnabled); } /// @@ -574,7 +626,7 @@ private static IgniteConfiguration GetTestConfig() Serializer = new BinaryReflectiveSerializer() } }, - Types = new[] {typeof (string).FullName}, + Types = new[] {typeof(string).FullName}, IdMapper = new IdMapper(), KeepDeserialized = true, NameMapper = new NameMapper(), @@ -601,7 +653,7 @@ private static IgniteConfiguration GetTestConfig() { Fields = new[] { - new QueryField("field", typeof (int)) + new QueryField("field", typeof(int)) { IsKeyField = true, NotNull = true @@ -619,8 +671,8 @@ private static IgniteConfiguration GetTestConfig() { new QueryAlias("field.field", "fld") }, - KeyType = typeof (string), - ValueType = typeof (long), + KeyType = typeof(string), + ValueType = typeof(long), 
TableName = "table-1", KeyFieldName = "k", ValueFieldName = "v" @@ -645,12 +697,16 @@ private static IgniteConfiguration GetTestConfig() NearStartSize = 5, EvictionPolicy = new FifoEvictionPolicy { - BatchSize = 19, MaxMemorySize = 1024, MaxSize = 555 + BatchSize = 19, + MaxMemorySize = 1024, + MaxSize = 555 } }, EvictionPolicy = new LruEvictionPolicy { - BatchSize = 18, MaxMemorySize = 1023, MaxSize = 554 + BatchSize = 18, + MaxMemorySize = 1023, + MaxSize = 554 }, AffinityFunction = new RendezvousAffinityFunction { @@ -715,7 +771,7 @@ private static IgniteConfiguration GetTestConfig() WorkDirectory = @"c:\work", IsDaemon = true, UserAttributes = Enumerable.Range(1, 10).ToDictionary(x => x.ToString(), - x => x%2 == 0 ? (object) x : new FooClass {Bar = x.ToString()}), + x => x % 2 == 0 ? (object) x : new FooClass {Bar = x.ToString()}), AtomicConfiguration = new AtomicConfiguration { CacheMode = CacheMode.Replicated, @@ -755,7 +811,7 @@ private static IgniteConfiguration GetTestConfig() FailureDetectionTimeout = TimeSpan.FromMinutes(2), ClientFailureDetectionTimeout = TimeSpan.FromMinutes(3), LongQueryWarningTimeout = TimeSpan.FromDays(4), - PluginConfigurations = new[] {new TestIgnitePluginConfiguration() }, + PluginConfigurations = new[] {new TestIgnitePluginConfiguration()}, EventStorageSpi = new MemoryEventStorageSpi { ExpirationTimeout = TimeSpan.FromMilliseconds(12345), @@ -838,6 +894,65 @@ private static IgniteConfiguration GetTestConfig() EventTypes = new[] {1, 2}, Listener = new MyEventListener() } + }, + DataStorageConfiguration = new DataStorageConfiguration + { + AlwaysWriteFullPages = true, + CheckpointFrequency = TimeSpan.FromSeconds(25), + CheckpointPageBufferSize = 28 * 1024 * 1024, + CheckpointThreads = 2, + LockWaitTime = TimeSpan.FromSeconds(5), + StoragePath = Path.GetTempPath(), + WalThreadLocalBufferSize = 64 * 1024, + WalArchivePath = Path.GetTempPath(), + WalFlushFrequency = TimeSpan.FromSeconds(3), + WalFsyncDelayNanos = 3, + WalHistorySize 
= 10, + WalMode = Core.Configuration.WalMode.None, + WalRecordIteratorBufferSize = 32 * 1024 * 1024, + WalSegments = 6, + WalSegmentSize = 5 * 1024 * 1024, + WalPath = Path.GetTempPath(), + MetricsEnabled = true, + MetricsSubIntervalCount = 7, + MetricsRateTimeInterval = TimeSpan.FromSeconds(9), + CheckpointWriteOrder = Core.Configuration.CheckpointWriteOrder.Sequential, + WriteThrottlingEnabled = true, + SystemRegionInitialSize = 64 * 1024 * 1024, + SystemRegionMaxSize = 128 * 1024 * 1024, + ConcurrencyLevel = 1, + PageSize = 5 * 1024, + DefaultDataRegionConfiguration = new DataRegionConfiguration + { + Name = "reg1", + EmptyPagesPoolSize = 50, + EvictionThreshold = 0.8, + InitialSize = 100 * 1024 * 1024, + MaxSize = 150 * 1024 * 1024, + MetricsEnabled = true, + PageEvictionMode = Core.Configuration.DataPageEvictionMode.RandomLru, + PersistenceEnabled = false, + MetricsRateTimeInterval = TimeSpan.FromMinutes(2), + MetricsSubIntervalCount = 6, + SwapPath = Path.GetTempPath() + }, + DataRegionConfigurations = new[] + { + new DataRegionConfiguration + { + Name = "reg2", + EmptyPagesPoolSize = 51, + EvictionThreshold = 0.7, + InitialSize = 101 * 1024 * 1024, + MaxSize = 151 * 1024 * 1024, + MetricsEnabled = false, + PageEvictionMode = Core.Configuration.DataPageEvictionMode.RandomLru, + PersistenceEnabled = false, + MetricsRateTimeInterval = TimeSpan.FromMinutes(3), + MetricsSubIntervalCount = 7, + SwapPath = Path.GetTempPath() + } + } } }; } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs index 73636d1812567..c8c06b23c0933 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs @@ -41,6 +41,7 @@ namespace Apache.Ignite.Core.Tests using Apache.Ignite.Core.Tests.Plugin; using Apache.Ignite.Core.Transactions; using NUnit.Framework; + 
using WalMode = Apache.Ignite.Core.PersistentStore.WalMode; /// /// Tests code-based configuration. @@ -64,6 +65,8 @@ public void TestDefaultConfigurationProperties() { CheckDefaultProperties(new IgniteConfiguration()); CheckDefaultProperties(new PersistentStoreConfiguration()); + CheckDefaultProperties(new DataStorageConfiguration()); + CheckDefaultProperties(new DataRegionConfiguration()); CheckDefaultProperties(new ClientConnectorConfiguration()); CheckDefaultProperties(new SqlConnectorConfiguration()); } @@ -94,6 +97,8 @@ public void TestDefaultValueAttributes() CheckDefaultValueAttributes(new PersistentStoreConfiguration()); CheckDefaultValueAttributes(new IgniteClientConfiguration()); CheckDefaultValueAttributes(new QueryIndex()); + CheckDefaultValueAttributes(new DataStorageConfiguration()); + CheckDefaultValueAttributes(new DataRegionConfiguration()); } /// @@ -219,33 +224,6 @@ public void TestAllConfigurationProperties() Assert.AreEqual(eventCfg.ExpirationTimeout, resEventCfg.ExpirationTimeout); Assert.AreEqual(eventCfg.MaxEventCount, resEventCfg.MaxEventCount); - var memCfg = cfg.MemoryConfiguration; - var resMemCfg = resCfg.MemoryConfiguration; - Assert.IsNotNull(memCfg); - Assert.IsNotNull(resMemCfg); - Assert.AreEqual(memCfg.PageSize, resMemCfg.PageSize); - Assert.AreEqual(memCfg.ConcurrencyLevel, resMemCfg.ConcurrencyLevel); - Assert.AreEqual(memCfg.DefaultMemoryPolicyName, resMemCfg.DefaultMemoryPolicyName); - Assert.AreEqual(memCfg.SystemCacheInitialSize, resMemCfg.SystemCacheInitialSize); - Assert.AreEqual(memCfg.SystemCacheMaxSize, resMemCfg.SystemCacheMaxSize); - Assert.IsNotNull(memCfg.MemoryPolicies); - Assert.IsNotNull(resMemCfg.MemoryPolicies); - Assert.AreEqual(2, memCfg.MemoryPolicies.Count); - Assert.AreEqual(2, resMemCfg.MemoryPolicies.Count); - - for (var i = 0; i < memCfg.MemoryPolicies.Count; i++) - { - var plc = memCfg.MemoryPolicies.Skip(i).First(); - var resPlc = resMemCfg.MemoryPolicies.Skip(i).First(); - - 
Assert.AreEqual(plc.PageEvictionMode, resPlc.PageEvictionMode); - Assert.AreEqual(plc.MaxSize, resPlc.MaxSize); - Assert.AreEqual(plc.EmptyPagesPoolSize, resPlc.EmptyPagesPoolSize); - Assert.AreEqual(plc.EvictionThreshold, resPlc.EvictionThreshold); - Assert.AreEqual(plc.Name, resPlc.Name); - Assert.AreEqual(plc.SwapFilePath, resPlc.SwapFilePath); - } - var sql = cfg.SqlConnectorConfiguration; var resSql = resCfg.SqlConnectorConfiguration; @@ -258,30 +236,7 @@ public void TestAllConfigurationProperties() Assert.AreEqual(sql.TcpNoDelay, resSql.TcpNoDelay); Assert.AreEqual(sql.ThreadPoolSize, resSql.ThreadPoolSize); - var pers = cfg.PersistentStoreConfiguration; - var resPers = resCfg.PersistentStoreConfiguration; - - Assert.AreEqual(pers.AlwaysWriteFullPages, resPers.AlwaysWriteFullPages); - Assert.AreEqual(pers.CheckpointingFrequency, resPers.CheckpointingFrequency); - Assert.AreEqual(pers.CheckpointingPageBufferSize, resPers.CheckpointingPageBufferSize); - Assert.AreEqual(pers.CheckpointingThreads, resPers.CheckpointingThreads); - Assert.AreEqual(pers.LockWaitTime, resPers.LockWaitTime); - Assert.AreEqual(pers.PersistentStorePath, resPers.PersistentStorePath); - Assert.AreEqual(pers.TlbSize, resPers.TlbSize); - Assert.AreEqual(pers.WalArchivePath, resPers.WalArchivePath); - Assert.AreEqual(pers.WalFlushFrequency, resPers.WalFlushFrequency); - Assert.AreEqual(pers.WalFsyncDelayNanos, resPers.WalFsyncDelayNanos); - Assert.AreEqual(pers.WalHistorySize, resPers.WalHistorySize); - Assert.AreEqual(pers.WalMode, resPers.WalMode); - Assert.AreEqual(pers.WalRecordIteratorBufferSize, resPers.WalRecordIteratorBufferSize); - Assert.AreEqual(pers.WalSegments, resPers.WalSegments); - Assert.AreEqual(pers.WalSegmentSize, resPers.WalSegmentSize); - Assert.AreEqual(pers.WalStorePath, resPers.WalStorePath); - Assert.AreEqual(pers.MetricsEnabled, resPers.MetricsEnabled); - Assert.AreEqual(pers.RateTimeInterval, resPers.RateTimeInterval); - Assert.AreEqual(pers.SubIntervals, 
resPers.SubIntervals); - Assert.AreEqual(pers.CheckpointWriteOrder, resPers.CheckpointWriteOrder); - Assert.AreEqual(pers.WriteThrottlingEnabled, resPers.WriteThrottlingEnabled); + TestUtils.AssertReflectionEqual(cfg.DataStorageConfiguration, resCfg.DataStorageConfiguration); } } @@ -311,24 +266,9 @@ public void TestSpringXml() Assert.IsNotNull(disco); Assert.AreEqual(TimeSpan.FromMilliseconds(300), disco.SocketTimeout); - // Check memory configuration defaults. - var mem = resCfg.MemoryConfiguration; - - Assert.IsNotNull(mem); - Assert.AreEqual("dfltPlc", mem.DefaultMemoryPolicyName); - Assert.AreEqual(MemoryConfiguration.DefaultSystemCacheInitialSize, mem.SystemCacheInitialSize); - Assert.AreEqual(MemoryConfiguration.DefaultSystemCacheMaxSize, mem.SystemCacheMaxSize); - - var plc = mem.MemoryPolicies.Single(); - Assert.AreEqual("dfltPlc", plc.Name); - Assert.AreEqual(MemoryPolicyConfiguration.DefaultEmptyPagesPoolSize, plc.EmptyPagesPoolSize); - Assert.AreEqual(MemoryPolicyConfiguration.DefaultEvictionThreshold, plc.EvictionThreshold); - Assert.AreEqual(MemoryPolicyConfiguration.DefaultMaxSize, plc.MaxSize); - Assert.AreEqual(MemoryPolicyConfiguration.DefaultSubIntervals, plc.SubIntervals); - Assert.AreEqual(MemoryPolicyConfiguration.DefaultRateTimeInterval, plc.RateTimeInterval); - - // Check PersistentStoreConfiguration defaults. - CheckDefaultProperties(resCfg.PersistentStoreConfiguration); + // DataStorage defaults. + CheckDefaultProperties(resCfg.DataStorageConfiguration); + CheckDefaultProperties(resCfg.DataStorageConfiguration.DefaultDataRegionConfiguration); // Connector defaults. CheckDefaultProperties(resCfg.ClientConnectorConfiguration); @@ -594,6 +534,54 @@ private static void CheckDefaultProperties(PersistentStoreConfiguration cfg) Assert.AreEqual(PersistentStoreConfiguration.DefaultWriteThrottlingEnabled, cfg.WriteThrottlingEnabled); } + /// + /// Checks the default properties. + /// + /// Config. 
+ private static void CheckDefaultProperties(DataStorageConfiguration cfg) + { + Assert.AreEqual(DataStorageConfiguration.DefaultTlbSize, cfg.WalThreadLocalBufferSize); + Assert.AreEqual(DataStorageConfiguration.DefaultCheckpointFrequency, cfg.CheckpointFrequency); + Assert.AreEqual(DataStorageConfiguration.DefaultCheckpointThreads, cfg.CheckpointThreads); + Assert.AreEqual(default(long), cfg.CheckpointPageBufferSize); + Assert.AreEqual(DataStorageConfiguration.DefaultLockWaitTime, cfg.LockWaitTime); + Assert.AreEqual(DataStorageConfiguration.DefaultWalFlushFrequency, cfg.WalFlushFrequency); + Assert.AreEqual(DataStorageConfiguration.DefaultWalFsyncDelayNanos, cfg.WalFsyncDelayNanos); + Assert.AreEqual(DataStorageConfiguration.DefaultWalHistorySize, cfg.WalHistorySize); + Assert.AreEqual(DataStorageConfiguration.DefaultWalRecordIteratorBufferSize, + cfg.WalRecordIteratorBufferSize); + Assert.AreEqual(DataStorageConfiguration.DefaultWalSegmentSize, cfg.WalSegmentSize); + Assert.AreEqual(DataStorageConfiguration.DefaultWalSegments, cfg.WalSegments); + Assert.AreEqual(DataStorageConfiguration.DefaultWalMode, cfg.WalMode); + Assert.IsFalse(cfg.MetricsEnabled); + Assert.AreEqual(DataStorageConfiguration.DefaultMetricsSubIntervalCount, cfg.MetricsSubIntervalCount); + Assert.AreEqual(DataStorageConfiguration.DefaultMetricsRateTimeInterval, cfg.MetricsRateTimeInterval); + Assert.AreEqual(DataStorageConfiguration.DefaultWalPath, cfg.WalPath); + Assert.AreEqual(DataStorageConfiguration.DefaultWalArchivePath, cfg.WalArchivePath); + Assert.AreEqual(DataStorageConfiguration.DefaultCheckpointWriteOrder, cfg.CheckpointWriteOrder); + Assert.AreEqual(DataStorageConfiguration.DefaultWriteThrottlingEnabled, cfg.WriteThrottlingEnabled); + + Assert.AreEqual(DataStorageConfiguration.DefaultSystemRegionInitialSize, cfg.SystemRegionInitialSize); + Assert.AreEqual(DataStorageConfiguration.DefaultSystemRegionMaxSize, cfg.SystemRegionMaxSize); + 
Assert.AreEqual(DataStorageConfiguration.DefaultPageSize, cfg.PageSize); + Assert.AreEqual(DataStorageConfiguration.DefaultConcurrencyLevel, cfg.ConcurrencyLevel); + } + + /// + /// Checks the default properties. + /// + /// Config. + private static void CheckDefaultProperties(DataRegionConfiguration cfg) + { + Assert.AreEqual(DataRegionConfiguration.DefaultEmptyPagesPoolSize, cfg.EmptyPagesPoolSize); + Assert.AreEqual(DataRegionConfiguration.DefaultEvictionThreshold, cfg.EvictionThreshold); + Assert.AreEqual(DataRegionConfiguration.DefaultInitialSize, cfg.InitialSize); + Assert.AreEqual(DataRegionConfiguration.DefaultMaxSize, cfg.MaxSize); + Assert.AreEqual(DataRegionConfiguration.DefaultPersistenceEnabled, cfg.PersistenceEnabled); + Assert.AreEqual(DataRegionConfiguration.DefaultMetricsRateTimeInterval, cfg.MetricsRateTimeInterval); + Assert.AreEqual(DataRegionConfiguration.DefaultMetricsSubIntervalCount, cfg.MetricsSubIntervalCount); + } + /// /// Checks the default properties. /// @@ -636,8 +624,7 @@ private static void CheckDefaultValueAttributes(object obj) foreach (var prop in props.Where(p => p.Name != "SelectorsCount" && p.Name != "ReadStripesNumber" && !p.Name.Contains("ThreadPoolSize") && - !(p.Name == "MaxSize" && - p.DeclaringType == typeof(MemoryPolicyConfiguration)))) + p.Name != "MaxSize")) { var attr = prop.GetCustomAttributes(true).OfType().FirstOrDefault(); var propValue = prop.GetValue(obj, null); @@ -645,7 +632,7 @@ private static void CheckDefaultValueAttributes(object obj) if (attr != null) Assert.AreEqual(attr.Value, propValue, string.Format("{0}.{1}", obj.GetType(), prop.Name)); else if (prop.PropertyType.IsValueType) - Assert.AreEqual(Activator.CreateInstance(prop.PropertyType), propValue); + Assert.AreEqual(Activator.CreateInstance(prop.PropertyType), propValue, prop.Name); else Assert.IsNull(propValue); } @@ -757,39 +744,6 @@ private static IgniteConfiguration GetCustomConfig() ExpirationTimeout = TimeSpan.FromSeconds(5), MaxEventCount = 
10 }, - MemoryConfiguration = new MemoryConfiguration - { - ConcurrencyLevel = 3, - DefaultMemoryPolicyName = "myDefaultPlc", - PageSize = 2048, - SystemCacheInitialSize = 13 * 1024 * 1024, - SystemCacheMaxSize = 15 * 1024 * 1024, - MemoryPolicies = new[] - { - new MemoryPolicyConfiguration - { - Name = "myDefaultPlc", - PageEvictionMode = DataPageEvictionMode.Disabled, - InitialSize = 340 * 1024 * 1024, - MaxSize = 345 * 1024 * 1024, - EvictionThreshold = 0.88, - EmptyPagesPoolSize = 77, - SwapFilePath = "myPath1", - RateTimeInterval = TimeSpan.FromSeconds(35), - SubIntervals = 7 - }, - new MemoryPolicyConfiguration - { - Name = "customPlc", - PageEvictionMode = DataPageEvictionMode.Disabled, - MaxSize = 456 * 1024 * 1024, - EvictionThreshold = 0.77, - EmptyPagesPoolSize = 66, - SwapFilePath = "somePath2", - MetricsEnabled = true - } - } - }, PublicThreadPoolSize = 3, StripedThreadPoolSize = 5, ServiceThreadPoolSize = 6, @@ -810,31 +764,66 @@ private static IgniteConfiguration GetCustomConfig() TcpNoDelay = false, SocketSendBufferSize = 4096 }, - PersistentStoreConfiguration = new PersistentStoreConfiguration + ConsistentId = new MyConsistentId {Data = "abc"}, + DataStorageConfiguration = new DataStorageConfiguration { AlwaysWriteFullPages = true, - CheckpointingFrequency = TimeSpan.FromSeconds(25), - CheckpointingPageBufferSize = 28 * 1024 * 1024, - CheckpointingThreads = 2, + CheckpointFrequency = TimeSpan.FromSeconds(25), + CheckpointPageBufferSize = 28 * 1024 * 1024, + CheckpointThreads = 2, LockWaitTime = TimeSpan.FromSeconds(5), - PersistentStorePath = Path.GetTempPath(), - TlbSize = 64 * 1024, + StoragePath = Path.GetTempPath(), + WalThreadLocalBufferSize = 64 * 1024, WalArchivePath = Path.GetTempPath(), WalFlushFrequency = TimeSpan.FromSeconds(3), WalFsyncDelayNanos = 3, WalHistorySize = 10, - WalMode = WalMode.LogOnly, + WalMode = Configuration.WalMode.LogOnly, WalRecordIteratorBufferSize = 32 * 1024 * 1024, WalSegments = 6, WalSegmentSize = 5 * 1024 * 
1024, - WalStorePath = Path.GetTempPath(), + WalPath = Path.GetTempPath(), MetricsEnabled = true, - SubIntervals = 7, - RateTimeInterval = TimeSpan.FromSeconds(9), - CheckpointWriteOrder = CheckpointWriteOrder.Random, - WriteThrottlingEnabled = true - }, - ConsistentId = new MyConsistentId {Data = "abc"} + MetricsSubIntervalCount = 7, + MetricsRateTimeInterval = TimeSpan.FromSeconds(9), + CheckpointWriteOrder = Configuration.CheckpointWriteOrder.Random, + WriteThrottlingEnabled = true, + SystemRegionInitialSize = 64 * 1024 * 1024, + SystemRegionMaxSize = 128 * 1024 * 1024, + ConcurrencyLevel = 1, + PageSize = 8 * 1024, + DefaultDataRegionConfiguration = new DataRegionConfiguration + { + Name = "reg1", + EmptyPagesPoolSize = 50, + EvictionThreshold = 0.8, + InitialSize = 100 * 1024 * 1024, + MaxSize = 150 * 1024 * 1024, + MetricsEnabled = true, + PageEvictionMode = Configuration.DataPageEvictionMode.Random2Lru, + PersistenceEnabled = false, + MetricsRateTimeInterval = TimeSpan.FromMinutes(2), + MetricsSubIntervalCount = 6, + SwapPath = IgniteUtils.GetTempDirectoryName() + }, + DataRegionConfigurations = new[] + { + new DataRegionConfiguration + { + Name = "reg2", + EmptyPagesPoolSize = 51, + EvictionThreshold = 0.7, + InitialSize = 101 * 1024 * 1024, + MaxSize = 151 * 1024 * 1024, + MetricsEnabled = false, + PageEvictionMode = Configuration.DataPageEvictionMode.RandomLru, + PersistenceEnabled = false, + MetricsRateTimeInterval = TimeSpan.FromMinutes(3), + MetricsSubIntervalCount = 7, + SwapPath = IgniteUtils.GetTempDirectoryName() + } + } + } }; } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj index 58abd26fc9e33..2a40e7eacccf5 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.csproj @@ -94,6 +94,10 @@ + + + + @@ -105,6 +109,9 @@ + + + @@ -113,6 +120,7 @@ + @@ 
-120,6 +128,7 @@ + @@ -604,6 +613,7 @@ + diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs index c6b81f02abcf6..e7252b26e3cea 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs @@ -27,6 +27,7 @@ namespace Apache.Ignite.Core.Cache.Configuration using System.Diagnostics.CodeAnalysis; using System.IO; using System.Linq; + using System.Xml.Serialization; using Apache.Ignite.Core.Cache; using Apache.Ignite.Core.Cache.Affinity; using Apache.Ignite.Core.Cache.Affinity.Rendezvous; @@ -34,6 +35,7 @@ namespace Apache.Ignite.Core.Cache.Configuration using Apache.Ignite.Core.Cache.Expiry; using Apache.Ignite.Core.Cache.Store; using Apache.Ignite.Core.Common; + using Apache.Ignite.Core.Configuration; using Apache.Ignite.Core.Impl; using Apache.Ignite.Core.Impl.Binary; using Apache.Ignite.Core.Impl.Cache.Affinity; @@ -286,7 +288,7 @@ private void Read(BinaryReader reader) ReadThrough = reader.ReadBoolean(); WriteThrough = reader.ReadBoolean(); EnableStatistics = reader.ReadBoolean(); - MemoryPolicyName = reader.ReadString(); + DataRegionName = reader.ReadString(); PartitionLossPolicy = (PartitionLossPolicy) reader.ReadInt(); GroupName = reader.ReadString(); CacheStoreFactory = reader.ReadObject>(); @@ -366,7 +368,7 @@ internal void Write(BinaryWriter writer) writer.WriteBoolean(ReadThrough); writer.WriteBoolean(WriteThrough); writer.WriteBoolean(EnableStatistics); - writer.WriteString(MemoryPolicyName); + writer.WriteString(DataRegionName); writer.WriteInt((int) PartitionLossPolicy); writer.WriteString(GroupName); writer.WriteObject(CacheStoreFactory); @@ -747,7 +749,18 @@ internal void Validate(ILogger log) /// Gets or sets the name of the for this cache. /// See . 
/// - public string MemoryPolicyName { get; set; } + [Obsolete("Use DataRegionName.")] + [XmlIgnore] + public string MemoryPolicyName + { + get { return DataRegionName; } + set { DataRegionName = value; } + } + + /// + /// Gets or sets the name of the data region, see . + /// + public string DataRegionName { get; set; } /// /// Gets or sets write coalescing flag for write-behind cache store operations. @@ -770,7 +783,7 @@ internal void Validate(ILogger log) /// /// Since underlying cache is shared, the following configuration properties should be the same within group: /// , , , - /// + /// /// /// Grouping caches reduces overall overhead, since internal data structures are shared. /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/DataPageEvictionMode.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/DataPageEvictionMode.cs index a6263d75edeab..57e60d9df2fe0 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/DataPageEvictionMode.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/DataPageEvictionMode.cs @@ -17,11 +17,14 @@ namespace Apache.Ignite.Core.Cache.Configuration { + using System; + /// /// Memory page eviction mode. /// Only data pages, that store key-value entries, are eligible for eviction. /// The other types of pages, like index or system pages, are not evictable. 
/// + [Obsolete("Use Apache.Ignite.Core.Configuration.DataPageEvictionMode")] public enum DataPageEvictionMode { /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/MemoryConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/MemoryConfiguration.cs index 3be6012ad0fe3..12d00024c18ff 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/MemoryConfiguration.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/MemoryConfiguration.cs @@ -17,6 +17,7 @@ namespace Apache.Ignite.Core.Cache.Configuration { + using System; using System.Collections.Generic; using System.ComponentModel; using System.Diagnostics; @@ -24,6 +25,7 @@ namespace Apache.Ignite.Core.Cache.Configuration using System.Linq; using Apache.Ignite.Core.Binary; using Apache.Ignite.Core.Common; + using Apache.Ignite.Core.Configuration; /// /// A page memory configuration for an Apache Ignite node. The page memory is a manageable off-heap based @@ -42,7 +44,10 @@ namespace Apache.Ignite.Core.Cache.Configuration /// eviction policies, swapping options, etc. Once you define a new memory region you can bind /// particular Ignite caches to it. /// To learn more about memory policies refer to documentation. + /// + /// Obsolete, use . 
/// + [Obsolete("Use DataStorageConfiguration.")] public class MemoryConfiguration { /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/MemoryPolicyConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/MemoryPolicyConfiguration.cs index 16d8dcc813adc..e204ee711fd14 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/MemoryPolicyConfiguration.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/MemoryPolicyConfiguration.cs @@ -21,12 +21,15 @@ namespace Apache.Ignite.Core.Cache.Configuration using System.ComponentModel; using System.Diagnostics.CodeAnalysis; using Apache.Ignite.Core.Binary; + using Apache.Ignite.Core.Configuration; using Apache.Ignite.Core.Impl; using Apache.Ignite.Core.Impl.Binary; /// /// Defines page memory policy configuration. See . + /// Obsolete, use . /// + [Obsolete("Use DataRegionConfiguration.")] public class MemoryPolicyConfiguration { /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/IMemoryMetrics.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/IMemoryMetrics.cs index 0298c1fb9f2d1..ff8d64e77f28b 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/IMemoryMetrics.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/IMemoryMetrics.cs @@ -17,9 +17,13 @@ namespace Apache.Ignite.Core.Cache { + using System; + /// /// Memory usage metrics. + /// Obsolete, use . /// + [Obsolete("See IDataRegionMetrics.")] public interface IMemoryMetrics { /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/CheckpointWriteOrder.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/CheckpointWriteOrder.cs new file mode 100644 index 0000000000000..5243f4ab6bf4f --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/CheckpointWriteOrder.cs @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Configuration +{ + /// + /// Defines checkpoint pages order on disk. + /// + public enum CheckpointWriteOrder + { + /// + /// Pages are written in order provided by checkpoint pages collection iterator + /// (which is basically a hashtable). + /// + Random, + + /// + /// All checkpoint pages are collected into single list and sorted by page index. + /// Provides almost sequential disk writes, which can be much faster on some SSD models. + /// + Sequential + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataPageEvictionMode.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataPageEvictionMode.cs new file mode 100644 index 0000000000000..ec835bb4d3336 --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataPageEvictionMode.cs @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Configuration +{ + /// + /// Memory page eviction mode. + /// Only data pages, that store key-value entries, are eligible for eviction. + /// The other types of pages, like index or system pages, are not evictable. + /// + public enum DataPageEvictionMode + { + /// + /// Eviction is disabled. + /// + Disabled, + + /// + /// Random-LRU algorithm. + /// + /// Once a memory region defined by a memory policy is configured, an off-heap array is allocated to track + /// last usage timestamp for every individual data page. The size of the array equals to + /// / . + /// + /// When a data page is accessed, its timestamp gets updated in the tracking array. The page index in the + /// tracking array equals to pageAddress / . + /// + /// When some pages need to be evicted, the algorithm randomly chooses 5 indexes from the tracking array and + /// evicts a page with the latest timestamp. If some of the indexes point to non-data pages + /// (index or system pages) then the algorithm picks other pages. + /// + RandomLru, + + /// + /// Activates Random-2-LRU algorithm which is a scan resistant version of Random-LRU. + /// + /// This algorithm differs from Random-LRU only in a way that two latest access timestamps are stored for every + /// data page. At the eviction time, a minimum between two latest timestamps is taken for further + /// comparison with minimums of other pages that might be evicted. 
LRU-2 outperforms LRU by + /// resolving "one-hit wonder" problem - if a data page is accessed rarely, but accidentally accessed once, + /// its protected from eviction for a long time. + /// + Random2Lru + } +} \ No newline at end of file diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataRegionConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataRegionConfiguration.cs new file mode 100644 index 0000000000000..5c4240e818a9f --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataRegionConfiguration.cs @@ -0,0 +1,213 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Configuration +{ + using System; + using System.ComponentModel; + using System.Diagnostics.CodeAnalysis; + using Apache.Ignite.Core.Binary; + using Apache.Ignite.Core.Cache.Configuration; + using Apache.Ignite.Core.Impl; + using Apache.Ignite.Core.Impl.Binary; + + /// + /// Defines custom data region configuration for Apache Ignite page memory + /// (see ). + /// + /// For each configured data region Apache Ignite instantiates respective memory regions with different + /// parameters like maximum size, eviction policy, swapping options, etc. 
+ /// An Apache Ignite cache can be mapped to a particular region using + /// method. + /// + public class DataRegionConfiguration + { + /// + /// Default value for . + /// + public const bool DefaultPersistenceEnabled = false; + + /// + /// The default eviction threshold. + /// + public const double DefaultEvictionThreshold = 0.9; + + /// + /// The default empty pages pool size. + /// + public const int DefaultEmptyPagesPoolSize = 100; + + /// + /// The default initial size. + /// + public const long DefaultInitialSize = 256 * 1024 * 1024; + + /// + /// The default maximum size, equals to 20% of total RAM. + /// + public static readonly long DefaultMaxSize = (long)((long)NativeMethods.GetTotalPhysicalMemory() * 0.2); + + /// + /// The default sub intervals. + /// + [SuppressMessage("Microsoft.Naming", "CA1702:CompoundWordsShouldBeCasedCorrectly", + Justification = "Consistency with Java config")] + public const int DefaultMetricsSubIntervalCount = 5; + + /// + /// The default rate time interval. + /// + public static readonly TimeSpan DefaultMetricsRateTimeInterval = TimeSpan.FromSeconds(60); + + /// + /// Initializes a new instance of the class. + /// + public DataRegionConfiguration() + { + PersistenceEnabled = DefaultPersistenceEnabled; + EvictionThreshold = DefaultEvictionThreshold; + EmptyPagesPoolSize = DefaultEmptyPagesPoolSize; + InitialSize = DefaultInitialSize; + MaxSize = DefaultMaxSize; + MetricsSubIntervalCount = DefaultMetricsSubIntervalCount; + MetricsRateTimeInterval = DefaultMetricsRateTimeInterval; + } + + /// + /// Initializes a new instance of the class. + /// + /// The reader. 
+ internal DataRegionConfiguration(IBinaryRawReader reader) + { + Name = reader.ReadString(); + PersistenceEnabled = reader.ReadBoolean(); + InitialSize = reader.ReadLong(); + MaxSize = reader.ReadLong(); + SwapPath = reader.ReadString(); + PageEvictionMode = (DataPageEvictionMode)reader.ReadInt(); + EvictionThreshold = reader.ReadDouble(); + EmptyPagesPoolSize = reader.ReadInt(); + MetricsEnabled = reader.ReadBoolean(); + MetricsSubIntervalCount = reader.ReadInt(); + MetricsRateTimeInterval = reader.ReadLongAsTimespan(); + } + + /// + /// Writes this instance to a writer. + /// + internal void Write(IBinaryRawWriter writer) + { + writer.WriteString(Name); + writer.WriteBoolean(PersistenceEnabled); + writer.WriteLong(InitialSize); + writer.WriteLong(MaxSize); + writer.WriteString(SwapPath); + writer.WriteInt((int)PageEvictionMode); + writer.WriteDouble(EvictionThreshold); + writer.WriteInt(EmptyPagesPoolSize); + writer.WriteBoolean(MetricsEnabled); + writer.WriteInt(MetricsSubIntervalCount); + writer.WriteTimeSpanAsLong(MetricsRateTimeInterval); + } + + /// + /// Gets or sets the data region name. + /// + public string Name { get; set; } + + /// + /// Gets or sets a value indicating whether disk persistence is enabled for this region. + /// Default is . + /// + [DefaultValue(DefaultPersistenceEnabled)] + public bool PersistenceEnabled { get; set; } + + /// + /// Gets or sets initial memory region size. + /// When the used memory size exceeds this value, new chunks of memory will be allocated. + /// + [DefaultValue(DefaultInitialSize)] + public long InitialSize { get; set; } + + /// + /// Sets maximum memory region size. The total size should not be less + /// than 10 MB due to internal data structures overhead. + /// + public long MaxSize { get; set; } + + /// + /// Gets or sets the the path to the directory for memory-mapped files. + /// + /// Null for no swap. + /// + public string SwapPath { get; set; } + + /// + /// Gets or sets the page eviction mode. 
If is used (default) + /// then an out of memory exception will be thrown if the memory region usage + /// goes beyond . + /// + public DataPageEvictionMode PageEvictionMode { get; set; } + + /// + /// Gets or sets the threshold for memory pages eviction initiation. For instance, if the threshold is 0.9 + /// it means that the page memory will start the eviction only after 90% of the memory region is occupied. + /// + [DefaultValue(DefaultEvictionThreshold)] + public double EvictionThreshold { get; set; } + + /// + /// Gets or sets the minimal number of empty pages to be present in reuse lists for this data region. + /// This parameter ensures that Ignite will be able to successfully evict old data entries when the size of + /// (key, value) pair is slightly larger than page size / 2. + /// Increase this parameter if cache can contain very big entries (total size of pages in this pool + /// should be enough to contain largest cache entry). + /// + [DefaultValue(DefaultEmptyPagesPoolSize)] + public int EmptyPagesPoolSize { get; set; } + + /// + /// Gets or sets a value indicating whether memory metrics should be enabled. + /// + /// Metrics can be retrieved with method. + /// + public bool MetricsEnabled { get; set; } + + /// + /// Gets or sets the rate time interval for + /// and monitoring purposes. + /// + /// For instance, after setting the interval to 60 seconds, subsequent calls + /// to will return average allocation + /// rate (pages per second) for the last minute. + /// + [DefaultValue(typeof(TimeSpan), "00:01:00")] + public TimeSpan MetricsRateTimeInterval { get; set; } + + /// + /// Gets or sets the number of sub intervals to split into to calculate + /// and . + /// + /// Bigger value results in more accurate metrics. 
+ /// + [DefaultValue(DefaultMetricsSubIntervalCount)] + [SuppressMessage("Microsoft.Naming", "CA1702:CompoundWordsShouldBeCasedCorrectly", + Justification = "Consistency with Java config")] + public int MetricsSubIntervalCount { get; set; } + + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataStorageConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataStorageConfiguration.cs new file mode 100644 index 0000000000000..17b4adaa4b4a3 --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataStorageConfiguration.cs @@ -0,0 +1,466 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Configuration +{ + using System; + using System.Collections.Generic; + using System.ComponentModel; + using System.Diagnostics; + using System.Diagnostics.CodeAnalysis; + using System.Linq; + using Apache.Ignite.Core.Binary; + using Apache.Ignite.Core.Common; + using Apache.Ignite.Core.Impl.Binary; + + /// + /// Data storage configuration for Ignite page memory. + /// + /// The page memory is a manageable off-heap based memory architecture that divides all expandable data + /// regions into pages of fixed size. 
An individual page can store one or many cache key-value entries + /// that allows reusing the memory in the most efficient way and avoid memory fragmentation issues. + /// + /// By default, the page memory allocates a single expandable data region. All the caches that will be + /// configured in an application will be mapped to this data region by default, thus, all the cache data + /// will reside in that data region. + /// + public class DataStorageConfiguration + { + /// + /// Default value for . + /// + public const int DefaultCheckpointThreads = 4; + + /// + /// Default name is assigned to default data region if no user-defined + /// is specified. + /// + public const string DefaultDataRegionName = "default"; + + /// + /// Default value for . + /// + public static readonly TimeSpan DefaultCheckpointFrequency = TimeSpan.FromSeconds(180); + + /// + /// Default value for . + /// + public static readonly TimeSpan DefaultLockWaitTime = TimeSpan.FromSeconds(10); + + /// + /// Default value for . + /// + public const int DefaultWalHistorySize = 20; + + /// + /// Default value for . + /// + public const int DefaultWalSegments = 10; + + /// + /// Default value for . + /// + public const int DefaultWalSegmentSize = 64 * 1024 * 1024; + + /// + /// Default value for . + /// + public const int DefaultTlbSize = 128 * 1024; + + /// + /// Default value for . + /// + public static readonly TimeSpan DefaultWalFlushFrequency = TimeSpan.FromSeconds(2); + + /// + /// Default value for . + /// + public const int DefaultWalRecordIteratorBufferSize = 64 * 1024 * 1024; + + /// + /// Default value for . + /// + public const long DefaultWalFsyncDelayNanos = 1000; + + /// + /// The default sub intervals. + /// + [SuppressMessage("Microsoft.Naming", "CA1702:CompoundWordsShouldBeCasedCorrectly", + Justification = "Consistency with Java config")] + public const int DefaultMetricsSubIntervalCount = 5; + + /// + /// The default rate time interval. 
+ /// + public static readonly TimeSpan DefaultMetricsRateTimeInterval = TimeSpan.FromSeconds(60); + + /// + /// Default value for . + /// + public const string DefaultWalPath = "db/wal"; + + /// + /// Default value for . + /// + public const string DefaultWalArchivePath = "db/wal/archive"; + + /// + /// Default value for . + /// + public const WalMode DefaultWalMode = WalMode.Default; + + /// + /// Default value for . + /// + public const CheckpointWriteOrder DefaultCheckpointWriteOrder = CheckpointWriteOrder.Sequential; + + /// + /// Default value for . + /// + public const bool DefaultWriteThrottlingEnabled = false; + + /// + /// Default size of a memory chunk reserved for system cache initially. + /// + public const long DefaultSystemRegionInitialSize = 40 * 1024 * 1024; + + /// + /// Default max size of a memory chunk for the system cache. + /// + public const long DefaultSystemRegionMaxSize = 100 * 1024 * 1024; + + /// + /// The default page size. + /// + public const int DefaultPageSize = 4 * 1024; + + /// + /// The default concurrency level. + /// + public const int DefaultConcurrencyLevel = 0; + + /// + /// Initializes a new instance of the class. 
+ /// + public DataStorageConfiguration() + { + CheckpointThreads = DefaultCheckpointThreads; + CheckpointFrequency = DefaultCheckpointFrequency; + LockWaitTime = DefaultLockWaitTime; + WalHistorySize = DefaultWalHistorySize; + WalSegments = DefaultWalSegments; + WalSegmentSize = DefaultWalSegmentSize; + WalThreadLocalBufferSize = DefaultTlbSize; + WalFlushFrequency = DefaultWalFlushFrequency; + WalRecordIteratorBufferSize = DefaultWalRecordIteratorBufferSize; + WalFsyncDelayNanos = DefaultWalFsyncDelayNanos; + MetricsRateTimeInterval = DefaultMetricsRateTimeInterval; + MetricsSubIntervalCount = DefaultMetricsSubIntervalCount; + WalArchivePath = DefaultWalArchivePath; + WalPath = DefaultWalPath; + CheckpointWriteOrder = DefaultCheckpointWriteOrder; + WriteThrottlingEnabled = DefaultWriteThrottlingEnabled; + SystemRegionInitialSize = DefaultSystemRegionInitialSize; + SystemRegionMaxSize = DefaultSystemRegionMaxSize; + PageSize = DefaultPageSize; + } + + /// + /// Initializes a new instance of the class. + /// + /// The reader. 
+ internal DataStorageConfiguration(IBinaryRawReader reader) + { + Debug.Assert(reader != null); + + StoragePath = reader.ReadString(); + CheckpointFrequency = reader.ReadLongAsTimespan(); + CheckpointPageBufferSize = reader.ReadLong(); + CheckpointThreads = reader.ReadInt(); + LockWaitTime = reader.ReadLongAsTimespan(); + WalHistorySize = reader.ReadInt(); + WalSegments = reader.ReadInt(); + WalSegmentSize = reader.ReadInt(); + WalPath = reader.ReadString(); + WalArchivePath = reader.ReadString(); + WalMode = (WalMode)reader.ReadInt(); + WalThreadLocalBufferSize = reader.ReadInt(); + WalFlushFrequency = reader.ReadLongAsTimespan(); + WalFsyncDelayNanos = reader.ReadLong(); + WalRecordIteratorBufferSize = reader.ReadInt(); + AlwaysWriteFullPages = reader.ReadBoolean(); + MetricsEnabled = reader.ReadBoolean(); + MetricsSubIntervalCount = reader.ReadInt(); + MetricsRateTimeInterval = reader.ReadLongAsTimespan(); + CheckpointWriteOrder = (CheckpointWriteOrder)reader.ReadInt(); + WriteThrottlingEnabled = reader.ReadBoolean(); + + SystemRegionInitialSize = reader.ReadLong(); + SystemRegionMaxSize = reader.ReadLong(); + PageSize = reader.ReadInt(); + ConcurrencyLevel = reader.ReadInt(); + + var count = reader.ReadInt(); + + if (count > 0) + { + DataRegionConfigurations = Enumerable.Range(0, count) + .Select(x => new DataRegionConfiguration(reader)) + .ToArray(); + } + + if (reader.ReadBoolean()) + { + DefaultDataRegionConfiguration = new DataRegionConfiguration(reader); + } + } + + /// + /// Writes this instance to the specified writer. + /// + /// The writer. 
+ internal void Write(IBinaryRawWriter writer) + { + Debug.Assert(writer != null); + + writer.WriteString(StoragePath); + writer.WriteTimeSpanAsLong(CheckpointFrequency); + writer.WriteLong(CheckpointPageBufferSize); + writer.WriteInt(CheckpointThreads); + writer.WriteTimeSpanAsLong(LockWaitTime); + writer.WriteInt(WalHistorySize); + writer.WriteInt(WalSegments); + writer.WriteInt(WalSegmentSize); + writer.WriteString(WalPath); + writer.WriteString(WalArchivePath); + writer.WriteInt((int)WalMode); + writer.WriteInt(WalThreadLocalBufferSize); + writer.WriteTimeSpanAsLong(WalFlushFrequency); + writer.WriteLong(WalFsyncDelayNanos); + writer.WriteInt(WalRecordIteratorBufferSize); + writer.WriteBoolean(AlwaysWriteFullPages); + writer.WriteBoolean(MetricsEnabled); + writer.WriteInt(MetricsSubIntervalCount); + writer.WriteTimeSpanAsLong(MetricsRateTimeInterval); + writer.WriteInt((int)CheckpointWriteOrder); + writer.WriteBoolean(WriteThrottlingEnabled); + + writer.WriteLong(SystemRegionInitialSize); + writer.WriteLong(SystemRegionMaxSize); + writer.WriteInt(PageSize); + writer.WriteInt(ConcurrencyLevel); + + if (DataRegionConfigurations != null) + { + writer.WriteInt(DataRegionConfigurations.Count); + + foreach (var region in DataRegionConfigurations) + { + if (region == null) + { + throw new IgniteException( + "DataStorageConfiguration.DataRegionConfigurations must not contain null items."); + } + + region.Write(writer); + } + } + else + { + writer.WriteInt(0); + } + + if (DefaultDataRegionConfiguration != null) + { + writer.WriteBoolean(true); + DefaultDataRegionConfiguration.Write(writer); + } + else + { + writer.WriteBoolean(false); + } + } + + /// + /// Gets or sets the path where data and indexes will be persisted. + /// + public string StoragePath { get; set; } + + /// + /// Gets or sets the checkpointing frequency which is a minimal interval when the dirty pages will be written + /// to the Persistent Store. 
+ /// + [DefaultValue(typeof(TimeSpan), "00:03:00")] + public TimeSpan CheckpointFrequency { get; set; } + + /// + /// Gets or sets the size of the checkpointing page buffer. + /// + /// Default is 0: Ignite will choose buffer size automatically. + /// + public long CheckpointPageBufferSize { get; set; } + + /// + /// Gets or sets the number of threads for checkpointing. + /// + [DefaultValue(DefaultCheckpointThreads)] + public int CheckpointThreads { get; set; } + + /// + /// Gets or sets the persistent manager file lock wait time. + /// + [DefaultValue(typeof(TimeSpan), "00:00:10")] + public TimeSpan LockWaitTime { get; set; } + + /// + /// Gets or sets the number of checkpoints to store in WAL (Write Ahead Log) history. + /// + [DefaultValue(DefaultWalHistorySize)] + public int WalHistorySize { get; set; } + + /// + /// Gets or sets a number of WAL (Write Ahead Log) segments to work with. + /// For performance reasons, the whole WAL is split into files of fixed length called segments. + /// + [DefaultValue(DefaultWalSegments)] + public int WalSegments { get; set; } + + /// + /// Gets or sets the size of the WAL (Write Ahead Log) segment. + /// For performance reasons, the whole WAL is split into files of fixed length called segments. + /// + [DefaultValue(DefaultWalSegmentSize)] + public int WalSegmentSize { get; set; } + + /// + /// Gets or sets the path to the directory where WAL (Write Ahead Log) is stored. + /// + [DefaultValue(DefaultWalPath)] + public string WalPath { get; set; } + + /// + /// Gets or sets the path to the directory where WAL (Write Ahead Log) archive is stored. + /// Every WAL segment will be fully copied to this directory before it can be reused for WAL purposes. + /// + [DefaultValue(DefaultWalArchivePath)] + public string WalArchivePath { get; set; } + + /// + /// Gets or sets the WAL (Write Ahead Log) mode. 
+ /// + [DefaultValue(DefaultWalMode)] + public WalMode WalMode { get; set; } + + /// + /// Gets or sets the size of the TLB (Thread-Local Buffer), in bytes. + /// + [DefaultValue(DefaultTlbSize)] + public int WalThreadLocalBufferSize { get; set; } + + /// + /// Gets or sets the WAL (Write Ahead Log) flush frequency. + /// + [DefaultValue(typeof(TimeSpan), "00:00:02")] + public TimeSpan WalFlushFrequency { get; set; } + + /// + /// Gets or sets the WAL (Write Ahead Log) fsync (disk sync) delay, in nanoseconds + /// + [DefaultValue(DefaultWalFsyncDelayNanos)] + public long WalFsyncDelayNanos { get; set; } + + /// + /// Gets or sets the size of the WAL (Write Ahead Log) record iterator buffer, in bytes. + /// + [DefaultValue(DefaultWalRecordIteratorBufferSize)] + public int WalRecordIteratorBufferSize { get; set; } + + /// + /// Gets or sets a value indicating whether full pages should always be written. + /// + public bool AlwaysWriteFullPages { get; set; } + + /// + /// Gets or sets a value indicating whether to enable data storage metrics. + /// See . + /// + public bool MetricsEnabled { get; set; } + + /// + /// Gets or sets the length of the time interval for rate-based metrics. + /// This interval defines a window over which hits will be tracked. + /// + [DefaultValue(typeof(TimeSpan), "00:01:00")] + public TimeSpan MetricsRateTimeInterval { get; set; } + + /// + /// Number of sub-intervals to split the into to track the update history. + /// + [DefaultValue(DefaultMetricsSubIntervalCount)] + [SuppressMessage("Microsoft.Naming", "CA1702:CompoundWordsShouldBeCasedCorrectly", + Justification = "Consistency with Java config")] + public int MetricsSubIntervalCount { get; set; } + + /// + /// Gets or sets the checkpoint page write order on disk. 
+ /// + [DefaultValue(DefaultCheckpointWriteOrder)] + public CheckpointWriteOrder CheckpointWriteOrder { get; set; } + + /// + /// Gets or sets a value indicating whether threads that generate dirty + /// pages too fast during ongoing checkpoint will be throttled. + /// + [DefaultValue(DefaultWriteThrottlingEnabled)] + public bool WriteThrottlingEnabled { get; set; } + + /// + /// Gets or sets the size of a memory chunk reserved for system needs. + /// + [DefaultValue(DefaultSystemRegionInitialSize)] + public long SystemRegionInitialSize { get; set; } + + /// + /// Gets or sets the maximum memory region size reserved for system needs. + /// + [DefaultValue(DefaultSystemRegionMaxSize)] + public long SystemRegionMaxSize { get; set; } + + /// + /// Gets or sets the size of the memory page. + /// + [DefaultValue(DefaultPageSize)] + public int PageSize { get; set; } + + /// + /// Gets or sets the number of concurrent segments in Ignite internal page mapping tables. + /// + [DefaultValue(DefaultConcurrencyLevel)] + public int ConcurrencyLevel { get; set; } + + /// + /// Gets or sets the data region configurations. + /// + [SuppressMessage("Microsoft.Usage", "CA2227:CollectionPropertiesShouldBeReadOnly")] + public ICollection DataRegionConfigurations { get; set; } + + /// + /// Gets or sets the default region configuration. + /// + public DataRegionConfiguration DefaultDataRegionConfiguration { get; set; } + } +} \ No newline at end of file diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/WalMode.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/WalMode.cs new file mode 100644 index 0000000000000..d6e4532583444 --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/WalMode.cs @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
namespace Apache.Ignite.Core.Configuration
{
    /// <summary>
    /// Write Ahead Log mode.
    /// <para />
    /// NOTE: the member order defines the integer value exchanged with the Java side
    /// (DataStorageConfiguration reads/writes this enum as a raw int) — do not reorder members.
    /// </summary>
    public enum WalMode
    {
        /// <summary>
        /// Default mode: full-sync disk writes. These writes survive power loss scenarios.
        /// </summary>
        Default,

        /// <summary>
        /// Log only mode: flushes application buffers. These writes survive process crash.
        /// </summary>
        LogOnly,

        /// <summary>
        /// Background mode. Does not force application buffer flush.
        /// Data may be lost in case of process crash.
        /// </summary>
        Background,

        /// <summary>
        /// WAL disabled.
        /// </summary>
        None
    }
}
namespace Apache.Ignite.Core
{
    /// <summary>
    /// Memory usage metrics for a single data region.
    /// </summary>
    public interface IDataRegionMetrics
    {
        /// <summary>
        /// Gets the name of the data region these metrics belong to.
        /// </summary>
        string Name { get; }

        /// <summary>
        /// Gets the count of allocated pages.
        /// </summary>
        long TotalAllocatedPages { get; }

        /// <summary>
        /// Gets the allocation rate, in pages per second.
        /// </summary>
        float AllocationRate { get; }

        /// <summary>
        /// Gets the eviction rate, in pages per second.
        /// </summary>
        float EvictionRate { get; }

        /// <summary>
        /// Gets the percentage of pages fully occupied by entries that are larger than a page.
        /// </summary>
        float LargeEntriesPagesPercentage { get; }

        /// <summary>
        /// Gets the page fill factor: free space to overall size ratio across all pages.
        /// </summary>
        float PageFillFactor { get; }
    }
}
namespace Apache.Ignite.Core
{
    using System;

    /// <summary>
    /// Persistent store metrics.
    /// <para />
    /// All rate values are averaged over the configured rate time interval
    /// (see DataStorageConfiguration.MetricsRateTimeInterval).
    /// </summary>
    public interface IDataStorageMetrics
    {
        /// <summary>
        /// Gets the average number of WAL records per second written during the last time interval.
        /// </summary>
        float WalLoggingRate { get; }

        /// <summary>
        /// Gets the average number of bytes per second written during the last time interval.
        /// </summary>
        float WalWritingRate { get; }

        /// <summary>
        /// Gets the current number of WAL segments in the WAL archive.
        /// </summary>
        int WalArchiveSegments { get; }

        /// <summary>
        /// Gets the average WAL fsync duration in microseconds over the last time interval.
        /// </summary>
        float WalFsyncTimeAverage { get; }

        /// <summary>
        /// Gets the duration of the last checkpoint.
        /// </summary>
        TimeSpan LastCheckpointDuration { get; }

        /// <summary>
        /// Gets the duration of the last checkpoint lock wait.
        /// </summary>
        TimeSpan LastCheckpointLockWaitDuration { get; }

        /// <summary>
        /// Gets the duration of the last checkpoint mark phase.
        /// </summary>
        TimeSpan LastCheckpointMarkDuration { get; }

        /// <summary>
        /// Gets the duration of the last checkpoint pages write phase.
        /// </summary>
        TimeSpan LastCheckpointPagesWriteDuration { get; }

        /// <summary>
        /// Gets the duration of the sync phase of the last checkpoint.
        /// </summary>
        TimeSpan LastCheckpointFsyncDuration { get; }

        /// <summary>
        /// Gets the total number of pages written during the last checkpoint.
        /// </summary>
        long LastCheckpointTotalPagesNumber { get; }

        /// <summary>
        /// Gets the number of data pages written during the last checkpoint.
        /// </summary>
        long LastCheckpointDataPagesNumber { get; }

        /// <summary>
        /// Gets the number of pages copied to a temporary checkpoint buffer during the last checkpoint.
        /// </summary>
        long LastCheckpointCopiedOnWritePagesNumber { get; }
    }
}
+ IDataRegionMetrics GetDataRegionMetrics(string dataRegionName); + + /// + /// Gets the persistent store metrics. + /// + /// To enable metrics set property + /// in . + /// + IDataStorageMetrics GetDataStorageMetrics(); } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs index b0fe0dfc363dc..a6ff324365484 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs @@ -26,6 +26,7 @@ namespace Apache.Ignite.Core using System.Linq; using System.Text; using System.Xml; + using System.Xml.Serialization; using Apache.Ignite.Core.Binary; using Apache.Ignite.Core.Cache; using Apache.Ignite.Core.Cache.Configuration; @@ -439,6 +440,7 @@ internal void Write(BinaryWriter writer) memEventStorage.Write(writer); } +#pragma warning disable 618 // Obsolete if (MemoryConfiguration != null) { writer.WriteBoolean(true); @@ -448,6 +450,7 @@ internal void Write(BinaryWriter writer) { writer.WriteBoolean(false); } +#pragma warning restore 618 // SQL connector. #pragma warning disable 618 // Obsolete @@ -476,6 +479,7 @@ internal void Write(BinaryWriter writer) writer.WriteBoolean(ClientConnectorConfigurationEnabled); // Persistence. +#pragma warning disable 618 // Obsolete if (PersistentStoreConfiguration != null) { writer.WriteBoolean(true); @@ -485,6 +489,18 @@ internal void Write(BinaryWriter writer) { writer.WriteBoolean(false); } +#pragma warning restore 618 + + // Data storage. + if (DataStorageConfiguration != null) + { + writer.WriteBoolean(true); + DataStorageConfiguration.Write(writer); + } + else + { + writer.WriteBoolean(false); + } // Plugins (should be last). 
if (PluginConfigurations != null) @@ -675,7 +691,9 @@ private void ReadCore(BinaryReader r) if (r.ReadBoolean()) { +#pragma warning disable 618 // Obsolete MemoryConfiguration = new MemoryConfiguration(r); +#pragma warning restore 618 // Obsolete } // SQL. @@ -697,7 +715,15 @@ private void ReadCore(BinaryReader r) // Persistence. if (r.ReadBoolean()) { +#pragma warning disable 618 // Obsolete PersistentStoreConfiguration = new PersistentStoreConfiguration(r); +#pragma warning restore 618 + } + + // Data storage. + if (r.ReadBoolean()) + { + DataStorageConfiguration = new DataStorageConfiguration(r); } } @@ -793,6 +819,7 @@ private void CopyLocalProperties(IgniteConfiguration cfg) /// This property is used to when there are multiple Ignite nodes in one process to distinguish them. /// [Obsolete("Use IgniteInstanceName instead.")] + [XmlIgnore] public string GridName { get { return IgniteInstanceName; } @@ -1243,9 +1270,17 @@ public TimeSpan ClientFailureDetectionTimeout /// /// Gets or sets the page memory configuration. /// for more details. + /// + /// Obsolete, use . /// + [Obsolete("Use DataStorageConfiguration.")] public MemoryConfiguration MemoryConfiguration { get; set; } + /// + /// Gets or sets the data storage configuration. + /// + public DataStorageConfiguration DataStorageConfiguration { get; set; } + /// /// Gets or sets a value indicating how user assemblies should be loaded on remote nodes. /// @@ -1374,14 +1409,17 @@ public TimeSpan LongQueryWarningTimeout /// /// Gets or sets the persistent store configuration. + /// + /// Obsolete, use . /// + [Obsolete("Use DataStorageConfiguration.")] public PersistentStoreConfiguration PersistentStoreConfiguration { get; set; } /// /// Gets or sets a value indicating whether grid should be active on start. /// See also and . /// - /// This property is ignored when is present: + /// This property is ignored when is present: /// cluster is always inactive on start when Ignite Persistence is enabled. 
/// [DefaultValue(DefaultIsActiveOnStart)] diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd index 988fa1fd89ed1..6ede267df10d2 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd +++ b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd @@ -759,9 +759,9 @@ Value indicating whether statistics gathering is enabled on a cache. These statistics can be retrieved via ICache.GetMetrics(). - + - Name of the MemoryPolicyConfiguration for this cache. + Name of the DataRegionConfiguration for this cache. @@ -1380,7 +1380,7 @@ - Persistent store configuration. + Persistent store configuration. Obsolete, use DataStorageConfiguration. @@ -1490,6 +1490,273 @@ + + + Data storage configuration. + + + + + + Default data region configuration. + + + + + Minimal number of empty pages to be present in reuse lists for this memory policy. + + + + + Threshold for memory pages eviction initiation. For instance, if the threshold is 0.9 it means that the page memory will start the eviction only after 90% of the memory region (defined by this policy) is occupied. + + + + + Data region name. + + + + + Page eviction mode. + + + + + Initial data region size. + + + + + Maximum data region size. + + + + + Path to the directory for memory-mapped files. + + + + + Enable memory metrics. + + + + + Number of sub intervals to split RateTimeInterval into. + + + + + Rate time interval for AllocationRate and EvictionRate monitoring. + + + + + Enable disk persistence for this region. + + + + + + + Data region configurations. + + + + + + Data region configuration. + + + + + Minimal number of empty pages to be present in reuse lists for this memory policy. + + + + + Threshold for memory pages eviction initiation. 
For instance, if the threshold is 0.9 it means that the page memory will start the eviction only after 90% of the memory region (defined by this policy) is occupied. + + + + + Data region name. + + + + + Page eviction mode. + + + + + Initial data region size. + + + + + Maximum data region size. + + + + + Path to the directory for memory-mapped files. + + + + + Enable memory metrics. + + + + + Number of sub intervals to split RateTimeInterval into. + + + + + Rate time interval for AllocationRate and EvictionRate monitoring. + + + + + Enable disk persistence for this region. + + + + + + + + + + + Path where data and indexes will be persisted. + + + + + Checkpointing frequency which is a minimal interval when the dirty pages will be written to the Persistent Store. + + + + + Size of the checkpointing page buffer. + + + + + Number of threads for checkpointing. + + + + + Persistent manager file lock wait time. + + + + + Number of checkpoints to store in WAL (Write Ahead Log) history. + + + + + Number of WAL (Write Ahead Log) segments to work with. + + + + + Size of the WAL (Write Ahead Log) segment. + + + + + Path to the directory where WAL (Write Ahead Log) is stored. + + + + + Path to the directory where WAL (Write Ahead Log) archive is stored. + + + + + WAL (Write Ahead Log) mode. + + + + + Size of the TLB (Thread-Local Buffer), in bytes. + + + + + WAL (Write Ahead Log) flush frequency. + + + + + WAL (Write Ahead Log) fsync (disk sync) delay, in nanoseconds. + + + + + Size of the WAL (Write Ahead Log) record iterator buffer, in bytes. + + + + + Whether full pages should always be written. + + + + + Enable persistent store metrics. + + + + + Number of sub intervals to split RateTimeInterval into. + + + + + Rate time interval. + + + + + Checkpoint page write order on disk. + + + + + Threads that generate dirty pages too fast during ongoing checkpoint will be throttled. + + + + + Size of the memory page. 
+ + + + + Initial size of a memory region reserved for system needs. + + + + + Maximum size of a memory region reserved for system needs. + + + + + Number of concurrent segments in Ignite internal page mapping tables. + + + + Plugin configurations. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/MemoryMetrics.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/MemoryMetrics.cs index ae9f85cd1cf41..978590960e096 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/MemoryMetrics.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/MemoryMetrics.cs @@ -24,7 +24,9 @@ namespace Apache.Ignite.Core.Impl.Cache /// /// Memory metrics. /// +#pragma warning disable 618 internal class MemoryMetrics : IMemoryMetrics +#pragma warning restore 618 { /// /// Initializes a new instance of the class. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cluster/ClusterGroupImpl.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cluster/ClusterGroupImpl.cs index b32d331481dca..cc25a6ee23ba3 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cluster/ClusterGroupImpl.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cluster/ClusterGroupImpl.cs @@ -145,6 +145,15 @@ internal class ClusterGroupImpl : PlatformTargetAdapter, IClusterGroup /** */ private const int OpGetServices = 34; + /** */ + private const int OpDataRegionMetrics = 35; + + /** */ + private const int OpDataRegionMetricsByName = 36; + + /** */ + private const int OpDataStorageMetrics = 37; + /** Initial Ignite instance. */ private readonly IIgniteInternal _ignite; @@ -597,6 +606,7 @@ public ICacheMetrics GetCacheMetrics(string cacheName) /// /// Gets the memory metrics. 
/// +#pragma warning disable 618 public ICollection GetMemoryMetrics() { return DoInOp(OpMemoryMetrics, stream => @@ -624,6 +634,47 @@ public IMemoryMetrics GetMemoryMetrics(string memoryPolicyName) return DoOutInOp(OpMemoryMetricsByName, w => w.WriteString(memoryPolicyName), stream => stream.ReadBool() ? new MemoryMetrics(Marshaller.StartUnmarshal(stream, false)) : null); } +#pragma warning restore 618 + + /// + /// Gets the data region metrics. + /// + public ICollection GetDataRegionMetrics() + { + return DoInOp(OpDataRegionMetrics, stream => + { + IBinaryRawReader reader = Marshaller.StartUnmarshal(stream, false); + + var cnt = reader.ReadInt(); + + var res = new List(cnt); + + for (int i = 0; i < cnt; i++) + { + res.Add(new DataRegionMetrics(reader)); + } + + return res; + }); + } + + /// + /// Gets the data region metrics. + /// + public IDataRegionMetrics GetDataRegionMetrics(string memoryPolicyName) + { + return DoOutInOp(OpDataRegionMetricsByName, w => w.WriteString(memoryPolicyName), + stream => stream.ReadBool() ? new DataRegionMetrics(Marshaller.StartUnmarshal(stream, false)) : null); + } + + /// + /// Gets the data storage metrics. + /// + public IDataStorageMetrics GetDataStorageMetrics() + { + return DoInOp(OpDataStorageMetrics, stream => + new DataStorageMetrics(Marshaller.StartUnmarshal(stream, false))); + } /// /// Changes Ignite grid state to active or inactive. @@ -647,11 +698,13 @@ public bool IsActive() /// /// Gets the persistent store metrics. /// +#pragma warning disable 618 public IPersistentStoreMetrics GetPersistentStoreMetrics() { return DoInOp(OpGetPersistentStoreMetrics, stream => new PersistentStoreMetrics(Marshaller.StartUnmarshal(stream, false))); } +#pragma warning restore 618 /// /// Creates new Cluster Group from given native projection. 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/IgniteConfigurationXmlSerializer.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/IgniteConfigurationXmlSerializer.cs index a2f7143e07638..be1a7f1fd5370 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/IgniteConfigurationXmlSerializer.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Common/IgniteConfigurationXmlSerializer.cs @@ -26,6 +26,7 @@ namespace Apache.Ignite.Core.Impl.Common using System.Linq; using System.Reflection; using System.Xml; + using System.Xml.Serialization; using Apache.Ignite.Core.Events; using Apache.Ignite.Core.Impl.Binary; using Apache.Ignite.Core.Impl.Events; @@ -90,7 +91,7 @@ public static void Serialize(IgniteConfiguration configuration, XmlWriter writer if (!property.CanWrite && !IsKeyValuePair(property.DeclaringType)) return; - if (IsObsolete(property)) + if (IsIgnored(property)) return; } @@ -169,7 +170,7 @@ private static void WriteComplexProperty(object obj, XmlWriter writer, Type valu } // Write attributes - foreach (var prop in props.Where(p => IsBasicType(p.PropertyType) && !IsObsolete(p))) + foreach (var prop in props.Where(p => IsBasicType(p.PropertyType) && !IsIgnored(p))) { var converter = GetConverter(prop, prop.PropertyType); var stringValue = converter.ConvertToInvariantString(prop.GetValue(obj, null)); @@ -557,13 +558,13 @@ private static object GetDefaultValue(PropertyInfo property) } /// - /// Determines whether the specified property is obsolete. + /// Determines whether the specified property is marked with XmlIgnore. 
/// - private static bool IsObsolete(PropertyInfo property) + private static bool IsIgnored(PropertyInfo property) { Debug.Assert(property != null); - return property.GetCustomAttributes(typeof(ObsoleteAttribute), true).Any(); + return property.GetCustomAttributes(typeof(XmlIgnoreAttribute), true).Any(); } /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/DataRegionMetrics.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/DataRegionMetrics.cs new file mode 100644 index 0000000000000..7b174a69a1e48 --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/DataRegionMetrics.cs @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Impl +{ + using System.Diagnostics; + using Apache.Ignite.Core.Binary; + + /// + /// Data region metrics. + /// + internal class DataRegionMetrics : IDataRegionMetrics + { + /// + /// Initializes a new instance of the class. 
+ /// + public DataRegionMetrics(IBinaryRawReader reader) + { + Debug.Assert(reader != null); + + Name = reader.ReadString(); + TotalAllocatedPages = reader.ReadLong(); + AllocationRate = reader.ReadFloat(); + EvictionRate = reader.ReadFloat(); + LargeEntriesPagesPercentage = reader.ReadFloat(); + PageFillFactor = reader.ReadFloat(); + } + + /** */ + public string Name { get; private set; } + + /** */ + public long TotalAllocatedPages { get; private set; } + + /** */ + public float AllocationRate { get; private set; } + + /** */ + public float EvictionRate { get; private set; } + + /** */ + public float LargeEntriesPagesPercentage { get; private set; } + + /** */ + public float PageFillFactor { get; private set; } + } +} diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/DataStorageMetrics.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/DataStorageMetrics.cs new file mode 100644 index 0000000000000..58b3b373c5382 --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/DataStorageMetrics.cs @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +namespace Apache.Ignite.Core.Impl +{ + using System; + using System.Diagnostics; + using Apache.Ignite.Core.Binary; + using Apache.Ignite.Core.Impl.Binary; + + /// + /// Data storage metrics. + /// + internal class DataStorageMetrics : IDataStorageMetrics + { + /// + /// Initializes a new instance of the class. + /// + public DataStorageMetrics(IBinaryRawReader reader) + { + Debug.Assert(reader != null); + + WalLoggingRate = reader.ReadFloat(); + WalWritingRate = reader.ReadFloat(); + WalArchiveSegments = reader.ReadInt(); + WalFsyncTimeAverage = reader.ReadFloat(); + LastCheckpointDuration = reader.ReadLongAsTimespan(); + LastCheckpointLockWaitDuration = reader.ReadLongAsTimespan(); + LastCheckpointMarkDuration = reader.ReadLongAsTimespan(); + LastCheckpointPagesWriteDuration = reader.ReadLongAsTimespan(); + LastCheckpointFsyncDuration = reader.ReadLongAsTimespan(); + LastCheckpointTotalPagesNumber = reader.ReadLong(); + LastCheckpointDataPagesNumber = reader.ReadLong(); + LastCheckpointCopiedOnWritePagesNumber = reader.ReadLong(); + } + + /** */ + public float WalLoggingRate { get; private set; } + + /** */ + public float WalWritingRate { get; private set; } + + /** */ + public int WalArchiveSegments { get; private set; } + + /** */ + public float WalFsyncTimeAverage { get; private set; } + + /** */ + public TimeSpan LastCheckpointDuration { get; private set; } + + /** */ + public TimeSpan LastCheckpointLockWaitDuration { get; private set; } + + /** */ + public TimeSpan LastCheckpointMarkDuration { get; private set; } + + /** */ + public TimeSpan LastCheckpointPagesWriteDuration { get; private set; } + + /** */ + public TimeSpan LastCheckpointFsyncDuration { get; private set; } + + /** */ + public long LastCheckpointTotalPagesNumber { get; private set; } + + /** */ + public long LastCheckpointDataPagesNumber { get; private set; } + + /** */ + public long LastCheckpointCopiedOnWritePagesNumber { get; private set; } + } +} \ No newline at end of file diff 
--git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs index 1b4246299cc29..78b7c74537389 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs @@ -752,6 +752,7 @@ public void ResetLostPartitions(params string[] cacheNames) } /** */ +#pragma warning disable 618 public ICollection GetMemoryMetrics() { return _prj.GetMemoryMetrics(); @@ -764,6 +765,7 @@ public IMemoryMetrics GetMemoryMetrics(string memoryPolicyName) return _prj.GetMemoryMetrics(memoryPolicyName); } +#pragma warning restore 618 /** */ public void SetActive(bool isActive) @@ -778,10 +780,30 @@ public bool IsActive() } /** */ +#pragma warning disable 618 public IPersistentStoreMetrics GetPersistentStoreMetrics() { return _prj.GetPersistentStoreMetrics(); } +#pragma warning restore 618 + + /** */ + public ICollection GetDataRegionMetrics() + { + return _prj.GetDataRegionMetrics(); + } + + /** */ + public IDataRegionMetrics GetDataRegionMetrics(string memoryPolicyName) + { + return _prj.GetDataRegionMetrics(memoryPolicyName); + } + + /** */ + public IDataStorageMetrics GetDataStorageMetrics() + { + return _prj.GetDataStorageMetrics(); + } /// /// Gets or creates near cache. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/PersistentStore/PersistentStoreMetrics.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/PersistentStore/PersistentStoreMetrics.cs index 85a4fdfc3ed8d..7eeabb210b77f 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/PersistentStore/PersistentStoreMetrics.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/PersistentStore/PersistentStoreMetrics.cs @@ -26,7 +26,9 @@ namespace Apache.Ignite.Core.Impl.PersistentStore /// /// Persistent store metrics. 
/// +#pragma warning disable 618 internal class PersistentStoreMetrics : IPersistentStoreMetrics +#pragma warning restore 618 { /// /// Initializes a new instance of the class. diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/CheckpointWriteOrder.cs b/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/CheckpointWriteOrder.cs index ba1153d29354c..7128796fb4584 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/CheckpointWriteOrder.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/CheckpointWriteOrder.cs @@ -17,9 +17,12 @@ namespace Apache.Ignite.Core.PersistentStore { + using System; + /// /// Defines checkpoint pages order on disk. /// + [Obsolete("Use Apache.Ignite.Core.Data.CheckpointWriteOrder")] public enum CheckpointWriteOrder { /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/IPersistentStoreMetrics.cs b/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/IPersistentStoreMetrics.cs index e7e8481d6bf3e..989dbd805ee6c 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/IPersistentStoreMetrics.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/IPersistentStoreMetrics.cs @@ -21,7 +21,9 @@ namespace Apache.Ignite.Core.PersistentStore /// /// Persistent store metrics. + /// Obsolete, see . 
/// + [Obsolete("Use IDataStorageMetrics")] public interface IPersistentStoreMetrics { /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/PersistentStoreConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/PersistentStoreConfiguration.cs index 7a2248aa3e3ee..e211126b9dc12 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/PersistentStoreConfiguration.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/PersistentStoreConfiguration.cs @@ -22,11 +22,15 @@ namespace Apache.Ignite.Core.PersistentStore using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using Apache.Ignite.Core.Binary; + using Apache.Ignite.Core.Configuration; using Apache.Ignite.Core.Impl.Binary; /// /// Configures Apache Ignite persistent store. + /// + /// Obsolete, use . /// + [Obsolete("Use DataStorageConfiguration.")] public class PersistentStoreConfiguration { /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/WalMode.cs b/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/WalMode.cs index 44d13b8ede817..c937b783deb1c 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/WalMode.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/PersistentStore/WalMode.cs @@ -17,9 +17,12 @@ namespace Apache.Ignite.Core.PersistentStore { + using System; + /// /// Write Ahead Log mode. 
/// + [Obsolete("Use Apache.Ignite.Core.Data.WalMode")] public enum WalMode { /// From 347696d2426ef5be8294253141e299097a6564cc Mon Sep 17 00:00:00 2001 From: Anton Vinogradov Date: Fri, 20 Oct 2017 17:15:39 +0300 Subject: [PATCH 054/243] Removed redundant libs from libs/optional --- assembly/dependencies-fabric-lgpl.xml | 2 ++ assembly/dependencies-fabric.xml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/assembly/dependencies-fabric-lgpl.xml b/assembly/dependencies-fabric-lgpl.xml index e2fac3ccce3b8..82b5d5cf2cb2c 100644 --- a/assembly/dependencies-fabric-lgpl.xml +++ b/assembly/dependencies-fabric-lgpl.xml @@ -133,6 +133,8 @@ org.apache.ignite:ignite-yardstick org.apache.ignite:ignite-benchmarks org.apache.ignite:ignite-web-agent + org.apache.ignite:ignite-dev-utils + org.apache.ignite:ignite-extdata-platform true diff --git a/assembly/dependencies-fabric.xml b/assembly/dependencies-fabric.xml index 6c4101ecdb3dd..3e9405b215cd1 100644 --- a/assembly/dependencies-fabric.xml +++ b/assembly/dependencies-fabric.xml @@ -138,6 +138,8 @@ org.apache.ignite:ignite-yardstick org.apache.ignite:ignite-benchmarks org.apache.ignite:ignite-web-agent + org.apache.ignite:ignite-dev-utils + org.apache.ignite:ignite-extdata-platform true From a221066b3d029afc392be704a810c0e830fc0c49 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Fri, 20 Oct 2017 21:15:02 +0700 Subject: [PATCH 055/243] IGNITE-6647 Web Console: Added folder for modules migrations. 
(cherry picked from commit 3700717) --- .../web-console/backend/ignite_modules/migrations/README.txt | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 modules/web-console/backend/ignite_modules/migrations/README.txt diff --git a/modules/web-console/backend/ignite_modules/migrations/README.txt b/modules/web-console/backend/ignite_modules/migrations/README.txt new file mode 100644 index 0000000000000..daeae364d0abb --- /dev/null +++ b/modules/web-console/backend/ignite_modules/migrations/README.txt @@ -0,0 +1,4 @@ +Ignite Web Console +====================================== + +This folder contains scripts for modules model migration. From e6cb5300d51c5184e876b988c4683bc605685874 Mon Sep 17 00:00:00 2001 From: devozerov Date: Fri, 20 Oct 2017 18:11:52 +0300 Subject: [PATCH 056/243] AI release notes. --- RELEASE_NOTES.txt | 97 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt index 23497d8953041..25007c773b10f 100644 --- a/RELEASE_NOTES.txt +++ b/RELEASE_NOTES.txt @@ -1,6 +1,103 @@ Apache Ignite Release Notes =========================== +Apache Ignite In-Memory Data Fabric 2.3 +--------------------------------------- +Ignite: +* Ability to enable persistence per data region. +* Default page size is changed to 4KB. +* Ability to enable and disable rebalancing per-node. +* Added methods for batch services deployment. +* Introduced cache start/stop order on cluster activation. +* EstimatedRebalancingFinishTime and RebalancingStartTime metrics are exposed through MXBean interface. +* Ability to (de)activate cluster via http-rest API. +* Added Ignite update notifier. +* Updated Lucene dependency to version 5.5.2. +* Added an option to disable MBeans. +* Added sorted and multithreaded checkpoint modes. +* Added calculation of fill factor memory metric in persistent mode. +* Added estimated time for rebalance start and completion to cache metrics. 
+* Added API to add binary metadata locally. +* Added write throttling during an ongoing checkpoint to avoid zero performance drops. +* Ability to convert WAL to human-redable form. +* Ability to handle CacheObject from DataRecord in standalone WAL iterator. +* Support for uninterruptable writes using AsyncFileIOFactory; enabled with -DIGNITE_USE_ASYNC_FILE_IO_FACTORY=true. +* Enhanced persistent store path resolving to not rely on automatically generated consistent ID. +* Pre-configure local event listeners with IgniteConfiguration.LocalEventListeners. +* Massive performance and stability improvements. + +Ignite.NET: +* LINQ: Local collections joins support. +* LINQ: Regex support. + +Ignite CPP: +* Added Compute::Broadcast method. +* Added Ignite::SetActive method. + +SQL: +* Added sqlline utility to the build. +* CREATE TABLE: Added NOT NULL support. +* CREATE TABLE: Ability to specify cache, key type and value type names. +* CREATE TABLE: Added "WRAP_KEY" and "WRAP_VALUE" options to CREATE TABLE command. +* CREATE TABLE: Added WRITE_SYNCHRONIZATION_MODE option. +* ALTER TABLE: ADD COLUMN support. +* Added lazy query execution mode (SqlFieldsQuery.setLazy). +* Added QuerySqlField.inlineSize property. +* Added FieldsQueryCursor interface to get fields metadata for SqlFieldsQuery. +* Added QueryEntity(Class keyClass, Class valueClass) constructor. +* Improved LocalDate, LocalTime and LocalDateTime support for Java 8. +* Optimized memory consumption during query execution. +* Fixed BigInteger data type handling. + +JDBC Driver: +* Batch update support. +* SQLSTATE error codes support. +* Added "enforceJoinOrder" flag to connection string. +* Added "skipReducerOnUpdate" flag. +* Thin driver: Allowed execution of multiple SQL statements in one command. +* Thin driver: Added metadata support. +* Thin driver: Added type conversions in accordance with specification. +* Thin driver: Added schema to connection string. 
+* Thin driver: Added Statement.closeOnCompletion() support. +* Appended UUID to thick driver's Ignite instance name to avoid collision with user-created instances. +* Fixed a bug in PreparedStatement not clearing query parameters after execution. + +ODBC Driver: +* SQLSTATE error codes support. +* Support for BINARY and VARBINARY types. +* DML statement batching. +* SQL_ATTR_CONNECTION_DEAD connection attribute. +* SQLGetInfo for info types. +* Added "skipReducerOnUpdate" flag. +* SQLRowCount now returns number of affected rows for non-batch queries. +* SQLBindParameter do not unbind parameter if the ParameterValuePtr is NULL anymore. +* Fixed error on cursor closing before all the rows from the result fetched. + +Machine Learning: +* Implemented K-means clustering algorithm optimized for distributed storages. +* Introduced sparse block distributed matrix. +* Initial BLAS implementation. +* Integration with BLAS via netlib. +* Added getRow() and getCol() methods to Matrix API. + +Web Console: +* Improved DDL support. +* Added PK alias generation on Models screen. +* Added generation of QueryEntity.keyFields on model import from RDBMS. +* Added sanitize user on save. +* Added charts with throughput and latency metrics for cache operations. +* Added version to footer. +* Added "Lazy ResultSet" flag on Queries screen. +* Implemented refresh rate for Monitoring screen. +* Implemented cluster nodes log viewer. + +Visor: +* Ability to keep connection opened in batch mode. +* Ability to activate/deactivate cluster. +* Improved cache statistics. +* Added missing configuration properties to "config" command. +* Fixed script execution after alert throttling interval. + Apache Ignite In-Memory Data Fabric 2.2 --------------------------------------- Ignite: From d196045bf8b719f65b4025409112140196aa206c Mon Sep 17 00:00:00 2001 From: devozerov Date: Sat, 21 Oct 2017 18:47:04 +0300 Subject: [PATCH 057/243] IGNITE-6689: SQL: Added DATA_REGION option for CREATE TABLE. 
--- RELEASE_NOTES.txt | 1 + .../processors/query/GridQueryProcessor.java | 6 ++- .../query/h2/ddl/DdlStatementsProcessor.java | 2 +- .../query/h2/sql/GridSqlCreateTable.java | 17 ++++++++ .../query/h2/sql/GridSqlQueryParser.java | 10 +++++ .../cache/index/H2DynamicTableSelfTest.java | 40 ++++++++++++++++++- 6 files changed, 73 insertions(+), 3 deletions(-) diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt index 25007c773b10f..27c57672edaef 100644 --- a/RELEASE_NOTES.txt +++ b/RELEASE_NOTES.txt @@ -39,6 +39,7 @@ SQL: * CREATE TABLE: Added NOT NULL support. * CREATE TABLE: Ability to specify cache, key type and value type names. * CREATE TABLE: Added "WRAP_KEY" and "WRAP_VALUE" options to CREATE TABLE command. +* CREATE TABLE: Added DATA_REGION option. * CREATE TABLE: Added WRITE_SYNCHRONIZATION_MODE option. * ALTER TABLE: ADD COLUMN support. * Added lazy query execution mode (SqlFieldsQuery.setLazy). diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java index c0a169bd9d352..0728e93cbed1c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java @@ -1360,6 +1360,7 @@ else if (op instanceof SchemaAlterTableAddColumnOperation) { * @param templateName Template name. * @param cacheName Cache name. * @param cacheGroup Cache group name. + * @param dataRegion Data region name. * @param affinityKey Affinity key column name. * @param atomicityMode Atomicity mode. * @param writeSyncMode Write synchronization mode. 
@@ -1369,7 +1370,7 @@ else if (op instanceof SchemaAlterTableAddColumnOperation) { */ @SuppressWarnings("unchecked") public void dynamicTableCreate(String schemaName, QueryEntity entity, String templateName, String cacheName, - String cacheGroup, String affinityKey, @Nullable CacheAtomicityMode atomicityMode, + String cacheGroup, @Nullable String dataRegion, String affinityKey, @Nullable CacheAtomicityMode atomicityMode, @Nullable CacheWriteSynchronizationMode writeSyncMode, int backups, boolean ifNotExists) throws IgniteCheckedException { assert !F.isEmpty(templateName); @@ -1403,6 +1404,9 @@ else if (QueryUtils.TEMPLATE_REPLICÄTED.equalsIgnoreCase(templateName)) if (!F.isEmpty(cacheGroup)) ccfg.setGroupName(cacheGroup); + if (!F.isEmpty(dataRegion)) + ccfg.setDataRegionName(dataRegion); + if (atomicityMode != null) ccfg.setAtomicityMode(atomicityMode); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java index f39e587177c82..d29a06399a7a9 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java @@ -184,7 +184,7 @@ else if (stmt0 instanceof GridSqlCreateTable) { throw err; ctx.query().dynamicTableCreate(cmd.schemaName(), e, cmd.templateName(), cmd.cacheName(), - cmd.cacheGroup(),cmd.affinityKey(), cmd.atomicityMode(), + cmd.cacheGroup(), cmd.dataRegionName(), cmd.affinityKey(), cmd.atomicityMode(), cmd.writeSynchronizationMode(), cmd.backups(), cmd.ifNotExists()); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlCreateTable.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlCreateTable.java index 
de1082634d437..3608aedb40dc7 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlCreateTable.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlCreateTable.java @@ -77,6 +77,9 @@ public class GridSqlCreateTable extends GridSqlStatement { /** Forcefully turn single column value into an Object. */ private Boolean wrapVal; + /** Data region. */ + private String dataRegionName; + /** Extra WITH-params. */ private List params; @@ -304,6 +307,20 @@ public void wrapValue(boolean wrapVal) { this.wrapVal = wrapVal; } + /** + * @return Data region name. + */ + public String dataRegionName() { + return dataRegionName; + } + + /** + * @param dataRegionName Data region name. + */ + public void dataRegionName(String dataRegionName) { + this.dataRegionName = dataRegionName; + } + /** * @return Extra WITH-params. */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java index 280fb2d420154..46b2aeed9d27e 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java @@ -480,6 +480,9 @@ public class GridSqlQueryParser { /** */ public static final String PARAM_WRAP_VALUE = "WRAP_VALUE"; + /** Data region name. 
*/ + public static final String PARAM_DATA_REGION = "DATA_REGION"; + /** */ private final IdentityHashMap h2ObjToGridObj = new IdentityHashMap<>(); @@ -1392,6 +1395,13 @@ else if (CacheWriteSynchronizationMode.PRIMARY_SYNC.name().equalsIgnoreCase(val) break; + case PARAM_DATA_REGION: + ensureNotEmpty(name, val); + + res.dataRegionName(val); + + break; + default: throw new IgniteSQLException("Unsupported parameter: " + name, IgniteQueryErrorCode.PARSING); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java index c56db84cbbb5d..ef59a62bcad1f 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java @@ -44,6 +44,8 @@ import org.apache.ignite.cache.QueryIndex; import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.binary.BinaryMarshaller; @@ -80,6 +82,12 @@ public class H2DynamicTableSelfTest extends AbstractSchemaSelfTest { /** */ private final static String INDEXED_CACHE_NAME_2 = INDEXED_CACHE_NAME + "_2"; + /** Data region name. */ + public static final String DATA_REGION_NAME = "my_data_region"; + + /** Bad data region name. 
*/ + public static final String DATA_REGION_NAME_BAD = "my_data_region_bad"; + /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { super.beforeTestsStarted(); @@ -779,7 +787,7 @@ public void testIndexNameConflictCheckDiscovery() throws Exception { e.setKeyType("CityKey"); e.setValueType("City"); - queryProcessor(client()).dynamicTableCreate("PUBLIC", e, CacheMode.PARTITIONED.name(), null, null, + queryProcessor(client()).dynamicTableCreate("PUBLIC", e, CacheMode.PARTITIONED.name(), null, null, null, null, CacheAtomicityMode.ATOMIC, null, 10, false); return null; @@ -862,6 +870,33 @@ public void testAffinityKey() throws Exception { } } + /** + * Test data region. + * + * @throws Exception If failed. + */ + @SuppressWarnings({"ThrowableNotThrown", "unchecked"}) + public void testDataRegion() throws Exception { + // Empty region name. + GridTestUtils.assertThrows(log, new Callable() { + @Override public Void call() throws Exception { + execute("CREATE TABLE TEST_DATA_REGION (name varchar primary key, code int) WITH \"data_region=\""); + + return null; + } + }, IgniteSQLException.class, "Parameter value cannot be empty: DATA_REGION"); + + // Valid region name. + execute("CREATE TABLE TEST_DATA_REGION (name varchar primary key, code int) WITH \"data_region=" + + DATA_REGION_NAME + "\""); + + CacheConfiguration ccfg = + client().cache("SQL_PUBLIC_TEST_DATA_REGION").getConfiguration(CacheConfiguration.class); + + assertEquals(DATA_REGION_NAME, ccfg.getDataRegionName()); + } + + /** * Test various cases of affinity key column specification. 
*/ @@ -1461,7 +1496,10 @@ private IgniteConfiguration clientConfiguration(int idx) throws Exception { private IgniteConfiguration commonConfiguration(int idx) throws Exception { IgniteConfiguration cfg = super.getConfiguration(getTestIgniteInstanceName(idx)); + DataRegionConfiguration dataRegionCfg = new DataRegionConfiguration().setName(DATA_REGION_NAME); + cfg.setMarshaller(new BinaryMarshaller()); + cfg.setDataStorageConfiguration(new DataStorageConfiguration().setDataRegionConfigurations(dataRegionCfg)); return optimize(cfg); } From 69fdac3acf768ecb9df80d4412c4de5ffd5bc4f5 Mon Sep 17 00:00:00 2001 From: Dmitriy Shabalin Date: Mon, 23 Oct 2017 16:09:47 +0700 Subject: [PATCH 058/243] IGNITE-5909 Added list editable component. (cherry picked from commit 01daee6) --- modules/web-console/frontend/.eslintrc | 2 +- modules/web-console/frontend/app/app.js | 3 + .../app/components/list-editable/component.js | 36 +++++ .../list-editable-cols/cols.directive.js | 79 +++++++++++ .../list-editable-cols/cols.style.scss | 51 +++++++ .../list-editable-cols/cols.template.pug | 29 ++++ .../components/list-editable-cols/index.js | 28 ++++ .../list-editable-cols/row.directive.js | 40 ++++++ .../components/list-editable/controller.js | 79 +++++++++++ .../app/components/list-editable/index.js | 27 ++++ .../app/components/list-editable/style.scss | 132 ++++++++++++++++++ .../app/components/list-editable/template.pug | 49 +++++++ .../helpers/jade/form/form-field-dropdown.pug | 2 +- .../app/primitives/form-field/index.scss | 15 ++ .../app/primitives/ui-grid/index.scss | 4 + modules/web-console/frontend/package.json | 2 +- .../frontend/public/images/icons/index.js | 2 + .../frontend/public/images/icons/info.svg | 3 + .../frontend/public/images/icons/sort.svg | 1 + modules/web-console/frontend/tsconfig.json | 12 ++ 20 files changed, 593 insertions(+), 3 deletions(-) create mode 100644 modules/web-console/frontend/app/components/list-editable/component.js create mode 100644 
modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.directive.js create mode 100644 modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.style.scss create mode 100644 modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.template.pug create mode 100644 modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/index.js create mode 100644 modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/row.directive.js create mode 100644 modules/web-console/frontend/app/components/list-editable/controller.js create mode 100644 modules/web-console/frontend/app/components/list-editable/index.js create mode 100644 modules/web-console/frontend/app/components/list-editable/style.scss create mode 100644 modules/web-console/frontend/app/components/list-editable/template.pug create mode 100644 modules/web-console/frontend/public/images/icons/info.svg create mode 100644 modules/web-console/frontend/public/images/icons/sort.svg create mode 100644 modules/web-console/frontend/tsconfig.json diff --git a/modules/web-console/frontend/.eslintrc b/modules/web-console/frontend/.eslintrc index 4e24d0b75b7a7..3c26fa714081a 100644 --- a/modules/web-console/frontend/.eslintrc +++ b/modules/web-console/frontend/.eslintrc @@ -186,7 +186,7 @@ rules: space-in-parens: 0 space-infix-ops: 2 space-unary-ops: [2, { "words": true, "nonwords": false }] - spaced-comment: [1, "always"] + spaced-comment: [1, "always", {"markers": ["/"]}] use-isnan: 2 valid-jsdoc: 0 valid-typeof: 2 diff --git a/modules/web-console/frontend/app/app.js b/modules/web-console/frontend/app/app.js index f2ee8ef7eadc7..44454f659bdd6 100644 --- a/modules/web-console/frontend/app/app.js +++ b/modules/web-console/frontend/app/app.js @@ -128,6 +128,8 @@ import gridItemSelected from './components/grid-item-selected'; import bsSelectMenu from 
'./components/bs-select-menu'; import protectFromBsSelectRender from './components/protect-from-bs-select-render'; import uiGridHovering from './components/ui-grid-hovering'; +import listEditable from './components/list-editable'; + import igniteServices from './services'; // Inject external modules. @@ -202,6 +204,7 @@ angular.module('ignite-console', [ protectFromBsSelectRender.name, AngularStrapTooltip.name, AngularStrapSelect.name, + listEditable.name, // Ignite modules. IgniteModules.name ]) diff --git a/modules/web-console/frontend/app/components/list-editable/component.js b/modules/web-console/frontend/app/components/list-editable/component.js new file mode 100644 index 0000000000000..8cdc08354d676 --- /dev/null +++ b/modules/web-console/frontend/app/components/list-editable/component.js @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import template from './template.pug'; +import controller from './controller'; + +import './style.scss'; + +export default { + controller, + template, + require: { + ngModel: '^ngModel' + }, + bindings: { + }, + transclude: { + noItems: '?listEditableNoItems', + itemView: '?listEditableItemView', + itemEdit: '?listEditableItemEdit' + } +}; diff --git a/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.directive.js b/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.directive.js new file mode 100644 index 0000000000000..55544fb6517d1 --- /dev/null +++ b/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.directive.js @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// @ts-check +/// + +import template from './cols.template.pug'; +import './cols.style.scss'; + +/** + * A column definition. 
+ * + * @typedef {Object} IListEditableColDef + * @prop {string} [name] - optional name to display at column head + * @prop {string} cellClass - CSS class to assign to column cells + * @prop {string} [tip] - optional tip to display at column head + */ +export class ListEditableColsController { + /** @type {Array} - column definitions */ + colDefs; + /** @type {string} - optional class to assign to rows */ + rowClass; + /** @type {ng.INgModelController} */ + ngModel; + + static $inject = ['$compile', '$element', '$scope']; + + /** + * @param {ng.ICompileService} $compile + * @param {JQLite} $element + * @param {ng.IScope} $scope + */ + constructor($compile, $element, $scope) { + this.$compile = $compile; + this.$element = $element; + this.$scope = $scope; + } + + $postLink() { + this.$compile(template)(this.$scope.$new(true), (clone, scope) => { + scope.$ctrl = this; + + this.$element[0].parentElement.insertBefore(clone[0], this.$element[0]); + }); + } + + $onDestroy() { + this.$element = null; + } +} + +/** @returns {ng.IDirective} */ +export default function listEditableCols() { + return { + controller: ListEditableColsController, + controllerAs: '$colsCtrl', + require: { + ngModel: 'ngModel' + }, + bindToController: { + colDefs: ' { + children[index].classList.add(...(Array.isArray(cellClass) ? cellClass : [cellClass])); + }); + } + }; +} diff --git a/modules/web-console/frontend/app/components/list-editable/controller.js b/modules/web-console/frontend/app/components/list-editable/controller.js new file mode 100644 index 0000000000000..bc864ce7cbb5e --- /dev/null +++ b/modules/web-console/frontend/app/components/list-editable/controller.js @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import _ from 'lodash'; + +export default class { + static $inject = ['$animate', '$element', '$transclude']; + + constructor($animate, $element, $transclude) { + $animate.enabled(false, $element); + + this.hasItemView = $transclude.isSlotFilled('itemView'); + + this._cache = {}; + } + + $index(item, $index) { + if (item._id) + return item._id; + + return $index; + } + + $onInit() { + this.ngModel.$isEmpty = (value) => { + return !Array.isArray(value) || !value.length; + }; + } + + save(data, idx) { + this.ngModel.$setViewValue(this.ngModel.$viewValue.map((v, i) => i === idx ? 
data : v)); + } + + revert(idx) { + delete this._cache[idx]; + } + + remove(idx) { + this.ngModel.$setViewValue(this.ngModel.$viewValue.filter((v, i) => i !== idx)); + } + + isEditView(idx) { + return this._cache.hasOwnProperty(idx) || _.isEmpty(this.ngModel.$viewValue[idx]); + } + + getEditView(idx) { + return this._cache[idx]; + } + + startEditView(idx) { + this._cache[idx] = _.clone(this.ngModel.$viewValue[idx]); + } + + stopEditView(data, idx, form) { + delete this._cache[idx]; + + if (form.$pristine) + return; + + if (form.$valid) + this.save(data, idx); + else + this.revert(idx); + } +} diff --git a/modules/web-console/frontend/app/components/list-editable/index.js b/modules/web-console/frontend/app/components/list-editable/index.js new file mode 100644 index 0000000000000..59634c4026105 --- /dev/null +++ b/modules/web-console/frontend/app/components/list-editable/index.js @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import angular from 'angular'; + +import component from './component'; +import listEditableCols from './components/list-editable-cols'; + +export default angular + .module('ignite-console.list-editable', [ + listEditableCols.name + ]) + .component('listEditable', component); diff --git a/modules/web-console/frontend/app/components/list-editable/style.scss b/modules/web-console/frontend/app/components/list-editable/style.scss new file mode 100644 index 0000000000000..0f3f8aecbf34b --- /dev/null +++ b/modules/web-console/frontend/app/components/list-editable/style.scss @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +list-editable { + $min-height: 47px; + $index-column-width: 46px; + + display: block; + flex: 1; + + &-item-view, + &-item-edit, + &-no-items { + flex: 1; + display: block; + } + + &-no-items { + display: flex; + align-items: center; + min-height: $min-height; + padding: 5px 10px; + + font-style: italic; + } + + .le-body { + box-shadow: 0 1px 3px 0 rgba(0, 0, 0, 0.2); + } + + .le-row { + display: flex; + align-items: center; + justify-content: space-between; + min-height: $min-height; + padding: 5px 0; + + cursor: pointer; + border-top: 1px solid #ddd; + + &:nth-child(odd) { + background-color: #ffffff; + } + + &:nth-child(even) { + background-color: #f9f9f9; + } + + &-index, + &-cross { + display: flex; + height: 36px; + } + + &-index { + width: $index-column-width; + flex-basis: $index-column-width; + padding-left: 10px; + flex-shrink: 0; + flex-grow: 0; + align-items: center; + justify-content: center; + } + + &-cross { + [ignite-icon] { + width: 12px; + height: 12px; + } + } + + &-item { + width: 100%; + + &-view { + display: flex; + min-height: 36px; + align-items: center; + } + + &-edit { + margin-left: -11px; + } + } + + &--editable { + position: relative; + z-index: 1; + + align-items: flex-start; + } + + &:not(.le-row--has-item-view) { + & > .le-row-index, + & > .le-row-cross { + margin-top: 18px; + } + + align-items: flex-start; + } + } + + [divider]:after { + content: attr(divider); + + display: inline-flex; + justify-content: center; + align-self: flex-start; + + width: 20px; + height: 36px; + + margin-top: 18px; + margin-right: -20px; + + line-height: 36px; + } +} diff --git a/modules/web-console/frontend/app/components/list-editable/template.pug b/modules/web-console/frontend/app/components/list-editable/template.pug new file mode 100644 index 0000000000000..a713188607273 --- /dev/null +++ b/modules/web-console/frontend/app/components/list-editable/template.pug @@ -0,0 +1,49 @@ +//- + Licensed to the Apache Software Foundation (ASF) under one 
or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +.le-body(ng-if='$ctrl.ngModel.$viewValue.length') + .le-row( + ng-repeat='item in $ctrl.ngModel.$viewValue track by $ctrl.$index(item, $index)' + ng-class=`{ + 'le-row--editable': $ctrl.isEditView($index), + 'le-row--has-item-view': $ctrl.hasItemView + }`) + + .le-row-sort + button.btn-ignite.btn-ignite--link-dashed-secondary + svg(ignite-icon='sort') + + .le-row-index + span {{ $index+1 }} + + .le-row-item + .le-row-item-view(ng-if='$ctrl.hasItemView && !$ctrl.isEditView($index)' ng-click='$ctrl.startEditView($index);') + div(ng-transclude='itemView') + div( + ng-if='!$ctrl.hasItemView || $ctrl.isEditView($index)' + ignite-on-focus-out='$ctrl.stopEditView(item, $index, form);' + ignite-on-focus-out-ignored-classes='bssm-click-overlay bssm-item-text bssm-item-button' + ) + .le-row-item-view(ng-show='$ctrl.hasItemView' ng-init='!$ctrl.hasItemView && $ctrl.startEditView($index);item = $ctrl.getEditView($index);') + div(ng-transclude='itemView') + .le-row-item-edit(ng-form name='form') + div(ng-transclude='itemEdit') + + .le-row-cross + button.btn-ignite.btn-ignite--link-dashed-secondary(type='button' ng-click='$ctrl.remove($index)') + svg(ignite-icon='cross') + +div(ng-transclude='noItems' ng-if='!$ctrl.ngModel.$viewValue.length') diff --git 
a/modules/web-console/frontend/app/helpers/jade/form/form-field-dropdown.pug b/modules/web-console/frontend/app/helpers/jade/form/form-field-dropdown.pug index 117568d24cbc5..cf7d50a051279 100644 --- a/modules/web-console/frontend/app/helpers/jade/form/form-field-dropdown.pug +++ b/modules/web-console/frontend/app/helpers/jade/form/form-field-dropdown.pug @@ -23,7 +23,7 @@ mixin ignite-form-field-dropdown(label, model, name, disabled, required, multipl data-placeholder=placeholderEmpty ? `{{ ${options}.length > 0 ? '${placeholder}' : '${placeholderEmpty}' }}` : placeholder data-ng-model=model - + data-ng-disabled=disabled && `${disabled}` data-ng-required=required && `${required}` bs-select diff --git a/modules/web-console/frontend/app/primitives/form-field/index.scss b/modules/web-console/frontend/app/primitives/form-field/index.scss index 4bc42528dfe5f..f6d849661b4ac 100644 --- a/modules/web-console/frontend/app/primitives/form-field/index.scss +++ b/modules/web-console/frontend/app/primitives/form-field/index.scss @@ -61,6 +61,21 @@ color: $text-color; line-height: 36px; + + &.ng-invalid:not(.ng-pristine), + &.ng-invalid.ng-touched { + border-color: $ignite-brand-primary; + box-shadow: inset 0 1px 3px 0 rgba($ignite-brand-primary, .5); + } + + &:focus { + border-color: $ignite-brand-success; + box-shadow: inset 0 1px 3px 0 rgba($ignite-brand-success, .5); + } + + &:disabled { + opacity: .5; + } } & > input[type='number'] { diff --git a/modules/web-console/frontend/app/primitives/ui-grid/index.scss b/modules/web-console/frontend/app/primitives/ui-grid/index.scss index 88bff694daa22..e86eec7a889d5 100644 --- a/modules/web-console/frontend/app/primitives/ui-grid/index.scss +++ b/modules/web-console/frontend/app/primitives/ui-grid/index.scss @@ -501,6 +501,10 @@ content: ''; } } + + .ui-grid-selection-row-header-buttons::before { + opacity: 1; + } } .ui-grid--ignite.ui-grid-disabled-group-selection { diff --git a/modules/web-console/frontend/package.json 
b/modules/web-console/frontend/package.json index 82c3eeafb4513..95b4a2b26324c 100644 --- a/modules/web-console/frontend/package.json +++ b/modules/web-console/frontend/package.json @@ -32,6 +32,7 @@ "win32" ], "dependencies": { + "@uirouter/angularjs": "1.0.5", "angular": "1.5.11", "angular-acl": "0.1.8", "angular-animate": "1.5.11", @@ -50,7 +51,6 @@ "angular-translate": "2.15.2", "angular-tree-control": "0.2.28", "angular-ui-grid": "4.0.7", - "@uirouter/angularjs": "1.0.5", "babel-core": "6.25.0", "babel-eslint": "7.2.3", "babel-loader": "7.1.1", diff --git a/modules/web-console/frontend/public/images/icons/index.js b/modules/web-console/frontend/public/images/icons/index.js index 5d8ac532a6e9f..d802805ef2ed4 100644 --- a/modules/web-console/frontend/public/images/icons/index.js +++ b/modules/web-console/frontend/public/images/icons/index.js @@ -24,3 +24,5 @@ export download from './download.svg'; export filter from './filter.svg'; export search from './search.svg'; export refresh from './refresh.svg'; +export sort from './sort.svg'; +export info from './info.svg'; diff --git a/modules/web-console/frontend/public/images/icons/info.svg b/modules/web-console/frontend/public/images/icons/info.svg new file mode 100644 index 0000000000000..de92136cb06c2 --- /dev/null +++ b/modules/web-console/frontend/public/images/icons/info.svg @@ -0,0 +1,3 @@ + + + diff --git a/modules/web-console/frontend/public/images/icons/sort.svg b/modules/web-console/frontend/public/images/icons/sort.svg new file mode 100644 index 0000000000000..7e4bb523d7666 --- /dev/null +++ b/modules/web-console/frontend/public/images/icons/sort.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/modules/web-console/frontend/tsconfig.json b/modules/web-console/frontend/tsconfig.json new file mode 100644 index 0000000000000..a70845dc3e991 --- /dev/null +++ b/modules/web-console/frontend/tsconfig.json @@ -0,0 +1,12 @@ +{ + "compilerOptions": { + "allowSyntheticDefaultImports": true, + "target": 
"ES2017", + "allowJs": true, + "checkJs": true, + "baseUrl": ".", + "paths": { + "*": ["*", "node_modules/*"] + } + } +} \ No newline at end of file From ec1a8e7f698e584e94284220aa13ff15449f366e Mon Sep 17 00:00:00 2001 From: oleg-ostanin Date: Tue, 24 Oct 2017 09:48:04 +0300 Subject: [PATCH 059/243] IGNITE-6706: Removed ignite-sqlline module from "optional" build directory. This closes #2901. --- assembly/dependencies-fabric-lgpl.xml | 1 + assembly/dependencies-fabric.xml | 1 + assembly/dependencies-sqlline.xml | 12 ------------ 3 files changed, 2 insertions(+), 12 deletions(-) diff --git a/assembly/dependencies-fabric-lgpl.xml b/assembly/dependencies-fabric-lgpl.xml index 82b5d5cf2cb2c..1347c055d7471 100644 --- a/assembly/dependencies-fabric-lgpl.xml +++ b/assembly/dependencies-fabric-lgpl.xml @@ -135,6 +135,7 @@ org.apache.ignite:ignite-web-agent org.apache.ignite:ignite-dev-utils org.apache.ignite:ignite-extdata-platform + org.apache.ignite:ignite-sqlline true diff --git a/assembly/dependencies-fabric.xml b/assembly/dependencies-fabric.xml index 3e9405b215cd1..4d9c870ce899e 100644 --- a/assembly/dependencies-fabric.xml +++ b/assembly/dependencies-fabric.xml @@ -140,6 +140,7 @@ org.apache.ignite:ignite-web-agent org.apache.ignite:ignite-dev-utils org.apache.ignite:ignite-extdata-platform + org.apache.ignite:ignite-sqlline true diff --git a/assembly/dependencies-sqlline.xml b/assembly/dependencies-sqlline.xml index f8953a17f9aa3..e58a399980673 100644 --- a/assembly/dependencies-sqlline.xml +++ b/assembly/dependencies-sqlline.xml @@ -37,18 +37,6 @@ false - - target - include/sqlline - - *.jar - - - *-tests.jar - *-javadoc.jar - *-sources.jar - - target/libs include/sqlline From 3e52aca47b0a6a0a47f7d063bd0d2bb51489e523 Mon Sep 17 00:00:00 2001 From: oleg-ostanin Date: Tue, 24 Oct 2017 09:49:55 +0300 Subject: [PATCH 060/243] IGNITE-6708: Removed ignite-compatibility module from "optional" build directory. This closes #2902. 
--- assembly/dependencies-fabric-lgpl.xml | 1 + assembly/dependencies-fabric.xml | 1 + 2 files changed, 2 insertions(+) diff --git a/assembly/dependencies-fabric-lgpl.xml b/assembly/dependencies-fabric-lgpl.xml index 1347c055d7471..fe2932e6db725 100644 --- a/assembly/dependencies-fabric-lgpl.xml +++ b/assembly/dependencies-fabric-lgpl.xml @@ -135,6 +135,7 @@ org.apache.ignite:ignite-web-agent org.apache.ignite:ignite-dev-utils org.apache.ignite:ignite-extdata-platform + org.apache.ignite:ignite-compatibility org.apache.ignite:ignite-sqlline diff --git a/assembly/dependencies-fabric.xml b/assembly/dependencies-fabric.xml index 4d9c870ce899e..3bcae044e2a4e 100644 --- a/assembly/dependencies-fabric.xml +++ b/assembly/dependencies-fabric.xml @@ -140,6 +140,7 @@ org.apache.ignite:ignite-web-agent org.apache.ignite:ignite-dev-utils org.apache.ignite:ignite-extdata-platform + org.apache.ignite:ignite-compatibility org.apache.ignite:ignite-sqlline From 103d5b00aa697acca1d41fe39ec27404ac6ac555 Mon Sep 17 00:00:00 2001 From: oleg-ostanin Date: Tue, 24 Oct 2017 10:32:32 +0300 Subject: [PATCH 061/243] IGNITE-6718: Skipped upload of sqlline and compatibility modules into maven central during build. This closes #2911. 
--- modules/compatibility/pom.xml | 13 +++++++++++++ modules/sqlline/pom.xml | 9 +++++++++ 2 files changed, 22 insertions(+) diff --git a/modules/compatibility/pom.xml b/modules/compatibility/pom.xml index 166848d2089c7..845d0cd332dfe 100644 --- a/modules/compatibility/pom.xml +++ b/modules/compatibility/pom.xml @@ -91,4 +91,17 @@ test + + + + + org.apache.maven.plugins + maven-deploy-plugin + 2.8.2 + + true + + + + diff --git a/modules/sqlline/pom.xml b/modules/sqlline/pom.xml index bcb71ec536ed6..1c16a197f3408 100644 --- a/modules/sqlline/pom.xml +++ b/modules/sqlline/pom.xml @@ -69,6 +69,15 @@ + + + org.apache.maven.plugins + maven-deploy-plugin + 2.8.2 + + true + + From 4a2c38333c112d4956d6394667672c1470503435 Mon Sep 17 00:00:00 2001 From: apopov Date: Tue, 24 Oct 2017 11:56:33 +0300 Subject: [PATCH 062/243] IGNITE-6362 NPE in Log4J2Logger --- .../ignite/logger/log4j2/Log4J2Logger.java | 129 +++++++----------- .../log4j2/GridLog4j2CorrectFileNameTest.java | 94 ------------- .../log4j2/GridLog4j2InitializedTest.java | 77 ----------- .../log4j2/GridLog4j2LoggingFileTest.java | 68 --------- .../logger/log4j2/Log4j2LoggerSelfTest.java | 7 + .../Log4j2LoggerVerboseModeSelfTest.java | 71 ++++++---- .../testsuites/IgniteLog4j2TestSuite.java | 2 + 7 files changed, 108 insertions(+), 340 deletions(-) delete mode 100644 modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/GridLog4j2CorrectFileNameTest.java delete mode 100644 modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/GridLog4j2InitializedTest.java delete mode 100644 modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/GridLog4j2LoggingFileTest.java diff --git a/modules/log4j2/src/main/java/org/apache/ignite/logger/log4j2/Log4J2Logger.java b/modules/log4j2/src/main/java/org/apache/ignite/logger/log4j2/Log4J2Logger.java index 46215e5cb8be6..e39a9efcca031 100644 --- a/modules/log4j2/src/main/java/org/apache/ignite/logger/log4j2/Log4J2Logger.java +++ 
b/modules/log4j2/src/main/java/org/apache/ignite/logger/log4j2/Log4J2Logger.java @@ -42,10 +42,8 @@ import org.apache.logging.log4j.core.appender.RollingFileAppender; import org.apache.logging.log4j.core.appender.routing.RoutingAppender; import org.apache.logging.log4j.core.config.AppenderControl; -import org.apache.logging.log4j.core.config.AppenderRef; import org.apache.logging.log4j.core.config.Configuration; import org.apache.logging.log4j.core.config.Configurator; -import org.apache.logging.log4j.core.config.LoggerConfig; import org.apache.logging.log4j.core.layout.PatternLayout; import org.jetbrains.annotations.Nullable; @@ -102,11 +100,6 @@ public class Log4J2Logger implements IgniteLogger, LoggerNodeIdAware { @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized") private Logger impl; - /** Auto added at verbose mode console logger (nullable). */ - @GridToStringExclude - @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized") - private Logger consoleLog; - /** Quiet flag. */ private final boolean quiet; @@ -117,12 +110,15 @@ public class Log4J2Logger implements IgniteLogger, LoggerNodeIdAware { * Creates new logger with given implementation. * * @param impl Log4j implementation to use. - * @param consoleLog Cosole logger (optional). */ - private Log4J2Logger(final Logger impl, @Nullable final Logger consoleLog) { + private Log4J2Logger(final Logger impl) { assert impl != null; - this.impl = impl; - this.consoleLog = consoleLog; + + addConsoleAppenderIfNeeded(new C1() { + @Override public Logger apply(Boolean init) { + return impl; + } + }); quiet = quiet0; } @@ -130,17 +126,17 @@ private Log4J2Logger(final Logger impl, @Nullable final Logger consoleLog) { /** * Creates new logger with given configuration {@code path}. * - * @param path Path to log4j configuration XML file. + * @param path Path to log4j2 configuration XML file. * @throws IgniteCheckedException Thrown in case logger can't be created. 
*/ public Log4J2Logger(String path) throws IgniteCheckedException { if (path == null) - throw new IgniteCheckedException("Configuration XML file for Log4j must be specified."); + throw new IgniteCheckedException("Configuration XML file for Log4j2 must be specified."); final URL cfgUrl = U.resolveIgniteUrl(path); if (cfgUrl == null) - throw new IgniteCheckedException("Log4j configuration path was not found: " + path); + throw new IgniteCheckedException("Log4j2 configuration path was not found: " + path); addConsoleAppenderIfNeeded(new C1() { @Override public Logger apply(Boolean init) { @@ -204,18 +200,17 @@ public Log4J2Logger(final URL cfgUrl) throws IgniteCheckedException { } /** - * Sets level for internal log4j implementation. + * Cleans up the logger configuration. Should be used in unit tests only for sequential tests run with + * different configurations * - * @param level Log level to set. */ - public void setLevel(Level level) { - LoggerContext ctx = (LoggerContext)LogManager.getContext(false); - - Configuration conf = ctx.getConfiguration(); - - conf.getLoggerConfig(impl.getName()).setLevel(level); + static void cleanup() { + synchronized (mux) { + if (inited) + LogManager.shutdown(); - ctx.updateLoggers(conf); + inited = false; + } } /** {@inheritDoc} */ @@ -242,10 +237,10 @@ public void setLevel(Level level) { Appender innerApp = control.getAppender(); if (innerApp instanceof FileAppender) - return normilize(((FileAppender)innerApp).getFileName()); + return normalize(((FileAppender)innerApp).getFileName()); if (innerApp instanceof RollingFileAppender) - return normilize(((RollingFileAppender)innerApp).getFileName()); + return normalize(((RollingFileAppender)innerApp).getFileName()); } } catch (IllegalAccessException | NoSuchFieldException e) { @@ -265,7 +260,7 @@ public void setLevel(Level level) { * @param path Path. * @return Normalized path. 
*/ - private String normilize(String path) { + private String normalize(String path) { if (!U.isWindows()) return path; @@ -335,7 +330,7 @@ private void addConsoleAppenderIfNeeded(@Nullable IgniteClosure // User launched ignite in verbose mode and did not add console appender with INFO level // to configuration and did not set IGNITE_CONSOLE_APPENDER to false. - consoleLog = createConsoleLogger(); + createConsoleLogger(); } quiet0 = quiet; @@ -348,14 +343,13 @@ private void addConsoleAppenderIfNeeded(@Nullable IgniteClosure * * @return Logger with auto configured console appender. */ - public static Logger createConsoleLogger() { - LoggerContext ctx = (LoggerContext)LogManager.getContext(true); + public Logger createConsoleLogger() { + // from http://logging.apache.org/log4j/2.x/manual/customconfig.html + final LoggerContext ctx = impl.getContext(); - Configuration cfg = ctx.getConfiguration(); + final Configuration cfg = ctx.getConfiguration(); - PatternLayout.Builder builder = PatternLayout.newBuilder(); - - builder + PatternLayout.Builder builder = PatternLayout.newBuilder() .withPattern("%d{ISO8601}][%-5p][%t][%c{1}] %m%n") .withCharset(Charset.defaultCharset()) .withAlwaysWriteExceptions(false) @@ -363,9 +357,7 @@ public static Logger createConsoleLogger() { PatternLayout layout = builder.build(); - ConsoleAppender.Builder consoleAppenderBuilder = ConsoleAppender.newBuilder(); - - consoleAppenderBuilder + ConsoleAppender.Builder consoleAppenderBuilder = ConsoleAppender.newBuilder() .withName(CONSOLE_APPENDER) .withLayout(layout); @@ -373,20 +365,12 @@ public static Logger createConsoleLogger() { consoleApp.start(); - AppenderRef ref = AppenderRef.createAppenderRef(CONSOLE_APPENDER, Level.TRACE, null); - - AppenderRef[] refs = {ref}; - - LoggerConfig logCfg = LoggerConfig.createLogger(false, Level.INFO, LogManager.ROOT_LOGGER_NAME, "", refs, null, null, null); - - logCfg.addAppender(consoleApp, null, null); cfg.addAppender(consoleApp); - - 
cfg.addLogger(LogManager.ROOT_LOGGER_NAME, logCfg); + cfg.getRootLogger().addAppender(consoleApp, Level.TRACE, null); ctx.updateLoggers(cfg); - return (Logger)LogManager.getContext().getLogger(LogManager.ROOT_LOGGER_NAME); + return ctx.getRootLogger(); } /** {@inheritDoc} */ @@ -398,7 +382,22 @@ public static Logger createConsoleLogger() { // Set nodeId as system variable to be used at configuration. System.setProperty(NODE_ID, U.id8(nodeId)); - ((LoggerContext)LogManager.getContext(false)).reconfigure(); + if (inited) { + final LoggerContext ctx = impl.getContext(); + + synchronized (mux) { + inited = false; + } + + addConsoleAppenderIfNeeded(new C1() { + @Override public Logger apply(Boolean init) { + if (init) + ctx.reconfigure(); + + return (Logger)LogManager.getRootLogger(); + } + }); + } } /** {@inheritDoc} */ @@ -417,20 +416,17 @@ public static Logger createConsoleLogger() { */ @Override public Log4J2Logger getLogger(Object ctgr) { if (ctgr == null) - return new Log4J2Logger((Logger)LogManager.getRootLogger(), - consoleLog == null ? null : (Logger)LogManager.getContext().getLogger("")); + return new Log4J2Logger((Logger)LogManager.getRootLogger()); if (ctgr instanceof Class) { String name = ((Class)ctgr).getName(); - return new Log4J2Logger((Logger)LogManager.getLogger(name), - consoleLog == null ? null : (Logger)LogManager.getContext().getLogger(name)); + return new Log4J2Logger((Logger)LogManager.getLogger(name)); } String name = ctgr.toString(); - return new Log4J2Logger((Logger)LogManager.getLogger(name), - consoleLog == null ? 
null : (Logger)LogManager.getContext().getLogger(name)); + return new Log4J2Logger((Logger)LogManager.getLogger(name)); } /** {@inheritDoc} */ @@ -439,9 +435,6 @@ public static Logger createConsoleLogger() { warning("Logging at TRACE level without checking if TRACE level is enabled: " + msg); impl.trace(msg); - - if (consoleLog != null) - consoleLog.trace(msg); } /** {@inheritDoc} */ @@ -450,9 +443,6 @@ public static Logger createConsoleLogger() { warning("Logging at DEBUG level without checking if DEBUG level is enabled: " + msg); impl.debug(msg); - - if (consoleLog != null) - consoleLog.debug(msg); } /** {@inheritDoc} */ @@ -461,56 +451,41 @@ public static Logger createConsoleLogger() { warning("Logging at INFO level without checking if INFO level is enabled: " + msg); impl.info(msg); - - if (consoleLog != null) - consoleLog.info(msg); } /** {@inheritDoc} */ @Override public void warning(String msg) { impl.warn(msg); - - if (consoleLog != null) - consoleLog.warn(msg); } /** {@inheritDoc} */ @Override public void warning(String msg, @Nullable Throwable e) { impl.warn(msg, e); - - if (consoleLog != null) - consoleLog.warn(msg, e); } /** {@inheritDoc} */ @Override public void error(String msg) { impl.error(msg); - - if (consoleLog != null) - consoleLog.error(msg); } /** {@inheritDoc} */ @Override public void error(String msg, @Nullable Throwable e) { impl.error(msg, e); - - if (consoleLog != null) - consoleLog.error(msg, e); } /** {@inheritDoc} */ @Override public boolean isTraceEnabled() { - return impl.isTraceEnabled() || (consoleLog != null && consoleLog.isTraceEnabled()); + return impl.isTraceEnabled(); } /** {@inheritDoc} */ @Override public boolean isDebugEnabled() { - return impl.isDebugEnabled() || (consoleLog != null && consoleLog.isDebugEnabled()); + return impl.isDebugEnabled(); } /** {@inheritDoc} */ @Override public boolean isInfoEnabled() { - return impl.isInfoEnabled() || (consoleLog != null && consoleLog.isInfoEnabled()); + return 
impl.isInfoEnabled(); } /** {@inheritDoc} */ diff --git a/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/GridLog4j2CorrectFileNameTest.java b/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/GridLog4j2CorrectFileNameTest.java deleted file mode 100644 index b56be27f07d86..0000000000000 --- a/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/GridLog4j2CorrectFileNameTest.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ignite.logger.log4j2; - -import java.io.File; -import junit.framework.TestCase; -import org.apache.ignite.Ignite; -import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.internal.util.typedef.G; -import org.apache.ignite.internal.util.typedef.internal.U; -import org.apache.ignite.testframework.GridTestUtils; -import org.apache.ignite.testframework.junits.common.GridCommonTest; - -/** - * Tests that several grids log to files with correct names. - */ -@GridCommonTest(group = "Logger") -public class GridLog4j2CorrectFileNameTest extends TestCase { - - /** - * Tests correct behaviour in case 2 local nodes are started. - * - * @throws Exception If error occurs. 
- */ - public void testLogFilesTwoNodes() throws Exception { - checkOneNode(0); - checkOneNode(1); - } - - /** - * Starts the local node and checks for presence of log file. Also checks - * that this is really a log of a started node. - * - * @param id Test-local node ID. - * @throws Exception If error occurred. - */ - private void checkOneNode(int id) throws Exception { - try (Ignite ignite = G.start(getConfiguration("grid" + id))) { - String id8 = U.id8(ignite.cluster().localNode().id()); - String logPath = "work/log/ignite-" + id8 + ".log"; - File logFile = U.resolveIgnitePath(logPath); - assertNotNull("Failed to resolve path: " + logPath, logFile); - assertTrue("Log file does not exist: " + logFile, logFile.exists()); - // We have a row in log with the following content - // con >>> Local node [ID=NodeId ] - String logContent = U.readFileToString(logFile.getAbsolutePath(), - "UTF-8"); - assertTrue( - "Log file does not contain it's node ID: " + logFile, - logContent.contains(">>> Local node [ID=" - + id8.toUpperCase())); - } - } - - /** - * Creates grid configuration. - * - * @param igniteInstanceName Ignite instance name. - * @return Grid configuration. - * @throws Exception If error occurred. 
- */ - private static IgniteConfiguration getConfiguration(String igniteInstanceName) - throws Exception { - IgniteConfiguration cfg = new IgniteConfiguration(); - - - cfg.setIgniteInstanceName(igniteInstanceName); - // We need of a configuration file passed in - File xml = GridTestUtils - .resolveIgnitePath("modules/core/src/test/config/log4j2-test.xml"); - - assert xml != null; - assert xml.exists() == true; - - cfg.setGridLogger(new Log4J2Logger(xml)); - cfg.setConnectorConfiguration(null); - - return cfg; - } -} \ No newline at end of file diff --git a/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/GridLog4j2InitializedTest.java b/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/GridLog4j2InitializedTest.java deleted file mode 100644 index 4758e0a6a1e23..0000000000000 --- a/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/GridLog4j2InitializedTest.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.logger.log4j2; - -import java.net.URL; -import java.util.UUID; -import junit.framework.TestCase; -import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.IgniteLogger; -import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.internal.util.typedef.internal.U; -import org.apache.ignite.testframework.junits.common.GridCommonTest; - -/** - * Log4j initialized test. - */ -@GridCommonTest(group = "Logger") -public class GridLog4j2InitializedTest extends TestCase { - - /** - * @throws Exception If failed. - */ - @Override protected void setUp() throws Exception { - - } - - /** */ - public void testLogInitialize() { - - IgniteConfiguration cfg = new IgniteConfiguration(); - - cfg.setIgniteInstanceName("grid" + 1); - cfg.setNodeId(new UUID(1, 1)); - // cfg.setIgniteHome("/home/glutters/Documenti/apache-ignite/ignite-master/ignite/"); - - URL xml = U.resolveIgniteUrl("config/ignite-log4j2.xml"); - IgniteLogger log; - try { - - log = new Log4J2Logger(xml); - // log.isQuiet(); - cfg.setGridLogger(log); - } catch (IgniteCheckedException e) { - e.printStackTrace(); - return; - } - - assert log.isInfoEnabled() == true; - - if (log.isDebugEnabled()) - log.debug("This is 'debug' message."); - - log.info("This is 'info' message."); - log.warning("This is 'warning' message."); - log.warning("This is 'warning' message.", new Exception( - "It's a test warning exception")); - log.error("This is 'error' message."); - - assert log.getLogger(GridLog4j2InitializedTest.class.getName()) instanceof Log4J2Logger; - } - -} \ No newline at end of file diff --git a/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/GridLog4j2LoggingFileTest.java b/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/GridLog4j2LoggingFileTest.java deleted file mode 100644 index 5c19de065005a..0000000000000 --- a/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/GridLog4j2LoggingFileTest.java +++ /dev/null 
@@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.logger.log4j2; - -import java.io.File; -import java.util.UUID; -import junit.framework.TestCase; -import org.apache.ignite.IgniteLogger; -import org.apache.ignite.logger.LoggerNodeIdAware; -import org.apache.ignite.testframework.GridTestUtils; -import org.junit.Test; - -/** - * Grid Log4j SPI test. - */ -public class GridLog4j2LoggingFileTest extends TestCase { - /** */ - private IgniteLogger log; - - /** {@inheritDoc} */ - @Override protected void setUp() throws Exception { - - - File xml = GridTestUtils - .resolveIgnitePath("modules/core/src/test/config/log4j2-test.xml"); - - assert xml != null; - assert xml.exists() == true; - - log = new Log4J2Logger(xml).getLogger(getClass()); - ((LoggerNodeIdAware) log).setNodeId(UUID.randomUUID()); - - } - - /** - * Tests log4j logging SPI. 
- */ - @Test - public void testLog() { - assert log.isDebugEnabled() == true; - assert log.isInfoEnabled() == true; - - log.debug("This is 'debug' message."); - log.info("This is 'info' message."); - log.warning("This is 'warning' message."); - log.warning("This is 'warning' message.", new Exception( - "It's a test warning exception")); - log.error("This is 'error' message."); - log.error("This is 'error' message.", new Exception( - "It's a test error exception")); - } - -} \ No newline at end of file diff --git a/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/Log4j2LoggerSelfTest.java b/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/Log4j2LoggerSelfTest.java index 18fa265f6fd7d..a5564da01175a 100644 --- a/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/Log4j2LoggerSelfTest.java +++ b/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/Log4j2LoggerSelfTest.java @@ -41,6 +41,13 @@ public class Log4j2LoggerSelfTest extends TestCase { /** */ private static final String LOG_PATH_MAIN = "config/ignite-log4j2.xml"; + /** + * @throws Exception If failed. + */ + @Override protected void setUp() throws Exception { + Log4J2Logger.cleanup(); + } + /** * @throws Exception If failed. 
*/ diff --git a/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/Log4j2LoggerVerboseModeSelfTest.java b/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/Log4j2LoggerVerboseModeSelfTest.java index 95c7ea8f70398..c28108ce54914 100644 --- a/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/Log4j2LoggerVerboseModeSelfTest.java +++ b/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/Log4j2LoggerVerboseModeSelfTest.java @@ -18,6 +18,7 @@ package org.apache.ignite.logger.log4j2; import java.io.ByteArrayOutputStream; +import java.io.File; import java.io.PrintStream; import java.util.Collections; import junit.framework.TestCase; @@ -26,6 +27,7 @@ import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; import org.apache.logging.log4j.Level; /** @@ -33,10 +35,18 @@ */ public class Log4j2LoggerVerboseModeSelfTest extends TestCase { /** */ - public static final String LOG_PATH_VERBOSE_TEST = "modules/core/src/test/config/log4j2-verbose-test.xml"; + private static final String LOG_PATH_VERBOSE_TEST = "modules/core/src/test/config/log4j2-verbose-test.xml"; /** - * Test does not work after another tests. Can be run from IDE as separate test. + * @throws Exception If failed. + */ + @Override protected void setUp() throws Exception { + Log4J2Logger.cleanup(); + + } + + /** + * Test works fine after other tests. Please do not forget to call Log4J2Logger.cleanup() * * @throws Exception If failed. */ @@ -47,49 +57,58 @@ public void testVerboseMode() throws Exception { final ByteArrayOutputStream testOut = new ByteArrayOutputStream(); final ByteArrayOutputStream testErr = new ByteArrayOutputStream(); + String consoleOut = "Empty"; + String consoleErr = "Empty"; + String testMsg = "******* Hello Tester! 
******* "; + try { System.setOut(new PrintStream(testOut)); System.setErr(new PrintStream(testErr)); + System.setProperty("IGNITE_QUIET", "false"); + try (Ignite ignite = G.start(getConfiguration("verboseLogGrid", LOG_PATH_VERBOSE_TEST))) { - String testMsg = "******* Hello Tester! ******* "; ignite.log().error(testMsg + Level.ERROR); ignite.log().warning(testMsg + Level.WARN); ignite.log().info(testMsg + Level.INFO); ignite.log().debug(testMsg + Level.DEBUG); ignite.log().trace(testMsg + Level.TRACE); - - String consoleOut = testOut.toString(); - String consoleErr = testErr.toString(); - - assertTrue(consoleOut.contains(testMsg + Level.INFO)); - assertTrue(consoleOut.contains(testMsg + Level.DEBUG)); - assertTrue(consoleOut.contains(testMsg + Level.TRACE)); - assertTrue(consoleOut.contains(testMsg + Level.ERROR)); - assertTrue(consoleOut.contains(testMsg + Level.WARN)); - - assertTrue(consoleErr.contains(testMsg + Level.ERROR)); - assertTrue(consoleErr.contains(testMsg + Level.WARN)); - assertTrue(!consoleErr.contains(testMsg + Level.INFO)); - assertTrue(consoleErr.contains(testMsg + Level.DEBUG)); - assertTrue(consoleErr.contains(testMsg + Level.TRACE)); } + } finally { System.setProperty("IGNITE_QUIET", "true"); System.setOut(backupSysOut); System.setErr(backupSysErr); + } - System.out.println("**************** Out Console content ***************"); - System.out.println(testOut.toString()); + testOut.flush(); + testErr.flush(); - System.err.println("**************** Err Console content ***************"); - System.err.println(testErr.toString()); - } + consoleOut = testOut.toString(); + consoleErr = testErr.toString(); + + System.out.println("**************** Out Console content ***************"); + System.out.println(consoleOut); + + System.out.println("**************** Err Console content ***************"); + System.out.println(consoleErr); + + assertTrue(consoleOut.contains(testMsg + Level.INFO)); + assertTrue(consoleOut.contains(testMsg + Level.DEBUG)); + 
assertTrue(consoleOut.contains(testMsg + Level.TRACE)); + assertTrue(consoleOut.contains(testMsg + Level.ERROR)); + assertTrue(consoleOut.contains(testMsg + Level.WARN)); + + assertTrue(consoleErr.contains(testMsg + Level.ERROR)); + assertTrue(consoleErr.contains(testMsg + Level.WARN)); + assertTrue(!consoleErr.contains(testMsg + Level.INFO)); + assertTrue(consoleErr.contains(testMsg + Level.DEBUG)); + assertTrue(consoleErr.contains(testMsg + Level.TRACE)); } /** @@ -108,9 +127,13 @@ private static IgniteConfiguration getConfiguration(String igniteInstanceName, S setAddresses(Collections.singleton("127.0.0.1:47500..47509")); }}); + File xml = GridTestUtils.resolveIgnitePath(LOG_PATH_VERBOSE_TEST); + + Log4J2Logger logger = new Log4J2Logger(xml); + return new IgniteConfiguration() .setIgniteInstanceName(igniteInstanceName) - .setGridLogger(new Log4J2Logger(logPath)) + .setGridLogger(logger) .setConnectorConfiguration(null) .setDiscoverySpi(disco); } diff --git a/modules/log4j2/src/test/java/org/apache/ignite/testsuites/IgniteLog4j2TestSuite.java b/modules/log4j2/src/test/java/org/apache/ignite/testsuites/IgniteLog4j2TestSuite.java index dd2b4b513e324..a23cb477742f4 100644 --- a/modules/log4j2/src/test/java/org/apache/ignite/testsuites/IgniteLog4j2TestSuite.java +++ b/modules/log4j2/src/test/java/org/apache/ignite/testsuites/IgniteLog4j2TestSuite.java @@ -19,6 +19,7 @@ import junit.framework.TestSuite; import org.apache.ignite.logger.log4j2.Log4j2LoggerSelfTest; +import org.apache.ignite.logger.log4j2.Log4j2LoggerVerboseModeSelfTest; /** * Log4j2 logging tests. 
@@ -32,6 +33,7 @@ public static TestSuite suite() throws Exception { TestSuite suite = new TestSuite("Log4j2 Logging Test Suite"); suite.addTest(new TestSuite(Log4j2LoggerSelfTest.class)); + suite.addTest(new TestSuite(Log4j2LoggerVerboseModeSelfTest.class)); return suite; } From b92a9c6cbd7a48d6399da6b8cdaad014ee5770c4 Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Tue, 24 Oct 2017 14:02:18 +0300 Subject: [PATCH 063/243] IGNITE-6721 - Fixed page evictions in mixed mode --- .../persistence/CacheDataRowAdapter.java | 3 +- .../IgniteCacheDatabaseSharedManager.java | 18 ++---- ...PageEvictionMultinodeMixedRegionsTest.java | 59 +++++++++++++++++++ .../IgniteCacheEvictionSelfTestSuite.java | 3 + 4 files changed, 70 insertions(+), 13 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/paged/PageEvictionMultinodeMixedRegionsTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java index 0fd8323523152..9257424a29ffa 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java @@ -22,6 +22,7 @@ import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.PageUtils; +import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl; import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.CacheObject; import org.apache.ignite.internal.processors.cache.CacheObjectContext; @@ -132,7 +133,7 @@ public final void initFromLink( final long pageId = pageId(nextLink); // Group is null if try evict page, with 
persistence evictions should be disabled. - assert grp != null || !sharedCtx.database().persistenceEnabled(); + assert grp != null || pageMem instanceof PageMemoryNoStoreImpl; int grpId = grp != null ? grp.groupId() : 0; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index da598d14486dc..933c19538c97f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -57,7 +57,6 @@ import org.apache.ignite.internal.processors.cluster.IgniteChangeGlobalStateSupport; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T2; -import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; @@ -574,10 +573,7 @@ public void readCheckpointAndRestoreMemory(List cachesTo if (!F.isEmpty(memMetricsMap)) { DataRegionMetrics memMetrics = memMetricsMap.get(memPlcName); - if (memMetrics == null) - return null; - else - return new DataRegionMetricsSnapshot(memMetrics); + return memMetrics == null ? 
null : new DataRegionMetricsSnapshot(memMetrics); } else return null; @@ -794,7 +790,7 @@ public void ensureFreeSpace(DataRegion memPlc) throws IgniteCheckedException { DataRegionConfiguration plcCfg = memPlc.config(); - if (plcCfg.getPageEvictionMode() == DataPageEvictionMode.DISABLED) + if (plcCfg.getPageEvictionMode() == DataPageEvictionMode.DISABLED || plcCfg.isPersistenceEnabled()) return; long memorySize = plcCfg.getMaxSize(); @@ -851,7 +847,7 @@ private DataRegion initMemory( * @param pageMem Page memory. */ private PageEvictionTracker createPageEvictionTracker(DataRegionConfiguration plc, PageMemory pageMem) { - if (plc.getPageEvictionMode() == DataPageEvictionMode.DISABLED || CU.isPersistenceEnabled(cctx.gridConfig())) + if (plc.getPageEvictionMode() == DataPageEvictionMode.DISABLED || plc.isPersistenceEnabled()) return new NoOpPageEvictionTracker(); assert pageMem instanceof PageMemoryNoStoreImpl : pageMem.getClass(); @@ -885,12 +881,10 @@ private PageEvictionTracker createPageEvictionTracker(DataRegionConfiguration pl return null; final PdsFolderSettings folderSettings = cctx.kernalContext().pdsFolderResolver().resolveFolders(); - final String folderName; - if(folderSettings.isCompatible()) - folderName = String.valueOf(folderSettings.consistentId()).replaceAll("[:,\\.]", "_"); - else - folderName = folderSettings.folderName(); + final String folderName = folderSettings.isCompatible() ? 
+ String.valueOf(folderSettings.consistentId()).replaceAll("[:,\\.]", "_") : + folderSettings.folderName(); return buildPath(path, folderName); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/paged/PageEvictionMultinodeMixedRegionsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/paged/PageEvictionMultinodeMixedRegionsTest.java new file mode 100644 index 0000000000000..1015e52b220a0 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/paged/PageEvictionMultinodeMixedRegionsTest.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.eviction.paged; + +import org.apache.ignite.configuration.DataPageEvictionMode; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.testframework.GridTestUtils; + +/** + * + */ +public class PageEvictionMultinodeMixedRegionsTest extends PageEvictionMultinodeTest { + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + setEvictionMode(DataPageEvictionMode.RANDOM_2_LRU, cfg); + + DataRegionConfiguration persReg = new DataRegionConfiguration() + .setName("persisted") + .setPersistenceEnabled(true); + + cfg.getDataStorageConfiguration().setDataRegionConfigurations(persReg); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + GridTestUtils.deleteDbFiles(); + + super.beforeTestsStarted(); + + clientGrid.active(true); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + super.afterTestsStopped(); + + GridTestUtils.deleteDbFiles(); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheEvictionSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheEvictionSelfTestSuite.java index 7985e2e4b9bcd..84b14521af097 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheEvictionSelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheEvictionSelfTestSuite.java @@ -34,6 +34,7 @@ import org.apache.ignite.internal.processors.cache.eviction.lru.LruNearEvictionPolicySelfTest; import org.apache.ignite.internal.processors.cache.eviction.lru.LruNearOnlyNearEvictionPolicySelfTest; import org.apache.ignite.internal.processors.cache.eviction.paged.PageEvictionDataStreamerTest; 
+import org.apache.ignite.internal.processors.cache.eviction.paged.PageEvictionMultinodeMixedRegionsTest; import org.apache.ignite.internal.processors.cache.eviction.paged.PageEvictionReadThroughTest; import org.apache.ignite.internal.processors.cache.eviction.paged.PageEvictionTouchOrderTest; import org.apache.ignite.internal.processors.cache.eviction.paged.Random2LruNearEnabledPageEvictionMultinodeTest; @@ -82,6 +83,8 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(PageEvictionReadThroughTest.class)); suite.addTest(new TestSuite(PageEvictionDataStreamerTest.class)); + suite.addTest(new TestSuite(PageEvictionMultinodeMixedRegionsTest.class)); + return suite; } } From 5150e8b25340794dba11f73d53e890176c528fb1 Mon Sep 17 00:00:00 2001 From: Oleg Ostanin Date: Tue, 24 Oct 2017 14:53:16 +0300 Subject: [PATCH 064/243] IGNITE-6660 Python Redis example fails for python 3 run. This closes #2879. Signed-off-by: nikolay_tikhonov --- examples/config/redis/example-redis.xml | 74 +++++++++++++++++++++++++ examples/redis/redis-example.py | 16 +++--- 2 files changed, 82 insertions(+), 8 deletions(-) create mode 100644 examples/config/redis/example-redis.xml diff --git a/examples/config/redis/example-redis.xml b/examples/config/redis/example-redis.xml new file mode 100644 index 0000000000000..5db27a1848d6f --- /dev/null +++ b/examples/config/redis/example-redis.xml @@ -0,0 +1,74 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 127.0.0.1:47500..47509 + + + + + + + + diff --git a/examples/redis/redis-example.py b/examples/redis/redis-example.py index 68e6e2a1e5f3b..c194292d1df0d 100644 --- a/examples/redis/redis-example.py +++ b/examples/redis/redis-example.py @@ -30,34 +30,34 @@ r.set('k1', 1) # check. -print 'Value for "k1": %s' % r.get('k1') +print('Value for "k1": %s' % r.get('k1')) # change entry's value. r.set('k1', 'new_val') # check. 
-print 'Value for "k1": %s' % r.get('k1') +print('Value for "k1": %s' % r.get('k1')) # set another entry. r.set('k2', 2) # check. -print 'Value for "k2": %s' % r.get('k2') +print('Value for "k2": %s' % r.get('k2')) # get both values. -print 'Values for "k1" and "k2": %s' % r.mget('k1', 'k2') +print('Values for "k1" and "k2": %s' % r.mget('k1', 'k2')) # delete one entry. r.delete('k1') # check one entry left. -print 'Values for "k1" and "k2": %s' % r.mget('k1', 'k2') +print('Values for "k1" and "k2": %s' % r.mget('k1', 'k2')) # check db size -print 'Db size: %d' % r.dbsize() +print('Db size: %d' % r.dbsize()) # increment. -print 'Value for incremented "inc_k" : %s' % r.incr('inc_k') +print('Value for incremented "inc_k" : %s' % r.incr('inc_k')) # increment again. -print 'Value for incremented "inc_k" : %s' % r.incr('inc_k') +print('Value for incremented "inc_k" : %s' % r.incr('inc_k')) From 94df5e9bf82885b5c344f231f13a0aa408268c70 Mon Sep 17 00:00:00 2001 From: vsisko Date: Tue, 24 Oct 2017 19:18:02 +0700 Subject: [PATCH 065/243] IGNITE-6560 Minor changes of Data region configuration. 
(cherry picked from commit eb81e6c) --- .../visor/cache/VisorCacheConfiguration.java | 11 ++++++----- modules/web-console/backend/app/mongo.js | 1 + .../generator/ConfigurationGenerator.js | 5 ++++- .../generator/defaults/Cluster.service.js | 2 +- .../states/configuration/caches/memory.pug | 16 +++++++++++----- .../configuration/clusters/data-storage.pug | 2 +- 6 files changed, 24 insertions(+), 13 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheConfiguration.java index f2fd195e2190e..b0126788db819 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheConfiguration.java @@ -143,7 +143,7 @@ public class VisorCacheConfiguration extends VisorDataTransferObject { private boolean loadPrevVal; /** Name of {@link DataRegionConfiguration} for this cache */ - private String memPlcName; + private String dataRegName; /** Maximum inline size for sql indexes. */ private int sqlIdxMaxInlineSize; @@ -219,7 +219,7 @@ public VisorCacheConfiguration(IgniteEx ignite, CacheConfiguration ccfg, IgniteU evictFilter = compactClass(ccfg.getEvictionFilter()); lsnrConfigurations = compactIterable(ccfg.getCacheEntryListenerConfigurations()); loadPrevVal = ccfg.isLoadPreviousValue(); - memPlcName = ccfg.getDataRegionName(); + dataRegName = ccfg.getDataRegionName(); sqlIdxMaxInlineSize = ccfg.getSqlIndexMaxInlineSize(); nodeFilter = compactClass(ccfg.getNodeFilter()); qryDetailMetricsSz = ccfg.getQueryDetailMetricsSize(); @@ -462,8 +462,9 @@ public boolean isLoadPreviousValue() { /** * @return {@link DataRegionConfiguration} name. 
*/ + @Deprecated public String getMemoryPolicyName() { - return memPlcName; + return dataRegName; } /** @@ -551,7 +552,7 @@ public IgniteUuid getDynamicDeploymentId() { U.writeString(out, evictFilter); U.writeString(out, lsnrConfigurations); out.writeBoolean(loadPrevVal); - U.writeString(out, memPlcName); + U.writeString(out, dataRegName); out.writeInt(sqlIdxMaxInlineSize); U.writeString(out, nodeFilter); out.writeInt(qryDetailMetricsSz); @@ -595,7 +596,7 @@ public IgniteUuid getDynamicDeploymentId() { evictFilter = U.readString(in); lsnrConfigurations = U.readString(in); loadPrevVal = in.readBoolean(); - memPlcName = U.readString(in); + dataRegName = U.readString(in); sqlIdxMaxInlineSize = in.readInt(); nodeFilter = U.readString(in); qryDetailMetricsSz = in.readInt(); diff --git a/modules/web-console/backend/app/mongo.js b/modules/web-console/backend/app/mongo.js index bfe1ae2701550..b4bc9fce50a7f 100644 --- a/modules/web-console/backend/app/mongo.js +++ b/modules/web-console/backend/app/mongo.js @@ -333,6 +333,7 @@ module.exports.factory = function(passportMongo, settings, pluginMongo, mongoose }, evictionFilter: String, memoryPolicyName: String, + dataRegionName: String, sqlIndexMaxInlineSize: Number, topologyValidator: String }); diff --git a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js index 74588f01e60a3..615857cdc8bca 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js @@ -1957,9 +1957,12 @@ export default class IgniteConfigurationGenerator { // Generate cache memory group. 
static cacheMemory(cache, available, ccfg = this.cacheConfigurationBean(cache)) { // Since ignite 2.0 - if (available('2.0.0')) + if (available(['2.0.0', '2.3.0'])) ccfg.stringProperty('memoryPolicyName'); + if (available('2.3.0')) + ccfg.stringProperty('dataRegionName'); + // Removed in ignite 2.0 if (available(['1.0.0', '2.0.0'])) { ccfg.enumProperty('memoryMode'); diff --git a/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cluster.service.js b/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cluster.service.js index 0e786d935160d..bafb2027ecfd9 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cluster.service.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cluster.service.js @@ -314,7 +314,7 @@ const DFLT_CLUSTER = { dataStorageConfiguration: { systemCacheInitialSize: 41943040, systemCacheMaxSize: 104857600, - pageSize: 2048, + pageSize: 4096, storagePath: 'db', dataRegionConfigurations: { name: 'default', diff --git a/modules/web-console/frontend/app/modules/states/configuration/caches/memory.pug b/modules/web-console/frontend/app/modules/states/configuration/caches/memory.pug index 17ceedf46531b..e00f2a6ecb949 100644 --- a/modules/web-console/frontend/app/modules/states/configuration/caches/memory.pug +++ b/modules/web-console/frontend/app/modules/states/configuration/caches/memory.pug @@ -34,12 +34,18 @@ include /app/helpers/jade/mixins .panel-body(ng-if=`ui.isPanelLoaded('${form}')`) .col-sm-6 //- Since ignite 2.0 - div(ng-if='$ctrl.available("2.0.0")') - .settings-row + .settings-row(ng-if='$ctrl.available("2.0.0")') +checkbox('Onheap cache enabled', model + '.onheapCacheEnabled', '"OnheapCacheEnabled"', 'Checks if the on-heap cache is enabled for the off-heap based page memory') - .settings-row - +text('Memory policy name:', model + '.memoryPolicyName', '"MemoryPolicyName"', 'false', 'default', - 'Name of memory policy configuration 
for this cache') + + //- Since ignite 2.0 deprecated in ignite 2.3 + .settings-row(ng-if='$ctrl.available(["2.0.0", "2.3.0"])') + +text('Memory policy name:', model + '.memoryPolicyName', '"MemoryPolicyName"', 'false', 'default', + 'Name of memory policy configuration for this cache') + + //- Since ignite 2.3 + .settings-row(ng-if='$ctrl.available("2.3.0")') + +text('Data region name:', model + '.dataRegionName', '"DataRegionName"', 'false', 'default', + 'Name of data region configuration for this cache') //- Removed in ignite 2.0 div(ng-if='$ctrl.available(["1.0.0", "2.0.0"])') diff --git a/modules/web-console/frontend/app/modules/states/configuration/clusters/data-storage.pug b/modules/web-console/frontend/app/modules/states/configuration/clusters/data-storage.pug index 9c2dca1013ee4..a635739c16141 100644 --- a/modules/web-console/frontend/app/modules/states/configuration/clusters/data-storage.pug +++ b/modules/web-console/frontend/app/modules/states/configuration/clusters/data-storage.pug @@ -34,7 +34,7 @@ include /app/helpers/jade/mixins .col-sm-6 .settings-row +number-min-max('Page size:', model + '.pageSize', '"DataStorageConfigurationPageSize"', - 'true', '2048', '1024', '16384', 'Every memory region is split on pages of fixed size') + 'true', '4096', '1024', '16384', 'Every memory region is split on pages of fixed size') .settings-row +number('Concurrency level:', model + '.concurrencyLevel', '"DataStorageConfigurationConcurrencyLevel"', 'true', 'availableProcessors', '2', 'The number of concurrent segments in Ignite internal page mapping tables') From 10afc0394a842898f2969108bf2abc35ebfe2d73 Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Tue, 24 Oct 2017 16:08:45 +0300 Subject: [PATCH 066/243] IGNITE-6724 Fixed PersistentStoreExample --- .../ignite/examples/persistentstore/PersistentStoreExample.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/examples/src/main/java/org/apache/ignite/examples/persistentstore/PersistentStoreExample.java b/examples/src/main/java/org/apache/ignite/examples/persistentstore/PersistentStoreExample.java index eec0866c7210c..e9731b9c5fcbd 100644 --- a/examples/src/main/java/org/apache/ignite/examples/persistentstore/PersistentStoreExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/persistentstore/PersistentStoreExample.java @@ -51,7 +51,7 @@ public class PersistentStoreExample { private static final String ORG_CACHE = CacheQueryExample.class.getSimpleName() + "Organizations"; /** */ - private static final boolean UPDATE = false; + private static final boolean UPDATE = true; /** * @param args Program arguments, ignored. From 89fdca84e82e112cc90c20b8d88cfc0f663977a1 Mon Sep 17 00:00:00 2001 From: tledkov-gridgain Date: Tue, 24 Oct 2017 17:42:00 +0300 Subject: [PATCH 067/243] IGNITE-6723: Fix to ScalarCreditRiskExample: changed type from scala sequence to scala Array for member of serialized closure. This closes #2919. --- .../ignite/scalar/examples/ScalarCreditRiskExample.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCreditRiskExample.scala b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCreditRiskExample.scala index 1b0d767edf837..e3ba0014ff332 100644 --- a/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCreditRiskExample.scala +++ b/examples/src/main/scala/org/apache/ignite/scalar/examples/ScalarCreditRiskExample.scala @@ -68,7 +68,7 @@ object ScalarCreditRiskExample { // aware if method was executed just locally or on the 100s of cluster nodes. // Credit risk crdRisk is the minimal amount that creditor has to have // available to cover possible defaults. 
- val crdRisk = ignite$ @< (closures(ignite$.cluster().nodes().size(), portfolio, horizon, iter, percentile), + val crdRisk = ignite$ @< (closures(ignite$.cluster().nodes().size(), portfolio.toArray, horizon, iter, percentile), (s: Seq[Double]) => s.sum / s.size, null) println("Credit risk [crdRisk=" + crdRisk + ", duration=" + @@ -86,7 +86,7 @@ object ScalarCreditRiskExample { * @param percentile Percentile. * @return Collection of closures. */ - private def closures(clusterSize: Int, portfolio: Seq[Credit], horizon: Int, iter: Int, + private def closures(clusterSize: Int, portfolio: Array[Credit], horizon: Int, iter: Int, percentile: Double): Seq[() => Double] = { val iterPerNode: Int = math.round(iter / clusterSize.asInstanceOf[Float]) val lastNodeIter: Int = iter - (clusterSize - 1) * iterPerNode From b8c756b2c8eff3d07bc69e053fa579f6f9d1db64 Mon Sep 17 00:00:00 2001 From: oleg-ostanin Date: Tue, 24 Oct 2017 19:06:04 +0300 Subject: [PATCH 068/243] IGNITE-6726 Licenses for Sqlline are missing --- assembly/dependencies-sqlline.xml | 5 + modules/sqlline/licenses/apache-2.0.txt | 202 ++++++++++++++++++++++++ modules/sqlline/pom.xml | 18 +-- 3 files changed, 215 insertions(+), 10 deletions(-) create mode 100644 modules/sqlline/licenses/apache-2.0.txt diff --git a/assembly/dependencies-sqlline.xml b/assembly/dependencies-sqlline.xml index e58a399980673..45c027ab87bb4 100644 --- a/assembly/dependencies-sqlline.xml +++ b/assembly/dependencies-sqlline.xml @@ -42,6 +42,11 @@ include/sqlline + + ${basedir}/target/licenses + include/licenses + + bin / diff --git a/modules/sqlline/licenses/apache-2.0.txt b/modules/sqlline/licenses/apache-2.0.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/modules/sqlline/licenses/apache-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/modules/sqlline/pom.xml b/modules/sqlline/pom.xml index 1c16a197f3408..d8ae887f21538 100644 --- a/modules/sqlline/pom.xml +++ b/modules/sqlline/pom.xml @@ -21,21 +21,19 @@ POM file. --> - + + 4.0.0 + - apache-ignite org.apache.ignite - 2.3.0-SNAPSHOT - ../../pom.xml + ignite-parent + 1 + ../../parent - 4.0.0 ignite-sqlline - jar - - sqlline - http://maven.apache.org + 2.3.0-SNAPSHOT + http://ignite.apache.org UTF-8 From 089ebecb3e5962c7a38afd01bd18c77feb23d155 Mon Sep 17 00:00:00 2001 From: vsisko Date: Wed, 25 Oct 2017 11:23:11 +0700 Subject: [PATCH 069/243] IGNITE-6671 Web Agent: Fixed data type conversion for Oracle NUMBER(N) data types. 
(cherry picked from commit 93be8ea) --- .../agent/db/dialect/OracleMetadataDialect.java | 13 +++++-------- .../ignite/console/agent/rest/RestExecutor.java | 4 ++-- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/db/dialect/OracleMetadataDialect.java b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/db/dialect/OracleMetadataDialect.java index 6d12c81e1fb1e..b8b72d5422b94 100644 --- a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/db/dialect/OracleMetadataDialect.java +++ b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/db/dialect/OracleMetadataDialect.java @@ -208,21 +208,18 @@ private int decodeType(ResultSet rs) throws SQLException { } else { if (precision < 1) - return INTEGER; - - if (precision < 2) - return BOOLEAN; + return NUMERIC; - if (precision < 4) + if (precision < 3) return TINYINT; - if (precision < 6) + if (precision < 5) return SMALLINT; - if (precision < 11) + if (precision < 10) return INTEGER; - if (precision < 20) + if (precision < 19) return BIGINT; return NUMERIC; diff --git a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/rest/RestExecutor.java b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/rest/RestExecutor.java index 13989b46d46de..36f38852d7735 100644 --- a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/rest/RestExecutor.java +++ b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/rest/RestExecutor.java @@ -329,11 +329,11 @@ private void writeToken(JsonToken tok, JsonParser p, JsonGenerator gen) throws I break; case VALUE_NUMBER_INT: - gen.writeNumber(p.getLongValue()); + gen.writeNumber(p.getBigIntegerValue()); break; case VALUE_NUMBER_FLOAT: - gen.writeNumber(p.getDoubleValue()); + gen.writeNumber(p.getDecimalValue()); break; case VALUE_TRUE: From 
1e56de86525a79c895eba2d839b7887b61979b07 Mon Sep 17 00:00:00 2001 From: Denis Mekhanikov Date: Wed, 25 Oct 2017 14:44:51 +0300 Subject: [PATCH 070/243] IGNITE-6572: SQL: allowed many cache to share the same schema. This closes #2850. (cherry-picked from commit ee6daae) --- .../processors/query/h2/H2Schema.java | 21 +++++ .../processors/query/h2/IgniteH2Indexing.java | 39 ++++++--- .../query/IgniteSqlSchemaIndexingTest.java | 30 ++++++- .../processors/query/SqlSchemaSelfTest.java | 80 +++++++++++++++++-- 4 files changed, 150 insertions(+), 20 deletions(-) diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Schema.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Schema.java index f5cf0f26b5cd8..2fdf32dae9f80 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Schema.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Schema.java @@ -34,6 +34,9 @@ public class H2Schema { /** */ private final ConcurrentMap typeToTbl = new ConcurrentHashMap<>(); + /** Usage count. */ + private int usageCnt; + /** * Constructor. * @@ -50,6 +53,24 @@ public String schemaName() { return schemaName; } + /** + * Increments counter for number of caches having this schema. + * + * @return New value of caches counter. + */ + public int incrementUsageCount() { + return ++usageCnt; + } + + /** + * Increments counter for number of caches having this schema. + * + * @return New value of caches counter. + */ + public int decrementUsageCount() { + return --usageCnt; + } + /** * @return Tables. 
*/ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index e669b90448ed6..58d28c6b396fa 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -259,6 +259,9 @@ public class IgniteH2Indexing implements GridQueryIndexing { /** */ private GridSpinBusyLock busyLock; + /** */ + private final Object schemaMux = new Object(); + /** */ private final ConcurrentMap runs = new ConcurrentHashMap8<>(); @@ -2290,10 +2293,18 @@ private boolean isDefaultSchema(String schemaName) { @Override public void registerCache(String cacheName, String schemaName, GridCacheContext cctx) throws IgniteCheckedException { if (!isDefaultSchema(schemaName)) { - if (schemas.putIfAbsent(schemaName, new H2Schema(schemaName)) != null) - throw new IgniteCheckedException("Schema already registered: " + U.maskName(schemaName)); + synchronized (schemaMux) { + H2Schema schema = new H2Schema(schemaName); + + H2Schema oldSchema = schemas.putIfAbsent(schemaName, schema); + + if (oldSchema == null) + createSchema(schemaName); + else + schema = oldSchema; - createSchema(schemaName); + schema.incrementUsageCount(); + } } cacheName2schema.put(cacheName, schemaName); @@ -2305,9 +2316,7 @@ private boolean isDefaultSchema(String schemaName) { @Override public void unregisterCache(String cacheName, boolean destroy) { String schemaName = schema(cacheName); - boolean dflt = isDefaultSchema(schemaName); - - H2Schema schema = dflt ? 
schemas.get(schemaName) : schemas.remove(schemaName); + H2Schema schema = schemas.get(schemaName); if (schema != null) { mapQryExec.onCacheStop(cacheName); @@ -2338,12 +2347,18 @@ private boolean isDefaultSchema(String schemaName) { } } - if (!dflt) { - try { - dropSchema(schemaName); - } - catch (IgniteCheckedException e) { - U.error(log, "Failed to drop schema on cache stop (will ignore): " + cacheName, e); + if (!isDefaultSchema(schemaName)) { + synchronized (schemaMux) { + if (schema.decrementUsageCount() == 0) { + schemas.remove(schemaName); + + try { + dropSchema(schemaName); + } + catch (IgniteCheckedException e) { + U.error(log, "Failed to drop schema on cache stop (will ignore): " + cacheName, e); + } + } } } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSchemaIndexingTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSchemaIndexingTest.java index 33e35e06db742..570d2db605823 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSchemaIndexingTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSchemaIndexingTest.java @@ -105,7 +105,35 @@ public void testCaseSensitive() throws Exception { return null; } - }, IgniteException.class, "Schema already registered: "); + }, IgniteException.class, "Duplicate index name"); + } + + /** + * Test collision of table names in different caches, sharing a single SQL schema. + * + * @throws Exception If failed. 
+ */ + @SuppressWarnings("ThrowableResultOfMethodCallIgnored") + public void testCustomSchemaMultipleCachesTablesCollision() throws Exception { + //TODO: Rewrite with dynamic cache creation, and GRID start in #beforeTest after resolve of + //TODO: https://issues.apache.org/jira/browse/IGNITE-1094 + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + final CacheConfiguration cfg = cacheConfig("cache1", true, Integer.class, Fact.class) + .setSqlSchema("TEST_SCHEMA"); + + final CacheConfiguration collisionCfg = cacheConfig("cache2", true, Integer.class, Fact.class) + .setSqlSchema("TEST_SCHEMA"); + + IgniteConfiguration icfg = new IgniteConfiguration() + .setLocalHost("127.0.0.1") + .setCacheConfiguration(cfg, collisionCfg); + + Ignition.start(icfg); + + return null; + } + }, IgniteException.class, "Failed to register query type"); } /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSchemaSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSchemaSelfTest.java index 183a88442c73b..a4ee2e3a14d68 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSchemaSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSchemaSelfTest.java @@ -18,6 +18,9 @@ package org.apache.ignite.internal.processors.query; import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.cache.query.SqlFieldsQuery; @@ -25,11 +28,9 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.testframework.GridTestUtils; import 
org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; -import java.util.Iterator; -import java.util.List; - /** * Tests for schemas. */ @@ -197,6 +198,63 @@ public void testSchemaChangeOnCacheWithPublicSchema() throws Exception { * @throws Exception If failed. */ public void testCustomSchemaName() throws Exception { + IgniteCache cache = registerQueryEntity("Person", CACHE_PERSON); + + testQueryEntity(cache, "Person"); + } + + /** + * Test multiple caches having the same schema. + * + * @throws Exception If failed. + */ + public void testCustomSchemaMultipleCaches() throws Exception { + for (int i = 1; i <= 3; i++) { + String tbl = "Person" + i; + + IgniteCache cache = registerQueryEntity(tbl, "PersonCache" + i); + + testQueryEntity(cache, tbl); + } + + for (int i = 1; i < 3; i++) { + IgniteCache cache = node.cache("PersonCache" + i); + + testQueryEntity(cache, "Person" + i); + } + } + + /** + * Test concurrent schema creation and destruction. + * + * @throws Exception If failed. + */ + public void testCustomSchemaConcurrentUse() throws Exception { + final AtomicInteger maxIdx = new AtomicInteger(); + + GridTestUtils.runMultiThreaded(new Runnable() { + @Override public void run() { + for (int i = 0; i < 100; i++) { + int idx = maxIdx.incrementAndGet(); + + String tbl = "Person" + idx; + + IgniteCache cache = registerQueryEntity(tbl, "PersonCache" + idx); + + testQueryEntity(cache, tbl); + + cache.destroy(); + } + } + }, 4, "schema-test"); + } + + /** + * @param tbl Table name. + * @param cacheName Cache name. + * @return Cache with registered query entity. 
+ */ + private IgniteCache registerQueryEntity(String tbl, String cacheName) { QueryEntity qe = new QueryEntity() .setValueType(Person.class.getName()) .setKeyType(Long.class.getName()) @@ -207,17 +265,25 @@ public void testCustomSchemaName() throws Exception { .addQueryField("name", String.class.getName(), null) .addQueryField("orgId", Long.class.getName(), null); - qe.setTableName("Person"); + qe.setTableName(tbl); - IgniteCache cache = node.createCache(new CacheConfiguration() - .setName(CACHE_PERSON) + return node.createCache(new CacheConfiguration() + .setName(cacheName) .setQueryEntities(Collections.singletonList(qe)) .setSqlSchema("TEST")); + } + /** + * Uses SQL to retrieve data from cache. + * + * @param cache Cache. + * @param tbl Table. + */ + private void testQueryEntity(IgniteCache cache, String tbl) { cache.put(1L, new Person("Vasya", 2)); assertEquals(1, node.context().query().querySqlFieldsNoCache( - new SqlFieldsQuery("SELECT id, name, orgId FROM TEST.Person where (id = ?)").setArgs(1L), false + new SqlFieldsQuery(String.format("SELECT id, name, orgId FROM TEST.%s where (id = %d)", tbl, 1)), false ).getAll().size()); } From 288c198e36113e7940a8bdca5149ece47d271f4e Mon Sep 17 00:00:00 2001 From: Oleg Ostanin Date: Wed, 25 Oct 2017 18:48:48 +0300 Subject: [PATCH 071/243] IGNITE-6751: Skipped upload of "ignite-dev-utils" module to Maven repository. This closes #2926. 
--- modules/dev-utils/pom.xml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/modules/dev-utils/pom.xml b/modules/dev-utils/pom.xml index 40e58b8cf1ad3..8fc0b682dd8d2 100644 --- a/modules/dev-utils/pom.xml +++ b/modules/dev-utils/pom.xml @@ -47,4 +47,17 @@ ${project.version} + + + + + org.apache.maven.plugins + maven-deploy-plugin + 2.8.2 + + true + + + + From c0a52294ef58654e269ae66e9abd0bff1fd267bd Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Thu, 26 Oct 2017 10:16:35 +0300 Subject: [PATCH 072/243] IGNITE-6748 Moved checkpoint buffer size to DataRegionConfiguration --- .../DataRegionConfiguration.java | 34 ++++++++- .../DataStorageConfiguration.java | 29 -------- .../MemoryPolicyConfiguration.java | 23 ------ .../apache/ignite/internal/IgnitionEx.java | 13 ++-- .../discovery/GridDiscoveryManager.java | 8 ++- .../GridCacheDatabaseSharedManager.java | 71 +++++++------------ .../utils/PlatformConfigurationUtils.java | 6 +- .../node/VisorDataRegionConfiguration.java | 13 ++++ .../node/VisorDataStorageConfiguration.java | 3 +- .../VisorPersistentStoreConfiguration.java | 1 - .../db/wal/IgnitePdsWalTlbTest.java | 7 +- .../PagesWriteThrottleSandboxTest.java | 2 +- .../pagemem/PagesWriteThrottleSmokeTest.java | 2 +- .../Config/full-config.xml | 6 +- .../IgniteConfigurationSerializerTest.cs | 6 +- .../IgniteConfigurationTest.cs | 6 +- .../Configuration/DataRegionConfiguration.cs | 8 +++ .../Configuration/DataStorageConfiguration.cs | 9 --- .../IgniteConfigurationSection.xsd | 15 ++-- modules/web-console/backend/app/mongo.js | 6 +- .../generator/ConfigurationGenerator.js | 5 +- .../generator/defaults/Cluster.service.js | 3 +- .../configuration/clusters/data-storage.pug | 6 ++ 23 files changed, 134 insertions(+), 148 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DataRegionConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/DataRegionConfiguration.java index 
50edf5cb4d916..4ae87e39dee65 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/DataRegionConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/DataRegionConfiguration.java @@ -129,6 +129,9 @@ public final class DataRegionConfiguration implements Serializable { */ private boolean persistenceEnabled = false; + /** Temporary buffer size for checkpoints in bytes. */ + private long checkpointPageBufSize; + /** * Gets data region name. * @@ -212,11 +215,11 @@ public String getSwapPath() { /** * Sets a path to the memory-mapped files. * - * @param swapFilePath A Path to the memory mapped file. + * @param swapPath A Path to the memory mapped file. * @return {@code this} for chaining. */ - public DataRegionConfiguration setSwapPath(String swapFilePath) { - this.swapPath = swapFilePath; + public DataRegionConfiguration setSwapPath(String swapPath) { + this.swapPath = swapPath; return this; } @@ -403,4 +406,29 @@ public DataRegionConfiguration setMetricsSubIntervalCount(int metricsSubInterval return this; } + + /** + * Gets amount of memory allocated for a checkpoint temporary buffer. + * + * @return Checkpoint page buffer size in bytes or {@code 0} for Ignite + * to choose the buffer size automatically. + */ + public long getCheckpointPageBufferSize() { + return checkpointPageBufSize; + } + + /** + * Sets amount of memory allocated for the checkpoint temporary buffer. The buffer is used to create temporary + * copies of pages that are being written to disk and being update in parallel while the checkpoint is in + * progress. + * + * @param checkpointPageBufSize Checkpoint page buffer size in bytes or {@code 0} for Ignite to + * choose the buffer size automatically. + * @return {@code this} for chaining. 
+ */ + public DataRegionConfiguration setCheckpointPageBufferSize(long checkpointPageBufSize) { + this.checkpointPageBufSize = checkpointPageBufSize; + + return this; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java index bd314abcdb314..8202ef83d7f0d 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java @@ -172,9 +172,6 @@ public class DataStorageConfiguration implements Serializable { /** Lock wait time, in milliseconds. */ private long lockWaitTime = DFLT_LOCK_WAIT_TIME; - /** */ - private long checkpointPageBufSize; - /** */ private int checkpointThreads = DFLT_CHECKPOINT_THREADS; @@ -424,32 +421,6 @@ public DataStorageConfiguration setCheckpointFrequency(long checkpointFreq) { return this; } - /** - * Gets amount of memory allocated for a checkpoint temporary buffer. - * - * @return Checkpoint page buffer size in bytes or {@code 0} for Ignite - * to choose the buffer size automatically. - */ - public long getCheckpointPageBufferSize() { - return checkpointPageBufSize; - } - - /** - * Sets amount of memory allocated for the checkpoint temporary buffer. The buffer is used to create temporary - * copies of pages that are being written to disk and being update in parallel while the checkpoint is in - * progress. - * - * @param checkpointPageBufSize Checkpoint page buffer size in bytes or {@code 0} for Ignite to - * choose the buffer size automatically. - * @return {@code this} for chaining. - */ - public DataStorageConfiguration setCheckpointPageBufferSize(long checkpointPageBufSize) { - this.checkpointPageBufSize = checkpointPageBufSize; - - return this; - } - - /** * Gets a number of threads to use for the checkpoint purposes. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/MemoryPolicyConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/MemoryPolicyConfiguration.java index efe7ae2abae23..a1a822f307f4d 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/MemoryPolicyConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/MemoryPolicyConfiguration.java @@ -123,11 +123,6 @@ public final class MemoryPolicyConfiguration implements Serializable { */ private long rateTimeInterval = DFLT_RATE_TIME_INTERVAL_MILLIS; - /** - * Flag to enable Ignite Native Persistence. - */ - private boolean persistenceEnabled = true; - /** * Gets memory policy name. * @@ -319,24 +314,6 @@ public MemoryPolicyConfiguration setMetricsEnabled(boolean metricsEnabled) { return this; } - /** - * Gets whether Ignite Native Persistence is enabled for this memory policy. - * - * @return Persistence enabled flag. - */ - public boolean isPersistenceEnabled() { - return persistenceEnabled; - } - - /** - * Sets persistence enabled flag. - * - * @param persistenceEnabled Persistence enabled flag. - */ - public void setPersistenceEnabled(boolean persistenceEnabled) { - this.persistenceEnabled = persistenceEnabled; - } - /** * Gets time interval for {@link MemoryMetrics#getAllocationRate()} * and {@link MemoryMetrics#getEvictionRate()} monitoring purposes. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java index 36257e274dc5e..67c771bc059ae 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java @@ -2779,7 +2779,9 @@ public void setCounter(int cnt) { */ private static void convertLegacyDataStorageConfigurationToNew( IgniteConfiguration cfg) throws IgniteCheckedException { - boolean persistenceEnabled = cfg.getPersistentStoreConfiguration() != null; + PersistentStoreConfiguration psCfg = cfg.getPersistentStoreConfiguration(); + + boolean persistenceEnabled = psCfg != null; DataStorageConfiguration dsCfg = new DataStorageConfiguration(); @@ -2814,6 +2816,9 @@ private static void convertLegacyDataStorageConfigurationToNew( region.setSwapPath(mpc.getSwapFilePath()); region.setMetricsEnabled(mpc.isMetricsEnabled()); + if (persistenceEnabled) + region.setCheckpointPageBufferSize(psCfg.getCheckpointingPageBufferSize()); + if (mpc.getName() == null) { throw new IgniteCheckedException(new IllegalArgumentException( "User-defined MemoryPolicyConfiguration must have non-null and non-empty name.")); @@ -2829,7 +2834,8 @@ private static void convertLegacyDataStorageConfigurationToNew( } if (!optionalDataRegions.isEmpty()) - dsCfg.setDataRegionConfigurations(optionalDataRegions.toArray(new DataRegionConfiguration[optionalDataRegions.size()])); + dsCfg.setDataRegionConfigurations(optionalDataRegions.toArray( + new DataRegionConfiguration[optionalDataRegions.size()])); if (!customDfltPlc) { if (!DFLT_MEM_PLC_DEFAULT_NAME.equals(memCfg.getDefaultMemoryPolicyName())) { @@ -2848,10 +2854,7 @@ private static void convertLegacyDataStorageConfigurationToNew( } if (persistenceEnabled) { - PersistentStoreConfiguration psCfg = cfg.getPersistentStoreConfiguration(); - dsCfg.setCheckpointFrequency(psCfg.getCheckpointingFrequency()); - 
dsCfg.setCheckpointPageBufferSize(psCfg.getCheckpointingPageBufferSize()); dsCfg.setCheckpointThreads(psCfg.getCheckpointingThreads()); dsCfg.setCheckpointWriteOrder(psCfg.getCheckpointWriteOrder()); dsCfg.setFileIOFactory(psCfg.getFileIOFactory()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java index a3b157d75a1a6..77b06221f7c6c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java @@ -1543,14 +1543,16 @@ private long requiredOffheap() { DataRegionConfiguration[] dataRegions = memCfg.getDataRegionConfigurations(); if (dataRegions != null) { - for (DataRegionConfiguration dataReg : dataRegions) + for (DataRegionConfiguration dataReg : dataRegions) { res += dataReg.getMaxSize(); + + res += GridCacheDatabaseSharedManager.checkpointBufferSize(dataReg); + } } res += memCfg.getDefaultDataRegionConfiguration().getMaxSize(); - // Add persistence (if any). 
- res += GridCacheDatabaseSharedManager.checkpointBufferSize(ctx.config()); + res += GridCacheDatabaseSharedManager.checkpointBufferSize(memCfg.getDefaultDataRegionConfiguration()); return res; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index 596b7b25b4be7..820dbbe12ef4f 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -156,8 +156,14 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** */ public static final String IGNITE_PDS_CHECKPOINT_TEST_SKIP_SYNC = "IGNITE_PDS_CHECKPOINT_TEST_SKIP_SYNC"; - /** Default checkpointing page buffer size (may be adjusted by Ignite). */ - public static final Long DFLT_CHECKPOINTING_PAGE_BUFFER_SIZE = 256L * 1024 * 1024; + /** */ + private static final long GB = 1024L * 1024 * 1024; + + /** Minimum checkpointing page buffer size (may be adjusted by Ignite). */ + public static final Long DFLT_MIN_CHECKPOINTING_PAGE_BUFFER_SIZE = GB / 4; + + /** Default minimum checkpointing page buffer size (may be adjusted by Ignite). */ + public static final Long DFLT_MAX_CHECKPOINTING_PAGE_BUFFER_SIZE = 2 * GB; /** Skip sync. 
*/ private final boolean skipSync = IgniteSystemProperties.getBoolean(IGNITE_PDS_CHECKPOINT_TEST_SKIP_SYNC); @@ -248,9 +254,6 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** */ private long checkpointFreq; - /** */ - private long checkpointPageBufSize; - /** */ private FilePageStoreManager storeMgr; @@ -408,56 +411,27 @@ private void initDataBase() { 30_000, new LinkedBlockingQueue() ); - - checkpointPageBufSize = checkpointBufferSize(cctx.kernalContext().config()); } /** * Get checkpoint buffer size for the given configuration. * - * @param cfg Configuration. + * @param regCfg Configuration. * @return Checkpoint buffer size. */ - public static long checkpointBufferSize(IgniteConfiguration cfg) { - DataStorageConfiguration persistenceCfg = cfg.getDataStorageConfiguration(); - - if (persistenceCfg == null) + public static long checkpointBufferSize(DataRegionConfiguration regCfg) { + if (!regCfg.isPersistenceEnabled()) return 0L; - long res = persistenceCfg.getCheckpointPageBufferSize(); + long res = regCfg.getCheckpointPageBufferSize(); if (res == 0L) { - res = DFLT_CHECKPOINTING_PAGE_BUFFER_SIZE; - - DataStorageConfiguration memCfg = cfg.getDataStorageConfiguration(); - - assert memCfg != null; - - long totalSize = memCfg.getSystemRegionMaxSize(); - - if (memCfg.getDataRegionConfigurations() == null) - totalSize += DataStorageConfiguration.DFLT_DATA_REGION_MAX_SIZE; - else { - for (DataRegionConfiguration memPlc : memCfg.getDataRegionConfigurations()) { - if (Long.MAX_VALUE - memPlc.getMaxSize() > totalSize) - totalSize += memPlc.getMaxSize(); - else { - totalSize = Long.MAX_VALUE; - - break; - } - } - - assert totalSize > 0; - } - - // Limit the checkpoint page buffer size by 2GB. 
- long dfltSize = 2 * 1024L * 1024L * 1024L; - - long adjusted = Math.min(totalSize / 4, dfltSize); - - if (res < adjusted) - res = adjusted; + if (regCfg.getMaxSize() < GB) + res = Math.min(DFLT_MIN_CHECKPOINTING_PAGE_BUFFER_SIZE, regCfg.getMaxSize()); + else if (regCfg.getMaxSize() < 8 * GB) + res = regCfg.getMaxSize() / 4; + else + res = DFLT_MAX_CHECKPOINTING_PAGE_BUFFER_SIZE; } return res; @@ -689,13 +663,16 @@ private long[] calculateFragmentSizes(int concLvl, long cacheSize, long chpBufSi long cacheSize = plcCfg.getMaxSize(); // Checkpoint buffer size can not be greater than cache size, it does not make sense. - long chpBufSize = Math.min(checkpointPageBufSize, cacheSize); + long chpBufSize = checkpointBufferSize(plcCfg); - if (checkpointPageBufSize > cacheSize) + if (chpBufSize > cacheSize) { U.quietAndInfo(log, - "Checkpoint page buffer size is too big, setting to an adjusted cache size [size=" + "Configured checkpoint page buffer size is too big, setting to the max region size [size=" + U.readableSize(cacheSize, false) + ", memPlc=" + plcCfg.getName() + ']'); + chpBufSize = cacheSize; + } + boolean writeThrottlingEnabled = persistenceCfg.isWriteThrottlingEnabled(); if (IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED, false)) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java index 10a1f27af0628..c124d5a8fe838 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java @@ -1619,7 +1619,6 @@ private static DataStorageConfiguration readDataStorageConfiguration(BinaryRawRe DataStorageConfiguration res = new DataStorageConfiguration() 
.setStoragePath(in.readString()) .setCheckpointFrequency(in.readLong()) - .setCheckpointPageBufferSize(in.readLong()) .setCheckpointThreads(in.readInt()) .setLockWaitTime((int) in.readLong()) .setWalHistorySize(in.readInt()) @@ -1714,7 +1713,6 @@ private static void writeDataStorageConfiguration(BinaryRawWriter w, DataStorage w.writeString(cfg.getStoragePath()); w.writeLong(cfg.getCheckpointFrequency()); - w.writeLong(cfg.getCheckpointPageBufferSize()); w.writeInt(cfg.getCheckpointThreads()); w.writeLong(cfg.getLockWaitTime()); w.writeInt(cfg.getWalHistorySize()); @@ -1779,6 +1777,7 @@ private static void writeDataRegionConfiguration(BinaryRawWriter w, DataRegionCo w.writeBoolean(cfg.isMetricsEnabled()); w.writeInt(cfg.getMetricsSubIntervalCount()); w.writeLong(cfg.getMetricsRateTimeInterval()); + w.writeLong(cfg.getCheckpointPageBufferSize()); } /** @@ -1800,7 +1799,8 @@ private static DataRegionConfiguration readDataRegionConfiguration(BinaryRawRead .setEmptyPagesPoolSize(r.readInt()) .setMetricsEnabled(r.readBoolean()) .setMetricsSubIntervalCount(r.readInt()) - .setMetricsRateTimeInterval(r.readLong()); + .setMetricsRateTimeInterval(r.readLong()) + .setCheckpointPageBufferSize(r.readLong()); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataRegionConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataRegionConfiguration.java index 394e2940865df..179e7894fc0b8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataRegionConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataRegionConfiguration.java @@ -68,6 +68,9 @@ public class VisorDataRegionConfiguration extends VisorDataTransferObject { /** Enable Ignite Native Persistence. */ private boolean persistenceEnabled; + /** Temporary buffer size for checkpoints in bytes. */ + private long checkpointPageBufSize; + /** * Default constructor. 
*/ @@ -94,6 +97,7 @@ public VisorDataRegionConfiguration(DataRegionConfiguration plc) { metricsSubIntervalCount = plc.getMetricsSubIntervalCount(); metricsRateTimeInterval = plc.getMetricsRateTimeInterval(); persistenceEnabled = plc.isPersistenceEnabled(); + checkpointPageBufSize = plc.getCheckpointPageBufferSize(); } /** @@ -188,6 +192,13 @@ public boolean isPersistenceEnabled() { return persistenceEnabled; } + /** + * @return Amount of memory allocated for a checkpoint temporary buffer in bytes. + */ + public long getCheckpointPageBufferSize() { + return checkpointPageBufSize; + } + /** {@inheritDoc} */ @Override protected void writeExternalData(ObjectOutput out) throws IOException { U.writeString(out, name); @@ -201,6 +212,7 @@ public boolean isPersistenceEnabled() { out.writeInt(metricsSubIntervalCount); out.writeLong(metricsRateTimeInterval); out.writeBoolean(persistenceEnabled); + out.writeLong(checkpointPageBufSize); } /** {@inheritDoc} */ @@ -216,6 +228,7 @@ public boolean isPersistenceEnabled() { metricsSubIntervalCount = in.readInt(); metricsRateTimeInterval = in.readLong(); persistenceEnabled = in.readBoolean(); + checkpointPageBufSize = in.readLong(); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataStorageConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataStorageConfiguration.java index 78bf1c504a910..8470fe10f38ab 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataStorageConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorDataStorageConfiguration.java @@ -155,7 +155,6 @@ public VisorDataStorageConfiguration(DataStorageConfiguration cfg) { storagePath = cfg.getStoragePath(); checkpointFreq = cfg.getCheckpointFrequency(); lockWaitTime = cfg.getLockWaitTime(); - checkpointPageBufSize = cfg.getCheckpointPageBufferSize(); checkpointThreads = cfg.getCheckpointThreads(); 
checkpointWriteOrder = cfg.getCheckpointWriteOrder(); walHistSize = cfg.getWalHistorySize(); @@ -391,7 +390,7 @@ public boolean isWriteThrottlingEnabled() { U.writeString(out, storagePath); out.writeLong(checkpointFreq); out.writeLong(lockWaitTime); - out.writeLong(checkpointPageBufSize); + out.writeLong(0); out.writeInt(checkpointThreads); U.writeEnum(out, checkpointWriteOrder); out.writeInt(walHistSize); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistentStoreConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistentStoreConfiguration.java index f9d7a64478c88..d26ab355d21e1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistentStoreConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistentStoreConfiguration.java @@ -103,7 +103,6 @@ public VisorPersistentStoreConfiguration(DataStorageConfiguration cfg) { persistenceStorePath = cfg.getStoragePath(); checkpointingFreq = cfg.getCheckpointFrequency(); lockWaitTime = cfg.getLockWaitTime(); - checkpointingPageBufSize = cfg.getCheckpointPageBufferSize(); checkpointingThreads = cfg.getCheckpointThreads(); walHistSize = cfg.getWalHistorySize(); walSegments = cfg.getWalSegments(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgnitePdsWalTlbTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgnitePdsWalTlbTest.java index 5700eb3d7c808..3b76b63a33b54 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgnitePdsWalTlbTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgnitePdsWalTlbTest.java @@ -31,7 +31,7 @@ import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; import 
org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; -import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DFLT_CHECKPOINTING_PAGE_BUFFER_SIZE; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.DFLT_MIN_CHECKPOINTING_PAGE_BUFFER_SIZE; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; /** @@ -54,9 +54,10 @@ public class IgnitePdsWalTlbTest extends GridCommonAbstractTest { DataStorageConfiguration memCfg = new DataStorageConfiguration() .setDefaultDataRegionConfiguration( - new DataRegionConfiguration().setMaxSize(100 * 1024 * 1024).setPersistenceEnabled(true)) + new DataRegionConfiguration().setMaxSize(100 * 1024 * 1024) + .setPersistenceEnabled(true) + .setCheckpointPageBufferSize(DFLT_MIN_CHECKPOINTING_PAGE_BUFFER_SIZE + 1)) .setWalMode(WALMode.LOG_ONLY) - .setCheckpointPageBufferSize(DFLT_CHECKPOINTING_PAGE_BUFFER_SIZE + 1) .setWalThreadLocalBufferSize(640000000); cfg.setDataStorageConfiguration(memCfg); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSandboxTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSandboxTest.java index 30fb4920463ea..9529f5965c681 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSandboxTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSandboxTest.java @@ -68,12 +68,12 @@ public class PagesWriteThrottleSandboxTest extends GridCommonAbstractTest { DataStorageConfiguration dbCfg = new DataStorageConfiguration() .setDefaultDataRegionConfiguration(new DataRegionConfiguration() .setMaxSize(4000L * 1024 * 1024) + .setCheckpointPageBufferSize(1000L * 1000 * 1000) 
.setName("dfltDataRegion") .setMetricsEnabled(true) .setPersistenceEnabled(true)) .setWalMode(WALMode.BACKGROUND) .setCheckpointFrequency(20_000) - .setCheckpointPageBufferSize(1000L * 1000 * 1000) .setWriteThrottlingEnabled(true); cfg.setDataStorageConfiguration(dbCfg); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSmokeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSmokeTest.java index ab7aab4e04a52..1875cfbf95e1c 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSmokeTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottleSmokeTest.java @@ -78,12 +78,12 @@ public class PagesWriteThrottleSmokeTest extends GridCommonAbstractTest { DataStorageConfiguration dbCfg = new DataStorageConfiguration() .setDefaultDataRegionConfiguration(new DataRegionConfiguration() .setMaxSize(400 * 1024 * 1024) + .setCheckpointPageBufferSize(200 * 1000 * 1000) .setName("dfltDataRegion") .setMetricsEnabled(true) .setPersistenceEnabled(true)) .setWalMode(WALMode.BACKGROUND) .setCheckpointFrequency(20_000) - .setCheckpointPageBufferSize(200 * 1000 * 1000) .setWriteThrottlingEnabled(true) .setCheckpointThreads(1) .setFileIOFactory(new SlowCheckpointFileIOFactory()); diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml index 1e17752c7da3f..215a04d7361fc 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml @@ -131,7 +131,7 @@ - + metricsSubIntervalCount="5" swapPath="swap" checkpointPageBufferSize="7" /> + swapPath="swap2" checkpointPageBufferSize="8" /> \ No newline at end of file diff --git 
a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs index 72c73e4f0d134..87df49252f384 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs @@ -275,7 +275,6 @@ public void TestPredefinedXml() var ds = cfg.DataStorageConfiguration; Assert.IsFalse(ds.AlwaysWriteFullPages); Assert.AreEqual(TimeSpan.FromSeconds(1), ds.CheckpointFrequency); - Assert.AreEqual(2, ds.CheckpointPageBufferSize); Assert.AreEqual(3, ds.CheckpointThreads); Assert.AreEqual(4, ds.ConcurrencyLevel); Assert.AreEqual(TimeSpan.FromSeconds(5), ds.LockWaitTime); @@ -309,6 +308,7 @@ public void TestPredefinedXml() Assert.AreEqual(5, dr.MetricsSubIntervalCount); Assert.AreEqual("swap", dr.SwapPath); Assert.IsTrue(dr.MetricsEnabled); + Assert.AreEqual(7, dr.CheckpointPageBufferSize); dr = ds.DefaultDataRegionConfiguration; Assert.AreEqual(2, dr.EmptyPagesPoolSize); @@ -899,7 +899,6 @@ private static IgniteConfiguration GetTestConfig() { AlwaysWriteFullPages = true, CheckpointFrequency = TimeSpan.FromSeconds(25), - CheckpointPageBufferSize = 28 * 1024 * 1024, CheckpointThreads = 2, LockWaitTime = TimeSpan.FromSeconds(5), StoragePath = Path.GetTempPath(), @@ -934,7 +933,8 @@ private static IgniteConfiguration GetTestConfig() PersistenceEnabled = false, MetricsRateTimeInterval = TimeSpan.FromMinutes(2), MetricsSubIntervalCount = 6, - SwapPath = Path.GetTempPath() + SwapPath = Path.GetTempPath(), + CheckpointPageBufferSize = 7 }, DataRegionConfigurations = new[] { diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs index c8c06b23c0933..f68371ac9816e 100644 --- 
a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs @@ -543,7 +543,6 @@ private static void CheckDefaultProperties(DataStorageConfiguration cfg) Assert.AreEqual(DataStorageConfiguration.DefaultTlbSize, cfg.WalThreadLocalBufferSize); Assert.AreEqual(DataStorageConfiguration.DefaultCheckpointFrequency, cfg.CheckpointFrequency); Assert.AreEqual(DataStorageConfiguration.DefaultCheckpointThreads, cfg.CheckpointThreads); - Assert.AreEqual(default(long), cfg.CheckpointPageBufferSize); Assert.AreEqual(DataStorageConfiguration.DefaultLockWaitTime, cfg.LockWaitTime); Assert.AreEqual(DataStorageConfiguration.DefaultWalFlushFrequency, cfg.WalFlushFrequency); Assert.AreEqual(DataStorageConfiguration.DefaultWalFsyncDelayNanos, cfg.WalFsyncDelayNanos); @@ -580,6 +579,7 @@ private static void CheckDefaultProperties(DataRegionConfiguration cfg) Assert.AreEqual(DataRegionConfiguration.DefaultPersistenceEnabled, cfg.PersistenceEnabled); Assert.AreEqual(DataRegionConfiguration.DefaultMetricsRateTimeInterval, cfg.MetricsRateTimeInterval); Assert.AreEqual(DataRegionConfiguration.DefaultMetricsSubIntervalCount, cfg.MetricsSubIntervalCount); + Assert.AreEqual(default(long), cfg.CheckpointPageBufferSize); } /// @@ -769,7 +769,6 @@ private static IgniteConfiguration GetCustomConfig() { AlwaysWriteFullPages = true, CheckpointFrequency = TimeSpan.FromSeconds(25), - CheckpointPageBufferSize = 28 * 1024 * 1024, CheckpointThreads = 2, LockWaitTime = TimeSpan.FromSeconds(5), StoragePath = Path.GetTempPath(), @@ -804,7 +803,8 @@ private static IgniteConfiguration GetCustomConfig() PersistenceEnabled = false, MetricsRateTimeInterval = TimeSpan.FromMinutes(2), MetricsSubIntervalCount = 6, - SwapPath = IgniteUtils.GetTempDirectoryName() + SwapPath = IgniteUtils.GetTempDirectoryName(), + CheckpointPageBufferSize = 28 * 1024 * 1024 }, DataRegionConfigurations = new[] { diff --git 
a/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataRegionConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataRegionConfiguration.cs index 5c4240e818a9f..d20ce49ee6e7a 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataRegionConfiguration.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataRegionConfiguration.cs @@ -104,6 +104,7 @@ internal DataRegionConfiguration(IBinaryRawReader reader) MetricsEnabled = reader.ReadBoolean(); MetricsSubIntervalCount = reader.ReadInt(); MetricsRateTimeInterval = reader.ReadLongAsTimespan(); + CheckpointPageBufferSize = reader.ReadLong(); } /// @@ -122,6 +123,7 @@ internal void Write(IBinaryRawWriter writer) writer.WriteBoolean(MetricsEnabled); writer.WriteInt(MetricsSubIntervalCount); writer.WriteTimeSpanAsLong(MetricsRateTimeInterval); + writer.WriteLong(CheckpointPageBufferSize); } /// @@ -209,5 +211,11 @@ internal void Write(IBinaryRawWriter writer) Justification = "Consistency with Java config")] public int MetricsSubIntervalCount { get; set; } + /// + /// Gets or sets the size of the checkpointing page buffer. + /// + /// Default is 0: Ignite will choose buffer size automatically. 
+ /// + public long CheckpointPageBufferSize { get; set; } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataStorageConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataStorageConfiguration.cs index 17b4adaa4b4a3..09b3fe4169f10 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataStorageConfiguration.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataStorageConfiguration.cs @@ -189,7 +189,6 @@ internal DataStorageConfiguration(IBinaryRawReader reader) StoragePath = reader.ReadString(); CheckpointFrequency = reader.ReadLongAsTimespan(); - CheckpointPageBufferSize = reader.ReadLong(); CheckpointThreads = reader.ReadInt(); LockWaitTime = reader.ReadLongAsTimespan(); WalHistorySize = reader.ReadInt(); @@ -239,7 +238,6 @@ internal void Write(IBinaryRawWriter writer) writer.WriteString(StoragePath); writer.WriteTimeSpanAsLong(CheckpointFrequency); - writer.WriteLong(CheckpointPageBufferSize); writer.WriteInt(CheckpointThreads); writer.WriteTimeSpanAsLong(LockWaitTime); writer.WriteInt(WalHistorySize); @@ -307,13 +305,6 @@ internal void Write(IBinaryRawWriter writer) [DefaultValue(typeof(TimeSpan), "00:03:00")] public TimeSpan CheckpointFrequency { get; set; } - /// - /// Gets or sets the size of the checkpointing page buffer. - /// - /// Default is 0: Ignite will choose buffer size automatically. - /// - public long CheckpointPageBufferSize { get; set; } - /// /// Gets or sets the number of threads for checkpointing. /// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd index 6ede267df10d2..7e1e39a789951 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd +++ b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd @@ -1556,6 +1556,11 @@ Enable disk persistence for this region. 
+ + + Size of the checkpointing page buffer. + + @@ -1624,6 +1629,11 @@ Enable disk persistence for this region. + + + Size of the checkpointing page buffer. + + @@ -1640,11 +1650,6 @@ Checkpointing frequency which is a minimal interval when the dirty pages will be written to the Persistent Store. - - - Size of the checkpointing page buffer. - - Number of threads for checkpointing. diff --git a/modules/web-console/backend/app/mongo.js b/modules/web-console/backend/app/mongo.js index b4bc9fce50a7f..5b02a72fce4b6 100644 --- a/modules/web-console/backend/app/mongo.js +++ b/modules/web-console/backend/app/mongo.js @@ -977,7 +977,8 @@ module.exports.factory = function(passportMongo, settings, pluginMongo, mongoose metricsEnabled: Boolean, metricsSubIntervalCount: Number, metricsRateTimeInterval: Number, - persistenceEnabled: Boolean + persistenceEnabled: Boolean, + checkpointPageBufferSize: Number }, dataRegionConfigurations: [{ name: String, @@ -990,7 +991,8 @@ module.exports.factory = function(passportMongo, settings, pluginMongo, mongoose metricsEnabled: Boolean, metricsSubIntervalCount: Number, metricsRateTimeInterval: Number, - persistenceEnabled: Boolean + persistenceEnabled: Boolean, + checkpointPageBufferSize: Number }], storagePath: String, metricsEnabled: Boolean, diff --git a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js index 615857cdc8bca..3ec55d73a5831 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js @@ -1345,7 +1345,7 @@ export default class IgniteConfigurationGenerator { static dataRegionConfiguration(dataRegionCfg) { const plcBean = new Bean('org.apache.ignite.configuration.DataRegionConfiguration', 'dataRegionCfg', dataRegionCfg, 
clusterDflts.dataStorageConfiguration.dataRegionConfigurations); - return plcBean.stringProperty('name') + plcBean.stringProperty('name') .longProperty('initialSize') .longProperty('maxSize') .stringProperty('swapPath') @@ -1354,8 +1354,11 @@ export default class IgniteConfigurationGenerator { .intProperty('emptyPagesPoolSize') .intProperty('metricsSubIntervalCount') .longProperty('metricsRateTimeInterval') + .longProperty('checkpointPageBufferSize') .boolProperty('metricsEnabled') .boolProperty('persistenceEnabled'); + + return plcBean; } // Generate data storage configuration. diff --git a/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cluster.service.js b/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cluster.service.js index bafb2027ecfd9..f636869d99578 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cluster.service.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cluster.service.js @@ -327,7 +327,8 @@ const DFLT_CLUSTER = { emptyPagesPoolSize: 100, metricsEnabled: false, metricsSubIntervalCount: 5, - metricsRateTimeInterval: 60000 + metricsRateTimeInterval: 60000, + checkpointPageBufferSize: 0 }, metricsEnabled: false, alwaysWriteFullPages: false, diff --git a/modules/web-console/frontend/app/modules/states/configuration/clusters/data-storage.pug b/modules/web-console/frontend/app/modules/states/configuration/clusters/data-storage.pug index a635739c16141..82c6dbebf7546 100644 --- a/modules/web-console/frontend/app/modules/states/configuration/clusters/data-storage.pug +++ b/modules/web-console/frontend/app/modules/states/configuration/clusters/data-storage.pug @@ -101,6 +101,9 @@ include /app/helpers/jade/mixins .details-row +number('Metrics rate time interval:', dfltRegionModel + '.metricsRateTimeInterval', '"DfltRegionRateTimeInterval" + $index', 'true', '60000', '1000', 'Time interval for allocation rate and eviction rate monitoring 
purposes') + .details-row + +number('Checkpoint page buffer:', dfltRegionModel + '.checkpointPageBufferSize', '"DfltCheckpointPageBufferSize" + $index', + 'true', '0', '0', 'Amount of memory allocated for a checkpoint temporary buffer in bytes') .details-row +checkbox('Metrics enabled', dfltRegionModel + '.metricsEnabled', '"DfltRegionMetricsEnabled" + $index', 'Whether memory metrics are enabled by default on node startup') @@ -156,6 +159,9 @@ include /app/helpers/jade/mixins .settings-row +number('Metrics rate time interval:', 'model.metricsRateTimeInterval', '"DataRegionRateTimeInterval" + $index', 'true', '60000', '1000', 'Time interval for allocation rate and eviction rate monitoring purposes') + .details-row + +number('Checkpoint page buffer:', 'model.checkpointPageBufferSize', '"DataRegionCheckpointPageBufferSize" + $index', + 'true', '0', '0', 'Amount of memory allocated for a checkpoint temporary buffer in bytes') .settings-row +checkbox('Metrics enabled', 'model.metricsEnabled', '"DataRegionMetricsEnabled" + $index', 'Whether memory metrics are enabled by default on node startup') From 531086e4b7419b99e47d52db8764113995df3ef1 Mon Sep 17 00:00:00 2001 From: dpavlov Date: Thu, 5 Oct 2017 18:06:27 +0300 Subject: [PATCH 073/243] IGNITE-6539 WAL parser fails if empty log files exist in directory - Fixes #2794. 
Signed-off-by: Alexey Goncharuk --- .../reader/StandaloneWalRecordsIterator.java | 24 +++-- .../db/wal/reader/IgniteWalReaderTest.java | 89 +++++++++++++++++++ 2 files changed, 104 insertions(+), 9 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java index f1258a04afe37..33a967c78041e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java @@ -45,10 +45,10 @@ import org.apache.ignite.internal.processors.cache.persistence.wal.FileInput; import org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer; import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager; -import org.apache.ignite.internal.processors.cache.persistence.wal.SegmentEofException; import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -185,12 +185,11 @@ private void init( * Header record and its position is checked. WAL position is used to determine real index. * File index from file name is ignored. * - * @param allFiles files to scan - * @return list of file descriptors with checked header records, file index is set - * @throws IgniteCheckedException if IO error occurs + * @param allFiles files to scan. 
+ * @return list of file descriptors with checked header records, having correct file index is set */ private List scanIndexesFromFileHeaders( - @Nullable final File[] allFiles) throws IgniteCheckedException { + @Nullable final File[] allFiles) { if (allFiles == null || allFiles.length == 0) return Collections.emptyList(); @@ -198,7 +197,7 @@ private List scanIndexesFromFileHeaders for (File file : allFiles) { if (file.length() < HEADER_RECORD_SIZE) - continue; + continue; //filter out this segment as it is too short FileWALPointer ptr; @@ -211,17 +210,24 @@ private List scanIndexesFromFileHeaders // Header record must be agnostic to the serializer version. final int type = in.readUnsignedByte(); - if (type == WALRecord.RecordType.STOP_ITERATION_RECORD_TYPE) - throw new SegmentEofException("Reached logical end of the segment", null); + if (type == WALRecord.RecordType.STOP_ITERATION_RECORD_TYPE) { + if (log.isInfoEnabled()) + log.info("Reached logical end of the segment for file " + file); + + continue; //filter out this segment + } ptr = RecordV1Serializer.readPosition(in); } catch (IOException e) { - throw new IgniteCheckedException("Failed to scan index from file [" + file + "]", e); + U.warn(log, "Failed to scan index from file [" + file + "]. 
Skipping this file during iteration", e); + + continue; //filter out this segment } resultingDescs.add(new FileWriteAheadLogManager.FileDescriptor(file, ptr.index())); } Collections.sort(resultingDescs); + return resultingDescs; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java index 4a4010ae1a1ee..6bfd06584758f 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java @@ -74,6 +74,7 @@ import org.apache.ignite.transactions.Transaction; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import org.junit.Assert; import static org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_ARCHIVED; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.DELETE; @@ -327,6 +328,22 @@ private void putDummyRecords(Ignite ignite, int recordsToWrite) { cache0.put(i, new IndexedObject(i)); } + /** + * Puts provided number of records to fill WAL + * + * @param ignite ignite instance + * @param recordsToWrite count + */ + private void putAllDummyRecords(Ignite ignite, int recordsToWrite) { + IgniteCache cache0 = ignite.cache(CACHE_NAME); + + Map values = new HashMap<>(); + + for (int i = 0; i < recordsToWrite; i++) + values.put(i, new IndexedObject(i)); + + cache0.putAll(values); + } /** * Puts provided number of records to fill WAL under transactions * @@ -702,6 +719,78 @@ else if (val instanceof BinaryObject) { } + /** + * Tests archive completed event is fired + * + * @throws Exception if failed + */ + public void testFillWalForExactSegmentsCount() throws Exception { + customWalMode = WALMode.DEFAULT; + + final 
CountDownLatch reqSegments = new CountDownLatch(15); + final Ignite ignite = startGrid("node0"); + + ignite.active(true); + + final IgniteEvents evts = ignite.events(); + + if (!evts.isEnabled(EVT_WAL_SEGMENT_ARCHIVED)) + assertTrue("nothing to test", false); + + evts.localListen(new IgnitePredicate() { + @Override public boolean apply(Event e) { + WalSegmentArchivedEvent archComplEvt = (WalSegmentArchivedEvent)e; + long idx = archComplEvt.getAbsWalSegmentIdx(); + log.info("Finished archive for segment [" + idx + ", " + + archComplEvt.getArchiveFile() + "]: [" + e + "]"); + + reqSegments.countDown(); + return true; + } + }, EVT_WAL_SEGMENT_ARCHIVED); + + + int totalEntries = 0; + while (reqSegments.getCount() > 0) { + final int write = 500; + putAllDummyRecords(ignite, write); + totalEntries += write; + Assert.assertTrue("Too much entries generated, but segments was not become available", + totalEntries < 10000); + } + final String subfolderName = genDbSubfolderName(ignite, 0); + + stopGrid("node0"); + + final String workDir = U.defaultWorkDirectory(); + final IgniteWalIteratorFactory factory = createWalIteratorFactory(subfolderName, workDir); + + scanIterateAndCount(factory, workDir, subfolderName, totalEntries, 0, null, null); + } + + /** + * Tests reading of empty WAL from non filled cluster + * + * @throws Exception if failed. + */ + public void testReadEmptyWal() throws Exception { + customWalMode = WALMode.DEFAULT; + + final Ignite ignite = startGrid("node0"); + + ignite.active(true); + ignite.active(false); + + final String subfolderName = genDbSubfolderName(ignite, 0); + + stopGrid("node0"); + + final String workDir = U.defaultWorkDirectory(); + final IgniteWalIteratorFactory factory = createWalIteratorFactory(subfolderName, workDir); + + scanIterateAndCount(factory, workDir, subfolderName, 0, 0, null, null); + } + /** * Creates and fills cache with data. 
* From 24e063fb8cb58d615a51274a6208f02d463beb0d Mon Sep 17 00:00:00 2001 From: dpavlov Date: Thu, 5 Oct 2017 18:10:25 +0300 Subject: [PATCH 074/243] IGNITE-6553 Standalone WAL iterator fails to handle WAL delete data records - Fixes #2797. --- .../pagemem/wal/record/DataEntry.java | 13 ++++++----- .../pagemem/wal/record/UnwrapDataEntry.java | 22 +++++++++++++------ .../reader/StandaloneWalRecordsIterator.java | 11 +++++++--- .../db/wal/reader/IgniteWalReaderTest.java | 18 ++++++++++----- 4 files changed, 43 insertions(+), 21 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/DataEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/DataEntry.java index d4e0b9f6971a3..cb6b4829de2b6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/DataEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/DataEntry.java @@ -23,6 +23,7 @@ import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.S; +import org.jetbrains.annotations.Nullable; /** * Represents Data Entry ({@link #key}, {@link #val value}) pair update {@link #op operation} in WAL log. @@ -32,13 +33,13 @@ public class DataEntry { @GridToStringInclude protected int cacheId; - /** Cache object key */ + /** Cache object key. */ protected KeyCacheObject key; - /** Cache object value */ - protected CacheObject val; + /** Cache object value. May be {@code} null for {@link GridCacheOperation#DELETE} */ + @Nullable protected CacheObject val; - /** Entry operation performed */ + /** Entry operation performed. */ @GridToStringInclude protected GridCacheOperation op; @@ -67,7 +68,7 @@ private DataEntry() { /** * @param cacheId Cache ID. * @param key Key. - * @param val Value. + * @param val Value or null for delete operation. 
* @param op Operation. * @param nearXidVer Near transaction version. * @param writeVer Write version. @@ -78,7 +79,7 @@ private DataEntry() { public DataEntry( int cacheId, KeyCacheObject key, - CacheObject val, + @Nullable CacheObject val, GridCacheOperation op, GridCacheVersion nearXidVer, GridCacheVersion writeVer, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/UnwrapDataEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/UnwrapDataEntry.java index 678539d1a9e5f..dbcc65176b6fa 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/UnwrapDataEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/UnwrapDataEntry.java @@ -31,13 +31,13 @@ public class UnwrapDataEntry extends DataEntry { /** Cache object value context. Context is used for unwrapping objects. */ private final CacheObjectValueContext cacheObjValCtx; - /** Keep binary. This flag disables converting of non primitive types (BinaryObjects) */ + /** Keep binary. This flag disables converting of non primitive types (BinaryObjects). */ private boolean keepBinary; /** * @param cacheId Cache ID. * @param key Key. - * @param val Value. + * @param val Value or null for delete operation. * @param op Operation. * @param nearXidVer Near transaction version. * @param writeVer Write version. @@ -45,7 +45,7 @@ public class UnwrapDataEntry extends DataEntry { * @param partId Partition ID. * @param partCnt Partition counter. * @param cacheObjValCtx cache object value context for unwrapping objects. - * @param keepBinary disable unwrapping for non primitive objects, Binary Objects would be returned instead + * @param keepBinary disable unwrapping for non primitive objects, Binary Objects would be returned instead. 
*/ public UnwrapDataEntry( final int cacheId, @@ -66,39 +66,47 @@ public UnwrapDataEntry( /** * Unwraps key value from cache key object into primitive boxed type or source class. If client classes were used - * in key, call of this method requires classes to be available in classpath + * in key, call of this method requires classes to be available in classpath. * - * @return Key which was placed into cache. Or null if failed + * @return Key which was placed into cache. Or null if failed to convert. */ public Object unwrappedKey() { try { if (keepBinary && key instanceof BinaryObject) return key; + Object unwrapped = key.value(cacheObjValCtx, false); + if (unwrapped instanceof BinaryObject) { if (keepBinary) return unwrapped; unwrapped = ((BinaryObject)unwrapped).deserialize(); } + return unwrapped; } catch (Exception e) { cacheObjValCtx.kernalContext().log(UnwrapDataEntry.class) .error("Unable to convert key [" + key + "]", e); + return null; } } /** * Unwraps value value from cache value object into primitive boxed type or source class. If client classes were - * used in key, call of this method requires classes to be available in classpath + * used in key, call of this method requires classes to be available in classpath. * - * @return Value which was placed into cache. Or null if failed + * @return Value which was placed into cache. Or null for delete operation or for failure. 
*/ public Object unwrappedValue() { try { + if (val == null) + return null; + if (keepBinary && val instanceof BinaryObject) return val; + return val.value(cacheObjValCtx, false); } catch (Exception e) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java index 33a967c78041e..e51d1f29a6b1f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java @@ -335,12 +335,17 @@ private DataEntry postProcessDataEntry( if (dataEntry instanceof LazyDataEntry) { final LazyDataEntry lazyDataEntry = (LazyDataEntry)dataEntry; + key = processor.toKeyCacheObject(fakeCacheObjCtx, lazyDataEntry.getKeyType(), lazyDataEntry.getKeyBytes()); - val = processor.toCacheObject(fakeCacheObjCtx, - lazyDataEntry.getValType(), - lazyDataEntry.getValBytes()); + + final byte type = lazyDataEntry.getValType(); + + val = type == 0 ? 
null : + processor.toCacheObject(fakeCacheObjCtx, + type, + lazyDataEntry.getValBytes()); } else { key = dataEntry.key(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java index 6bfd06584758f..90f6ef5d4b5c7 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java @@ -344,6 +344,7 @@ private void putAllDummyRecords(Ignite ignite, int recordsToWrite) { cache0.putAll(values); } + /** * Puts provided number of records to fill WAL under transactions * @@ -758,7 +759,7 @@ public void testFillWalForExactSegmentsCount() throws Exception { Assert.assertTrue("Too much entries generated, but segments was not become available", totalEntries < 10000); } - final String subfolderName = genDbSubfolderName(ignite, 0); + final String subfolderName = U.maskForFileName(ignite.cluster().localNode().consistentId().toString()); stopGrid("node0"); @@ -781,7 +782,7 @@ public void testReadEmptyWal() throws Exception { ignite.active(true); ignite.active(false); - final String subfolderName = genDbSubfolderName(ignite, 0); + final String subfolderName = U.maskForFileName(ignite.cluster().localNode().consistentId().toString()); stopGrid("node0"); @@ -900,8 +901,16 @@ private void runRemoveOperationTest(CacheAtomicityMode mode) throws Exception { deletesFound != null && deletesFound > 0); } - @NotNull private IgniteWalIteratorFactory createWalIteratorFactory(String subfolderName, - String workDir) throws IgniteCheckedException { + /** + * @param subfolderName Subfolder name. + * @param workDir Work directory. + * @return WAL iterator factory. + * @throws IgniteCheckedException If failed. 
+ */ + @NotNull private IgniteWalIteratorFactory createWalIteratorFactory( + String subfolderName, + String workDir + ) throws IgniteCheckedException { final File binaryMeta = U.resolveWorkDirectory(workDir, "binary_meta", false); final File binaryMetaWithConsId = new File(binaryMeta, subfolderName); final File marshallerMapping = U.resolveWorkDirectory(workDir, "marshaller", false); @@ -912,7 +921,6 @@ private void runRemoveOperationTest(CacheAtomicityMode mode) throws Exception { marshallerMapping); } - /** * @param values collection with numbers * @return sum of numbers From ac86276c3665880df8f8345b61a90ae1da90c131 Mon Sep 17 00:00:00 2001 From: devozerov Date: Thu, 26 Oct 2017 16:03:58 +0300 Subject: [PATCH 075/243] IGNITE-6111: SQL: ability to execute INSERT INTO without specifying column names. This closes #2881. --- .../processors/query/h2/IgniteH2Indexing.java | 27 ++++++++- .../processors/query/h2/opt/GridH2Table.java | 60 +++++++++++++++++++ .../IgniteCacheInsertSqlQuerySelfTest.java | 18 ++++++ 3 files changed, 104 insertions(+), 1 deletion(-) diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 58d28c6b396fa..7191cfa83a7d5 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -393,12 +393,37 @@ private PreparedStatement prepareStatement(Connection c, String sql, boolean use return stmt; } - stmt = c.prepareStatement(sql); + stmt = prepare0(c, sql); cache.put(sql, stmt); return stmt; } + else + return prepare0(c, sql); + } + + /** + * Prepare statement. + * + * @param c Connection. + * @param sql SQL. + * @return Prepared statement. + * @throws SQLException If failed. 
+ */ + private PreparedStatement prepare0(Connection c, String sql) throws SQLException { + boolean insertHack = GridH2Table.insertHackRequired(sql); + + if (insertHack) { + GridH2Table.insertHack(true); + + try { + return c.prepareStatement(sql); + } + finally { + GridH2Table.insertHack(false); + } + } else return c.prepareStatement(sql); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java index cc1e2157381bb..7ecce2ff00629 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java @@ -38,6 +38,7 @@ import org.apache.ignite.internal.processors.query.h2.database.H2TreeIndex; import org.apache.ignite.internal.util.typedef.F; import org.h2.command.ddl.CreateTableData; +import org.h2.command.dml.Insert; import org.h2.engine.DbObject; import org.h2.engine.Session; import org.h2.engine.SysProperties; @@ -65,6 +66,9 @@ * H2 Table implementation. */ public class GridH2Table extends TableBase { + /** Insert hack flag. */ + private static final ThreadLocal INSERT_HACK = new ThreadLocal<>(); + /** Cache context. 
*/ private final GridCacheContext cctx; @@ -972,4 +976,60 @@ public void addColumns(List cols, boolean ifNotExists) { unlock(true); } } + + /** {@inheritDoc} */ + @Override public Column[] getColumns() { + Boolean insertHack = INSERT_HACK.get(); + + if (insertHack != null && insertHack) { + StackTraceElement[] elems = Thread.currentThread().getStackTrace(); + + StackTraceElement elem = elems[2]; + + if (F.eq(elem.getClassName(), Insert.class.getName()) && F.eq(elem.getMethodName(), "prepare")) { + Column[] columns0 = new Column[columns.length - 3]; + + System.arraycopy(columns, 3, columns0, 0, columns0.length); + + return columns0; + } + } + + return columns; + } + + /** + * Set insert hack flag. + * + * @param val Value. + */ + public static void insertHack(boolean val) { + INSERT_HACK.set(val); + } + + /** + * Check whether insert hack is required. This is true in case statement contains "INSERT INTO ... VALUES". + * + * @param sql SQL statement. + * @return {@code True} if target combination is found. 
+ */ + @SuppressWarnings("RedundantIfStatement") + public static boolean insertHackRequired(String sql) { + if (F.isEmpty(sql)) + return false; + + sql = sql.toLowerCase(); + + int idxInsert = sql.indexOf("insert"); + + if (idxInsert < 0) + return false; + + int idxInto = sql.indexOf("into", idxInsert); + + if (idxInto < 0) + return false; + + return true; + } } \ No newline at end of file diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheInsertSqlQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheInsertSqlQuerySelfTest.java index fbf01faccadf9..0f72883507129 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheInsertSqlQuerySelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheInsertSqlQuerySelfTest.java @@ -118,6 +118,24 @@ public void testInsertWithDynamicKeyInstantiation() { assertEquals(createPerson(2, "Alex"), p.get(new Key(2))); } + /** + * Test insert with implicit column names. + */ + public void testImplicitColumnNames() { + IgniteCache p = ignite(0).cache("K2P").withKeepBinary(); + + p.query(new SqlFieldsQuery( + "insert into Person values (1, 1, 'Vova')")).getAll(); + + assertEquals(createPerson(1, "Vova"), p.get(new Key(1))); + + p.query(new SqlFieldsQuery( + "insert into Person values (2, 2, 'Sergi'), (3, 3, 'Alex')")).getAll(); + + assertEquals(createPerson(2, "Sergi"), p.get(new Key(2))); + assertEquals(createPerson(3, "Alex"), p.get(new Key(3))); + } + /** * */ From 937bc30c4a5f1951e7c755728b61571fe50eff26 Mon Sep 17 00:00:00 2001 From: Roman Shtykh Date: Thu, 26 Oct 2017 17:13:49 +0300 Subject: [PATCH 076/243] IGNITE-6534: SQL: configure NotNull fields with annotations. This closes #2782. 
--- .../org/apache/ignite/cache/QueryEntity.java | 11 ++++-- .../query/annotations/QuerySqlField.java | 7 ++++ .../query/QueryEntityTypeDescriptor.java | 19 ++++++++++ .../query/IgniteSqlNotNullConstraintTest.java | 35 ++++++++++--------- 4 files changed, 54 insertions(+), 18 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java index 0b82d6a9b05c9..2002b4fa5c7c3 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java @@ -134,7 +134,7 @@ public QueryEntity(String keyType, String valType) { * @param valCls Value type. */ public QueryEntity(Class keyCls, Class valCls) { - this(convert(processKeyAndValueClasses(keyCls,valCls))); + this(convert(processKeyAndValueClasses(keyCls, valCls))); } /** @@ -353,6 +353,7 @@ public String getTableName() { /** * Sets table name for this query entity. + * * @param tableName table name */ public void setTableName(String tableName) { @@ -382,6 +383,7 @@ public QueryEntity setNotNullFields(@Nullable Set notNullFields) { /** * Utility method for building query entities programmatically. + * * @param fullName Full name of the field. * @param type Type of the field. * @param alias Field alias. 
@@ -469,6 +471,9 @@ private static QueryEntity convert(QueryEntityTypeDescriptor desc) { if (!F.isEmpty(idxs)) entity.setIndexes(idxs); + if (!F.isEmpty(desc.notNullFields())) + entity.setNotNullFields(desc.notNullFields()); + return entity; } @@ -591,6 +596,9 @@ private static void processAnnotation(boolean key, QuerySqlField sqlAnn, QueryTe desc.addFieldToIndex(idxName, prop.fullName(), 0, sqlAnn.descending()); } + if (sqlAnn.notNull()) + desc.addNotNullField(prop.fullName()); + if ((!F.isEmpty(sqlAnn.groups()) || !F.isEmpty(sqlAnn.orderedGroups())) && sqlAnn.inlineSize() != QueryIndex.DFLT_INLINE_SIZE) { throw new CacheException("Inline size cannot be set on a field with group index [" + @@ -612,7 +620,6 @@ private static void processAnnotation(boolean key, QuerySqlField sqlAnn, QueryTe desc.addFieldToTextIndex(prop.fullName()); } - /** {@inheritDoc} */ @Override public boolean equals(Object o) { if (this == o) diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/annotations/QuerySqlField.java b/modules/core/src/main/java/org/apache/ignite/cache/query/annotations/QuerySqlField.java index 64aaa3a5c4398..03434749f7091 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/query/annotations/QuerySqlField.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/query/annotations/QuerySqlField.java @@ -56,6 +56,13 @@ */ boolean descending() default false; + /** + * Specifies whether the specified field can be {@code null}. + * + * @return {@code True} if the field is not allowed to accept {@code null} values. + */ + boolean notNull() default false; + /** * Array of index groups this field belongs to. Groups are used for compound indexes, * whenever index should be created on more than one field. 
All fields within the same diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/QueryEntityTypeDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/QueryEntityTypeDescriptor.java index 837a08f7d260d..fd0ef2b518ca3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/QueryEntityTypeDescriptor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/QueryEntityTypeDescriptor.java @@ -51,6 +51,9 @@ public class QueryEntityTypeDescriptor { @GridToStringInclude private final Map indexes = new HashMap<>(); + /** */ + private Set notNullFields = new HashSet<>(); + /** */ private QueryEntityIndexDescriptor fullTextIdx; @@ -174,6 +177,22 @@ public void addProperty(QueryEntityClassProperty prop, boolean key, boolean fail keyProps.add(name); } + /** + * Adds a notNull field. + * + * @param field notNull field. + */ + public void addNotNullField(String field) { + notNullFields.add(field); + } + + /** + * @return notNull fields. + */ + public Set notNullFields() { + return notNullFields; + } + /** * @return Class properties. */ diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java index 8283003e0bf11..0c3b42c483962 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java @@ -77,7 +77,7 @@ public class IgniteSqlNotNullConstraintTest extends GridCommonAbstractTest { private static String CACHE_PERSON = "person-PARTITIONED-TRANSACTIONAL"; /** Name of SQL table. 
*/ - private static String TABLE_PERSON = "\"" + CACHE_PERSON + "\".\"PERSON\""; + private static String TABLE_PERSON = "\"" + CACHE_PERSON + "\".\"PERSON\""; /** Template of cache with read-through setting. */ private static String CACHE_READ_THROUGH = "cacheReadThrough"; @@ -151,17 +151,19 @@ public class IgniteSqlNotNullConstraintTest extends GridCommonAbstractTest { private List cacheConfigurations() { List res = new ArrayList<>(); - for (boolean wrt : new boolean[] { false, true}) { - res.add(buildCacheConfiguration(CacheMode.LOCAL, CacheAtomicityMode.ATOMIC, false, wrt)); - res.add(buildCacheConfiguration(CacheMode.LOCAL, CacheAtomicityMode.TRANSACTIONAL, false, wrt)); + for (boolean wrt : new boolean[] {false, true}) { + for (boolean annot : new boolean[] {false, true}) { + res.add(buildCacheConfiguration(CacheMode.LOCAL, CacheAtomicityMode.ATOMIC, false, wrt, annot)); + res.add(buildCacheConfiguration(CacheMode.LOCAL, CacheAtomicityMode.TRANSACTIONAL, false, wrt, annot)); - res.add(buildCacheConfiguration(CacheMode.REPLICATED, CacheAtomicityMode.ATOMIC, false, wrt)); - res.add(buildCacheConfiguration(CacheMode.REPLICATED, CacheAtomicityMode.TRANSACTIONAL, false, wrt)); + res.add(buildCacheConfiguration(CacheMode.REPLICATED, CacheAtomicityMode.ATOMIC, false, wrt, annot)); + res.add(buildCacheConfiguration(CacheMode.REPLICATED, CacheAtomicityMode.TRANSACTIONAL, false, wrt, annot)); - res.add(buildCacheConfiguration(CacheMode.PARTITIONED, CacheAtomicityMode.ATOMIC, false, wrt)); - res.add(buildCacheConfiguration(CacheMode.PARTITIONED, CacheAtomicityMode.ATOMIC, true, wrt)); - res.add(buildCacheConfiguration(CacheMode.PARTITIONED, CacheAtomicityMode.TRANSACTIONAL, false, wrt)); - res.add(buildCacheConfiguration(CacheMode.PARTITIONED, CacheAtomicityMode.TRANSACTIONAL, true, wrt)); + res.add(buildCacheConfiguration(CacheMode.PARTITIONED, CacheAtomicityMode.ATOMIC, false, wrt, annot)); + res.add(buildCacheConfiguration(CacheMode.PARTITIONED, 
CacheAtomicityMode.ATOMIC, true, wrt, annot)); + res.add(buildCacheConfiguration(CacheMode.PARTITIONED, CacheAtomicityMode.TRANSACTIONAL, false, wrt, annot)); + res.add(buildCacheConfiguration(CacheMode.PARTITIONED, CacheAtomicityMode.TRANSACTIONAL, true, wrt, annot)); + } } return res; @@ -169,11 +171,11 @@ private List cacheConfigurations() { /** */ private CacheConfiguration buildCacheConfiguration(CacheMode mode, - CacheAtomicityMode atomicityMode, boolean hasNear, boolean writeThrough) { + CacheAtomicityMode atomicityMode, boolean hasNear, boolean writeThrough, boolean notNullAnnotated) { CacheConfiguration cfg = new CacheConfiguration(CACHE_PREFIX + "-" + mode.name() + "-" + atomicityMode.name() + (hasNear ? "-near" : "") + - (writeThrough ? "-writethrough" : "")); + (writeThrough ? "-writethrough" : "") + (notNullAnnotated ? "-annot" : "")); cfg.setCacheMode(mode); cfg.setAtomicityMode(atomicityMode); @@ -181,7 +183,8 @@ private CacheConfiguration buildCacheConfiguration(CacheMode mode, QueryEntity qe = new QueryEntity(new QueryEntity(Integer.class, Person.class)); - qe.setNotNullFields(Collections.singleton("name")); + if (!notNullAnnotated) + qe.setNotNullFields(Collections.singleton("name")); cfg.setQueryEntities(F.asList(qe)); @@ -716,7 +719,7 @@ public void testTransactionalNotNullCheckDmlInsertValues() throws Exception { /** */ private void checkNotNullCheckDmlInsertValues(CacheAtomicityMode atomicityMode) throws Exception { executeSql("CREATE TABLE test(id INT PRIMARY KEY, name VARCHAR NOT NULL) WITH \"atomicity=" - + atomicityMode.name() + "\""); + + atomicityMode.name() + "\""); GridTestUtils.assertThrows(log(), new Callable() { @Override public Object call() throws Exception { @@ -1022,7 +1025,7 @@ private List> executeSql(String sqlText) throws Exception { /** */ private void cleanup() throws Exception { - for (CacheConfiguration ccfg: cacheConfigurations()) { + for (CacheConfiguration ccfg : cacheConfigurations()) { String cacheName = 
ccfg.getName(); if (ccfg.getCacheMode() == CacheMode.LOCAL) { @@ -1088,7 +1091,7 @@ private void checkNodeState(IgniteEx node, String schemaName, String tableName, /** */ public static class Person { /** */ - @QuerySqlField + @QuerySqlField(notNull = true) private String name; /** */ From 3675d6401b78b4a70148b4ce7081fafaafcfca44 Mon Sep 17 00:00:00 2001 From: Denis Mekhanikov Date: Fri, 27 Oct 2017 14:12:36 +0300 Subject: [PATCH 077/243] ignite-5860 Try process TcpDiscoveryClientReconnectMessage from socket reader instead of always processing it on coordinator. (cherry picked from commit 56a63f8) --- .../ignite/spi/discovery/tcp/ClientImpl.java | 52 ++- .../ignite/spi/discovery/tcp/ServerImpl.java | 311 +++++++++--------- .../spi/discovery/tcp/TcpDiscoveryImpl.java | 4 +- ...entDiscoverySpiFailureTimeoutSelfTest.java | 20 +- .../tcp/TcpClientDiscoverySpiSelfTest.java | 275 +++++++++++++++- 5 files changed, 467 insertions(+), 195 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java index 5dbfe6e9d4b8d..139c11049ecc5 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java @@ -26,6 +26,7 @@ import java.net.SocketTimeoutException; import java.util.ArrayDeque; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -470,7 +471,8 @@ else if (state == DISCONNECTED) { } /** - * @param recon {@code True} if reconnects. + * @param prevAddr If reconnect is in progress, then previous address of the router the client was connected to + * and {@code null} otherwise. * @param timeout Timeout. * @return Opened socket or {@code null} if timeout. * @throws InterruptedException If interrupted. 
@@ -478,9 +480,9 @@ else if (state == DISCONNECTED) { * @see TcpDiscoverySpi#joinTimeout */ @SuppressWarnings("BusyWait") - @Nullable private T2 joinTopology(boolean recon, long timeout) + @Nullable private T2 joinTopology(InetSocketAddress prevAddr, long timeout) throws IgniteSpiException, InterruptedException { - Collection addrs = null; + List addrs = null; long startTime = U.currentTimeMillis(); @@ -489,7 +491,7 @@ else if (state == DISCONNECTED) { throw new InterruptedException(); while (addrs == null || addrs.isEmpty()) { - addrs = spi.resolvedAddresses(); + addrs = new ArrayList<>(spi.resolvedAddresses()); if (!F.isEmpty(addrs)) { if (log.isDebugEnabled()) @@ -509,22 +511,30 @@ else if (state == DISCONNECTED) { } } - Collection addrs0 = new ArrayList<>(addrs); + // Process failed node last. + if (prevAddr != null) { + int idx = addrs.indexOf(prevAddr); - Iterator it = addrs.iterator(); + if (idx != -1) + Collections.swap(addrs, idx, 0); + } + + Collection addrs0 = new ArrayList<>(addrs); boolean wait = false; - while (it.hasNext()) { + for (int i = addrs.size() - 1; i >= 0; i--) { if (Thread.currentThread().isInterrupted()) throw new InterruptedException(); - InetSocketAddress addr = it.next(); + InetSocketAddress addr = addrs.get(i); + + boolean recon = prevAddr != null; T3 sockAndRes = sendJoinRequest(recon, addr); if (sockAndRes == null) { - it.remove(); + addrs.remove(i); continue; } @@ -852,8 +862,8 @@ private NavigableSet allVisibleNodes() { } /** {@inheritDoc} */ - @Override protected IgniteSpiThread workerThread() { - return msgWorker; + @Override protected Collection threads() { + return Arrays.asList(sockWriter, msgWorker); } /** @@ -1336,15 +1346,20 @@ private class Reconnector extends IgniteSpiThread { private boolean clientAck; /** */ - private boolean join; + private final boolean join; + + /** */ + private final InetSocketAddress prevAddr; /** * @param join {@code True} if reconnects during join. 
+ * @param prevAddr Address of the node, that this client was previously connected to. */ - protected Reconnector(boolean join) { + protected Reconnector(boolean join, InetSocketAddress prevAddr) { super(spi.ignite().name(), "tcp-client-disco-reconnector", log); this.join = join; + this.prevAddr = prevAddr; } /** @@ -1374,7 +1389,7 @@ public void cancel() { try { while (true) { - T2 joinRes = joinTopology(true, timeout); + T2 joinRes = joinTopology(prevAddr, timeout); if (joinRes == null) { if (join) { @@ -1609,6 +1624,10 @@ else if (msg instanceof TcpDiscoveryNodeFailedMessage && } else if (msg instanceof SocketClosedMessage) { if (((SocketClosedMessage)msg).sock == currSock) { + Socket sock = currSock.sock; + + InetSocketAddress prevAddr = new InetSocketAddress(sock.getInetAddress(), sock.getPort()); + currSock = null; boolean join = joinLatch.getCount() > 0; @@ -1637,8 +1656,7 @@ else if (msg instanceof SocketClosedMessage) { assert reconnector == null; - final Reconnector reconnector = new Reconnector(join); - this.reconnector = reconnector; + reconnector = new Reconnector(join, prevAddr); reconnector.start(); } } @@ -1811,7 +1829,7 @@ private void tryJoin() throws InterruptedException { T2 joinRes; try { - joinRes = joinTopology(false, spi.joinTimeout); + joinRes = joinTopology(null, spi.joinTimeout); } catch (IgniteSpiException e) { joinError(e); diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java index 60f9d4e6270ed..182090a4f7dac 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java @@ -218,6 +218,9 @@ class ServerImpl extends TcpDiscoveryImpl { /** Pending custom messages that should not be sent between NodeAdded and NodeAddFinished messages. 
*/ private Queue pendingCustomMsgs = new ArrayDeque<>(); + /** Messages history used for client reconnect. */ + private final EnsuredMessageHistory msgHist = new EnsuredMessageHistory(); + /** If non-shared IP finder is used this flag shows whether IP finder contains local address. */ private boolean ipFinderHasLocAddr; @@ -1660,8 +1663,23 @@ private void clearNodeAddedMessage(TcpDiscoveryAbstractMessage msg) { } /** {@inheritDoc} */ - @Override protected IgniteSpiThread workerThread() { - return msgWorker; + @Override protected Collection threads() { + Collection threads; + + synchronized (mux) { + threads = new ArrayList<>(readers.size() + clientMsgWorkers.size() + 4); + threads.addAll(readers); + } + + threads.addAll(clientMsgWorkers.values()); + threads.add(tcpSrvr); + threads.add(ipFinderCleaner); + threads.add(msgWorker); + threads.add(statsPrinter); + + threads.removeAll(Collections.singleton(null)); + + return threads; } /** @@ -2091,7 +2109,9 @@ else if (msg instanceof TcpDiscoveryNodeLeftMessage) else if (msg instanceof TcpDiscoveryNodeFailedMessage) clearClientAddFinished(((TcpDiscoveryNodeFailedMessage)msg).failedNodeId()); - msgs.add(msg); + synchronized (msgs) { + msgs.add(msg); + } } /** @@ -2130,14 +2150,16 @@ private void clearClientAddFinished(UUID clientId) { // Client connection failed before it received TcpDiscoveryNodeAddedMessage. 
List res = null; - for (TcpDiscoveryAbstractMessage msg : msgs) { - if (msg instanceof TcpDiscoveryNodeAddedMessage) { - if (node.id().equals(((TcpDiscoveryNodeAddedMessage)msg).node().id())) - res = new ArrayList<>(msgs.size()); - } + synchronized (msgs) { + for (TcpDiscoveryAbstractMessage msg : msgs) { + if (msg instanceof TcpDiscoveryNodeAddedMessage) { + if (node.id().equals(((TcpDiscoveryNodeAddedMessage)msg).node().id())) + res = new ArrayList<>(msgs.size()); + } - if (res != null) - res.add(prepare(msg, node.id())); + if (res != null) + res.add(prepare(msg, node.id())); + } } if (log.isDebugEnabled()) { @@ -2150,20 +2172,26 @@ private void clearClientAddFinished(UUID clientId) { return res; } else { - if (msgs.isEmpty()) - return Collections.emptyList(); + Collection cp; - Collection cp = new ArrayList<>(msgs.size()); + boolean skip; - boolean skip = true; + synchronized (msgs) { + if (msgs.isEmpty()) + return Collections.emptyList(); - for (TcpDiscoveryAbstractMessage msg : msgs) { - if (skip) { - if (msg.id().equals(lastMsgId)) - skip = false; + cp = new ArrayList<>(msgs.size()); + + skip = true; + + for (TcpDiscoveryAbstractMessage msg : msgs) { + if (skip) { + if (msg.id().equals(lastMsgId)) + skip = false; + } + else + cp.add(prepare(msg, node.id())); } - else - cp.add(prepare(msg, node.id())); } cp = !skip ? 
cp : null; @@ -2452,9 +2480,6 @@ private class RingMessageWorker extends MessageWorkerAdapter pending = msgHist.messages(msg.lastMessageId(), node); - - if (pending != null) { - msg.pendingMessages(pending); - msg.success(true); - - if (log.isDebugEnabled()) - log.debug("Accept client reconnect, restored pending messages " + - "[locNodeId=" + locNodeId + ", clientNodeId=" + nodeId + ']'); - } - else { - if (log.isDebugEnabled()) - log.debug("Failing reconnecting client node because failed to restore pending " + - "messages [locNodeId=" + locNodeId + ", clientNodeId=" + nodeId + ']'); - - TcpDiscoveryNodeFailedMessage nodeFailedMsg = new TcpDiscoveryNodeFailedMessage(locNodeId, - node.id(), node.internalOrder()); - - processNodeFailedMessage(nodeFailedMsg); - - if (nodeFailedMsg.verified()) - msgHist.add(nodeFailedMsg); - } - } - else if (log.isDebugEnabled()) - log.debug("Reconnecting client node is already failed [nodeId=" + nodeId + ']'); - - if (isLocNodeRouter) { - ClientMessageWorker wrk = clientMsgWorkers.get(nodeId); - - if (wrk != null) - wrk.addMessage(msg); - else if (log.isDebugEnabled()) - log.debug("Failed to reconnect client node (disconnected during the process) [locNodeId=" + - locNodeId + ", clientNodeId=" + nodeId + ']'); - } - else { - if (sendMessageToRemotes(msg)) - sendMessageAcrossRing(msg); - } - } - else { - if (sendMessageToRemotes(msg)) - sendMessageAcrossRing(msg); - } - } - else { - if (isLocalNodeCoordinator()) - addMessage(new TcpDiscoveryDiscardMessage(locNodeId, msg.id(), false)); - - if (isLocNodeRouter) { - ClientMessageWorker wrk = clientMsgWorkers.get(nodeId); - - if (wrk != null) - wrk.addMessage(msg); - else if (log.isDebugEnabled()) - log.debug("Failed to reconnect client node (disconnected during the process) [locNodeId=" + - locNodeId + ", clientNodeId=" + nodeId + ']'); - } - else { - if (ring.hasRemoteNodes() && !isLocalNodeCoordinator()) - sendMessageAcrossRing(msg); - } - } - } - /** * Processes node added message. 
* @@ -4050,9 +3982,6 @@ private void processNodeAddedMessage(TcpDiscoveryNodeAddedMessage msg) { processNodeAddFinishedMessage(addFinishMsg); - if (addFinishMsg.verified()) - msgHist.add(addFinishMsg); - addMessage(new TcpDiscoveryDiscardMessage(locNodeId, msg.id(), false)); return; @@ -5117,9 +5046,6 @@ private void processMetricsUpdateMessage(TcpDiscoveryMetricsUpdateMessage msg) { locNodeId, clientNode.id(), clientNode.internalOrder()); processNodeFailedMessage(nodeFailedMsg); - - if (nodeFailedMsg.verified()) - msgHist.add(nodeFailedMsg); } } } @@ -5314,9 +5240,6 @@ private void processCustomMessage(TcpDiscoveryCustomEventMessage msg) { ackMsg.topologyVersion(msg.topologyVersion()); processCustomMessage(ackMsg); - - if (ackMsg.verified()) - msgHist.add(ackMsg); } catch (IgniteCheckedException e) { U.error(log, "Failed to marshal discovery custom message.", e); @@ -5418,12 +5341,8 @@ private void checkPendingCustomMessages() { if (joiningEmpty && isLocalNodeCoordinator()) { TcpDiscoveryCustomEventMessage msg; - while ((msg = pollPendingCustomeMessage()) != null) { + while ((msg = pollPendingCustomeMessage()) != null) processCustomMessage(msg); - - if (msg.verified()) - msgHist.add(msg); - } } } @@ -5977,24 +5896,22 @@ else if (msg instanceof TcpDiscoveryJoinRequestMessage) { } } else if (msg instanceof TcpDiscoveryClientReconnectMessage) { - if (clientMsgWrk != null) { - TcpDiscoverySpiState state = spiStateCopy(); + TcpDiscoverySpiState state = spiStateCopy(); - if (state == CONNECTED) { - spi.writeToSocket(msg, sock, RES_OK, sockTimeout); + if (state == CONNECTED) { + spi.writeToSocket(msg, sock, RES_OK, sockTimeout); - if (clientMsgWrk.getState() == State.NEW) - clientMsgWrk.start(); + if (clientMsgWrk != null && clientMsgWrk.getState() == State.NEW) + clientMsgWrk.start(); - msgWorker.addMessage(msg); + processClientReconnectMessage((TcpDiscoveryClientReconnectMessage)msg); - continue; - } - else { - spi.writeToSocket(msg, sock, RES_CONTINUE_JOIN, 
sockTimeout); + continue; + } + else { + spi.writeToSocket(msg, sock, RES_CONTINUE_JOIN, sockTimeout); - break; - } + break; } } else if (msg instanceof TcpDiscoveryDuplicateIdMessage) { @@ -6237,6 +6154,100 @@ else if (msg instanceof TcpDiscoveryRingLatencyCheckMessage) { } } + /** + * Processes client reconnect message. + * + * @param msg Client reconnect message. + */ + private void processClientReconnectMessage(TcpDiscoveryClientReconnectMessage msg) { + UUID nodeId = msg.creatorNodeId(); + + UUID locNodeId = getLocalNodeId(); + + boolean isLocNodeRouter = msg.routerNodeId().equals(locNodeId); + + TcpDiscoveryNode node = ring.node(nodeId); + + assert node == null || node.isClient(); + + if (node != null) { + node.clientRouterNodeId(msg.routerNodeId()); + node.clientAliveTime(spi.clientFailureDetectionTimeout()); + } + + if (!msg.verified()) { + if (isLocNodeRouter || isLocalNodeCoordinator()) { + if (node != null) { + Collection pending = msgHist.messages(msg.lastMessageId(), node); + + if (pending != null) { + msg.verify(locNodeId); + msg.pendingMessages(pending); + msg.success(true); + + if (log.isDebugEnabled()) + log.debug("Accept client reconnect, restored pending messages " + + "[locNodeId=" + locNodeId + ", clientNodeId=" + nodeId + ']'); + } + else if (!isLocalNodeCoordinator()) { + if (log.isDebugEnabled()) + log.debug("Failed to restore pending messages for reconnecting client. 
" + + "Forwarding reconnection message to coordinator " + + "[locNodeId=" + locNodeId + ", clientNodeId=" + nodeId + ']'); + } + else { + msg.verify(locNodeId); + + if (log.isDebugEnabled()) + log.debug("Failing reconnecting client node because failed to restore pending " + + "messages [locNodeId=" + locNodeId + ", clientNodeId=" + nodeId + ']'); + + TcpDiscoveryNodeFailedMessage nodeFailedMsg = new TcpDiscoveryNodeFailedMessage(locNodeId, + node.id(), node.internalOrder()); + + msgWorker.addMessage(nodeFailedMsg); + } + } + else { + msg.verify(locNodeId); + + if (log.isDebugEnabled()) + log.debug("Reconnecting client node is already failed [nodeId=" + nodeId + ']'); + } + + if (msg.verified() && isLocNodeRouter) { + ClientMessageWorker wrk = clientMsgWorkers.get(nodeId); + + if (wrk != null) + wrk.addMessage(msg); + else if (log.isDebugEnabled()) + log.debug("Failed to reconnect client node (disconnected during the process) [locNodeId=" + + locNodeId + ", clientNodeId=" + nodeId + ']'); + } + else + msgWorker.addMessage(msg); + } + else + msgWorker.addMessage(msg); + } + else { + if (isLocalNodeCoordinator()) + msgWorker.addMessage(new TcpDiscoveryDiscardMessage(locNodeId, msg.id(), false)); + + if (isLocNodeRouter) { + ClientMessageWorker wrk = clientMsgWorkers.get(nodeId); + + if (wrk != null) + wrk.addMessage(msg); + else if (log.isDebugEnabled()) + log.debug("Failed to reconnect client node (disconnected during the process) [locNodeId=" + + locNodeId + ", clientNodeId=" + nodeId + ']'); + } + else if (ring.hasRemoteNodes() && !isLocalNodeCoordinator()) + msgWorker.addMessage(msg); + } + } + /** * Processes client metrics update message. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryImpl.java index b31e2e4b593e5..f3cf48dd84f71 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryImpl.java @@ -299,9 +299,9 @@ protected static String threadStatus(Thread t) { /** * FOR TEST ONLY!!! * - * @return Worker thread. + * @return Worker threads. */ - protected abstract IgniteSpiThread workerThread(); + protected abstract Collection threads(); /** * @throws IgniteSpiException If failed. diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpClientDiscoverySpiFailureTimeoutSelfTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpClientDiscoverySpiFailureTimeoutSelfTest.java index 689ac72bb6274..f1c826ac21853 100644 --- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpClientDiscoverySpiFailureTimeoutSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpClientDiscoverySpiFailureTimeoutSelfTest.java @@ -56,15 +56,9 @@ public class TcpClientDiscoverySpiFailureTimeoutSelfTest extends TcpClientDiscov /** */ private final static long FAILURE_THRESHOLD = 10_000; - /** */ - private final static long CLIENT_FAILURE_THRESHOLD = 30_000; - /** Failure detection timeout for nodes configuration. */ private static long failureThreshold = FAILURE_THRESHOLD; - /** Client failure detection timeout for nodes configuration. 
*/ - private static long clientFailureThreshold = CLIENT_FAILURE_THRESHOLD; - /** */ private static boolean useTestSpi; @@ -75,7 +69,7 @@ public class TcpClientDiscoverySpiFailureTimeoutSelfTest extends TcpClientDiscov /** {@inheritDoc} */ @Override protected long clientFailureDetectionTimeout() { - return clientFailureThreshold; + return clientFailureDetectionTimeout; } /** {@inheritDoc} */ @@ -153,7 +147,7 @@ public void testFailureTimeoutWorkabilitySmallTimeout() throws Exception { */ public void testFailureTimeoutServerClient() throws Exception { failureThreshold = 3000; - clientFailureThreshold = 2000; + clientFailureDetectionTimeout = 2000; try { startServerNodes(1); @@ -190,13 +184,12 @@ public void testFailureTimeoutServerClient() throws Exception { long detectTime = failureDetectTime[0] - failureTime; assertTrue("Client node failure detected too fast: " + detectTime + "ms", - detectTime > clientFailureThreshold - 200); + detectTime > clientFailureDetectionTimeout - 200); assertTrue("Client node failure detected too slow: " + detectTime + "ms", - detectTime < clientFailureThreshold + 5000); + detectTime < clientFailureDetectionTimeout + 5000); } finally { failureThreshold = FAILURE_THRESHOLD; - clientFailureThreshold = CLIENT_FAILURE_THRESHOLD; } } @@ -207,7 +200,7 @@ public void testFailureTimeoutServerClient() throws Exception { */ public void testFailureTimeout3Server() throws Exception { failureThreshold = 1000; - clientFailureThreshold = 10000; + clientFailureDetectionTimeout = 10000; useTestSpi = true; try { @@ -254,11 +247,10 @@ public void testFailureTimeout3Server() throws Exception { assertTrue("Server node failure detected too fast: " + detectTime + "ms", detectTime > failureThreshold - 100); assertTrue("Server node failure detected too slow: " + detectTime + "ms", - detectTime < clientFailureThreshold); + detectTime < clientFailureDetectionTimeout); } finally { failureThreshold = FAILURE_THRESHOLD; - clientFailureThreshold = 
CLIENT_FAILURE_THRESHOLD; useTestSpi = false; } } diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpClientDiscoverySpiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpClientDiscoverySpiSelfTest.java index 329783e9a2924..ee88b0fc867eb 100644 --- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpClientDiscoverySpiSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpClientDiscoverySpiSelfTest.java @@ -62,8 +62,8 @@ import org.apache.ignite.spi.IgniteSpiException; import org.apache.ignite.spi.IgniteSpiOperationTimeoutException; import org.apache.ignite.spi.IgniteSpiOperationTimeoutHelper; +import org.apache.ignite.spi.IgniteSpiThread; import org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNode; -import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; import org.apache.ignite.spi.discovery.tcp.messages.TcpDiscoveryAbstractMessage; import org.apache.ignite.spi.discovery.tcp.messages.TcpDiscoveryClientReconnectMessage; @@ -73,6 +73,7 @@ import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.jetbrains.annotations.Nullable; + import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.MINUTES; import static org.apache.ignite.events.EventType.EVT_CLIENT_NODE_DISCONNECTED; @@ -87,7 +88,7 @@ */ public class TcpClientDiscoverySpiSelfTest extends GridCommonAbstractTest { /** */ - private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + private static final TcpDiscoveryVmIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); /** */ protected static final AtomicInteger srvIdx = new AtomicInteger(); @@ -122,6 +123,9 @@ public class TcpClientDiscoverySpiSelfTest extends GridCommonAbstractTest { /** */ private 
static CountDownLatch clientFailedLatch; + /** */ + private static CountDownLatch clientReconnectedLatch; + /** */ private static CountDownLatch msgLatch; @@ -137,11 +141,14 @@ public class TcpClientDiscoverySpiSelfTest extends GridCommonAbstractTest { /** */ protected long netTimeout = TcpDiscoverySpi.DFLT_NETWORK_TIMEOUT; + /** */ + protected Integer reconnectCnt; + /** */ private boolean longSockTimeouts; /** */ - private long clientFailureDetectionTimeout = 1000; + protected long clientFailureDetectionTimeout = 1000; /** */ private IgniteInClosure2X afterWrite; @@ -207,6 +214,9 @@ else if (igniteInstanceName.startsWith("client")) { disco.setJoinTimeout(joinTimeout); disco.setNetworkTimeout(netTimeout); + if (reconnectCnt != null) + disco.setReconnectCount(reconnectCnt); + disco.setClientReconnectDisabled(reconnectDisabled); if (disco instanceof TestTcpDiscoverySpi) @@ -253,6 +263,7 @@ protected TcpDiscoverySpi getDiscoverySpi() { clientIpFinder = null; joinTimeout = TcpDiscoverySpi.DFLT_JOIN_TIMEOUT; netTimeout = TcpDiscoverySpi.DFLT_NETWORK_TIMEOUT; + clientFailureDetectionTimeout = 1000; longSockTimeouts = false; assert G.allGrids().isEmpty(); @@ -557,6 +568,221 @@ public void testClientReconnectOnRouterFail() throws Exception { checkNodes(2, 3); } + /** + * Client should reconnect to available server without EVT_CLIENT_NODE_RECONNECTED event. + * + * @throws Exception If failed. + */ + public void testClientReconnectOnRouterSuspend() throws Exception { + reconnectAfterSuspend(false); + } + + /** + * Client should receive all topology updates after reconnect. + * + * @throws Exception If failed. + */ + public void testClientReconnectOnRouterSuspendTopologyChange() throws Exception { + reconnectAfterSuspend(true); + } + + /** + * @param changeTop If {@code true} topology is changed after client disconnects + * @throws Exception if failed. 
+ */ + private void reconnectAfterSuspend(boolean changeTop) throws Exception { + reconnectCnt = 2; + + startServerNodes(2); + + Ignite srv0 = grid("server-0"); + TcpDiscoveryNode srv0Node = (TcpDiscoveryNode)srv0.cluster().localNode(); + + TcpDiscoveryNode srv1Node = (TcpDiscoveryNode)grid("server-1").cluster().localNode(); + + clientIpFinder = new TcpDiscoveryVmIpFinder(); + + clientIpFinder.setAddresses( + Collections.singleton("localhost:" + srv0Node.discoveryPort())); + + startClientNodes(1); + + Ignite client = grid("client-0"); + TcpDiscoveryNode clientNode = (TcpDiscoveryNode)client.cluster().localNode(); + TestTcpDiscoverySpi clientSpi = (TestTcpDiscoverySpi)client.configuration().getDiscoverySpi(); + + UUID clientNodeId = clientNode.id(); + + checkNodes(2, 1); + + clientIpFinder.setAddresses(Collections.singleton("localhost:" + srv1Node.discoveryPort())); + + srvFailedLatch = new CountDownLatch(1); + + attachListeners(2, 1); + + log.info("Pausing router"); + + TestTcpDiscoverySpi srvSpi = (TestTcpDiscoverySpi)srv0.configuration().getDiscoverySpi(); + + int joinedNodesNum = 3; + final CountDownLatch srvJoinedLatch = new CountDownLatch(joinedNodesNum); + + if (changeTop) { + client.events().localListen(new IgnitePredicate() { + @Override public boolean apply(Event e) { + srvJoinedLatch.countDown(); + + return true; + } + }, EVT_NODE_JOINED); + } + + srvSpi.pauseAll(true); + + if (changeTop) + startServerNodes(joinedNodesNum); + + try { + await(srvFailedLatch, 60_000); + + if (changeTop) + await(srvJoinedLatch, 5000); + + assertEquals("connected", clientSpi.getSpiState()); + assertEquals(clientNodeId, clientNode.id()); + assertEquals(srv1Node.id(), clientNode.clientRouterNodeId()); + } + finally { + srvSpi.resumeAll(); + } + } + + /** + * @throws Exception if failed. 
+ */ + public void testClientReconnectHistoryMissingOnRouter() throws Exception { + clientFailureDetectionTimeout = 60000; + netTimeout = 60000; + + startServerNodes(2); + + Ignite srv0 = grid("server-0"); + TcpDiscoveryNode srv0Node = (TcpDiscoveryNode)srv0.cluster().localNode(); + + clientIpFinder = new TcpDiscoveryVmIpFinder(); + clientIpFinder.setAddresses( + Collections.singleton("localhost:" + srv0Node.discoveryPort())); + + startClientNodes(1); + + attachListeners(0, 1); + + Ignite client = grid("client-0"); + TcpDiscoveryNode clientNode = (TcpDiscoveryNode)client.cluster().localNode(); + TestTcpDiscoverySpi clientSpi = (TestTcpDiscoverySpi)client.configuration().getDiscoverySpi(); + UUID clientNodeId = clientNode.id(); + + checkNodes(2, 1); + + clientSpi.pauseAll(true); + + stopGrid(srv0.name()); + + startServerNodes(1); + + Ignite srv2 = grid("server-2"); + TcpDiscoveryNode srv2Node = (TcpDiscoveryNode)srv2.cluster().localNode(); + clientIpFinder.setAddresses( + Collections.singleton("localhost:" + srv2Node.discoveryPort())); + + clientSpi.resumeAll(); + + awaitPartitionMapExchange(); + + assertEquals("connected", clientSpi.getSpiState()); + assertEquals(clientNodeId, clientNode.id()); + assertEquals(srv2Node.id(), clientNode.clientRouterNodeId()); + } + + /** + * @throws Exception If failed. + */ + public void testReconnectAfterPause() throws Exception { + startServerNodes(2); + startClientNodes(1); + + Ignite client = grid("client-0"); + TestTcpDiscoverySpi clientSpi = (TestTcpDiscoverySpi)client.configuration().getDiscoverySpi(); + + clientReconnectedLatch = new CountDownLatch(1); + + attachListeners(0, 1); + + clientSpi.pauseAll(false); + + try { + clientSpi.brakeConnection(); + + Thread.sleep(clientFailureDetectionTimeout() * 2); + } + finally { + clientSpi.resumeAll(); + } + + await(clientReconnectedLatch); + } + + /** + * @throws Exception if failed. 
+ */ + public void testReconnectAfterMassiveTopologyChange() throws Exception { + clientIpFinder = IP_FINDER; + + clientFailureDetectionTimeout = 60000; + netTimeout = 60000; + + int initSrvsNum = 5; + int killNum = 3; + int iterations = 10; + + startServerNodes(initSrvsNum); + startClientNodes(1); + + Ignite client = grid("client-0"); + TcpDiscoveryNode clientNode = (TcpDiscoveryNode)client.cluster().localNode(); + TestTcpDiscoverySpi clientSpi = (TestTcpDiscoverySpi)client.configuration().getDiscoverySpi(); + final UUID clientNodeId = clientNode.id(); + + final CountDownLatch srvJoinedLatch = new CountDownLatch(iterations * killNum); + + client.events().localListen(new IgnitePredicate() { + @Override public boolean apply(Event e) { + srvJoinedLatch.countDown(); + + return true; + } + }, EVT_NODE_JOINED); + + int minAliveSrvId = 0; + + for (int i = 0; i < iterations; i++) { + for (int j = 0; j < killNum; j++) { + stopGrid(minAliveSrvId); + + minAliveSrvId++; + } + + startServerNodes(killNum); + + awaitPartitionMapExchange(); + } + + await(srvJoinedLatch); + assertEquals("connected", clientSpi.getSpiState()); + assertEquals(clientNodeId, clientNode.id()); + } + /** * @throws Exception If failed. */ @@ -1410,17 +1636,16 @@ else if (evt.type() == EVT_CLIENT_NODE_RECONNECTED) { srvSpi.failNode(client.cluster().localNode().id(), null); - if (changeTop) { - Ignite g = startGrid("server-" + srvIdx.getAndIncrement()); + assertTrue(disconnectLatch.await(5000, MILLISECONDS)); + assertTrue(failLatch.await(5000, MILLISECONDS)); - srvNodeIds.add(g.cluster().localNode().id()); + if (changeTop) { + startServerNodes(1); clientSpi.resumeAll(); } - assertTrue(disconnectLatch.await(5000, MILLISECONDS)); assertTrue(reconnectLatch.await(5000, MILLISECONDS)); - assertTrue(failLatch.await(5000, MILLISECONDS)); assertTrue(joinLatch.await(5000, MILLISECONDS)); long topVer = changeTop ? 
5L : 4L; @@ -2026,6 +2251,20 @@ private void attachListeners(int srvCnt, int clientCnt) throws Exception { }, EVT_NODE_FAILED); } } + + if (clientReconnectedLatch != null) { + for (int i = 0; i < clientCnt; i++) { + G.ignite("client-" + i).events().localListen(new IgnitePredicate() { + @Override public boolean apply(Event evt) { + info("Reconnected event fired on client: " + evt); + + clientReconnectedLatch.countDown(); + + return true; + } + }, EVT_CLIENT_NODE_RECONNECTED); + } + } } /** @@ -2095,7 +2334,16 @@ else if (srvNodeIds.contains(id)) * @throws InterruptedException If interrupted. */ protected void await(CountDownLatch latch) throws InterruptedException { - assertTrue("Latch count: " + latch.getCount(), latch.await(awaitTime(), MILLISECONDS)); + await(latch, awaitTime()); + } + + /** + * @param latch Latch. + * @param timeout Timeout. + * @throws InterruptedException If interrupted. + */ + protected void await(CountDownLatch latch, long timeout) throws InterruptedException { + assertTrue("Latch count: " + latch.getCount(), latch.await(timeout, MILLISECONDS)); } /** @@ -2324,8 +2572,10 @@ public void pauseSocketWrite() { public void pauseAll(boolean suspend) { pauseResumeOperation(true, openSockLock, writeLock); - if (suspend) - impl.workerThread().suspend(); + if (suspend) { + for (Thread t : impl.threads()) + t.suspend(); + } } /** @@ -2334,7 +2584,8 @@ public void pauseAll(boolean suspend) { public void resumeAll() { pauseResumeOperation(false, openSockLock, writeLock); - impl.workerThread().resume(); + for (IgniteSpiThread t : impl.threads()) + t.resume(); } /** {@inheritDoc} */ From b1e670109e49bc33a9c8d6486cbfc637f13eaede Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Mon, 30 Oct 2017 11:07:21 +0700 Subject: [PATCH 078/243] IGNITE-6570 Added missing import. 
(cherry picked from commit 67859f4) --- modules/web-console/frontend/app/utils/SimpleWorkerPool.js | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/web-console/frontend/app/utils/SimpleWorkerPool.js b/modules/web-console/frontend/app/utils/SimpleWorkerPool.js index d8ed28b6186e4..b76dc525973bd 100644 --- a/modules/web-console/frontend/app/utils/SimpleWorkerPool.js +++ b/modules/web-console/frontend/app/utils/SimpleWorkerPool.js @@ -19,6 +19,7 @@ import {Observable} from 'rxjs/Observable'; import {Subject} from 'rxjs/Subject'; import 'rxjs/add/observable/race'; import 'rxjs/add/operator/filter'; +import 'rxjs/add/operator/map'; import 'rxjs/add/operator/pluck'; import 'rxjs/add/operator/take'; import 'rxjs/add/operator/toPromise'; From 748f6df6b3f2753ad965fafcce55520bed6def8f Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Mon, 30 Oct 2017 14:56:22 +0700 Subject: [PATCH 079/243] IGNITE-6670 Web Agent: Improved demo startup. (cherry picked from commit b4bd20e) --- .../ignite/console/demo/AgentClusterDemo.java | 32 +++++++++++-------- .../demo/service/DemoCachesLoadService.java | 3 +- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/demo/AgentClusterDemo.java b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/demo/AgentClusterDemo.java index 886888b168d89..7157df7e8c65d 100644 --- a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/demo/AgentClusterDemo.java +++ b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/demo/AgentClusterDemo.java @@ -27,9 +27,9 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteServices; import org.apache.ignite.Ignition; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import 
org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.MemoryPolicyConfiguration; import org.apache.ignite.console.demo.service.DemoCachesLoadService; import org.apache.ignite.console.demo.service.DemoComputeLoadService; import org.apache.ignite.console.demo.service.DemoRandomCacheLoadService; @@ -53,6 +53,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_PERFORMANCE_SUGGESTIONS_DISABLED; import static org.apache.ignite.IgniteSystemProperties.IGNITE_QUIET; import static org.apache.ignite.IgniteSystemProperties.IGNITE_UPDATE_NOTIFIER; +import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_DATA_REGION_INITIAL_SIZE; import static org.apache.ignite.console.demo.AgentDemoUtils.newScheduledThreadPool; import static org.apache.ignite.events.EventType.EVTS_DISCOVERY; import static org.apache.ignite.internal.visor.util.VisorTaskUtils.VISOR_TASK_EVTS; @@ -132,15 +133,16 @@ private static IgniteConfiguration igniteConfiguration(int basePort, int gridIdx cfg.setGridLogger(new Slf4jLogger(log)); cfg.setMetricsLogFrequency(0); - MemoryConfiguration memCfg = new MemoryConfiguration(); + DataRegionConfiguration dataRegCfg = new DataRegionConfiguration(); + dataRegCfg.setName("demo"); + dataRegCfg.setMetricsEnabled(true); + dataRegCfg.setMaxSize(DFLT_DATA_REGION_INITIAL_SIZE); - MemoryPolicyConfiguration memPlc = new MemoryPolicyConfiguration(); - memPlc.setName("demo"); - memPlc.setMetricsEnabled(true); + DataStorageConfiguration dataStorageCfg = new DataStorageConfiguration(); + dataStorageCfg.setDefaultDataRegionConfiguration(dataRegCfg); + dataStorageCfg.setSystemRegionMaxSize(DFLT_DATA_REGION_INITIAL_SIZE); - memCfg.setMemoryPolicies(memPlc); - - cfg.setMemoryConfiguration(memCfg); + cfg.setDataStorageConfiguration(dataStorageCfg); if (client) cfg.setClientMode(true); @@ -157,10 +159,10 @@ private static void deployServices(IgniteServices services) { services.deployMultiple("Demo service: Multiple 
instances", new DemoServiceMultipleInstances(), 7, 3); services.deployNodeSingleton("Demo service: Node singleton", new DemoServiceNodeSingleton()); services.deployClusterSingleton("Demo service: Cluster singleton", new DemoServiceClusterSingleton()); + services.deployClusterSingleton("Demo caches load service", new DemoCachesLoadService(20)); services.deployKeyAffinitySingleton("Demo service: Key affinity singleton", new DemoServiceKeyAffinity(), DemoCachesLoadService.CAR_CACHE_NAME, "id"); - services.deployClusterSingleton("Demo caches load service", new DemoCachesLoadService(20)); services.deployNodeSingleton("RandomCache load service", new DemoRandomCacheLoadService(20)); services.deployMultiple("Demo service: Compute load", new DemoComputeLoadService(), 2, 1); @@ -195,8 +197,10 @@ public static CountDownLatch tryStart() { int idx = cnt.incrementAndGet(); int port = basePort.get(); + IgniteEx ignite = null; + try { - IgniteEx ignite = (IgniteEx)Ignition.start(igniteConfiguration(port, idx, false)); + ignite = (IgniteEx)Ignition.start(igniteConfiguration(port, idx, false)); if (idx == 0) { Collection jettyAddrs = ignite.localNode().attribute(ATTR_REST_JETTY_ADDRS); @@ -219,8 +223,6 @@ public static CountDownLatch tryStart() { demoUrl = String.format("http://%s:%d", jettyHost, jettyPort); initLatch.countDown(); - - deployServices(ignite.services(ignite.cluster().forServers())); } } catch (Throwable e) { @@ -234,13 +236,15 @@ public static CountDownLatch tryStart() { } finally { if (idx == NODE_CNT) { + deployServices(ignite.services(ignite.cluster().forServers())); + log.info("DEMO: All embedded nodes for demo successfully started"); execSrv.shutdown(); } } } - }, 1, 10, TimeUnit.SECONDS); + }, 1, 5, TimeUnit.SECONDS); } return initLatch; diff --git a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/demo/service/DemoCachesLoadService.java 
b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/demo/service/DemoCachesLoadService.java index 40fd4ac1ba1fe..6691d1d3ec321 100644 --- a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/demo/service/DemoCachesLoadService.java +++ b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/demo/service/DemoCachesLoadService.java @@ -194,7 +194,6 @@ public DemoCachesLoadService(int cnt) { }, 10, 3, TimeUnit.SECONDS); } - /** * Create base cache configuration. * @@ -208,7 +207,7 @@ private static CacheConfiguration cacheConfiguration(String name) { ccfg.setQueryDetailMetricsSize(10); ccfg.setStatisticsEnabled(true); ccfg.setSqlFunctionClasses(SQLFunctions.class); - ccfg.setMemoryPolicyName("demo"); + ccfg.setDataRegionName("demo"); return ccfg; } From 923b744d950116a6293b6eb93b33ebf6cc6ee317 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Mon, 30 Oct 2017 19:21:15 +0700 Subject: [PATCH 080/243] IGNITE-6789 Web Console: Reworked sorting of caches on Queries screen. (cherry picked from commit 1d39507) --- modules/web-console/frontend/app/modules/sql/sql.controller.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/web-console/frontend/app/modules/sql/sql.controller.js b/modules/web-console/frontend/app/modules/sql/sql.controller.js index a3fc0cacfc1d4..332d4d267cf14 100644 --- a/modules/web-console/frontend/app/modules/sql/sql.controller.js +++ b/modules/web-console/frontend/app/modules/sql/sql.controller.js @@ -864,7 +864,7 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', }); return cachesAcc; - }, []), 'label'); + }, []), (cache) => cache.label.toLowerCase()); // Reset to first cache in case of stopped selected. 
const cacheNames = _.map($scope.caches, (cache) => cache.value); From 488c20949c966892fb6a61f2edce9e53edf059e0 Mon Sep 17 00:00:00 2001 From: vsisko Date: Mon, 30 Oct 2017 19:43:48 +0700 Subject: [PATCH 081/243] IGNITE-6398 Web Console: Added support for ClientConnectorConfiguration. (cherry picked from commit 58dbbc5) --- .../visor/VisorDataTransferObject.java | 3 + .../VisorClientConnectorConfiguration.java | 166 ++++++++++++++++++ .../visor/node/VisorGridConfiguration.java | 26 ++- .../node/VisorSqlConnectorConfiguration.java | 3 + .../config/VisorConfigurationCommand.scala | 29 ++- modules/web-console/backend/app/mongo.js | 11 ++ .../generator/AbstractTransformer.js | 5 + .../generator/ConfigurationGenerator.js | 35 +++- .../clusters/client-connector.pug | 59 +++++++ .../configuration/clusters/sql-connector.pug | 4 +- .../views/configuration/clusters.tpl.pug | 6 + 11 files changed, 332 insertions(+), 15 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorClientConnectorConfiguration.java create mode 100644 modules/web-console/frontend/app/modules/states/configuration/clusters/client-connector.pug diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/VisorDataTransferObject.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/VisorDataTransferObject.java index 866089103ebf6..4635c3e36561f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/VisorDataTransferObject.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/VisorDataTransferObject.java @@ -41,6 +41,9 @@ public abstract class VisorDataTransferObject implements Externalizable { /** Version 2. */ protected static final byte V2 = 2; + /** Version 3. */ + protected static final byte V3 = 3; + /** * @param col Source collection. * @param Collection type. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorClientConnectorConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorClientConnectorConfiguration.java new file mode 100644 index 0000000000000..397b72a3c0002 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorClientConnectorConfiguration.java @@ -0,0 +1,166 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.node; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.apache.ignite.configuration.ClientConnectorConfiguration; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorDataTransferObject; +import org.jetbrains.annotations.Nullable; + +/** + * Data transfer object for client connector configuration. + */ +public class VisorClientConnectorConfiguration extends VisorDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Host. */ + private String host; + + /** Port. */ + private int port; + + /** Port range. 
*/ + private int portRange; + + /** Max number of opened cursors per connection. */ + private int maxOpenCursorsPerConn; + + /** Socket send buffer size. */ + private int sockSndBufSize; + + /** Socket receive buffer size. */ + private int sockRcvBufSize; + + /** TCP no delay. */ + private boolean tcpNoDelay; + + /** Thread pool size. */ + private int threadPoolSize ; + + /** + * Default constructor. + */ + public VisorClientConnectorConfiguration() { + // No-op. + } + + /** + * Create data transfer object for Sql connector configuration. + * + * @param cfg Sql connector configuration. + */ + public VisorClientConnectorConfiguration(ClientConnectorConfiguration cfg) { + host = cfg.getHost(); + port = cfg.getPort(); + portRange = cfg.getPortRange(); + maxOpenCursorsPerConn = cfg.getMaxOpenCursorsPerConnection(); + sockSndBufSize = cfg.getSocketSendBufferSize(); + sockRcvBufSize = cfg.getSocketReceiveBufferSize(); + tcpNoDelay = cfg.isTcpNoDelay(); + threadPoolSize = cfg.getThreadPoolSize(); + } + + /** + * @return Host. + */ + @Nullable public String getHost() { + return host; + } + + /** + * @return Port. + */ + public int getPort() { + return port; + } + + /** + * @return Port range. + */ + public int getPortRange() { + return portRange; + } + + /** + * @return Maximum number of opened cursors. + */ + public int getMaxOpenCursorsPerConnection() { + return maxOpenCursorsPerConn; + } + + /** + * @return Socket send buffer size in bytes. + */ + public int getSocketSendBufferSize() { + return sockSndBufSize; + } + + /** + * @return Socket receive buffer size in bytes. + */ + public int getSocketReceiveBufferSize() { + return sockRcvBufSize; + } + + /** + * @return TCP NO_DELAY flag. + */ + public boolean isTcpNoDelay() { + return tcpNoDelay; + } + /** + * @return Thread pool that is in charge of processing SQL requests. 
+ */ + public int getThreadPoolSize() { + return threadPoolSize; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeString(out, host); + out.writeInt(port); + out.writeInt(portRange); + out.writeInt(maxOpenCursorsPerConn); + out.writeInt(sockSndBufSize); + out.writeInt(sockRcvBufSize ); + out.writeBoolean(tcpNoDelay); + out.writeInt(threadPoolSize); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + host = U.readString(in); + port = in.readInt(); + portRange = in.readInt(); + maxOpenCursorsPerConn = in.readInt(); + sockSndBufSize = in.readInt(); + sockRcvBufSize = in.readInt(); + tcpNoDelay = in.readBoolean(); + threadPoolSize = in.readInt(); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(VisorClientConnectorConfiguration.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorGridConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorGridConfiguration.java index 99cce40e2a80f..9e2370c4ed6b0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorGridConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorGridConfiguration.java @@ -26,9 +26,9 @@ import java.util.Properties; import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.configuration.BinaryConfiguration; +import org.apache.ignite.configuration.ClientConnectorConfiguration; import org.apache.ignite.configuration.HadoopConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.SqlConnectorConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; @@ -122,6 +122,9 
@@ public class VisorGridConfiguration extends VisorDataTransferObject { /** Configuration of data storage. */ private VisorDataStorageConfiguration dataStorage; + /** Client connector configuration */ + private VisorClientConnectorConfiguration clnConnCfg; + /** * Default constructor. */ @@ -177,10 +180,10 @@ public VisorGridConfiguration(IgniteEx ignite) { if (hc != null) hadoopCfg = new VisorHadoopConfiguration(hc); - SqlConnectorConfiguration scc = c.getSqlConnectorConfiguration(); + ClientConnectorConfiguration ccc = c.getClientConnectorConfiguration(); - if (scc != null) - sqlConnCfg = new VisorSqlConnectorConfiguration(scc); + if (ccc != null) + clnConnCfg = new VisorClientConnectorConfiguration(ccc); srvcCfgs = VisorServiceConfiguration.list(c.getServiceConfiguration()); @@ -355,6 +358,13 @@ public VisorSqlConnectorConfiguration getSqlConnectorConfiguration() { return sqlConnCfg; } + /** + * @return Client connector configuration. + */ + public VisorClientConnectorConfiguration getClientConnectorConfiguration() { + return clnConnCfg; + } + /** * @return List of service configurations */ @@ -371,7 +381,7 @@ public VisorDataStorageConfiguration getDataStorageConfiguration() { /** {@inheritDoc} */ @Override public byte getProtocolVersion() { - return V2; + return V3; } /** {@inheritDoc} */ @@ -402,6 +412,7 @@ public VisorDataStorageConfiguration getDataStorageConfiguration() { out.writeObject(sqlConnCfg); U.writeCollection(out, srvcCfgs); out.writeObject(dataStorage); + out.writeObject(clnConnCfg); } /** {@inheritDoc} */ @@ -432,8 +443,11 @@ public VisorDataStorageConfiguration getDataStorageConfiguration() { sqlConnCfg = (VisorSqlConnectorConfiguration) in.readObject(); srvcCfgs = U.readList(in); - if (protoVer == V2) + if (protoVer >= V2) dataStorage = (VisorDataStorageConfiguration)in.readObject(); + + if (protoVer >= V3) + clnConnCfg = (VisorClientConnectorConfiguration)in.readObject(); } /** {@inheritDoc} */ diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorSqlConnectorConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorSqlConnectorConfiguration.java index ccde415479a97..2c2f959094ae0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorSqlConnectorConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorSqlConnectorConfiguration.java @@ -28,7 +28,10 @@ /** * Data transfer object for SQL connector configuration. + * + * Deprecated as of Apache Ignite 2.3 */ +@Deprecated public class VisorSqlConnectorConfiguration extends VisorDataTransferObject { /** */ private static final long serialVersionUID = 0L; diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/config/VisorConfigurationCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/config/VisorConfigurationCommand.scala index 13ab778c3e915..b9b0af96f6a61 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/config/VisorConfigurationCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/config/VisorConfigurationCommand.scala @@ -17,19 +17,16 @@ package org.apache.ignite.visor.commands.config +import java.util.UUID import org.apache.ignite.cluster.ClusterGroupEmptyException import org.apache.ignite.internal.util.scala.impl import org.apache.ignite.internal.util.{IgniteUtils => U} -import org.apache.ignite.lang.IgniteBiTuple import org.apache.ignite.visor.VisorTag import org.apache.ignite.visor.commands.cache.VisorCacheCommand._ import org.apache.ignite.visor.commands.common.{VisorConsoleCommand, VisorTextTable} import org.apache.ignite.visor.visor._ -import java.lang.System._ -import java.util.UUID - -import org.apache.ignite.internal.visor.node.{VisorSpiDescription, VisorGridConfiguration, VisorNodeConfigurationCollectorTask} +import 
org.apache.ignite.internal.visor.node.{VisorGridConfiguration, VisorNodeConfigurationCollectorTask, VisorSpiDescription} import org.apache.ignite.internal.visor.util.VisorTaskUtils._ import scala.collection.JavaConversions._ @@ -240,6 +237,26 @@ class VisorConfigurationCommand extends VisorConsoleCommand { spisT.render() + println("\nClient connector configuration") + + val cliConnCfg = cfg.getClientConnectorConfiguration + val cliConnTbl = VisorTextTable() + + if (cliConnCfg != null) { + cliConnTbl += ("Host", safe(cliConnCfg.getHost, safe(basic.getLocalHost))) + cliConnTbl += ("Port", cliConnCfg.getPort) + cliConnTbl += ("Port range", cliConnCfg.getPortRange) + cliConnTbl += ("Socket send buffer size", formatMemory(cliConnCfg.getSocketSendBufferSize)) + cliConnTbl += ("Socket receive buffer size", formatMemory(cliConnCfg.getSocketReceiveBufferSize)) + cliConnTbl += ("Max connection cursors", cliConnCfg.getMaxOpenCursorsPerConnection) + cliConnTbl += ("Pool size", cliConnCfg.getThreadPoolSize) + cliConnTbl += ("TCP_NODELAY", bool2Str(cliConnCfg.isTcpNoDelay)) + + cliConnTbl.render() + } + else + println("Client Connection is not configured") + println("\nPeer-to-Peer:") val p2pT = VisorTextTable() @@ -371,7 +388,7 @@ class VisorConfigurationCommand extends VisorConsoleCommand { * @return List of strings. */ private[this] def compactProperty(name: String, value: String): List[String] = { - val ps = getProperty("path.separator") + val ps = System.getProperty("path.separator") // Split all values having path separator into multiple lines (with few exceptions...). 
val lst = diff --git a/modules/web-console/backend/app/mongo.js b/modules/web-console/backend/app/mongo.js index 5b02a72fce4b6..7043fcdfd04b9 100644 --- a/modules/web-console/backend/app/mongo.js +++ b/modules/web-console/backend/app/mongo.js @@ -854,6 +854,17 @@ module.exports.factory = function(passportMongo, settings, pluginMongo, mongoose className: String } }], + clientConnectorConfiguration: { + enabled: Boolean, + host: String, + port: Number, + portRange: Number, + socketSendBufferSize: Number, + socketReceiveBufferSize: Number, + tcpNoDelay: {type: Boolean, default: true}, + maxOpenCursorsPerConnection: Number, + threadPoolSize: Number + }, loadBalancingSpi: [{ kind: {type: String, enum: ['RoundRobin', 'Adaptive', 'WeightedRandom', 'Custom']}, RoundRobin: { diff --git a/modules/web-console/frontend/app/modules/configuration/generator/AbstractTransformer.js b/modules/web-console/frontend/app/modules/configuration/generator/AbstractTransformer.js index f6f471c3c482e..af799bd20f456 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/AbstractTransformer.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/AbstractTransformer.js @@ -78,6 +78,11 @@ export default class AbstractTransformer { return this.toSection(this.generator.clusterCacheKeyConfiguration(keyCfgs)); } + // Generate client connector configuration. + static clusterClientConnector(cluster, available) { + return this.toSection(this.generator.clusterClientConnector(cluster, available)); + } + // Generate collision group. 
static clusterCollision(collision) { return this.toSection(this.generator.clusterCollision(collision)); diff --git a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js index 1b12d52b5fe30..16202f809e833 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js @@ -78,6 +78,10 @@ export default class IgniteConfigurationGenerator { this.clusterBinary(cluster.binaryConfiguration, cfg); this.clusterCacheKeyConfiguration(cluster.cacheKeyConfiguration, cfg); this.clusterCheckpoint(cluster, cluster.caches, cfg); + + if (available('2.3.0')) + this.clusterClientConnector(cluster, available, cfg); + this.clusterCollision(cluster.collision, cfg); this.clusterCommunication(cluster, cfg); this.clusterConnector(cluster.connector, cfg); @@ -106,7 +110,9 @@ export default class IgniteConfigurationGenerator { if (available(['2.1.0', '2.3.0'])) this.clusterPersistence(cluster.persistenceStoreConfiguration, available, cfg); - this.clusterQuery(cluster, available, cfg); + if (available(['2.1.0', '2.3.0'])) + this.clusterQuery(cluster, available, cfg); + this.clusterServiceConfiguration(cluster.serviceConfigurations, cluster.caches, cfg); this.clusterSsl(cluster, cfg); @@ -760,6 +766,33 @@ export default class IgniteConfigurationGenerator { return cfg; } + // Generate cluster query group. 
+ static clusterClientConnector(cluster, available, cfg = this.igniteConfigurationBean(cluster)) { + if (!available('2.3.0')) + return cfg; + + cfg.intProperty('longQueryWarningTimeout'); + + if (_.get(cluster, 'clientConnectorConfiguration.enabled') !== true) + return cfg; + + const bean = new Bean('org.apache.ignite.configuration.ClientConnectorConfiguration', 'cliConnCfg', + cluster.clientConnectorConfiguration, clusterDflts.clientConnectorConfiguration); + + bean.stringProperty('host') + .intProperty('port') + .intProperty('portRange') + .intProperty('socketSendBufferSize') + .intProperty('socketReceiveBufferSize') + .intProperty('maxOpenCursorsPerConnection') + .intProperty('threadPoolSize') + .boolProperty('tcpNoDelay'); + + cfg.beanProperty('clientConnectorConfiguration', bean); + + return cfg; + } + // Generate collision group. static clusterCollision(collision, cfg = this.igniteConfigurationBean()) { let colSpi; diff --git a/modules/web-console/frontend/app/modules/states/configuration/clusters/client-connector.pug b/modules/web-console/frontend/app/modules/states/configuration/clusters/client-connector.pug new file mode 100644 index 0000000000000..dd2fa6dedc4cf --- /dev/null +++ b/modules/web-console/frontend/app/modules/states/configuration/clusters/client-connector.pug @@ -0,0 +1,59 @@ +//- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + +include /app/helpers/jade/mixins + +-var form = 'clientConnector' +-var model = 'backupItem' +-var connectionModel = model + '.clientConnectorConfiguration' +-var connectionEnabled = connectionModel + '.enabled' + +.panel.panel-default(ng-show='$ctrl.available("2.3.0")' ng-form=form novalidate) + .panel-heading(bs-collapse-toggle ng-click=`ui.loadPanel('${form}')`) + ignite-form-panel-chevron + label Client connector configuration + ignite-form-field-tooltip.tipLabel + | Client connector configuration + ignite-form-revert + .panel-collapse(role='tabpanel' bs-collapse-target id=`${form}`) + .panel-body(ng-if=`$ctrl.available("2.3.0") && ui.isPanelLoaded('${form}')`) + .col-sm-6 + .settings-row + +checkbox('Enabled', connectionEnabled, '"ClientConnectorEnabled"', 'Flag indicating whether to configure client connector configuration') + .settings-row + +text-enabled('Host:', `${connectionModel}.host`, '"ClientConnectorHost"', connectionEnabled, 'false', 'localhost', 'Host') + .settings-row + +number('Port:', `${connectionModel}.port`, '"ClientConnectorPort"', connectionEnabled, '10800', '1025', 'Port') + .settings-row + +number('Port range:', `${connectionModel}.portRange`, '"ClientConnectorPortRange"', connectionEnabled, '100', '0', 'Port range') + .settings-row + +number('Socket send buffer size:', `${connectionModel}.socketSendBufferSize`, '"ClientConnectorSocketSendBufferSize"', connectionEnabled, '0', '0', + 'Socket send buffer size.
        \ + When set to 0, operation system default will be used') + .settings-row + +number('Socket receive buffer size:', `${connectionModel}.socketReceiveBufferSize`, '"ClientConnectorSocketReceiveBufferSize"', connectionEnabled, '0', '0', + 'Socket receive buffer size.
        \ + When set to 0, operation system default will be used') + .settings-row + +number('Max connection cursors:', `${connectionModel}.maxOpenCursorsPerConnection`, '"ClientConnectorMaxOpenCursorsPerConnection"', connectionEnabled, '128', '0', + 'Max number of opened cursors per connection') + .settings-row + +number('Pool size:', `${connectionModel}.threadPoolSize`, '"ClientConnectorThreadPoolSize"', connectionEnabled, 'max(8, availableProcessors)', '1', + 'Size of thread pool that is in charge of processing SQL requests') + .settings-row + +checkbox-enabled('TCP_NODELAY option', `${connectionModel}.tcpNoDelay`, '"ClientConnectorTcpNoDelay"', connectionEnabled, 'Value for TCP_NODELAY socket option') + .col-sm-6 + +preview-xml-java(model, 'clusterClientConnector') diff --git a/modules/web-console/frontend/app/modules/states/configuration/clusters/sql-connector.pug b/modules/web-console/frontend/app/modules/states/configuration/clusters/sql-connector.pug index 89e00d759f9f1..d72f962074424 100644 --- a/modules/web-console/frontend/app/modules/states/configuration/clusters/sql-connector.pug +++ b/modules/web-console/frontend/app/modules/states/configuration/clusters/sql-connector.pug @@ -21,7 +21,7 @@ include /app/helpers/jade/mixins -var connectionModel = model + '.sqlConnectorConfiguration' -var connectionEnabled = connectionModel + '.enabled' -.panel.panel-default(ng-show='$ctrl.available("2.1.0")' ng-form=form novalidate) +.panel.panel-default(ng-show='$ctrl.available(["2.1.0", "2.3.0"])' ng-form=form novalidate) .panel-heading(bs-collapse-toggle ng-click=`ui.loadPanel('${form}')`) ignite-form-panel-chevron label Query configuration @@ -30,7 +30,7 @@ include /app/helpers/jade/mixins //- TODO IGNITE-5415 Add link to documentation. 
ignite-form-revert .panel-collapse(role='tabpanel' bs-collapse-target id=`${form}`) - .panel-body(ng-if=`$ctrl.available("2.1.0") && ui.isPanelLoaded('${form}')`) + .panel-body(ng-if=`$ctrl.available(["2.1.0", "2.3.0"]) && ui.isPanelLoaded('${form}')`) .col-sm-6 .settings-row +checkbox('Enabled', connectionEnabled, '"SqlConnectorEnabled"', 'Flag indicating whether to configure SQL connector configuration') diff --git a/modules/web-console/frontend/views/configuration/clusters.tpl.pug b/modules/web-console/frontend/views/configuration/clusters.tpl.pug index 26a1da13bd4fb..19ed3500c5223 100644 --- a/modules/web-console/frontend/views/configuration/clusters.tpl.pug +++ b/modules/web-console/frontend/views/configuration/clusters.tpl.pug @@ -45,6 +45,10 @@ include /app/helpers/jade/mixins include /app/modules/states/configuration/clusters/binary include /app/modules/states/configuration/clusters/cache-key-cfg include /app/modules/states/configuration/clusters/checkpoint + + //- Since ignite 2.3 + include /app/modules/states/configuration/clusters/client-connector + include /app/modules/states/configuration/clusters/collision include /app/modules/states/configuration/clusters/communication include /app/modules/states/configuration/clusters/connector @@ -73,6 +77,8 @@ include /app/helpers/jade/mixins //- Since ignite 2.1, deprecated in ignite 2.3 include /app/modules/states/configuration/clusters/persistence + + //- Deprecated in ignite 2.3 include /app/modules/states/configuration/clusters/sql-connector include /app/modules/states/configuration/clusters/service From b433df44c99f0c21ca894e99e71317ba2466331f Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Mon, 30 Oct 2017 18:31:59 +0300 Subject: [PATCH 082/243] IGNITE-6746 Persist system cache when persistent data regions are present --- .../processors/cache/GridCacheUtils.java | 14 +++++++--- .../IgniteCacheDatabaseSharedManager.java | 26 ++++++++++++------- 2 files changed, 27 insertions(+), 13 deletions(-) diff 
--git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java index 26e22543be012..1f289d0840e37 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java @@ -1701,14 +1701,22 @@ public static boolean isPersistentCache(CacheConfiguration ccfg, DataStorageConf return false; } + /** * @return {@code true} if persistence is enabled for at least one data region, {@code false} if not. */ public static boolean isPersistenceEnabled(IgniteConfiguration cfg) { - if (cfg.getDataStorageConfiguration() == null) + return isPersistenceEnabled(cfg.getDataStorageConfiguration()); + } + + /** + * @return {@code true} if persistence is enabled for at least one data region, {@code false} if not. + */ + public static boolean isPersistenceEnabled(DataStorageConfiguration cfg) { + if (cfg == null) return false; - DataRegionConfiguration dfltReg = cfg.getDataStorageConfiguration().getDefaultDataRegionConfiguration(); + DataRegionConfiguration dfltReg = cfg.getDefaultDataRegionConfiguration(); if (dfltReg == null) return false; @@ -1716,7 +1724,7 @@ public static boolean isPersistenceEnabled(IgniteConfiguration cfg) { if (dfltReg.isPersistenceEnabled()) return true; - DataRegionConfiguration[] regCfgs = cfg.getDataStorageConfiguration().getDataRegionConfigurations(); + DataRegionConfiguration[] regCfgs = cfg.getDataRegionConfigurations(); if (regCfgs == null) return false; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index 933c19538c97f..f00edbeaf15ec 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -57,6 +57,7 @@ import org.apache.ignite.internal.processors.cluster.IgniteChangeGlobalStateSupport; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; @@ -218,36 +219,35 @@ protected void initDataRegions(DataStorageConfiguration memCfg) throws IgniteChe if (dataRegionCfgs != null) { for (DataRegionConfiguration dataRegionCfg : dataRegionCfgs) - addDataRegion(memCfg, dataRegionCfg, dataRegionCfg.getName()); + addDataRegion(memCfg, dataRegionCfg); } addDataRegion( memCfg, - memCfg.getDefaultDataRegionConfiguration(), - memCfg.getDefaultDataRegionConfiguration().getName() + memCfg.getDefaultDataRegionConfiguration() ); addDataRegion( memCfg, createSystemDataRegion( memCfg.getSystemRegionInitialSize(), - memCfg.getSystemRegionMaxSize() - ), - SYSTEM_DATA_REGION_NAME + memCfg.getSystemRegionMaxSize(), + CU.isPersistenceEnabled(memCfg) + ) ); } /** * @param dataStorageCfg Database config. * @param dataRegionCfg Data region config. - * @param dataRegionName Data region name. * @throws IgniteCheckedException If failed to initialize swap path. 
*/ private void addDataRegion( DataStorageConfiguration dataStorageCfg, - DataRegionConfiguration dataRegionCfg, - String dataRegionName + DataRegionConfiguration dataRegionCfg ) throws IgniteCheckedException { + String dataRegionName = dataRegionCfg.getName(); + String dfltMemPlcName = dataStorageCfg.getDefaultDataRegionConfiguration().getName(); if (dfltMemPlcName == null) @@ -315,15 +315,21 @@ private boolean hasCustomDefaultDataRegion(DataRegionConfiguration[] memPlcsCfgs /** * @param sysCacheInitSize Initial size of PageMemory to be created for system cache. * @param sysCacheMaxSize Maximum size of PageMemory to be created for system cache. + * @param persistenceEnabled Persistence enabled flag. * * @return {@link DataRegionConfiguration configuration} of DataRegion for system cache. */ - private DataRegionConfiguration createSystemDataRegion(long sysCacheInitSize, long sysCacheMaxSize) { + private DataRegionConfiguration createSystemDataRegion( + long sysCacheInitSize, + long sysCacheMaxSize, + boolean persistenceEnabled + ) { DataRegionConfiguration res = new DataRegionConfiguration(); res.setName(SYSTEM_DATA_REGION_NAME); res.setInitialSize(sysCacheInitSize); res.setMaxSize(sysCacheMaxSize); + res.setPersistenceEnabled(persistenceEnabled); return res; } From b6cd74ca5cb0b96aa3defac87cd3ebb8680bc90c Mon Sep 17 00:00:00 2001 From: Sergey Chugunov Date: Fri, 27 Oct 2017 18:31:49 +0300 Subject: [PATCH 083/243] IGNITE-6641 Improved partition ID handling for links - Fixes #2866. 
Signed-off-by: Alexey Goncharuk --- .../ignite/internal/pagemem/PageIdUtils.java | 19 +++++++++++++++++++ .../persistence/freelist/FreeListImpl.java | 18 ++++++++++++++++-- .../cache/persistence/tree/io/DataPageIO.java | 17 +++++++++++++++-- 3 files changed, 50 insertions(+), 4 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdUtils.java index d47f2de903961..2754d79aa49d1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdUtils.java @@ -190,6 +190,14 @@ public static long rotatePageId(long pageId) { (updatedRotationId << (PAGE_IDX_SIZE + PART_ID_SIZE + FLAG_SIZE)); } + /** + * Masks partition ID from full page ID. + * @param pageId Page ID to mask partition ID from. + */ + public static long maskPartitionId(long pageId) { + return pageId & ~((-1L << PAGE_IDX_SIZE) & (~(-1L << PAGE_IDX_SIZE + PART_ID_SIZE))); + } + /** * Change page type. * @@ -213,4 +221,15 @@ public static String toDetailString(long pageId) { ")" ; } + + /** + * @param pageId Page ID. + * @param partId Partition ID. 
+ */ + public static long changePartitionId(long pageId, int partId) { + byte flag = flag(pageId); + int pageIdx = pageIndex(pageId); + + return pageId(partId, flag, pageIdx); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeListImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeListImpl.java index 6a87d3e84da6b..5bacc53a4c896 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeListImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeListImpl.java @@ -191,7 +191,7 @@ private int addRow( CacheDataRow row, int rowSize ) throws IgniteCheckedException { - io.addRow(pageAddr, row, rowSize, pageSize()); + io.addRow(pageId, pageAddr, row, rowSize, pageSize()); if (needWalDeltaRecord(pageId, page, null)) { // TODO IGNITE-5829 This record must contain only a reference to a logical WAL record with the actual data. @@ -256,12 +256,20 @@ private int addRowFragment( /** */ - private final PageHandler rmvRow = new RemoveRowHandler(); + private final PageHandler rmvRow; /** * */ private final class RemoveRowHandler extends PageHandler { + /** Indicates whether partition ID should be masked from page ID. */ + private final boolean maskPartId; + + /** */ + RemoveRowHandler(boolean maskPartId) { + this.maskPartId = maskPartId; + } + @Override public Long run( int cacheId, long pageId, @@ -293,6 +301,7 @@ private final class RemoveRowHandler extends PageHandler { if (oldBucket != newBucket) { // It is possible that page was concurrently taken for put, in this case put will handle bucket change. + pageId = maskPartId ? 
PageIdUtils.maskPartitionId(pageId) : pageId; if (removeDataPage(pageId, page, pageAddr, io, oldBucket)) put(null, pageId, page, pageAddr, newBucket); } @@ -330,6 +339,9 @@ public FreeListImpl( long metaPageId, boolean initNew) throws IgniteCheckedException { super(cacheId, name, memPlc.pageMemory(), BUCKETS, wal, metaPageId); + + rmvRow = new RemoveRowHandler(cacheId == 0); + this.evictionTracker = memPlc.evictionTracker(); this.reuseList = reuseList == null ? this : reuseList; int pageSize = pageMem.pageSize(); @@ -492,6 +504,8 @@ private long allocateDataPage(int part) throws IgniteCheckedException { if (allocated) pageId = allocateDataPage(row.partition()); + else + pageId = PageIdUtils.changePartitionId(pageId, (row.partition())); DataPageIO init = reuseBucket || allocated ? DataPageIO.VERSIONS.latest() : null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/DataPageIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/DataPageIO.java index 628ff385b34ab..173ed665ca5cf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/DataPageIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/DataPageIO.java @@ -775,6 +775,7 @@ private boolean isEnoughSpace(int newEntryFullSize, int firstEntryOff, int direc /** * Adds row to this data page and sets respective link to the given row object. * + * @param pageId page ID. * @param pageAddr Page address. * @param row Cache data row. * @param rowSize Row size. @@ -782,6 +783,7 @@ private boolean isEnoughSpace(int newEntryFullSize, int firstEntryOff, int direc * @throws IgniteCheckedException If failed. 
*/ public void addRow( + final long pageId, final long pageAddr, CacheDataRow row, final int rowSize, @@ -800,7 +802,7 @@ public void addRow( int itemId = addItem(pageAddr, fullEntrySize, directCnt, indirectCnt, dataOff, pageSize); - setLink(row, pageAddr, itemId); + setLinkByPageId(row, pageId, itemId); } /** @@ -1021,7 +1023,18 @@ private int addRowFragment( * @param itemId Item ID. */ private void setLink(CacheDataRow row, long pageAddr, int itemId) { - row.link(PageIdUtils.link(getPageId(pageAddr), itemId)); + long pageId = getPageId(pageAddr); + + setLinkByPageId(row, pageId, itemId); + } + + /** + * @param row Row to set link to. + * @param pageId Page ID. + * @param itemId Item ID. + */ + private void setLinkByPageId(CacheDataRow row, long pageId, int itemId) { + row.link(PageIdUtils.link(pageId, itemId)); } /** From 0a80f2a2670df91c0bcd250d0cbd30aaa87d83cc Mon Sep 17 00:00:00 2001 From: Sergey Chugunov Date: Wed, 1 Nov 2017 13:12:35 +0300 Subject: [PATCH 084/243] IGNITE-6641 Fixed partition ID handling improvement --- .../cache/persistence/freelist/io/PagesListNodeIO.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/io/PagesListNodeIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/io/PagesListNodeIO.java index a93cdf3e457e3..8eba4b1363e55 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/io/PagesListNodeIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/io/PagesListNodeIO.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.cache.persistence.freelist.io; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageUtils; import 
org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; @@ -210,7 +211,7 @@ public boolean removePage(long pageAddr, long dataPageId) { int cnt = getCount(pageAddr); for (int i = 0; i < cnt; i++) { - if (getAt(pageAddr, i) == dataPageId) { + if (PageIdUtils.maskPartitionId(getAt(pageAddr, i)) == PageIdUtils.maskPartitionId(dataPageId)) { if (i != cnt - 1) copyMemory(pageAddr, offset(i + 1), pageAddr, offset(i), 8 * (cnt - i - 1)); From 926b5aa6e1d5c36dfaf3fb874aebd1ba98e4eb1d Mon Sep 17 00:00:00 2001 From: devozerov Date: Wed, 1 Nov 2017 16:47:16 +0300 Subject: [PATCH 085/243] IGNITE-6624: SQL: optimized backup filtering (no key wrap/unwrap). This closes #2847. --- .../cache/persistence/CacheDataRowAdapter.java | 2 +- .../ignite/spi/indexing/IndexingQueryCacheFilter.java | 10 ++++++++++ .../ignite/internal/processors/query/h2/H2Cursor.java | 9 +-------- .../processors/query/h2/database/H2PkHashIndex.java | 2 +- .../processors/query/h2/opt/GridH2IndexBase.java | 9 ++------- 5 files changed, 15 insertions(+), 17 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java index 9257424a29ffa..9f2e031076a48 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java @@ -555,7 +555,7 @@ public boolean isReady() { /** {@inheritDoc} */ @Override public int partition() { - return PageIdUtils.partId(link); + return PageIdUtils.partId(pageId(link)); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryCacheFilter.java 
b/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryCacheFilter.java index 6257f47e2f516..fa3ec97634968 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryCacheFilter.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/indexing/IndexingQueryCacheFilter.java @@ -64,6 +64,16 @@ public IndexingQueryCacheFilter(GridCacheAffinityManager aff, Set parts public boolean apply(Object key) { int part = aff.partition(key); + return applyPartition(part); + } + + /** + * Apply filter. + * + * @param part Partition. + * @return {@code True} if passed. + */ + public boolean applyPartition(int part) { if (parts == null) return aff.primaryByPartition(locNode, part, topVer); else diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Cursor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Cursor.java index e09108d7a01db..f4c35f2ee4410 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Cursor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Cursor.java @@ -82,14 +82,7 @@ public H2Cursor(GridCursor cursor) { if (row.expireTime() > 0 && row.expireTime() <= time) continue; - if (filter == null) - return true; - - Object key = row.getValue(0).getObject(); - - assert key != null; - - if (filter.apply(key)) + if (filter == null || filter.applyPartition(row.partition())) return true; } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java index 891e59f86a4df..7a92f1bcbec98 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java @@ 
-219,7 +219,7 @@ private H2Cursor(GridCursor cursor, IndexingQueryCacheFi CacheDataRow dataRow = cursor.get(); - if (filter.apply(dataRow.key())) + if (filter.applyPartition(dataRow.partition())) return true; } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java index 92b7d108da414..c28d067e90972 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java @@ -1612,17 +1612,12 @@ protected FilteringCursor(GridCursor cursor, long time, IndexingQuery * @param row Row. * @return If this row was accepted. */ - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "SimplifiableIfStatement"}) protected boolean accept(GridH2Row row) { if (row.expireTime() != 0 && row.expireTime() <= time) return false; - if (fltr == null) - return true; - - Object key = row.getValue(KEY_COL).getObject(); - - return fltr.apply(key); + return fltr == null || fltr.applyPartition(row.partition()); } /** {@inheritDoc} */ From d4053067481ebe2a005692ef20e9ac248ae07bd0 Mon Sep 17 00:00:00 2001 From: devozerov Date: Thu, 2 Nov 2017 21:37:19 +0300 Subject: [PATCH 086/243] IGNITE-6626: SQL: avoid row materialization when filtering out not relevant partitions. This closes #2848. 
--- .../cache/persistence/tree/BPlusTree.java | 28 ++--- .../ignite/internal/util/IgniteTree.java | 12 ++ .../query/h2/opt/GridH2SpatialIndex.java | 16 ++- .../processors/query/h2/H2Cursor.java | 18 +-- .../processors/query/h2/database/H2Tree.java | 17 ++- .../query/h2/database/H2TreeIndex.java | 18 ++- .../query/h2/database/io/H2ExtrasInnerIO.java | 10 +- .../query/h2/database/io/H2ExtrasLeafIO.java | 10 +- .../query/h2/opt/GridH2IndexBase.java | 112 ++---------------- 9 files changed, 88 insertions(+), 153 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java index c73b4c75cc51b..8e6e099395401 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java @@ -928,23 +928,12 @@ private void checkDestroyed() { throw new IllegalStateException("Tree is being concurrently destroyed: " + getName()); } - /** - * @param lower Lower bound inclusive or {@code null} if unbounded. - * @param upper Upper bound inclusive or {@code null} if unbounded. - * @return Cursor. - * @throws IgniteCheckedException If failed. - */ + /** {@inheritDoc} */ @Override public GridCursor find(L lower, L upper) throws IgniteCheckedException { return find(lower, upper, null); } - /** - * @param lower Lower bound inclusive or {@code null} if unbounded. - * @param upper Upper bound inclusive or {@code null} if unbounded. - * @param x Implementation specific argument, {@code null} always means that we need to return full detached data row. - * @return Cursor. - * @throws IgniteCheckedException If failed. 
- */ + /** {@inheritDoc} */ public final GridCursor find(L lower, L upper, Object x) throws IgniteCheckedException { checkDestroyed(); @@ -4544,13 +4533,22 @@ private boolean fillFromBuffer(long pageAddr, BPlusIO io, int startIdx, int c if (rows == EMPTY) rows = (T[])new Object[cnt]; + int foundCnt = 0; + for (int i = 0; i < cnt; i++) { T r = getRow(io, pageAddr, startIdx + i, x); - rows = GridArrays.set(rows, i, r); + if (r != null) + rows = GridArrays.set(rows, foundCnt++, r); + } + + if (foundCnt == 0) { + rows = (T[])EMPTY; + + return false; } - GridArrays.clearTail(rows, cnt); + GridArrays.clearTail(rows, foundCnt); return true; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteTree.java b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteTree.java index 396b8a46f3a8c..9e854d28f6cb0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteTree.java @@ -63,6 +63,18 @@ public interface IgniteTree { */ public GridCursor find(L lower, L upper) throws IgniteCheckedException; + /** + * Returns a cursor from lower to upper bounds inclusive. + * + * @param lower Lower bound or {@code null} if unbounded. + * @param upper Upper bound or {@code null} if unbounded. + * @param x Implementation specific argument, {@code null} always means that we need to return full detached + * data row. + * @return Cursor. + * @throws IgniteCheckedException If failed. + */ + public GridCursor find(L lower, L upper, Object x) throws IgniteCheckedException; + /** * Returns a value mapped to the lowest key, or {@code null} if tree is empty * @return Value. 
diff --git a/modules/geospatial/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SpatialIndex.java b/modules/geospatial/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SpatialIndex.java index 2cd36b38db87b..b6125c896c3d0 100644 --- a/modules/geospatial/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SpatialIndex.java +++ b/modules/geospatial/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SpatialIndex.java @@ -32,6 +32,8 @@ import org.apache.ignite.internal.processors.query.h2.H2Cursor; import org.apache.ignite.internal.util.GridCursorIteratorWrapper; import org.apache.ignite.internal.util.lang.GridCursor; +import org.apache.ignite.spi.indexing.IndexingQueryCacheFilter; +import org.apache.ignite.spi.indexing.IndexingQueryFilter; import org.h2.engine.Session; import org.h2.index.Cursor; import org.h2.index.IndexLookupBatch; @@ -322,6 +324,12 @@ private GridCursor rowIterator(Iterator i, TableFilter fi if (!i.hasNext()) return EMPTY_CURSOR; + long time = System.currentTimeMillis(); + + IndexingQueryFilter qryFilter = threadLocalFilter(); + + IndexingQueryCacheFilter qryCacheFilter = qryFilter != null ? 
qryFilter.forCache(getTable().cacheName()) : null; + List rows = new ArrayList<>(); do { @@ -329,11 +337,15 @@ private GridCursor rowIterator(Iterator i, TableFilter fi assert row != null; - rows.add(row); + if (row.expireTime() != 0 && row.expireTime() <= time) + continue; + + if (qryCacheFilter == null || qryCacheFilter.applyPartition(row.partition())) + rows.add(row); } while (i.hasNext()); - return filter(new GridCursorIteratorWrapper(rows.iterator()), threadLocalFilter()); + return new GridCursorIteratorWrapper(rows.iterator()); } /** {@inheritDoc} */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Cursor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Cursor.java index f4c35f2ee4410..01b35046c01b6 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Cursor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Cursor.java @@ -21,7 +21,6 @@ import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row; import org.apache.ignite.internal.util.lang.GridCursor; import org.apache.ignite.internal.util.typedef.internal.U; -import org.apache.ignite.spi.indexing.IndexingQueryCacheFilter; import org.h2.index.Cursor; import org.h2.message.DbException; import org.h2.result.Row; @@ -34,28 +33,16 @@ public class H2Cursor implements Cursor { /** */ private final GridCursor cursor; - /** */ - private final IndexingQueryCacheFilter filter; - /** */ private final long time = U.currentTimeMillis(); /** * @param cursor Cursor. - * @param filter Filter. */ - public H2Cursor(GridCursor cursor, IndexingQueryCacheFilter filter) { + public H2Cursor(GridCursor cursor) { assert cursor != null; this.cursor = cursor; - this.filter = filter; - } - - /** - * @param cursor Cursor. 
- */ - public H2Cursor(GridCursor cursor) { - this(cursor, null); } /** {@inheritDoc} */ @@ -82,8 +69,7 @@ public H2Cursor(GridCursor cursor) { if (row.expireTime() > 0 && row.expireTime() <= time) continue; - if (filter == null || filter.applyPartition(row.partition())) - return true; + return true; } return false; diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java index 6214be4c07b44..f8f17042a6fa9 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.concurrent.atomic.AtomicLong; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; @@ -29,8 +30,10 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.query.h2.database.io.H2ExtrasInnerIO; import org.apache.ignite.internal.processors.query.h2.database.io.H2ExtrasLeafIO; +import org.apache.ignite.internal.processors.query.h2.database.io.H2RowLinkIO; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.spi.indexing.IndexingQueryCacheFilter; import org.h2.result.SearchRow; import org.h2.table.IndexColumn; import org.h2.value.Value; @@ -118,8 +121,20 @@ public H2RowFactory getRowFactory() { } /** {@inheritDoc} */ - @Override protected GridH2Row getRow(BPlusIO io, long pageAddr, int idx, Object ignore) + 
@Override protected GridH2Row getRow(BPlusIO io, long pageAddr, int idx, Object filter) throws IgniteCheckedException { + if (filter != null) { + // Filter out not interesting partitions without deserializing the row. + IndexingQueryCacheFilter filter0 = (IndexingQueryCacheFilter)filter; + + long link = ((H2RowLinkIO)io).getLink(pageAddr, idx); + + int part = PageIdUtils.partId(PageIdUtils.pageId(link)); + + if (!filter0.applyPartition(part)) + return null; + } + return (GridH2Row)io.getLookupRow(this, pageAddr, idx); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java index 3c0ab5e399901..b3307d09802f2 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java @@ -178,7 +178,7 @@ private List getAvailableInlineColumns(IndexColumn[] cols) { H2Tree tree = treeForRead(seg); - return new H2Cursor(tree.find(lower, upper), p); + return new H2Cursor(tree.find(lower, upper, p)); } catch (IgniteCheckedException e) { throw DbException.convert(e); @@ -318,19 +318,27 @@ private List getAvailableInlineColumns(IndexColumn[] cols) { } /** {@inheritDoc} */ - @Override protected GridCursor doFind0( + @Override protected H2Cursor doFind0( IgniteTree t, @Nullable SearchRow first, boolean includeFirst, @Nullable SearchRow last, IndexingQueryFilter filter) { try { - GridCursor range = t.find(first, last); + IndexingQueryCacheFilter p = null; + + if (filter != null) { + String cacheName = getTable().cacheName(); + + p = filter.forCache(cacheName); + } + + GridCursor range = t.find(first, last, p); if (range == null) - return EMPTY_CURSOR; + range = EMPTY_CURSOR; - return filter(range, filter); + return new H2Cursor(range); } catch 
(IgniteCheckedException e) { throw DbException.convert(e); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasInnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasInnerIO.java index b8877e9471011..a4aa600af5983 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasInnerIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasInnerIO.java @@ -33,7 +33,7 @@ /** * Inner page for H2 row references. */ -public class H2ExtrasInnerIO extends BPlusInnerIO { +public class H2ExtrasInnerIO extends BPlusInnerIO implements H2RowLinkIO { /** Payload size. */ private final int payloadSize; @@ -129,12 +129,8 @@ private H2ExtrasInnerIO(short type, int ver, int payloadSize) { PageUtils.putLong(dstPageAddr, dstOff + payloadSize, link); } - /** - * @param pageAddr Page address. - * @param idx Index. - * @return Link to row. - */ - private long getLink(long pageAddr, int idx) { + /** {@inheritDoc} */ + @Override public long getLink(long pageAddr, int idx) { return PageUtils.getLong(pageAddr, offset(idx) + payloadSize); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java index 6161f8dee2063..8723601aae20b 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java @@ -33,7 +33,7 @@ /** * Leaf page for H2 row references. */ -public class H2ExtrasLeafIO extends BPlusLeafIO { +public class H2ExtrasLeafIO extends BPlusLeafIO implements H2RowLinkIO { /** Payload size. 
*/ private final int payloadSize; @@ -126,12 +126,8 @@ private H2ExtrasLeafIO(short type, int ver, int payloadSize) { return ((H2Tree)tree).getRowFactory().getRow(link); } - /** - * @param pageAddr Page address. - * @param idx Index. - * @return Link to row. - */ - private long getLink(long pageAddr, int idx) { + /** {@inheritDoc} */ + @Override public long getLink(long pageAddr, int idx) { return PageUtils.getLong(pageAddr, offset(idx) + payloadSize); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java index c28d067e90972..5d4a4e6960930 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java @@ -28,6 +28,7 @@ import org.apache.ignite.internal.managers.communication.GridMessageListener; import org.apache.ignite.internal.processors.cache.CacheObject; import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.query.h2.H2Cursor; import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2IndexRangeRequest; import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2IndexRangeResponse; import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2RowMessage; @@ -46,7 +47,6 @@ import org.apache.ignite.logger.NullLogger; import org.apache.ignite.plugin.extensions.communication.Message; import org.apache.ignite.spi.indexing.IndexingQueryFilter; -import org.apache.ignite.spi.indexing.IndexingQueryCacheFilter; import org.h2.engine.Session; import org.h2.index.BaseIndex; import org.h2.index.Cursor; @@ -73,7 +73,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.NoSuchElementException; import java.util.UUID; 
import java.util.concurrent.BlockingQueue; import java.util.concurrent.Future; @@ -260,17 +259,6 @@ public final int getDistributedMultiplier(Session ses, TableFilter[] filters, in return (GridH2Table)super.getTable(); } - /** - * Filters rows from expired ones and using predicate. - * - * @param cursor GridCursor over rows. - * @param filter Optional filter. - * @return Filtered iterator. - */ - protected GridCursor filter(GridCursor cursor, IndexingQueryFilter filter) { - return new FilteringCursor(cursor, U.currentTimeMillis(), filter, getTable().cacheName()); - } - /** * @return Filter for currently running query or {@code null} if none. */ @@ -1570,7 +1558,7 @@ protected IgniteTree treeForRead(int segment) { * @param filter Filter. * @return Iterator over rows in given range. */ - protected GridCursor doFind0( + protected H2Cursor doFind0( IgniteTree t, @Nullable SearchRow first, boolean includeFirst, @@ -1579,78 +1567,12 @@ protected GridCursor doFind0( throw new UnsupportedOperationException(); } - /** - * Cursor which filters by expiration time and predicate. - */ - protected static class FilteringCursor implements GridCursor { - /** */ - private final GridCursor cursor; - - /** */ - private final IndexingQueryCacheFilter fltr; - - /** */ - private final long time; - - /** */ - private GridH2Row next; - - /** - * @param cursor GridCursor. - * @param time Time for expired rows filtering. - * @param qryFilter Filter. - * @param cacheName Cache name. - */ - protected FilteringCursor(GridCursor cursor, long time, IndexingQueryFilter qryFilter, - String cacheName) { - this.cursor = cursor; - this.time = time; - this.fltr = qryFilter != null ? qryFilter.forCache(cacheName) : null; - } - - /** - * @param row Row. - * @return If this row was accepted. 
- */ - @SuppressWarnings({"unchecked", "SimplifiableIfStatement"}) - protected boolean accept(GridH2Row row) { - if (row.expireTime() != 0 && row.expireTime() <= time) - return false; - - return fltr == null || fltr.applyPartition(row.partition()); - } - - /** {@inheritDoc} */ - @Override public boolean next() throws IgniteCheckedException { - next = null; - - while (cursor.next()) { - GridH2Row t = cursor.get(); - - if (accept(t)) { - next = t; - return true; - } - } - - return false; - } - - /** {@inheritDoc} */ - @Override public GridH2Row get() throws IgniteCheckedException { - if (next == null) - throw new NoSuchElementException(); - - return next; - } - } - /** * */ private static final class CursorIteratorWrapper implements Iterator { /** */ - private final GridCursor cursor; + private final H2Cursor cursor; /** Next element. */ private GridH2Row next; @@ -1658,18 +1580,13 @@ private static final class CursorIteratorWrapper implements Iterator /** * @param cursor Cursor. */ - private CursorIteratorWrapper(GridCursor cursor) { + private CursorIteratorWrapper(H2Cursor cursor) { assert cursor != null; this.cursor = cursor; - try { - if (cursor.next()) - next = cursor.get(); - } - catch (IgniteCheckedException e) { - throw U.convertException(e); - } + if (cursor.next()) + next = (GridH2Row)cursor.get(); } /** {@inheritDoc} */ @@ -1679,19 +1596,14 @@ private CursorIteratorWrapper(GridCursor cursor) { /** {@inheritDoc} */ @Override public GridH2Row next() { - try { - GridH2Row res = next; + GridH2Row res = next; - if (cursor.next()) - next = cursor.get(); - else - next = null; + if (cursor.next()) + next = (GridH2Row)cursor.get(); + else + next = null; - return res; - } - catch (IgniteCheckedException e) { - throw U.convertException(e); - } + return res; } /** {@inheritDoc} */ From 7f39934721111d3170d10eaf6f910b15f1889bec Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Fri, 3 Nov 2017 11:16:11 +0300 Subject: [PATCH 087/243] IGNITE-6778 Fixed 
GridCacheDatabaseSharedManager#persistenceEnabled method usages --- .../affinity/GridAffinityAssignmentCache.java | 16 ++++++- .../cache/CacheAffinitySharedManager.java | 3 +- .../processors/cache/CacheGroupContext.java | 6 ++- .../processors/cache/GridCacheContext.java | 4 +- .../processors/cache/GridCacheMapEntry.java | 2 +- .../processors/cache/GridCacheProcessor.java | 6 ++- .../cache/GridCacheSharedContext.java | 2 +- .../processors/cache/GridCacheUtils.java | 15 +++++++ .../processors/cache/StoredCacheData.java | 9 ++++ .../dht/GridDhtLocalPartition.java | 2 +- .../dht/GridDhtPartitionTopologyImpl.java | 2 +- .../preloader/GridDhtPartitionDemander.java | 2 +- .../preloader/GridDhtPartitionSupplier.java | 4 +- .../GridDhtPartitionsExchangeFuture.java | 43 +++++++++++-------- .../dht/preloader/GridDhtPreloader.java | 6 +-- .../GridCacheDatabaseSharedManager.java | 15 ++++--- .../IgniteCacheDatabaseSharedManager.java | 7 --- .../cluster/GridClusterStateProcessor.java | 2 +- .../processors/query/GridQueryIndexing.java | 4 +- .../processors/query/GridQueryProcessor.java | 14 +++--- ...niteClientCacheInitializationFailTest.java | 2 +- .../processors/query/h2/IgniteH2Indexing.java | 11 +---- 22 files changed, 107 insertions(+), 70 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java index cbc4eae4c0bfa..c451b77ff804a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java @@ -589,8 +589,20 @@ private void awaitTopologyVersion(AffinityTopologyVersion topVer) { IgniteInternalFuture fut = readyFuture(topVer); - if (fut != null) - fut.get(); + if (fut != null) { + Thread curTh = Thread.currentThread(); + + 
String threadName = curTh.getName(); + + try { + curTh.setName(threadName + " (waiting " + topVer + ")"); + + fut.get(); + } + finally { + curTh.setName(threadName); + } + } } catch (IgniteCheckedException e) { throw new IgniteException("Failed to wait for affinity ready future for topology version: " + topVer, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java index eaaa24d19ba61..a67aeec0648a0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java @@ -2592,8 +2592,7 @@ void clear() { * @param sql SQL flag. */ private void saveCacheConfiguration(CacheConfiguration cfg, boolean sql) { - if (cctx.pageStore() != null && cctx.database().persistenceEnabled() && - CU.isPersistentCache(cfg, cctx.gridConfig().getDataStorageConfiguration()) && + if (cctx.pageStore() != null && CU.isPersistentCache(cfg, cctx.gridConfig().getDataStorageConfiguration()) && !cctx.kernalContext().clientNode()) { try { StoredCacheData data = new StoredCacheData(cfg); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java index 18acacf8bc5d6..d9523e3f3281e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java @@ -342,7 +342,9 @@ private void removeCacheContext(GridCacheContext cctx) { public GridCacheContext singleCacheContext() { List caches = this.caches; - assert !sharedGroup() && caches.size() == 1 : ctx.kernalContext().isStopping(); + assert !sharedGroup() && 
caches.size() == 1 : + "stopping=" + ctx.kernalContext().isStopping() + ", groupName=" + ccfg.getGroupName() + + ", caches=" + caches; return caches.get(0); } @@ -484,7 +486,7 @@ public boolean queriesEnabled() { * @return {@code True} if fast eviction is allowed. */ public boolean allowFastEviction() { - return ctx.database().persistenceEnabled() && !queriesEnabled(); + return persistenceEnabled() && !queriesEnabled(); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java index 34d3c97137c0d..3581f9fbb8b28 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java @@ -2038,9 +2038,9 @@ public boolean allowFastLocalRead(int part, List affNodes, Affinity boolean result = affinityNode() && rebalanceEnabled() && hasPartition(part, affNodes, topVer); // When persistence is enabled, only reading from partitions with OWNING state is allowed. 
- assert !result || !ctx.cache().context().database().persistenceEnabled() || + assert !result || !group().persistenceEnabled() || topology().partitionState(localNodeId(), part) == OWNING : - "result = " + result + ", persistenceEnabled = " + ctx.cache().context().database().persistenceEnabled() + + "result = " + result + ", persistenceEnabled = " + group().persistenceEnabled() + ", partitionState = " + topology().partitionState(localNodeId(), part); return result; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java index 0bdade6510113..63cf6300d71d6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java @@ -2545,7 +2545,7 @@ protected final boolean hasValueUnlocked() { boolean walEnabled = !cctx.isNear() && cctx.group().persistenceEnabled(); - if (cctx.shared().database().persistenceEnabled()) { + if (cctx.group().persistenceEnabled()) { unswap(false); if (!isNew()) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java index ad8f74a71bd57..a990c3f324d57 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java @@ -1026,7 +1026,9 @@ private void stopCacheOnReconnect(GridCacheContext cctx, List assert desc != null : cctx.name(); - ctx.query().onCacheStop0(cctx.name(), false); + boolean rmvIdx = !cache.context().group().persistenceEnabled(); + + ctx.query().onCacheStop0(cctx.name(), rmvIdx); ctx.query().onCacheStart0(cctx, desc.schema()); } } @@ -1150,7 +1152,7 @@ private 
void stopCache(GridCacheAdapter cache, boolean cancel, boolean des cache.stop(); - ctx.kernalContext().query().onCacheStop(ctx, destroy); + ctx.kernalContext().query().onCacheStop(ctx, !cache.context().group().persistenceEnabled() || destroy); if (isNearEnabled(ctx)) { GridDhtCacheAdapter dht = ctx.near().dht(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java index d8614b585ecba..5bf1343720b66 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java @@ -206,7 +206,7 @@ public GridCacheSharedContext( locStoreCnt = new AtomicInteger(); - if (dbMgr != null && dbMgr.persistenceEnabled()) + if (dbMgr != null && CU.isPersistenceEnabled(kernalCtx.config())) dhtAtomicUpdCnt = new AtomicIntegerArray(kernalCtx.config().getSystemThreadPoolSize()); msgLog = kernalCtx.log(CU.CACHE_MSG_LOG_CATEGORY); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java index 1f289d0840e37..53fb4d3b49938 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java @@ -1686,6 +1686,21 @@ public static boolean isPersistentCache(CacheConfiguration ccfg, DataStorageConf if (dsCfg == null) return false; + // Special handling for system cache is needed. 
+ if (isSystemCache(ccfg.getName())) { + if (dsCfg.getDefaultDataRegionConfiguration().isPersistenceEnabled()) + return true; + + if (dsCfg.getDataRegionConfigurations() != null) { + for (DataRegionConfiguration drConf : dsCfg.getDataRegionConfigurations()) { + if (drConf.isPersistenceEnabled()) + return true; + } + } + + return false; + } + String regName = ccfg.getDataRegionName(); if (regName == null || regName.equals(dsCfg.getDefaultDataRegionConfiguration().getName())) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/StoredCacheData.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/StoredCacheData.java index 39c3cd156fc41..5a88036fb91ae 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/StoredCacheData.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/StoredCacheData.java @@ -22,7 +22,9 @@ import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.A; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.marshaller.jdk.JdkMarshaller; /** @@ -36,9 +38,11 @@ public class StoredCacheData implements Serializable { private static final long serialVersionUID = 0L; /** Cache configuration. */ + @GridToStringInclude private final CacheConfiguration ccfg; /** Query entities. */ + @GridToStringInclude private Collection qryEntities; /** SQL flag - {@code true} if cache was created with {@code CREATE TABLE}. 
*/ @@ -90,4 +94,9 @@ public boolean sql() { public void sql(boolean sql) { this.sql = sql; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(StoredCacheData.class, this); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java index 17d1e49bd1881..536d5209cb9b6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java @@ -553,7 +553,7 @@ public void restoreState(GridDhtPartitionState stateToRestore) { * @return {@code true} if cas succeeds. */ private boolean casState(long state, GridDhtPartitionState toState) { - if (ctx.database().persistenceEnabled() && grp.dataRegion().config().isPersistenceEnabled()) { + if (grp.persistenceEnabled()) { synchronized (this) { boolean update = this.state.compareAndSet(state, setPartState(state, toState)); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java index 380066a7e9c46..7abe09b5a74c9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java @@ -1355,7 +1355,7 @@ private boolean shouldOverridePartitionMap(GridDhtPartitionMap currentMap, GridD GridDhtPartitionMap nodeMap = partMap.get(ctx.localNodeId()); - if (nodeMap != null && ctx.database().persistenceEnabled() && readyTopVer.initialized()) { + if (nodeMap != 
null && grp.persistenceEnabled() && readyTopVer.initialized()) { for (Map.Entry e : nodeMap.entrySet()) { int p = e.getKey(); GridDhtPartitionState state = e.getValue(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java index 54661ec2d6282..5407c7df8f931 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java @@ -517,7 +517,7 @@ private GridDhtPartitionDemandMessage createDemandMessage(GridDhtPartitionDemand for (Integer part : parts) { try { - if (ctx.database().persistenceEnabled()) { + if (grp.persistenceEnabled()) { if (partCntrs == null) partCntrs = new HashMap<>(parts.size(), 1.0f); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java index e25ace72148bf..6eb31ed37a8c6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java @@ -304,12 +304,12 @@ public void handleDemandMessage(int idx, UUID id, GridDhtPartitionDemandMessage d.isHistorical(part) ? 
d.partitionCounter(part) : null); if (!iter.historical()) { - assert !grp.shared().database().persistenceEnabled() || !d.isHistorical(part); + assert !grp.persistenceEnabled() || !d.isHistorical(part); s.clean(part); } else - assert grp.shared().database().persistenceEnabled() && d.isHistorical(part); + assert grp.persistenceEnabled() && d.isHistorical(part); } else iter = (IgniteRebalanceIterator)sctx.entryIt; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java index 9c7451eef617d..7468b25a56248 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java @@ -711,12 +711,16 @@ private void initCachesOnLocalJoin() throws IgniteCheckedException { List> caches = cctx.cache().cachesToStartOnLocalJoin(); - if (cctx.database().persistenceEnabled() && !cctx.kernalContext().clientNode()) { + if (!cctx.kernalContext().clientNode()) { List startDescs = new ArrayList<>(); if (caches != null) { - for (T2 c : caches) - startDescs.add(c.get1()); + for (T2 c : caches) { + DynamicCacheDescriptor startDesc = c.get1(); + + if (CU.isPersistentCache(startDesc.cacheConfiguration(), cctx.gridConfig().getDataStorageConfiguration())) + startDescs.add(startDesc); + } } cctx.database().readCheckpointAndRestoreMemory(startDescs); @@ -805,11 +809,15 @@ private ExchangeType onClusterStateChangeRequest(boolean crd) { try { cctx.activate(); - if (cctx.database().persistenceEnabled() && !cctx.kernalContext().clientNode()) { + if (!cctx.kernalContext().clientNode()) { List startDescs = new ArrayList<>(); - for (ExchangeActions.CacheActionData 
startReq : exchActions.cacheStartRequests()) - startDescs.add(startReq.descriptor()); + for (ExchangeActions.CacheActionData startReq : exchActions.cacheStartRequests()) { + DynamicCacheDescriptor desc = startReq.descriptor(); + + if (CU.isPersistentCache(desc.cacheConfiguration(), cctx.gridConfig().getDataStorageConfiguration())) + startDescs.add(desc); + } cctx.database().readCheckpointAndRestoreMemory(startDescs); } @@ -2436,22 +2444,21 @@ private void finishExchangeOnCoordinator(@Nullable Collection sndRe * */ private void assignPartitionsStates() { - if (cctx.database().persistenceEnabled()) { - for (Map.Entry e : cctx.affinity().cacheGroups().entrySet()) { - if (e.getValue().config().getCacheMode() == CacheMode.LOCAL) - continue; + for (Map.Entry e : cctx.affinity().cacheGroups().entrySet()) { + CacheGroupDescriptor grpDesc = e.getValue(); + if (grpDesc.config().getCacheMode() == CacheMode.LOCAL) + continue; - GridDhtPartitionTopology top; + if (!CU.isPersistentCache(grpDesc.config(), cctx.gridConfig().getDataStorageConfiguration())) + continue; - CacheGroupContext grpCtx = cctx.cache().cacheGroup(e.getKey()); + CacheGroupContext grpCtx = cctx.cache().cacheGroup(e.getKey()); - if (grpCtx != null) - top = grpCtx.topology(); - else - top = cctx.exchange().clientTopology(e.getKey(), events().discoveryCache()); + GridDhtPartitionTopology top = grpCtx != null ? 
+ grpCtx.topology() : + cctx.exchange().clientTopology(e.getKey(), events().discoveryCache()); - assignPartitionStates(top); - } + assignPartitionStates(top); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java index c8d104111b383..0b499fb6c7aaa 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java @@ -220,7 +220,7 @@ private IgniteCheckedException stopError() { ClusterNode histSupplier = null; - if (ctx.database().persistenceEnabled() && exchFut != null) { + if (grp.persistenceEnabled() && exchFut != null) { UUID nodeId = exchFut.partitionHistorySupplier(grp.groupId(), p); if (nodeId != null) @@ -235,7 +235,7 @@ private IgniteCheckedException stopError() { continue; // For. 
} - assert ctx.database().persistenceEnabled(); + assert grp.persistenceEnabled(); assert remoteOwners(p, topVer).contains(histSupplier) : remoteOwners(p, topVer); GridDhtPartitionDemandMessage msg = assigns.get(histSupplier); @@ -250,7 +250,7 @@ private IgniteCheckedException stopError() { msg.addPartition(p, true); } else { - if (ctx.database().persistenceEnabled()) { + if (grp.persistenceEnabled()) { if (part.state() == RENTING || part.state() == EVICTED) { IgniteInternalFuture rentFut = part.rent(false); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index 820dbbe12ef4f..ff15cc02d8ae2 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -891,11 +891,6 @@ private void shutdownCheckpointer(boolean cancel) { return idxRebuildFuts.get(cacheId); } - /** {@inheritDoc} */ - @Override public boolean persistenceEnabled() { - return true; - } - /** {@inheritDoc} */ @Override public void onCacheGroupsStopped( Collection> stoppedGrps @@ -1978,6 +1973,9 @@ public class Checkpointer extends GridWorker { /** Shutdown now. */ private volatile boolean shutdownNow; + /** */ + private long lastCpTs; + /** * @param gridName Grid name. * @param name Thread name. @@ -2355,6 +2353,13 @@ private Checkpoint markCheckpointBegin(CheckpointMetricsTracker tracker) throws long cpTs = System.currentTimeMillis(); + // This can happen in an unlikely event of two checkpoints happening + // within a currentTimeMillis() granularity window. 
+ if (cpTs == lastCpTs) + cpTs++; + + lastCpTs = cpTs; + CheckpointEntry cpEntry = writeCheckpointEntry( tmpWriteBuf, cpTs, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index f00edbeaf15ec..8771f6adc9168 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -667,13 +667,6 @@ private void unregisterMBean(String name) { } } - /** - * - */ - public boolean persistenceEnabled() { - return false; - } - /** {@inheritDoc} */ @Override public boolean checkpointLockIsHeldByThread() { return true; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java index 3cd0451c37e69..927fd9048b657 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java @@ -454,7 +454,7 @@ public IgniteInternalFuture changeGlobalState(final boolean activate) { List storedCfgs = null; - if (activate && sharedCtx.database().persistenceEnabled()) { + if (activate && CU.isPersistenceEnabled(ctx.config())) { try { Map cfgs = ctx.cache().context().pageStore().readCacheConfigurations(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java index 93d541d480523..5fb8e1cec05ea 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java @@ -197,10 +197,10 @@ public void registerCache(String cacheName, String schemaName, GridCacheContext< * Unregisters cache. * * @param cacheName Cache name. - * @param destroy Destroy flag. + * @param rmvIdx If {@code true}, will remove index. * @throws IgniteCheckedException If failed to drop cache schema. */ - public void unregisterCache(String cacheName, boolean destroy) throws IgniteCheckedException; + public void unregisterCache(String cacheName, boolean rmvIdx) throws IgniteCheckedException; /** * Registers type if it was not known before or updates it otherwise. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java index 0728e93cbed1c..42463ed64c8be 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java @@ -846,9 +846,9 @@ public void onCacheStart(GridCacheContext cctx, QuerySchema schema) throws Ignit /** * @param cctx Cache context. - * @param destroy Destroy flag. + * @param removeIdx If {@code true}, will remove index. */ - public void onCacheStop(GridCacheContext cctx, boolean destroy) { + public void onCacheStop(GridCacheContext cctx, boolean removeIdx) { if (idx == null) return; @@ -856,7 +856,7 @@ public void onCacheStop(GridCacheContext cctx, boolean destroy) { return; try { - onCacheStop0(cctx.name(), destroy); + onCacheStop0(cctx.name(), removeIdx); } finally { busyLock.leaveBusy(); @@ -1527,9 +1527,9 @@ private void registerCache0( * Use with {@link #busyLock} where appropriate. * * @param cacheName Cache name. - * @param destroy Destroy flag. 
+ * @param rmvIdx If {@code true}, will remove index. */ - public void onCacheStop0(String cacheName, boolean destroy) { + public void onCacheStop0(String cacheName, boolean rmvIdx) { if (idx == null) return; @@ -1567,7 +1567,7 @@ public void onCacheStop0(String cacheName, boolean destroy) { // Notify indexing. try { - idx.unregisterCache(cacheName, destroy); + idx.unregisterCache(cacheName, rmvIdx); } catch (Exception e) { U.error(log, "Failed to clear indexing on cache unregister (will ignore): " + cacheName, e); @@ -2559,7 +2559,7 @@ private void processStatusMessage(SchemaOperationStatusMessage msg) { private void saveCacheConfiguration(DynamicCacheDescriptor desc) { GridCacheSharedContext cctx = ctx.cache().context(); - if (cctx.pageStore() != null && cctx.database().persistenceEnabled() && !cctx.kernalContext().clientNode() && + if (cctx.pageStore() != null && !cctx.kernalContext().clientNode() && CU.isPersistentCache(desc.cacheConfiguration(), cctx.gridConfig().getDataStorageConfiguration())) { CacheConfiguration cfg = desc.cacheConfiguration(); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java index 83dd9c94c29f1..ab8fbbf26af58 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java @@ -299,7 +299,7 @@ private static class FailedIndexing implements GridQueryIndexing { } /** {@inheritDoc} */ - @Override public void unregisterCache(String spaceName, boolean destroy) throws IgniteCheckedException { + @Override public void unregisterCache(String spaceName, boolean rmvIdx) throws IgniteCheckedException { // No-op } diff --git 
a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 7191cfa83a7d5..690b9ff13c9a6 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -2276,11 +2276,6 @@ private void createSqlFunctions(String schema, Class[] clss) throws IgniteChe mapQryExec.cancelLazyWorkers(); - if (ctx != null && !ctx.cache().context().database().persistenceEnabled()) { - for (H2Schema schema : schemas.values()) - schema.dropAll(); - } - for (Connection c : conns) U.close(c, log); @@ -2338,7 +2333,7 @@ private boolean isDefaultSchema(String schemaName) { } /** {@inheritDoc} */ - @Override public void unregisterCache(String cacheName, boolean destroy) { + @Override public void unregisterCache(String cacheName, boolean rmvIdx) { String schemaName = schema(cacheName); H2Schema schema = schemas.get(schemaName); @@ -2356,9 +2351,7 @@ private boolean isDefaultSchema(String schemaName) { for (H2TableDescriptor tbl : schema.tables()) { if (F.eq(tbl.cache().name(), cacheName)) { try { - boolean removeIdx = !ctx.cache().context().database().persistenceEnabled() || destroy; - - tbl.table().setRemoveIndexOnDestroy(removeIdx); + tbl.table().setRemoveIndexOnDestroy(rmvIdx); dropTable(tbl); } From 61e07a61d51de3ae5f06396e68ac4628ab39ae61 Mon Sep 17 00:00:00 2001 From: devozerov Date: Fri, 3 Nov 2017 14:04:33 +0300 Subject: [PATCH 088/243] IGNITE-6825: SQL: Fixed GridH2Table unlock in case of interrupt. This closes #2976. 
--- .../processors/query/h2/opt/GridH2Table.java | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java index 7ecce2ff00629..ea05ab10e8f5f 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java @@ -238,14 +238,13 @@ public GridCacheContext cache() { /** {@inheritDoc} */ @Override public boolean lock(Session ses, boolean exclusive, boolean force) { - Boolean putRes = sessions.putIfAbsent(ses, exclusive); + // In accordance with base method semantics, we'll return true if we were already exclusively locked. + Boolean res = sessions.get(ses); - // In accordance with base method semantics, we'll return true if we were already exclusively locked - if (putRes != null) - return putRes; - - ses.addLock(this); + if (res != null) + return res; + // Acquire the lock. lock(exclusive); if (destroyed) { @@ -254,6 +253,11 @@ public GridCacheContext cache() { throw new IllegalStateException("Table " + identifierString() + " already destroyed."); } + // Mutate state. + sessions.put(ses, exclusive); + + ses.addLock(this); + return false; } From 0721b214e714a9dbc7599bf1d76e380f599d1bbb Mon Sep 17 00:00:00 2001 From: Alexander Fedotov Date: Fri, 3 Nov 2017 14:12:21 +0300 Subject: [PATCH 089/243] IGNITE-4172: SQL: Added support for Java 8 Time API classes in date\time functions. This closes #2438. 
--- .../processors/query/GridQueryProcessor.java | 2 +- .../internal/processors/query/QueryUtils.java | 53 ++- .../util/Jsr310Java8DateTimeApiUtils.java | 80 ++++ modules/indexing/pom.xml | 48 +++ ...yWithJsr310Java8DateTimeApiFieldsTest.java | 351 ++++++++++++++++++ ...heQueryJsr310Java8DateTimeApiBaseTest.java | 88 +++++ ...sr310Java8DateTimeApiSupportTestSuite.java | 38 ++ 7 files changed, 641 insertions(+), 19 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/util/Jsr310Java8DateTimeApiUtils.java create mode 100644 modules/indexing/src/test/java8/org/apache/ignite/internal/processors/query/h2/CacheQueryEntityWithJsr310Java8DateTimeApiFieldsTest.java create mode 100644 modules/indexing/src/test/java8/org/apache/ignite/internal/processors/query/h2/CacheQueryJsr310Java8DateTimeApiBaseTest.java create mode 100644 modules/indexing/src/test/java8/org/apache/ignite/testsuites/CacheQueryJsr310Java8DateTimeApiSupportTestSuite.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java index 42463ed64c8be..f5fb7818350be 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java @@ -1381,7 +1381,7 @@ public void dynamicTableCreate(String schemaName, QueryEntity entity, String tem if (ccfg == null) { if (QueryUtils.TEMPLATE_PARTITIONED.equalsIgnoreCase(templateName)) ccfg = new CacheConfiguration<>().setCacheMode(CacheMode.PARTITIONED); - else if (QueryUtils.TEMPLATE_REPLICÄTED.equalsIgnoreCase(templateName)) + else if (QueryUtils.TEMPLATE_REPLICATED.equalsIgnoreCase(templateName)) ccfg = new CacheConfiguration<>().setCacheMode(CacheMode.REPLICATED); else throw new SchemaOperationException(SchemaOperationException.CODE_CACHE_NOT_FOUND, 
templateName); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java index 1b61ce938d874..9584e05f27fd0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java @@ -21,7 +21,9 @@ import java.math.BigDecimal; import java.sql.Time; import java.sql.Timestamp; +import java.util.Arrays; import java.util.Collection; +import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; @@ -54,9 +56,11 @@ import org.apache.ignite.internal.processors.query.property.QueryPropertyAccessor; import org.apache.ignite.internal.processors.query.property.QueryReadOnlyMethodsAccessor; import org.apache.ignite.internal.processors.query.schema.SchemaOperationException; +import org.apache.ignite.internal.util.Jsr310Java8DateTimeApiUtils; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.IgniteSystemProperties.IGNITE_INDEXING_DISCOVERY_HISTORY_SIZE; @@ -82,7 +86,7 @@ public class QueryUtils { public static final String TEMPLATE_PARTITIONED = "PARTITIONED"; /** Well-known template name for REPLICATED cache. */ - public static final String TEMPLATE_REPLICÄTED = "REPLICATED"; + public static final String TEMPLATE_REPLICATED = "REPLICATED"; /** Discovery history size. 
*/ private static final int DISCO_HIST_SIZE = getInteger(IGNITE_INDEXING_DISCOVERY_HISTORY_SIZE, 1000); @@ -91,23 +95,36 @@ public class QueryUtils { private static final Class GEOMETRY_CLASS = U.classForName("com.vividsolutions.jts.geom.Geometry", null); /** */ - private static final Set> SQL_TYPES = new HashSet<>(F.>asList( - Integer.class, - Boolean.class, - Byte.class, - Short.class, - Long.class, - BigDecimal.class, - Double.class, - Float.class, - Time.class, - Timestamp.class, - java.util.Date.class, - java.sql.Date.class, - String.class, - UUID.class, - byte[].class - )); + private static final Set> SQL_TYPES = createSqlTypes(); + + /** + * Creates SQL types set. + * + * @return SQL types set. + */ + @NotNull private static Set> createSqlTypes() { + Set> sqlClasses = new HashSet<>(Arrays.>asList( + Integer.class, + Boolean.class, + Byte.class, + Short.class, + Long.class, + BigDecimal.class, + Double.class, + Float.class, + Time.class, + Timestamp.class, + Date.class, + java.sql.Date.class, + String.class, + UUID.class, + byte[].class + )); + + sqlClasses.addAll(Jsr310Java8DateTimeApiUtils.jsr310ApiClasses()); + + return sqlClasses; + } /** * Get table name for entity. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/Jsr310Java8DateTimeApiUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/util/Jsr310Java8DateTimeApiUtils.java new file mode 100644 index 0000000000000..9febf19c917ce --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/Jsr310Java8DateTimeApiUtils.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.util; + +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.NotNull; + +import java.util.ArrayList; +import java.util.Collection; + +/** + * Provides utility functions for JSR-310 Java 8 Date and Time API types + * based on reflection. + */ +public final class Jsr310Java8DateTimeApiUtils { + /** Class. */ + private static final Class LOCAL_TIME_CLASS = U.classForName("java.time.LocalTime", null); + + /** Class. */ + private static final Class LOCAL_DATE_CLASS = U.classForName("java.time.LocalDate", null); + + /** Class. */ + private static final Class LOCAL_DATE_TIME_CLASS = U.classForName("java.time.LocalDateTime", null); + + /** JSR-310 API classes. */ + private static final Collection> JSR_310_API_CLASSES = createJsr310ApiClassesCollection(); + + /** + * Creates a collection of the available JSR-310 classes. + * + * @return Collection of the available JSR-310 classes. + */ + @NotNull private static Collection> createJsr310ApiClassesCollection() { + Collection> res = new ArrayList<>(3); + + if (LOCAL_DATE_CLASS != null) + res.add(LOCAL_DATE_CLASS); + + if (LOCAL_TIME_CLASS != null) + res.add(LOCAL_TIME_CLASS); + + if (LOCAL_DATE_TIME_CLASS != null) + res.add(LOCAL_DATE_TIME_CLASS); + + return res; + } + + /** + * Default private constructor. + * + *

        Prevents creation of instances of this class.

        + */ + private Jsr310Java8DateTimeApiUtils() { + // No-op + } + + /** + * Returns the available JSR-310 classes. + * + * @return Available JSR-310 classes. + */ + @NotNull public static Collection> jsr310ApiClasses() { + return JSR_310_API_CLASSES; + } +} diff --git a/modules/indexing/pom.xml b/modules/indexing/pom.xml index cd310fd58f0bb..a279285d75407 100644 --- a/modules/indexing/pom.xml +++ b/modules/indexing/pom.xml @@ -34,6 +34,10 @@ 2.3.0-SNAPSHOT http://ignite.apache.org + + ${project.build.testSourceDirectory} + + org.apache.ignite @@ -121,8 +125,52 @@ + + + java8 + + [1.8,) + + + -Xdoclint:none + ${project.build.testSourceDirectory}/../java8 + + + + + maven-compiler-plugin + + 1.8 + 1.8 + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 1.9.1 + + + add-tests + generate-test-sources + + add-test-source + + + + ${java8.test.folder} + + + + + + org.apache.maven.plugins maven-jar-plugin diff --git a/modules/indexing/src/test/java8/org/apache/ignite/internal/processors/query/h2/CacheQueryEntityWithJsr310Java8DateTimeApiFieldsTest.java b/modules/indexing/src/test/java8/org/apache/ignite/internal/processors/query/h2/CacheQueryEntityWithJsr310Java8DateTimeApiFieldsTest.java new file mode 100644 index 0000000000000..903b8dca86efc --- /dev/null +++ b/modules/indexing/src/test/java8/org/apache/ignite/internal/processors/query/h2/CacheQueryEntityWithJsr310Java8DateTimeApiFieldsTest.java @@ -0,0 +1,351 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query.h2; + +import java.io.Serializable; +import java.sql.Date; +import java.sql.Time; +import java.sql.Timestamp; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.util.List; +import java.util.Objects; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.cache.query.annotations.QuerySqlField; +import org.apache.ignite.configuration.CacheConfiguration; + +/** + * Tests queries against entities with JSR-310 Java 8 Date and Time API fields. + */ +public class CacheQueryEntityWithJsr310Java8DateTimeApiFieldsTest extends CacheQueryJsr310Java8DateTimeApiBaseTest { + /** + * Entity containing JSR-310 fields. + */ + private static class EntityWithJsr310Fields implements Serializable { + + /** Serial version UID. */ + private static final long serialVersionUID = 1L; + + /** ID. */ + @QuerySqlField(index = true) + private Long id; + + /** {@link LocalTime} field. */ + @QuerySqlField(index = true) + private LocalTime locTime; + + /** {@link LocalDate} field. */ + @QuerySqlField(index = true) + private LocalDate locDate; + + /** {@link LocalDateTime} field. */ + @QuerySqlField(index = true) + private LocalDateTime locDateTime; + + /** + * Default constructor. + */ + EntityWithJsr310Fields() { + } + + /** + * Copy constructor. + * + * @param entity Entity to copy from. 
+ */ + EntityWithJsr310Fields(EntityWithJsr310Fields entity) { + id = entity.id; + locTime = LocalTime.from(entity.locTime); + locDate = LocalDate.from(entity.locDate); + locDateTime = LocalDateTime.from(entity.locDateTime); + } + + /** + * Constructor. + * + * @param id ID. + * @param locTime {@link LocalTime} value. + * @param locDate {@link LocalDate} value. + * @param locDateTime {@link LocalDateTime} value. + */ + EntityWithJsr310Fields(Long id, LocalTime locTime, LocalDate locDate, LocalDateTime locDateTime) { + this.id = id; + this.locTime = locTime; + this.locDate = locDate; + this.locDateTime = locDateTime; + } + + /** + * Returns the ID. + * + * @return ID. + */ + public Long getId() { + return id; + } + + /** + * Sets the ID. + * + * @param id ID. + */ + public void setId(Long id) { + this.id = id; + } + + /** + * Returns the {@link LocalDateTime} field value + * + * @return {@link LocalDateTime} field value; + */ + public LocalDateTime getLocalDateTime() { + return locDateTime; + } + + /** + * Returns the {@link LocalDateTime} field value. + * + * @param locDateTime {@link LocalDateTime} value. + */ + public void setLocalDateTime(LocalDateTime locDateTime) { + this.locDateTime = locDateTime; + } + + /** + * Returns the {@link LocalDate} field value. + * + * @return {@link LocalDate} field value. + */ + public LocalDate getLocalDate() { + return locDate; + } + + /** + * Sets the {@link LocalDate} field value. + * + * @param locDate {@link LocalDate} value. + */ + public void setLocalDate(LocalDate locDate) { + this.locDate = locDate; + } + + /** + * Returns the {@link LocalTime} field value. + * + * @return {@link LocalTime} field value. + */ + public LocalTime getLocalTime() { + return locTime; + } + + /** + * Sets the {@link LocalTime} field value. + * + * @param locTime {@link LocalTime} value. 
+ */ + public void setLocalTime(LocalTime locTime) { + this.locTime = locTime; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object o) { + if (this == o) + return true; + + if (o == null || getClass() != o.getClass()) + return false; + + EntityWithJsr310Fields fields = (EntityWithJsr310Fields)o; + + return Objects.equals(id, fields.id) && Objects.equals(locDateTime, fields.locDateTime) && + Objects.equals(locDate, fields.locDate) && Objects.equals(locTime, fields.locTime); + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return Objects.hash(id, locDateTime, locDate, locTime); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return "EntityWithJsr310Fields{" + "id=" + id + ", locDateTime=" + locDateTime + ", locDate=" + locDate + + ", locTime=" + locTime + '}'; + } + } + + /** Cache. */ + private IgniteCache cache; + + /** Entity with JSR-310 fields instance. */ + private final EntityWithJsr310Fields entity = + new EntityWithJsr310Fields(1L, LOCAL_TIME, LOCAL_DATE, LOCAL_DATE_TIME); + + /** + * Creates a cache configuration. + * + * @return Cache configuration. + */ + private static CacheConfiguration createCacheConfig() { + return createCacheConfig( + "entityWithJava8DataTimeFields", Long.class, EntityWithJsr310Fields.class + ); + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + Ignite ignite = startGridsMultiThreaded(1, true); + cache = ignite.getOrCreateCache(createCacheConfig()); + + cache.put(entity.getId(), entity); + } + + /** + * Tests insertion of an entity. + * + * @throws Exception If failed. 
+ */ + public void testInsertEntityFields() throws Exception { + cache.remove(entity.getId()); + + assertEquals(0, cache.size()); + + SqlFieldsQuery qry = new SqlFieldsQuery( + "insert into EntityWithJsr310Fields(_key, id, locTime, locDate, locDateTime) values(?, ?, ?, ?, ?)" + ).setArgs( + entity.getId(), entity.getId(), entity.getLocalTime(), entity.getLocalDate(), entity.getLocalDateTime() + ); + + List> qryResults = cache.query(qry).getAll(); + + assertEquals(1, qryResults.size()); + assertEquals(1L, qryResults.get(0).get(0)); + assertEquals(1, cache.size()); + assertEquals(entity, cache.get(entity.getId())); + } + + /** + * Tests that DATEDIFF SQL function works for {@link LocalDateTime} + * fields with the time part set to midnight. + * + * @throws Exception If failed. + */ + public void testDateDiffForLocalDateTimeFieldAtMidnight() throws Exception { + SqlFieldsQuery qry = + new SqlFieldsQuery("select DATEDIFF('DAY', locDateTime, CURRENT_DATE ()) from EntityWithJsr310Fields"); + + List> qryResults = cache.query(qry).getAll(); + + assertEquals(1, qryResults.size()); + assertTrue((Long)qryResults.get(0).get(0) >= DAYS_BEFORE_NOW); + } + + /** + * Tests that selection for a {@link LocalTime} field returns {@link Time}. + * + * @throws Exception If failed. + */ + public void testSelectLocalTimeFieldReturnsTime() throws Exception { + SqlFieldsQuery qry = new SqlFieldsQuery("select locTime from EntityWithJsr310Fields"); + + List> qryResults = cache.query(qry).getAll(); + + assertEquals(1, qryResults.size()); + assertEquals(Time.class, qryResults.get(0).get(0).getClass()); + } + + /** + * Tests that selection for a {@link LocalDate} field returns {@link Date}. + * + * @throws Exception If failed. 
+ */ + public void testSelectLocalDateFieldReturnsDate() throws Exception { + SqlFieldsQuery qry = new SqlFieldsQuery("select locDate from EntityWithJsr310Fields"); + + List> qryResults = cache.query(qry).getAll(); + + assertEquals(1, qryResults.size()); + assertEquals(Date.class, qryResults.get(0).get(0).getClass()); + } + + /** + * Tests that selection for a {@link LocalDateTime} field returns {@link Timestamp}. + * + * @throws Exception If failed. + */ + public void testSelectLocalDateTimeFieldReturnsTimestamp() throws Exception { + SqlFieldsQuery qry = new SqlFieldsQuery("select locDateTime from EntityWithJsr310Fields"); + + List> qryResults = cache.query(qry).getAll(); + + assertEquals(1, qryResults.size()); + assertEquals(Timestamp.class, qryResults.get(0).get(0).getClass()); + } + + /** + * Tests selection of an entity by a {@link LocalTime} field. + */ + public void testSelectByAllJsr310Fields() { + SqlFieldsQuery qry = new SqlFieldsQuery( + "select locDate from EntityWithJsr310Fields where locTime = ? and locDate = ? and locDateTime = ?" + ).setArgs(entity.getLocalTime(), entity.getLocalDate(), entity.getLocalDateTime()); + + List> qryResults = cache.query(qry).getAll(); + + assertEquals(1, qryResults.size()); + assertEquals(Date.valueOf(entity.getLocalDate()), qryResults.get(0).get(0)); + } + + /** + * Tests updating of all JSR-310 fields. + */ + public void testUpdateAllJsr310Fields() { + EntityWithJsr310Fields expEntity = new EntityWithJsr310Fields(entity); + + expEntity.setLocalTime(expEntity.getLocalTime().plusHours(1)); + expEntity.setLocalDate(expEntity.getLocalDate().plusDays(1)); + expEntity.setLocalDateTime(LocalDateTime.of(expEntity.getLocalDate(), expEntity.getLocalTime())); + + SqlFieldsQuery qry = new SqlFieldsQuery( + "update EntityWithJsr310Fields set locTime = ?, locDate = ?, locDateTime = ? where id = ?" 
+ ).setArgs(expEntity.getLocalTime(), expEntity.getLocalDate(), expEntity.getLocalDateTime(), entity.getId()); + + List> qryResults = cache.query(qry).getAll(); + + assertEquals(1, qryResults.size()); + assertEquals(1L, qryResults.get(0).get(0)); + assertEquals(expEntity, cache.get(expEntity.getId())); + } + + /** + * Tests deleting by all JSR-310 fields. + */ + public void testDeleteByAllJsr310Fields() { + SqlFieldsQuery qry = new SqlFieldsQuery( + "delete from EntityWithJsr310Fields where locTime = ? and locDate = ? and locDateTime = ?" + ).setArgs(entity.getLocalTime(), entity.getLocalDate(), entity.getLocalDateTime()); + + List> qryResults = cache.query(qry).getAll(); + + assertEquals(1, qryResults.size()); + assertEquals(1L, qryResults.get(0).get(0)); + assertEquals(0, cache.size()); + } +} diff --git a/modules/indexing/src/test/java8/org/apache/ignite/internal/processors/query/h2/CacheQueryJsr310Java8DateTimeApiBaseTest.java b/modules/indexing/src/test/java8/org/apache/ignite/internal/processors/query/h2/CacheQueryJsr310Java8DateTimeApiBaseTest.java new file mode 100644 index 0000000000000..6f643b426b2f3 --- /dev/null +++ b/modules/indexing/src/test/java8/org/apache/ignite/internal/processors/query/h2/CacheQueryJsr310Java8DateTimeApiBaseTest.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query.h2; + +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.CacheWriteSynchronizationMode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +/** + * Base class for JSR-310 Java 8 Date and Time API queries tests. + */ +public abstract class CacheQueryJsr310Java8DateTimeApiBaseTest extends GridCommonAbstractTest { + /** IP finder. */ + private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + + /** {@link LocalTime} instance. */ + protected static final LocalTime LOCAL_TIME = LocalTime.now().minusHours(10); + + /** + * The number of days subtracted from the current time when constructing + * {@link LocalDate} and {@link LocalDateTime} + * instances. + */ + protected static final long DAYS_BEFORE_NOW = 10; + + /** {@link LocalDate} instance. */ + protected static final LocalDate LOCAL_DATE = LocalDate.now().minusDays(DAYS_BEFORE_NOW); + + /** {@link LocalDateTime} instance. 
*/ + protected static final LocalDateTime LOCAL_DATE_TIME = LocalDateTime.of(LOCAL_DATE, LocalTime.MIDNIGHT); + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + TcpDiscoverySpi discoverySpi = (TcpDiscoverySpi)cfg.getDiscoverySpi(); + + discoverySpi.setIpFinder(IP_FINDER); + + return cfg; + } + + /** + * Creates a cache configuration with the specified cache name + * and indexed type key/value pairs. + * + * @param cacheName Cache name + * @param indexedTypes key/value pairs according to {@link CacheConfiguration#setIndexedTypes(Class[])}. + * @param Key type. + * @param Value type. + * @return Cache configuration. + */ + protected static CacheConfiguration createCacheConfig(String cacheName, Class... indexedTypes) { + return new CacheConfiguration(cacheName) + .setCacheMode(CacheMode.REPLICATED) + .setAtomicityMode(CacheAtomicityMode.ATOMIC) + .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC) + .setIndexedTypes(indexedTypes); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + } +} diff --git a/modules/indexing/src/test/java8/org/apache/ignite/testsuites/CacheQueryJsr310Java8DateTimeApiSupportTestSuite.java b/modules/indexing/src/test/java8/org/apache/ignite/testsuites/CacheQueryJsr310Java8DateTimeApiSupportTestSuite.java new file mode 100644 index 0000000000000..aa7aed8a59c01 --- /dev/null +++ b/modules/indexing/src/test/java8/org/apache/ignite/testsuites/CacheQueryJsr310Java8DateTimeApiSupportTestSuite.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.testsuites; + +import junit.framework.TestSuite; +import org.apache.ignite.internal.processors.query.h2.CacheQueryEntityWithJsr310Java8DateTimeApiFieldsTest; + +/** + * Test suite for JSR-310 Java 8 Date and Time API queries. + */ +public class CacheQueryJsr310Java8DateTimeApiSupportTestSuite extends TestSuite { + /** + * @return Test suite. + * @throws Exception If failed. + */ + public static TestSuite suite() throws Exception { + TestSuite suite = new TestSuite("JSR-310 Java 8 Date and Time API Cache Queries Test Suite"); + + suite.addTestSuite(CacheQueryEntityWithJsr310Java8DateTimeApiFieldsTest.class); + + return suite; + } +} From 4fafd18e5763b99ca5eaaf39b65f7df2fbc4a5a8 Mon Sep 17 00:00:00 2001 From: Alexander Fedotov Date: Wed, 25 Oct 2017 14:43:16 +0300 Subject: [PATCH 090/243] IGNITE-6555 When a CacheStore with a @SpringResource annotated field is configured Ignite fails to start via igniteSpringBean. This closes #2893. 
Signed-off-by: nikolay_tikhonov (cherry picked from commit dde348607ed266f87bf594cb2a221ec39d84cfdb) --- .../org/apache/ignite/IgniteSpringBean.java | 32 ++- .../GridSpringBeanSerializationSelfTest.java | 4 +- .../ignite/internal/IgniteSpringBeanTest.java | 2 +- ...ridServiceInjectionSpringResourceTest.java | 10 +- ...SpringBeanSpringResourceInjectionTest.java | 215 ++++++++++++++++++ .../ignite/spring/injection/spring-bean.xml | 84 +++++++ .../testsuites/IgniteSpringTestSuite.java | 2 + 7 files changed, 334 insertions(+), 15 deletions(-) create mode 100644 modules/spring/src/test/java/org/apache/ignite/spring/injection/IgniteSpringBeanSpringResourceInjectionTest.java create mode 100644 modules/spring/src/test/java/org/apache/ignite/spring/injection/spring-bean.xml diff --git a/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java b/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java index 4cba76ed80897..95fceec9f13ec 100644 --- a/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java +++ b/modules/spring/src/main/java/org/apache/ignite/IgniteSpringBean.java @@ -38,9 +38,10 @@ import org.jetbrains.annotations.Nullable; import org.springframework.beans.BeansException; import org.springframework.beans.factory.DisposableBean; -import org.springframework.beans.factory.InitializingBean; +import org.springframework.beans.factory.SmartInitializingSingleton; import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationContextAware; +import org.springframework.context.event.ContextRefreshedEvent; /** * Ignite Spring bean allows to bypass {@link Ignition} methods. @@ -48,9 +49,20 @@ * Spring configuration file directly without invoking static * {@link Ignition} methods. This class can be wired directly from * Spring and can be referenced from within other Spring beans. 
- * By virtue of implementing {@link DisposableBean} and {@link InitializingBean} + * By virtue of implementing {@link DisposableBean} and {@link SmartInitializingSingleton} * interfaces, {@code IgniteSpringBean} automatically starts and stops underlying * grid instance. + * + *

        + * A note should be taken that Ignite instance is started after all other + * Spring beans have been initialized and right before Spring context is refreshed. + * That implies that it's not valid to reference IgniteSpringBean from + * any kind of Spring bean init methods like {@link javax.annotation.PostConstruct}. + * If it's required to reference IgniteSpringBean for other bean + * initialization purposes, it should be done from a {@link ContextRefreshedEvent} + * listener method declared in that bean. + *

        + * *

        *

        Spring Configuration Example

        * Here is a typical example of describing it in Spring file: @@ -79,7 +91,7 @@ * *

        */ -public class IgniteSpringBean implements Ignite, DisposableBean, InitializingBean, +public class IgniteSpringBean implements Ignite, DisposableBean, SmartInitializingSingleton, ApplicationContextAware, Externalizable { /** */ private static final long serialVersionUID = 0L; @@ -145,7 +157,6 @@ public ApplicationContext getApplicationContext() throws BeansException { /** {@inheritDoc} */ @Override public void destroy() throws Exception { - // If there were some errors when afterPropertiesSet() was called. if (g != null) { // Do not cancel started tasks, wait for them. G.stop(g.name(), false); @@ -153,11 +164,16 @@ public ApplicationContext getApplicationContext() throws BeansException { } /** {@inheritDoc} */ - @Override public void afterPropertiesSet() throws Exception { + @Override public void afterSingletonsInstantiated() { if (cfg == null) cfg = new IgniteConfiguration(); - g = IgniteSpring.start(cfg, appCtx); + try { + g = IgniteSpring.start(cfg, appCtx); + } + catch (IgniteCheckedException e) { + throw new IgniteException("Failed to start IgniteSpringBean", e); + } } /** {@inheritDoc} */ @@ -620,7 +636,9 @@ public ApplicationContext getApplicationContext() throws BeansException { protected void checkIgnite() throws IllegalStateException { if (g == null) { throw new IllegalStateException("Ignite is in invalid state to perform this operation. 
" + - "It either not started yet or has already being or have stopped " + + "It either not started yet or has already being or have stopped.\n" + + "Make sure that IgniteSpringBean is not referenced from any kind of Spring bean init methods " + + "like @PostConstruct}.\n" + "[ignite=" + g + ", cfg=" + cfg + ']'); } } diff --git a/modules/spring/src/test/java/org/apache/ignite/internal/GridSpringBeanSerializationSelfTest.java b/modules/spring/src/test/java/org/apache/ignite/internal/GridSpringBeanSerializationSelfTest.java index 6f7d27ad27a2d..6a72213235c09 100644 --- a/modules/spring/src/test/java/org/apache/ignite/internal/GridSpringBeanSerializationSelfTest.java +++ b/modules/spring/src/test/java/org/apache/ignite/internal/GridSpringBeanSerializationSelfTest.java @@ -53,7 +53,7 @@ public class GridSpringBeanSerializationSelfTest extends GridCommonAbstractTest bean.setConfiguration(cfg); - bean.afterPropertiesSet(); + bean.afterSingletonsInstantiated(); } /** @@ -97,4 +97,4 @@ public void testSerialization() throws Exception { assert bean0.cluster().localNode() != null; assert bean0.cluster().localNode().attribute(ATTR_KEY); } -} \ No newline at end of file +} diff --git a/modules/spring/src/test/java/org/apache/ignite/internal/IgniteSpringBeanTest.java b/modules/spring/src/test/java/org/apache/ignite/internal/IgniteSpringBeanTest.java index d465904a83656..c7f0d59780f35 100644 --- a/modules/spring/src/test/java/org/apache/ignite/internal/IgniteSpringBeanTest.java +++ b/modules/spring/src/test/java/org/apache/ignite/internal/IgniteSpringBeanTest.java @@ -31,7 +31,7 @@ public void testInitialization() throws Exception { try (IgniteSpringBean bean = new IgniteSpringBean()) { bean.setConfiguration(getConfiguration("test")); - bean.afterPropertiesSet(); + bean.afterSingletonsInstantiated(); bean.compute(); } diff --git a/modules/spring/src/test/java/org/apache/ignite/spring/injection/GridServiceInjectionSpringResourceTest.java 
b/modules/spring/src/test/java/org/apache/ignite/spring/injection/GridServiceInjectionSpringResourceTest.java index c6a6d0548805f..891c42e277258 100644 --- a/modules/spring/src/test/java/org/apache/ignite/spring/injection/GridServiceInjectionSpringResourceTest.java +++ b/modules/spring/src/test/java/org/apache/ignite/spring/injection/GridServiceInjectionSpringResourceTest.java @@ -26,10 +26,11 @@ import org.apache.ignite.services.Service; import org.apache.ignite.services.ServiceContext; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.springframework.context.event.ContextRefreshedEvent; +import org.springframework.context.event.EventListener; import org.springframework.context.support.AbstractApplicationContext; import org.springframework.context.support.FileSystemXmlApplicationContext; -import javax.annotation.PostConstruct; import java.io.BufferedReader; import java.io.BufferedWriter; import java.io.IOException; @@ -234,12 +235,11 @@ public void setIgnite(Ignite ignite) { /** * @throws Exception If failed. */ - @PostConstruct - public void init() throws Exception { + @EventListener + public void init(ContextRefreshedEvent evt) throws Exception { DummyService srv = ignite.services().serviceProxy(SERVICE_NAME, DummyService.class, false); assertNotNull(srv); } } - -} \ No newline at end of file +} diff --git a/modules/spring/src/test/java/org/apache/ignite/spring/injection/IgniteSpringBeanSpringResourceInjectionTest.java b/modules/spring/src/test/java/org/apache/ignite/spring/injection/IgniteSpringBeanSpringResourceInjectionTest.java new file mode 100644 index 0000000000000..2a06deb6e872e --- /dev/null +++ b/modules/spring/src/test/java/org/apache/ignite/spring/injection/IgniteSpringBeanSpringResourceInjectionTest.java @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.spring.injection; + +import java.io.Serializable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import javax.cache.Cache; +import javax.cache.integration.CacheLoaderException; +import javax.cache.integration.CacheWriterException; +import org.apache.ignite.Ignite; +import org.apache.ignite.cache.store.CacheStoreAdapter; +import org.apache.ignite.resources.SpringResource; +import org.apache.ignite.services.Service; +import org.apache.ignite.services.ServiceContext; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.springframework.beans.factory.BeanFactory; +import org.springframework.context.support.ClassPathXmlApplicationContext; + +/** + * Test checking injections of {@link SpringResource} annotated fields. 
+ */ +public class IgniteSpringBeanSpringResourceInjectionTest extends GridCommonAbstractTest { + /** */ + private static final String SPRING_CFG_LOCATION = "/org/apache/ignite/spring/injection/spring-bean.xml"; + + /** */ + private static final String BEAN_TO_INJECT_NAME = "beanToInject"; + + /** + * Cache store with {@link SpringResource} fields to be injected. + */ + public static class IgniteCacheStoreWithSpringResource extends CacheStoreAdapter + implements Serializable + { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + @SpringResource(resourceClass = Integer.class) + private transient Integer injectedSpringFld; + + /** + * @return Injected Spring field. + */ + public Integer getInjectedSpringField() { + return injectedSpringFld; + } + + /** {@inheritDoc} */ + @Override public V load(K key) throws CacheLoaderException { + return null; + } + + /** {@inheritDoc} */ + @Override public void write(Cache.Entry entry) throws CacheWriterException { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void delete(Object key) throws CacheWriterException { + // No-op. + } + } + + /** + * Interface of a service with {@link SpringResource} fields to be injected. + */ + public interface ServiceWithSpringResource { + /** + * @return Injected Spring field. + */ + Integer getInjectedSpringField(); + } + + /** + * Service with {@link SpringResource} fields to be injected. + */ + public static class ServiceWithSpringResourceImpl implements ServiceWithSpringResource, Service { + /** */ + private static final long serialVersionUID = 0L; + /** */ + @SpringResource(resourceClass = Integer.class) + private transient Integer injectedSpringFld; + + /** {@inheritDoc} */ + @Override public Integer getInjectedSpringField() { + return injectedSpringFld; + } + + /** {@inheritDoc} */ + @Override public void cancel(ServiceContext ctx) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public void init(ServiceContext ctx) throws Exception { + // No-op. 
+ } + + /** {@inheritDoc} */ + @Override public void execute(ServiceContext ctx) throws Exception { + // No-op. + } + } + + /** + * + */ + private abstract static class TestSpringResourceInjectedRunnable implements Runnable { + /** */ + private final String springCfgLocation; + + /** */ + private final String beanToInjectName; + + /** */ + protected BeanFactory appCtx; + + /** + * Constructor. + * + * @param springCfgLocation Spring config location. + * @param beanToInjectName Bean to inject name. + */ + protected TestSpringResourceInjectedRunnable(String springCfgLocation, String beanToInjectName) { + this.springCfgLocation = springCfgLocation; + this.beanToInjectName = beanToInjectName; + } + + /** {@inheritDoc} */ + @Override public void run() { + appCtx = new ClassPathXmlApplicationContext(springCfgLocation); + + Integer beanToInject = (Integer)appCtx.getBean(beanToInjectName); + + assertEquals(beanToInject, getInjectedBean()); + } + + /** + * @return Injected bean to check. + */ + abstract Integer getInjectedBean(); + } + + /** */ + private void doTestSpringResourceInjected(Runnable testRunnable) throws Exception { + ExecutorService executorSvc = Executors.newSingleThreadExecutor(); + + Future fut = executorSvc.submit(testRunnable); + + try { + fut.get(5, TimeUnit.SECONDS); + } + catch (TimeoutException ignored) { + fail("Failed to wait for completion. 
Deadlock is possible"); + } + finally { + executorSvc.shutdownNow(); + } + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(true); + } + + /** */ + public void testSpringResourceInjectedInCacheStore() throws Exception { + doTestSpringResourceInjected( + new TestSpringResourceInjectedRunnable(SPRING_CFG_LOCATION, BEAN_TO_INJECT_NAME) { + /** {@inheritDoc} */ + @Override Integer getInjectedBean() { + IgniteCacheStoreWithSpringResource cacheStore = + appCtx.getBean(IgniteCacheStoreWithSpringResource.class); + + return cacheStore.getInjectedSpringField(); + } + } + ); + } + + /** */ + public void testSpringResourceInjectedInService() throws Exception { + doTestSpringResourceInjected( + new TestSpringResourceInjectedRunnable(SPRING_CFG_LOCATION, BEAN_TO_INJECT_NAME) { + /** {@inheritDoc} */ + @Override Integer getInjectedBean() { + Ignite ignite = appCtx.getBean(Ignite.class); + ServiceWithSpringResource svc = ignite.services().service("ServiceWithSpringResource"); + + return svc.getInjectedSpringField(); + } + } + ); + } +} diff --git a/modules/spring/src/test/java/org/apache/ignite/spring/injection/spring-bean.xml b/modules/spring/src/test/java/org/apache/ignite/spring/injection/spring-bean.xml new file mode 100644 index 0000000000000..d40f1528e8b89 --- /dev/null +++ b/modules/spring/src/test/java/org/apache/ignite/spring/injection/spring-bean.xml @@ -0,0 +1,84 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 127.0.0.1:47500..47509 + + + + + + + + + + diff --git a/modules/spring/src/test/java/org/apache/ignite/testsuites/IgniteSpringTestSuite.java b/modules/spring/src/test/java/org/apache/ignite/testsuites/IgniteSpringTestSuite.java index 9ebb784077ce9..ff96c7bac26bf 100644 --- a/modules/spring/src/test/java/org/apache/ignite/testsuites/IgniteSpringTestSuite.java +++ b/modules/spring/src/test/java/org/apache/ignite/testsuites/IgniteSpringTestSuite.java @@ 
-21,6 +21,7 @@ import org.apache.ignite.cache.spring.GridSpringCacheManagerSelfTest; import org.apache.ignite.cache.spring.SpringCacheManagerContextInjectionTest; import org.apache.ignite.cache.spring.SpringCacheTest; +import org.apache.ignite.spring.injection.IgniteSpringBeanSpringResourceInjectionTest; import org.apache.ignite.internal.IgniteSpringBeanTest; import org.apache.ignite.cache.store.jdbc.CacheJdbcBlobStoreFactorySelfTest; import org.apache.ignite.cache.store.jdbc.CacheJdbcPojoStoreFactorySelfTest; @@ -77,6 +78,7 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(GridSpringTransactionManagerSelfTest.class); suite.addTestSuite(GridServiceInjectionSpringResourceTest.class); + suite.addTestSuite(IgniteSpringBeanSpringResourceInjectionTest.class); suite.addTestSuite(GridTransformSpringInjectionSelfTest.class); From 47f02bf2947e58ff3ee492c41d64e56337506c5e Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Wed, 25 Oct 2017 15:07:06 +0300 Subject: [PATCH 091/243] IGNITE-6511: Fixed SQLGetDiagRec behaviour in ODBC (cherry picked from commit bbf4853) --- .../cpp/odbc-test/src/api_robustness_test.cpp | 25 +++++++++------- modules/platforms/cpp/odbc/src/odbc.cpp | 29 +++++++++++++++---- 2 files changed, 38 insertions(+), 16 deletions(-) diff --git a/modules/platforms/cpp/odbc-test/src/api_robustness_test.cpp b/modules/platforms/cpp/odbc-test/src/api_robustness_test.cpp index 82f7274cf8d17..c17e079e25f10 100644 --- a/modules/platforms/cpp/odbc-test/src/api_robustness_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/api_robustness_test.cpp @@ -976,26 +976,31 @@ BOOST_AUTO_TEST_CASE(TestSQLGetDiagField) BOOST_AUTO_TEST_CASE(TestSQLGetDiagRec) { - // There are no checks because we do not really care what is the result of these - // calls as long as they do not cause segmentation fault. - Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache"); - // Should fail. 
- SQLRETURN ret = SQLGetTypeInfo(stmt, SQL_INTERVAL_MONTH); - - BOOST_REQUIRE_EQUAL(ret, SQL_ERROR); - SQLCHAR state[ODBC_BUFFER_SIZE]; SQLINTEGER nativeError = 0; SQLCHAR message[ODBC_BUFFER_SIZE]; SQLSMALLINT messageLen = 0; - // Everithing is ok - ret = SQLGetDiagRec(SQL_HANDLE_STMT, stmt, 1, state, &nativeError, message, sizeof(message), &messageLen); + // Generating error. + SQLRETURN ret = SQLGetTypeInfo(stmt, SQL_INTERVAL_MONTH); + BOOST_REQUIRE_EQUAL(ret, SQL_ERROR); + // Everithing is ok. + ret = SQLGetDiagRec(SQL_HANDLE_STMT, stmt, 1, state, &nativeError, message, sizeof(message), &messageLen); BOOST_REQUIRE_EQUAL(ret, SQL_SUCCESS); + // Should return error. + ret = SQLGetDiagRec(SQL_HANDLE_STMT, stmt, 1, state, &nativeError, message, -1, &messageLen); + BOOST_REQUIRE_EQUAL(ret, SQL_ERROR); + + // Should return message length. + ret = SQLGetDiagRec(SQL_HANDLE_STMT, stmt, 1, state, &nativeError, message, 1, &messageLen); + BOOST_REQUIRE_EQUAL(ret, SQL_SUCCESS_WITH_INFO); + + // There are no checks because we do not really care what is the result of these + // calls as long as they do not cause segmentation fault. 
SQLGetDiagRec(SQL_HANDLE_STMT, stmt, 1, 0, &nativeError, message, sizeof(message), &messageLen); SQLGetDiagRec(SQL_HANDLE_STMT, stmt, 1, state, 0, message, sizeof(message), &messageLen); SQLGetDiagRec(SQL_HANDLE_STMT, stmt, 1, state, &nativeError, 0, sizeof(message), &messageLen); diff --git a/modules/platforms/cpp/odbc/src/odbc.cpp b/modules/platforms/cpp/odbc/src/odbc.cpp index baa276a55afa4..8121a3bcbca1c 100644 --- a/modules/platforms/cpp/odbc/src/odbc.cpp +++ b/modules/platforms/cpp/odbc/src/odbc.cpp @@ -984,16 +984,22 @@ namespace ignite { Diagnosable *diag = reinterpret_cast(handle); + if (!diag) + return SQL_INVALID_HANDLE; + records = &diag->GetDiagnosticRecords(); break; } default: - break; + return SQL_INVALID_HANDLE; } - if (!records || recNum < 1 || recNum > records->GetStatusRecordsNumber()) + if (recNum < 1 || msgBufferLen < 0) + return SQL_ERROR; + + if (!records || recNum > records->GetStatusRecordsNumber()) return SQL_NO_DATA; const DiagnosticRecord& record = records->GetStatusRecord(recNum); @@ -1004,13 +1010,24 @@ namespace ignite if (nativeError) *nativeError = 0; - SqlLen outResLen; - ApplicationDataBuffer outBuffer(OdbcNativeType::AI_CHAR, msgBuffer, msgBufferLen, &outResLen); + const std::string& errMsg = record.GetMessageText(); - outBuffer.PutString(record.GetMessageText()); + if (!msgBuffer || msgBufferLen < static_cast(errMsg.size() + 1)) + { + if (!msgLen) + return SQL_ERROR; + + CopyStringToBuffer(errMsg, reinterpret_cast(msgBuffer), static_cast(msgBufferLen)); + + *msgLen = static_cast(errMsg.size()); + + return SQL_SUCCESS_WITH_INFO; + } + + CopyStringToBuffer(errMsg, reinterpret_cast(msgBuffer), static_cast(msgBufferLen)); if (msgLen) - *msgLen = static_cast(outResLen); + *msgLen = static_cast(errMsg.size()); return SQL_SUCCESS; } From 3c1efb69424c227fb0254e9987e24e9b837d984d Mon Sep 17 00:00:00 2001 From: mcherkasov Date: Wed, 25 Oct 2017 18:44:53 +0300 Subject: [PATCH 092/243] IGNITE-6639 TCP communication skip local node loopback 
address. This closes #2909. Signed-off-by: nikolay_tikhonov --- .../spi/communication/tcp/TcpCommunicationSpi.java | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java index 7a5466602f994..6bcabcd542e2c 100755 --- a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java @@ -3017,6 +3017,10 @@ private LinkedHashSet nodeAddresses(ClusterNode node) throws if (isExtAddrsExist) addrs.addAll(extAddrs); + if (log.isDebugEnabled()) + log.debug("Addresses resolved from attributes [rmtNode=" + node.id() + ", addrs=" + addrs + + ", isRmtAddrsExist=" + isRmtAddrsExist + ']'); + if (filterReachableAddresses) { Set allInetAddrs = U.newHashSet(addrs.size()); @@ -3046,7 +3050,7 @@ private LinkedHashSet nodeAddresses(ClusterNode node) throws } if (log.isDebugEnabled()) - log.debug("Addresses to connect for node [rmtNode=" + node.id() + ", addrs=" + addrs.toString() + ']'); + log.debug("Addresses to connect for node [rmtNode=" + node.id() + ", addrs=" + addrs + ']'); } return addrs; @@ -3080,6 +3084,14 @@ protected GridCommunicationClient createTcpClient(ClusterNode node, int connIdx) int lastWaitingTimeout = 1; while (!conn) { // Reconnection on handshake timeout. 
+ if (addr.getAddress().isLoopbackAddress() && addr.getPort() == boundTcpPort) { + if (log.isDebugEnabled()) + log.debug("Skipping local address [addr=" + addr + + ", locAddrs=" + node.attribute(createSpiAttributeName(ATTR_ADDRS)) + + ", node=" + node + ']'); + continue; + } + try { SocketChannel ch = SocketChannel.open(); From 6ba2ec8174be5ec736a06a1368c587f0d008e04b Mon Sep 17 00:00:00 2001 From: mcherkasov Date: Fri, 27 Oct 2017 19:55:10 +0300 Subject: [PATCH 093/243] IGNITE-6774 Java doc is broken: "LUDecomposition.java:40: warning - Tag @see: missing final '>'". This closes #2941. Signed-off-by: nikolay_tikhonov --- .../apache/ignite/ml/math/decompositions/LUDecomposition.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/decompositions/LUDecomposition.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/decompositions/LUDecomposition.java index df7b6cd1d2f4f..8b79d9bdf1e94 100644 --- a/modules/ml/src/main/java/org/apache/ignite/ml/math/decompositions/LUDecomposition.java +++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/decompositions/LUDecomposition.java @@ -35,7 +35,7 @@ * @see MathWorld * @see Wikipedia * - * TODO: IGNITE-5828, Maybe we should make this class (and other decompositions) Externalizable. + *

        TODO: IGNITE-5828, Maybe we should make this class (and other decompositions) Externalizable.

        */ public class LUDecomposition implements Destroyable { /** Default bound to determine effective singularity in LU decomposition. */ From 80fce622cec83d2c8d6bfbe619da9b8957594635 Mon Sep 17 00:00:00 2001 From: nikolay_tikhonov Date: Wed, 25 Oct 2017 18:46:59 +0300 Subject: [PATCH 094/243] IGNITE-6654 Ignite client can hang in case IgniteOOM on server. This closes #2908. Signed-off-by: nikolay_tikhonov --- .../pagemem/impl/PageMemoryNoStoreImpl.java | 6 +- .../dht/atomic/GridDhtAtomicCache.java | 7 +- .../datastreamer/DataStreamerImpl.java | 23 +- .../IgniteOutOfMemoryPropagationTest.java | 251 ++++++++++++++++++ .../testsuites/IgniteCacheTestSuite6.java | 5 + 5 files changed, 285 insertions(+), 7 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteOutOfMemoryPropagationTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java index 6ba68c28e71a6..e219d6ee61797 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java @@ -290,9 +290,11 @@ public PageMemoryNoStoreImpl( if (relPtr == INVALID_REL_PTR) throw new IgniteOutOfMemoryException("Not enough memory allocated " + - "(consider increasing data region size or enabling evictions) " + "[policyName=" + dataRegionCfg.getName() + - ", size=" + U.readableSize(dataRegionCfg.getMaxSize(), true) + "]" + ", size=" + U.readableSize(dataRegionCfg.getMaxSize(), true) + "]" + U.nl() + + "Consider increasing memory policy size, enabling evictions, adding more nodes to the cluster, " + + "reducing number of backups or reducing model size." 
+ ); assert (relPtr & ~PageIdUtils.PAGE_IDX_MASK) == 0 : U.hexLong(relPtr & ~PageIdUtils.PAGE_IDX_MASK); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java index 5095f45679bd7..a7dd615b33223 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java @@ -38,6 +38,7 @@ import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.NodeStoppingException; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; +import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; import org.apache.ignite.internal.pagemem.wal.StorageException; import org.apache.ignite.internal.processors.affinity.AffinityAssignment; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; @@ -96,6 +97,7 @@ import org.apache.ignite.internal.util.typedef.CO; import org.apache.ignite.internal.util.typedef.CX1; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; @@ -3213,7 +3215,10 @@ && writeThrough() && !req.skipStore(), catch (GridDhtInvalidPartitionException ignored) { // Ignore. 
} - catch (IgniteCheckedException e) { + catch (IgniteCheckedException|RuntimeException e) { + if(e instanceof RuntimeException && !X.hasCause(e, IgniteOutOfMemoryException.class)) + throw (RuntimeException)e; + IgniteCheckedException err = new IgniteCheckedException("Failed to update key on backup node: " + key, e); if (nearRes != null) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java index 6ed552a5700f7..d38132fa3059c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java @@ -188,6 +188,9 @@ public class DataStreamerImpl implements IgniteDataStreamer, Delayed /** {@code True} if data loader has been cancelled. */ private volatile boolean cancelled; + /** Cancellation reason. */ + private volatile Throwable cancellationReason = null; + /** Fail counter. 
*/ private final LongAdder8 failCntr = new LongAdder8(); @@ -210,7 +213,12 @@ public class DataStreamerImpl implements IgniteDataStreamer, Delayed failCntr.increment(); - cancelled = true; + synchronized (DataStreamerImpl.this) { + if(cancellationReason == null) + cancellationReason = err; + + cancelled = true; + } } } }; @@ -399,12 +407,12 @@ private void enterBusy() { if (disconnectErr != null) throw disconnectErr; - throw new IllegalStateException("Data streamer has been closed."); + closedException(); } else if (cancelled) { busyLock.leaveBusy(); - throw new IllegalStateException("Data streamer has been closed."); + closedException(); } } @@ -886,7 +894,7 @@ else if (remaps + 1 > maxRemapCnt) { @Override public void run() { try { if (cancelled) - throw new IllegalStateException("DataStreamer closed."); + closedException(); load0(entriesForNode, resFut, activeKeys, remaps + 1); } @@ -989,6 +997,13 @@ else if (remaps + 1 > maxRemapCnt) { } } + /** + * Throws stream closed exception. + */ + private void closedException() { + throw new IllegalStateException("Data streamer has been closed.", cancellationReason); + } + /** * @param key Key to map. * @param topVer Topology version. diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteOutOfMemoryPropagationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteOutOfMemoryPropagationTest.java new file mode 100644 index 0000000000000..a13cbd478ab0c --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteOutOfMemoryPropagationTest.java @@ -0,0 +1,251 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.HashMap; +import java.util.Map; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.CacheWriteSynchronizationMode; +import org.apache.ignite.cluster.ClusterTopologyException; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.apache.ignite.transactions.Transaction; +import org.apache.ignite.transactions.TransactionConcurrency; +import org.apache.ignite.transactions.TransactionIsolation; + +/** + * + */ +public class IgniteOutOfMemoryPropagationTest extends GridCommonAbstractTest { + + /** */ + public static final int NODES = 3; + + /** */ + private CacheAtomicityMode atomicityMode; + + /** */ + private CacheMode mode; + + 
/** */ + private int backupsCount; + + /** */ + private CacheWriteSynchronizationMode writeSyncMode; + + /** */ + private IgniteEx client; + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + + assert G.allGrids().isEmpty(); + } + + /** {@inheritDoc} */ + @Override protected long getTestTimeout() { + return 20 * 60 * 1000; + } + + /** */ + public void testPutOOMPropagation() throws Exception { + testOOMPropagation(false); + } + + /** */ + public void testStreamerOOMPropagation() throws Exception { + testOOMPropagation(true); + } + + /** */ + private void testOOMPropagation(boolean useStreamer) throws Exception { + for (CacheAtomicityMode atomicityMode : CacheAtomicityMode.values()) { + for (CacheMode cacheMode : CacheMode.values()) { + for (CacheWriteSynchronizationMode writeSyncMode : CacheWriteSynchronizationMode.values()) { + for (int backupsCount = 0; backupsCount < 1; backupsCount++) { + if (writeSyncMode == CacheWriteSynchronizationMode.FULL_ASYNC + || cacheMode == CacheMode.REPLICATED) + continue; + + if (atomicityMode == CacheAtomicityMode.TRANSACTIONAL && !useStreamer) { + for (TransactionConcurrency concurrency : TransactionConcurrency.values()) { + for (TransactionIsolation isolation : TransactionIsolation.values()) { + checkOOMPropagation( + false, + CacheAtomicityMode.TRANSACTIONAL, + cacheMode, + writeSyncMode, + backupsCount, + concurrency, + isolation); + } + } + } + else + checkOOMPropagation(useStreamer, atomicityMode, cacheMode, writeSyncMode, backupsCount); + } + } + } + } + } + + /** */ + private void checkOOMPropagation(boolean useStreamer, CacheAtomicityMode atomicityMode, CacheMode cacheMode, + CacheWriteSynchronizationMode writeSyncMode, int backupsCount) throws Exception { + checkOOMPropagation(useStreamer, atomicityMode, cacheMode, writeSyncMode, backupsCount, null, null); + } + + /** */ + private void checkOOMPropagation(boolean useStreamer, CacheAtomicityMode atomicityMode, 
CacheMode cacheMode, + CacheWriteSynchronizationMode writeSyncMode, int backupsCount, + TransactionConcurrency concurrency, TransactionIsolation isolation) throws Exception { + Throwable t = null; + + System.out.println("Checking conf: CacheAtomicityMode." + atomicityMode + + " CacheMode." + mode + " CacheWriteSynchronizationMode." + writeSyncMode + " backupsCount = " + backupsCount + + " TransactionConcurrency." + concurrency + " TransactionIsolation." + isolation); + + initGrid(atomicityMode, cacheMode, writeSyncMode, backupsCount); + try { + forceOOM(useStreamer, concurrency, isolation); + } + catch (Throwable t0) { + t = t0; + + t.printStackTrace(System.out); + + assertTrue(X.hasCause(t, IgniteOutOfMemoryException.class, ClusterTopologyException.class)); + } + finally { + assertNotNull(t); + + stopAllGrids(); + } + } + + /** + * Ignite grid of 3 server nodes with passed parameters. + * + * @param atomicityMode atomicity mode + * @param mode cache mode + * @param writeSyncMode cache write synchronization mode + * @param backupsCount backups count + * @throws Exception + */ + private void initGrid(CacheAtomicityMode atomicityMode, CacheMode mode, + CacheWriteSynchronizationMode writeSyncMode, int backupsCount) throws Exception { + + this.atomicityMode = atomicityMode; + this.mode = mode; + this.backupsCount = backupsCount; + this.writeSyncMode = writeSyncMode; + + Ignition.setClientMode(false); + + for (int i = 0; i < NODES; i++) + startGrid(i); + + Ignition.setClientMode(true); + + client = startGrid(NODES + 1); + + // it is required to start first node in test jvm, but we can not start client node, + // because client will fail to connect and test will fail too. + // as workaround start first server node in test jvm and then stop it. 
+ stopGrid(0); + } + + + /** */ + public void forceOOM(boolean useStreamer, TransactionConcurrency concurrency, + TransactionIsolation isolation) throws Exception { + final IgniteCache cache = client.cache(DEFAULT_CACHE_NAME); + + IgniteDataStreamer streamer = client.dataStreamer(DEFAULT_CACHE_NAME); + + Map map = new HashMap<>(); + + Transaction tx = null; + + for (int i = 0; i < Integer.MAX_VALUE; i++) { + map.put("k" + i, "v" + i); + + if (map.size() > 1_000) { + if (concurrency != null && isolation != null) + tx = client.transactions().txStart(concurrency, isolation); + + if (useStreamer) + streamer.addData(map); + else + cache.putAll(map); + + map.clear(); + + if (tx != null) { + tx.commit(); + tx.close(); + } + } + } + } + + /** {@inheritDoc} */ + @Override protected boolean isMultiJvm() { + return true; + } + + /** {@inheritDoc} */ + @Override protected boolean isRemoteJvm(String igniteInstanceName) { + return !(Ignition.isClientMode() || igniteInstanceName.endsWith("0")); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + DataStorageConfiguration memCfg = new DataStorageConfiguration(); + + memCfg.setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setMaxSize(10 * 1024 * 1024 + 1)); + + cfg.setDataStorageConfiguration(memCfg); + + CacheConfiguration baseCfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME); + + baseCfg.setAtomicityMode(this.atomicityMode); + baseCfg.setCacheMode(this.mode); + baseCfg.setBackups(this.backupsCount); + baseCfg.setWriteSynchronizationMode(this.writeSyncMode); + + cfg.setCacheConfiguration(baseCfg); + + return cfg; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite6.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite6.java index 7c71381c1ac89..8a2d6a066f840 100644 --- 
a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite6.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite6.java @@ -18,6 +18,7 @@ package org.apache.ignite.testsuites; import junit.framework.TestSuite; +import org.apache.ignite.internal.processors.cache.IgniteOutOfMemoryPropagationTest; import org.apache.ignite.internal.processors.cache.distributed.CacheExchangeMergeTest; import org.apache.ignite.internal.processors.cache.distributed.CachePartitionStateTest; import org.apache.ignite.internal.processors.cache.distributed.GridCachePartitionEvictionDuringReadThroughSelfTest; @@ -54,6 +55,10 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(TxRollbackOnTimeoutNearCacheTest.class); suite.addTestSuite(IgniteCacheThreadLocalTxTest.class); + +// TODO enable this test after IGNITE-6753, now it takes too long +// suite.addTestSuite(IgniteOutOfMemoryPropagationTest.class); + return suite; } } From c50c9ad09597e56f9b1c5fbd7b1ada3ede815401 Mon Sep 17 00:00:00 2001 From: nikolay_tikhonov Date: Thu, 26 Oct 2017 14:24:27 +0300 Subject: [PATCH 095/243] IGNITE-6071 White list of exceptions to suppress in createTcpClient. This closes #2575. 
Signed-off-by: nikolay_tikhonov --- .../tcp/TcpCommunicationSpi.java | 3 +- .../ignite/spi/discovery/tcp/ServerImpl.java | 2 + ...tConnectAfterCommunicationFailureTest.java | 156 ++++++++++++++++++ .../IgniteClientReconnectTestSuite.java | 4 +- 4 files changed, 163 insertions(+), 2 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/IgniteClientConnectAfterCommunicationFailureTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java index 6bcabcd542e2c..ec586b450fb13 100755 --- a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java @@ -3351,7 +3351,8 @@ else if (X.hasCause(e, SocketTimeoutException.class)) } } - if (X.hasCause(errs, ConnectException.class, HandshakeException.class)) + if (!X.hasCause(errs, SocketTimeoutException.class, HandshakeTimeoutException.class, + IgniteSpiOperationTimeoutException.class)) throw errs; } diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java index 182090a4f7dac..94c9ca9aa0b7a 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java @@ -732,6 +732,8 @@ else if (!spi.failureDetectionTimeoutEnabled() && reconCnt == spi.getReconnectCo finally { U.closeQuiet(sock); } + + U.sleep(200); } } catch (Throwable t) { diff --git a/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientConnectAfterCommunicationFailureTest.java b/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientConnectAfterCommunicationFailureTest.java new file mode 100644 index 
0000000000000..301d5f24e4922 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientConnectAfterCommunicationFailureTest.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal; + +import java.util.Arrays; +import java.util.UUID; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.util.nio.GridCommunicationClient; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +/** + * Tests client to be able restore connection to cluster on subsequent attempts after communication problems. 
+ */ +public class IgniteClientConnectAfterCommunicationFailureTest extends GridCommonAbstractTest { + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + cfg.setNetworkTimeout(500); + cfg.setCommunicationSpi(new TcpCommunicationSpi(gridName.contains("block"))); + + if (gridName.contains("client")) { + cfg.setClientMode(true); + } + + return cfg; + } + + /** + * @throws Exception If failed. + */ + public void testClientReconnects() throws Exception { + Ignite srv1 = startGrid("server1"); + Ignite srv2 = startGrid("server2"); + startGrid("client-block"); + + assertEquals(1, srv2.cluster().forClients().nodes().size()); + assertEquals(1, srv1.cluster().forClients().nodes().size()); + } + + /** + * @throws Exception If failed. + */ + public void testClientThreadsSuspended() throws Exception { + Ignite srv1 = startGrid("server1"); + Ignite srv2 = startGrid("server2"); + Ignite client = startGrid("client"); + + boolean blockedAnything = false; + + for (Thread thread : Thread.getAllStackTraces().keySet()) { + if (thread.getName().contains("%client%")) { + thread.suspend(); + blockedAnything = true; + } + } + + Thread.sleep(10000); + + for (Thread thread : Thread.getAllStackTraces().keySet()) { + if (thread.getName().contains("%client%")) + thread.resume(); + } + + for (int j = 0; j < 10; j++) { + boolean topOk = true; + + for (Ignite node : Arrays.asList(srv1, srv2, client)) { + if (node.cluster().nodes().size() != 3) { + U.warn(log, "Grid size is incorrect (will re-run check in 1000 ms) " + + "[name=" + node.name() + ", size=" + node.cluster().nodes().size() + ']'); + + topOk = false; + + break; + } + } + + if (topOk) + return; + else + Thread.sleep(1000); + } + + assertTrue(blockedAnything); + assertEquals(1, 
srv2.cluster().forClients().nodes().size()); + assertEquals(1, srv1.cluster().forClients().nodes().size()); + } + + /** + * Will never connect with the first node id, normal operation after. + */ + private class TcpCommunicationSpi extends org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi { + /** + * Whether this instance should actually block. + */ + private final boolean isBlocking; + + /** + * Local node ID that is prevented from creating connections. + */ + private volatile UUID blockedNodeId = null; + + /** + * + * @param isBlocking Whether this instance should actually block. + */ + public TcpCommunicationSpi(boolean isBlocking) { + this.isBlocking = isBlocking; + } + + /** {@inheritDoc} */ + @Override protected GridCommunicationClient createTcpClient(ClusterNode node, int connIdx) + throws IgniteCheckedException { + if (blockHandshakeOnce(getLocalNode().id())) { + throw new IgniteCheckedException("Node is blocked"); + } + + return super.createTcpClient(node, connIdx); + } + + /** Check if this connection is blocked. 
*/ + private boolean blockHandshakeOnce(UUID nodeId) { + if (isBlocking && (blockedNodeId == null || blockedNodeId.equals(nodeId))) { + blockedNodeId = nodeId; + return true; + } + return false; + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteClientReconnectTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteClientReconnectTestSuite.java index 03d3fe230c296..d0e907cde01f8 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteClientReconnectTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteClientReconnectTestSuite.java @@ -18,6 +18,7 @@ package org.apache.ignite.testsuites; import junit.framework.TestSuite; +import org.apache.ignite.internal.IgniteClientConnectAfterCommunicationFailureTest; import org.apache.ignite.internal.IgniteClientReconnectApiExceptionTest; import org.apache.ignite.internal.IgniteClientReconnectAtomicsTest; import org.apache.ignite.internal.IgniteClientReconnectBinaryContexTest; @@ -43,6 +44,7 @@ public class IgniteClientReconnectTestSuite extends TestSuite { public static TestSuite suite() throws Exception { TestSuite suite = new TestSuite("Ignite Client Reconnect Test Suite"); + suite.addTestSuite(IgniteClientConnectAfterCommunicationFailureTest.class); suite.addTestSuite(IgniteClientReconnectStopTest.class); suite.addTestSuite(IgniteClientReconnectApiExceptionTest.class); suite.addTestSuite(IgniteClientReconnectDiscoveryStateTest.class); @@ -59,4 +61,4 @@ public static TestSuite suite() throws Exception { return suite; } -} \ No newline at end of file +} From e0dcdac8680d61204cbea175b86f03e4f8a23bea Mon Sep 17 00:00:00 2001 From: Sergey Chugunov Date: Mon, 23 Oct 2017 17:18:33 +0300 Subject: [PATCH 096/243] IGNITE-6668 Do not block metadata read when calling from discovery thread - Fixes #2880. 
Signed-off-by: Alexey Goncharuk --- .../CacheObjectBinaryProcessorImpl.java | 10 +++++++- .../spi/discovery/IgniteDiscoveryThread.java | 23 +++++++++++++++++++ .../ignite/spi/discovery/tcp/ServerImpl.java | 3 ++- 3 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/spi/discovery/IgniteDiscoveryThread.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java index 0b85d2b3c7b1b..6a709360b05c6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java @@ -82,6 +82,8 @@ import org.apache.ignite.spi.IgniteNodeValidationResult; import org.apache.ignite.spi.discovery.DiscoveryDataBag; import org.apache.ignite.spi.discovery.DiscoveryDataBag.GridDiscoveryData; +import org.apache.ignite.spi.discovery.IgniteDiscoveryThread; +import org.apache.ignite.thread.IgniteThread; import org.jetbrains.annotations.Nullable; import org.jsr166.ConcurrentHashMap8; @@ -495,6 +497,9 @@ public GridBinaryMarshaller marshaller() { } if (holder != null) { + if (IgniteThread.current() instanceof IgniteDiscoveryThread) + return holder.metadata(); + if (holder.pendingVersion() - holder.acceptedVersion() > 0) { GridFutureAdapter fut = transport.awaitMetadataUpdate(typeId, holder.pendingVersion()); @@ -534,7 +539,10 @@ public GridBinaryMarshaller marshaller() { } } } - else { + else if (holder != null) { + if (IgniteThread.current() instanceof IgniteDiscoveryThread) + return holder.metadata().wrap(binaryCtx); + if (holder.pendingVersion() - holder.acceptedVersion() > 0) { GridFutureAdapter fut = transport.awaitMetadataUpdate( typeId, diff --git 
a/modules/core/src/main/java/org/apache/ignite/spi/discovery/IgniteDiscoveryThread.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/IgniteDiscoveryThread.java new file mode 100644 index 0000000000000..a3e376c6f1c5a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/IgniteDiscoveryThread.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.spi.discovery; + +/** + * Marker interface for discovery thread on cluster server node. 
+ */ +public interface IgniteDiscoveryThread { +} diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java index 94c9ca9aa0b7a..ada078012681e 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java @@ -101,6 +101,7 @@ import org.apache.ignite.spi.IgniteSpiThread; import org.apache.ignite.spi.discovery.DiscoverySpiCustomMessage; import org.apache.ignite.spi.discovery.DiscoverySpiListener; +import org.apache.ignite.spi.discovery.IgniteDiscoveryThread; import org.apache.ignite.spi.discovery.tcp.internal.DiscoveryDataPacket; import org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNode; import org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNodesRing; @@ -2474,7 +2475,7 @@ private void advance() { /** * Message worker thread for messages processing. */ - private class RingMessageWorker extends MessageWorkerAdapter { + private class RingMessageWorker extends MessageWorkerAdapter implements IgniteDiscoveryThread { /** Next node. 
*/ @SuppressWarnings({"FieldAccessedSynchronizedAndUnsynchronized"}) private TcpDiscoveryNode next; From 8d863484f5ad43db35d4bfd7d09d328e0bd55b77 Mon Sep 17 00:00:00 2001 From: Sergey Chugunov Date: Wed, 11 Oct 2017 15:33:23 +0300 Subject: [PATCH 097/243] IGNITE-6536 Node fails when detects mapping storage corruption Signed-off-by: Andrey Gura --- .../internal/MarshallerMappingFileStore.java | 15 ++-- .../IgniteMarshallerCacheFSRestoreTest.java | 71 ++++++++++++++++++- 2 files changed, 77 insertions(+), 9 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerMappingFileStore.java b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerMappingFileStore.java index eabbdb81d2340..59a99b861751a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerMappingFileStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerMappingFileStore.java @@ -167,16 +167,19 @@ void restoreMappings(MarshallerContext marshCtx) throws IgniteCheckedException { try (FileInputStream in = new FileInputStream(file)) { try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) { - String className = reader.readLine(); + String clsName = reader.readLine(); - marshCtx.registerClassNameLocally(platformId, typeId, className); + if (clsName == null) { + throw new IgniteCheckedException("Class name is null for [platformId=" + platformId + + ", typeId=" + typeId + "], marshaller mappings storage is broken. " + + "Clean up marshaller directory (/marshaller) and restart the node."); + } + + marshCtx.registerClassNameLocally(platformId, typeId, clsName); } } catch (IOException e) { - throw new IgniteCheckedException("Reading marshaller mapping from file " - + name - + " failed." 
- , e); + throw new IgniteCheckedException("Reading marshaller mapping from file " + name + " failed.", e); } } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteMarshallerCacheFSRestoreTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteMarshallerCacheFSRestoreTest.java index 21a3e4344ba85..ac15971f091ca 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteMarshallerCacheFSRestoreTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteMarshallerCacheFSRestoreTest.java @@ -23,13 +23,16 @@ import java.nio.charset.StandardCharsets; import java.util.Collection; import java.util.Map; +import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; import org.apache.ignite.internal.processors.marshaller.MappingProposedMessage; @@ -47,6 +50,9 @@ public class IgniteMarshallerCacheFSRestoreTest extends GridCommonAbstractTest { /** */ private volatile boolean isDuplicateObserved = true; + /** */ + private boolean isPersistenceEnabled; + /** * */ @@ -67,6 +73,7 @@ private static class SimpleValue { } } + /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); @@ -75,13 +82,17 @@ private static class SimpleValue { cfg.setDiscoverySpi(discoSpi); - 
CacheConfiguration singleCacheConfig = new CacheConfiguration() + CacheConfiguration singleCacheCfg = new CacheConfiguration() .setName(DEFAULT_CACHE_NAME) .setCacheMode(CacheMode.PARTITIONED) .setBackups(1) .setAtomicityMode(CacheAtomicityMode.ATOMIC); - cfg.setCacheConfiguration(singleCacheConfig); + cfg.setCacheConfiguration(singleCacheCfg); + + //persistence must be enabled to verify restoring mappings from FS case + if (isPersistenceEnabled) + cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); return cfg; } @@ -110,11 +121,14 @@ private void cleanUpWorkDir() throws Exception { * In that case the request must not be marked as duplicate and must be processed in a regular way. * No hangs must take place. * - * @see IGNITE-5401 Take a look at JIRA ticket for more information about context of this test. + * @see IGNITE-5401 JIRA ticket + * provides more information about context of this test. * * This test must never hang on proposing of MarshallerMapping. */ public void testFileMappingReadAndPropose() throws Exception { + isPersistenceEnabled = false; + prepareMarshallerFileStore(); IgniteEx ignite0 = startGrid(0); @@ -162,6 +176,57 @@ private void prepareMarshallerFileStore() throws Exception { } } + /** + * Verifies scenario that node with corrupted marshaller mapping store must fail on startup + * with appropriate error message. + * + * @see IGNITE-6536 JIRA provides more information + * about this case. + */ + public void testNodeStartFailsOnCorruptedStorage() throws Exception { + isPersistenceEnabled = true; + + Ignite ig = startGrids(3); + + ig.active(true); + + ig.cache(DEFAULT_CACHE_NAME).put(0, new SimpleValue(0, "value0")); + + stopAllGrids(); + + corruptMarshallerStorage(); + + try { + startGrid(0); + } + catch (IgniteCheckedException e) { + verifyException((IgniteCheckedException) e.getCause()); + } + } + + /** + * Class name for CustomClass class mapping file gets cleaned up from file system. 
+ */ + private void corruptMarshallerStorage() throws Exception { + String marshallerDir = U.defaultWorkDirectory() + File.separator + "marshaller"; + + File[] storedMappingsFiles = new File(marshallerDir).listFiles(); + + assert storedMappingsFiles.length == 1; + + try (FileOutputStream out = new FileOutputStream(storedMappingsFiles[0])) { + out.getChannel().truncate(0); + } + } + + /** */ + private void verifyException(IgniteCheckedException e) throws Exception { + String msg = e.getMessage(); + + if (msg == null || !msg.contains("Class name is null")) + throw new Exception("Exception with unexpected message was thrown: " + msg, e); + } + /** */ private class TestTcpDiscoverySpi extends TcpDiscoverySpi { From f4186458c7d79c5c8b7c12dc98a431c2688bec00 Mon Sep 17 00:00:00 2001 From: vd-pyatkov Date: Wed, 8 Nov 2017 11:12:48 +0300 Subject: [PATCH 098/243] IGNITE-6737 GridDeploymentPerVersionStore retries loading class infinitely. This fixes #2934 (cherry picked from commit f52f8f9) --- .../GridDeploymentPerVersionStore.java | 8 +- .../DeploymentClassLoaderCallableTest.java | 127 ++++++++++++++++++ .../testsuites/IgniteP2PSelfTestSuite.java | 2 + .../tests/p2p/compute/ExternalCallable.java | 38 ++++++ .../tests/p2p/compute/ExternalCallable1.java | 39 ++++++ .../tests/p2p/compute/ExternalCallable2.java | 39 ++++++ 6 files changed, 251 insertions(+), 2 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/p2p/DeploymentClassLoaderCallableTest.java create mode 100644 modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java create mode 100644 modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java create mode 100644 modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java 
b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java index 0bf8328472237..070b3906c8b0f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java @@ -544,8 +544,12 @@ else if (log.isDebugEnabled()) // New deployment was added while outside of synchronization. // Need to recheck it again. - if (!d.pendingUndeploy() && !d.undeployed() && !depsToCheck.contains(d)) - retry = true; + if (!d.pendingUndeploy() && !d.undeployed() && !depsToCheck.contains(d)) { + Map parties = d.participants(); + + if (parties == null || parties.get(meta.senderNodeId()) == null) + retry = true; + } } if (retry) { diff --git a/modules/core/src/test/java/org/apache/ignite/p2p/DeploymentClassLoaderCallableTest.java b/modules/core/src/test/java/org/apache/ignite/p2p/DeploymentClassLoaderCallableTest.java new file mode 100644 index 0000000000000..a9cec685d7cef --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/p2p/DeploymentClassLoaderCallableTest.java @@ -0,0 +1,127 @@ +package org.apache.ignite.p2p; + +import java.lang.reflect.Constructor; +import java.net.URL; +import org.apache.ignite.Ignite; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.testframework.GridTestExternalClassLoader; +import org.apache.ignite.testframework.config.GridTestProperties; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +/** + */ +public class DeploymentClassLoaderCallableTest extends GridCommonAbstractTest { + /** */ + private static final String RUN_CLS = "org.apache.ignite.tests.p2p.compute.ExternalCallable"; + + /** */ + private static final String RUN_CLS1 = "org.apache.ignite.tests.p2p.compute.ExternalCallable1"; + + /** */ + private static final 
String RUN_CLS2 = "org.apache.ignite.tests.p2p.compute.ExternalCallable2"; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + return super.getConfiguration(igniteInstanceName) + .setPeerClassLoadingEnabled(true); + } + + /** + * @throws Exception if failed. + */ + public void testDeploymentFromSecondAndThird() throws Exception { + try { + startGrid(1); + + final Ignite ignite2 = startGrid(2); + final Ignite ignite3 = startGrid(3); + + runJob0(ignite2, 10_000); + + runJob1(ignite3, 10_000); + runJob2(ignite3, 10_000); + } + finally { + stopAllGrids(); + } + } + + /** + * @throws Exception if failed. + */ + public void testDeploymentFromEach() throws Exception { + try { + final Ignite ignite1 = startGrid(1); + final Ignite ignite2 = startGrid(2); + final Ignite ignite3 = startGrid(3); + + runJob0(ignite1, 10_000); + + runJob1(ignite2, 10_000); + + runJob2(ignite3, 10_000); + } + finally { + stopAllGrids(); + } + } + + /** + * @throws Exception if failed. + */ + public void testDeploymentFromOne() throws Exception { + try { + startGrid(1); + startGrid(2); + + final Ignite ignite3 = startGrid(3); + + runJob0(ignite3, 10_000); + runJob1(ignite3, 10_000); + runJob2(ignite3, 10_000); + } + finally { + stopAllGrids(); + } + } + + /** + * @param ignite Ignite instance. + * @param timeout Timeout. + * @throws Exception If failed. + */ + private void runJob1(Ignite ignite, long timeout) throws Exception { + ClassLoader testClassLoader1 = new GridTestExternalClassLoader(new URL[] { + new URL(GridTestProperties.getProperty("p2p.uri.cls"))}, RUN_CLS, RUN_CLS2); + + Constructor ctor = testClassLoader1.loadClass(RUN_CLS1).getConstructor(); + ignite.compute().withTimeout(timeout).broadcast((IgniteCallable)ctor.newInstance()); + } + + /** + * @param ignite Ignite instance. + * @param timeout Timeout. + * @throws Exception If failed. 
+ */ + private void runJob0(Ignite ignite, long timeout) throws Exception { + ClassLoader testClassLoader = new GridTestExternalClassLoader(new URL[] { + new URL(GridTestProperties.getProperty("p2p.uri.cls"))}, RUN_CLS1, RUN_CLS2); + + Constructor ctor = testClassLoader.loadClass(RUN_CLS).getConstructor(); + ignite.compute().withTimeout(timeout).broadcast((IgniteCallable)ctor.newInstance()); + } + + /** + * @param ignite Ignite instance. + * @param timeout Timeout. + * @throws Exception If failed. + */ + private void runJob2(Ignite ignite, long timeout) throws Exception { + ClassLoader testClassLoader = new GridTestExternalClassLoader(new URL[] { + new URL(GridTestProperties.getProperty("p2p.uri.cls"))}, RUN_CLS, RUN_CLS1); + + Constructor ctor = testClassLoader.loadClass(RUN_CLS2).getConstructor(); + ignite.compute().withTimeout(timeout).broadcast((IgniteCallable)ctor.newInstance()); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java index 7bade98338e3a..abd99678f8be5 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java @@ -20,6 +20,7 @@ import java.util.Set; import junit.framework.TestSuite; import org.apache.ignite.internal.managers.deployment.GridDeploymentMessageCountSelfTest; +import org.apache.ignite.p2p.DeploymentClassLoaderCallableTest; import org.apache.ignite.p2p.GridP2PClassLoadingSelfTest; import org.apache.ignite.p2p.GridP2PContinuousDeploymentSelfTest; import org.apache.ignite.p2p.GridP2PDifferentClassLoaderSelfTest; @@ -70,6 +71,7 @@ public static TestSuite suite(Set ignoredTests) throws Exception { suite.addTest(new TestSuite(GridP2PTimeoutSelfTest.class)); suite.addTest(new TestSuite(GridP2PMissedResourceCacheSizeSelfTest.class)); suite.addTest(new 
TestSuite(GridP2PContinuousDeploymentSelfTest.class)); + suite.addTest(new TestSuite(DeploymentClassLoaderCallableTest.class)); GridTestUtils.addTestIfNeeded(suite, GridDeploymentMessageCountSelfTest.class, ignoredTests); return suite; diff --git a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java new file mode 100644 index 0000000000000..16ce49363810c --- /dev/null +++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java @@ -0,0 +1,38 @@ +package org.apache.ignite.tests.p2p.compute; + +import org.apache.ignite.Ignite; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.resources.IgniteInstanceResource; + +/** + */ +public class ExternalCallable implements IgniteCallable { + /** */ + @IgniteInstanceResource + Ignite ignite; + + /** */ + private int param; + + /** + */ + public ExternalCallable() { + // No-op. + } + + /** + * @param param Param. + */ + public ExternalCallable(int param) { + this.param = param; + } + + /** {@inheritDoc} */ + @Override public Object call() { + System.err.println("!!!!! 
I am job " + param + " on " + ignite.name()); + + return 42; + } +} + + diff --git a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java new file mode 100644 index 0000000000000..f7bd8a793e09a --- /dev/null +++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java @@ -0,0 +1,39 @@ +package org.apache.ignite.tests.p2p.compute; + +import org.apache.ignite.Ignite; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.resources.IgniteInstanceResource; + +/** + */ +public class ExternalCallable1 implements IgniteCallable { + /** */ + @IgniteInstanceResource + Ignite ignite; + + /** */ + private int param; + + /** + * + */ + public ExternalCallable1() { + // No-op. + } + + /** + * @param param Param. + */ + public ExternalCallable1(int param) { + this.param = param; + } + + /** {@inheritDoc} */ + @Override public Object call() { + System.err.println("!!!!! I am job_1 " + param + " on " + ignite.name()); + + return 42; + } +} + + diff --git a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java new file mode 100644 index 0000000000000..7a09f9d7defba --- /dev/null +++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java @@ -0,0 +1,39 @@ +package org.apache.ignite.tests.p2p.compute; + +import org.apache.ignite.Ignite; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.resources.IgniteInstanceResource; + +/** + */ +public class ExternalCallable2 implements IgniteCallable { + /** */ + @IgniteInstanceResource + Ignite ignite; + + /** */ + private int param; + + /** + * + */ + public ExternalCallable2() { + // No-op. + } + + /** + * @param param Param. 
+ */ + public ExternalCallable2(int param) { + this.param = param; + } + + /** {@inheritDoc} */ + @Override public Object call() { + System.err.println("!!!!! I am job_2 " + param + " on " + ignite.name()); + + return 42; + } +} + + From 691d1e16fb235cc198b190fc9be530995e427d63 Mon Sep 17 00:00:00 2001 From: vd-pyatkov Date: Wed, 8 Nov 2017 15:32:14 +0300 Subject: [PATCH 099/243] Fixed licensed header (cherry picked from commit 32a24db) --- .../p2p/DeploymentClassLoaderCallableTest.java | 17 +++++++++++++++++ .../tests/p2p/compute/ExternalCallable.java | 17 +++++++++++++++++ .../tests/p2p/compute/ExternalCallable1.java | 17 +++++++++++++++++ .../tests/p2p/compute/ExternalCallable2.java | 17 +++++++++++++++++ 4 files changed, 68 insertions(+) diff --git a/modules/core/src/test/java/org/apache/ignite/p2p/DeploymentClassLoaderCallableTest.java b/modules/core/src/test/java/org/apache/ignite/p2p/DeploymentClassLoaderCallableTest.java index a9cec685d7cef..9c0e4462f7970 100644 --- a/modules/core/src/test/java/org/apache/ignite/p2p/DeploymentClassLoaderCallableTest.java +++ b/modules/core/src/test/java/org/apache/ignite/p2p/DeploymentClassLoaderCallableTest.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.ignite.p2p; import java.lang.reflect.Constructor; diff --git a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java index 16ce49363810c..25f1f3ea832d6 100644 --- a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java +++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.ignite.tests.p2p.compute; import org.apache.ignite.Ignite; diff --git a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java index f7bd8a793e09a..6a6befc7d6265 100644 --- a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java +++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.ignite.tests.p2p.compute; import org.apache.ignite.Ignite; diff --git a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java index 7a09f9d7defba..7d1d0f78f6bd2 100644 --- a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java +++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.ignite.tests.p2p.compute; import org.apache.ignite.Ignite; From f129ab64116ab02918359c44bd5802bd0500b279 Mon Sep 17 00:00:00 2001 From: Sergey Chugunov Date: Tue, 7 Nov 2017 15:10:18 +0300 Subject: [PATCH 100/243] IGNITE-6722 Writing to disk metadate in registerLocally methods was added Signed-off-by: Andrey Gura (cherry-picked commit 3132a18) --- .../internal/MarshallerContextImpl.java | 2 + .../internal/MarshallerMappingFileStore.java | 23 ++++++++++ .../cache/binary/BinaryMetadataFileStore.java | 43 ++++++++++++++++++- .../cache/binary/BinaryMetadataTransport.java | 4 +- .../CacheObjectBinaryProcessorImpl.java | 15 +++++-- 5 files changed, 81 insertions(+), 6 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java index 1e5c3708d7422..08661a3b11414 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java @@ -277,6 +277,8 @@ private void processResource(URL url) throws IOException { { ConcurrentMap cache = getCacheFor(platformId); + fileStore.mergeAndWriteMapping(platformId, typeId, clsName); + cache.put(typeId, new MappedName(clsName, true)); return true; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerMappingFileStore.java b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerMappingFileStore.java index 59a99b861751a..6fb1371f10ea3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerMappingFileStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerMappingFileStore.java @@ -184,6 +184,29 @@ void restoreMappings(MarshallerContext marshCtx) throws IgniteCheckedException { } } + /** + * 
Checks if marshaller mapping for given [platformId, typeId] pair is already presented on disk. + * If so verifies that it is the same (if no {@link IgniteCheckedException} is thrown). + * If there is not such mapping writes it. + * + * @param platformId Platform id. + * @param typeId Type id. + * @param typeName Type name. + */ + void mergeAndWriteMapping(byte platformId, int typeId, String typeName) throws IgniteCheckedException { + String existingTypeName = readMapping(platformId, typeId); + + if (existingTypeName != null) { + if (!existingTypeName.equals(typeName)) + throw new IgniteCheckedException("Failed to merge new and existing marshaller mappings." + + " For [platformId=" + platformId + ", typeId=" + typeId + "]" + + " new typeName=" + typeName + ", existing typeName=" + existingTypeName + "." + + " Consider cleaning up persisted mappings from /marshaller directory."); + } + else + writeMapping(platformId, typeId, typeName); + } + /** * @param fileName Name of file with marshaller mapping information. * @throws IgniteCheckedException If file name format is broken. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java index 19514c0e3acf4..a58918b4cd43f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java @@ -24,6 +24,7 @@ import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.binary.BinaryMetadata; +import org.apache.ignite.internal.binary.BinaryUtils; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.Nullable; @@ -85,7 +86,7 @@ class BinaryMetadataFileStore { /** * @param binMeta Binary metadata to be written to disk. */ - void saveMetadata(BinaryMetadata binMeta) { + void writeMetadata(BinaryMetadata binMeta) { if (!CU.isPersistenceEnabled(ctx.config())) return; @@ -123,4 +124,44 @@ void restoreMetadata() { } } } + + /** + * Checks if binary metadata for the same typeId is already presented on disk. + * If so merges it with new metadata and stores the result. + * Otherwise just writes new metadata. + * + * @param binMeta new binary metadata to write to disk. + */ + void mergeAndWriteMetadata(BinaryMetadata binMeta) { + BinaryMetadata existingMeta = readMetadata(binMeta.typeId()); + + if (existingMeta != null) { + BinaryMetadata mergedMeta = BinaryUtils.mergeMetadata(existingMeta, binMeta); + + writeMetadata(mergedMeta); + } else + writeMetadata(binMeta); + } + + /** + * Reads binary metadata for given typeId. + * + * @param typeId typeId of BinaryMetadata to be read. 
+ */ + private BinaryMetadata readMetadata(int typeId) { + File file = new File(workDir, Integer.toString(typeId) + ".bin"); + + if (!file.exists()) + return null; + + try (FileInputStream in = new FileInputStream(file)) { + return U.unmarshal(ctx.config().getMarshaller(), in, U.resolveClassLoader(ctx.config())); + } + catch (Exception e) { + U.warn(log, "Failed to restore metadata from file: " + file.getName() + + "; exception was thrown: " + e.getMessage()); + } + + return null; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataTransport.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataTransport.java index 010ab0f46311d..3a77190b60247 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataTransport.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataTransport.java @@ -462,9 +462,9 @@ private final class MetadataUpdateAcceptedListener implements CustomEventListene return; } - metaLocCache.put(typeId, new BinaryMetadataHolder(holder.metadata(), holder.pendingVersion(), newAcceptedVer)); + metadataFileStore.writeMetadata(holder.metadata()); - metadataFileStore.saveMetadata(holder.metadata()); + metaLocCache.put(typeId, new BinaryMetadataHolder(holder.metadata(), holder.pendingVersion(), newAcceptedVer)); } for (BinaryMetadataUpdatedListener lsnr : binaryUpdatedLsnrs) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java index 6a709360b05c6..ed4c520053dad 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java @@ -463,9 +463,18 @@ public GridBinaryMarshaller marshaller() { BinaryMetadata oldMeta = metaHolder != null ? metaHolder.metadata() : null; - BinaryMetadata mergedMeta = BinaryUtils.mergeMetadata(oldMeta, newMeta0); + try { + BinaryMetadata mergedMeta = BinaryUtils.mergeMetadata(oldMeta, newMeta0); + + metadataFileStore.mergeAndWriteMetadata(mergedMeta); - metadataLocCache.put(typeId, new BinaryMetadataHolder(mergedMeta, 0, 0)); + metadataLocCache.put(typeId, new BinaryMetadataHolder(mergedMeta, 0, 0)); + } + catch (BinaryObjectException e) { + throw new BinaryObjectException("New binary metadata is incompatible with binary metadata" + + " persisted locally." + + " Consider cleaning up persisted metadata from /binary_meta directory.", e); + } } /** {@inheritDoc} */ @@ -918,7 +927,7 @@ else if (type == BinaryObjectImpl.TYPE_BINARY_ENUM) metadataLocCache.put(e.getKey(), localHolder); if (!ctx.clientNode()) - metadataFileStore.saveMetadata(holder.metadata()); + metadataFileStore.writeMetadata(holder.metadata()); } } } From c29777096d51b88f2d29eb7858a10703cdac8884 Mon Sep 17 00:00:00 2001 From: alexdel Date: Fri, 10 Nov 2017 09:26:42 +0700 Subject: [PATCH 101/243] IGNITE-6833 Web Console: Fixed incremental SASS compilation in dev-server mode. 
(cherry picked from commit 8c343a1) --- modules/web-console/frontend/package.json | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/web-console/frontend/package.json b/modules/web-console/frontend/package.json index 95b4a2b26324c..38208df68eebb 100644 --- a/modules/web-console/frontend/package.json +++ b/modules/web-console/frontend/package.json @@ -63,13 +63,13 @@ "bootstrap-sass": "3.3.7", "brace": "0.10.0", "copy-webpack-plugin": "4.0.1", - "css-loader": "0.28.4", + "css-loader": "0.28.7", "eslint": "4.3.0", "eslint-friendly-formatter": "3.0.0", "eslint-loader": "1.9.0", "eslint-plugin-babel": "4.1.1", "expose-loader": "0.7.3", - "extract-text-webpack-plugin": "3.0.0", + "extract-text-webpack-plugin": "3.0.2", "file-loader": "0.11.2", "file-saver": "1.3.3", "font-awesome": "4.7.0", @@ -81,7 +81,7 @@ "json-bigint": "0.2.3", "jszip": "3.1.4", "lodash": "4.17.4", - "node-sass": "4.5.3", + "node-sass": "4.6.0", "nvd3": "1.8.4", "pako": "1.0.6", "progress-bar-webpack-plugin": "1.10.0", @@ -93,12 +93,12 @@ "rxjs": "5.4.2", "sass-loader": "6.0.6", "socket.io-client": "1.7.3", - "style-loader": "0.18.2", + "style-loader": "0.19.0", "svg-sprite-loader": "3.0.7", "tf-metatags": "2.0.0", - "webpack": "3.3.0", - "webpack-dev-server": "2.6.1", - "webpack-merge": "4.1.0", + "webpack": "3.8.1", + "webpack-dev-server": "2.9.4", + "webpack-merge": "4.1.1", "worker-loader": "0.8.1" }, "devDependencies": { From 4f59b3b589e9074ca4398e137dd338239b3d5f80 Mon Sep 17 00:00:00 2001 From: alexdel Date: Fri, 10 Nov 2017 09:46:43 +0700 Subject: [PATCH 102/243] IGNITE-6824 Web Console: Upgraded Angular from 1.5.x to 1.6.x. 
(cherry picked from commit 85cf958) --- .../ui-ace-docker/ui-ace-docker.controller.js | 26 +-- .../ui-ace-java/ui-ace-java.controller.js | 150 +++++++++--------- .../ui-ace-pojos/ui-ace-pojos.controller.js | 148 ++++++++--------- .../ui-ace-pom/ui-ace-pom.controller.js | 26 +-- .../ui-ace-sharp/ui-ace-sharp.controller.js | 12 +- .../ui-ace-spring/ui-ace-spring.controller.js | 146 ++++++++--------- .../app/modules/dialog/dialog.controller.js | 26 +-- .../app/modules/form/field/down.directive.js | 16 +- .../app/modules/form/field/up.directive.js | 14 +- .../app/modules/form/group/add.directive.js | 24 ++- .../modules/form/group/tooltip.directive.js | 24 ++- .../app/modules/navbar/userbar.directive.js | 32 ++-- modules/web-console/frontend/package.json | 17 +- 13 files changed, 341 insertions(+), 320 deletions(-) diff --git a/modules/web-console/frontend/app/directives/ui-ace-docker/ui-ace-docker.controller.js b/modules/web-console/frontend/app/directives/ui-ace-docker/ui-ace-docker.controller.js index 8ebaae4ba777c..4f443aead3c1a 100644 --- a/modules/web-console/frontend/app/directives/ui-ace-docker/ui-ace-docker.controller.js +++ b/modules/web-console/frontend/app/directives/ui-ace-docker/ui-ace-docker.controller.js @@ -18,20 +18,22 @@ export default ['$scope', 'IgniteVersion', 'IgniteDockerGenerator', function($scope, Version, docker) { const ctrl = this; - // Watchers definition. - const clusterWatcher = () => { - delete ctrl.data; + this.$onInit = () => { + // Watchers definition. + const clusterWatcher = () => { + delete ctrl.data; - if (!$scope.cluster) - return; + if (!$scope.cluster) + return; - ctrl.data = docker.generate($scope.cluster, Version.currentSbj.getValue()); - }; + ctrl.data = docker.generate($scope.cluster, Version.currentSbj.getValue()); + }; - // Setup watchers. - Version.currentSbj.subscribe({ - next: clusterWatcher - }); + // Setup watchers. 
+ Version.currentSbj.subscribe({ + next: clusterWatcher + }); - $scope.$watch('cluster', clusterWatcher); + $scope.$watch('cluster', clusterWatcher); + }; }]; diff --git a/modules/web-console/frontend/app/directives/ui-ace-java/ui-ace-java.controller.js b/modules/web-console/frontend/app/directives/ui-ace-java/ui-ace-java.controller.js index e50ac6cee3d45..22f7d183d03de 100644 --- a/modules/web-console/frontend/app/directives/ui-ace-java/ui-ace-java.controller.js +++ b/modules/web-console/frontend/app/directives/ui-ace-java/ui-ace-java.controller.js @@ -18,103 +18,105 @@ export default ['IgniteVersion', 'JavaTransformer', function(Version, java) { const ctrl = this; - delete ctrl.data; + this.$onInit = () => { + delete ctrl.data; - const client = ctrl.client === 'true'; + const client = ctrl.client === 'true'; - const available = Version.available.bind(Version); + const available = Version.available.bind(Version); - // Setup generator. - switch (ctrl.generator) { - case 'igniteConfiguration': - const clsName = client ? 'ClientConfigurationFactory' : 'ServerConfigurationFactory'; + // Setup generator. + switch (ctrl.generator) { + case 'igniteConfiguration': + const clsName = client ? 
'ClientConfigurationFactory' : 'ServerConfigurationFactory'; - ctrl.generate = (cluster) => java.cluster(cluster, Version.currentSbj.getValue(), 'config', clsName, client); + ctrl.generate = (cluster) => java.cluster(cluster, Version.currentSbj.getValue(), 'config', clsName, client); - break; - case 'clusterCaches': - ctrl.generate = (cluster, caches) => { - const clusterCaches = _.reduce(caches, (acc, cache) => { - if (_.includes(cluster.caches, cache.value)) - acc.push(cache.cache); + break; + case 'clusterCaches': + ctrl.generate = (cluster, caches) => { + const clusterCaches = _.reduce(caches, (acc, cache) => { + if (_.includes(cluster.caches, cache.value)) + acc.push(cache.cache); - return acc; - }, []); + return acc; + }, []); - const cfg = java.generator.clusterGeneral(cluster, available); + const cfg = java.generator.clusterGeneral(cluster, available); - java.generator.clusterCaches(cluster, clusterCaches, null, available, false, cfg); + java.generator.clusterCaches(cluster, clusterCaches, null, available, false, cfg); - return java.toSection(cfg); - }; + return java.toSection(cfg); + }; - break; - case 'cacheStore': - case 'cacheQuery': - ctrl.generate = (cache, domains) => { - const cacheDomains = _.reduce(domains, (acc, domain) => { - if (_.includes(cache.domains, domain.value)) - acc.push(domain.meta); + break; + case 'cacheStore': + case 'cacheQuery': + ctrl.generate = (cache, domains) => { + const cacheDomains = _.reduce(domains, (acc, domain) => { + if (_.includes(cache.domains, domain.value)) + acc.push(domain.meta); - return acc; - }, []); + return acc; + }, []); - return java[ctrl.generator](cache, cacheDomains, available); - }; + return java[ctrl.generator](cache, cacheDomains, available); + }; - break; - case 'cacheNodeFilter': - ctrl.generate = (cache, igfss) => { - const cacheIgfss = _.reduce(igfss, (acc, igfs) => { - acc.push(igfs.igfs); + break; + case 'cacheNodeFilter': + ctrl.generate = (cache, igfss) => { + const cacheIgfss = 
_.reduce(igfss, (acc, igfs) => { + acc.push(igfs.igfs); - return acc; - }, []); + return acc; + }, []); - return java.cacheNodeFilter(cache, cacheIgfss); - }; + return java.cacheNodeFilter(cache, cacheIgfss); + }; - break; - case 'clusterServiceConfiguration': - ctrl.generate = (cluster, caches) => { - const clusterCaches = _.reduce(caches, (acc, cache) => { - if (_.includes(cluster.caches, cache.value)) - acc.push(cache.cache); + break; + case 'clusterServiceConfiguration': + ctrl.generate = (cluster, caches) => { + const clusterCaches = _.reduce(caches, (acc, cache) => { + if (_.includes(cluster.caches, cache.value)) + acc.push(cache.cache); - return acc; - }, []); + return acc; + }, []); - return java.clusterServiceConfiguration(cluster.serviceConfigurations, clusterCaches); - }; + return java.clusterServiceConfiguration(cluster.serviceConfigurations, clusterCaches); + }; - break; - case 'clusterCheckpoint': - ctrl.generate = (cluster, caches) => { - const clusterCaches = _.reduce(caches, (acc, cache) => { - if (_.includes(cluster.caches, cache.value)) - acc.push(cache.cache); + break; + case 'clusterCheckpoint': + ctrl.generate = (cluster, caches) => { + const clusterCaches = _.reduce(caches, (acc, cache) => { + if (_.includes(cluster.caches, cache.value)) + acc.push(cache.cache); - return acc; - }, []); + return acc; + }, []); - return java.clusterCheckpoint(cluster, clusterCaches); - }; + return java.clusterCheckpoint(cluster, clusterCaches); + }; - break; - case 'igfss': - ctrl.generate = (cluster, igfss) => { - const clusterIgfss = _.reduce(igfss, (acc, igfs) => { - if (_.includes(cluster.igfss, igfs.value)) - acc.push(igfs.igfs); + break; + case 'igfss': + ctrl.generate = (cluster, igfss) => { + const clusterIgfss = _.reduce(igfss, (acc, igfs) => { + if (_.includes(cluster.igfss, igfs.value)) + acc.push(igfs.igfs); - return acc; - }, []); + return acc; + }, []); - return java.clusterIgfss(clusterIgfss, available); - }; + return 
java.clusterIgfss(clusterIgfss, available); + }; - break; - default: - ctrl.generate = (master) => java[ctrl.generator](master, available); - } + break; + default: + ctrl.generate = (master) => java[ctrl.generator](master, available); + } + }; }]; diff --git a/modules/web-console/frontend/app/directives/ui-ace-pojos/ui-ace-pojos.controller.js b/modules/web-console/frontend/app/directives/ui-ace-pojos/ui-ace-pojos.controller.js index 61bf086bc741e..774d73e3e6fc2 100644 --- a/modules/web-console/frontend/app/directives/ui-ace-pojos/ui-ace-pojos.controller.js +++ b/modules/web-console/frontend/app/directives/ui-ace-pojos/ui-ace-pojos.controller.js @@ -18,78 +18,80 @@ export default ['$scope', 'JavaTypes', 'JavaTransformer', function($scope, JavaTypes, generator) { const ctrl = this; - // Watchers definition. - // Watcher clean instance data if instance to cluster caches was change - const cleanPojos = () => { - delete ctrl.class; - delete ctrl.pojos; - delete ctrl.classes; + this.$onInit = () => { + // Watchers definition. + // Watcher clean instance data if instance to cluster caches was change + const cleanPojos = () => { + delete ctrl.class; + delete ctrl.pojos; + delete ctrl.classes; + }; + + // Watcher update pojos when changes caches and checkers useConstructor and includeKeyFields + const updatePojos = () => { + delete ctrl.pojos; + + if (_.isNil(ctrl.cluster) || _.isEmpty(ctrl.cluster.caches)) + return; + + ctrl.pojos = generator.pojos(ctrl.cluster.caches, ctrl.useConstructor, ctrl.includeKeyFields); + }; + + // Watcher update classes after + const updateClasses = (value) => { + delete ctrl.classes; + + if (!value) + return; + + const classes = ctrl.classes = []; + + _.forEach(ctrl.pojos, (pojo) => { + if (_.nonNil(pojo.keyClass)) + classes.push(pojo.keyType); + + classes.push(pojo.valueType); + }); + }; + + // Update pojos class. 
+ const updateClass = (value) => { + if (_.isEmpty(value)) + return; + + const pojo = value[0]; + + ctrl.class = ctrl.class || (pojo.keyClass ? pojo.keyType : pojo.valueType); + }; + + // Update pojos data. + const updatePojosData = (value) => { + if (_.isNil(value)) + return; + + _.forEach(ctrl.pojos, (pojo) => { + if (pojo.keyType === ctrl.class) { + ctrl.data = pojo.keyClass; + + return false; + } + + if (pojo.valueType === ctrl.class) { + ctrl.data = pojo.valueClass; + + return false; + } + }); + }; + + // Setup watchers. Watchers order is important. + $scope.$watch('ctrl.cluster.caches', cleanPojos); + $scope.$watch('ctrl.cluster.caches', updatePojos); + $scope.$watch('ctrl.cluster.caches', updateClasses); + $scope.$watch('ctrl.useConstructor', updatePojos); + $scope.$watch('ctrl.includeKeyFields', updatePojos); + $scope.$watch('ctrl.pojos', updateClass); + $scope.$watch('ctrl.pojos', updatePojosData); + $scope.$watch('ctrl.class', updatePojosData); }; - - // Watcher update pojos when changes caches and checkers useConstructor and includeKeyFields - const updatePojos = () => { - delete ctrl.pojos; - - if (_.isNil(ctrl.cluster) || _.isEmpty(ctrl.cluster.caches)) - return; - - ctrl.pojos = generator.pojos(ctrl.cluster.caches, ctrl.useConstructor, ctrl.includeKeyFields); - }; - - // Watcher update classes after - const updateClasses = (value) => { - delete ctrl.classes; - - if (!value) - return; - - const classes = ctrl.classes = []; - - _.forEach(ctrl.pojos, (pojo) => { - if (_.nonNil(pojo.keyClass)) - classes.push(pojo.keyType); - - classes.push(pojo.valueType); - }); - }; - - // Update pojos class. - const updateClass = (value) => { - if (_.isEmpty(value)) - return; - - const pojo = value[0]; - - ctrl.class = ctrl.class || (pojo.keyClass ? pojo.keyType : pojo.valueType); - }; - - // Update pojos data. 
- const updatePojosData = (value) => { - if (_.isNil(value)) - return; - - _.forEach(ctrl.pojos, (pojo) => { - if (pojo.keyType === ctrl.class) { - ctrl.data = pojo.keyClass; - - return false; - } - - if (pojo.valueType === ctrl.class) { - ctrl.data = pojo.valueClass; - - return false; - } - }); - }; - - // Setup watchers. Watchers order is important. - $scope.$watch('ctrl.cluster.caches', cleanPojos); - $scope.$watch('ctrl.cluster.caches', updatePojos); - $scope.$watch('ctrl.cluster.caches', updateClasses); - $scope.$watch('ctrl.useConstructor', updatePojos); - $scope.$watch('ctrl.includeKeyFields', updatePojos); - $scope.$watch('ctrl.pojos', updateClass); - $scope.$watch('ctrl.pojos', updatePojosData); - $scope.$watch('ctrl.class', updatePojosData); }]; diff --git a/modules/web-console/frontend/app/directives/ui-ace-pom/ui-ace-pom.controller.js b/modules/web-console/frontend/app/directives/ui-ace-pom/ui-ace-pom.controller.js index 0135eb3ad3c7e..2e421b2d6b82d 100644 --- a/modules/web-console/frontend/app/directives/ui-ace-pom/ui-ace-pom.controller.js +++ b/modules/web-console/frontend/app/directives/ui-ace-pom/ui-ace-pom.controller.js @@ -18,20 +18,22 @@ export default ['$scope', 'IgniteVersion', 'IgniteMavenGenerator', function($scope, Version, maven) { const ctrl = this; - // Watchers definition. - const clusterWatcher = (value) => { - delete ctrl.data; + this.$onInit = () => { + // Watchers definition. + const clusterWatcher = (value) => { + delete ctrl.data; - if (!value) - return; + if (!value) + return; - ctrl.data = maven.generate($scope.cluster, Version.currentSbj.getValue()); - }; + ctrl.data = maven.generate($scope.cluster, Version.currentSbj.getValue()); + }; - // Setup watchers. - Version.currentSbj.subscribe({ - next: clusterWatcher - }); + // Setup watchers. 
+ Version.currentSbj.subscribe({ + next: clusterWatcher + }); - $scope.$watch('cluster', clusterWatcher); + $scope.$watch('cluster', clusterWatcher); + }; }]; diff --git a/modules/web-console/frontend/app/directives/ui-ace-sharp/ui-ace-sharp.controller.js b/modules/web-console/frontend/app/directives/ui-ace-sharp/ui-ace-sharp.controller.js index e600773bf680b..e87cacad214eb 100644 --- a/modules/web-console/frontend/app/directives/ui-ace-sharp/ui-ace-sharp.controller.js +++ b/modules/web-console/frontend/app/directives/ui-ace-sharp/ui-ace-sharp.controller.js @@ -21,12 +21,14 @@ const CLIENT_CFG = 'ClientConfigurationFactory'; export default ['$scope', 'IgniteSharpTransformer', function($scope, generator) { const ctrl = this; - delete ctrl.data; + this.$onInit = () => { + delete ctrl.data; - // Set default generator - ctrl.generator = (cluster) => { - const type = $scope.cfg ? CLIENT_CFG : SERVER_CFG; + // Set default generator + ctrl.generator = (cluster) => { + const type = $scope.cfg ? CLIENT_CFG : SERVER_CFG; - return generator.cluster(cluster, 'config', type, $scope.cfg); + return generator.cluster(cluster, 'config', type, $scope.cfg); + }; }; }]; diff --git a/modules/web-console/frontend/app/directives/ui-ace-spring/ui-ace-spring.controller.js b/modules/web-console/frontend/app/directives/ui-ace-spring/ui-ace-spring.controller.js index 17da1fd18d14c..7eccf6daba3bd 100644 --- a/modules/web-console/frontend/app/directives/ui-ace-spring/ui-ace-spring.controller.js +++ b/modules/web-console/frontend/app/directives/ui-ace-spring/ui-ace-spring.controller.js @@ -18,99 +18,101 @@ export default ['IgniteVersion', 'SpringTransformer', function(Version, spring) { const ctrl = this; - delete ctrl.data; + this.$onInit = () => { + delete ctrl.data; - const available = Version.available.bind(Version); + const available = Version.available.bind(Version); - // Setup generator. 
- switch (ctrl.generator) { - case 'igniteConfiguration': - ctrl.generate = (cluster) => spring.cluster(cluster, Version.currentSbj.getValue(), ctrl.client === 'true'); + // Setup generator. + switch (ctrl.generator) { + case 'igniteConfiguration': + ctrl.generate = (cluster) => spring.cluster(cluster, Version.currentSbj.getValue(), ctrl.client === 'true'); - break; - case 'clusterCaches': - ctrl.generate = (cluster, caches) => { - const clusterCaches = _.reduce(caches, (acc, cache) => { - if (_.includes(cluster.caches, cache.value)) - acc.push(cache.cache); + break; + case 'clusterCaches': + ctrl.generate = (cluster, caches) => { + const clusterCaches = _.reduce(caches, (acc, cache) => { + if (_.includes(cluster.caches, cache.value)) + acc.push(cache.cache); - return acc; - }, []); + return acc; + }, []); - const cfg = spring.generator.clusterGeneral(cluster, available); + const cfg = spring.generator.clusterGeneral(cluster, available); - spring.generator.clusterCaches(cluster, clusterCaches, null, available, false, cfg); + spring.generator.clusterCaches(cluster, clusterCaches, null, available, false, cfg); - return spring.toSection(cfg); - }; + return spring.toSection(cfg); + }; - break; - case 'cacheStore': - case 'cacheQuery': - ctrl.generate = (cache, domains) => { - const cacheDomains = _.reduce(domains, (acc, domain) => { - if (_.includes(cache.domains, domain.value)) - acc.push(domain.meta); + break; + case 'cacheStore': + case 'cacheQuery': + ctrl.generate = (cache, domains) => { + const cacheDomains = _.reduce(domains, (acc, domain) => { + if (_.includes(cache.domains, domain.value)) + acc.push(domain.meta); - return acc; - }, []); + return acc; + }, []); - return spring[ctrl.generator](cache, cacheDomains, available); - }; + return spring[ctrl.generator](cache, cacheDomains, available); + }; - break; - case 'cacheNodeFilter': - ctrl.generate = (cache, igfss) => { - const cacheIgfss = _.reduce(igfss, (acc, igfs) => { - acc.push(igfs.igfs); + break; + case 
'cacheNodeFilter': + ctrl.generate = (cache, igfss) => { + const cacheIgfss = _.reduce(igfss, (acc, igfs) => { + acc.push(igfs.igfs); - return acc; - }, []); + return acc; + }, []); - return spring.cacheNodeFilter(cache, cacheIgfss); - }; + return spring.cacheNodeFilter(cache, cacheIgfss); + }; - break; - case 'clusterServiceConfiguration': - ctrl.generate = (cluster, caches) => { - const clusterCaches = _.reduce(caches, (acc, cache) => { - if (_.includes(cluster.caches, cache.value)) - acc.push(cache.cache); + break; + case 'clusterServiceConfiguration': + ctrl.generate = (cluster, caches) => { + const clusterCaches = _.reduce(caches, (acc, cache) => { + if (_.includes(cluster.caches, cache.value)) + acc.push(cache.cache); - return acc; - }, []); + return acc; + }, []); - return spring.clusterServiceConfiguration(cluster.serviceConfigurations, clusterCaches); - }; + return spring.clusterServiceConfiguration(cluster.serviceConfigurations, clusterCaches); + }; - break; - case 'clusterCheckpoint': - ctrl.generate = (cluster, caches) => { - const clusterCaches = _.reduce(caches, (acc, cache) => { - if (_.includes(cluster.caches, cache.value)) - acc.push(cache.cache); + break; + case 'clusterCheckpoint': + ctrl.generate = (cluster, caches) => { + const clusterCaches = _.reduce(caches, (acc, cache) => { + if (_.includes(cluster.caches, cache.value)) + acc.push(cache.cache); - return acc; - }, []); + return acc; + }, []); - return spring.clusterCheckpoint(cluster, clusterCaches); - }; + return spring.clusterCheckpoint(cluster, clusterCaches); + }; - break; - case 'igfss': - ctrl.generate = (cluster, igfss) => { - const clusterIgfss = _.reduce(igfss, (acc, igfs) => { - if (_.includes(cluster.igfss, igfs.value)) - acc.push(igfs.igfs); + break; + case 'igfss': + ctrl.generate = (cluster, igfss) => { + const clusterIgfss = _.reduce(igfss, (acc, igfs) => { + if (_.includes(cluster.igfss, igfs.value)) + acc.push(igfs.igfs); - return acc; - }, []); + return acc; + }, []); - 
return spring.clusterIgfss(clusterIgfss, available); - }; + return spring.clusterIgfss(clusterIgfss, available); + }; - break; - default: - ctrl.generate = (master) => spring[ctrl.generator](master, available); - } + break; + default: + ctrl.generate = (master) => spring[ctrl.generator](master, available); + } + }; }]; diff --git a/modules/web-console/frontend/app/modules/dialog/dialog.controller.js b/modules/web-console/frontend/app/modules/dialog/dialog.controller.js index 0256b84e4753a..a75ff1f599058 100644 --- a/modules/web-console/frontend/app/modules/dialog/dialog.controller.js +++ b/modules/web-console/frontend/app/modules/dialog/dialog.controller.js @@ -18,19 +18,21 @@ export default ['$rootScope', '$scope', 'IgniteDialog', function($root, $scope, IgniteDialog) { const ctrl = this; - const dialog = new IgniteDialog({ - scope: $scope - }); + this.$onInit = () => { + const dialog = new IgniteDialog({ + scope: $scope + }); - ctrl.show = () => { - dialog.$promise.then(dialog.show); - }; + ctrl.show = () => { + dialog.$promise.then(dialog.show); + }; - $scope.$watch(() => ctrl.title, () => { - $scope.title = ctrl.title; - }); + $scope.$watch(() => ctrl.title, () => { + $scope.title = ctrl.title; + }); - $scope.$watch(() => ctrl.content, () => { - $scope.content = ctrl.content; - }); + $scope.$watch(() => ctrl.content, () => { + $scope.content = ctrl.content; + }); + }; }]; diff --git a/modules/web-console/frontend/app/modules/form/field/down.directive.js b/modules/web-console/frontend/app/modules/form/field/down.directive.js index e9c2aa7ecd43a..c957e9788d94b 100644 --- a/modules/web-console/frontend/app/modules/form/field/down.directive.js +++ b/modules/web-console/frontend/app/modules/form/field/down.directive.js @@ -17,14 +17,20 @@ export default ['igniteFormFieldDown', ['$tooltip', ($tooltip) => { const controller = ['$element', function($element) { - $tooltip($element, { title: 'Move item down' }); + const ctrl = this; - this.down = () => { - const i = 
this.models.indexOf(this.model); + this.$onInit = () => { + $tooltip($element, { title: 'Move item down' }); - this.models.splice(i, 1); - this.models.splice(i + 1, 0, this.model); + ctrl.down = () => { + const i = ctrl.models.indexOf(ctrl.model); + + ctrl.models.splice(i, 1); + ctrl.models.splice(i + 1, 0, ctrl.model); + }; }; + + }]; return { diff --git a/modules/web-console/frontend/app/modules/form/field/up.directive.js b/modules/web-console/frontend/app/modules/form/field/up.directive.js index c0140de56f41a..6f87180b67929 100644 --- a/modules/web-console/frontend/app/modules/form/field/up.directive.js +++ b/modules/web-console/frontend/app/modules/form/field/up.directive.js @@ -17,13 +17,17 @@ export default ['igniteFormFieldUp', ['$tooltip', ($tooltip) => { const controller = ['$element', function($element) { - $tooltip($element, { title: 'Move item up' }); + const ctrl = this; - this.up = () => { - const idx = this.models.indexOf(this.model); + this.$onInit = () => { + $tooltip($element, { title: 'Move item up' }); - this.models.splice(idx, 1); - this.models.splice(idx - 1, 0, this.model); + this.up = () => { + const idx = ctrl.models.indexOf(ctrl.model); + + ctrl.models.splice(idx, 1); + ctrl.models.splice(idx - 1, 0, ctrl.model); + }; }; }]; diff --git a/modules/web-console/frontend/app/modules/form/group/add.directive.js b/modules/web-console/frontend/app/modules/form/group/add.directive.js index 7e9a50c02717e..71070cc55644d 100644 --- a/modules/web-console/frontend/app/modules/form/group/add.directive.js +++ b/modules/web-console/frontend/app/modules/form/group/add.directive.js @@ -18,23 +18,21 @@ const template = ''; export default ['igniteFormGroupAdd', ['$tooltip', ($tooltip) => { - const link = ($scope, $element, $attrs, $ctrls, $transclude) => { - const content = Array.prototype.slice - .apply($transclude($scope)) - .reduce((html, el) => html += el.outerHTML || el.textContent || el, ''); - - $tooltip($element, { title: content }); - - 
$element.closest('.group').find('.group-legend').append($element); - }; - return { restrict: 'E', scope: {}, template, - link, + link($scope, $el, $attr, $ctrl, $transclude) { + $transclude((clone) => { + const title = Array.from(clone) + .reduce((html, el) => html += el.outerHTML || el.textContent || el, ''); + const legend = $el.closest('.group').find('.group-legend'); + + $tooltip($el, {title}); + legend.append($el); + }); + }, replace: true, - transclude: true, - require: ['^form'] + transclude: true }; }]]; diff --git a/modules/web-console/frontend/app/modules/form/group/tooltip.directive.js b/modules/web-console/frontend/app/modules/form/group/tooltip.directive.js index 6027765d22e3c..4190deeac9b3e 100644 --- a/modules/web-console/frontend/app/modules/form/group/tooltip.directive.js +++ b/modules/web-console/frontend/app/modules/form/group/tooltip.directive.js @@ -18,23 +18,21 @@ const template = ''; export default ['igniteFormGroupTooltip', ['$tooltip', ($tooltip) => { - const link = ($scope, $element, $attrs, $ctrls, $transclude) => { - const content = Array.prototype.slice - .apply($transclude($scope)) - .reduce((html, el) => html += el.outerHTML || el.textContent || el, ''); - - $tooltip($element, { title: content }); - - $element.closest('.group').find('.group-legend').append($element); - }; - return { restrict: 'E', scope: {}, template, - link, + link($scope, $el, $attr, $ctrl, $transclude) { + $transclude((clone) => { + const title = Array.from(clone) + .reduce((html, el) => html += el.outerHTML || el.textContent || el, ''); + const legend = $el.closest('.group').find('.group-legend'); + + $tooltip($el, {title}); + legend.append($el); + }); + }, replace: true, - transclude: true, - require: ['^form'] + transclude: true }; }]]; diff --git a/modules/web-console/frontend/app/modules/navbar/userbar.directive.js b/modules/web-console/frontend/app/modules/navbar/userbar.directive.js index 279314f1f074d..51f46d4c5187b 100644 --- 
a/modules/web-console/frontend/app/modules/navbar/userbar.directive.js +++ b/modules/web-console/frontend/app/modules/navbar/userbar.directive.js @@ -21,27 +21,29 @@ export default ['igniteUserbar', [function() { controller: ['$rootScope', 'IgniteUserbar', 'AclService', function($root, IgniteUserbar, AclService) { const ctrl = this; - ctrl.items = [ - {text: 'Profile', sref: 'base.settings.profile'}, - {text: 'Getting started', click: 'gettingStarted.tryShow(true)'} - ]; + this.$onInit = () => { + ctrl.items = [ + {text: 'Profile', sref: 'base.settings.profile'}, + {text: 'Getting started', click: 'gettingStarted.tryShow(true)'} + ]; - const _rebuildSettings = () => { - ctrl.items.splice(2); + const _rebuildSettings = () => { + ctrl.items.splice(2); - if (AclService.can('admin_page')) - ctrl.items.push({text: 'Admin panel', sref: 'base.settings.admin'}); + if (AclService.can('admin_page')) + ctrl.items.push({text: 'Admin panel', sref: 'base.settings.admin'}); - ctrl.items.push(...IgniteUserbar); + ctrl.items.push(...IgniteUserbar); - if (AclService.can('logout')) - ctrl.items.push({text: 'Log out', sref: 'logout'}); - }; + if (AclService.can('logout')) + ctrl.items.push({text: 'Log out', sref: 'logout'}); + }; - if ($root.user) - _rebuildSettings(null, $root.user); + if ($root.user) + _rebuildSettings(null, $root.user); - $root.$on('user', _rebuildSettings); + $root.$on('user', _rebuildSettings); + }; }], controllerAs: 'userbar' }; diff --git a/modules/web-console/frontend/package.json b/modules/web-console/frontend/package.json index 38208df68eebb..49e69b4636bdf 100644 --- a/modules/web-console/frontend/package.json +++ b/modules/web-console/frontend/package.json @@ -32,23 +32,22 @@ "win32" ], "dependencies": { - "@uirouter/angularjs": "1.0.5", - "angular": "1.5.11", - "angular-acl": "0.1.8", - "angular-animate": "1.5.11", - "angular-aria": "1.5.11", - "angular-cookies": "1.5.11", + "@uirouter/angularjs": "1.0.10", + "angular": "1.6.6", + "angular-acl": "0.1.10", 
+ "angular-animate": "1.6.6", + "angular-aria": "1.6.6", + "angular-cookies": "1.6.6", "angular-drag-and-drop-lists": "1.4.0", "angular-gridster": "0.13.14", "angular-motion": "0.4.4", "angular-nvd3": "1.0.9", "angular-retina": "0.4.0", - "angular-sanitize": "1.5.11", + "angular-sanitize": "1.6.6", "angular-smart-table": "2.1.8", "angular-socket-io": "0.7.0", "angular-strap": "2.3.12", - "angular-touch": "1.5.11", - "angular-translate": "2.15.2", + "angular-translate": "2.16.0", "angular-tree-control": "0.2.28", "angular-ui-grid": "4.0.7", "babel-core": "6.25.0", From ef00c02886d0649989f4811767d157aa20b8c0bf Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Tue, 31 Oct 2017 14:20:45 +0300 Subject: [PATCH 103/243] IGNITE-6357: Added support of multiple SQL statements for ODBC (cherry picked from commit 87e3797) --- .../odbc/odbc/OdbcMessageParser.java | 60 +++++- .../odbc/OdbcQueryExecuteBatchResult.java | 20 +- .../odbc/odbc/OdbcQueryExecuteResult.java | 11 +- .../odbc/OdbcQueryMoreResultsRequest.java | 61 ++++++ .../odbc/odbc/OdbcQueryMoreResultsResult.java | 66 ++++++ .../odbc/odbc/OdbcQueryResults.java | 106 ++++++++++ .../processors/odbc/odbc/OdbcRequest.java | 3 + .../odbc/odbc/OdbcRequestHandler.java | 175 +++++++-------- .../processors/odbc/odbc/OdbcResultSet.java | 101 +++++++++ .../processors/odbc/odbc/OdbcUtils.java | 31 +++ .../odbc-example/src/odbc_example.cpp | 8 +- .../cpp/odbc-test/src/queries_test.cpp | 200 +++++++++++++++++- .../cpp/odbc/include/ignite/odbc/message.h | 97 ++++++++- .../include/ignite/odbc/query/batch_query.h | 16 +- .../ignite/odbc/query/column_metadata_query.h | 7 + .../include/ignite/odbc/query/data_query.h | 22 +- .../ignite/odbc/query/foreign_keys_query.h | 7 + .../ignite/odbc/query/primary_keys_query.h | 7 + .../odbc/include/ignite/odbc/query/query.h | 7 + .../ignite/odbc/query/special_columns_query.h | 7 + .../ignite/odbc/query/table_metadata_query.h | 7 + .../ignite/odbc/query/type_info_query.h | 7 + 
.../cpp/odbc/include/ignite/odbc/statement.h | 6 +- modules/platforms/cpp/odbc/src/cursor.cpp | 2 +- modules/platforms/cpp/odbc/src/message.cpp | 62 +++++- modules/platforms/cpp/odbc/src/odbc.cpp | 2 +- .../cpp/odbc/src/query/batch_query.cpp | 80 +++---- .../odbc/src/query/column_metadata_query.cpp | 5 + .../cpp/odbc/src/query/data_query.cpp | 93 +++++++- .../cpp/odbc/src/query/foreign_keys_query.cpp | 5 + .../cpp/odbc/src/query/primary_keys_query.cpp | 5 + .../odbc/src/query/special_columns_query.cpp | 5 + .../odbc/src/query/table_metadata_query.cpp | 5 + .../cpp/odbc/src/query/type_info_query.cpp | 5 + modules/platforms/cpp/odbc/src/statement.cpp | 14 +- 35 files changed, 1103 insertions(+), 212 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryMoreResultsRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryMoreResultsResult.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryResults.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java index 04e2e25e3348f..bf74bc5fb66bf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java @@ -165,6 +165,15 @@ public OdbcMessageParser(GridKernalContext ctx, ClientListenerProtocolVersion ve break; } + case OdbcRequest.MORE_RESULTS: { + long queryId = reader.readLong(); + int pageSize = reader.readInt(); + + res = new OdbcQueryMoreResultsRequest(queryId, pageSize); + + break; + } + default: throw new 
IgniteException("Unknown ODBC command: [cmd=" + cmd + ']'); } @@ -233,13 +242,13 @@ else if (res0 instanceof OdbcQueryExecuteResult) { for (OdbcColumnMeta meta : metas) meta.write(writer); - writer.writeLong(res.affectedRows()); + writeAffectedRows(writer, res.affectedRows()); } else if (res0 instanceof OdbcQueryExecuteBatchResult) { OdbcQueryExecuteBatchResult res = (OdbcQueryExecuteBatchResult) res0; writer.writeBoolean(res.errorMessage() == null); - writer.writeLong(res.rowsAffected()); + writeAffectedRows(writer, res.affectedRows()); if (res.errorMessage() != null) { writer.writeLong(res.errorSetIdx()); @@ -276,6 +285,33 @@ else if (res0 instanceof OdbcQueryFetchResult) { } } } + else if (res0 instanceof OdbcQueryMoreResultsResult) { + OdbcQueryMoreResultsResult res = (OdbcQueryMoreResultsResult) res0; + + if (log.isDebugEnabled()) + log.debug("Resulting query ID: " + res.queryId()); + + writer.writeLong(res.queryId()); + + Collection items0 = res.items(); + + assert items0 != null; + + writer.writeBoolean(res.last()); + + writer.writeInt(items0.size()); + + for (Object row0 : items0) { + if (row0 != null) { + Collection row = (Collection)row0; + + writer.writeInt(row.size()); + + for (Object obj : row) + SqlListenerUtils.writeObject(writer, obj, true); + } + } + } else if (res0 instanceof OdbcQueryCloseResult) { OdbcQueryCloseResult res = (OdbcQueryCloseResult) res0; @@ -320,4 +356,24 @@ else if (res0 instanceof OdbcQueryGetParamsMetaResult) { return writer.array(); } + + /** + * @param writer Writer to use. + * @param affectedRows Affected rows. + */ + private void writeAffectedRows(BinaryWriterExImpl writer, Collection affectedRows) { + if (ver.compareTo(OdbcConnectionContext.VER_2_3_0) < 0) { + long summ = 0; + + for (Long value : affectedRows) + summ += value == null ? 
0 : value; + + writer.writeLong(summ); + } + else { + writer.writeInt(affectedRows.size()); + for (Long value : affectedRows) + writer.writeLong(value); + } + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteBatchResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteBatchResult.java index c8f61dc92160e..e86c7c8d6e234 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteBatchResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteBatchResult.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.processors.odbc.odbc; +import java.util.Collection; import org.apache.ignite.internal.processors.odbc.ClientListenerResponse; import org.jetbrains.annotations.Nullable; @@ -25,7 +26,7 @@ */ public class OdbcQueryExecuteBatchResult { /** Rows affected. */ - private final long rowsAffected; + private final Collection affectedRows; /** Index of the set which caused an error. */ private final long errorSetIdx; @@ -37,23 +38,24 @@ public class OdbcQueryExecuteBatchResult { private final String errorMessage; /** - * @param rowsAffected Number of rows affected by the query. + * @param affectedRows Number of rows affected by the query. */ - public OdbcQueryExecuteBatchResult(long rowsAffected) { - this.rowsAffected = rowsAffected; + public OdbcQueryExecuteBatchResult(Collection affectedRows) { + this.affectedRows = affectedRows; this.errorSetIdx = -1; this.errorMessage = null; this.errorCode = ClientListenerResponse.STATUS_SUCCESS; } /** - * @param rowsAffected Number of rows affected by the query. + * @param affectedRows Number of rows affected by the query. * @param errorSetIdx Sets processed. * @param errorCode Error code. * @param errorMessage Error message. 
*/ - public OdbcQueryExecuteBatchResult(long rowsAffected, long errorSetIdx, int errorCode, String errorMessage) { - this.rowsAffected = rowsAffected; + public OdbcQueryExecuteBatchResult(Collection affectedRows, long errorSetIdx, int errorCode, + String errorMessage) { + this.affectedRows = affectedRows; this.errorSetIdx = errorSetIdx; this.errorMessage = errorMessage; this.errorCode = errorCode; @@ -62,8 +64,8 @@ public OdbcQueryExecuteBatchResult(long rowsAffected, long errorSetIdx, int erro /** * @return Number of rows affected by the query. */ - public long rowsAffected() { - return rowsAffected; + public Collection affectedRows() { + return affectedRows; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteResult.java index 38dd0b4d7f55b..8182e97dcdc3c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteResult.java @@ -29,15 +29,16 @@ public class OdbcQueryExecuteResult { /** Fields metadata. */ private final Collection columnsMetadata; - /** Rows affected by the query. */ - private final long affectedRows; + /** Rows affected by the statements. */ + private final Collection affectedRows; /** * @param queryId Query ID. * @param columnsMetadata Columns metadata. * @param affectedRows Affected rows. */ - public OdbcQueryExecuteResult(long queryId, Collection columnsMetadata, long affectedRows) { + public OdbcQueryExecuteResult(long queryId, Collection columnsMetadata, + Collection affectedRows) { this.queryId = queryId; this.columnsMetadata = columnsMetadata; this.affectedRows = affectedRows; @@ -58,9 +59,9 @@ public Collection columnsMetadata() { } /** - * @return Number of rows affected by the query. 
+ * @return Number of rows affected by the statements. */ - public long affectedRows() { + public Collection affectedRows() { return affectedRows; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryMoreResultsRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryMoreResultsRequest.java new file mode 100644 index 0000000000000..5651f6d20f848 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryMoreResultsRequest.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.odbc; + +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * SQL listener query fetch request. + */ +public class OdbcQueryMoreResultsRequest extends OdbcRequest { + /** Query ID. */ + private final long queryId; + + /** Page size - maximum number of rows to return. */ + private final int pageSize; + + /** + * @param queryId Query ID. + * @param pageSize Page size. 
+ */ + public OdbcQueryMoreResultsRequest(long queryId, int pageSize) { + super(MORE_RESULTS); + + this.queryId = queryId; + this.pageSize = pageSize; + } + + /** + * @return Page size. + */ + public int pageSize() { + return pageSize; + } + + /** + * @return Query ID. + */ + public long queryId() { + return queryId; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(OdbcQueryMoreResultsRequest.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryMoreResultsResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryMoreResultsResult.java new file mode 100644 index 0000000000000..faa5e278bb6f4 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryMoreResultsResult.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.odbc; + +import java.util.Collection; + +/** + * SQL listener query fetch result. + */ +public class OdbcQueryMoreResultsResult { + /** Query ID. */ + private final long queryId; + + /** Query result rows. 
*/ + private final Collection items; + + /** Flag indicating the query has no non-fetched results. */ + private final boolean last; + + /** + * @param queryId Query ID. + * @param items Query result rows. + * @param last Flag indicating the query has no unfetched results. + */ + public OdbcQueryMoreResultsResult(long queryId, Collection items, boolean last){ + this.queryId = queryId; + this.items = items; + this.last = last; + } + + /** + * @return Query ID. + */ + public long queryId() { + return queryId; + } + + /** + * @return Query result rows. + */ + public Collection items() { + return items; + } + + /** + * @return Flag indicating the query has no non-fetched results. + */ + public boolean last() { + return last; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryResults.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryResults.java new file mode 100644 index 0000000000000..23788c7a860b3 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryResults.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.odbc.odbc; + +import java.util.ArrayList; +import java.util.List; +import org.apache.ignite.cache.query.FieldsQueryCursor; +import org.apache.ignite.internal.processors.cache.QueryCursorImpl; + +/** + * ODBC result set + */ +public class OdbcQueryResults { + /** Current cursor. */ + private final List>> cursors; + + /** Rows affected. */ + private final List rowsAffected; + + /** Current result set. */ + private OdbcResultSet currentResultSet; + + /** Current result set index. */ + private int currentResultSetIdx; + + /** + * @param cursors Result set cursors. + */ + OdbcQueryResults(List>> cursors) { + this.cursors = cursors; + this.currentResultSetIdx = 0; + + rowsAffected = new ArrayList<>(cursors.size()); + + for (FieldsQueryCursor> cursor : cursors) + rowsAffected.add(OdbcUtils.rowsAffected(cursor)); + + nextResultSet(); + } + + /** + * Get affected rows for all result sets. + * @return List of numbers of table rows affected by every statement. + */ + public List rowsAffected() { + return rowsAffected; + } + + /** + * @return {@code true} if any of the result sets still has non-fetched rows. + */ + public boolean hasUnfetchedRows() { + if (currentResultSet != null && currentResultSet.hasUnfetchedRows()) + return true; + + for (FieldsQueryCursor> cursor : cursors) { + QueryCursorImpl> cursor0 = (QueryCursorImpl>)cursor; + + if (cursor0.isQuery()) + return true; + } + return false; + } + + /** + * Close all cursors. + */ + public void closeAll() { + for (FieldsQueryCursor> cursor : cursors) + cursor.close(); + } + + /** + * @return Current result set. + */ + public OdbcResultSet currentResultSet() { + return currentResultSet; + } + + /** + * Move to next result set. 
+ */ + public void nextResultSet() { + currentResultSet = null; + + if (currentResultSetIdx != cursors.size()) { + currentResultSet = new OdbcResultSet(cursors.get(currentResultSetIdx)); + ++currentResultSetIdx; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequest.java index 4b21b792b5dce..9b9aa01fca1d9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequest.java @@ -44,6 +44,9 @@ public class OdbcRequest extends ClientListenerRequestNoId { /** Execute sql query with the batch of parameters. */ public static final int QRY_EXEC_BATCH = 8; + /** Get next result set. */ + public static final int MORE_RESULTS = 9; + /** Command. */ private final int cmd; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java index 32375fddd7a06..7f6b48d9d11a8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java @@ -22,13 +22,13 @@ import java.sql.Types; import java.util.ArrayList; import java.util.Collection; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.cache.query.QueryCursor; import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.internal.GridKernalContext; @@ -41,17 +41,16 @@ 
import org.apache.ignite.internal.processors.odbc.ClientListenerRequestHandler; import org.apache.ignite.internal.processors.odbc.ClientListenerResponse; import org.apache.ignite.internal.processors.odbc.odbc.escape.OdbcEscapeUtils; -import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; import org.apache.ignite.internal.processors.query.GridQueryIndexing; import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; -import org.apache.ignite.lang.IgniteBiTuple; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.META_COLS; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.META_PARAMS; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.META_TBLS; +import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.MORE_RESULTS; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.QRY_CLOSE; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.QRY_EXEC; import static org.apache.ignite.internal.processors.odbc.odbc.OdbcRequest.QRY_EXEC_BATCH; @@ -77,7 +76,7 @@ public class OdbcRequestHandler implements ClientListenerRequestHandler { private final int maxCursors; /** Current queries cursors. */ - private final ConcurrentHashMap> qryCursors = new ConcurrentHashMap<>(); + private final ConcurrentHashMap qryResults = new ConcurrentHashMap<>(); /** Distributed joins flag. 
*/ private final boolean distributedJoins; @@ -157,6 +156,9 @@ public OdbcRequestHandler(GridKernalContext ctx, GridSpinBusyLock busyLock, int case META_PARAMS: return getParamsMeta((OdbcQueryGetParamsMetaRequest)req); + + case MORE_RESULTS: + return moreResults((OdbcQueryMoreResultsRequest)req); } return new OdbcResponse(IgniteQueryErrorCode.UNKNOWN, "Unsupported ODBC request: " + req); @@ -185,8 +187,8 @@ public void onDisconnect() { { try { - for (IgniteBiTuple tuple : qryCursors.values()) - tuple.get1().close(); + for (OdbcQueryResults res : qryResults.values()) + res.closeAll(); } finally { busyLock.leaveBusy(); @@ -224,7 +226,7 @@ private SqlFieldsQueryEx makeQuery(String schema, String sql, Object[] args) { * @return Response. */ private ClientListenerResponse executeQuery(OdbcQueryExecuteRequest req) { - int cursorCnt = qryCursors.size(); + int cursorCnt = qryResults.size(); if (maxCursors > 0 && cursorCnt >= maxCursors) return new OdbcResponse(IgniteQueryErrorCode.UNKNOWN, "Too many open cursors (either close " + @@ -243,26 +245,22 @@ private ClientListenerResponse executeQuery(OdbcQueryExecuteRequest req) { SqlFieldsQuery qry = makeQuery(req.schema(), sql, req.arguments()); - QueryCursorImpl> qryCur = (QueryCursorImpl>)ctx.query().querySqlFieldsNoCache(qry, true); - - long rowsAffected = 0; + List>> cursors = ctx.query().querySqlFieldsNoCache(qry, true, false); - if (!qryCur.isQuery()) { - rowsAffected = getRowsAffected(qryCur); + OdbcQueryResults results = new OdbcQueryResults(cursors); - qryCur.close(); - } + if (!results.hasUnfetchedRows()) + results.closeAll(); else - qryCursors.put(qryId, new IgniteBiTuple(qryCur, null)); + qryResults.put(qryId, results); - List fieldsMeta = ((QueryCursorImpl) qryCur).fieldsMeta(); - - OdbcQueryExecuteResult res = new OdbcQueryExecuteResult(qryId, convertMetadata(fieldsMeta), rowsAffected); + OdbcQueryExecuteResult res = new OdbcQueryExecuteResult(qryId, results.currentResultSet().fieldsMeta(), + 
results.rowsAffected()); return new OdbcResponse(res); } catch (Exception e) { - qryCursors.remove(qryId); + qryResults.remove(qryId); U.error(log, "Failed to execute SQL query [reqId=" + req.requestId() + ", req=" + req + ']', e); @@ -277,7 +275,7 @@ private ClientListenerResponse executeQuery(OdbcQueryExecuteRequest req) { * @return Response. */ private ClientListenerResponse executeBatchQuery(OdbcQueryExecuteBatchRequest req) { - long rowsAffected = 0; + List rowsAffected = new ArrayList<>(req.arguments().length); int currentSet = 0; try { @@ -304,10 +302,10 @@ private ClientListenerResponse executeBatchQuery(OdbcQueryExecuteBatchRequest re throw new IgniteException("Batching of parameters only supported for DML statements. [query=" + req.sqlQuery() + ']'); - rowsAffected += getRowsAffected(qryCur); + rowsAffected.add(OdbcUtils.rowsAffected(qryCur)); for (currentSet = 1; currentSet < paramSet.length; ++currentSet) - rowsAffected += executeQuery(qry, paramSet[currentSet]); + rowsAffected.add(executeQuery(qry, paramSet[currentSet])); OdbcQueryExecuteBatchResult res = new OdbcQueryExecuteBatchResult(rowsAffected); @@ -331,29 +329,7 @@ private long executeQuery(SqlFieldsQuery qry, Object[] row) { QueryCursor> cur = ctx.query().querySqlFieldsNoCache(qry, true); - return getRowsAffected(cur); - } - - /** - * Get affected rows for DML statement. - * @param qryCur Cursor. - * @return Number of table rows affected. 
- */ - private static long getRowsAffected(QueryCursor> qryCur) { - Iterator> iter = qryCur.iterator(); - - if (iter.hasNext()) { - List res = iter.next(); - - if (res.size() > 0) { - Long affected = (Long) res.get(0); - - if (affected != null) - return affected; - } - } - - return 0; + return OdbcUtils.rowsAffected(cur); } /** @@ -366,20 +342,20 @@ private ClientListenerResponse closeQuery(OdbcQueryCloseRequest req) { long queryId = req.queryId(); try { - IgniteBiTuple tuple = qryCursors.get(queryId); + OdbcQueryResults results = qryResults.get(queryId); - if (tuple == null) + if (results == null) return new OdbcResponse(IgniteQueryErrorCode.UNKNOWN, "Failed to find query with ID: " + queryId); - CloseCursor(tuple, queryId); + CloseCursor(results, queryId); OdbcQueryCloseResult res = new OdbcQueryCloseResult(queryId); return new OdbcResponse(res); } catch (Exception e) { - qryCursors.remove(queryId); + qryResults.remove(queryId); U.error(log, "Failed to close SQL query [reqId=" + req.requestId() + ", req=" + queryId + ']', e); @@ -396,34 +372,21 @@ private ClientListenerResponse closeQuery(OdbcQueryCloseRequest req) { private ClientListenerResponse fetchQuery(OdbcQueryFetchRequest req) { try { long queryId = req.queryId(); - IgniteBiTuple tuple = qryCursors.get(queryId); + OdbcQueryResults results = qryResults.get(queryId); - if (tuple == null) + if (results == null) return new OdbcResponse(ClientListenerResponse.STATUS_FAILED, "Failed to find query with ID: " + queryId); - Iterator iter = tuple.get2(); + OdbcResultSet set = results.currentResultSet(); - if (iter == null) { - QueryCursor cur = tuple.get1(); + List items = set.fetch(req.pageSize()); - assert(cur != null); - - iter = cur.iterator(); - - tuple.put(cur, iter); - } - - List items = new ArrayList<>(); - - for (int i = 0; i < req.pageSize() && iter.hasNext(); ++i) - items.add(iter.next()); - - boolean lastPage = !iter.hasNext(); + boolean lastPage = !set.hasUnfetchedRows(); // Automatically closing 
cursor if no more data is available. - if (lastPage) - CloseCursor(tuple, queryId); + if (!results.hasUnfetchedRows()) + CloseCursor(results, queryId); OdbcQueryFetchResult res = new OdbcQueryFetchResult(queryId, items, lastPage); @@ -580,19 +543,56 @@ private ClientListenerResponse getParamsMeta(OdbcQueryGetParamsMetaRequest req) } } + /** + * {@link OdbcQueryMoreResultsRequest} command handler. + * + * @param req Execute query request. + * @return Response. + */ + private ClientListenerResponse moreResults(OdbcQueryMoreResultsRequest req) { + try { + long queryId = req.queryId(); + OdbcQueryResults results = qryResults.get(queryId); + + if (results == null) + return new OdbcResponse(ClientListenerResponse.STATUS_FAILED, + "Failed to find query with ID: " + queryId); + + results.nextResultSet(); + + OdbcResultSet set = results.currentResultSet(); + + List items = set.fetch(req.pageSize()); + + boolean lastPage = !set.hasUnfetchedRows(); + + // Automatically closing cursor if no more data is available. + if (!results.hasUnfetchedRows()) + CloseCursor(results, queryId); + + OdbcQueryMoreResultsResult res = new OdbcQueryMoreResultsResult(queryId, items, lastPage); + + return new OdbcResponse(res); + } + catch (Exception e) { + U.error(log, "Failed to get more SQL query results [reqId=" + + req.requestId() + ", req=" + req + ']', e); + + return exceptionToResult(e); + } + } + /** * Close cursor. - * @param tuple Query map element. + * @param results Query map element. * @param queryId Query ID. 
*/ - private void CloseCursor(IgniteBiTuple tuple, long queryId) { - QueryCursor cur = tuple.get1(); - - assert(cur != null); + private void CloseCursor(OdbcQueryResults results, long queryId) { + assert(results != null); - cur.close(); + results.closeAll(); - qryCursors.remove(queryId); + qryResults.remove(queryId); } /** @@ -654,27 +654,6 @@ private static byte sqlTypeToBinary(int sqlType) { } } - /** - * Convert metadata in collection from {@link GridQueryFieldMetadata} to - * {@link OdbcColumnMeta}. - * - * @param meta Internal query field metadata. - * @return Odbc query field metadata. - */ - private static Collection convertMetadata(Collection meta) { - List res = new ArrayList<>(); - - if (meta != null) { - for (Object info : meta) { - assert info instanceof GridQueryFieldMetadata; - - res.add(new OdbcColumnMeta((GridQueryFieldMetadata)info)); - } - } - - return res; - } - /** * Checks whether string matches SQL pattern. * @@ -694,7 +673,7 @@ private static boolean matches(String str, String ptrn) { * @param e Exception to convert. * @return resulting {@link OdbcResponse}. */ - private OdbcResponse exceptionToBatchResult(Exception e, long rowsAffected, long currentSet) { + private OdbcResponse exceptionToBatchResult(Exception e, Collection rowsAffected, long currentSet) { OdbcQueryExecuteBatchResult res = new OdbcQueryExecuteBatchResult(rowsAffected, currentSet, OdbcUtils.tryRetrieveSqlErrorCode(e), OdbcUtils.tryRetrieveH2ErrorMessage(e)); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java new file mode 100644 index 0000000000000..66b0776292787 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.odbc; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import org.apache.ignite.cache.query.FieldsQueryCursor; +import org.apache.ignite.internal.processors.cache.QueryCursorImpl; +import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; + +/** + * Represents single result set. + */ +public class OdbcResultSet { + /** Cursor. */ + private final QueryCursorImpl> cursor; + + /** Current iterator. */ + private Iterator iter; + + /** + * Constructor. + * @param cursor Result set cursor. + */ + OdbcResultSet(FieldsQueryCursor> cursor) { + assert cursor instanceof QueryCursorImpl; + + this.cursor = (QueryCursorImpl>)cursor; + + if (this.cursor.isQuery()) + iter = this.cursor.iterator(); + else + iter = null; + } + + /** + * @return {@code true} if has non-fetched rows. + */ + public boolean hasUnfetchedRows() { + return iter != null && iter.hasNext(); + } + + /** + * @return Fields metadata of the current result set. + */ + public Collection fieldsMeta() { + return convertMetadata(cursor.fieldsMeta()); + } + + /** + * Fetch up to specified number of rows of result set. + * @param maxSize Maximum number of records to fetch. 
+ * @return List of fetched records. + */ + public List fetch(int maxSize) { + List items = new ArrayList<>(maxSize); + + if (iter == null) + return items; + + for (int i = 0; i < maxSize && iter.hasNext(); ++i) + items.add(iter.next()); + + return items; + } + + /** + * Convert metadata in collection from {@link GridQueryFieldMetadata} to + * {@link OdbcColumnMeta}. + * + * @param meta Internal query field metadata. + * @return Odbc query field metadata. + */ + private static Collection convertMetadata(Collection meta) { + List res = new ArrayList<>(); + + if (meta != null) { + for (GridQueryFieldMetadata info : meta) + res.add(new OdbcColumnMeta(info)); + } + + return res; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcUtils.java index 98fa0452f804b..4aa864deb714d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcUtils.java @@ -17,7 +17,11 @@ package org.apache.ignite.internal.processors.odbc.odbc; +import java.util.Iterator; +import java.util.List; import org.apache.ignite.IgniteException; +import org.apache.ignite.cache.query.QueryCursor; +import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.odbc.SqlListenerDataTypes; import org.apache.ignite.internal.processors.query.IgniteSQLException; @@ -181,4 +185,31 @@ public static String tryRetrieveH2ErrorMessage(Throwable err) { return msg; } + + /** + * Get affected rows for statement. + * @param qryCur Cursor. + * @return Number of table rows affected, if the query is DML, and -1 otherwise. 
+ */ + public static long rowsAffected(QueryCursor> qryCur) { + QueryCursorImpl> qryCur0 = (QueryCursorImpl>)qryCur; + + if (qryCur0.isQuery()) + return -1; + + Iterator> iter = qryCur0.iterator(); + + if (iter.hasNext()) { + List res = iter.next(); + + if (res.size() > 0) { + Long affected = (Long) res.get(0); + + if (affected != null) + return affected; + } + } + + return 0; + } } diff --git a/modules/platforms/cpp/examples/odbc-example/src/odbc_example.cpp b/modules/platforms/cpp/examples/odbc-example/src/odbc_example.cpp index 29c2a4a0dcfca..76b5c18184771 100644 --- a/modules/platforms/cpp/examples/odbc-example/src/odbc_example.cpp +++ b/modules/platforms/cpp/examples/odbc-example/src/odbc_example.cpp @@ -169,7 +169,7 @@ void GetDataWithOdbc(SQLHDBC dbc, const std::string& query) /** * Populate Person cache with sample data. - * + * * @param dbc Database connection. */ void PopulatePerson(SQLHDBC dbc) @@ -387,7 +387,7 @@ void PopulatePerson(SQLHDBC dbc) /** * Populate Organization cache with sample data. - * + * * @param dbc Database connection. */ void PopulateOrganization(SQLHDBC dbc) @@ -525,7 +525,7 @@ void DeletePerson(SQLHDBC dbc, int64_t key) /** * Query tables. - * + * * @param dbc Database connection. */ void QueryData(SQLHDBC dbc) @@ -552,7 +552,7 @@ void QueryData(SQLHDBC dbc) * * @return Exit code. 
*/ -int main() +int main() { IgniteConfiguration cfg; diff --git a/modules/platforms/cpp/odbc-test/src/queries_test.cpp b/modules/platforms/cpp/odbc-test/src/queries_test.cpp index 707669d398b2e..c6097e0fa82df 100644 --- a/modules/platforms/cpp/odbc-test/src/queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/queries_test.cpp @@ -557,19 +557,27 @@ struct QueriesTestSuiteFixture if (!SQL_SUCCEEDED(ret)) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - SQLLEN affected = 0; - ret = SQLRowCount(stmt, &affected); + SQLLEN totallyAffected = 0; - if (!SQL_SUCCEEDED(ret)) - BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + do + { + SQLLEN affected = 0; + ret = SQLRowCount(stmt, &affected); - BOOST_CHECK_EQUAL(affected, expectedToAffect); + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); - BOOST_CHECKPOINT("Getting next result set"); - ret = SQLMoreResults(stmt); + totallyAffected += affected; - if (ret != SQL_NO_DATA) - BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + BOOST_CHECKPOINT("Getting next result set"); + + ret = SQLMoreResults(stmt); + + if (ret != SQL_SUCCESS && ret != SQL_NO_DATA) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + } while (ret != SQL_NO_DATA); + + BOOST_CHECK_EQUAL(totallyAffected, expectedToAffect); BOOST_CHECKPOINT("Resetting parameters."); ret = SQLFreeStmt(stmt, SQL_RESET_PARAMS); @@ -2198,4 +2206,178 @@ BOOST_AUTO_TEST_CASE(TestAffectedRows) BOOST_CHECK_EQUAL(affected, 0); } +BOOST_AUTO_TEST_CASE(TestMultipleSelects) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + const int stmtCnt = 10; + + std::stringstream stream; + for (int i = 0; i < stmtCnt; ++i) + stream << "select " << i << "; "; + + stream << '\0'; + + std::string query0 = stream.str(); + std::vector query(query0.begin(), query0.end()); + + SQLRETURN ret = SQLExecDirect(stmt, &query[0], SQL_NTS); + + if (!SQL_SUCCEEDED(ret)) + 
BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + long res = 0; + + BOOST_CHECKPOINT("Binding column"); + ret = SQLBindCol(stmt, 1, SQL_C_SLONG, &res, 0, 0); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + for (long i = 0; i < stmtCnt; ++i) + { + ret = SQLFetch(stmt); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(res, i); + + ret = SQLFetch(stmt); + + BOOST_CHECK_EQUAL(ret, SQL_NO_DATA); + + ret = SQLMoreResults(stmt); + + if (i < stmtCnt - 1 && !SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + else if (i == stmtCnt - 1) + BOOST_CHECK_EQUAL(ret, SQL_NO_DATA); + } +} + +BOOST_AUTO_TEST_CASE(TestMultipleMixedStatements) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + const int stmtCnt = 10; + + std::stringstream stream; + for (int i = 0; i < stmtCnt; ++i) + stream << "select " << i << "; insert into TestType(_key) values(" << i << "); "; + + stream << '\0'; + + std::string query0 = stream.str(); + std::vector query(query0.begin(), query0.end()); + + SQLRETURN ret = SQLExecDirect(stmt, &query[0], SQL_NTS); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + long res = 0; + + BOOST_CHECKPOINT("Binding column"); + ret = SQLBindCol(stmt, 1, SQL_C_SLONG, &res, 0, 0); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + for (long i = 0; i < stmtCnt; ++i) + { + ret = SQLFetch(stmt); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(res, i); + + ret = SQLFetch(stmt); + + BOOST_CHECK_EQUAL(ret, SQL_NO_DATA); + + ret = SQLMoreResults(stmt); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + SQLLEN affected = 0; + ret = SQLRowCount(stmt, &affected); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, 
stmt)); + + BOOST_CHECK_EQUAL(affected, 1); + + ret = SQLFetch(stmt); + + BOOST_CHECK_EQUAL(ret, SQL_NO_DATA); + + ret = SQLMoreResults(stmt); + + if (i < stmtCnt - 1 && !SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + else if (i == stmtCnt - 1) + BOOST_CHECK_EQUAL(ret, SQL_NO_DATA); + } +} + +BOOST_AUTO_TEST_CASE(TestMultipleMixedStatementsNoFetch) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + const int stmtCnt = 10; + + std::stringstream stream; + for (int i = 0; i < stmtCnt; ++i) + stream << "select " << i << "; insert into TestType(_key) values(" << i << "); "; + + stream << '\0'; + + std::string query0 = stream.str(); + std::vector query(query0.begin(), query0.end()); + + SQLRETURN ret = SQLExecDirect(stmt, &query[0], SQL_NTS); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + long res = 0; + + BOOST_CHECKPOINT("Binding column"); + ret = SQLBindCol(stmt, 1, SQL_C_SLONG, &res, 0, 0); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + for (long i = 0; i < stmtCnt; ++i) + { + ret = SQLMoreResults(stmt); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + SQLLEN affected = 0; + ret = SQLRowCount(stmt, &affected); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + BOOST_CHECK_EQUAL(affected, 1); + + ret = SQLFetch(stmt); + + BOOST_CHECK_EQUAL(ret, SQL_NO_DATA); + + ret = SQLMoreResults(stmt); + + if (i < stmtCnt - 1 && !SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + else if (i == stmtCnt - 1) + BOOST_CHECK_EQUAL(ret, SQL_NO_DATA); + } +} + + BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/message.h b/modules/platforms/cpp/odbc/include/ignite/odbc/message.h index dda0ba93e8558..8d6c90641d1f2 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/message.h +++ 
b/modules/platforms/cpp/odbc/include/ignite/odbc/message.h @@ -60,7 +60,9 @@ namespace ignite GET_PARAMS_METADATA = 7, - EXECUTE_SQL_QUERY_BATCH = 8 + EXECUTE_SQL_QUERY_BATCH = 8, + + QUERY_MORE_RESULTS = 9 }; }; @@ -386,6 +388,47 @@ namespace ignite std::string sqlQuery; }; + /** + * Query more results request. + */ + class QueryMoreResultsRequest + { + public: + /** + * Constructor. + * + * @param queryId Query ID. + * @param pageSize Required page size. + */ + QueryMoreResultsRequest(int64_t queryId, int32_t pageSize) : + queryId(queryId), + pageSize(pageSize) + { + // No-op. + } + + /** + * Destructor. + */ + ~QueryMoreResultsRequest() + { + // No-op. + } + + /** + * Write request using provided writer. + * @param writer Writer. + */ + void Write(impl::binary::BinaryWriterImpl& writer) const; + + private: + /** Query ID. */ + int64_t queryId; + + /** Page size. */ + int32_t pageSize; + }; + /** + * General response. + */ @@ -575,7 +618,7 @@ namespace ignite * Get affected rows number. * @return Number of rows affected by the query. */ - int64_t GetAffectedRows() + const std::vector& GetAffectedRows() { return affectedRows; } @@ -585,7 +628,7 @@ namespace ignite * Read response using provided reader. * @param reader Reader. */ - virtual void ReadOnSuccess(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion&); + virtual void ReadOnSuccess(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion& ver); /** Query ID. */ int64_t queryId; @@ -594,7 +637,7 @@ namespace ignite meta::ColumnMetaVector meta; /** Number of affected rows. */ - int64_t affectedRows; + std::vector affectedRows; }; /** @@ -617,7 +660,7 @@ namespace ignite * Affected rows. * @return Affected rows.
*/ - int64_t GetAffectedRows() const + const std::vector& GetAffectedRows() const { return affectedRows; } @@ -628,7 +671,7 @@ */ int64_t GetErrorSetIdx() const { - return affectedRows; + return static_cast(affectedRows.size()); } /** @@ -658,7 +701,7 @@ virtual void ReadOnSuccess(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion& ver); /** Affected rows. */ - int64_t affectedRows; + std::vector affectedRows; /** Index of the set which caused an error. */ int64_t errorSetIdx; @@ -817,6 +860,46 @@ /** Columns metadata. */ std::vector typeIds; }; + + /** + * Query more results response. + */ + class QueryMoreResultsResponse : public Response + { + public: + /** + * Constructor. + * @param resultPage Result page. + */ + QueryMoreResultsResponse(ResultPage& resultPage); + + /** + * Destructor. + */ + virtual ~QueryMoreResultsResponse(); + + /** + * Get query ID. + * @return Query ID. + */ + int64_t GetQueryId() const + { + return queryId; + } + + private: + /** + * Read response using provided reader. + * @param reader Reader. + */ + virtual void ReadOnSuccess(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion&); + + /** Query ID. */ + int64_t queryId; + + /** Result page. */ + ResultPage& resultPage; + }; } } diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h index 5e741ed421465..1e6c8698a9106 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h @@ -105,6 +105,13 @@ namespace ignite */ virtual int64_t AffectedRows() const; + /** + * Move to the next result set. + * + * @return Operation result. + */ + virtual SqlResult::Type NextResultSet(); + /** * Get SQL query string. * @@ -142,16 +149,13 @@ namespace ignite meta::ColumnMetaVector resultMeta; /** Number of rows affected.
*/ - int64_t rowsAffected; + std::vector rowsAffected; - /** Number of parameter sets successfully processed. */ - int64_t setsProcessed; + /** Rows affected index. */ + size_t rowsAffectedIdx; /** Query executed. */ bool executed; - - /** Data retrieved. */ - bool dataRetrieved; }; } } diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/column_metadata_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/column_metadata_query.h index 875b1ce170045..d742490de2be6 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/column_metadata_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/column_metadata_query.h @@ -105,6 +105,13 @@ namespace ignite */ virtual int64_t AffectedRows() const; + /** + * Move to the next result set. + * + * @return Operation result. + */ + virtual SqlResult::Type NextResultSet(); + private: IGNITE_NO_COPY_ASSIGNMENT(ColumnMetadataQuery); diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h index 5a4a978e5e837..c47600043d158 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h @@ -105,6 +105,13 @@ namespace ignite */ virtual int64_t AffectedRows() const; + /** + * Move to the next result set. + * + * @return Operation result. + */ + virtual SqlResult::Type NextResultSet(); + /** * Get SQL query string. * @@ -139,6 +146,13 @@ namespace ignite * @return Result. */ SqlResult::Type MakeRequestFetch(); + + /** + * Make next result set request and use response to set internal state. + * + * @return Result. + */ + SqlResult::Type MakeRequestMoreResults(); /** * Close query. @@ -163,7 +177,13 @@ namespace ignite std::auto_ptr cursor; /** Number of rows affected. */ - int64_t rowsAffected; + std::vector rowsAffected; + + /** Rows affected index. */ + size_t rowsAffectedIdx; + + /** Cached next result page.
*/ + std::auto_ptr cachedNextPage; }; } } diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/foreign_keys_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/foreign_keys_query.h index 7d607285a13d7..9abd8b2bde28a 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/foreign_keys_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/foreign_keys_query.h @@ -105,6 +105,13 @@ namespace ignite * @return Number of rows affected by the statement. */ virtual int64_t AffectedRows() const; + + /** + * Move to the next result set. + * + * @return Operation result. + */ + virtual SqlResult::Type NextResultSet(); private: IGNITE_NO_COPY_ASSIGNMENT(ForeignKeysQuery); diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/primary_keys_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/primary_keys_query.h index 65bac33c76775..42f7e26de3a35 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/primary_keys_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/primary_keys_query.h @@ -102,6 +102,13 @@ namespace ignite * @return Number of rows affected by the statement. */ virtual int64_t AffectedRows() const; + + /** + * Move to the next result set. + * + * @return Operation result. + */ + virtual SqlResult::Type NextResultSet(); private: IGNITE_NO_COPY_ASSIGNMENT(PrimaryKeysQuery); diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/query.h index 701f5c8242fa1..9d54b9097e0ae 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/query.h @@ -130,6 +130,13 @@ namespace ignite */ virtual int64_t AffectedRows() const = 0; + /** + * Move to the next result set. + * + * @return Operation result. + */ + virtual SqlResult::Type NextResultSet() = 0; + /** * Get query type.
* diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/special_columns_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/special_columns_query.h index 0f6660ff8177e..d6a5c44338e2c 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/special_columns_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/special_columns_query.h @@ -105,6 +105,13 @@ namespace ignite */ virtual int64_t AffectedRows() const; + /** + * Move to the next result set. + * + * @return Operation result. + */ + virtual SqlResult::Type NextResultSet(); + private: IGNITE_NO_COPY_ASSIGNMENT(SpecialColumnsQuery); diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/table_metadata_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/table_metadata_query.h index acd3f498a4270..759bfd6a43410 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/table_metadata_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/table_metadata_query.h @@ -106,6 +106,13 @@ namespace ignite */ virtual int64_t AffectedRows() const; + /** + * Move to the next result set. + * + * @return Operation result. + */ + virtual SqlResult::Type NextResultSet(); + private: IGNITE_NO_COPY_ASSIGNMENT(TableMetadataQuery); diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/type_info_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/type_info_query.h index 00cca08735095..974ee0118fa35 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/type_info_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/type_info_query.h @@ -96,6 +96,13 @@ namespace ignite */ virtual int64_t AffectedRows() const; + /** + * Move to the next result set. + * + * @return Operation result.
+ */ + virtual SqlResult::Type NextResultSet(); + private: IGNITE_NO_COPY_ASSIGNMENT(TypeInfoQuery); diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h b/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h index 27e883d7db93c..6d4b3ab6f48ca 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h @@ -264,11 +264,11 @@ namespace ignite bool DataAvailable() const; /** - * Next results. + * More results. * * Move to next result set or affected rows number. */ - void NextResults(); + void MoreResults(); /** * Get column attribute. @@ -581,7 +581,7 @@ namespace ignite * * @return Operation result. */ - SqlResult::Type InternalNextResults(); + SqlResult::Type InternalMoreResults(); /** * Get column attribute. diff --git a/modules/platforms/cpp/odbc/src/cursor.cpp b/modules/platforms/cpp/odbc/src/cursor.cpp index b41f5b1f3c895..09e96cbfb770e 100644 --- a/modules/platforms/cpp/odbc/src/cursor.cpp +++ b/modules/platforms/cpp/odbc/src/cursor.cpp @@ -66,7 +66,7 @@ namespace ignite bool Cursor::IsClosedRemotely() const { - return currentPage.get() && currentPage->IsLast(); + return !currentPage.get() || currentPage->IsLast(); } void Cursor::UpdateData(std::auto_ptr& newPage) diff --git a/modules/platforms/cpp/odbc/src/message.cpp b/modules/platforms/cpp/odbc/src/message.cpp index 4767c74b2424f..5595ddb255e3b 100644 --- a/modules/platforms/cpp/odbc/src/message.cpp +++ b/modules/platforms/cpp/odbc/src/message.cpp @@ -18,6 +18,29 @@ #include "ignite/odbc/message.h" #include "ignite/odbc/utility.h" +namespace +{ + using namespace ignite; + using namespace odbc; + + void ReadAffectedRows(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion& protocolVersion, + std::vector& affectedRows) + { + affectedRows.clear(); + + if (protocolVersion < ProtocolVersion::VERSION_2_3_0) + affectedRows.push_back(reader.ReadInt64()); + else + { + int32_t len = reader.ReadInt32(); + + 
affectedRows.reserve(static_cast(len)); + for (int32_t i = 0; i < len; ++i) + affectedRows.push_back(reader.ReadInt64()); + } + } +} + namespace ignite { namespace odbc @@ -152,6 +175,7 @@ namespace ignite void QueryFetchRequest::Write(impl::binary::BinaryWriterImpl& writer) const { writer.WriteInt8(RequestType::FETCH_SQL_QUERY); + writer.WriteInt64(queryId); writer.WriteInt32(pageSize); } @@ -212,6 +236,14 @@ namespace ignite writer.WriteObject(sqlQuery); } + void QueryMoreResultsRequest::Write(impl::binary::BinaryWriterImpl& writer) const + { + writer.WriteInt8(RequestType::QUERY_MORE_RESULTS); + + writer.WriteInt64(queryId); + writer.WriteInt32(pageSize); + } + Response::Response() : status(ResponseStatus::UNKNOWN_ERROR), error() @@ -299,13 +331,13 @@ namespace ignite // No-op. } - void QueryExecuteResponse::ReadOnSuccess(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion&) + void QueryExecuteResponse::ReadOnSuccess(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion& ver) { queryId = reader.ReadInt64(); meta::ReadColumnMetaVector(reader, meta); - affectedRows = reader.ReadInt64(); + ReadAffectedRows(reader, ver, affectedRows); } QueryExecuteBatchResponse::QueryExecuteBatchResponse(): @@ -325,7 +357,8 @@ namespace ignite void QueryExecuteBatchResponse::ReadOnSuccess(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion& ver) { bool success = reader.ReadBool(); - affectedRows = reader.ReadInt64(); + + ReadAffectedRows(reader, ver, affectedRows); if (!success) { @@ -337,7 +370,9 @@ namespace ignite } } - QueryFetchResponse::QueryFetchResponse(ResultPage& resultPage): queryId(0), resultPage(resultPage) + QueryFetchResponse::QueryFetchResponse(ResultPage& resultPage) : + queryId(0), + resultPage(resultPage) { // No-op. } @@ -398,6 +433,25 @@ namespace ignite { utility::ReadByteArray(reader, typeIds); } + + QueryMoreResultsResponse::QueryMoreResultsResponse(ResultPage & resultPage) : + queryId(0), + resultPage(resultPage) + { + // No-op. 
+ } + + QueryMoreResultsResponse::~QueryMoreResultsResponse() + { + // No-op. + } + + void QueryMoreResultsResponse::ReadOnSuccess(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion&) + { + queryId = reader.ReadInt64(); + + resultPage.Read(reader); + } } } diff --git a/modules/platforms/cpp/odbc/src/odbc.cpp b/modules/platforms/cpp/odbc/src/odbc.cpp index 8121a3bcbca1c..1480d0b9c9e3a 100644 --- a/modules/platforms/cpp/odbc/src/odbc.cpp +++ b/modules/platforms/cpp/odbc/src/odbc.cpp @@ -593,7 +593,7 @@ namespace ignite if (!statement) return SQL_INVALID_HANDLE; - statement->NextResults(); + statement->MoreResults(); return statement->GetDiagnosticRecords().GetReturnCode(); } diff --git a/modules/platforms/cpp/odbc/src/query/batch_query.cpp b/modules/platforms/cpp/odbc/src/query/batch_query.cpp index fc8fda47ddd9c..29d11ca8fc208 100644 --- a/modules/platforms/cpp/odbc/src/query/batch_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/batch_query.cpp @@ -34,10 +34,9 @@ namespace ignite sql(sql), params(params), resultMeta(), - rowsAffected(0), - setsProcessed(0), - executed(false), - dataRetrieved(false) + rowsAffected(), + rowsAffectedIdx(0), + executed(false) { // No-op. 
} @@ -62,6 +61,9 @@ namespace ignite int32_t processed = 0; + rowsAffected.clear(); + rowsAffected.reserve(static_cast(params.GetParamSetSize())); + do { int32_t currentPageSize = std::min(maxPageSize, rowNum - processed); bool lastPage = currentPageSize == rowNum - processed; @@ -71,7 +73,7 @@ namespace ignite processed += currentPageSize; } while (res == SqlResult::AI_SUCCESS && processed < rowNum); - params.SetParamsProcessed(static_cast(setsProcessed)); + params.SetParamsProcessed(static_cast(rowsAffected.size())); return res; } @@ -90,17 +92,7 @@ namespace ignite return SqlResult::AI_ERROR; } - if (dataRetrieved) - return SqlResult::AI_NO_DATA; - - app::ColumnBindingMap::iterator it = columnBindings.find(1); - - if (it != columnBindings.end()) - it->second.PutInt64(rowsAffected); - - dataRetrieved = true; - - return SqlResult::AI_SUCCESS; + return SqlResult::AI_NO_DATA; } SqlResult::Type BatchQuery::GetColumn(uint16_t columnIdx, app::ApplicationDataBuffer& buffer) @@ -112,31 +104,18 @@ namespace ignite return SqlResult::AI_ERROR; } - if (dataRetrieved) - { - diag.AddStatusRecord(SqlState::S24000_INVALID_CURSOR_STATE, - "Cursor has reached end of the result set."); - - return SqlResult::AI_ERROR; - } - - if (columnIdx != 1) - { - std::stringstream builder; - builder << "Column with id " << columnIdx << " is not available in result set."; - - diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, builder.str()); + diag.AddStatusRecord(SqlState::S24000_INVALID_CURSOR_STATE, + "Cursor has reached end of the result set."); - return SqlResult::AI_ERROR; - } - - buffer.PutInt64(rowsAffected); - - return SqlResult::AI_SUCCESS; + return SqlResult::AI_ERROR; } SqlResult::Type BatchQuery::Close() { + executed = false; + rowsAffected.clear(); + rowsAffectedIdx = 0; + return SqlResult::AI_SUCCESS; } @@ -147,7 +126,21 @@ namespace ignite int64_t BatchQuery::AffectedRows() const { - return rowsAffected; + int64_t affected = rowsAffectedIdx < rowsAffected.size() ? 
rowsAffected[rowsAffectedIdx] : 0; + return affected < 0 ? 0 : affected; + } + + SqlResult::Type BatchQuery::NextResultSet() + { + if (rowsAffectedIdx + 1 >= rowsAffected.size()) + { + Close(); + return SqlResult::AI_NO_DATA; + } + + ++rowsAffectedIdx; + + return SqlResult::AI_SUCCESS; } SqlResult::Type BatchQuery::MakeRequestExecuteBatch(SqlUlen begin, SqlUlen end, bool last) @@ -183,25 +176,20 @@ namespace ignite return SqlResult::AI_ERROR; } - rowsAffected += rsp.GetAffectedRows(); - LOG_MSG("rowsAffected: " << rowsAffected); + rowsAffected.insert(rowsAffected.end(), rsp.GetAffectedRows().begin(), rsp.GetAffectedRows().end()); + LOG_MSG("Affected rows list size: " << rowsAffected.size()); if (!rsp.GetErrorMessage().empty()) { LOG_MSG("Error: " << rsp.GetErrorMessage()); - - setsProcessed += rsp.GetErrorSetIdx(); - LOG_MSG("setsProcessed: " << setsProcessed); + LOG_MSG("Sets Processed: " << rowsAffected.size()); diag.AddStatusRecord(ResponseStatusToSqlState(rsp.GetErrorCode()), rsp.GetErrorMessage(), - static_cast(setsProcessed), 0); + static_cast(rowsAffected.size()), 0); return SqlResult::AI_SUCCESS_WITH_INFO; } - setsProcessed += end - begin; - LOG_MSG("setsProcessed: " << setsProcessed); - return SqlResult::AI_SUCCESS; } } diff --git a/modules/platforms/cpp/odbc/src/query/column_metadata_query.cpp b/modules/platforms/cpp/odbc/src/query/column_metadata_query.cpp index 09106120b8a17..0a091597feaf1 100644 --- a/modules/platforms/cpp/odbc/src/query/column_metadata_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/column_metadata_query.cpp @@ -289,6 +289,11 @@ namespace ignite return 0; } + SqlResult::Type ColumnMetadataQuery::NextResultSet() + { + return SqlResult::AI_NO_DATA; + } + SqlResult::Type ColumnMetadataQuery::MakeRequestGetColumnsMeta() { QueryGetColumnsMetaRequest req(schema, table, column); diff --git a/modules/platforms/cpp/odbc/src/query/data_query.cpp b/modules/platforms/cpp/odbc/src/query/data_query.cpp index 80fcc69671157..012b02652d73f 100644 
--- a/modules/platforms/cpp/odbc/src/query/data_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/data_query.cpp @@ -36,7 +36,9 @@ namespace ignite params(params), resultMeta(), cursor(), - rowsAffected(0) + rowsAffected(), + rowsAffectedIdx(0), + cachedNextPage() { // No-op. } @@ -79,10 +81,15 @@ namespace ignite if (cursor->NeedDataUpdate()) { - SqlResult::Type result = MakeRequestFetch(); + if (cachedNextPage.get()) + cursor->UpdateData(cachedNextPage); + else + { + SqlResult::Type result = MakeRequestFetch(); - if (result != SqlResult::AI_SUCCESS) - return result; + if (result != SqlResult::AI_SUCCESS) + return result; + } } if (!cursor->HasData()) @@ -168,6 +175,10 @@ namespace ignite cursor.reset(); resultMeta.clear(); + + rowsAffectedIdx = 0; + + rowsAffected.clear(); } return result; @@ -180,7 +191,25 @@ namespace ignite int64_t DataQuery::AffectedRows() const { - return rowsAffected; + int64_t affected = rowsAffectedIdx < rowsAffected.size() ? rowsAffected[rowsAffectedIdx] : 0; + return affected < 0 ? 
0 : affected; + } + + SqlResult::Type DataQuery::NextResultSet() + { + if (rowsAffectedIdx + 1 >= rowsAffected.size()) + { + InternalClose(); + + return SqlResult::AI_NO_DATA; + } + + SqlResult::Type res = MakeRequestMoreResults(); + + if (res == SqlResult::AI_SUCCESS) + ++rowsAffectedIdx; + + return res; } SqlResult::Type DataQuery::MakeRequestExecute() @@ -221,7 +250,7 @@ namespace ignite rowsAffected = rsp.GetAffectedRows(); LOG_MSG("Query id: " << rsp.GetQueryId()); - LOG_MSG("Affected Rows: " << rowsAffected); + LOG_MSG("Affected Rows list size: " << rowsAffected.size()); for (size_t i = 0; i < resultMeta.size(); ++i) { @@ -231,10 +260,9 @@ namespace ignite << "\n[" << i << "] ColumnType: " << static_cast(resultMeta[i].GetDataType())); } - if (rowsAffected > 0) - cursor.reset(); - else - cursor.reset(new Cursor(rsp.GetQueryId())); + cursor.reset(new Cursor(rsp.GetQueryId())); + + rowsAffectedIdx = 0; return SqlResult::AI_SUCCESS; } @@ -308,10 +336,55 @@ namespace ignite return SqlResult::AI_ERROR; } + LOG_MSG("Page size: " << resultPage->GetSize()); + LOG_MSG("Page is last: " << resultPage->IsLast()); + cursor->UpdateData(resultPage); return SqlResult::AI_SUCCESS; } + + SqlResult::Type DataQuery::MakeRequestMoreResults() + { + std::auto_ptr resultPage(new ResultPage()); + + QueryMoreResultsRequest req(cursor->GetQueryId(), connection.GetConfiguration().GetPageSize()); + QueryMoreResultsResponse rsp(*resultPage); + + try + { + connection.SyncMessage(req, rsp); + } + catch (const OdbcError& err) + { + diag.AddStatusRecord(err); + + return SqlResult::AI_ERROR; + } + catch (const IgniteError& err) + { + diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText()); + + return SqlResult::AI_ERROR; + } + + if (rsp.GetStatus() != ResponseStatus::SUCCESS) + { + LOG_MSG("Error: " << rsp.GetError()); + + diag.AddStatusRecord(ResponseStatusToSqlState(rsp.GetStatus()), rsp.GetError()); + + return SqlResult::AI_ERROR; + } + + LOG_MSG("Page size: " << 
resultPage->GetSize()); + LOG_MSG("Page is last: " << resultPage->IsLast()); + + cachedNextPage = resultPage; + cursor.reset(new Cursor(rsp.GetQueryId())); + + return SqlResult::AI_SUCCESS; + } } } } diff --git a/modules/platforms/cpp/odbc/src/query/foreign_keys_query.cpp b/modules/platforms/cpp/odbc/src/query/foreign_keys_query.cpp index 4ca77090f9892..c22a3aaa21803 100644 --- a/modules/platforms/cpp/odbc/src/query/foreign_keys_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/foreign_keys_query.cpp @@ -125,6 +125,11 @@ namespace ignite { return 0; } + + SqlResult::Type ForeignKeysQuery::NextResultSet() + { + return SqlResult::AI_NO_DATA; + } } } } diff --git a/modules/platforms/cpp/odbc/src/query/primary_keys_query.cpp b/modules/platforms/cpp/odbc/src/query/primary_keys_query.cpp index ef99db3999f9a..bb6f90872969c 100644 --- a/modules/platforms/cpp/odbc/src/query/primary_keys_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/primary_keys_query.cpp @@ -207,6 +207,11 @@ namespace ignite { return 0; } + + SqlResult::Type PrimaryKeysQuery::NextResultSet() + { + return SqlResult::AI_NO_DATA; + } } } } diff --git a/modules/platforms/cpp/odbc/src/query/special_columns_query.cpp b/modules/platforms/cpp/odbc/src/query/special_columns_query.cpp index b0f534c487a3b..01c7b96be9b28 100644 --- a/modules/platforms/cpp/odbc/src/query/special_columns_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/special_columns_query.cpp @@ -116,6 +116,11 @@ namespace ignite { return 0; } + + SqlResult::Type SpecialColumnsQuery::NextResultSet() + { + return SqlResult::AI_NO_DATA; + } } } } diff --git a/modules/platforms/cpp/odbc/src/query/table_metadata_query.cpp b/modules/platforms/cpp/odbc/src/query/table_metadata_query.cpp index 93f1f798073d2..53fe49d2ccf4b 100644 --- a/modules/platforms/cpp/odbc/src/query/table_metadata_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/table_metadata_query.cpp @@ -215,6 +215,11 @@ namespace ignite return 0; } + SqlResult::Type 
TableMetadataQuery::NextResultSet() + { + return SqlResult::AI_NO_DATA; + } + SqlResult::Type TableMetadataQuery::MakeRequestGetTablesMeta() { QueryGetTablesMetaRequest req(catalog, schema, table, tableType); diff --git a/modules/platforms/cpp/odbc/src/query/type_info_query.cpp b/modules/platforms/cpp/odbc/src/query/type_info_query.cpp index b4efca0b9b668..939458a880506 100644 --- a/modules/platforms/cpp/odbc/src/query/type_info_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/type_info_query.cpp @@ -401,6 +401,11 @@ namespace ignite { return 0; } + + SqlResult::Type TypeInfoQuery::NextResultSet() + { + return SqlResult::AI_NO_DATA; + } } } } diff --git a/modules/platforms/cpp/odbc/src/statement.cpp b/modules/platforms/cpp/odbc/src/statement.cpp index 36c1a0b8431a8..b167d44915c02 100644 --- a/modules/platforms/cpp/odbc/src/statement.cpp +++ b/modules/platforms/cpp/odbc/src/statement.cpp @@ -824,19 +824,21 @@ namespace ignite return currentQuery.get() && currentQuery->DataAvailable(); } - void Statement::NextResults() + void Statement::MoreResults() { - IGNITE_ODBC_API_CALL(InternalNextResults()); + IGNITE_ODBC_API_CALL(InternalMoreResults()); } - SqlResult::Type Statement::InternalNextResults() + SqlResult::Type Statement::InternalMoreResults() { if (!currentQuery.get()) - return SqlResult::AI_NO_DATA; + { + AddStatusRecord(SqlState::SHY010_SEQUENCE_ERROR, "Query is not executed."); - SqlResult::Type result = currentQuery->Close(); + return SqlResult::AI_ERROR; + } - return result == SqlResult::AI_SUCCESS ? 
SqlResult::AI_NO_DATA : result; + return currentQuery->NextResultSet(); } void Statement::GetColumnAttribute(uint16_t colIdx, uint16_t attrId, From 83ec0437de55f7a06259287fa6c647569f3db738 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Fri, 3 Nov 2017 12:50:39 +0300 Subject: [PATCH 104/243] IGNITE-6765: Added test for closing ODBC statement (cherry picked from commit 242b345) --- .../cpp/odbc-test/src/queries_test.cpp | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/modules/platforms/cpp/odbc-test/src/queries_test.cpp b/modules/platforms/cpp/odbc-test/src/queries_test.cpp index c6097e0fa82df..0b0bf82a28579 100644 --- a/modules/platforms/cpp/odbc-test/src/queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/queries_test.cpp @@ -2379,5 +2379,22 @@ BOOST_AUTO_TEST_CASE(TestMultipleMixedStatementsNoFetch) } } +BOOST_AUTO_TEST_CASE(TestCloseAfterEmptyUpdate) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLCHAR query[] = "update TestType set strField='test' where _key=42"; + + SQLRETURN ret = SQLExecDirect(stmt, &query[0], SQL_NTS); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); + + ret = SQLFreeStmt(stmt, SQL_CLOSE); + + if (!SQL_SUCCEEDED(ret)) + BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); +} + BOOST_AUTO_TEST_SUITE_END() From b066e017cf292894101fa36a2eeaaeafca61afbc Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Fri, 10 Nov 2017 13:12:24 +0300 Subject: [PATCH 105/243] IGNITE-6841: Increased ODBC protocol version for multiple statements. 
(cherry picked from commit 954e47b) --- .../processors/odbc/odbc/OdbcConnectionContext.java | 6 +++++- .../internal/processors/odbc/odbc/OdbcMessageParser.java | 2 +- modules/platforms/cpp/odbc-test/src/queries_test.cpp | 8 ++++++++ .../cpp/odbc/include/ignite/odbc/protocol_version.h | 9 ++++++++- modules/platforms/cpp/odbc/src/message.cpp | 2 +- modules/platforms/cpp/odbc/src/protocol_version.cpp | 4 +++- 6 files changed, 26 insertions(+), 5 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcConnectionContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcConnectionContext.java index 88a2e0ff1cd34..6a922bfe39107 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcConnectionContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcConnectionContext.java @@ -40,8 +40,11 @@ public class OdbcConnectionContext implements ClientListenerConnectionContext { /** Version 2.3.0: added "skipReducerOnUpdate" flag. */ public static final ClientListenerProtocolVersion VER_2_3_0 = ClientListenerProtocolVersion.create(2, 3, 0); + /** Version 2.3.2: added multiple statements support. */ + public static final ClientListenerProtocolVersion VER_2_3_2 = ClientListenerProtocolVersion.create(2, 3, 2); + /** Current version. */ - private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_3_0; + private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_3_2; /** Supported versions. 
*/ private static final Set SUPPORTED_VERS = new HashSet<>(); @@ -63,6 +66,7 @@ public class OdbcConnectionContext implements ClientListenerConnectionContext { static { SUPPORTED_VERS.add(CURRENT_VER); + SUPPORTED_VERS.add(VER_2_3_0); SUPPORTED_VERS.add(VER_2_1_5); SUPPORTED_VERS.add(VER_2_1_0); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java index bf74bc5fb66bf..fb17d2a8640d3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java @@ -362,7 +362,7 @@ else if (res0 instanceof OdbcQueryGetParamsMetaResult) { * @param affectedRows Affected rows. */ private void writeAffectedRows(BinaryWriterExImpl writer, Collection affectedRows) { - if (ver.compareTo(OdbcConnectionContext.VER_2_3_0) < 0) { + if (ver.compareTo(OdbcConnectionContext.VER_2_3_2) < 0) { long summ = 0; for (Long value : affectedRows) diff --git a/modules/platforms/cpp/odbc-test/src/queries_test.cpp b/modules/platforms/cpp/odbc-test/src/queries_test.cpp index 0b0bf82a28579..6fcf7c9174823 100644 --- a/modules/platforms/cpp/odbc-test/src/queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/queries_test.cpp @@ -771,6 +771,14 @@ BOOST_AUTO_TEST_CASE(TestConnectionProtocolVersion_2_3_0) InsertTestBatch(11, 20, 9); } +BOOST_AUTO_TEST_CASE(TestConnectionProtocolVersion_2_3_2) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache;PROTOCOL_VERSION=2.3.2"); + + InsertTestStrings(10, false); + InsertTestBatch(11, 20, 9); +} + BOOST_AUTO_TEST_CASE(TestTwoRowsInt8) { CheckTwoRowsInt(SQL_C_STINYINT); diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/protocol_version.h b/modules/platforms/cpp/odbc/include/ignite/odbc/protocol_version.h index 
e6088a74b46d3..f39c11e607bfa 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/protocol_version.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/protocol_version.h @@ -31,11 +31,18 @@ namespace ignite class ProtocolVersion { public: - /** Current protocol version. */ + /** Version 2.1.0. */ static const ProtocolVersion VERSION_2_1_0; + + /** Version 2.1.5: added "lazy" flag. */ static const ProtocolVersion VERSION_2_1_5; + + /** Version 2.3.0: added "skipReducerOnUpdate" flag. */ static const ProtocolVersion VERSION_2_3_0; + /** Version 2.3.2: added multiple statements support. */ + static const ProtocolVersion VERSION_2_3_2; + typedef std::set VersionSet; /** diff --git a/modules/platforms/cpp/odbc/src/message.cpp b/modules/platforms/cpp/odbc/src/message.cpp index 5595ddb255e3b..32a5d919e2b44 100644 --- a/modules/platforms/cpp/odbc/src/message.cpp +++ b/modules/platforms/cpp/odbc/src/message.cpp @@ -28,7 +28,7 @@ namespace { affectedRows.clear(); - if (protocolVersion < ProtocolVersion::VERSION_2_3_0) + if (protocolVersion < ProtocolVersion::VERSION_2_3_2) affectedRows.push_back(reader.ReadInt64()); else { diff --git a/modules/platforms/cpp/odbc/src/protocol_version.cpp b/modules/platforms/cpp/odbc/src/protocol_version.cpp index b0b91219f9f8f..d7d85ad5ade07 100644 --- a/modules/platforms/cpp/odbc/src/protocol_version.cpp +++ b/modules/platforms/cpp/odbc/src/protocol_version.cpp @@ -29,11 +29,13 @@ namespace ignite const ProtocolVersion ProtocolVersion::VERSION_2_1_0(2, 1, 0); const ProtocolVersion ProtocolVersion::VERSION_2_1_5(2, 1, 5); const ProtocolVersion ProtocolVersion::VERSION_2_3_0(2, 3, 0); + const ProtocolVersion ProtocolVersion::VERSION_2_3_2(2, 3, 2); ProtocolVersion::VersionSet::value_type supportedArray[] = { ProtocolVersion::VERSION_2_1_0, ProtocolVersion::VERSION_2_1_5, ProtocolVersion::VERSION_2_3_0, + ProtocolVersion::VERSION_2_3_2, }; const ProtocolVersion::VersionSet ProtocolVersion::supported(supportedArray, @@ -62,7 +64,7 @@ 
namespace ignite const ProtocolVersion& ProtocolVersion::GetCurrent() { - return VERSION_2_3_0; + return VERSION_2_3_2; } void ThrowParseError() From b97e04c6f03b9532e1fa2f77dbdd1d4c851fa98d Mon Sep 17 00:00:00 2001 From: "Andrey V. Mashenkov" Date: Fri, 10 Nov 2017 17:33:52 +0300 Subject: [PATCH 106/243] Backport of IGNITE-6649: Added eviction policy factory to cache configuration. Signed-off-by: Andrey Gura (cherry picked from commit 6579e69) --- .../org/apache/ignite/cache/CacheMode.java | 2 +- .../AbstractEvictionPolicyFactory.java | 104 ++ .../fifo/FifoEvictionPolicyFactory.java | 72 ++ .../lru/LruEvictionPolicyFactory.java | 72 ++ .../sorted/SortedEvictionPolicyFactory.java | 98 ++ .../configuration/CacheConfiguration.java | 42 +- .../configuration/NearCacheConfiguration.java | 37 + .../processors/cache/ClusterCachesInfo.java | 7 + .../processors/cache/GridCacheAttributes.java | 19 + .../cache/GridCacheEvictionManager.java | 15 +- .../processors/cache/GridCacheProcessor.java | 19 +- .../processors/cache/GridCacheUtils.java | 1 + .../processors/igfs/IgfsHelperImpl.java | 8 +- .../internal/processors/igfs/IgfsImpl.java | 4 +- ...CacheConfigurationConsistencySelfTest.java | 52 + .../GridCacheNearEvictionEventSelfTest.java | 5 - .../EvictionPolicyFactoryAbstractTest.java | 1073 +++++++++++++++++ .../FifoEvictionPolicyFactorySelfTest.java | 261 ++++ .../lru/LruEvictionPolicyFactorySelfTest.java | 352 ++++++ .../SortedEvictionPolicyFactorySelfTest.java | 264 ++++ .../IgniteCacheEvictionSelfTestSuite.java | 6 + .../apache/ignite/yardstick/IgniteNode.java | 6 +- 22 files changed, 2499 insertions(+), 20 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/cache/eviction/AbstractEvictionPolicyFactory.java create mode 100644 modules/core/src/main/java/org/apache/ignite/cache/eviction/fifo/FifoEvictionPolicyFactory.java create mode 100644 modules/core/src/main/java/org/apache/ignite/cache/eviction/lru/LruEvictionPolicyFactory.java create mode 
100644 modules/core/src/main/java/org/apache/ignite/cache/eviction/sorted/SortedEvictionPolicyFactory.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/EvictionPolicyFactoryAbstractTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/fifo/FifoEvictionPolicyFactorySelfTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/lru/LruEvictionPolicyFactorySelfTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/sorted/SortedEvictionPolicyFactorySelfTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/cache/CacheMode.java b/modules/core/src/main/java/org/apache/ignite/cache/CacheMode.java index e75fa0c9c3b24..4171b1ac28074 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/CacheMode.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/CacheMode.java @@ -55,7 +55,7 @@ public enum CacheMode { *

        * Note that partitioned cache is always fronted by local * {@code 'near'} cache which stores most recent data. You - * can configure the size of near cache via {@link NearCacheConfiguration#getNearEvictionPolicy()} + * can configure the size of near cache via {@link NearCacheConfiguration#getNearEvictionPolicyFactory()} * configuration property. */ PARTITIONED; diff --git a/modules/core/src/main/java/org/apache/ignite/cache/eviction/AbstractEvictionPolicyFactory.java b/modules/core/src/main/java/org/apache/ignite/cache/eviction/AbstractEvictionPolicyFactory.java new file mode 100644 index 0000000000000..012c7ee522ea9 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/cache/eviction/AbstractEvictionPolicyFactory.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.cache.eviction; + +import javax.cache.configuration.Factory; +import org.apache.ignite.internal.util.typedef.internal.A; + +/** + * Common functionality implementation for eviction policies factories. 
+ */ +public abstract class AbstractEvictionPolicyFactory implements Factory { + /** */ + private int maxSize; + + /** */ + private int batchSize = 1; + + /** */ + private long maxMemSize; + + /** + * Sets maximum allowed size of cache before entry will start getting evicted. + * + * @param max Maximum allowed size of cache before entry will start getting evicted. + * @return {@code this} for chaining. + */ + public AbstractEvictionPolicyFactory setMaxSize(int max) { + A.ensure(max >= 0, "max >= 0"); + + this.maxSize = max; + + return this; + } + + /** + * Gets maximum allowed size of cache before entry will start getting evicted. + * + * @return Maximum allowed size of cache before entry will start getting evicted. + */ + public int getMaxSize() { + return maxSize; + } + + /** + * Sets batch size. + * + * @param batchSize Batch size. + * @return {@code this} for chaining. + */ + public AbstractEvictionPolicyFactory setBatchSize(int batchSize) { + A.ensure(batchSize > 0, "batchSize > 0"); + + this.batchSize = batchSize; + + return this; + } + + /** + * Gets batch size. + * + * @return batch size. + */ + public int getBatchSize() { + return batchSize; + } + + /** + * Sets maximum allowed cache size in bytes. + * + * @return {@code this} for chaining. + */ + public AbstractEvictionPolicyFactory setMaxMemorySize(long maxMemSize) { + A.ensure(maxMemSize >= 0, "maxMemSize >= 0"); + + this.maxMemSize = maxMemSize; + + return this; + } + + /** + * Gets maximum allowed cache size in bytes. + * + * @return maximum allowed cache size in bytes. 
+ */ + public long getMaxMemorySize() { + return maxMemSize; + } + +} diff --git a/modules/core/src/main/java/org/apache/ignite/cache/eviction/fifo/FifoEvictionPolicyFactory.java b/modules/core/src/main/java/org/apache/ignite/cache/eviction/fifo/FifoEvictionPolicyFactory.java new file mode 100644 index 0000000000000..856865af6aa3b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/cache/eviction/fifo/FifoEvictionPolicyFactory.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.cache.eviction.fifo; + +import org.apache.ignite.cache.eviction.AbstractEvictionPolicyFactory; + +/** + * Factory class for {@link FifoEvictionPolicy}. + * + * Creates cache Eviction policy based on {@code First In First Out (FIFO)} algorithm and supports batch eviction. + *

        + * The eviction starts in the following cases: + *

          + *
        • The cache size becomes {@code batchSize} elements greater than the maximum size.
        • + *
        • + * The size of cache entries in bytes becomes greater than the maximum memory size. + * The size of cache entry calculates as sum of key size and value size. + *
        • + *
        + * Note:Batch eviction is enabled only if maximum memory limit isn't set ({@code maxMemSize == 0}). + * {@code batchSize} elements will be evicted in this case. The default {@code batchSize} value is {@code 1}. + *

        + * {@link FifoEvictionPolicy} implementation is very efficient since it does not create any additional + * table-like data structures. The {@code FIFO} ordering information is + * maintained by attaching ordering metadata to cache entries. + */ +public class FifoEvictionPolicyFactory extends AbstractEvictionPolicyFactory> { + /** */ + private static final long serialVersionUID = 0L; + + /** Constructor. */ + public FifoEvictionPolicyFactory() { + } + + /** Constructor. */ + public FifoEvictionPolicyFactory(int maxSize) { + setMaxSize(maxSize); + } + + /** */ + public FifoEvictionPolicyFactory(int maxSize, int batchSize, long maxMemSize) { + setMaxSize(maxSize); + setBatchSize(batchSize); + setMaxMemorySize(maxMemSize); + } + + /** {@inheritDoc} */ + @Override public FifoEvictionPolicy create() { + FifoEvictionPolicy policy = new FifoEvictionPolicy<>(); + + policy.setBatchSize(getBatchSize()); + policy.setMaxMemorySize(getMaxMemorySize()); + policy.setMaxSize(getMaxSize()); + + return policy; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/cache/eviction/lru/LruEvictionPolicyFactory.java b/modules/core/src/main/java/org/apache/ignite/cache/eviction/lru/LruEvictionPolicyFactory.java new file mode 100644 index 0000000000000..8f7fbc5c7b95d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/cache/eviction/lru/LruEvictionPolicyFactory.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.cache.eviction.lru; + +import org.apache.ignite.cache.eviction.AbstractEvictionPolicyFactory; + +/** + * Factory class for {@link LruEvictionPolicy}. + * + * Creates cache Eviction policy based on {@code Least Recently Used (LRU)} algorithm and supports batch eviction. + *

        + * The eviction starts in the following cases: + *

          + *
        • The cache size becomes {@code batchSize} elements greater than the maximum size.
        • + *
        • + * The size of cache entries in bytes becomes greater than the maximum memory size. + * The size of cache entry calculates as sum of key size and value size. + *
        • + *
        + * Note:Batch eviction is enabled only if maximum memory limit isn't set ({@code maxMemSize == 0}). + * {@code batchSize} elements will be evicted in this case. The default {@code batchSize} value is {@code 1}. + + * {@link LruEvictionPolicy} implementation is very efficient since it is lock-free and does not create any additional table-like + * data structures. The {@code LRU} ordering information is maintained by attaching ordering metadata to cache entries. + */ +public class LruEvictionPolicyFactory extends AbstractEvictionPolicyFactory> { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + public LruEvictionPolicyFactory() { + } + + /** */ + public LruEvictionPolicyFactory(int maxSize) { + setMaxSize(maxSize); + } + + /** */ + public LruEvictionPolicyFactory(int maxSize, int batchSize, long maxMemSize) { + setMaxSize(maxSize); + setBatchSize(batchSize); + setMaxMemorySize(maxMemSize); + } + + /** {@inheritDoc} */ + @Override public LruEvictionPolicy create() { + LruEvictionPolicy policy = new LruEvictionPolicy<>(); + + policy.setBatchSize(getBatchSize()); + policy.setMaxMemorySize(getMaxMemorySize()); + policy.setMaxSize(getMaxSize()); + + return policy; + } + +} diff --git a/modules/core/src/main/java/org/apache/ignite/cache/eviction/sorted/SortedEvictionPolicyFactory.java b/modules/core/src/main/java/org/apache/ignite/cache/eviction/sorted/SortedEvictionPolicyFactory.java new file mode 100644 index 0000000000000..a88c277485eb7 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/cache/eviction/sorted/SortedEvictionPolicyFactory.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.cache.eviction.sorted; + +import java.io.Serializable; +import java.util.Comparator; +import org.apache.ignite.cache.eviction.AbstractEvictionPolicyFactory; +import org.apache.ignite.cache.eviction.EvictableEntry; + +/** + * Factory class for {@link SortedEvictionPolicy}. + * + * Creates cache Eviction policy which will select the minimum cache entry for eviction. + *

        + * The eviction starts in the following cases: + *

          + *
        • The cache size becomes {@code batchSize} elements greater than the maximum size.
        • + *
        • + * The size of cache entries in bytes becomes greater than the maximum memory size. + * The size of cache entry calculates as sum of key size and value size. + *
        • + *
        + * Note:Batch eviction is enabled only if maximum memory limit isn't set ({@code maxMemSize == 0}). + * {@code batchSize} elements will be evicted in this case. The default {@code batchSize} value is {@code 1}. + *

        + * Entries comparison based on {@link Comparator} instance if provided. + * Default {@code Comparator} behaviour is use cache entries keys for comparison that imposes a requirement for keys + * to implement {@link Comparable} interface. + *

        + * User defined comparator should implement {@link Serializable} interface. + */ +public class SortedEvictionPolicyFactory extends AbstractEvictionPolicyFactory> { + /** */ + private static final long serialVersionUID = 0L; + + /** Comparator. */ + private Comparator> comp; + + /** */ + public SortedEvictionPolicyFactory() { + } + + /** */ + public SortedEvictionPolicyFactory(int maxSize) { + setMaxSize(maxSize); + } + + /** */ + public SortedEvictionPolicyFactory(int maxSize, int batchSize, long maxMemSize) { + setMaxSize(maxSize); + setBatchSize(batchSize); + setMaxMemorySize(maxMemSize); + } + + /** + * Gets entries comparator. + * @return entry comparator. + */ + public Comparator> getComp() { + return comp; + } + + /** + * Sets entries comparator. + * + * @param comp entry comparator. + */ + public void setComp(Comparator> comp) { + this.comp = comp; + } + + /** {@inheritDoc} */ + @Override public SortedEvictionPolicy create() { + SortedEvictionPolicy policy = new SortedEvictionPolicy<>(comp); + + policy.setBatchSize(getBatchSize()); + policy.setMaxMemorySize(getMaxMemorySize()); + policy.setMaxSize(getMaxSize()); + + return policy; + } + +} diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java index 37a067760b8db..bbeb8dfa5ba46 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java @@ -200,9 +200,13 @@ public class CacheConfiguration extends MutableConfiguration { /** Rebalance timeout. */ private long rebalanceTimeout = DFLT_REBALANCE_TIMEOUT; - /** Cache expiration policy. */ + /** Cache eviction policy. */ + @Deprecated private EvictionPolicy evictPlc; + /** Cache eviction policy factory. 
*/ + private Factory evictPlcFactory; + /** */ private boolean onheapCache; @@ -395,6 +399,7 @@ public CacheConfiguration(CompleteConfiguration cfg) { eagerTtl = cc.isEagerTtl(); evictFilter = cc.getEvictionFilter(); evictPlc = cc.getEvictionPolicy(); + evictPlcFactory = cc.getEvictionPolicyFactory(); expiryPolicyFactory = cc.getExpiryPolicyFactory(); grpName = cc.getGroupName(); indexedTypes = cc.getIndexedTypes(); @@ -551,7 +556,10 @@ public CacheConfiguration setMemoryPolicyName(String memPlcName) { * which means that evictions are disabled for cache. * * @return Cache eviction policy or {@code null} if evictions should be disabled. + * + * @deprecated Use {@link #getEvictionPolicyFactory()} instead. */ + @Deprecated @SuppressWarnings({"unchecked"}) @Nullable public EvictionPolicy getEvictionPolicy() { return evictPlc; @@ -560,15 +568,43 @@ public CacheConfiguration setMemoryPolicyName(String memPlcName) { /** * Sets cache eviction policy. * - * @param evictPlc Cache expiration policy. + * @param evictPlc Cache eviction policy. * @return {@code this} for chaining. + * + * @deprecated Use {@link #setEvictionPolicyFactory(Factory)} instead. */ + @Deprecated public CacheConfiguration setEvictionPolicy(@Nullable EvictionPolicy evictPlc) { this.evictPlc = evictPlc; return this; } + /** + * Gets cache eviction policy factory. By default, returns {@code null} + * which means that evictions are disabled for cache. + * + * @return Cache eviction policy factory or {@code null} if evictions should be disabled + * or if {@link #getEvictionPolicy()} should be used instead. + */ + @Nullable public Factory> getEvictionPolicyFactory() { + return evictPlcFactory; + } + + /** + * Sets cache eviction policy factory. + * Note: Eviction policy factory should be {@link Serializable}. + * + * @param evictPlcFactory Cache eviction policy factory. + * @return {@code this} for chaining. 
+ */ + public CacheConfiguration setEvictionPolicyFactory( + @Nullable Factory> evictPlcFactory) { + this.evictPlcFactory = evictPlcFactory; + + return this; + } + /** * Checks if the on-heap cache is enabled for the off-heap based page memory. * @@ -662,7 +698,7 @@ public CacheConfiguration setNodeFilter(IgnitePredicate nodeF * never be evicted. *

        * If not provided, any entry may be evicted depending on - * {@link #getEvictionPolicy() eviction policy} configuration. + * {@link #getEvictionPolicyFactory()} eviction policy} configuration. * * @return Eviction filter or {@code null}. */ diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/NearCacheConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/NearCacheConfiguration.java index 7b3022c00378b..a31a39701e350 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/NearCacheConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/NearCacheConfiguration.java @@ -18,9 +18,11 @@ package org.apache.ignite.configuration; import java.io.Serializable; +import javax.cache.configuration.Factory; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.eviction.EvictionPolicy; import org.apache.ignite.internal.util.typedef.internal.S; +import org.jetbrains.annotations.Nullable; import static org.apache.ignite.configuration.CacheConfiguration.DFLT_NEAR_START_SIZE; @@ -37,8 +39,12 @@ public class NearCacheConfiguration implements Serializable { private static final long serialVersionUID = 0L; /** Near cache eviction policy. */ + @Deprecated private EvictionPolicy nearEvictPlc; + /** Near cache eviction policy factory. */ + private Factory nearEvictPlcFactory; + /** Default near cache start size. */ private int nearStartSize = DFLT_NEAR_START_SIZE; @@ -55,6 +61,7 @@ public NearCacheConfiguration() { * @param ccfg Configuration to copy. */ public NearCacheConfiguration(NearCacheConfiguration ccfg) { + nearEvictPlcFactory = ccfg.getNearEvictionPolicyFactory(); nearEvictPlc = ccfg.getNearEvictionPolicy(); nearStartSize = ccfg.getNearStartSize(); } @@ -65,7 +72,10 @@ public NearCacheConfiguration(NearCacheConfiguration ccfg) { * * @return Near eviction policy. 
* @see CacheConfiguration#getEvictionPolicy() + * + * @deprecated Use {@link #getNearEvictionPolicyFactory()} instead. */ + @Deprecated public EvictionPolicy getNearEvictionPolicy() { return nearEvictPlc; } @@ -75,13 +85,40 @@ public EvictionPolicy getNearEvictionPolicy() { * * @param nearEvictPlc Near eviction policy. * @return {@code this} for chaining. + * + * @deprecated Use {@link #setNearEvictionPolicyFactory(Factory)} instead. */ + @Deprecated public NearCacheConfiguration setNearEvictionPolicy(EvictionPolicy nearEvictPlc) { this.nearEvictPlc = nearEvictPlc; return this; } + /** + * Gets cache eviction policy factory. By default, returns {@code null} + * which means that evictions are disabled for cache. + * + * @return Cache eviction policy or {@code null} if evictions should be disabled. + */ + @Nullable public Factory> getNearEvictionPolicyFactory() { + return nearEvictPlcFactory; + } + + /** + * Sets cache eviction policy factory. + * Note: Eviction policy factory should be {@link Serializable}. + * + * @param nearEvictPlcFactory Cache eviction policy. + * @return {@code this} for chaining. + */ + public NearCacheConfiguration setNearEvictionPolicyFactory( + @Nullable Factory> nearEvictPlcFactory) { + this.nearEvictPlcFactory = nearEvictPlcFactory; + + return this; + } + /** * Gets initial cache size for near cache which will be used to pre-create internal * hash table after start. Default value is defined by {@link CacheConfiguration#DFLT_NEAR_START_SIZE}. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java index 8382821237aa3..69f1a274c30e7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java @@ -275,6 +275,9 @@ private void checkCache(CacheJoinNodeDiscoveryData.CacheInfo locInfo, CacheData CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "evictionPolicy", "Eviction policy", locAttr.evictionPolicyClassName(), rmtAttr.evictionPolicyClassName(), true); + CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "evictionPolicyFactory", "Eviction policy factory", + locAttr.evictionPolicyFactoryClassName(), rmtAttr.evictionPolicyFactoryClassName(), true); + CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "transactionManagerLookup", "Transaction manager lookup", locAttr.transactionManagerLookupClassName(), rmtAttr.transactionManagerLookupClassName(), false); @@ -333,6 +336,10 @@ private void checkCache(CacheJoinNodeDiscoveryData.CacheInfo locInfo, CacheData "Near eviction policy", locAttr.nearEvictionPolicyClassName(), rmtAttr.nearEvictionPolicyClassName(), false); + CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "nearEvictionPolicyFactory", + "Near eviction policy factory", locAttr.nearEvictionPolicyFactoryClassName(), + rmtAttr.nearEvictionPolicyFactoryClassName(), false); + CU.checkAttributeMismatch(log, rmtAttr.cacheName(), rmt, "affinityIncludeNeighbors", "Affinity include neighbors", locAttr.affinityIncludeNeighbors(), rmtAttr.affinityIncludeNeighbors(), true); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAttributes.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAttributes.java index 
d64ee8b30fb23..faad1ec58b9ef 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAttributes.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAttributes.java @@ -154,13 +154,25 @@ public String evictionFilterClassName() { /** * @return Eviction policy class name. + * + * @deprecated Use evictionPolicyFactoryClassName() instead. */ + @Deprecated public String evictionPolicyClassName() { return className(ccfg.getEvictionPolicy()); } + /** + * @return Eviction policy factory class name. + */ + public String evictionPolicyFactoryClassName() { + return className(ccfg.getEvictionPolicyFactory()); + } + /** * @return Near eviction policy class name. + * + * @deprecated Use nearEvictionPolicyFactoryClassName() instead. */ public String nearEvictionPolicyClassName() { NearCacheConfiguration nearCfg = ccfg.getNearConfiguration(); @@ -171,6 +183,13 @@ public String nearEvictionPolicyClassName() { return className(nearCfg.getNearEvictionPolicy()); } + /** + * @return Near eviction policy factory class name. + */ + public String nearEvictionPolicyFactoryClassName() { + return className(ccfg.getEvictionPolicyFactory()); + } + /** * @return Store class name. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java index 7735f74cf2354..084b23586f97d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java @@ -60,7 +60,15 @@ public class GridCacheEvictionManager extends GridCacheManagerAdapter implements @Override public void start0() throws IgniteCheckedException { CacheConfiguration cfg = cctx.config(); - plc = cctx.isNear() ? 
cfg.getNearConfiguration().getNearEvictionPolicy() : cfg.getEvictionPolicy(); + if (cctx.isNear()) { + plc = (cfg.getNearConfiguration().getNearEvictionPolicyFactory() != null) ? + (EvictionPolicy)cfg.getNearConfiguration().getNearEvictionPolicyFactory().create() : + cfg.getNearConfiguration().getNearEvictionPolicy(); + } + else if (cfg.getEvictionPolicyFactory() != null) + plc = (EvictionPolicy)cfg.getEvictionPolicyFactory().create(); + else + plc = cfg.getEvictionPolicy(); plcEnabled = plc != null; @@ -298,4 +306,9 @@ private void notifyPolicy(GridCacheEntryEx e) { X.println(">>> Eviction manager memory stats [igniteInstanceName=" + cctx.igniteInstanceName() + ", cache=" + cctx.name() + ']'); } + + /** For test purposes. */ + public EvictionPolicy getEvictionPolicy() { + return plc; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java index a990c3f324d57..da90ad43eb23f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java @@ -255,7 +255,7 @@ private void suggestOptimizations(CacheConfiguration cfg, boolean hasStore) { String msg = "Disable eviction policy (remove from configuration)"; - if (cfg.getEvictionPolicy() != null) + if (cfg.getEvictionPolicyFactory() != null || cfg.getEvictionPolicy() != null) perf.add(msg, false); else perf.add(msg, true); @@ -470,7 +470,7 @@ else if (cc.getRebalanceMode() == SYNC) { assertParameter(cc.getTransactionManagerLookupClassName() == null, "transaction manager can not be used with ATOMIC cache"); - if (cc.getEvictionPolicy() != null && !cc.isOnheapCacheEnabled()) + if ((cc.getEvictionPolicyFactory() != null || cc.getEvictionPolicy() != null)&& !cc.isOnheapCacheEnabled()) throw new IgniteCheckedException("Onheap cache 
must be enabled if eviction policy is configured [cacheName=" + U.maskName(cc.getName()) + "]"); @@ -511,6 +511,7 @@ private Collection dhtExcludes(GridCacheContext ctx) { * @throws IgniteCheckedException If failed to inject. */ private void prepare(CacheConfiguration cfg, Collection objs) throws IgniteCheckedException { + prepare(cfg, cfg.getEvictionPolicyFactory(), false); prepare(cfg, cfg.getEvictionPolicy(), false); prepare(cfg, cfg.getAffinity(), false); prepare(cfg, cfg.getAffinityMapper(), false); @@ -519,8 +520,10 @@ private void prepare(CacheConfiguration cfg, Collection objs) throws Ign NearCacheConfiguration nearCfg = cfg.getNearConfiguration(); - if (nearCfg != null) + if (nearCfg != null) { + prepare(cfg, nearCfg.getNearEvictionPolicyFactory(), true); prepare(cfg, nearCfg.getNearEvictionPolicy(), true); + } for (Object obj : objs) prepare(cfg, obj, false); @@ -548,6 +551,7 @@ private void prepare(CacheConfiguration cfg, @Nullable Object rsrc, boolean near private void cleanup(GridCacheContext cctx) { CacheConfiguration cfg = cctx.config(); + cleanup(cfg, cfg.getEvictionPolicyFactory(), false); cleanup(cfg, cfg.getEvictionPolicy(), false); cleanup(cfg, cfg.getAffinity(), false); cleanup(cfg, cfg.getAffinityMapper(), false); @@ -562,8 +566,10 @@ private void cleanup(GridCacheContext cctx) { NearCacheConfiguration nearCfg = cfg.getNearConfiguration(); - if (nearCfg != null) + if (nearCfg != null) { + cleanup(cfg, nearCfg.getNearEvictionPolicyFactory(), true); cleanup(cfg, nearCfg.getNearEvictionPolicy(), true); + } cctx.cleanup(); } @@ -3664,13 +3670,16 @@ private Iterable lifecycleAwares(CacheGroupContext grp, CacheConfigurati ret.add(ccfg.getAffinityMapper()); ret.add(ccfg.getEvictionFilter()); + ret.add(ccfg.getEvictionPolicyFactory()); ret.add(ccfg.getEvictionPolicy()); ret.add(ccfg.getInterceptor()); NearCacheConfiguration nearCfg = ccfg.getNearConfiguration(); - if (nearCfg != null) + if (nearCfg != null) { + 
ret.add(nearCfg.getNearEvictionPolicyFactory()); ret.add(nearCfg.getNearEvictionPolicy()); + } Collections.addAll(ret, objs); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java index 53fb4d3b49938..248f2aada0a52 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java @@ -1046,6 +1046,7 @@ public static CacheConfiguration hadoopSystemCache() { cache.setAtomicityMode(TRANSACTIONAL); cache.setWriteSynchronizationMode(FULL_SYNC); + cache.setEvictionPolicyFactory(null); cache.setEvictionPolicy(null); cache.setCacheStoreFactory(null); cache.setNodeFilter(CacheConfiguration.ALL_NODES); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsHelperImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsHelperImpl.java index 29e75a5a122d2..f20b787aa7c17 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsHelperImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsHelperImpl.java @@ -31,7 +31,9 @@ public class IgfsHelperImpl implements IgfsHelper { /** {@inheritDoc} */ @Override public void preProcessCacheConfiguration(CacheConfiguration cfg) { - EvictionPolicy evictPlc = cfg.getEvictionPolicy(); + EvictionPolicy evictPlc = cfg.getEvictionPolicyFactory() != null ? 
+ (EvictionPolicy)cfg.getEvictionPolicyFactory().create() + : cfg.getEvictionPolicy(); if (evictPlc instanceof IgfsPerBlockLruEvictionPolicy && cfg.getEvictionFilter() == null) cfg.setEvictionFilter(new IgfsEvictionFilter()); @@ -39,7 +41,9 @@ public class IgfsHelperImpl implements IgfsHelper { /** {@inheritDoc} */ @Override public void validateCacheConfiguration(CacheConfiguration cfg) throws IgniteCheckedException { - EvictionPolicy evictPlc = cfg.getEvictionPolicy(); + EvictionPolicy evictPlc = cfg.getEvictionPolicyFactory() != null ? + (EvictionPolicy)cfg.getEvictionPolicyFactory().create() + : cfg.getEvictionPolicy(); if (evictPlc != null && evictPlc instanceof IgfsPerBlockLruEvictionPolicy) { EvictionFilter evictFilter = cfg.getEvictionFilter(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java index 5808e7cc4a620..5a9e10a90beef 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsImpl.java @@ -237,7 +237,9 @@ public final class IgfsImpl implements IgfsEx { for (CacheConfiguration cacheCfg : igfsCtx.kernalContext().config().getCacheConfiguration()) { if (F.eq(dataCacheName, cacheCfg.getName())) { - EvictionPolicy evictPlc = cacheCfg.getEvictionPolicy(); + EvictionPolicy evictPlc = cacheCfg.getEvictionPolicyFactory() != null ? 
+ (EvictionPolicy)cacheCfg.getEvictionPolicyFactory().create() + : cacheCfg.getEvictionPolicy(); if (evictPlc != null & evictPlc instanceof IgfsPerBlockLruEvictionPolicy) this.evictPlc = (IgfsPerBlockLruEvictionPolicy)evictPlc; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheConfigurationConsistencySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheConfigurationConsistencySelfTest.java index 2865627e3bd8c..3f4efc293e045 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheConfigurationConsistencySelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheConfigurationConsistencySelfTest.java @@ -29,8 +29,10 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.cache.eviction.EvictionFilter; import org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicy; +import org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicyFactory; import org.apache.ignite.cache.eviction.lru.LruEvictionPolicy; import org.apache.ignite.cache.eviction.sorted.SortedEvictionPolicy; +import org.apache.ignite.cache.eviction.sorted.SortedEvictionPolicyFactory; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.DeploymentMode; @@ -374,6 +376,28 @@ public void testDifferentEvictionEnabled() throws Exception { ); } + /** + * @throws Exception If failed. 
+ */ + public void testDifferentEvictionPolicyEnabled() throws Exception { + checkSecondGridStartFails( + new C1() { + /** {@inheritDoc} */ + @Override public Void apply(CacheConfiguration cfg) { + cfg.setEvictionPolicyFactory(new FifoEvictionPolicyFactory<>()); + cfg.setOnheapCacheEnabled(true); + return null; + } + }, + new C1() { + /** {@inheritDoc} */ + @Override public Void apply(CacheConfiguration cfg) { + return null; + } + } + ); + } + /** * @throws Exception If failed. */ @@ -398,6 +422,30 @@ public void testDifferentEvictionPolicies() throws Exception { ); } + /** + * @throws Exception If failed. + */ + public void testDifferentEvictionPolicyFactories() throws Exception { + checkSecondGridStartFails( + new C1() { + /** {@inheritDoc} */ + @Override public Void apply(CacheConfiguration cfg) { + cfg.setEvictionPolicyFactory(new SortedEvictionPolicyFactory()); + cfg.setOnheapCacheEnabled(true); + return null; + } + }, + new C1() { + /** {@inheritDoc} */ + @Override public Void apply(CacheConfiguration cfg) { + cfg.setEvictionPolicyFactory(new FifoEvictionPolicyFactory<>()); + cfg.setOnheapCacheEnabled(true); + return null; + } + } + ); + } + /** * @throws Exception If failed. 
*/ @@ -585,6 +633,7 @@ public void testPartitionedOnlyAttributesIgnoredForReplicated() throws Exception @Override public Void apply(CacheConfiguration cfg) { NearCacheConfiguration nearCfg = new NearCacheConfiguration(); + nearCfg.setNearEvictionPolicyFactory(new FifoEvictionPolicyFactory<>()); nearCfg.setNearEvictionPolicy(new LruEvictionPolicy()); cfg.setNearConfiguration(nearCfg); @@ -599,6 +648,7 @@ public void testPartitionedOnlyAttributesIgnoredForReplicated() throws Exception @Override public Void apply(CacheConfiguration cfg) { NearCacheConfiguration nearCfg = new NearCacheConfiguration(); + nearCfg.setNearEvictionPolicyFactory(new FifoEvictionPolicyFactory<>()); nearCfg.setNearEvictionPolicy(new FifoEvictionPolicy()); cfg.setNearConfiguration(nearCfg); @@ -624,6 +674,7 @@ public void testIgnoreMismatchForLocalCaches() throws Exception { @Override public Void apply(CacheConfiguration cfg) { cfg.setAffinity(new TestRendezvousAffinityFunction()); + cfg.setEvictionPolicyFactory(new FifoEvictionPolicyFactory<>()); cfg.setEvictionPolicy(new FifoEvictionPolicy()); cfg.setOnheapCacheEnabled(true); @@ -643,6 +694,7 @@ public void testIgnoreMismatchForLocalCaches() throws Exception { @Override public Void apply(CacheConfiguration cfg) { cfg.setAffinity(new RendezvousAffinityFunction()); + cfg.setEvictionPolicyFactory(new FifoEvictionPolicyFactory<>()); cfg.setEvictionPolicy(new LruEvictionPolicy()); cfg.setOnheapCacheEnabled(true); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearEvictionEventSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearEvictionEventSelfTest.java index 7088ad7c43fc8..0d36a5a7fde6b 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearEvictionEventSelfTest.java +++ 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearEvictionEventSelfTest.java @@ -36,9 +36,4 @@ public class GridCacheNearEvictionEventSelfTest extends GridCacheEvictionEventAb @Override protected CacheAtomicityMode atomicityMode() { return TRANSACTIONAL; } - - /** {@inheritDoc} */ - @Override public void testEvictionEvent() throws Exception { - super.testEvictionEvent(); - } } \ No newline at end of file diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/EvictionPolicyFactoryAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/EvictionPolicyFactoryAbstractTest.java new file mode 100644 index 0000000000000..0aa2d7f8c485c --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/EvictionPolicyFactoryAbstractTest.java @@ -0,0 +1,1073 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.eviction; + +import java.lang.reflect.InvocationTargetException; +import java.util.Collection; +import java.util.List; +import java.util.Random; +import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicInteger; +import javax.cache.Cache; +import javax.cache.configuration.Factory; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.eviction.EvictableEntry; +import org.apache.ignite.cache.eviction.EvictionFilter; +import org.apache.ignite.cache.eviction.EvictionPolicy; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.NearCacheConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.CacheEvictionManager; +import org.apache.ignite.internal.processors.cache.GridCacheEvictionManager; +import org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedCache; +import org.apache.ignite.internal.util.typedef.C2; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.apache.ignite.transactions.Transaction; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; +import static org.apache.ignite.cache.CacheMode.LOCAL; +import static org.apache.ignite.cache.CacheMode.PARTITIONED; +import static org.apache.ignite.cache.CacheMode.REPLICATED; +import static 
org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_ASYNC; +import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; +import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED; +import static org.apache.ignite.events.EventType.EVT_TASK_FAILED; +import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED; +import static org.apache.ignite.internal.processors.cache.eviction.EvictionPolicyFactoryAbstractTest.EvictionPolicyProxy.proxy; +import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC; +import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ; + +/** + * Base class for eviction tests. + */ +public abstract class EvictionPolicyFactoryAbstractTest> + extends GridCommonAbstractTest { + /** IP finder. */ + protected static final TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true); + + /** Put entry size. */ + protected static final int PUT_ENTRY_SIZE = 10; + + /** Replicated cache. */ + protected CacheMode mode = REPLICATED; + + /** Near enabled flag. */ + protected boolean nearEnabled; + + /** Policy max. */ + protected int plcMax = 10; + + /** Policy batch size. */ + protected int plcBatchSize = 1; + + /** Policy max memory size. */ + protected long plcMaxMemSize = 0; + + protected Factory policyFactory; + + /** Near policy max. */ + protected int nearMax = 3; + + /** Synchronous commit. 
*/ + protected boolean syncCommit; + + /** */ + protected int gridCnt = 2; + + /** */ + protected EvictionFilter filter; + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + policyFactory = null; + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + assert policyFactory != null; + + IgniteConfiguration c = super.getConfiguration(igniteInstanceName); + + CacheConfiguration cc = defaultCacheConfiguration(); + + cc.setCacheMode(mode); + cc.setOnheapCacheEnabled(true); + cc.setEvictionPolicyFactory(policyFactory); + cc.setWriteSynchronizationMode(syncCommit ? FULL_SYNC : FULL_ASYNC); + cc.setAtomicityMode(TRANSACTIONAL); + + if (nearEnabled) { + NearCacheConfiguration nearCfg = new NearCacheConfiguration(); + + nearCfg.setNearEvictionPolicyFactory(createNearPolicyFactory(nearMax)); + + cc.setNearConfiguration(nearCfg); + } + else + cc.setNearConfiguration(null); + + if (mode == PARTITIONED) + cc.setBackups(1); + + if (filter != null) + cc.setEvictionFilter(filter); + + c.setCacheConfiguration(cc); + + TcpDiscoverySpi disco = new TcpDiscoverySpi(); + + disco.setIpFinder(ipFinder); + + c.setDiscoverySpi(disco); + + c.setIncludeEventTypes(EVT_TASK_FAILED, EVT_TASK_FINISHED, EVT_JOB_MAPPED); + + c.setIncludeProperties(); + + return c; + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + filter = null; + + super.afterTestsStopped(); + } + + /** + * @throws Exception If failed. + */ + public void testMaxSizePolicy() throws Exception { + plcMax = 3; + plcMaxMemSize = 0; + plcBatchSize = 1; + + doTestPolicy(); + } + + /** + * @throws Exception If failed. + */ + public void testMaxSizePolicyWithBatch() throws Exception { + plcMax = 3; + plcMaxMemSize = 0; + plcBatchSize = 2; + + doTestPolicyWithBatch(); + } + + /** + * @throws Exception If failed. 
+ */ + public void testMaxMemSizePolicy() throws Exception { + plcMax = 0; + plcMaxMemSize = 3 * MockEntry.ENTRY_SIZE; + plcBatchSize = 1; + + doTestPolicy(); + } + + /** + * Batch ignored when {@code maxSize > 0} and {@code maxMemSize > 0}. + * + * @throws Exception If failed. + */ + public void testMaxMemSizePolicyWithBatch() throws Exception { + plcMax = 3; + plcMaxMemSize = 10 * MockEntry.ENTRY_SIZE; + plcBatchSize = 2; + + doTestPolicy(); + } + + /** + * @throws Exception If failed. + */ + public void testMaxSizeMemory() throws Exception { + int max = 10; + + plcMax = max; + plcMaxMemSize = 0; + plcBatchSize = 1; + + doTestMemory(max); + } + + /** + * @throws Exception If failed. + */ + public void testMaxSizeMemoryWithBatch() throws Exception { + int max = 10; + + plcMax = max; + plcMaxMemSize = 0; + plcBatchSize = 2; + + doTestMemory(max); + } + + /** + * @throws Exception If failed. + */ + public void testMaxMemSizeMemory() throws Exception { + int max = 10; + + plcMax = 0; + plcMaxMemSize = max * MockEntry.ENTRY_SIZE; + plcBatchSize = 1; + + doTestMemory(max); + } + + /** + * @throws Exception If failed. + */ + public void testMaxSizeRandom() throws Exception { + plcMax = 10; + plcMaxMemSize = 0; + plcBatchSize = 1; + + doTestRandom(); + } + + /** + * @throws Exception If failed. + */ + public void testMaxSizeRandomWithBatch() throws Exception { + plcMax = 10; + plcMaxMemSize = 0; + plcBatchSize = 2; + + doTestRandom(); + } + + /** + * @throws Exception If failed. + */ + public void testMaxMemSizeRandom() throws Exception { + plcMax = 0; + plcMaxMemSize = 10 * MockEntry.KEY_SIZE; + plcBatchSize = 1; + + doTestRandom(); + } + + /** + * @throws Exception If failed. + */ + public void testMaxSizeAllowEmptyEntries() throws Exception { + plcMax = 10; + plcMaxMemSize = 0; + plcBatchSize = 1; + + doTestAllowEmptyEntries(); + } + + /** + * @throws Exception If failed. 
+ */ + public void testMaxSizeAllowEmptyEntriesWithBatch() throws Exception { + plcMax = 10; + plcMaxMemSize = 0; + plcBatchSize = 2; + + doTestAllowEmptyEntries(); + } + + /** + * @throws Exception If failed. + */ + public void testMaxMemSizeAllowEmptyEntries() throws Exception { + plcMax = 0; + plcMaxMemSize = 10 * MockEntry.KEY_SIZE; + plcBatchSize = 1; + + doTestAllowEmptyEntries(); + } + + /** + * @throws Exception If failed. + */ + public void testMaxSizePut() throws Exception { + plcMax = 100; + plcBatchSize = 1; + plcMaxMemSize = 0; + + doTestPut(plcMax); + } + + /** + * @throws Exception If failed. + */ + public void testMaxSizePutWithBatch() throws Exception { + plcMax = 100; + plcBatchSize = 2; + plcMaxMemSize = 0; + + doTestPut(plcMax); + } + + /** + * @throws Exception If failed. + */ + public void testMaxMemSizePut() throws Exception { + int max = 100; + + plcMax = 0; + plcBatchSize = 2; + plcMaxMemSize = max * PUT_ENTRY_SIZE; + + doTestPut(max); + } + + /** + * Tests policy behaviour. + * + * @throws Exception If failed. + */ + protected abstract void doTestPolicy() throws Exception; + + /** + * Tests policy behaviour with batch enabled. + * + * @throws Exception If failed. + */ + protected abstract void doTestPolicyWithBatch() throws Exception; + + /** + * @throws Exception If failed. 
+ */ + protected void doTestAllowEmptyEntries() throws Exception { + policyFactory = createPolicyFactory(); + + try { + startGrid(); + + MockEntry e1 = new MockEntry("1"); + MockEntry e2 = new MockEntry("2"); + MockEntry e3 = new MockEntry("3"); + MockEntry e4 = new MockEntry("4"); + MockEntry e5 = new MockEntry("5"); + + EvictionPolicyProxy p = proxy(policy()); + + p.onEntryAccessed(false, e1); + + assertFalse(e1.isEvicted()); + + check(p.queue().size(), MockEntry.KEY_SIZE); + + p.onEntryAccessed(false, e2); + + assertFalse(e1.isEvicted()); + assertFalse(e2.isEvicted()); + + check(p.queue().size(), MockEntry.KEY_SIZE); + + p.onEntryAccessed(false, e3); + + assertFalse(e1.isEvicted()); + assertFalse(e3.isEvicted()); + + check(p.queue().size(), MockEntry.KEY_SIZE); + + p.onEntryAccessed(false, e4); + + assertFalse(e1.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + + check(p.queue().size(), MockEntry.KEY_SIZE); + + p.onEntryAccessed(false, e5); + + assertFalse(e1.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e5.isEvicted()); + + check(p.queue().size(), MockEntry.KEY_SIZE); + } + finally { + stopAllGrids(); + } + } + + /** + * @throws Exception If failed. + */ + protected void doTestMemory(int max) throws Exception { + policyFactory = createPolicyFactory(); + + try { + startGrid(); + + EvictionPolicyProxy p = proxy(policy()); + + int cnt = max + plcBatchSize; + + for (int i = 0; i < cnt; i++) + p.onEntryAccessed(false, new MockEntry(Integer.toString(i), Integer.toString(i))); + + info(p); + + check(max, MockEntry.ENTRY_SIZE); + } + finally { + stopAllGrids(); + } + } + + /** + * @throws Exception If failed. 
+ */ + protected void doTestRandom() throws Exception { + policyFactory = createPolicyFactory(); + + try { + startGrid(); + + EvictionPolicyProxy p = proxy(policy()); + + int max = 10; + + Random rand = new Random(); + + int keys = 31; + + MockEntry[] entries = new MockEntry[keys]; + + for (int i = 0; i < entries.length; i++) + entries[i] = new MockEntry(Integer.toString(i)); + + int runs = 5000000; + + for (int i = 0; i < runs; i++) { + boolean rmv = rand.nextBoolean(); + + int j = rand.nextInt(entries.length); + + MockEntry e = entry(entries, j); + + if (rmv) + entries[j] = new MockEntry(Integer.toString(j)); + + p.onEntryAccessed(rmv, e); + } + + info(p); + + assertTrue(p.getCurrentSize() <= (plcMaxMemSize > 0 ? max : max + plcBatchSize)); + assertTrue(p.getCurrentMemorySize() <= (plcMaxMemSize > 0 ? max : max + plcBatchSize) * MockEntry.KEY_SIZE); + } + finally { + stopAllGrids(); + } + } + + /** + * @throws Exception If failed. + */ + protected void doTestPut(int max) throws Exception { + mode = LOCAL; + syncCommit = true; + + policyFactory = createPolicyFactory(); + + try { + Ignite ignite = startGrid(); + + IgniteCache cache = ignite.cache(DEFAULT_CACHE_NAME); + + int cnt = 500; + + int min = Integer.MAX_VALUE; + + int minIdx = 0; + + for (int i = 0; i < cnt; i++) { + cache.put(i, i); + + int cacheSize = cache.size(); + + if (i > max && cacheSize < min) { + min = cacheSize; + minIdx = i; + } + } + + assertTrue("Min cache size is too small: " + min, min >= max); + + check(max, PUT_ENTRY_SIZE); + + info("Min cache size [min=" + min + ", idx=" + minIdx + ']'); + info("Current cache size " + cache.size()); + info("Current cache key size " + cache.size()); + + min = Integer.MAX_VALUE; + + minIdx = 0; + + // Touch. 
+ for (int i = cnt; --i > cnt - max;) { + cache.get(i); + + int cacheSize = cache.size(); + + if (cacheSize < min) { + min = cacheSize; + minIdx = i; + } + } + + info("----"); + info("Min cache size [min=" + min + ", idx=" + minIdx + ']'); + info("Current cache size " + cache.size()); + info("Current cache key size " + cache.size()); + + check(max, PUT_ENTRY_SIZE); + } + finally { + stopAllGrids(); + } + } + + /** + * @param arr Array. + * @param idx Index. + * @return Entry at the index. + */ + protected MockEntry entry(MockEntry[] arr, int idx) { + MockEntry e = arr[idx]; + + if (e.isEvicted()) + e = arr[idx] = new MockEntry(e.getKey()); + + return e; + } + + /** + * @param prefix Prefix. + * @param p Policy. + */ + protected void info(String prefix, EvictionPolicy p) { + info(prefix + ": " + p.toString()); + } + + /** @param p Policy. */ + protected void info(EvictionPolicy p) { + info(p.toString()); + } + + /** + * @param c1 Policy collection. + * @param c2 Expected list. + */ + protected static void check(Collection> c1, MockEntry... c2) { + check(c1, F.asList(c2)); + } + + /** + * @param expSize Expected size. + * @param entrySize Entry size. + */ + protected void check(int expSize, int entrySize) { + EvictionPolicyProxy proxy = proxy(policy()); + + assertEquals(expSize, proxy.getCurrentSize()); + assertEquals(expSize * entrySize, proxy.getCurrentMemorySize()); + } + + /** + * @param entrySize Entry size. + * @param c1 Closure 1. + * @param c2 Closure 2. + */ + protected void check(int entrySize, Collection> c1, MockEntry... c2) { + check(c2.length, entrySize); + + check(c1, c2); + } + + /** @return Policy. */ + protected T policy() { + CacheEvictionManager evictMgr = grid().cachex(DEFAULT_CACHE_NAME).context().evicts(); + + assert evictMgr instanceof GridCacheEvictionManager : evictMgr; + + return (T)((GridCacheEvictionManager)evictMgr).getEvictionPolicy(); + } + + /** + * @param i Grid index. + * @return Policy. 
+ */ + @SuppressWarnings({"unchecked"}) + protected T policy(int i) { + CacheEvictionManager evictMgr = grid(i).cachex(DEFAULT_CACHE_NAME).context().evicts(); + + assert evictMgr instanceof GridCacheEvictionManager : evictMgr; + + return (T)((GridCacheEvictionManager)evictMgr).getEvictionPolicy(); + } + + /** + * @param i Grid index. + * @return Policy. + */ + @SuppressWarnings({"unchecked"}) + protected T nearPolicy(int i) { + CacheEvictionManager evictMgr = grid(i).cachex(DEFAULT_CACHE_NAME).context().near().context().evicts(); + + assert evictMgr instanceof GridCacheEvictionManager : evictMgr; + + return (T)((GridCacheEvictionManager)evictMgr).getEvictionPolicy(); + } + /** + * @param c1 Policy collection. + * @param c2 Expected list. + */ + protected static void check(Collection> c1, List c2) { + assert c1.size() == c2.size() : "Mismatch [actual=" + string(c1) + ", expected=" + string(c2) + ']'; + + assert c1.containsAll(c2) : "Mismatch [actual=" + string(c1) + ", expected=" + string(c2) + ']'; + + int i = 0; + + // Check order. + for (Cache.Entry e : c1) + assertEquals(e, c2.get(i++)); + } + + /** + * @param c Collection. + * @return String. + */ + @SuppressWarnings("unchecked") + protected static String string(Iterable c) { + return "[" + + F.fold( + c, + "", + new C2() { + @Override public String apply(Cache.Entry e, String b) { + return b.isEmpty() ? e.getKey().toString() : b + ", " + e.getKey(); + } + }) + + "]]"; + } + + /** @throws Exception If failed. */ + public void testMaxSizePartitionedNearDisabled() throws Exception { + mode = PARTITIONED; + nearEnabled = false; + plcMax = 10; + syncCommit = true; + + gridCnt = 2; + + checkPartitioned(); + } + + /** @throws Exception If failed. */ + public void testMaxSizePartitionedNearDisabledWithBatch() throws Exception { + mode = PARTITIONED; + nearEnabled = false; + plcMax = 10; + plcBatchSize = 2; + syncCommit = true; + + gridCnt = 2; + + checkPartitioned(); + } + + /** @throws Exception If failed. 
*/ + public void testMaxMemSizePartitionedNearDisabled() throws Exception { + mode = PARTITIONED; + nearEnabled = false; + plcMax = 0; + plcMaxMemSize = 100; + syncCommit = true; + + gridCnt = 2; + + checkPartitioned(); + } + + /** @throws Exception If failed. */ + public void testPartitionedNearEnabled() throws Exception { + mode = PARTITIONED; + nearEnabled = true; + nearMax = 3; + plcMax = 10; + syncCommit = true; + + gridCnt = 2; + + checkPartitioned(); // Near size is 0 because of backups present. + } + + /** @throws Exception If failed. */ + public void testPartitionedNearDisabledMultiThreaded() throws Exception { + mode = PARTITIONED; + nearEnabled = false; + plcMax = 100; + + gridCnt = 2; + + checkPartitionedMultiThreaded(); + } + + /** @throws Exception If failed. */ + public void testPartitionedNearEnabledMultiThreaded() throws Exception { + mode = PARTITIONED; + nearEnabled = true; + plcMax = 10; + + gridCnt = 2; + + checkPartitionedMultiThreaded(); + } + + /** + * @throws Exception If failed. + */ + protected void checkPartitioned() throws Exception { + int endSize = nearEnabled ? 0 : plcMax; + + int endPlcSize = nearEnabled ? 
0 : plcMax; + + policyFactory = createPolicyFactory(); + + startGridsMultiThreaded(gridCnt); + + try { + Random rand = new Random(); + + int cnt = 500; + + for (int i = 0; i < cnt; i++) { + IgniteCache cache = grid(rand.nextInt(2)).cache(DEFAULT_CACHE_NAME); + + int key = rand.nextInt(100); + String val = Integer.toString(key); + + cache.put(key, val); + + if (i % 100 == 0) + info("Stored cache object for key [key=" + key + ", idx=" + i + ']'); + } + + if (nearEnabled) { + for (int i = 0; i < gridCnt; i++) + assertEquals(endSize, near(i).nearSize()); + + if (endPlcSize >= 0) + checkNearPolicies(endPlcSize); + } + else { + if (plcMaxMemSize > 0) { + for (int i = 0; i < gridCnt; i++) { + GridDhtColocatedCache cache = colocated(i); + + int memSize = 0; + + for (Cache.Entry entry : cache.entrySet()) + memSize += entry.unwrap(EvictableEntry.class).size(); + + EvictionPolicyProxy plc = proxy(policy(i)); + + assertTrue(plc.getCurrentMemorySize() <= memSize); + } + } + + if (plcMax > 0) { + for (int i = 0; i < gridCnt; i++) { + int actual = colocated(i).map().internalSize(); + + assertTrue("Cache size is greater then policy size [expected=" + endSize + ", actual=" + actual + ']', + actual <= endSize + (plcMaxMemSize > 0 ? 1 : plcBatchSize)); + } + } + + checkPolicies(); + } + } + finally { + stopAllGrids(); + } + } + + /** + * @throws Exception If failed. 
+ */ + protected void checkPartitionedMultiThreaded() throws Exception { + policyFactory = createPolicyFactory(); + + try { + startGridsMultiThreaded(gridCnt); + + final Random rand = new Random(); + + final AtomicInteger cntr = new AtomicInteger(); + + multithreaded(new Callable() { + @Nullable @Override public Object call() throws Exception { + int cnt = 100; + + for (int i = 0; i < cnt && !Thread.currentThread().isInterrupted(); i++) { + IgniteEx grid = grid(rand.nextInt(2)); + + IgniteCache cache = grid.cache(DEFAULT_CACHE_NAME); + + int key = rand.nextInt(1000); + String val = Integer.toString(key); + + try (Transaction tx = grid.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) { + String v = cache.get(key); + + assert v == null || v.equals(Integer.toString(key)) : "Invalid value for key [key=" + key + + ", val=" + v + ']'; + + cache.put(key, val); + + tx.commit(); + } + + if (cntr.incrementAndGet() % 100 == 0) + info("Stored cache object for key [key=" + key + ", idx=" + i + ']'); + } + + return null; + } + }, 10); + } + finally { + stopAllGrids(); + } + } + + /** + * @return Policy. + * + * @deprecated replace with getPolicyFactory(); + */ + @Deprecated + protected T createPolicy() { + return null; + }; + + /** + * @return Policy. + */ + protected abstract Factory createPolicyFactory(); + + /** + * @param nearMax Near max. + * @return Policy. + */ + protected abstract Factory createNearPolicyFactory(int nearMax); + + /** + * Performs after-test near policy check. + * + * @param nearMax Near max. + */ + protected void checkNearPolicies(int nearMax) { + for (int i = 0; i < gridCnt; i++) { + + EvictionPolicyProxy proxy = proxy(nearPolicy(i)); + + for (EvictableEntry e : proxy.queue()) + assert !e.isCached() : "Invalid near policy size: " + proxy.queue(); + } + } + + /** + * Performs after-test policy check. 
+ */ + protected void checkPolicies() { + for (int i = 0; i < gridCnt; i++) { + if (plcMaxMemSize > 0) { + int size = 0; + + for (EvictableEntry entry : proxy(policy(i)).queue()) + size += entry.size(); + + assertEquals(size, proxy(policy(i)).getCurrentMemorySize()); + } + else + assertTrue(proxy(policy(i)).queue().size() <= plcMax + plcBatchSize); + } + } + + /** + * + */ + @SuppressWarnings({"PublicConstructorInNonPublicClass"}) + protected static class MockEntry extends GridCacheMockEntry { + /** Key size. */ + public static final int KEY_SIZE = 1; + + /** Value size. */ + public static final int VALUE_SIZE = 1; + + /** Entry size. */ + public static final int ENTRY_SIZE = KEY_SIZE + VALUE_SIZE; + + /** */ + private IgniteCache parent; + + /** Entry value. */ + private String val; + + /** @param key Key. */ + public MockEntry(String key) { + super(key); + } + + /** + * @param key Key. + * @param val Value. + */ + public MockEntry(String key, String val) { + super(key); + + this.val = val; + } + + /** + * @param key Key. + * @param parent Parent. + */ + public MockEntry(String key, @Nullable IgniteCache parent) { + super(key); + + this.parent = parent; + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public T unwrap(Class clazz) { + if (clazz.isAssignableFrom(IgniteCache.class)) + return (T)parent; + + return super.unwrap(clazz); + } + + /** {@inheritDoc} */ + @Override public String getValue() throws IllegalStateException { + return val; + } + + /** {@inheritDoc} */ + @Override public int size() { + return val == null ? KEY_SIZE : ENTRY_SIZE; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(MockEntry.class, this, super.toString()); + } + } + + /** + * Rvicition policy proxy. + */ + public static class EvictionPolicyProxy implements EvictionPolicy { + /** Policy. */ + private final EvictionPolicy plc; + + /** + * @param plc Policy. 
+ */ + private EvictionPolicyProxy(EvictionPolicy plc) { + this.plc = plc; + } + + /** + * @param plc Policy. + * @return Policy proxy. + */ + public static EvictionPolicyProxy proxy(EvictionPolicy plc) { + return new EvictionPolicyProxy(plc); + } + + /** + * @return Get current size. + */ + int getCurrentSize() { + try { + return (Integer)plc.getClass().getDeclaredMethod("getCurrentSize").invoke(plc); + } + catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + /** + * @return Current memory size. + */ + long getCurrentMemorySize() { + try { + return (Long)plc.getClass().getMethod("getCurrentMemorySize").invoke(plc); + } + catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + /** + * @return Current queue. + */ + public Collection queue() { + try { + return (Collection)plc.getClass().getDeclaredMethod("queue").invoke(plc); + } + catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + /** + * @param rmv Remove. + * @param entry Entry. 
+ */ + @Override public void onEntryAccessed(boolean rmv, EvictableEntry entry) { + try { + plc.getClass() + .getMethod("onEntryAccessed", boolean.class, EvictableEntry.class) + .invoke(plc, rmv, entry); + } + catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + } +} \ No newline at end of file diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/fifo/FifoEvictionPolicyFactorySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/fifo/FifoEvictionPolicyFactorySelfTest.java new file mode 100644 index 0000000000000..472bf41949091 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/fifo/FifoEvictionPolicyFactorySelfTest.java @@ -0,0 +1,261 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.eviction.fifo; + +import javax.cache.configuration.Factory; +import org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicy; +import org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicyFactory; +import org.apache.ignite.internal.processors.cache.eviction.EvictionPolicyFactoryAbstractTest; + +/** + * FIFO eviction policy tests. + */ +public class FifoEvictionPolicyFactorySelfTest extends EvictionPolicyFactoryAbstractTest> { + /** {@inheritDoc} */ + @Override protected Factory> createPolicyFactory() { + return new FifoEvictionPolicyFactory<>(plcMax, plcBatchSize, plcMaxMemSize); + } + + /** {@inheritDoc} */ + @Override protected Factory> createNearPolicyFactory(int nearMax) { + FifoEvictionPolicyFactory plc = new FifoEvictionPolicyFactory<>(); + + plc.setMaxSize(nearMax); + plc.setBatchSize(plcBatchSize); + + return plc; + } + + /** {@inheritDoc} */ + @Override protected void doTestPolicy() throws Exception { + policyFactory = createPolicyFactory(); + + try { + startGrid(); + + MockEntry e1 = new MockEntry("1", "1"); + MockEntry e2 = new MockEntry("2", "2"); + MockEntry e3 = new MockEntry("3", "3"); + MockEntry e4 = new MockEntry("4", "4"); + MockEntry e5 = new MockEntry("5", "5"); + + FifoEvictionPolicy p = policy(); + + p.onEntryAccessed(false, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1); + + p.onEntryAccessed(false, e2); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2); + + p.onEntryAccessed(false, e3); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2, e3); + + assert !e1.isEvicted(); + assert !e2.isEvicted(); + assert !e3.isEvicted(); + + p.onEntryAccessed(false, e4); + + check(MockEntry.ENTRY_SIZE, p.queue(), e2, e3, e4); + + assert e1.isEvicted(); + assert !e2.isEvicted(); + assert !e3.isEvicted(); + assert !e4.isEvicted(); + + p.onEntryAccessed(false, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assert e2.isEvicted(); + assert !e3.isEvicted(); + assert 
!e4.isEvicted(); + assert !e5.isEvicted(); + + p.onEntryAccessed(false, e1 = new MockEntry("1", "1")); + + check(MockEntry.ENTRY_SIZE, p.queue(), e4, e5, e1); + + assert e3.isEvicted(); + assert !e1.isEvicted(); + assert !e4.isEvicted(); + assert !e5.isEvicted(); + + p.onEntryAccessed(false, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e4, e5, e1); + + assert !e1.isEvicted(); + assert !e4.isEvicted(); + assert !e5.isEvicted(); + + p.onEntryAccessed(false, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e4, e5, e1); + + assert !e1.isEvicted(); + assert !e4.isEvicted(); + assert !e5.isEvicted(); + + p.onEntryAccessed(false, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e4, e5, e1); + + assert !e1.isEvicted(); + assert !e4.isEvicted(); + assert !e5.isEvicted(); + + p.onEntryAccessed(true, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e4, e5); + + assert !e1.isEvicted(); + assert !e4.isEvicted(); + assert !e5.isEvicted(); + + p.onEntryAccessed(true, e4); + + check(MockEntry.ENTRY_SIZE, p.queue(), e5); + + assert !e4.isEvicted(); + assert !e5.isEvicted(); + + p.onEntryAccessed(true, e5); + + check(MockEntry.ENTRY_SIZE, p.queue()); + + assert !e5.isEvicted(); + + info(p); + } + finally { + stopAllGrids(); + } + } + + /** {@inheritDoc} */ + @Override protected void doTestPolicyWithBatch() throws Exception { + policyFactory = createPolicyFactory(); + + try { + startGrid(); + + MockEntry e1 = new MockEntry("1", "1"); + MockEntry e2 = new MockEntry("2", "2"); + MockEntry e3 = new MockEntry("3", "3"); + MockEntry e4 = new MockEntry("4", "4"); + MockEntry e5 = new MockEntry("5", "5"); + + FifoEvictionPolicy p = policy(); + + p.onEntryAccessed(false, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1); + + p.onEntryAccessed(false, e2); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2); + + p.onEntryAccessed(false, e3); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2, e3); + + p.onEntryAccessed(false, e4); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2, e3, e4); + + 
assertFalse(e1.isEvicted()); + assertFalse(e2.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + + p.onEntryAccessed(false, e5); + + // Batch evicted. + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assertTrue(e1.isEvicted()); + assertTrue(e2.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e1 = new MockEntry("1", "1")); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5, e1); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + assertFalse(e1.isEvicted()); + + p.onEntryAccessed(false, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5, e1); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + assertFalse(e1.isEvicted()); + + p.onEntryAccessed(false, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5, e1); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + assertFalse(e1.isEvicted()); + + p.onEntryAccessed(true, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e4); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e5); + + assertFalse(e3.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3); + + assertFalse(e3.isEvicted()); + + p.onEntryAccessed(true, e3); + + check(MockEntry.ENTRY_SIZE, p.queue()); + + assertFalse(e3.isEvicted()); + + info(p); + } + finally { + stopAllGrids(); + } + } +} \ No newline at end of file diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/lru/LruEvictionPolicyFactorySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/lru/LruEvictionPolicyFactorySelfTest.java new file mode 100644 
index 0000000000000..d53cb6f36c923 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/lru/LruEvictionPolicyFactorySelfTest.java @@ -0,0 +1,352 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.eviction.lru; + +import javax.cache.configuration.Factory; +import org.apache.ignite.cache.eviction.EvictableEntry; +import org.apache.ignite.cache.eviction.lru.LruEvictionPolicy; +import org.apache.ignite.cache.eviction.lru.LruEvictionPolicyFactory; +import org.apache.ignite.internal.processors.cache.eviction.EvictionPolicyFactoryAbstractTest; + +/** + * LRU Eviction policy tests. + */ +public class LruEvictionPolicyFactorySelfTest extends EvictionPolicyFactoryAbstractTest> { + /** {@inheritDoc} */ + @Override protected Factory> createPolicyFactory() { + return new LruEvictionPolicyFactory<>(plcMax, plcBatchSize, plcMaxMemSize); + } + + /** {@inheritDoc} */ + @Override protected Factory> createNearPolicyFactory(int nearMax) { + LruEvictionPolicyFactory plc = new LruEvictionPolicyFactory<>(); + + plc.setMaxSize(nearMax); + plc.setBatchSize(plcBatchSize); + + return plc; + } + + /** + * @throws Exception If failed. 
+ */ + public void testMiddleAccess() throws Exception { + policyFactory = createPolicyFactory(); + + try { + startGrid(); + + LruEvictionPolicy p = policy(); + + int max = 8; + + p.setMaxSize(max * MockEntry.ENTRY_SIZE); + + MockEntry entry1 = new MockEntry("1", "1"); + MockEntry entry2 = new MockEntry("2", "2"); + MockEntry entry3 = new MockEntry("3", "3"); + + p.onEntryAccessed(false, entry1); + p.onEntryAccessed(false, entry2); + p.onEntryAccessed(false, entry3); + + MockEntry[] freqUsed = new MockEntry[] { + new MockEntry("4", "4"), + new MockEntry("5", "5"), + new MockEntry("6", "6"), + new MockEntry("7", "7"), + new MockEntry("8", "7") + }; + + for (MockEntry e : freqUsed) + p.onEntryAccessed(false, e); + + for (MockEntry e : freqUsed) + assert !e.isEvicted(); + + int cnt = 1001; + + for (int i = 0; i < cnt; i++) + p.onEntryAccessed(false, entry(freqUsed, i % freqUsed.length)); + + info(p); + + check(max, MockEntry.ENTRY_SIZE); + } + finally { + stopGrid(); + } + } + + /** {@inheritDoc} */ + @Override protected void doTestPolicy() throws Exception { + policyFactory = createPolicyFactory(); + + try { + startGrid(); + + MockEntry e1 = new MockEntry("1", "1"); + MockEntry e2 = new MockEntry("2", "2"); + MockEntry e3 = new MockEntry("3", "3"); + MockEntry e4 = new MockEntry("4", "4"); + MockEntry e5 = new MockEntry("5", "5"); + + LruEvictionPolicy p = policy(); + + p.onEntryAccessed(false, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1); + + p.onEntryAccessed(false, e2); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2); + + p.onEntryAccessed(false, e3); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2, e3); + + assertFalse(e1.isEvicted()); + assertFalse(e2.isEvicted()); + assertFalse(e3.isEvicted()); + + p.onEntryAccessed(false, e4); + + check(p.queue(), e2, e3, e4); + check(MockEntry.ENTRY_SIZE, p.queue(), e2, e3, e4); + + assertTrue(e1.isEvicted()); + assertFalse(e2.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + + 
p.onEntryAccessed(false, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assertTrue(e2.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e1 = new MockEntry("1", "1")); + + check(MockEntry.ENTRY_SIZE, p.queue(), e4, e5, e1); + + assertTrue(e3.isEvicted()); + assertFalse(e1.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e5); + + assertEquals(3, p.getCurrentSize()); + + check(MockEntry.ENTRY_SIZE, p.queue(), e4, e1, e5); + + assertFalse(e1.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e4, e5, e1); + + assertFalse(e1.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e5); + + assertEquals(3, p.getCurrentSize()); + + check(MockEntry.ENTRY_SIZE, p.queue(), e4, e1, e5); + + assertFalse(e1.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e4, e5); + + assertFalse(e1.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e4); + + check(MockEntry.ENTRY_SIZE, p.queue(), e5); + + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e5); + + check(MockEntry.ENTRY_SIZE, p.queue()); + + assertFalse(e5.isEvicted()); + + info(p); + } + finally { + stopGrid(); + } + } + + /** {@inheritDoc} */ + @Override protected void doTestPolicyWithBatch() throws Exception { + policyFactory = createPolicyFactory(); + + try { + startGrid(); + + MockEntry e1 = new MockEntry("1", "1"); + MockEntry e2 = new MockEntry("2", "2"); + MockEntry e3 = new MockEntry("3", "3"); + MockEntry e4 = new MockEntry("4", "4"); + MockEntry e5 = new MockEntry("5", "5"); + + LruEvictionPolicy p = policy(); + + 
p.onEntryAccessed(false, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1); + + p.onEntryAccessed(false, e2); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2); + + p.onEntryAccessed(false, e3); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2, e3); + + assertFalse(e1.isEvicted()); + assertFalse(e2.isEvicted()); + assertFalse(e3.isEvicted()); + + p.onEntryAccessed(false, e4); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2, e3, e4); + + assertFalse(e1.isEvicted()); + assertFalse(e2.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + + p.onEntryAccessed(false, e5); + + // Batch evicted + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assertTrue(e1.isEvicted()); + assertTrue(e2.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e1 = new MockEntry("1", "1")); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5, e1); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + assertFalse(e1.isEvicted()); + + p.onEntryAccessed(false, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e1, e5); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e1.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5, e1); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + assertFalse(e1.isEvicted()); + + p.onEntryAccessed(false, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e1, e5); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e1.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e4); + + check(MockEntry.ENTRY_SIZE, p.queue(), 
e3, e5); + + assertFalse(e3.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3); + + assertFalse(e3.isEvicted()); + + p.onEntryAccessed(true, e3); + + check(MockEntry.ENTRY_SIZE, p.queue()); + + info(p); + } + finally { + stopGrid(); + } + } + + /** {@inheritDoc} */ + @Override protected void checkNearPolicies(int endNearPlcSize) { + for (int i = 0; i < gridCnt; i++) + for (EvictableEntry e : nearPolicy(i).queue()) + assert !e.isCached() : "Invalid near policy size: " + nearPolicy(i).queue(); + } + + /** {@inheritDoc} */ + @Override protected void checkPolicies() { + for (int i = 0; i < gridCnt; i++) { + if (plcMaxMemSize > 0) { + int size = 0; + + for (EvictableEntry entry : policy(i).queue()) + size += entry.size(); + + assertEquals(size, policy(i).getCurrentMemorySize()); + } + else + assertTrue(policy(i).queue().size() <= plcMax + plcBatchSize); + } + } +} \ No newline at end of file diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/sorted/SortedEvictionPolicyFactorySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/sorted/SortedEvictionPolicyFactorySelfTest.java new file mode 100644 index 0000000000000..a0ab18f0204db --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/sorted/SortedEvictionPolicyFactorySelfTest.java @@ -0,0 +1,264 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.eviction.sorted; + +import javax.cache.configuration.Factory; +import org.apache.ignite.cache.eviction.sorted.SortedEvictionPolicy; +import org.apache.ignite.cache.eviction.sorted.SortedEvictionPolicyFactory; +import org.apache.ignite.internal.processors.cache.eviction.EvictionPolicyFactoryAbstractTest; + +/** + * Sorted eviction policy tests. + */ +public class SortedEvictionPolicyFactorySelfTest extends EvictionPolicyFactoryAbstractTest> { + /** {@inheritDoc} */ + @Override protected Factory> createPolicyFactory() { + return new SortedEvictionPolicyFactory<>(plcMax, plcBatchSize, plcMaxMemSize); + } + + /** {@inheritDoc} */ + @Override protected Factory> createNearPolicyFactory(int nearMax) { + SortedEvictionPolicyFactory plc = new SortedEvictionPolicyFactory<>(); + + plc.setMaxSize(nearMax); + plc.setBatchSize(plcBatchSize); + + return plc; + } + + /** {@inheritDoc} */ + @Override protected void doTestPolicy() throws Exception { + policyFactory = createPolicyFactory(); + + try { + startGrid(); + + MockEntry e1 = new MockEntry("1", "1"); + MockEntry e2 = new MockEntry("2", "2"); + MockEntry e3 = new MockEntry("3", "3"); + MockEntry e4 = new MockEntry("4", "4"); + MockEntry e5 = new MockEntry("5", "5"); + + SortedEvictionPolicy p = policy(); + + p.onEntryAccessed(false, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1); + + p.onEntryAccessed(false, e2); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2); + + p.onEntryAccessed(false, e3); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2, 
e3); + + assertFalse(e1.isEvicted()); + assertFalse(e2.isEvicted()); + assertFalse(e3.isEvicted()); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2, e3); + + p.onEntryAccessed(false, e4); + + check(MockEntry.ENTRY_SIZE, p.queue(), e2, e3, e4); + + assertTrue(e1.isEvicted()); + assertFalse(e2.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + + p.onEntryAccessed(false, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assertTrue(e2.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e1 = new MockEntry("1", "1")); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assertTrue(e1.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assertTrue(e1.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e3); + + check(MockEntry.ENTRY_SIZE, p.queue(), e4, e5); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e4); + + check(MockEntry.ENTRY_SIZE, p.queue(), e5); + + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e5); + + check(MockEntry.ENTRY_SIZE, p.queue()); + + assertFalse(e5.isEvicted()); + + info(p); + } + finally { + stopAllGrids(); + } + } + + /** {@inheritDoc} */ + @Override protected void doTestPolicyWithBatch() throws Exception { + 
policyFactory = createPolicyFactory(); + + try { + startGrid(); + + MockEntry e1 = new MockEntry("1", "1"); + MockEntry e2 = new MockEntry("2", "2"); + MockEntry e3 = new MockEntry("3", "3"); + MockEntry e4 = new MockEntry("4", "4"); + MockEntry e5 = new MockEntry("5", "5"); + + SortedEvictionPolicy p = policy(); + + p.onEntryAccessed(false, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1); + + p.onEntryAccessed(false, e2); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2); + + p.onEntryAccessed(false, e3); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2, e3); + + p.onEntryAccessed(false, e4); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e2, e3, e4); + + assertFalse(e1.isEvicted()); + assertFalse(e2.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + + p.onEntryAccessed(false, e5); + + // Batch evicted. + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assertTrue(e1.isEvicted()); + assertTrue(e2.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e1 = new MockEntry("1", "1")); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e3, e4, e5); + + assertFalse(e1.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e3, e4, e5); + + assertFalse(e1.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(false, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e1, e3, e4, e5); + + assertFalse(e1.isEvicted()); + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e1); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3, e4, e5); + + assertFalse(e3.isEvicted()); + assertFalse(e4.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e4); + + check(MockEntry.ENTRY_SIZE, 
p.queue(), e3, e5); + + assertFalse(e3.isEvicted()); + assertFalse(e5.isEvicted()); + + p.onEntryAccessed(true, e5); + + check(MockEntry.ENTRY_SIZE, p.queue(), e3); + + assertFalse(e3.isEvicted()); + + p.onEntryAccessed(true, e3); + + check(MockEntry.ENTRY_SIZE, p.queue()); + + assertFalse(e3.isEvicted()); + + info(p); + } + finally { + stopAllGrids(); + } + } +} \ No newline at end of file diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheEvictionSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheEvictionSelfTestSuite.java index 84b14521af097..cd2ac5c012a60 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheEvictionSelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheEvictionSelfTestSuite.java @@ -29,7 +29,9 @@ import org.apache.ignite.internal.processors.cache.eviction.GridCacheEvictionFilterSelfTest; import org.apache.ignite.internal.processors.cache.eviction.GridCacheEvictionLockUnlockSelfTest; import org.apache.ignite.internal.processors.cache.eviction.GridCacheEvictionTouchSelfTest; +import org.apache.ignite.internal.processors.cache.eviction.fifo.FifoEvictionPolicyFactorySelfTest; import org.apache.ignite.internal.processors.cache.eviction.fifo.FifoEvictionPolicySelfTest; +import org.apache.ignite.internal.processors.cache.eviction.lru.LruEvictionPolicyFactorySelfTest; import org.apache.ignite.internal.processors.cache.eviction.lru.LruEvictionPolicySelfTest; import org.apache.ignite.internal.processors.cache.eviction.lru.LruNearEvictionPolicySelfTest; import org.apache.ignite.internal.processors.cache.eviction.lru.LruNearOnlyNearEvictionPolicySelfTest; @@ -43,6 +45,7 @@ import org.apache.ignite.internal.processors.cache.eviction.paged.RandomLruNearEnabledPageEvictionMultinodeTest; import org.apache.ignite.internal.processors.cache.eviction.paged.RandomLruPageEvictionMultinodeTest; import 
org.apache.ignite.internal.processors.cache.eviction.paged.RandomLruPageEvictionWithRebalanceTest; +import org.apache.ignite.internal.processors.cache.eviction.sorted.SortedEvictionPolicyFactorySelfTest; import org.apache.ignite.internal.processors.cache.eviction.sorted.SortedEvictionPolicySelfTest; /** @@ -59,6 +62,9 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(FifoEvictionPolicySelfTest.class)); suite.addTest(new TestSuite(SortedEvictionPolicySelfTest.class)); suite.addTest(new TestSuite(LruEvictionPolicySelfTest.class)); + suite.addTest(new TestSuite(FifoEvictionPolicyFactorySelfTest.class)); + suite.addTest(new TestSuite(SortedEvictionPolicyFactorySelfTest.class)); + suite.addTest(new TestSuite(LruEvictionPolicyFactorySelfTest.class)); suite.addTest(new TestSuite(LruNearEvictionPolicySelfTest.class)); suite.addTest(new TestSuite(LruNearOnlyNearEvictionPolicySelfTest.class)); suite.addTest(new TestSuite(GridCacheNearEvictionSelfTest.class)); diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java index 9770fa367283a..a6850bf4a1ec8 100644 --- a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java +++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteNode.java @@ -107,8 +107,10 @@ public IgniteNode(boolean clientMode, Ignite ignite) { if (args.isNearCache()) { NearCacheConfiguration nearCfg = new NearCacheConfiguration(); - if (args.getNearCacheSize() != 0) - nearCfg.setNearEvictionPolicy(new LruEvictionPolicy(args.getNearCacheSize())); + int nearCacheSize = args.getNearCacheSize(); + + if (nearCacheSize != 0) + nearCfg.setNearEvictionPolicy(new LruEvictionPolicy(nearCacheSize)); cc.setNearConfiguration(nearCfg); } From cee1824013fa3abebb9d5a47e30b116dfe02e8f0 Mon Sep 17 00:00:00 2001 From: Ilya Borisov Date: Mon, 13 Nov 2017 13:20:03 +0700 Subject: [PATCH 107/243] IGNITE-6859 
Don't register "cols" directive by it's function name. (cherry picked from commit 78a8403) --- .../list-editable/components/list-editable-cols/index.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/index.js b/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/index.js index b7b55f6cf8118..e0d4b61e9173b 100644 --- a/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/index.js +++ b/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/index.js @@ -23,6 +23,6 @@ import row from './row.directive.js'; export default angular .module('list-editable-cols', [ ]) - .directive(cols.name, cols) + .directive('listEditableCols', cols) .directive('listEditableItemView', row) .directive('listEditableItemEdit', row); From f52faa87c00da004642ef435c118e4f4a19e2459 Mon Sep 17 00:00:00 2001 From: Ilya Borisov Date: Mon, 13 Nov 2017 13:29:47 +0700 Subject: [PATCH 108/243] IGNITE-6824 Add step validator support to pcScaleNumber directive, add "step" validation message to pc-form-field-size pug mixin. 
(cherry picked from commit 250ceb7) --- .../components/page-configure-basic/components/pcbScaleNumber.js | 1 + .../page-configure-basic/mixins/pcb-form-field-size.pug | 1 + 2 files changed, 2 insertions(+) diff --git a/modules/web-console/frontend/app/components/page-configure-basic/components/pcbScaleNumber.js b/modules/web-console/frontend/app/components/page-configure-basic/components/pcbScaleNumber.js index 93d87ba915849..663d631dca550 100644 --- a/modules/web-console/frontend/app/components/page-configure-basic/components/pcbScaleNumber.js +++ b/modules/web-console/frontend/app/components/page-configure-basic/components/pcbScaleNumber.js @@ -28,6 +28,7 @@ export default function pcbScaleNumber() { ngModel.$parsers.push(down); ngModel.$validators.min = wrap(ngModel.$validators.min)(up); ngModel.$validators.max = wrap(ngModel.$validators.max)(up); + ngModel.$validators.step = wrap(ngModel.$validators.step)(up); scope.$watch(attr.pcbScaleNumber, (value, old) => { factor = Number(value); diff --git a/modules/web-console/frontend/app/components/page-configure-basic/mixins/pcb-form-field-size.pug b/modules/web-console/frontend/app/components/page-configure-basic/mixins/pcb-form-field-size.pug index fccd6ca0a0317..0cd5d0174c793 100644 --- a/modules/web-console/frontend/app/components/page-configure-basic/mixins/pcb-form-field-size.pug +++ b/modules/web-console/frontend/app/components/page-configure-basic/mixins/pcb-form-field-size.pug @@ -63,6 +63,7 @@ mixin pcb-form-field-size(label, model, name, disabled, required, placeholder, m +pcb-form-field-feedback(form, name, 'min', `Value is less than allowable minimum: ${min}`) +pcb-form-field-feedback(form, name, 'max', `Value is more than allowable maximum: ${max}`) +pcb-form-field-feedback(form, name, 'number', 'Only numbers allowed') + +pcb-form-field-feedback(form, name, 'step', 'Step is invalid') .input-tip +pcb-form-field-input(attributes=attributes) From 70a8ed2caea50d7941c1a881741a2cbe09654788 Mon Sep 17 00:00:00 
2001 From: amashenkov Date: Mon, 13 Nov 2017 10:26:53 +0300 Subject: [PATCH 109/243] GG-13021: Fixed NPE on node stop when SSL is used. (cherry picked from commit 132ec3f) --- .../internal/util/nio/GridNioServer.java | 8 ++++-- .../IgniteCommunicationBalanceTest.java | 13 +++++++++ .../IgniteCommunicationSslBalanceTest.java | 28 +++++++++++++++++++ .../testsuites/IgniteCacheTestSuite.java | 2 ++ 4 files changed, 49 insertions(+), 2 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/managers/communication/IgniteCommunicationSslBalanceTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java index 0dd7dd66f0111..1d595d2485f0f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java @@ -3371,8 +3371,12 @@ protected HeadFilter() { GridSelectorNioSessionImpl ses0 = (GridSelectorNioSessionImpl)ses; - if (!ses0.procWrite.get() && ses0.procWrite.compareAndSet(false, true)) - ses0.worker().registerWrite(ses0); + if (!ses0.procWrite.get() && ses0.procWrite.compareAndSet(false, true)) { + GridNioWorker worker = ses0.worker(); + + if (worker != null) + worker.registerWrite(ses0); + } return null; } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/managers/communication/IgniteCommunicationBalanceTest.java b/modules/core/src/test/java/org/apache/ignite/internal/managers/communication/IgniteCommunicationBalanceTest.java index 37a307fc35f52..666bc1dfbf6c2 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/managers/communication/IgniteCommunicationBalanceTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/managers/communication/IgniteCommunicationBalanceTest.java @@ -72,9 +72,19 @@ public class IgniteCommunicationBalanceTest extends 
GridCommonAbstractTest { cfg.setClientMode(client); + if (sslEnabled()) + cfg.setSslContextFactory(GridTestUtils.sslFactory()); + return cfg; } + /** + * @return {@code True} to enable SSL. + */ + protected boolean sslEnabled() { + return false; + } + /** * @return Value for {@link TcpCommunicationSpi#setUsePairedConnections(boolean)}. */ @@ -100,6 +110,9 @@ protected int connectionsPerNode() { * @throws Exception If failed. */ public void testBalance1() throws Exception { + if (sslEnabled()) + return; + System.setProperty(IgniteSystemProperties.IGNITE_IO_BALANCE_PERIOD, "5000"); try { diff --git a/modules/core/src/test/java/org/apache/ignite/internal/managers/communication/IgniteCommunicationSslBalanceTest.java b/modules/core/src/test/java/org/apache/ignite/internal/managers/communication/IgniteCommunicationSslBalanceTest.java new file mode 100644 index 0000000000000..68094e265577a --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/managers/communication/IgniteCommunicationSslBalanceTest.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.managers.communication; + +/** + * + */ +public class IgniteCommunicationSslBalanceTest extends IgniteCommunicationBalanceTest { + /** {@inheritDoc} */ + @Override protected boolean sslEnabled() { + return true; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java index 047550dc4583c..e3ebbc16e00fe 100755 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java @@ -38,6 +38,7 @@ import org.apache.ignite.internal.managers.communication.IgniteCommunicationBalanceMultipleConnectionsTest; import org.apache.ignite.internal.managers.communication.IgniteCommunicationBalancePairedConnectionsTest; import org.apache.ignite.internal.managers.communication.IgniteCommunicationBalanceTest; +import org.apache.ignite.internal.managers.communication.IgniteCommunicationSslBalanceTest; import org.apache.ignite.internal.managers.communication.IgniteIoTestMessagesTest; import org.apache.ignite.internal.managers.communication.IgniteVariousConnectionNumberTest; import org.apache.ignite.internal.processors.cache.CacheAffinityCallSelfTest; @@ -309,6 +310,7 @@ public static TestSuite suite(Set ignoredTests) throws Exception { suite.addTestSuite(IgniteCommunicationBalanceTest.class); suite.addTestSuite(IgniteCommunicationBalancePairedConnectionsTest.class); suite.addTestSuite(IgniteCommunicationBalanceMultipleConnectionsTest.class); + suite.addTestSuite(IgniteCommunicationSslBalanceTest.class); suite.addTestSuite(IgniteIoTestMessagesTest.class); suite.addTestSuite(IgniteDiagnosticMessagesTest.class); suite.addTestSuite(IgniteDiagnosticMessagesMultipleConnectionsTest.class); From c756374d6a3c6652343d2dc448acfb1e015a9564 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Tue, 14 Nov 2017 08:39:26 
+0700 Subject: [PATCH 110/243] IGNITE-6795 WebConsole: Improved file name with query export results. (cherry picked from commit 02ccc9f) --- .../app/modules/sql/sql.controller.js | 39 ++++++++++++++++--- 1 file changed, 33 insertions(+), 6 deletions(-) diff --git a/modules/web-console/frontend/app/modules/sql/sql.controller.js b/modules/web-console/frontend/app/modules/sql/sql.controller.js index 332d4d267cf14..8d27d30709d8b 100644 --- a/modules/web-console/frontend/app/modules/sql/sql.controller.js +++ b/modules/web-console/frontend/app/modules/sql/sql.controller.js @@ -212,6 +212,8 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', $ctrl.cacheMetadataTemplateUrl = cacheMetadataTemplateUrl; $ctrl.chartSettingsTemplateUrl = chartSettingsTemplateUrl; + $ctrl.demoStarted = false; + let stopTopology = null; const _tryStopRefresh = function(paragraph) { @@ -836,7 +838,6 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', /** * Update caches list. - * @private */ const _refreshFn = () => agentMgr.topology(true) @@ -873,6 +874,15 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', if (!_.includes(cacheNames, paragraph.cacheName)) paragraph.cacheName = _.head(cacheNames); }); + + // Await for demo caches. 
+ if (!$ctrl.demoStarted && $root.IgniteDemoMode && _.nonEmpty(cacheNames)) { + $ctrl.demoStarted = true; + + Loading.finish('sqlLoading'); + + _.forEach($scope.notebook.paragraphs, (paragraph) => $scope.execute(paragraph)); + } }) .catch((err) => Messages.showError(err)); @@ -880,10 +890,11 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', agentMgr.startClusterWatch('Back to Configuration', 'base.configuration.tabs.advanced.clusters') .then(() => Loading.start('sqlLoading')) .then(_refreshFn) - .then(() => Loading.finish('sqlLoading')) .then(() => { - $root.IgniteDemoMode && _.forEach($scope.notebook.paragraphs, (paragraph) => $scope.execute(paragraph)); - + if (!$root.IgniteDemoMode) + Loading.finish('sqlLoading'); + }) + .then(() => { stopTopology = $interval(_refreshFn, 5000, 0, false); }); @@ -1626,8 +1637,24 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', LegacyUtils.download('application/octet-stream;charset=utf-8', fileName, escape(csvContent)); }; + /** + * Generate file name with query results. + * + * @param paragraph {Object} Query paragraph . + * @param all {Boolean} All result export flag. + * @returns {string} + */ + const exportFileName = (paragraph, all) => { + const args = paragraph.queryArgs; + + if (args.type === 'SCAN') + return `export-scan-${args.cacheName}-${paragraph.name}${all ? '-all' : ''}.csv`; + + return `export-query-${paragraph.name}${all ? '-all' : ''}.csv`; + }; + $scope.exportCsv = function(paragraph) { - _export(paragraph.name + '.csv', paragraph.gridOptions.columnDefs, paragraph.meta, paragraph.rows); + _export(exportFileName(paragraph, false), paragraph.gridOptions.columnDefs, paragraph.meta, paragraph.rows); // paragraph.gridOptions.api.exporter.csvExport(uiGridExporterConstants.ALL, uiGridExporterConstants.VISIBLE); }; @@ -1643,7 +1670,7 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', .then((nid) => args.type === 'SCAN' ? 
agentMgr.queryScanGetAll(nid, args.cacheName, args.query, !!args.regEx, !!args.caseSensitive, !!args.near, !!args.localNid) : agentMgr.querySqlGetAll(nid, args.cacheName, args.query, !!args.nonCollocatedJoins, !!args.enforceJoinOrder, false, !!args.localNid, !!args.lazy)) - .then((res) => _export(paragraph.name + '-all.csv', paragraph.gridOptions.columnDefs, res.columns, res.rows)) + .then((res) => _export(exportFileName(paragraph, true), paragraph.gridOptions.columnDefs, res.columns, res.rows)) .catch(Messages.showError) .then(() => paragraph.ace && paragraph.ace.focus()); }; From f05c03bce49e7a436405e697ef66148d28f26353 Mon Sep 17 00:00:00 2001 From: vsisko Date: Tue, 14 Nov 2017 09:44:18 +0700 Subject: [PATCH 111/243] IGNITE-6863 Visor CMD: Fixed check that cache available on node. (cherry picked from commit e489e37) --- .../cache/VisorCacheMetricsCollectorTask.java | 5 ++- .../visor/node/VisorGridConfiguration.java | 6 ++- .../cache/VisorCacheCommandSpec.scala | 44 ++++++++++++++----- 3 files changed, 42 insertions(+), 13 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheMetricsCollectorTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheMetricsCollectorTask.java index 8ce3c8c9b8f59..ab1fa8c835d41 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheMetricsCollectorTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheMetricsCollectorTask.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheProcessor; import org.apache.ignite.internal.processors.cache.IgniteCacheProxy; import org.apache.ignite.internal.processors.task.GridInternal; @@ -109,7 +110,9 @@ private 
VisorCacheMetricsCollectorJob(VisorCacheMetricsCollectorTaskArg arg, boo boolean allCaches = cacheNames.isEmpty(); for (IgniteCacheProxy ca : caches) { - if (ca.context().started()) { + GridCacheContext ctx = ca.context(); + + if (ctx.started() && (ctx.affinityNode() || ctx.isNear())) { String cacheName = ca.getName(); VisorCacheMetrics cm = new VisorCacheMetrics(ignite, cacheName); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorGridConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorGridConfiguration.java index 9e2370c4ed6b0..85849a56f907c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorGridConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorGridConfiguration.java @@ -27,6 +27,7 @@ import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.ClientConnectorConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.HadoopConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; @@ -187,7 +188,10 @@ public VisorGridConfiguration(IgniteEx ignite) { srvcCfgs = VisorServiceConfiguration.list(c.getServiceConfiguration()); - dataStorage = new VisorDataStorageConfiguration(c.getDataStorageConfiguration()); + DataStorageConfiguration dsCfg = c.getDataStorageConfiguration(); + + if (dsCfg != null) + dataStorage = new VisorDataStorageConfiguration(dsCfg); } /** diff --git a/modules/visor-console/src/test/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommandSpec.scala b/modules/visor-console/src/test/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommandSpec.scala index 384aae0c0ffbd..d2c3711d6ef70 100644 --- 
a/modules/visor-console/src/test/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommandSpec.scala +++ b/modules/visor-console/src/test/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommandSpec.scala @@ -17,25 +17,35 @@ package org.apache.ignite.visor.commands.cache +import java.lang.{Integer => JavaInt} +import java.util.{Collections, List => JavaList} + import org.apache.ignite.Ignition import org.apache.ignite.cache.CacheAtomicityMode._ import org.apache.ignite.cache.CacheMode._ import org.apache.ignite.cache.query.SqlQuery import org.apache.ignite.cache.query.annotations.QuerySqlField import org.apache.ignite.configuration._ +import org.apache.ignite.internal.visor.cache._ import org.apache.ignite.spi.discovery.tcp._ import org.apache.ignite.spi.discovery.tcp.ipfinder.vm._ - -import java.lang.{Integer => JavaInt} -import org.jetbrains.annotations._ - +import org.apache.ignite.util.AttributeNodeFilter import org.apache.ignite.visor._ import org.apache.ignite.visor.commands.cache.VisorCacheCommand._ +import org.apache.ignite.visor.visor.executeMulti +import org.jetbrains.annotations._ + +import scala.collection.JavaConversions._ /** * Unit test for 'events' command. */ -class VisorCacheCommandSpec extends VisorRuntimeBaseSpec(1) { +class VisorCacheCommandSpec extends VisorRuntimeBaseSpec(2) { + /** */ + val CACHE_NAME = "replicated" + + /** */ + val FILTER_ATTRIBUTE_NAME = "NAME" /** IP finder. 
*/ val ipFinder = new TcpDiscoveryVmIpFinder(true) @@ -47,9 +57,11 @@ class VisorCacheCommandSpec extends VisorRuntimeBaseSpec(1) { def cacheConfig(@NotNull name: String): CacheConfiguration[Object, Object] = { val cfg = new CacheConfiguration[Object, Object] + cfg.setName(name) cfg.setCacheMode(REPLICATED) cfg.setAtomicityMode(TRANSACTIONAL) - cfg.setName(name) + + cfg.setNodeFilter(new AttributeNodeFilter(FILTER_ATTRIBUTE_NAME, "node-1")) val arr = Seq(classOf[JavaInt], classOf[Foo]).toArray @@ -64,19 +76,18 @@ class VisorCacheCommandSpec extends VisorRuntimeBaseSpec(1) { * @param name Ignite instance name. * @return Grid configuration. */ - override def config(name: String): IgniteConfiguration = - { - val cfg = new IgniteConfiguration + override def config(name: String): IgniteConfiguration = { + val cfg = super.config(name) - cfg.setIgniteInstanceName(name) cfg.setLocalHost("127.0.0.1") - cfg.setCacheConfiguration(cacheConfig("replicated")) + cfg.setCacheConfiguration(cacheConfig(CACHE_NAME)) val discoSpi = new TcpDiscoverySpi() discoSpi.setIpFinder(ipFinder) cfg.setDiscoverySpi(discoSpi) + cfg.setUserAttributes(Collections.singletonMap(FILTER_ATTRIBUTE_NAME, name)) cfg } @@ -130,6 +141,17 @@ class VisorCacheCommandSpec extends VisorRuntimeBaseSpec(1) { it("should scan cache") { visor cache "-c=replicated -scan" } + + it("should get metrics for nodes available by cache node filter") { + val caches: JavaList[String] = Collections.singletonList(CACHE_NAME) + + val arg = new VisorCacheMetricsCollectorTaskArg(false, caches) + + val metrics = executeMulti(classOf[VisorCacheMetricsCollectorTask], arg).toList + + assert(metrics.size == 1) + assert(metrics.head.getNodes.size() == 1) + } } } From 8cdfced4dfb736f1da51de68a44c65a932829245 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Tue, 14 Nov 2017 15:48:02 +0300 Subject: [PATCH 112/243] IGNITE-6835 ODBC driver now handles ungraceful TCP disconnects (cherry picked from commit 46c480b) --- 
.../ignite/odbc/system/socket_client.h | 20 +- .../os/linux/src/system/socket_client.cpp | 140 ++++++++++++- .../odbc/os/win/src/system/socket_client.cpp | 194 +++++++++++++++++- modules/platforms/cpp/odbc/src/connection.cpp | 2 +- 4 files changed, 341 insertions(+), 15 deletions(-) diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/system/socket_client.h b/modules/platforms/cpp/odbc/include/ignite/odbc/system/socket_client.h index ee58927ea655a..946605e3db85e 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/system/socket_client.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/system/socket_client.h @@ -21,6 +21,7 @@ #include #include "ignite/common/common.h" +#include "ignite/odbc/diagnostic/diagnosable.h" namespace ignite { @@ -34,6 +35,15 @@ namespace ignite class SocketClient { public: + /** Buffers size */ + enum { BUFFER_SIZE = 0x10000 }; + + /** The time in seconds the connection needs to remain idle before starts sending keepalive probes. */ + enum { KEEP_ALIVE_IDLE_TIME = 60 }; + + /** The time in seconds between individual keepalive probes. */ + enum { KEEP_ALIVE_PROBES_PERIOD = 1 }; + /** * Constructor. */ @@ -49,9 +59,10 @@ namespace ignite * * @param hostname Remote host name. * @param port TCP service port. + * @param diag Diagnostics collector. * @return True on success. */ - bool Connect(const char* hostname, uint16_t port); + bool Connect(const char* hostname, uint16_t port, diagnostic::Diagnosable& diag); /** * Close established connection. @@ -73,7 +84,7 @@ namespace ignite /** * Receive data from established connection. * - * @param data Pointer to data buffer. + * @param buffer Pointer to data buffer. * @param size Size of the buffer in bytes. * @return Number of bytes that have been received on success and negative * value on failure. @@ -81,6 +92,11 @@ namespace ignite int Receive(int8_t* buffer, size_t size); private: + /** + * Tries set socket options. 
+ */ + void TrySetOptions(diagnostic::Diagnosable& diag); + intptr_t socketHandle; IGNITE_NO_COPY_ASSIGNMENT(SocketClient) diff --git a/modules/platforms/cpp/odbc/os/linux/src/system/socket_client.cpp b/modules/platforms/cpp/odbc/os/linux/src/system/socket_client.cpp index 9bdf1d75dd19e..5a9b03a21d398 100644 --- a/modules/platforms/cpp/odbc/os/linux/src/system/socket_client.cpp +++ b/modules/platforms/cpp/odbc/os/linux/src/system/socket_client.cpp @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include @@ -31,6 +31,32 @@ #define SOCKET_ERROR (-1) +namespace +{ + /** + * Get last socket error message. + * @return Last socket error message string. + */ + std::string GetLastSocketErrorMessage() + { + int lastError = errno; + std::stringstream res; + + res << "error_code=" << lastError; + + if (lastError == 0) + return res.str(); + + char buffer[1024] = ""; + + strerror_r(lastError, buffer, sizeof(buffer)); + + res << ", msg=" << buffer; + + return res.str(); + } +} + namespace ignite { namespace odbc @@ -48,11 +74,12 @@ namespace ignite Close(); } - bool SocketClient::Connect(const char* hostname, uint16_t port) + bool SocketClient::Connect(const char* hostname, uint16_t port, diagnostic::Diagnosable& diag) { LOG_MSG("Host: " << hostname << ", port: " << port); addrinfo hints; + memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; @@ -66,26 +93,44 @@ namespace ignite int res = getaddrinfo(hostname, converter.str().c_str(), &hints, &result); if (res != 0) + { + LOG_MSG("Address resolving failed: " << gai_strerror(res)); + + diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Can not resolve host address."); + return false; + } // Attempt to connect to an address until one succeeds - for (addrinfo *it = result; it != NULL; it = it->ai_next) + for (addrinfo *it = result; it != NULL; it = it->ai_next) { - LOG_MSG("Addr: " << it->ai_addr->sa_data[2] << "." - << it->ai_addr->sa_data[3] << "." 
- << it->ai_addr->sa_data[4] << "." - << it->ai_addr->sa_data[5]); + LOG_MSG("Addr: " << (it->ai_addr->sa_data[2] & 0xFF) << "." + << (it->ai_addr->sa_data[3] & 0xFF) << "." + << (it->ai_addr->sa_data[4] & 0xFF) << "." + << (it->ai_addr->sa_data[5] & 0xFF)); // Create a SOCKET for connecting to server socketHandle = socket(it->ai_family, it->ai_socktype, it->ai_protocol); if (socketHandle == SOCKET_ERROR) + { + LOG_MSG("Socket creation failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Can not create new socket."); + return false; + } + + diag.GetDiagnosticRecords().Reset(); + + TrySetOptions(diag); // Connect to server. - res = connect(socketHandle, it->ai_addr, (int)it->ai_addrlen); - if (res == SOCKET_ERROR) + res = connect(socketHandle, it->ai_addr, static_cast(it->ai_addrlen)); + if (SOCKET_ERROR == res) { + LOG_MSG("Connection failed: " << GetLastSocketErrorMessage()); + Close(); continue; @@ -117,6 +162,83 @@ namespace ignite { return recv(socketHandle, reinterpret_cast(buffer), static_cast(size), 0); } + + void SocketClient::TrySetOptions(diagnostic::Diagnosable& diag) + { + int trueOpt = 1; + int bufSizeOpt = BUFFER_SIZE; + int idleOpt = KEEP_ALIVE_IDLE_TIME; + int idleRetryOpt = KEEP_ALIVE_PROBES_PERIOD; + + int res = setsockopt(socketHandle, SOL_SOCKET, SO_SNDBUF, + reinterpret_cast(&bufSizeOpt), sizeof(bufSizeOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP socket send buffer size setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP socket send buffer size"); + } + + res = setsockopt(socketHandle, SOL_SOCKET, SO_RCVBUF, + reinterpret_cast(&bufSizeOpt), sizeof(bufSizeOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP socket receive buffer size setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP socket receive buffer size"); + } + + 
res = setsockopt(socketHandle, IPPROTO_TCP, TCP_NODELAY, + reinterpret_cast(&trueOpt), sizeof(trueOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP no-delay mode setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP no-delay mode"); + } + + res = setsockopt(socketHandle, SOL_SOCKET, SO_KEEPALIVE, + reinterpret_cast(&trueOpt), sizeof(trueOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP keep-alive mode setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP keep-alive mode"); + + // There is no sense in configuring keep alive params if we faileed to set up keep alive mode. + return; + } + + res = setsockopt(socketHandle, IPPROTO_TCP, TCP_KEEPIDLE, + reinterpret_cast(&idleOpt), sizeof(idleOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP keep-alive idle timeout setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP keep-alive idle timeout"); + } + + res = setsockopt(socketHandle, IPPROTO_TCP, TCP_KEEPINTVL, + reinterpret_cast(&idleRetryOpt), sizeof(idleRetryOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP keep-alive probes period setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP keep-alive probes period"); + } + } } } } diff --git a/modules/platforms/cpp/odbc/os/win/src/system/socket_client.cpp b/modules/platforms/cpp/odbc/os/win/src/system/socket_client.cpp index 4c440e2cc7ae9..30fb7d7bacbeb 100644 --- a/modules/platforms/cpp/odbc/os/win/src/system/socket_client.cpp +++ b/modules/platforms/cpp/odbc/os/win/src/system/socket_client.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include @@ -30,6 +31,54 @@ #include "ignite/odbc/utility.h" #include "ignite/odbc/log.h" +namespace +{ + /** + * Get last socket error 
message. + * @return Last socket error message string. + */ + std::string GetLastSocketErrorMessage() + { + HRESULT lastError = WSAGetLastError(); + std::stringstream res; + + res << "error_code=" << lastError; + + if (lastError == 0) + return res.str(); + + LPTSTR errorText = NULL; + + DWORD len = FormatMessage( + // use system message tables to retrieve error text + FORMAT_MESSAGE_FROM_SYSTEM + // allocate buffer on local heap for error text + | FORMAT_MESSAGE_ALLOCATE_BUFFER + // We're not passing insertion parameters + | FORMAT_MESSAGE_IGNORE_INSERTS, + // unused with FORMAT_MESSAGE_FROM_SYSTEM + NULL, + lastError, + MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US), + // output + reinterpret_cast(&errorText), + // minimum size for output buffer + 0, + // arguments - see note + NULL); + + if (NULL != errorText) + { + if (len != 0) + res << ", msg=" << std::string(errorText, len); + + LocalFree(errorText); + } + + return res.str(); + } +} + namespace ignite { namespace odbc @@ -47,7 +96,7 @@ namespace ignite Close(); } - bool SocketClient::Connect(const char* hostname, uint16_t port) + bool SocketClient::Connect(const char* hostname, uint16_t port, diagnostic::Diagnosable& diag) { static bool networkInited = false; @@ -59,10 +108,15 @@ namespace ignite networkInited = (WSAStartup(MAKEWORD(2, 2), &wsaData) == 0); if (!networkInited) + { + LOG_MSG("Networking initialisation failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Can not initialize Windows networking."); + return false; + } } - addrinfo *result = NULL; addrinfo hints; LOG_MSG("Host: " << hostname << " port: " << port); @@ -76,10 +130,17 @@ namespace ignite converter << port; // Resolve the server address and port + addrinfo *result = NULL; int res = getaddrinfo(hostname, converter.str().c_str(), &hints, &result); if (res != 0) + { + LOG_MSG("Address resolving failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, 
"Can not resolve host address."); + return false; + } // Attempt to connect to an address until one succeeds for (addrinfo *it = result; it != NULL; it = it->ai_next) @@ -93,12 +154,24 @@ namespace ignite socketHandle = socket(it->ai_family, it->ai_socktype, it->ai_protocol); if (socketHandle == INVALID_SOCKET) + { + LOG_MSG("Socket creation failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Can not create new socket."); + return false; + } + + diag.GetDiagnosticRecords().Reset(); + + TrySetOptions(diag); // Connect to server. res = connect(socketHandle, it->ai_addr, static_cast(it->ai_addrlen)); - if (res == SOCKET_ERROR) + if (SOCKET_ERROR == res) { + LOG_MSG("Connection failed: " << GetLastSocketErrorMessage()); + Close(); continue; @@ -130,6 +203,121 @@ namespace ignite { return recv(socketHandle, reinterpret_cast(buffer), static_cast(size), 0); } + + void SocketClient::TrySetOptions(diagnostic::Diagnosable& diag) + { + BOOL trueOpt = TRUE; + int bufSizeOpt = BUFFER_SIZE; + + int res = setsockopt(socketHandle, SOL_SOCKET, SO_SNDBUF, + reinterpret_cast(&bufSizeOpt), sizeof(bufSizeOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP socket send buffer size setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP socket send buffer size"); + } + + res = setsockopt(socketHandle, SOL_SOCKET, SO_RCVBUF, + reinterpret_cast(&bufSizeOpt), sizeof(bufSizeOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP socket receive buffer size setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP socket receive buffer size"); + } + + res = setsockopt(socketHandle, IPPROTO_TCP, TCP_NODELAY, + reinterpret_cast(&trueOpt), sizeof(trueOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP no-delay mode setup failed: " << GetLastSocketErrorMessage()); + + 
diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP no-delay mode"); + } + + res = setsockopt(socketHandle, SOL_SOCKET, SO_KEEPALIVE, + reinterpret_cast(&trueOpt), sizeof(trueOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP keep-alive mode setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP keep-alive mode"); + + // There is no sense in configuring keep alive params if we faileed to set up keep alive mode. + return; + } + + // This option is available starting with Windows 10, version 1709. +#if defined(TCP_KEEPIDLE) && defined(TCP_KEEPINTVL) + DWORD idleOpt = KEEP_ALIVE_IDLE_TIME; + DWORD idleRetryOpt = KEEP_ALIVE_PROBES_PERIOD; + + res = setsockopt(socketHandle, IPPROTO_TCP, TCP_KEEPIDLE, + reinterpret_cast(&idleOpt), sizeof(idleOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP keep-alive idle timeout setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP keep-alive idle timeout"); + } + + res = setsockopt(socketHandle, IPPROTO_TCP, TCP_KEEPINTVL, + reinterpret_cast(&idleRetryOpt), sizeof(idleRetryOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP keep-alive probes period setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP keep-alive probes period"); + } +#else // use old hardcore WSAIoctl + + // WinSock structure for KeepAlive timing settings + struct tcp_keepalive settings = {0}; + settings.onoff = 1; + settings.keepalivetime = KEEP_ALIVE_IDLE_TIME * 1000; + settings.keepaliveinterval = KEEP_ALIVE_PROBES_PERIOD * 1000; + + // pointers for WinSock call + DWORD bytesReturned; + WSAOVERLAPPED overlapped; + overlapped.hEvent = NULL; + + // Set KeepAlive settings + res = WSAIoctl( + socketHandle, + SIO_KEEPALIVE_VALS, + &settings, + sizeof(struct tcp_keepalive), + NULL, + 0, + 
&bytesReturned, + &overlapped, + NULL + ); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP keep-alive params setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP keep-alive idle timeout and probes period"); + } +#endif + } + } } } diff --git a/modules/platforms/cpp/odbc/src/connection.cpp b/modules/platforms/cpp/odbc/src/connection.cpp index 8f4bf14b94312..b99d7687035db 100644 --- a/modules/platforms/cpp/odbc/src/connection.cpp +++ b/modules/platforms/cpp/odbc/src/connection.cpp @@ -126,7 +126,7 @@ namespace ignite return SqlResult::AI_ERROR; } - connected = socket.Connect(cfg.GetHost().c_str(), cfg.GetTcpPort()); + connected = socket.Connect(cfg.GetHost().c_str(), cfg.GetTcpPort(), *this); if (!connected) { From 5270067ef7cae56c32756c87e861e636ca75e39e Mon Sep 17 00:00:00 2001 From: Ilya Borisov Date: Wed, 15 Nov 2017 16:03:13 +0700 Subject: [PATCH 113/243] IGNITE-6859 Web Console: Do not split base64 strings to multiple lines because IE11 atob() implementation does not work with these. 
(cherry picked from commit 4589ff2) --- .../apache/ignite/console/agent/handlers/AbstractListener.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/handlers/AbstractListener.java b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/handlers/AbstractListener.java index fc4daef09d68c..ace208744abd8 100644 --- a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/handlers/AbstractListener.java +++ b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/handlers/AbstractListener.java @@ -82,7 +82,7 @@ else if (args.length == 1) if (restRes.getData() != null) { ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); - Base64OutputStream b64os = new Base64OutputStream(baos); + Base64OutputStream b64os = new Base64OutputStream(baos, true, 0, null); GZIPOutputStream gzip = new GZIPOutputStream(b64os); gzip.write(restRes.getData().getBytes(UTF8)); From e080b8effd329782f29e948327f2d4c5b860faac Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Wed, 15 Nov 2017 13:41:31 +0300 Subject: [PATCH 114/243] IGNITE-6836: Implemented query timeout. 
(cherry picked from commit 8c278ca) --- .../odbc/odbc/OdbcMessageParser.java | 14 +++++- .../odbc/OdbcQueryExecuteBatchRequest.java | 16 ++++++- .../odbc/odbc/OdbcQueryExecuteRequest.java | 16 ++++++- .../odbc/odbc/OdbcRequestHandler.java | 9 ++-- .../cpp/odbc-test/src/attributes_test.cpp | 22 ++++++++++ .../cpp/odbc-test/src/parser_test.cpp | 2 +- .../cpp/odbc/include/ignite/odbc/message.h | 33 +++++++++----- .../cpp/odbc/include/ignite/odbc/parser.h | 2 +- .../include/ignite/odbc/query/batch_query.h | 8 +++- .../include/ignite/odbc/query/data_query.h | 8 +++- .../cpp/odbc/include/ignite/odbc/statement.h | 4 ++ modules/platforms/cpp/odbc/src/message.cpp | 34 +++++++++------ .../cpp/odbc/src/query/batch_query.cpp | 9 ++-- .../cpp/odbc/src/query/data_query.cpp | 9 ++-- modules/platforms/cpp/odbc/src/statement.cpp | 43 +++++++++++++++++-- 15 files changed, 180 insertions(+), 49 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java index fb17d2a8640d3..c2137bd469e48 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java @@ -96,7 +96,12 @@ public OdbcMessageParser(GridKernalContext ctx, ClientListenerProtocolVersion ve Object[] params = readParameterRow(reader, paramNum); - res = new OdbcQueryExecuteRequest(schema, sql, params); + int timeout = 0; + + if (ver.compareTo(OdbcConnectionContext.VER_2_3_2) >= 0) + timeout = reader.readInt(); + + res = new OdbcQueryExecuteRequest(schema, sql, params, timeout); break; } @@ -113,7 +118,12 @@ public OdbcMessageParser(GridKernalContext ctx, ClientListenerProtocolVersion ve for (int i = 0; i < rowNum; ++i) params[i] = readParameterRow(reader, paramRowLen); - res = new OdbcQueryExecuteBatchRequest(schema, 
sql, last, params); + int timeout = 0; + + if (ver.compareTo(OdbcConnectionContext.VER_2_3_2) >= 0) + timeout = reader.readInt(); + + res = new OdbcQueryExecuteBatchRequest(schema, sql, last, params, timeout); break; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteBatchRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteBatchRequest.java index 50d16e53442ee..0e4effdaf2800 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteBatchRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteBatchRequest.java @@ -41,13 +41,19 @@ public class OdbcQueryExecuteBatchRequest extends OdbcRequest { @GridToStringExclude private final Object[][] args; + /** Query timeout in seconds. */ + @GridToStringInclude + private final int timeout; + /** * @param schema Schema. * @param sqlQry SQL query. * @param last Last page flag. * @param args Arguments list. + * @param timeout Timeout in seconds. */ - public OdbcQueryExecuteBatchRequest(@Nullable String schema, String sqlQry, boolean last, Object[][] args) { + public OdbcQueryExecuteBatchRequest(@Nullable String schema, String sqlQry, boolean last, Object[][] args, + int timeout) { super(QRY_EXEC_BATCH); assert sqlQry != null : "SQL query should not be null"; @@ -57,6 +63,7 @@ public OdbcQueryExecuteBatchRequest(@Nullable String schema, String sqlQry, bool this.sqlQry = sqlQry; this.last = last; this.args = args; + this.timeout = timeout; } /** @@ -88,6 +95,13 @@ public boolean last() { return last; } + /** + * @return Timeout in seconds. 
+ */ + public int timeout() { + return timeout; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(OdbcQueryExecuteBatchRequest.class, this, "args", args, true); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteRequest.java index 26d332885ba9a..1fde9084299a0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteRequest.java @@ -37,12 +37,17 @@ public class OdbcQueryExecuteRequest extends OdbcRequest { @GridToStringExclude private final Object[] args; + /** Query timeout in seconds. */ + @GridToStringInclude + private final int timeout; + /** * @param schema Schema. * @param sqlQry SQL query. * @param args Arguments list. + * @param timeout Timeout in seconds. */ - public OdbcQueryExecuteRequest(@Nullable String schema, String sqlQry, Object[] args) { + public OdbcQueryExecuteRequest(@Nullable String schema, String sqlQry, Object[] args, int timeout) { super(QRY_EXEC); assert sqlQry != null : "SQL query should not be null"; @@ -50,6 +55,7 @@ public OdbcQueryExecuteRequest(@Nullable String schema, String sqlQry, Object[] this.schema = schema; this.sqlQry = sqlQry; this.args = args; + this.timeout = timeout; } /** @@ -73,8 +79,16 @@ public Object[] arguments() { return schema; } + /** + * @return Timeout in seconds. 
+ */ + public int timeout() { + return timeout; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(OdbcQueryExecuteRequest.class, this, "args", args, true); } + } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java index 7f6b48d9d11a8..3bc5a23d7946a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java @@ -25,6 +25,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; @@ -203,7 +204,7 @@ public void onDisconnect() { * @param args Arguments. * @return Query instance. 
*/ - private SqlFieldsQueryEx makeQuery(String schema, String sql, Object[] args) { + private SqlFieldsQueryEx makeQuery(String schema, String sql, Object[] args, int timeout) { SqlFieldsQueryEx qry = new SqlFieldsQueryEx(sql, null); qry.setArgs(args); @@ -216,6 +217,8 @@ private SqlFieldsQueryEx makeQuery(String schema, String sql, Object[] args) { qry.setSchema(schema); qry.setSkipReducerOnUpdate(skipReducerOnUpdate); + qry.setTimeout(timeout, TimeUnit.SECONDS); + return qry; } @@ -243,7 +246,7 @@ private ClientListenerResponse executeQuery(OdbcQueryExecuteRequest req) { log.debug("ODBC query parsed [reqId=" + req.requestId() + ", original=" + req.sqlQuery() + ", parsed=" + sql + ']'); - SqlFieldsQuery qry = makeQuery(req.schema(), sql, req.arguments()); + SqlFieldsQuery qry = makeQuery(req.schema(), sql, req.arguments(), req.timeout()); List>> cursors = ctx.query().querySqlFieldsNoCache(qry, true, false); @@ -285,7 +288,7 @@ private ClientListenerResponse executeBatchQuery(OdbcQueryExecuteBatchRequest re log.debug("ODBC query parsed [reqId=" + req.requestId() + ", original=" + req.sqlQuery() + ", parsed=" + sql + ']'); - SqlFieldsQuery qry = makeQuery(req.schema(), sql, req.arguments()); + SqlFieldsQuery qry = makeQuery(req.schema(), sql, req.arguments(), req.timeout()); Object[][] paramSet = req.arguments(); diff --git a/modules/platforms/cpp/odbc-test/src/attributes_test.cpp b/modules/platforms/cpp/odbc-test/src/attributes_test.cpp index f8cb5acf53b87..b87f4b92a5a80 100644 --- a/modules/platforms/cpp/odbc-test/src/attributes_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/attributes_test.cpp @@ -205,4 +205,26 @@ BOOST_AUTO_TEST_CASE(ConnectionAttributeConnectionDeadSet) CheckSQLConnectionDiagnosticError("HY092"); } +BOOST_AUTO_TEST_CASE(StatementAttributeQueryTimeout) +{ + Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache"); + + SQLULEN timeout = -1; + SQLRETURN ret = SQLGetStmtAttr(stmt, SQL_ATTR_QUERY_TIMEOUT, &timeout, 0, 0); + + 
ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + BOOST_REQUIRE_EQUAL(timeout, 0); + + ret = SQLSetStmtAttr(stmt, SQL_ATTR_QUERY_TIMEOUT, reinterpret_cast(7), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + timeout = -1; + + ret = SQLGetStmtAttr(stmt, SQL_ATTR_QUERY_TIMEOUT, &timeout, 0, 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + BOOST_REQUIRE_EQUAL(timeout, 7); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc-test/src/parser_test.cpp b/modules/platforms/cpp/odbc-test/src/parser_test.cpp index 51539ae536a54..af113690c491f 100644 --- a/modules/platforms/cpp/odbc-test/src/parser_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/parser_test.cpp @@ -42,7 +42,7 @@ struct TestMessage // No-op. } - void Write(ignite::impl::binary::BinaryWriterImpl& writer) const + void Write(ignite::impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const { writer.WriteInt32(a); writer.WriteString(b.data(), static_cast(b.size())); diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/message.h b/modules/platforms/cpp/odbc/include/ignite/odbc/message.h index 8d6c90641d1f2..51c0b4162fdbd 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/message.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/message.h @@ -95,7 +95,7 @@ namespace ignite * Write request using provided writer. * @param writer Writer. */ - void Write(impl::binary::BinaryWriterImpl& writer) const; + void Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const; private: /** Protocol version. */ @@ -132,8 +132,10 @@ namespace ignite * @param schema Schema. * @param sql SQL query. * @param params Query arguments. + * @param timeout Timeout. */ - QueryExecuteRequest(const std::string& schema, const std::string& sql, const app::ParameterSet& params); + QueryExecuteRequest(const std::string& schema, const std::string& sql, const app::ParameterSet& params, + int32_t timeout); /** * Destructor. 
@@ -143,8 +145,9 @@ namespace ignite /** * Write request using provided writer. * @param writer Writer. + * @param ver Version. */ - void Write(impl::binary::BinaryWriterImpl& writer) const; + void Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion& ver) const; private: /** Schema name. */ @@ -155,6 +158,9 @@ namespace ignite /** Parameters bindings. */ const app::ParameterSet& params; + + /** Timeout. */ + int32_t timeout; }; /** @@ -171,9 +177,10 @@ namespace ignite * @param params Query arguments. * @param begin Beginng of the interval. * @param end End of the interval. + * @param timeout Timeout. */ QueryExecuteBatchtRequest(const std::string& schema, const std::string& sql, - const app::ParameterSet& params, SqlUlen begin, SqlUlen end, bool last); + const app::ParameterSet& params, SqlUlen begin, SqlUlen end, bool last, int32_t timeout); /** * Destructor. @@ -183,8 +190,9 @@ namespace ignite /** * Write request using provided writer. * @param writer Writer. + * @param ver Version. */ - void Write(impl::binary::BinaryWriterImpl& writer) const; + void Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion& ver) const; private: /** Schema name. */ @@ -204,6 +212,9 @@ namespace ignite /** Last page flag. */ bool last; + + /** Timeout. */ + int32_t timeout; }; /** @@ -228,7 +239,7 @@ namespace ignite * Write request using provided writer. * @param writer Writer. */ - void Write(impl::binary::BinaryWriterImpl& writer) const; + void Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const; private: /** Query ID. */ @@ -258,7 +269,7 @@ namespace ignite * Write request using provided writer. * @param writer Writer. */ - void Write(impl::binary::BinaryWriterImpl& writer) const; + void Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const; private: /** Query ID. */ @@ -292,7 +303,7 @@ namespace ignite * Write request using provided writer. * @param writer Writer. 
*/ - void Write(impl::binary::BinaryWriterImpl& writer) const; + void Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const; private: /** Schema search pattern. */ @@ -331,7 +342,7 @@ namespace ignite * Write request using provided writer. * @param writer Writer. */ - void Write(impl::binary::BinaryWriterImpl& writer) const; + void Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const; private: /** Column search pattern. */ @@ -378,7 +389,7 @@ namespace ignite * Write request using provided writer. * @param writer Writer. */ - void Write(impl::binary::BinaryWriterImpl& writer) const; + void Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const; private: /** Schema. */ @@ -419,7 +430,7 @@ namespace ignite * Write request using provided writer. * @param writer Writer. */ - void Write(impl::binary::BinaryWriterImpl& writer) const; + void Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const; private: /** Query ID. */ diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/parser.h b/modules/platforms/cpp/odbc/include/ignite/odbc/parser.h index bfea67cc048fa..5ffb44c276c07 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/parser.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/parser.h @@ -79,7 +79,7 @@ namespace ignite BinaryWriterImpl writer(&outStream, 0); - msg.Write(writer); + msg.Write(writer, protocolVer); buf.resize(outStream.Position()); diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h index 1e6c8698a9106..af319abf398df 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/batch_query.h @@ -44,9 +44,10 @@ namespace ignite * @param connection Associated connection. * @param sql SQL query string. * @param params SQL params. + * @param timeout Timeout in seconds. 
*/ - BatchQuery(diagnostic::Diagnosable& diag, Connection& connection, - const std::string& sql, const app::ParameterSet& params); + BatchQuery(diagnostic::Diagnosable& diag, Connection& connection, const std::string& sql, + const app::ParameterSet& params, int32_t& timeout); /** * Destructor. @@ -156,6 +157,9 @@ namespace ignite /** Query executed. */ bool executed; + + /** Timeout. */ + int32_t& timeout; }; } } diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h b/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h index c47600043d158..af39bdbec59df 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/query/data_query.h @@ -44,9 +44,10 @@ namespace ignite * @param connection Associated connection. * @param sql SQL query string. * @param params SQL params. + * @param timeout Timeout. */ - DataQuery(diagnostic::Diagnosable& diag, Connection& connection, - const std::string& sql, const app::ParameterSet& params); + DataQuery(diagnostic::Diagnosable& diag, Connection& connection, const std::string& sql, + const app::ParameterSet& params, int32_t& timeout); /** * Destructor. @@ -184,6 +185,9 @@ namespace ignite /** Cached next result page. */ std::auto_ptr cachedNextPage; + + /** Timeout. */ + int32_t& timeout; }; } } diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h b/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h index 6d4b3ab6f48ca..4cc30c3bc9b42 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/statement.h @@ -666,7 +666,11 @@ namespace ignite /** Offset added to pointers to change binding of column data. */ int* columnBindOffset; + /** Parameters. */ app::ParameterSet parameters; + + /** Query timeout in seconds. 
*/ + int32_t timeout; }; } } diff --git a/modules/platforms/cpp/odbc/src/message.cpp b/modules/platforms/cpp/odbc/src/message.cpp index 32a5d919e2b44..57b72103ff1a2 100644 --- a/modules/platforms/cpp/odbc/src/message.cpp +++ b/modules/platforms/cpp/odbc/src/message.cpp @@ -63,7 +63,7 @@ namespace ignite // No-op. } - void HandshakeRequest::Write(impl::binary::BinaryWriterImpl& writer) const + void HandshakeRequest::Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const { writer.WriteInt8(RequestType::HANDSHAKE); @@ -86,10 +86,11 @@ namespace ignite } QueryExecuteRequest::QueryExecuteRequest(const std::string& schema, const std::string& sql, - const app::ParameterSet& params): + const app::ParameterSet& params, int32_t timeout): schema(schema), sql(sql), - params(params) + params(params), + timeout(timeout) { // No-op. } @@ -99,7 +100,7 @@ namespace ignite // No-op. } - void QueryExecuteRequest::Write(impl::binary::BinaryWriterImpl& writer) const + void QueryExecuteRequest::Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion& ver) const { writer.WriteInt8(RequestType::EXECUTE_SQL_QUERY); @@ -111,16 +112,20 @@ namespace ignite writer.WriteObject(sql); params.Write(writer); + + if (ver >= ProtocolVersion::VERSION_2_3_2) + writer.WriteInt32(timeout); } QueryExecuteBatchtRequest::QueryExecuteBatchtRequest(const std::string& schema, const std::string& sql, - const app::ParameterSet& params, SqlUlen begin, SqlUlen end, bool last): + const app::ParameterSet& params, SqlUlen begin, SqlUlen end, bool last, int32_t timeout): schema(schema), sql(sql), params(params), begin(begin), end(end), - last(last) + last(last), + timeout(timeout) { // No-op. } @@ -130,7 +135,7 @@ namespace ignite // No-op. 
} - void QueryExecuteBatchtRequest::Write(impl::binary::BinaryWriterImpl& writer) const + void QueryExecuteBatchtRequest::Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion& ver) const { writer.WriteInt8(RequestType::EXECUTE_SQL_QUERY_BATCH); @@ -142,6 +147,9 @@ namespace ignite writer.WriteObject(sql); params.Write(writer, begin, end, last); + + if (ver >= ProtocolVersion::VERSION_2_3_2) + writer.WriteInt32(timeout); } QueryCloseRequest::QueryCloseRequest(int64_t queryId): queryId(queryId) @@ -154,7 +162,7 @@ namespace ignite // No-op. } - void QueryCloseRequest::Write(impl::binary::BinaryWriterImpl& writer) const + void QueryCloseRequest::Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const { writer.WriteInt8(RequestType::CLOSE_SQL_QUERY); writer.WriteInt64(queryId); @@ -172,7 +180,7 @@ namespace ignite // No-op. } - void QueryFetchRequest::Write(impl::binary::BinaryWriterImpl& writer) const + void QueryFetchRequest::Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const { writer.WriteInt8(RequestType::FETCH_SQL_QUERY); @@ -194,7 +202,7 @@ namespace ignite // No-op. } - void QueryGetColumnsMetaRequest::Write(impl::binary::BinaryWriterImpl& writer) const + void QueryGetColumnsMetaRequest::Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const { writer.WriteInt8(RequestType::GET_COLUMNS_METADATA); @@ -218,7 +226,7 @@ namespace ignite // No-op. 
} - void QueryGetTablesMetaRequest::Write(impl::binary::BinaryWriterImpl& writer) const + void QueryGetTablesMetaRequest::Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const { writer.WriteInt8(RequestType::GET_TABLES_METADATA); @@ -228,7 +236,7 @@ namespace ignite writer.WriteObject(tableTypes); } - void QueryGetParamsMetaRequest::Write(impl::binary::BinaryWriterImpl& writer) const + void QueryGetParamsMetaRequest::Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const { writer.WriteInt8(RequestType::GET_PARAMS_METADATA); @@ -236,7 +244,7 @@ namespace ignite writer.WriteObject(sqlQuery); } - void QueryMoreResultsRequest::Write(impl::binary::BinaryWriterImpl& writer) const + void QueryMoreResultsRequest::Write(impl::binary::BinaryWriterImpl& writer, const ProtocolVersion&) const { writer.WriteInt8(RequestType::QUERY_MORE_RESULTS); diff --git a/modules/platforms/cpp/odbc/src/query/batch_query.cpp b/modules/platforms/cpp/odbc/src/query/batch_query.cpp index 29d11ca8fc208..07d42d4e33f92 100644 --- a/modules/platforms/cpp/odbc/src/query/batch_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/batch_query.cpp @@ -27,8 +27,8 @@ namespace ignite { namespace query { - BatchQuery::BatchQuery(diagnostic::Diagnosable& diag, Connection& connection, - const std::string& sql, const app::ParameterSet& params) : + BatchQuery::BatchQuery(diagnostic::Diagnosable& diag, Connection& connection, const std::string& sql, + const app::ParameterSet& params, int32_t& timeout) : Query(diag, QueryType::BATCH), connection(connection), sql(sql), @@ -36,7 +36,8 @@ namespace ignite resultMeta(), rowsAffected(), rowsAffectedIdx(0), - executed(false) + executed(false), + timeout(timeout) { // No-op. 
} @@ -147,7 +148,7 @@ namespace ignite { const std::string& schema = connection.GetSchema(); - QueryExecuteBatchtRequest req(schema, sql, params, begin, end, last); + QueryExecuteBatchtRequest req(schema, sql, params, begin, end, last, timeout); QueryExecuteBatchResponse rsp; try diff --git a/modules/platforms/cpp/odbc/src/query/data_query.cpp b/modules/platforms/cpp/odbc/src/query/data_query.cpp index 012b02652d73f..0539af505810f 100644 --- a/modules/platforms/cpp/odbc/src/query/data_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/data_query.cpp @@ -28,8 +28,8 @@ namespace ignite { namespace query { - DataQuery::DataQuery(diagnostic::Diagnosable& diag, Connection& connection, - const std::string& sql, const app::ParameterSet& params) : + DataQuery::DataQuery(diagnostic::Diagnosable& diag, Connection& connection, const std::string& sql, + const app::ParameterSet& params, int32_t& timeout) : Query(diag, QueryType::DATA), connection(connection), sql(sql), @@ -38,7 +38,8 @@ namespace ignite cursor(), rowsAffected(), rowsAffectedIdx(0), - cachedNextPage() + cachedNextPage(), + timeout(timeout) { // No-op. } @@ -216,7 +217,7 @@ namespace ignite { const std::string& schema = connection.GetSchema(); - QueryExecuteRequest req(schema, sql, params); + QueryExecuteRequest req(schema, sql, params, timeout); QueryExecuteResponse rsp; try diff --git a/modules/platforms/cpp/odbc/src/statement.cpp b/modules/platforms/cpp/odbc/src/statement.cpp index b167d44915c02..898d44d43c827 100644 --- a/modules/platforms/cpp/odbc/src/statement.cpp +++ b/modules/platforms/cpp/odbc/src/statement.cpp @@ -15,6 +15,8 @@ * limitations under the License. */ +#include + #include "ignite/odbc/system/odbc_constants.h" #include "ignite/odbc/query/batch_query.h" #include "ignite/odbc/query/data_query.h" @@ -42,7 +44,8 @@ namespace ignite rowsFetched(0), rowStatuses(0), columnBindOffset(0), - parameters() + parameters(), + timeout(0) { // No-op. 
} @@ -282,6 +285,29 @@ namespace ignite break; } + case SQL_ATTR_QUERY_TIMEOUT: + { + SqlUlen uTimeout = reinterpret_cast(value); + + if (uTimeout > INT32_MAX) + { + timeout = INT32_MAX; + + std::stringstream ss; + + ss << "Value is too big: " << uTimeout << ", changing to " << timeout << "."; + std::string msg = ss.str(); + + AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, msg); + + return SqlResult::AI_SUCCESS_WITH_INFO; + } + + timeout = static_cast(uTimeout); + + break; + } + default: { AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, @@ -409,6 +435,15 @@ namespace ignite break; } + case SQL_ATTR_QUERY_TIMEOUT: + { + SqlUlen *uTimeout = reinterpret_cast(buf); + + *uTimeout = static_cast(timeout); + + break; + } + default: { AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, @@ -496,7 +531,7 @@ namespace ignite // Resetting parameters types as we are changing the query. parameters.Prepare(); - currentQuery.reset(new query::DataQuery(*this, connection, query, parameters)); + currentQuery.reset(new query::DataQuery(*this, connection, query, parameters, timeout)); return SqlResult::AI_SUCCESS; } @@ -534,13 +569,13 @@ namespace ignite { query::DataQuery& qry = static_cast(*currentQuery); - currentQuery.reset(new query::BatchQuery(*this, connection, qry.GetSql(), parameters)); + currentQuery.reset(new query::BatchQuery(*this, connection, qry.GetSql(), parameters, timeout)); } else if (parameters.GetParamSetSize() == 1 && currentQuery->GetType() == query::QueryType::BATCH) { query::BatchQuery& qry = static_cast(*currentQuery); - currentQuery.reset(new query::DataQuery(*this, connection, qry.GetSql(), parameters)); + currentQuery.reset(new query::DataQuery(*this, connection, qry.GetSql(), parameters, timeout)); } if (parameters.IsDataAtExecNeeded()) From 854499aeec8741ad45c31f7b8ef34b12dd5d6753 Mon Sep 17 00:00:00 2001 From: alexdel Date: Wed, 15 Nov 2017 19:35:30 +0700 Subject: [PATCH 115/243] IGNITE-5635 Web Console: Added 
spinning wheel for execute, scan and export actions on Queries screen. (cherry picked from commit f992386) --- .../app/modules/sql/sql.controller.js | 28 ++++++- .../frontend/public/stylesheets/style.scss | 10 +++ .../frontend/views/sql/sql.tpl.pug | 84 +++++++++++++++++-- 3 files changed, 112 insertions(+), 10 deletions(-) diff --git a/modules/web-console/frontend/app/modules/sql/sql.controller.js b/modules/web-console/frontend/app/modules/sql/sql.controller.js index 8d27d30709d8b..b4c8a6f2e1db0 100644 --- a/modules/web-console/frontend/app/modules/sql/sql.controller.js +++ b/modules/web-console/frontend/app/modules/sql/sql.controller.js @@ -59,6 +59,9 @@ class Paragraph { self.maxPages = 0; self.filter = ''; self.useAsDefaultSchema = false; + self.localQueryMode = false; + self.csvIsPreparing = false; + self.scanningInProgress = false; _.assign(this, paragraph); @@ -200,6 +203,14 @@ class Paragraph { chartTimeLineEnabled() { return _.nonEmpty(this.chartKeyCols) && _.eq(this.chartKeyCols[0], TIME_LINE); } + + executionInProgress(showLocal = false) { + return this.loading && (this.localQueryMode === showLocal); + } + + checkScanInProgress(showLocal = false) { + return this.scanningInProgress && (this.localQueryMode === showLocal); + } } // Controller for SQL notebook screen. 
@@ -1409,6 +1420,8 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', const enforceJoinOrder = !!paragraph.enforceJoinOrder; const lazy = !!paragraph.lazy; + paragraph.localQueryMode = local; + $scope.queryAvailable(paragraph) && _chooseNode(paragraph.cacheName, local) .then((nid) => { Notebook.save($scope.notebook) @@ -1506,8 +1519,12 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', const filter = paragraph.filter; const pageSize = paragraph.pageSize; + paragraph.localQueryMode = local; + $scope.scanAvailable(paragraph) && _chooseNode(cacheName, local) .then((nid) => { + paragraph.scanningInProgress = true; + Notebook.save($scope.notebook) .catch(Messages.showError); @@ -1537,7 +1554,8 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', paragraph.setError(err); _showLoading(paragraph, false); - }); + }) + .then(() => paragraph.scanningInProgress = false); }); }; @@ -1664,6 +1682,8 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', }; $scope.exportCsvAll = (paragraph) => { + paragraph.csvIsPreparing = true; + const args = paragraph.queryArgs; return Promise.resolve(args.localNid || _chooseNode(args.cacheName, false)) @@ -1672,7 +1692,11 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', : agentMgr.querySqlGetAll(nid, args.cacheName, args.query, !!args.nonCollocatedJoins, !!args.enforceJoinOrder, false, !!args.localNid, !!args.lazy)) .then((res) => _export(exportFileName(paragraph, true), paragraph.gridOptions.columnDefs, res.columns, res.rows)) .catch(Messages.showError) - .then(() => paragraph.ace && paragraph.ace.focus()); + .then(() => { + paragraph.csvIsPreparing = false; + + return paragraph.ace && paragraph.ace.focus(); + }); }; // $scope.exportPdfAll = function(paragraph) { diff --git a/modules/web-console/frontend/public/stylesheets/style.scss 
b/modules/web-console/frontend/public/stylesheets/style.scss index b259f1d870abb..8433747317f94 100644 --- a/modules/web-console/frontend/public/stylesheets/style.scss +++ b/modules/web-console/frontend/public/stylesheets/style.scss @@ -2201,3 +2201,13 @@ html,body,.splash-screen { .demo-mode { padding-top: 30px; } + +// Fix for injecting svg icon into BS btn +.btn--with-icon { + display: flex; + align-items: center; + + span { + margin-left: 5px; + } +} \ No newline at end of file diff --git a/modules/web-console/frontend/views/sql/sql.tpl.pug b/modules/web-console/frontend/views/sql/sql.tpl.pug index 7ee966d20870b..b324622ebee08 100644 --- a/modules/web-console/frontend/views/sql/sql.tpl.pug +++ b/modules/web-console/frontend/views/sql/sql.tpl.pug @@ -123,8 +123,17 @@ mixin query-settings span Lazy result set mixin query-actions - button.btn.btn-primary(ng-disabled='!queryAvailable(paragraph)' ng-click='execute(paragraph)') Execute - button.btn.btn-primary(ng-disabled='!queryAvailable(paragraph)' ng-click='execute(paragraph, true)') Execute on selected node + button.btn.btn-primary(ng-disabled='!queryAvailable(paragraph)' ng-click='execute(paragraph)') + div + i.fa.fa-fw.fa-play(ng-hide='paragraph.executionInProgress(false)') + i.fa.fa-fw.fa-refresh.fa-spin(ng-show='paragraph.executionInProgress(false)') + span.tipLabelExecute Execute + button.btn.btn-primary(ng-disabled='!queryAvailable(paragraph)' ng-click='execute(paragraph, true)') + div + i.fa.fa-fw.fa-play(ng-hide='paragraph.executionInProgress(true)') + i.fa.fa-fw.fa-refresh.fa-spin(ng-show='paragraph.executionInProgress(true)') + span.tipLabelExecute Execute on selected node + a.btn.btn-default(ng-disabled='!queryAvailable(paragraph)' ng-click='explain(paragraph)' data-placement='bottom' bs-tooltip='' data-title='{{queryTooltip(paragraph, "explain query")}}') Explain @@ -141,8 +150,35 @@ mixin table-result-heading-query +result-toolbar .col-xs-4 .pull-right - -var options = [{ text: "Export", click: 
'exportCsv(paragraph)' }, { text: 'Export all', click: 'exportCsvAll(paragraph)' }] - +btn-group('paragraph.loading', options, '{{ queryTooltip(paragraph, "export query results") }}') + .btn-group.panel-tip-container + button.btn.btn-primary.btn--with-icon( + ng-click='exportCsv(paragraph)' + + ng-disabled='paragraph.loading' + + bs-tooltip='' + ng-attr-title='{{ queryTooltip(paragraph, "export query results") }}' + + data-trigger='hover' + data-placement='bottom' + ) + svg(ignite-icon='csv' ng-if='!paragraph.csvIsPreparing') + i.fa.fa-fw.fa-refresh.fa-spin(ng-if='paragraph.csvIsPreparing') + span Export + + -var options = [{ text: "Export", click: 'exportCsv(paragraph)' }, { text: 'Export all', click: 'exportCsvAll(paragraph)' }] + button.btn.dropdown-toggle.btn-primary( + ng-disabled='paragraph.loading' + + bs-dropdown=`${JSON.stringify(options)}` + + data-toggle='dropdown' + data-container='body' + data-placement='bottom-right' + ) + span.caret + + mixin table-result-heading-scan .total.row @@ -157,8 +193,34 @@ mixin table-result-heading-scan +result-toolbar .col-xs-4 .pull-right - -var options = [{ text: "Export", click: 'exportCsv(paragraph)' }, { text: 'Export all', click: 'exportCsvAll(paragraph)' }] - +btn-group('paragraph.loading', options, '{{ scanTooltip(paragraph) }}') + .btn-group.panel-tip-container + // TODO: replace this logic for exporting under one component + button.btn.btn-primary.btn--with-icon( + ng-click='exportCsv(paragraph)' + + ng-disabled='paragraph.loading' + + bs-tooltip='' + ng-attr-title='{{ scanTooltip(paragraph) }}' + + data-trigger='hover' + data-placement='bottom' + ) + svg(ignite-icon='csv' ng-if='!paragraph.csvIsPreparing') + i.fa.fa-fw.fa-refresh.fa-spin(ng-if='paragraph.csvIsPreparing') + span Export + + -var options = [{ text: "Export", click: 'exportCsv(paragraph)' }, { text: 'Export all', click: 'exportCsvAll(paragraph)' }] + button.btn.dropdown-toggle.btn-primary( + ng-disabled='paragraph.loading' + + 
bs-dropdown=`${JSON.stringify(options)}` + + data-toggle='dropdown' + data-container='body' + data-placement='bottom-right' + ) + span.caret mixin table-result-body .grid(ui-grid='paragraph.gridOptions' ui-grid-resize-columns ui-grid-exporter) @@ -197,9 +259,15 @@ mixin paragraph-scan .col-sm-12.sql-controls button.btn.btn-primary(ng-disabled='!scanAvailable(paragraph)' ng-click='scan(paragraph)') - | Scan + div + i.fa.fa-fw.fa-play(ng-hide='paragraph.checkScanInProgress(false)') + i.fa.fa-fw.fa-refresh.fa-spin(ng-show='paragraph.checkScanInProgress(false)') + span.tipLabelExecute Scan + button.btn.btn-primary(ng-disabled='!scanAvailable(paragraph)' ng-click='scan(paragraph, true)') - | Scan on selected node + i.fa.fa-fw.fa-play(ng-hide='paragraph.checkScanInProgress(true)') + i.fa.fa-fw.fa-refresh.fa-spin(ng-show='paragraph.checkScanInProgress(true)') + span.tipLabelExecute Scan on selected node .col-sm-12.sql-result(ng-if='paragraph.queryExecuted()' ng-switch='paragraph.resultType()') .error(ng-switch-when='error') Error: {{paragraph.error.message}} From 060236080a3d0e862e55bf30427fd0350cbd2be1 Mon Sep 17 00:00:00 2001 From: alexdel Date: Wed, 15 Nov 2017 21:48:15 +0700 Subject: [PATCH 116/243] IGNITE-4394 Web Console: Select only server nodes for local queries on Queries screen. 
(cherry picked from commit 63b628d) --- .../web-console/frontend/app/modules/sql/sql.controller.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/web-console/frontend/app/modules/sql/sql.controller.js b/modules/web-console/frontend/app/modules/sql/sql.controller.js index b4c8a6f2e1db0..f5edb047a6ea9 100644 --- a/modules/web-console/frontend/app/modules/sql/sql.controller.js +++ b/modules/web-console/frontend/app/modules/sql/sql.controller.js @@ -871,7 +871,8 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', ip: _.head(node.attributes['org.apache.ignite.ips'].split(', ')), version: node.attributes['org.apache.ignite.build.ver'], gridName: node.attributes['org.apache.ignite.ignite.name'], - os: `${node.attributes['os.name']} ${node.attributes['os.arch']} ${node.attributes['os.version']}` + os: `${node.attributes['os.name']} ${node.attributes['os.arch']} ${node.attributes['os.version']}`, + client: node.attributes['org.apache.ignite.cache.client'] }); }); @@ -1338,7 +1339,7 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', if (_.isEmpty(name)) return Promise.resolve(null); - const nodes = cacheNodes(name); + const nodes = _.filter(cacheNodes(name), (node) => !node.client); if (local) { return Nodes.selectNode(nodes, name) From 49c447aec88a151d568b3c1e947af57dbbf6990a Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Thu, 16 Nov 2017 10:12:24 +0700 Subject: [PATCH 117/243] IGNITE-6926 Web console: Fixed SimpleWorkerPool next worker selection logic. 
(cherry picked from commit 50b384b) --- modules/web-console/frontend/app/utils/SimpleWorkerPool.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/web-console/frontend/app/utils/SimpleWorkerPool.js b/modules/web-console/frontend/app/utils/SimpleWorkerPool.js index b76dc525973bd..495a4d20d8e5b 100644 --- a/modules/web-console/frontend/app/utils/SimpleWorkerPool.js +++ b/modules/web-console/frontend/app/utils/SimpleWorkerPool.js @@ -65,7 +65,7 @@ export default class SimpleWorkerPool { } _getNextWorker() { - return this._workers.find((w) => !w.tid); + return this._workers.find((w) => _.isNil(w.tid)); } _getNextTask() { @@ -106,7 +106,7 @@ export default class SimpleWorkerPool { this._tasks.push({tid, data}); if (this.__dbg) - console.log(`Pool: [name=${this._name}, queue=${this._tasks.length}]`); + console.log(`Pool: [name=${this._name}, tid=${tid}, queue=${this._tasks.length}]`); this._run(); From 775e5d012e2ad1f7795857c400b77cc7817cadbe Mon Sep 17 00:00:00 2001 From: tledkov-gridgain Date: Fri, 17 Nov 2017 16:33:37 +0300 Subject: [PATCH 118/243] IGNITE-6932: SQL: cache-less query execution now checks for cluster state. This closes #3048. 
(cherry picked from commit cbd7e39cf34d86bc4fe41ac44a286ba7690b08cb) --- .../processors/query/GridQueryProcessor.java | 8 ++ ...eckClusterStateBeforeExecuteQueryTest.java | 89 +++++++++++++++++++ .../IgniteCacheQuerySelfTestSuite.java | 3 + 3 files changed, 100 insertions(+) create mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCheckClusterStateBeforeExecuteQueryTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java index f5fb7818350be..df4652bb3e59f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java @@ -1940,6 +1940,8 @@ public FieldsQueryCursor> querySqlFieldsNoCache(final SqlFieldsQuery qry * * @param qry Query. * @param keepBinary Keep binary flag. + * @param failOnMultipleStmts If {@code true} the method must throws exception when query contains + * more then one SQL statement. * @return Cursor. */ public List>> querySqlFieldsNoCache(final SqlFieldsQuery qry, @@ -1951,6 +1953,12 @@ public List>> querySqlFieldsNoCache(final SqlFieldsQue if (qry.isLocal()) throw new IgniteException("Local query is not supported without specific cache."); + if (!ctx.state().publicApiActiveState()) { + throw new IgniteException("Can not perform the operation because the cluster is inactive. Note, that " + + "the cluster is considered inactive by default if Ignite Persistent Store is used to let all the nodes " + + "join the cluster. 
To activate the cluster call Ignite.active(true)."); + } + if (qry.getSchema() == null) qry.setSchema(QueryUtils.DFLT_SCHEMA); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCheckClusterStateBeforeExecuteQueryTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCheckClusterStateBeforeExecuteQueryTest.java new file mode 100644 index 0000000000000..aae6a0ca8a2f0 --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCheckClusterStateBeforeExecuteQueryTest.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.concurrent.Callable; +import org.apache.ignite.IgniteException; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; +import static org.apache.ignite.testframework.GridTestUtils.assertThrows; + +/** + * + */ +public class IgniteCheckClusterStateBeforeExecuteQueryTest extends GridCommonAbstractTest { + /** */ + private static final TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true); + + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(ipFinder); + + DataStorageConfiguration pCfg = new DataStorageConfiguration(); + + pCfg.setDefaultDataRegionConfiguration(new DataRegionConfiguration() + .setPersistenceEnabled(true) + .setMaxSize(100 * 1024 * 1024)); + + cfg.setDataStorageConfiguration(pCfg); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + stopAllGrids(); + + deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false)); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + 
stopAllGrids(); + + deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false)); + } + + /** + * @throws Exception On failed. + */ + public void testDynamicSchemaChangesPersistence() throws Exception { + final IgniteEx ig = startGrid(0); + + assertThrows(log, new Callable() { + @Override public Void call() throws Exception { + ig.context().query().querySqlFieldsNoCache(new SqlFieldsQuery("SELECT 1"), false); + + return null; + } + }, IgniteException.class, "Can not perform the operation because the cluster is inactive."); + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java index 0b1a753d17173..8d3025c0d0593 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java @@ -70,6 +70,7 @@ import org.apache.ignite.internal.processors.cache.IgniteCacheQueryIndexSelfTest; import org.apache.ignite.internal.processors.cache.IgniteCacheQueryLoadSelfTest; import org.apache.ignite.internal.processors.cache.IgniteCacheUpdateSqlQuerySelfTest; +import org.apache.ignite.internal.processors.cache.IgniteCheckClusterStateBeforeExecuteQueryTest; import org.apache.ignite.internal.processors.cache.IgniteCrossCachesJoinsQueryTest; import org.apache.ignite.internal.processors.cache.IncorrectQueryEntityTest; import org.apache.ignite.internal.processors.cache.QueryEntityCaseMismatchTest; @@ -361,6 +362,8 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(GridCacheQuerySqlFieldInlineSizeSelfTest.class); suite.addTestSuite(IgniteSqlParameterizedQueryTest.class); + suite.addTestSuite(IgniteCheckClusterStateBeforeExecuteQueryTest.class); + return suite; } } From 2ac0170c9bffc1be4c9b23c12b4bb2d6ae571861 Mon Sep 17 00:00:00 2001 From: Igor 
Sapego Date: Mon, 20 Nov 2017 20:15:37 +0300 Subject: [PATCH 119/243] IGNITE-6876: Added in ODBC support for SQL_ATTR_CONNECTION_TIMEOUT (cherry picked from commit db343b6) --- .../cpp/odbc-test/src/attributes_test.cpp | 22 +++ .../cpp/odbc-test/src/queries_test.cpp | 113 ++++++++++++++ .../odbc/include/ignite/odbc/common_types.h | 3 + .../cpp/odbc/include/ignite/odbc/connection.h | 81 +++++++++- .../ignite/odbc/system/socket_client.h | 55 ++++++- .../os/linux/src/system/socket_client.cpp | 141 ++++++++++++++++-- .../odbc/os/win/src/system/socket_client.cpp | 134 +++++++++++++++-- modules/platforms/cpp/odbc/src/connection.cpp | 141 +++++++++++++----- .../odbc/src/diagnostic/diagnostic_record.cpp | 6 + .../cpp/odbc/src/query/batch_query.cpp | 12 +- .../cpp/odbc/src/query/data_query.cpp | 12 +- 11 files changed, 644 insertions(+), 76 deletions(-) diff --git a/modules/platforms/cpp/odbc-test/src/attributes_test.cpp b/modules/platforms/cpp/odbc-test/src/attributes_test.cpp index b87f4b92a5a80..c4c2433106b08 100644 --- a/modules/platforms/cpp/odbc-test/src/attributes_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/attributes_test.cpp @@ -227,4 +227,26 @@ BOOST_AUTO_TEST_CASE(StatementAttributeQueryTimeout) BOOST_REQUIRE_EQUAL(timeout, 7); } +BOOST_AUTO_TEST_CASE(ConnectionAttributeConnectionTimeout) +{ + Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache"); + + SQLUINTEGER timeout = -1; + SQLRETURN ret = SQLGetConnectAttr(dbc, SQL_ATTR_CONNECTION_TIMEOUT, &timeout, 0, 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc); + BOOST_REQUIRE_EQUAL(timeout, 0); + + ret = SQLSetConnectAttr(dbc, SQL_ATTR_CONNECTION_TIMEOUT, reinterpret_cast(42), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc); + + timeout = -1; + + ret = SQLGetConnectAttr(dbc, SQL_ATTR_CONNECTION_TIMEOUT, &timeout, 0, 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc); + BOOST_REQUIRE_EQUAL(timeout, 42); +} + BOOST_AUTO_TEST_SUITE_END() diff --git 
a/modules/platforms/cpp/odbc-test/src/queries_test.cpp b/modules/platforms/cpp/odbc-test/src/queries_test.cpp index 6fcf7c9174823..dafab1a5f11a9 100644 --- a/modules/platforms/cpp/odbc-test/src/queries_test.cpp +++ b/modules/platforms/cpp/odbc-test/src/queries_test.cpp @@ -2404,5 +2404,118 @@ BOOST_AUTO_TEST_CASE(TestCloseAfterEmptyUpdate) BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt)); } +BOOST_AUTO_TEST_CASE(TestConnectionTimeoutQuery) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_CONNECTION_TIMEOUT, reinterpret_cast(5), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc); + + InsertTestStrings(10, false); +} + +BOOST_AUTO_TEST_CASE(TestConnectionTimeoutBatch) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_CONNECTION_TIMEOUT, reinterpret_cast(5), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc); + + InsertTestBatch(11, 20, 9); +} + +BOOST_AUTO_TEST_CASE(TestConnectionTimeoutBoth) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_CONNECTION_TIMEOUT, reinterpret_cast(5), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc); + + InsertTestStrings(10, false); + InsertTestBatch(11, 20, 9); +} + +BOOST_AUTO_TEST_CASE(TestQueryTimeoutQuery) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLRETURN ret = SQLSetStmtAttr(stmt, SQL_ATTR_QUERY_TIMEOUT, reinterpret_cast(5), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + InsertTestStrings(10, false); +} + +BOOST_AUTO_TEST_CASE(TestQueryTimeoutBatch) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLRETURN ret = SQLSetStmtAttr(stmt, SQL_ATTR_QUERY_TIMEOUT, reinterpret_cast(5), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + InsertTestBatch(11, 20, 9); +} + 
+BOOST_AUTO_TEST_CASE(TestQueryTimeoutBoth) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLRETURN ret = SQLSetStmtAttr(stmt, SQL_ATTR_QUERY_TIMEOUT, reinterpret_cast(5), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + InsertTestStrings(10, false); + InsertTestBatch(11, 20, 9); +} + +BOOST_AUTO_TEST_CASE(TestQueryAndConnectionTimeoutQuery) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLRETURN ret = SQLSetStmtAttr(stmt, SQL_ATTR_QUERY_TIMEOUT, reinterpret_cast(5), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLSetConnectAttr(dbc, SQL_ATTR_CONNECTION_TIMEOUT, reinterpret_cast(3), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc); + + InsertTestStrings(10, false); +} + +BOOST_AUTO_TEST_CASE(TestQueryAndConnectionTimeoutBatch) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLRETURN ret = SQLSetStmtAttr(stmt, SQL_ATTR_QUERY_TIMEOUT, reinterpret_cast(5), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLSetConnectAttr(dbc, SQL_ATTR_CONNECTION_TIMEOUT, reinterpret_cast(3), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc); + + InsertTestBatch(11, 20, 9); +} + +BOOST_AUTO_TEST_CASE(TestQueryAndConnectionTimeoutBoth) +{ + Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache"); + + SQLRETURN ret = SQLSetStmtAttr(stmt, SQL_ATTR_QUERY_TIMEOUT, reinterpret_cast(5), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_STMT, stmt); + + ret = SQLSetConnectAttr(dbc, SQL_ATTR_CONNECTION_TIMEOUT, reinterpret_cast(3), 0); + + ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc); + + InsertTestStrings(10, false); + InsertTestBatch(11, 20, 9); +} BOOST_AUTO_TEST_SUITE_END() diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/common_types.h b/modules/platforms/cpp/odbc/include/ignite/odbc/common_types.h index 349147f2e385c..9c8c433aa3054 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/common_types.h +++ 
b/modules/platforms/cpp/odbc/include/ignite/odbc/common_types.h @@ -190,6 +190,9 @@ namespace ignite */ SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, + /** The timeout period expired before the data source responded to the request. */ + SHYT00_TIMEOUT_EXPIRED, + /** * The connection timeout period expired before the data source * responded to the request. diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/connection.h b/modules/platforms/cpp/odbc/include/ignite/odbc/connection.h index 34fed5f12b53e..1577ee7fe7f79 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/connection.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/connection.h @@ -27,6 +27,7 @@ #include "ignite/odbc/config/connection_info.h" #include "ignite/odbc/config/configuration.h" #include "ignite/odbc/diagnostic/diagnosable_adapter.h" +#include "ignite/odbc/odbc_error.h" namespace ignite { @@ -41,6 +42,19 @@ namespace ignite { friend class Environment; public: + /** + * Operation with timeout result. + */ + struct OperationResult + { + enum T + { + SUCCESS, + FAIL, + TIMEOUT + }; + }; + /** * Destructor. */ @@ -96,15 +110,21 @@ namespace ignite * * @param data Data buffer. * @param len Data length. + * @param timeout Timeout. + * @return @c true on success, @c false on timeout. + * @throw OdbcError on error. */ - void Send(const int8_t* data, size_t len); + bool Send(const int8_t* data, size_t len, int32_t timeout); /** * Receive next message. * * @param msg Buffer for message. + * @param timeout Timeout. + * @return @c true on success, @c false on timeout. + * @throw OdbcError on error. */ - void Receive(std::vector& msg); + bool Receive(std::vector& msg, int32_t timeout); /** * Get name of the assotiated schema. @@ -134,9 +154,43 @@ namespace ignite /** * Synchronously send request message and receive response. + * Uses provided timeout. * * @param req Request message. * @param rsp Response message. + * @param timeout Timeout. + * @return @c true on success, @c false on timeout. 
+ * @throw OdbcError on error. + */ + template + bool SyncMessage(const ReqT& req, RspT& rsp, int32_t timeout) + { + std::vector tempBuffer; + + parser.Encode(req, tempBuffer); + + bool success = Send(tempBuffer.data(), tempBuffer.size(), timeout); + + if (!success) + return false; + + success = Receive(tempBuffer, timeout); + + if (!success) + return false; + + parser.Decode(rsp, tempBuffer); + + return true; + } + + /** + * Synchronously send request message and receive response. + * Uses connection timeout. + * + * @param req Request message. + * @param rsp Response message. + * @throw OdbcError on error. */ template void SyncMessage(const ReqT& req, RspT& rsp) @@ -145,9 +199,15 @@ namespace ignite parser.Encode(req, tempBuffer); - Send(tempBuffer.data(), tempBuffer.size()); + bool success = Send(tempBuffer.data(), tempBuffer.size(), timeout); + + if (!success) + throw OdbcError(SqlState::SHYT01_CONNECTION_TIMEOUT, "Send operation timed out"); + + success = Receive(tempBuffer, timeout); - Receive(tempBuffer); + if (!success) + throw OdbcError(SqlState::SHYT01_CONNECTION_TIMEOUT, "Receive operation timed out"); parser.Decode(rsp, tempBuffer); } @@ -280,18 +340,20 @@ namespace ignite * * @param dst Buffer for data. * @param len Number of bytes to receive. - * @return Number of successfully received bytes. + * @param timeout Timeout. + * @return Operation result. */ - size_t ReceiveAll(void* dst, size_t len); + OperationResult::T ReceiveAll(void* dst, size_t len, int32_t timeout); /** * Send specified number of bytes. * * @param data Data buffer. * @param len Data length. - * @return Number of successfully sent bytes. + * @param timeout Timeout. + * @return Operation result. */ - size_t SendAll(const int8_t* data, size_t len); + OperationResult::T SendAll(const int8_t* data, size_t len, int32_t timeout); /** * Perform handshake request. @@ -311,6 +373,9 @@ namespace ignite /** State flag. */ bool connected; + /** Connection timeout in seconds. 
*/ + int32_t timeout; + /** Message parser. */ Parser parser; diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/system/socket_client.h b/modules/platforms/cpp/odbc/include/ignite/odbc/system/socket_client.h index 946605e3db85e..2a3cfa3c7528f 100644 --- a/modules/platforms/cpp/odbc/include/ignite/odbc/system/socket_client.h +++ b/modules/platforms/cpp/odbc/include/ignite/odbc/system/socket_client.h @@ -44,6 +44,22 @@ namespace ignite /** The time in seconds between individual keepalive probes. */ enum { KEEP_ALIVE_PROBES_PERIOD = 1 }; + /** Connection establishment timeout in seconds. */ + enum { CONNECT_TIMEOUT = 5 }; + + /** + * Non-negative timeout operation result. + */ + struct WaitResult + { + enum T + { + TIMEOUT = 0, + + SUCCESS = 1 + }; + }; + /** * Constructor. */ @@ -76,20 +92,31 @@ namespace ignite * * @param data Pointer to data to be sent. * @param size Size of the data in bytes. - * @return Number of bytes that have been sent on success and negative - * value on failure. + * @param timeout Timeout. + * @return Number of bytes that have been sent on success, + * WaitResult::TIMEOUT on timeout and -errno on failure. */ - int Send(const int8_t* data, size_t size); + int Send(const int8_t* data, size_t size, int32_t timeout); /** * Receive data from established connection. * * @param buffer Pointer to data buffer. * @param size Size of the buffer in bytes. - * @return Number of bytes that have been received on success and negative - * value on failure. + * @param timeout Timeout. + * @return Number of bytes that have been sent on success, + * WaitResult::TIMEOUT on timeout and -errno on failure. */ - int Receive(int8_t* buffer, size_t size); + int Receive(int8_t* buffer, size_t size, int32_t timeout); + + /** + * Check if the socket is blocking or not. + * @return @c true if the socket is blocking and false otherwise. 
+ */ + bool IsBlocking() const + { + return blocking; + } private: /** @@ -97,8 +124,24 @@ namespace ignite */ void TrySetOptions(diagnostic::Diagnosable& diag); + /** + * Wait on the socket for any event for specified time. + * This function uses poll to achive timeout functionality + * for every separate socket operation. + * + * @param timeout Timeout. + * @param rd Wait for read if @c true, or for write if @c false. + * @return -errno on error, WaitResult::TIMEOUT on timeout and + * WaitResult::SUCCESS on success. + */ + int WaitOnSocket(int32_t timeout, bool rd); + + /** Handle. */ intptr_t socketHandle; + /** Blocking flag. */ + bool blocking; + IGNITE_NO_COPY_ASSIGNMENT(SocketClient) }; } diff --git a/modules/platforms/cpp/odbc/os/linux/src/system/socket_client.cpp b/modules/platforms/cpp/odbc/os/linux/src/system/socket_client.cpp index 5a9b03a21d398..a6d61514f8699 100644 --- a/modules/platforms/cpp/odbc/os/linux/src/system/socket_client.cpp +++ b/modules/platforms/cpp/odbc/os/linux/src/system/socket_client.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include @@ -35,26 +36,36 @@ namespace { /** * Get last socket error message. + * @param error Error code. * @return Last socket error message string. */ - std::string GetLastSocketErrorMessage() + std::string GetSocketErrorMessage(int error) { - int lastError = errno; std::stringstream res; - res << "error_code=" << lastError; + res << "error_code=" << error; - if (lastError == 0) + if (error == 0) return res.str(); char buffer[1024] = ""; - strerror_r(lastError, buffer, sizeof(buffer)); - - res << ", msg=" << buffer; + if (!strerror_r(error, buffer, sizeof(buffer))) + res << ", msg=" << buffer; return res.str(); } + + /** + * Get last socket error message. + * @return Last socket error message string. 
+ */ + std::string GetLastSocketErrorMessage() + { + int lastError = errno; + + return GetSocketErrorMessage(lastError); + } } namespace ignite @@ -64,7 +75,9 @@ namespace ignite namespace tcp { - SocketClient::SocketClient() : socketHandle(SOCKET_ERROR) + SocketClient::SocketClient() : + socketHandle(SOCKET_ERROR), + blocking(true) { // No-op. } @@ -129,11 +142,27 @@ namespace ignite res = connect(socketHandle, it->ai_addr, static_cast(it->ai_addrlen)); if (SOCKET_ERROR == res) { - LOG_MSG("Connection failed: " << GetLastSocketErrorMessage()); + int lastError = errno; + + if (lastError != EWOULDBLOCK && lastError != EINPROGRESS) + { + LOG_MSG("Connection failed: " << GetSocketErrorMessage(lastError)); + + Close(); - Close(); + continue; + } - continue; + res = WaitOnSocket(CONNECT_TIMEOUT, false); + + if (res < 0 || res == WaitResult::TIMEOUT) + { + LOG_MSG("Connection timeout expired: " << GetSocketErrorMessage(-res)); + + Close(); + + continue; + } } break; } @@ -153,13 +182,29 @@ namespace ignite } } - int SocketClient::Send(const int8_t* data, size_t size) + int SocketClient::Send(const int8_t* data, size_t size, int32_t timeout) { + if (!blocking) + { + int res = WaitOnSocket(timeout, false); + + if (res < 0 || res == WaitResult::TIMEOUT) + return res; + } + return send(socketHandle, reinterpret_cast(data), static_cast(size), 0); } - int SocketClient::Receive(int8_t* buffer, size_t size) + int SocketClient::Receive(int8_t* buffer, size_t size, int32_t timeout) { + if (!blocking) + { + int res = WaitOnSocket(timeout, true); + + if (res < 0 || res == WaitResult::TIMEOUT) + return res; + } + return recv(socketHandle, reinterpret_cast(buffer), static_cast(size), 0); } @@ -203,6 +248,30 @@ namespace ignite "Can not set up TCP no-delay mode"); } + res = setsockopt(socketHandle, SOL_SOCKET, SO_OOBINLINE, + reinterpret_cast(&trueOpt), sizeof(trueOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP out-of-bound data inlining setup failed: " << 
GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP out-of-bound data inlining"); + } + + blocking = false; + + int flags; + if (((flags = fcntl(socketHandle, F_GETFL, 0)) < 0) || + (fcntl(socketHandle, F_SETFL, flags | O_NONBLOCK) < 0)) + { + blocking = true; + LOG_MSG("Non-blocking mode setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up non-blocking mode. Timeouts are not available."); + } + res = setsockopt(socketHandle, SOL_SOCKET, SO_KEEPALIVE, reinterpret_cast(&trueOpt), sizeof(trueOpt)); @@ -238,6 +307,52 @@ namespace ignite diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, "Can not set up TCP keep-alive probes period"); } + + } + + int SocketClient::WaitOnSocket(int32_t timeout, bool rd) + { + int ready = 0; + int lastError = 0; + + fd_set fds; + + do { + struct timeval tv = { 0 }; + tv.tv_sec = timeout; + + FD_ZERO(&fds); + FD_SET(socketHandle, &fds); + + fd_set* readFds = 0; + fd_set* writeFds = 0; + + if (rd) + readFds = &fds; + else + writeFds = &fds; + + ready = select(static_cast((socketHandle) + 1), + readFds, writeFds, NULL, (timeout == 0 ? 
NULL : &tv)); + + if (ready == SOCKET_ERROR) + lastError = errno; + + } while (ready == SOCKET_ERROR && lastError == EINTR); + + if (ready == SOCKET_ERROR) + return -lastError; + + socklen_t size = sizeof(lastError); + int res = getsockopt(socketHandle, SOL_SOCKET, SO_ERROR, reinterpret_cast(&lastError), &size); + + if (res != SOCKET_ERROR && lastError != 0) + return -lastError; + + if (ready == 0) + return WaitResult::TIMEOUT; + + return WaitResult::SUCCESS; } } } diff --git a/modules/platforms/cpp/odbc/os/win/src/system/socket_client.cpp b/modules/platforms/cpp/odbc/os/win/src/system/socket_client.cpp index 30fb7d7bacbeb..6f87b9323dc54 100644 --- a/modules/platforms/cpp/odbc/os/win/src/system/socket_client.cpp +++ b/modules/platforms/cpp/odbc/os/win/src/system/socket_client.cpp @@ -34,17 +34,17 @@ namespace { /** - * Get last socket error message. - * @return Last socket error message string. + * Get socket error message for the error code. + * @param error Error code. + * @return Socket error message string. */ - std::string GetLastSocketErrorMessage() + std::string GetSocketErrorMessage(HRESULT error) { - HRESULT lastError = WSAGetLastError(); std::stringstream res; - res << "error_code=" << lastError; + res << "error_code=" << error; - if (lastError == 0) + if (error == 0) return res.str(); LPTSTR errorText = NULL; @@ -58,7 +58,7 @@ namespace | FORMAT_MESSAGE_IGNORE_INSERTS, // unused with FORMAT_MESSAGE_FROM_SYSTEM NULL, - lastError, + error, MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US), // output reinterpret_cast(&errorText), @@ -77,6 +77,17 @@ namespace return res.str(); } + + /** + * Get last socket error message. + * @return Last socket error message string. 
+ */ + std::string GetLastSocketErrorMessage() + { + HRESULT lastError = WSAGetLastError(); + + return GetSocketErrorMessage(lastError); + } } namespace ignite @@ -86,7 +97,9 @@ namespace ignite namespace tcp { - SocketClient::SocketClient() : socketHandle(INVALID_SOCKET) + SocketClient::SocketClient() : + socketHandle(INVALID_SOCKET), + blocking(true) { // No-op. } @@ -170,11 +183,27 @@ namespace ignite res = connect(socketHandle, it->ai_addr, static_cast(it->ai_addrlen)); if (SOCKET_ERROR == res) { - LOG_MSG("Connection failed: " << GetLastSocketErrorMessage()); + int lastError = WSAGetLastError(); - Close(); + if (lastError != WSAEWOULDBLOCK) + { + LOG_MSG("Connection failed: " << GetSocketErrorMessage(lastError)); - continue; + Close(); + + continue; + } + + res = WaitOnSocket(CONNECT_TIMEOUT, false); + + if (res < 0 || res == WaitResult::TIMEOUT) + { + LOG_MSG("Connection timeout expired: " << GetSocketErrorMessage(-res)); + + Close(); + + continue; + } } break; } @@ -194,21 +223,39 @@ namespace ignite } } - int SocketClient::Send(const int8_t* data, size_t size) + int SocketClient::Send(const int8_t* data, size_t size, int32_t timeout) { + if (!blocking) + { + int res = WaitOnSocket(timeout, false); + + if (res < 0 || res == WaitResult::TIMEOUT) + return res; + } + return send(socketHandle, reinterpret_cast(data), static_cast(size), 0); } - int SocketClient::Receive(int8_t* buffer, size_t size) + int SocketClient::Receive(int8_t* buffer, size_t size, int32_t timeout) { + if (!blocking) + { + int res = WaitOnSocket(timeout, true); + + if (res < 0 || res == WaitResult::TIMEOUT) + return res; + } + return recv(socketHandle, reinterpret_cast(buffer), static_cast(size), 0); } void SocketClient::TrySetOptions(diagnostic::Diagnosable& diag) { BOOL trueOpt = TRUE; + ULONG uTrueOpt = TRUE; int bufSizeOpt = BUFFER_SIZE; + int res = setsockopt(socketHandle, SOL_SOCKET, SO_SNDBUF, reinterpret_cast(&bufSizeOpt), sizeof(bufSizeOpt)); @@ -242,6 +289,29 @@ namespace ignite 
"Can not set up TCP no-delay mode"); } + res = setsockopt(socketHandle, SOL_SOCKET, SO_OOBINLINE, + reinterpret_cast(&trueOpt), sizeof(trueOpt)); + + if (SOCKET_ERROR == res) + { + LOG_MSG("TCP out-of-bound data inlining setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up TCP out-of-bound data inlining"); + } + + blocking = false; + res = ioctlsocket(socketHandle, FIONBIO, &uTrueOpt); + + if (res == SOCKET_ERROR) + { + blocking = true; + LOG_MSG("Non-blocking mode setup failed: " << GetLastSocketErrorMessage()); + + diag.AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, + "Can not set up non-blocking mode. Timeouts are not available."); + } + res = setsockopt(socketHandle, SOL_SOCKET, SO_KEEPALIVE, reinterpret_cast(&trueOpt), sizeof(trueOpt)); @@ -318,6 +388,44 @@ namespace ignite #endif } + int SocketClient::WaitOnSocket(int32_t timeout, bool rd) + { + int ready = 0; + int lastError = 0; + + fd_set fds; + + do { + struct timeval tv = { 0 }; + tv.tv_sec = timeout; + + FD_ZERO(&fds); + FD_SET(socketHandle, &fds); + + fd_set* readFds = 0; + fd_set* writeFds = 0; + + if (rd) + readFds = &fds; + else + writeFds = &fds; + + ready = select(static_cast((socketHandle) + 1), + readFds, writeFds, NULL, (timeout == 0 ? 
NULL : &tv)); + + if (ready == SOCKET_ERROR) + lastError = WSAGetLastError(); + + } while (ready == SOCKET_ERROR && lastError == WSAEINTR); + + if (ready == SOCKET_ERROR) + return -lastError; + + if (ready == 0) + return WaitResult::TIMEOUT; + + return WaitResult::SUCCESS; + } } } } diff --git a/modules/platforms/cpp/odbc/src/connection.cpp b/modules/platforms/cpp/odbc/src/connection.cpp index b99d7687035db..8b0387627d20d 100644 --- a/modules/platforms/cpp/odbc/src/connection.cpp +++ b/modules/platforms/cpp/odbc/src/connection.cpp @@ -16,6 +16,7 @@ */ #include +#include #include @@ -27,7 +28,6 @@ #include "ignite/odbc/connection.h" #include "ignite/odbc/message.h" #include "ignite/odbc/config/configuration.h" -#include "ignite/odbc/odbc_error.h" namespace { @@ -46,6 +46,7 @@ namespace ignite Connection::Connection() : socket(), connected(false), + timeout(0), parser(), config(), info(config) @@ -194,7 +195,7 @@ namespace ignite return SqlResult::AI_SUCCESS; } - void Connection::Send(const int8_t* data, size_t len) + bool Connection::Send(const int8_t* data, size_t len, int32_t timeout) { if (!connected) throw OdbcError(SqlState::S08003_NOT_CONNECTED, "Connection is not established"); @@ -209,38 +210,45 @@ namespace ignite memcpy(msg.GetData() + sizeof(OdbcProtocolHeader), data, len); - size_t sent = SendAll(msg.GetData(), msg.GetSize()); + OperationResult::T res = SendAll(msg.GetData(), msg.GetSize(), timeout); - if (sent != len + sizeof(OdbcProtocolHeader)) + if (res == OperationResult::TIMEOUT) + return false; + + if (res == OperationResult::FAIL) throw OdbcError(SqlState::S08S01_LINK_FAILURE, "Can not send message due to connection failure"); LOG_MSG("message sent: (" << msg.GetSize() << " bytes)" << utility::HexDump(msg.GetData(), msg.GetSize())); + + return true; } - size_t Connection::SendAll(const int8_t* data, size_t len) + Connection::OperationResult::T Connection::SendAll(const int8_t* data, size_t len, int32_t timeout) { int sent = 0; while (sent != 
static_cast(len)) { - int res = socket.Send(data + sent, len - sent); + int res = socket.Send(data + sent, len - sent, timeout); LOG_MSG("Sent: " << res); - if (res <= 0) + if (res < 0 || res == tcp::SocketClient::WaitResult::TIMEOUT) { Close(); - return sent; + return res < 0 ? OperationResult::FAIL : OperationResult::TIMEOUT; } sent += res; } - return sent; + assert(static_cast(sent) == len); + + return OperationResult::SUCCESS; } - void Connection::Receive(std::vector& msg) + bool Connection::Receive(std::vector& msg, int32_t timeout) { if (!connected) throw OdbcError(SqlState::S08003_NOT_CONNECTED, "Connection is not established"); @@ -249,36 +257,40 @@ namespace ignite OdbcProtocolHeader hdr; - size_t received = ReceiveAll(reinterpret_cast(&hdr), sizeof(hdr)); + OperationResult::T res = ReceiveAll(reinterpret_cast(&hdr), sizeof(hdr), timeout); + + if (res == OperationResult::TIMEOUT) + return false; - if (received != sizeof(hdr)) + if (res == OperationResult::FAIL) throw OdbcError(SqlState::S08S01_LINK_FAILURE, "Can not receive message header"); if (hdr.len < 0) { Close(); - throw OdbcError(SqlState::S08S01_LINK_FAILURE, "Protocol error: Message length is negative"); + throw OdbcError(SqlState::SHY000_GENERAL_ERROR, "Protocol error: Message length is negative"); } if (hdr.len == 0) - return; + return false; msg.resize(hdr.len); - received = ReceiveAll(&msg[0], hdr.len); + res = ReceiveAll(&msg[0], hdr.len, timeout); - if (received != hdr.len) - { - msg.resize(received); + if (res == OperationResult::TIMEOUT) + return false; + if (res == OperationResult::FAIL) throw OdbcError(SqlState::S08S01_LINK_FAILURE, "Can not receive message body"); - } LOG_MSG("Message received: " << utility::HexDump(&msg[0], msg.size())); + + return true; } - size_t Connection::ReceiveAll(void* dst, size_t len) + Connection::OperationResult::T Connection::ReceiveAll(void* dst, size_t len, int32_t timeout) { size_t remain = len; int8_t* buffer = reinterpret_cast(dst); @@ -287,20 +299,20 
@@ namespace ignite { size_t received = len - remain; - int res = socket.Receive(buffer + received, remain); + int res = socket.Receive(buffer + received, remain, timeout); LOG_MSG("Receive res: " << res << " remain: " << remain); - if (res <= 0) + if (res < 0 || res == tcp::SocketClient::WaitResult::TIMEOUT) { Close(); - return received; + return res < 0 ? OperationResult::FAIL : OperationResult::TIMEOUT; } remain -= static_cast(res); } - return len; + return OperationResult::SUCCESS; } const std::string& Connection::GetSchema() const @@ -334,6 +346,14 @@ namespace ignite IGNITE_ODBC_API_CALL(InternalTransactionRollback()); } + SqlResult::Type Connection::InternalTransactionRollback() + { + AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, + "Rollback operation is not supported."); + + return SqlResult::AI_ERROR; + } + void Connection::GetAttribute(int attr, void* buf, SQLINTEGER bufLen, SQLINTEGER* valueLen) { IGNITE_ODBC_API_CALL(InternalGetAttribute(attr, buf, bufLen, valueLen)); @@ -343,7 +363,7 @@ namespace ignite { if (!buf) { - AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, "Data buffer is NULL."); + AddStatusRecord(SqlState::SHY009_INVALID_USE_OF_NULL_POINTER, "Data buffer is null."); return SqlResult::AI_ERROR; } @@ -362,6 +382,18 @@ namespace ignite break; } + case SQL_ATTR_CONNECTION_TIMEOUT: + { + SQLUINTEGER *val = reinterpret_cast(buf); + + *val = static_cast(timeout); + + if (valueLen) + *valueLen = SQL_IS_INTEGER; + + break; + } + default: { AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, @@ -381,6 +413,13 @@ namespace ignite SqlResult::Type Connection::InternalSetAttribute(int attr, void* value, SQLINTEGER valueLen) { + if (!value) + { + AddStatusRecord(SqlState::SHY009_INVALID_USE_OF_NULL_POINTER, "Value pointer is null."); + + return SqlResult::AI_ERROR; + } + switch (attr) { case SQL_ATTR_CONNECTION_DEAD: @@ -390,6 +429,39 @@ namespace ignite return SqlResult::AI_ERROR; } + case SQL_ATTR_CONNECTION_TIMEOUT: 
+ { + SQLUINTEGER uTimeout = static_cast(reinterpret_cast(value)); + + if (uTimeout != 0 && connected && socket.IsBlocking()) + { + timeout = 0; + + AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, "Can not set timeout, because can not " + "enable non-blocking mode on TCP connection. Setting to 0."); + + return SqlResult::AI_SUCCESS_WITH_INFO; + } + + if (uTimeout > INT32_MAX) + { + timeout = INT32_MAX; + + std::stringstream ss; + + ss << "Value is too big: " << uTimeout << ", changing to " << timeout << "."; + std::string msg = ss.str(); + + AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED, msg); + + return SqlResult::AI_SUCCESS_WITH_INFO; + } + + timeout = static_cast(uTimeout); + + break; + } + default: { AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, @@ -402,14 +474,6 @@ namespace ignite return SqlResult::AI_SUCCESS; } - SqlResult::Type Connection::InternalTransactionRollback() - { - AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED, - "Rollback operation is not supported."); - - return SqlResult::AI_ERROR; - } - SqlResult::Type Connection::MakeRequestHandshake() { bool distributedJoins = false; @@ -451,7 +515,16 @@ namespace ignite try { - SyncMessage(req, rsp); + // Workaround for some Linux systems that report connection on non-blocking + // sockets as successfull but fail to establish real connection. 
+ bool sent = SyncMessage(req, rsp, tcp::SocketClient::CONNECT_TIMEOUT); + + if (!sent) + { + AddStatusRecord(SqlState::S08001_CANNOT_CONNECT, "Failed to establish connection with the host."); + + return SqlResult::AI_ERROR; + } } catch (const OdbcError& err) { diff --git a/modules/platforms/cpp/odbc/src/diagnostic/diagnostic_record.cpp b/modules/platforms/cpp/odbc/src/diagnostic/diagnostic_record.cpp index 0a023101cfd76..7fa76691b535d 100644 --- a/modules/platforms/cpp/odbc/src/diagnostic/diagnostic_record.cpp +++ b/modules/platforms/cpp/odbc/src/diagnostic/diagnostic_record.cpp @@ -133,6 +133,9 @@ namespace /** SQL state HYC00 constant. */ const std::string STATE_HYC00 = "HYC00"; + /** SQL state HYT00 constant. */ + const std::string STATE_HYT00 = "HYT00"; + /** SQL state HYT01 constant. */ const std::string STATE_HYT01 = "HYT01"; @@ -365,6 +368,9 @@ namespace ignite case SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED: return STATE_HYC00; + case SqlState::SHYT00_TIMEOUT_EXPIRED: + return STATE_HYT00; + case SqlState::SHYT01_CONNECTION_TIMEOUT: return STATE_HYT01; diff --git a/modules/platforms/cpp/odbc/src/query/batch_query.cpp b/modules/platforms/cpp/odbc/src/query/batch_query.cpp index 07d42d4e33f92..a9db8d81e8394 100644 --- a/modules/platforms/cpp/odbc/src/query/batch_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/batch_query.cpp @@ -153,7 +153,17 @@ namespace ignite try { - connection.SyncMessage(req, rsp); + // Setting connection timeout to 1 second more than query timeout itself. + int32_t connectionTimeout = timeout ? 
timeout + 1 : 0; + + bool success = connection.SyncMessage(req, rsp, connectionTimeout); + + if (!success) + { + diag.AddStatusRecord(SqlState::SHYT00_TIMEOUT_EXPIRED, "Query timeout expired"); + + return SqlResult::AI_ERROR; + } } catch (const OdbcError& err) { diff --git a/modules/platforms/cpp/odbc/src/query/data_query.cpp b/modules/platforms/cpp/odbc/src/query/data_query.cpp index 0539af505810f..e7bf5a02abe6b 100644 --- a/modules/platforms/cpp/odbc/src/query/data_query.cpp +++ b/modules/platforms/cpp/odbc/src/query/data_query.cpp @@ -222,7 +222,17 @@ namespace ignite try { - connection.SyncMessage(req, rsp); + // Setting connection timeout to 1 second more than query timeout itself. + int32_t connectionTimeout = timeout ? timeout + 1 : 0; + + bool success = connection.SyncMessage(req, rsp, connectionTimeout); + + if (!success) + { + diag.AddStatusRecord(SqlState::SHYT00_TIMEOUT_EXPIRED, "Query timeout expired"); + + return SqlResult::AI_ERROR; + } } catch (const OdbcError& err) { From c1b75f78a3498a38e4d70c5eefb67b6e48e960d6 Mon Sep 17 00:00:00 2001 From: alexdel Date: Tue, 21 Nov 2017 17:20:11 +0700 Subject: [PATCH 120/243] IGNITE-4454. Added duration and node ID in results header and 'Show query' modal. 
(cherry picked from commit 31055f2) --- .../app/modules/sql/sql.controller.js | 7 ++++-- .../frontend/views/sql/sql.tpl.pug | 22 ++++++++++++------- .../frontend/views/templates/message.tpl.pug | 1 + 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/modules/web-console/frontend/app/modules/sql/sql.controller.js b/modules/web-console/frontend/app/modules/sql/sql.controller.js index f5edb047a6ea9..e97825ca3c1ee 100644 --- a/modules/web-console/frontend/app/modules/sql/sql.controller.js +++ b/modules/web-console/frontend/app/modules/sql/sql.controller.js @@ -1421,13 +1421,12 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', const enforceJoinOrder = !!paragraph.enforceJoinOrder; const lazy = !!paragraph.lazy; - paragraph.localQueryMode = local; - $scope.queryAvailable(paragraph) && _chooseNode(paragraph.cacheName, local) .then((nid) => { Notebook.save($scope.notebook) .catch(Messages.showError); + paragraph.localQueryMode = local; paragraph.prevQuery = paragraph.queryArgs ? paragraph.queryArgs.query : paragraph.query; _showLoading(paragraph, true); @@ -1841,6 +1840,10 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', scope.content = paragraph.queryArgs.query.split(/\r?\n/); } + // Attach duration and selected node info + scope.meta = `Duration: ${$filter('duration')(paragraph.duration)}.`; + scope.meta += paragraph.localQueryMode ? 
` Node ID8: ${_.id8(paragraph.resNodeId)}` : ''; + // Show a basic modal from a controller $modal({scope, templateUrl: messageTemplateUrl, show: true}); } diff --git a/modules/web-console/frontend/views/sql/sql.tpl.pug b/modules/web-console/frontend/views/sql/sql.tpl.pug index b324622ebee08..44989e8808dd6 100644 --- a/modules/web-console/frontend/views/sql/sql.tpl.pug +++ b/modules/web-console/frontend/views/sql/sql.tpl.pug @@ -32,7 +32,7 @@ mixin result-toolbar mixin chart-settings .total.row - .col-xs-4 + .col-xs-5 .chart-settings-link(ng-show='paragraph.chart && paragraph.chartColumns.length > 0') a(title='Click to show chart settings dialog' ng-click='$event.stopPropagation()' bs-popover data-template-url='{{ $ctrl.chartSettingsTemplateUrl }}' data-placement='bottom' data-auto-close='1' data-trigger='click') i.fa.fa-bars @@ -41,7 +41,11 @@ mixin chart-settings label Show button.select-manual-caret.btn.btn-default(ng-model='paragraph.timeLineSpan' ng-change='applyChartSettings(paragraph)' bs-options='item for item in timeLineSpans' bs-select data-caret-html='') label min - .col-xs-4 + + div + label Duration: #[b {{paragraph.duration | duration}}] + label.margin-left-dflt(ng-show='paragraph.localQueryMode') NodeID8: #[b {{paragraph.resNodeId | id8}}] + .col-xs-2 +result-toolbar mixin notebook-rename @@ -139,16 +143,17 @@ mixin query-actions mixin table-result-heading-query .total.row - .col-xs-4 + .col-xs-5 grid-column-selector(grid-api='paragraph.gridOptions.api') .fa.fa-bars.icon label Page: #[b {{paragraph.page}}] label.margin-left-dflt Results so far: #[b {{paragraph.rows.length + paragraph.total}}] label.margin-left-dflt Duration: #[b {{paragraph.duration | duration}}] - .col-xs-4 + label.margin-left-dflt(ng-show='paragraph.localQueryMode') NodeID8: #[b {{paragraph.resNodeId | id8}}] + .col-xs-2 div(ng-if='paragraph.qryType === "query"') +result-toolbar - .col-xs-4 + .col-xs-5 .pull-right .btn-group.panel-tip-container button.btn.btn-primary.btn--with-icon( 
@@ -182,16 +187,17 @@ mixin table-result-heading-query mixin table-result-heading-scan .total.row - .col-xs-4 + .col-xs-5 grid-column-selector(grid-api='paragraph.gridOptions.api') .fa.fa-bars.icon label Page: #[b {{paragraph.page}}] label.margin-left-dflt Results so far: #[b {{paragraph.rows.length + paragraph.total}}] label.margin-left-dflt Duration: #[b {{paragraph.duration | duration}}] - .col-xs-4 + label.margin-left-dflt(ng-show='paragraph.localQueryMode') NodeID8: #[b {{paragraph.resNodeId | id8}}] + .col-xs-2 div(ng-if='paragraph.qryType === "query"') +result-toolbar - .col-xs-4 + .col-xs-5 .pull-right .btn-group.panel-tip-container // TODO: replace this logic for exporting under one component diff --git a/modules/web-console/frontend/views/templates/message.tpl.pug b/modules/web-console/frontend/views/templates/message.tpl.pug index aa3615fbee0c6..3cdb3c8619840 100644 --- a/modules/web-console/frontend/views/templates/message.tpl.pug +++ b/modules/web-console/frontend/views/templates/message.tpl.pug @@ -25,4 +25,5 @@ .modal-body(ng-show='content' style='overflow: auto; max-height: 300px;') p(ng-bind-html='content.join("
        ")' style='text-align: left; white-space: nowrap;') .modal-footer + .pull-left(ng-show='meta') {{meta}} button.btn.btn-primary(id='confirm-btn-confirm' ng-click='$hide()') Ok From cd6e315d8e7ca71a235e76f8868c2bc5a47921f7 Mon Sep 17 00:00:00 2001 From: mcherkasov Date: Tue, 14 Nov 2017 12:30:59 +0300 Subject: [PATCH 121/243] 5195 DataStreamer can fails if non-data node enter\leave the grid. --- .../cache/GridCacheAffinityManager.java | 7 ++- .../datastreamer/DataStreamerImpl.java | 18 +++++- .../cache/IgniteCacheDynamicStopSelfTest.java | 2 +- .../DataStreamerImplSelfTest.java | 62 +++++++++++++++++++ 4 files changed, 85 insertions(+), 4 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAffinityManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAffinityManager.java index fbe1a9583a0db..f0cdd225568ef 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAffinityManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAffinityManager.java @@ -123,7 +123,12 @@ public List> assignments(AffinityTopologyVersion topVer) { if (cctx.isLocal()) topVer = LOC_CACHE_TOP_VER; - return aff.assignments(topVer); + GridAffinityAssignmentCache aff0 = aff; + + if (aff0 == null) + throw new IgniteException(FAILED_TO_FIND_CACHE_ERR_MSG + cctx.name()); + + return aff0.assignments(topVer); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java index d38132fa3059c..c5ad1bb39ef2d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java @@ -779,6 +779,8 @@ private void load0( else 
topVer = ctx.cache().context().exchange().readyAffinityVersion(); + List> assignments = cctx.affinity().assignments(topVer); + if (!allowOverwrite() && !cctx.isLocal()) { // Cases where cctx required. gate = cctx.gate(); @@ -956,7 +958,7 @@ else if (remaps + 1 > maxRemapCnt) { final List> futs; try { - futs = buf.update(entriesForNode, topVer, opFut, remap); + futs = buf.update(entriesForNode, topVer, assignments, opFut, remap); opFut.markInitialized(); } @@ -1411,6 +1413,7 @@ private class Buffer { @Nullable List> update( Iterable newEntries, AffinityTopologyVersion topVer, + List> assignments, GridCompoundFuture opFut, boolean remap ) throws IgniteInterruptedCheckedException { @@ -1441,8 +1444,16 @@ private class Buffer { futs[b.partId] = curFut0; } - if (b.batchTopVer == null) + if (b.batchTopVer == null) { + b.batchTopVer = topVer; + b.assignments = assignments; + } + + if (!topVer.equals(b.batchTopVer) && b.assignments.equals(assignments)) { + // topology changed, but affinity is the same, no re-map is required. b.batchTopVer = topVer; + b.assignments = assignments; + } curBatchTopVer = b.batchTopVer; @@ -2186,6 +2197,9 @@ private class PerStripeBuffer { /** */ private final IgniteInClosure> signalC; + /** Batch assignments */ + public List> assignments; + /** * @param partId Partition ID. * @param c Signal closure. 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheDynamicStopSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheDynamicStopSelfTest.java index 5628c4db26db5..44cd475948e0b 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheDynamicStopSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheDynamicStopSelfTest.java @@ -142,4 +142,4 @@ public void checkStopStartCacheWithDataLoader(final boolean allowOverwrite) thro ignite(0).destroyCache(DEFAULT_CACHE_NAME); } -} \ No newline at end of file +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImplSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImplSelfTest.java index e90f6b0946561..26aed4b3e09fc 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImplSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImplSelfTest.java @@ -27,6 +27,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import javax.cache.CacheException; @@ -38,6 +39,7 @@ import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.managers.communication.GridIoMessage; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; @@ -430,6 +432,66 @@ public void 
testRetryWhenTopologyMismatch() throws Exception { assertFalse(logWriter.toString().contains("DataStreamer will retry data transfer at stable topology")); } + /** + * @throws Exception If failed. + */ + public void testClientEventsNotCausingRemaps() throws Exception { + Ignite ignite = startGrids(2); + + ignite.getOrCreateCache(DEFAULT_CACHE_NAME); + + IgniteDataStreamer streamer = ignite.dataStreamer(DEFAULT_CACHE_NAME); + + ((DataStreamerImpl)streamer).maxRemapCount(3); + + streamer.addData(1, 1); + + for (int topChanges = 0; topChanges < 30; topChanges++) { + IgniteEx node = startGrid(getConfiguration("flapping-client").setClientMode(true)); + + streamer.addData(1, 1); + + node.close(); + + streamer.addData(1, 1); + } + + streamer.flush(); + + streamer.close(); + } + + /** + * @throws Exception If failed. + */ + public void testServerEventsCauseRemaps() throws Exception { + Ignite ignite = startGrids(2); + + ignite.getOrCreateCache(DEFAULT_CACHE_NAME); + + IgniteDataStreamer streamer = ignite.dataStreamer(DEFAULT_CACHE_NAME); + + ((DataStreamerImpl)streamer).maxRemapCount(0); + + streamer.addData(1, 1); + + startGrid(2); + + try { + streamer.addData(1, 1); + + streamer.flush(); + } + catch (IllegalStateException ex) { + assert ex.getMessage().contains("Data streamer has been closed"); + + return; + } + + fail("Expected exception wasn't thrown"); + } + + /** * Gets cache configuration. * From ae5ad71983b5ddbbec5eb86317830acec96b884f Mon Sep 17 00:00:00 2001 From: alexdel Date: Tue, 21 Nov 2017 20:15:32 +0700 Subject: [PATCH 122/243] IGNITE-6914. Web Console: Exporting large CSV files via file-saver library. 
(cherry picked from commit fe6bbab) --- modules/web-console/frontend/app/app.js | 18 +++++++++--------- .../frontend/app/modules/sql/sql.controller.js | 4 ++-- .../app/services/LegacyUtils.service.js | 18 +++++------------- modules/web-console/frontend/package.json | 2 +- .../web-console/frontend/views/sql/sql.tpl.pug | 4 ++-- 5 files changed, 19 insertions(+), 27 deletions(-) diff --git a/modules/web-console/frontend/app/app.js b/modules/web-console/frontend/app/app.js index 44454f659bdd6..ca678fcacd295 100644 --- a/modules/web-console/frontend/app/app.js +++ b/modules/web-console/frontend/app/app.js @@ -142,23 +142,23 @@ angular.module('ignite-console', [ 'ngAnimate', 'ngSanitize', // Third party libs. - 'ngRetina', 'btford.socket-io', - 'mgcrea.ngStrap', - 'ui.router', - 'ui.router.state.events', - 'gridster', 'dndLists', + 'gridster', + 'mgcrea.ngStrap', + 'ngRetina', 'nvd3', + 'pascalprecht.translate', 'smart-table', 'treeControl', - 'pascalprecht.translate', 'ui.grid', - 'ui.grid.saveState', - 'ui.grid.selection', - 'ui.grid.resizeColumns', 'ui.grid.autoResize', 'ui.grid.exporter', + 'ui.grid.resizeColumns', + 'ui.grid.saveState', + 'ui.grid.selection', + 'ui.router', + 'ui.router.state.events', // Base modules. 
'ignite-console.core', 'ignite-console.ace', diff --git a/modules/web-console/frontend/app/modules/sql/sql.controller.js b/modules/web-console/frontend/app/modules/sql/sql.controller.js index e97825ca3c1ee..ea0a06db5ff7d 100644 --- a/modules/web-console/frontend/app/modules/sql/sql.controller.js +++ b/modules/web-console/frontend/app/modules/sql/sql.controller.js @@ -1652,7 +1652,7 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', csvContent += cols.join(';') + '\n'; }); - LegacyUtils.download('application/octet-stream;charset=utf-8', fileName, escape(csvContent)); + LegacyUtils.download('text/csv', fileName, csvContent); }; /** @@ -1767,7 +1767,7 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', }; $scope.scanAvailable = function(paragraph) { - return $scope.caches.length && !paragraph.loading; + return $scope.caches.length && !(paragraph.loading || paragraph.csvIsPreparing); }; $scope.scanTooltip = function(paragraph) { diff --git a/modules/web-console/frontend/app/services/LegacyUtils.service.js b/modules/web-console/frontend/app/services/LegacyUtils.service.js index be593b052b1c4..b19bde3f02be4 100644 --- a/modules/web-console/frontend/app/services/LegacyUtils.service.js +++ b/modules/web-console/frontend/app/services/LegacyUtils.service.js @@ -15,6 +15,8 @@ * limitations under the License. */ +import saver from 'file-saver'; + // TODO: Refactor this service for legacy tables with more than one input field. 
export default ['IgniteLegacyUtils', ['IgniteErrorPopover', (ErrorPopover) => { function isDefined(v) { @@ -351,20 +353,10 @@ export default ['IgniteLegacyUtils', ['IgniteErrorPopover', (ErrorPopover) => { return !isEmpty; }, domainForStoreConfigured, - download(type, name, data) { - const file = document.createElement('a'); - - file.setAttribute('href', 'data:' + type + ';charset=utf-8,' + data); - file.setAttribute('download', name); - file.setAttribute('target', '_self'); - - file.style.display = 'none'; - - document.body.appendChild(file); - - file.click(); + download(type = 'application/octet-stream', name = 'file.txt', data = '') { + const file = new Blob([data], { type: `${type};charset=utf-8`}); - document.body.removeChild(file); + saver.saveAs(file, name, false); }, getQueryVariable(name) { const attrs = window.location.search.substring(1).split('&'); diff --git a/modules/web-console/frontend/package.json b/modules/web-console/frontend/package.json index 49e69b4636bdf..18635f7e71c09 100644 --- a/modules/web-console/frontend/package.json +++ b/modules/web-console/frontend/package.json @@ -76,8 +76,8 @@ "html-loader": "0.4.5", "html-webpack-plugin": "2.29.0", "jquery": "3.2.1", - "json-loader": "0.5.7", "json-bigint": "0.2.3", + "json-loader": "0.5.7", "jszip": "3.1.4", "lodash": "4.17.4", "node-sass": "4.6.0", diff --git a/modules/web-console/frontend/views/sql/sql.tpl.pug b/modules/web-console/frontend/views/sql/sql.tpl.pug index 44989e8808dd6..a53a082ca8095 100644 --- a/modules/web-console/frontend/views/sql/sql.tpl.pug +++ b/modules/web-console/frontend/views/sql/sql.tpl.pug @@ -204,7 +204,7 @@ mixin table-result-heading-scan button.btn.btn-primary.btn--with-icon( ng-click='exportCsv(paragraph)' - ng-disabled='paragraph.loading' + ng-disabled='paragraph.loading || paragraph.csvIsPreparing' bs-tooltip='' ng-attr-title='{{ scanTooltip(paragraph) }}' @@ -218,7 +218,7 @@ mixin table-result-heading-scan -var options = [{ text: "Export", click: 
'exportCsv(paragraph)' }, { text: 'Export all', click: 'exportCsvAll(paragraph)' }] button.btn.dropdown-toggle.btn-primary( - ng-disabled='paragraph.loading' + ng-disabled='paragraph.loading || paragraph.csvIsPreparing' bs-dropdown=`${JSON.stringify(options)}` From 74cb0e9388c272c2692d48f9702967af19dda295 Mon Sep 17 00:00:00 2001 From: vsisko Date: Tue, 21 Nov 2017 21:30:03 +0700 Subject: [PATCH 123/243] IGNITE-6976 Visor CMD: Task to put/get/remove data to/from caches. (cherry picked from commit 152104e) --- .../visor/cache/VisorCacheModifyTask.java | 113 +++++++++++++++++ .../visor/cache/VisorCacheModifyTaskArg.java | 114 ++++++++++++++++++ .../cache/VisorCacheModifyTaskResult.java | 101 ++++++++++++++++ .../visor/cache/VisorModifyCacheMode.java | 47 ++++++++ .../internal/visor/query/VisorQueryTask.java | 3 - .../internal/visor/query/VisorQueryUtils.java | 25 ++-- .../resources/META-INF/classnames.properties | 77 +++++++++--- 7 files changed, 445 insertions(+), 35 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTask.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskArg.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskResult.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorModifyCacheMode.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTask.java new file mode 100644 index 0000000000000..d6b1ff76f0d85 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTask.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.cache; + +import java.util.UUID; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorOneNodeTask; +import org.apache.ignite.internal.visor.query.VisorQueryUtils; +import org.apache.ignite.internal.visor.util.VisorTaskUtils; + +/** + * Task that modify value in specified cache. + */ +@GridInternal +public class VisorCacheModifyTask extends VisorOneNodeTask { + /** */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorCacheModifyJob job(VisorCacheModifyTaskArg arg) { + return new VisorCacheModifyJob(arg, debug); + } + + /** + * Job that clear specified caches. + */ + private static class VisorCacheModifyJob extends VisorJob { + /** */ + private static final long serialVersionUID = 0L; + + /** + * Create job. + * + * @param arg Task argument. + * @param debug Debug flag. 
+ */ + private VisorCacheModifyJob(VisorCacheModifyTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override protected VisorCacheModifyTaskResult run(final VisorCacheModifyTaskArg arg) { + assert arg != null; + + VisorModifyCacheMode mode = arg.getMode(); + String cacheName = arg.getCacheName(); + Object key = arg.getKey(); + + assert mode != null; + assert cacheName != null; + assert key != null; + + IgniteCache cache = ignite.cache(cacheName); + + if (cache == null) + throw new IllegalArgumentException("Failed to find cache with specified name [cacheName=" + arg.getCacheName() + "]"); + + ClusterNode node = ignite.affinity(cacheName).mapKeyToNode(key); + + UUID nid = node != null ? node.id() : null; + + switch (mode) { + case PUT: + Object old = cache.get(key); + + cache.put(key, arg.getValue()); + + return new VisorCacheModifyTaskResult(nid, VisorTaskUtils.compactClass(old), + VisorQueryUtils.convertValue(old)); + + case GET: + Object value = cache.get(key); + + return new VisorCacheModifyTaskResult(nid, VisorTaskUtils.compactClass(value), + VisorQueryUtils.convertValue(value)); + + case REMOVE: + Object removed = cache.get(key); + + cache.remove(key); + + return new VisorCacheModifyTaskResult(nid, VisorTaskUtils.compactClass(removed), + VisorQueryUtils.convertValue(removed)); + } + + return new VisorCacheModifyTaskResult(nid, null, null); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(VisorCacheModifyJob.class, this); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskArg.java new file mode 100644 index 0000000000000..706aab70626bc --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskArg.java @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or 
more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.cache; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorDataTransferObject; + +/** + * Argument for {@link VisorCacheModifyTask}. + */ +public class VisorCacheModifyTaskArg extends VisorDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Cache name. */ + private String cacheName; + + /** Modification mode. */ + private VisorModifyCacheMode mode; + + /** Specified key. */ + private Object key; + + /** Specified value. */ + private Object value; + + /** + * Default constructor. + */ + public VisorCacheModifyTaskArg() { + // No-op. + } + + /** + * @param cacheName Cache name. + * @param mode Modification mode. + * @param key Specified key. + * @param value Specified value. + */ + public VisorCacheModifyTaskArg(String cacheName, VisorModifyCacheMode mode, Object key, Object value) { + this.cacheName = cacheName; + this.mode = mode; + this.key = key; + this.value = value; + } + + /** + * @return Cache name. 
+ */ + public String getCacheName() { + return cacheName; + } + + /** + * @return Modification mode. + */ + public VisorModifyCacheMode getMode() { + return mode; + } + + /** + * @return Specified key. + */ + public Object getKey() { + return key; + } + + /** + * @return Specified value. + */ + public Object getValue() { + return value; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeString(out, cacheName); + U.writeEnum(out, mode); + out.writeObject(key); + out.writeObject(value); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + cacheName = U.readString(in); + mode = VisorModifyCacheMode.fromOrdinal(in.readByte()); + key = in.readObject(); + value = in.readObject(); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(VisorCacheModifyTaskArg.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskResult.java new file mode 100644 index 0000000000000..ce09bb2a32972 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskResult.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.cache; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.UUID; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorDataTransferObject; + +/** + * Result for {@link VisorCacheModifyTask}. + */ +public class VisorCacheModifyTaskResult extends VisorDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Node ID where modified data contained. */ + private UUID affinityNode; + + /** Result type name. */ + private String resultType; + + /** Value for specified key or number of modified rows. */ + private Object result; + + /** + * Default constructor. + */ + public VisorCacheModifyTaskResult() { + // No-op. + } + + /** + * @param affinityNode Node ID where modified data contained. + * @param resultType Result type name. + * @param result Value for specified key or number of modified rows. + */ + public VisorCacheModifyTaskResult(UUID affinityNode, String resultType, Object result) { + this.affinityNode = affinityNode; + this.resultType = resultType; + this.result = result; + } + + /** + * @return Node ID where modified data contained. + */ + public UUID getAffinityNode() { + return affinityNode; + } + + /** + * @return Result type name. + */ + public String getResultType() { + return resultType; + } + + /** + * @return Value for specified key or number of modified rows.. 
+ */ + public Object getResult() { + return result; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeUuid(out, affinityNode); + U.writeString(out, resultType); + out.writeObject(result); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + affinityNode = U.readUuid(in); + resultType = U.readString(in); + result = in.readObject(); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(VisorCacheModifyTaskResult.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorModifyCacheMode.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorModifyCacheMode.java new file mode 100644 index 0000000000000..4e284393ddcc2 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorModifyCacheMode.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.visor.cache; + +import org.jetbrains.annotations.Nullable; + +/** + * Enumeration of all supported cache modify modes. 
+ */ +public enum VisorModifyCacheMode { + /** Put new value into cache. */ + PUT, + + /** Get value from cache. */ + GET, + + /** Remove value from cache. */ + REMOVE; + + /** Enumerated values. */ + private static final VisorModifyCacheMode[] VALS = values(); + + /** + * Efficiently gets enumerated value from its ordinal. + * + * @param ord Ordinal value. + * @return Enumerated value or {@code null} if ordinal out of range. + */ + @Nullable public static VisorModifyCacheMode fromOrdinal(int ord) { + return ord >= 0 && ord < VALS.length ? VALS[ord] : null; + } +} \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java index 933bacc6c1882..51bf7d6f10009 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java @@ -22,15 +22,12 @@ import java.util.Collection; import java.util.List; import java.util.UUID; -import javax.cache.CacheException; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; -import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.task.GridInternal; import org.apache.ignite.internal.util.typedef.F; -import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.VisorEither; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryUtils.java index 9a0262d2ff4bb..aa4cb48a0abd2 
100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryUtils.java @@ -218,6 +218,17 @@ public static String binaryToString(BinaryObject obj) { "typeId", obj.type().typeId(), true); } + public static Object convertValue(Object original) { + if (original == null) + return null; + else if (isKnownType(original)) + return original; + else if (original instanceof BinaryObject) + return binaryToString((BinaryObject)original); + else + return original.getClass().isArray() ? "binary" : original.toString(); + } + /** * Collects rows from sql query future, first time creates meta and column names arrays. * @@ -237,18 +248,8 @@ public static List fetchSqlQueryRows(VisorQueryCursor> cur, in Object[] row = new Object[sz]; - for (int i = 0; i < sz; i++) { - Object o = next.get(i); - - if (o == null) - row[i] = null; - else if (isKnownType(o)) - row[i] = o; - else if (o instanceof BinaryObject) - row[i] = binaryToString((BinaryObject)o); - else - row[i] = o.getClass().isArray() ? 
"binary" : o.toString(); - } + for (int i = 0; i < sz; i++) + row[i] = convertValue(next.get(i)); rows.add(row); diff --git a/modules/core/src/main/resources/META-INF/classnames.properties b/modules/core/src/main/resources/META-INF/classnames.properties index f3fc074e1e850..c27681ec2c4a5 100644 --- a/modules/core/src/main/resources/META-INF/classnames.properties +++ b/modules/core/src/main/resources/META-INF/classnames.properties @@ -54,15 +54,19 @@ org.apache.ignite.cache.affinity.AffinityUuid org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction$HashComparator org.apache.ignite.cache.eviction.AbstractEvictionPolicy +org.apache.ignite.cache.eviction.AbstractEvictionPolicyFactory org.apache.ignite.cache.eviction.EvictionFilter org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicy +org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicyFactory org.apache.ignite.cache.eviction.igfs.IgfsEvictionFilter org.apache.ignite.cache.eviction.igfs.IgfsPerBlockLruEvictionPolicy org.apache.ignite.cache.eviction.lru.LruEvictionPolicy +org.apache.ignite.cache.eviction.lru.LruEvictionPolicyFactory org.apache.ignite.cache.eviction.sorted.SortedEvictionPolicy org.apache.ignite.cache.eviction.sorted.SortedEvictionPolicy$DefaultHolderComparator org.apache.ignite.cache.eviction.sorted.SortedEvictionPolicy$GridConcurrentSkipListSetEx org.apache.ignite.cache.eviction.sorted.SortedEvictionPolicy$HolderComparator +org.apache.ignite.cache.eviction.sorted.SortedEvictionPolicyFactory org.apache.ignite.cache.query.CacheQueryEntryEvent org.apache.ignite.cache.query.ContinuousQuery org.apache.ignite.cache.query.Query @@ -135,12 +139,14 @@ org.apache.ignite.configuration.CacheConfiguration$IgniteAllNodesPredicate org.apache.ignite.configuration.CheckpointWriteOrder org.apache.ignite.configuration.CollectionConfiguration org.apache.ignite.configuration.DataPageEvictionMode 
+org.apache.ignite.configuration.DataRegionConfiguration +org.apache.ignite.configuration.DataStorageConfiguration org.apache.ignite.configuration.DeploymentMode org.apache.ignite.configuration.IgniteReflectionFactory -org.apache.ignite.configuration.DataStorageConfiguration -org.apache.ignite.configuration.DataRegionConfiguration +org.apache.ignite.configuration.MemoryConfiguration +org.apache.ignite.configuration.MemoryPolicyConfiguration org.apache.ignite.configuration.NearCacheConfiguration -org.apache.ignite.configuration.DataStorageConfiguration +org.apache.ignite.configuration.PersistentStoreConfiguration org.apache.ignite.configuration.TopologyValidator org.apache.ignite.configuration.TransactionConfiguration org.apache.ignite.configuration.WALMode @@ -183,6 +189,7 @@ org.apache.ignite.igfs.mapreduce.records.IgfsByteDelimiterRecordResolver org.apache.ignite.igfs.mapreduce.records.IgfsFixedLengthRecordResolver org.apache.ignite.igfs.mapreduce.records.IgfsNewLineRecordResolver org.apache.ignite.igfs.mapreduce.records.IgfsStringDelimiterRecordResolver +org.apache.ignite.internal.DuplicateTypeIdException org.apache.ignite.internal.ExecutorAwareMessage org.apache.ignite.internal.GridClosureCallMode org.apache.ignite.internal.GridComponent$DiscoveryDataExchangeType @@ -307,10 +314,12 @@ org.apache.ignite.internal.igfs.common.IgfsIpcCommand org.apache.ignite.internal.jdbc2.JdbcBatchUpdateTask org.apache.ignite.internal.jdbc2.JdbcConnection$JdbcConnectionValidationTask org.apache.ignite.internal.jdbc2.JdbcDatabaseMetadata$UpdateMetadataTask +org.apache.ignite.internal.jdbc2.JdbcQueryMultipleStatementsTask org.apache.ignite.internal.jdbc2.JdbcQueryTask org.apache.ignite.internal.jdbc2.JdbcQueryTask$1 -org.apache.ignite.internal.jdbc2.JdbcQueryTask$QueryResult -org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx +org.apache.ignite.internal.jdbc2.JdbcQueryTaskResult +org.apache.ignite.internal.jdbc2.JdbcQueryTaskV2 
+org.apache.ignite.internal.jdbc2.JdbcQueryTaskV3 org.apache.ignite.internal.managers.GridManagerAdapter$1$1 org.apache.ignite.internal.managers.checkpoint.GridCheckpointManager$CheckpointSet org.apache.ignite.internal.managers.checkpoint.GridCheckpointRequest @@ -341,7 +350,7 @@ org.apache.ignite.internal.mem.IgniteOutOfMemoryException org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl$Segment org.apache.ignite.internal.pagemem.wal.StorageException org.apache.ignite.internal.pagemem.wal.WALIterator -org.apache.ignite.internal.pagemem.wal.record.TxRecord$TxAction +org.apache.ignite.internal.pagemem.wal.WALPointer org.apache.ignite.internal.pagemem.wal.record.WALRecord$RecordType org.apache.ignite.internal.pagemem.wal.record.delta.DeltaApplicationException org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion @@ -426,29 +435,29 @@ org.apache.ignite.internal.processors.cache.GridCacheAdapter$11 org.apache.ignite.internal.processors.cache.GridCacheAdapter$12 org.apache.ignite.internal.processors.cache.GridCacheAdapter$13 org.apache.ignite.internal.processors.cache.GridCacheAdapter$14 -org.apache.ignite.internal.processors.cache.GridCacheAdapter$15$1 -org.apache.ignite.internal.processors.cache.GridCacheAdapter$16 +org.apache.ignite.internal.processors.cache.GridCacheAdapter$15 +org.apache.ignite.internal.processors.cache.GridCacheAdapter$16$1 org.apache.ignite.internal.processors.cache.GridCacheAdapter$17 +org.apache.ignite.internal.processors.cache.GridCacheAdapter$18 org.apache.ignite.internal.processors.cache.GridCacheAdapter$2 -org.apache.ignite.internal.processors.cache.GridCacheAdapter$25$1 -org.apache.ignite.internal.processors.cache.GridCacheAdapter$27 -org.apache.ignite.internal.processors.cache.GridCacheAdapter$28$1 -org.apache.ignite.internal.processors.cache.GridCacheAdapter$29 +org.apache.ignite.internal.processors.cache.GridCacheAdapter$26$1 +org.apache.ignite.internal.processors.cache.GridCacheAdapter$28 
+org.apache.ignite.internal.processors.cache.GridCacheAdapter$29$1 org.apache.ignite.internal.processors.cache.GridCacheAdapter$3 -org.apache.ignite.internal.processors.cache.GridCacheAdapter$31 +org.apache.ignite.internal.processors.cache.GridCacheAdapter$30 +org.apache.ignite.internal.processors.cache.GridCacheAdapter$32 org.apache.ignite.internal.processors.cache.GridCacheAdapter$4 -org.apache.ignite.internal.processors.cache.GridCacheAdapter$47 org.apache.ignite.internal.processors.cache.GridCacheAdapter$48 org.apache.ignite.internal.processors.cache.GridCacheAdapter$49 org.apache.ignite.internal.processors.cache.GridCacheAdapter$50 -org.apache.ignite.internal.processors.cache.GridCacheAdapter$52 +org.apache.ignite.internal.processors.cache.GridCacheAdapter$51 org.apache.ignite.internal.processors.cache.GridCacheAdapter$53 -org.apache.ignite.internal.processors.cache.GridCacheAdapter$53$1 org.apache.ignite.internal.processors.cache.GridCacheAdapter$54 +org.apache.ignite.internal.processors.cache.GridCacheAdapter$54$1 org.apache.ignite.internal.processors.cache.GridCacheAdapter$55 +org.apache.ignite.internal.processors.cache.GridCacheAdapter$56 org.apache.ignite.internal.processors.cache.GridCacheAdapter$6 org.apache.ignite.internal.processors.cache.GridCacheAdapter$8 -org.apache.ignite.internal.processors.cache.GridCacheAdapter$9 org.apache.ignite.internal.processors.cache.GridCacheAdapter$AsyncOp$1 org.apache.ignite.internal.processors.cache.GridCacheAdapter$AsyncOp$1$1 org.apache.ignite.internal.processors.cache.GridCacheAdapter$AsyncOpRetryFuture$1 @@ -584,6 +593,7 @@ org.apache.ignite.internal.processors.cache.StoredCacheData org.apache.ignite.internal.processors.cache.affinity.GridCacheAffinityProxy org.apache.ignite.internal.processors.cache.binary.BinaryMetadataHolder org.apache.ignite.internal.processors.cache.binary.BinaryMetadataKey +org.apache.ignite.internal.processors.cache.binary.BinaryMetadataTransport$2 
org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl$3 org.apache.ignite.internal.processors.cache.binary.MetadataRequestMessage org.apache.ignite.internal.processors.cache.binary.MetadataResponseMessage @@ -757,6 +767,7 @@ org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtCol org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture$2 org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture$3 org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture$4 +org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture$5 org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture$LockTimeoutObject$1 org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture$MiniFuture$1 org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CacheGroupAffinityMessage @@ -817,6 +828,7 @@ org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$ org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$2 org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$3 org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$4 +org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$5 org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$LockTimeoutObject$1 org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$MiniFuture$1 org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockRequest @@ -861,6 +873,8 @@ org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$19 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$2 
org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$20 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$21 +org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$22 +org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$23 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$3 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$4 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$5 @@ -900,9 +914,11 @@ org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree$DestroyBa org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree$Result org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO$EntryPart org.apache.ignite.internal.processors.cache.persistence.wal.AbstractWalRecordsIterator +org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager$FileArchiver$1 org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager$RecordsIterator org.apache.ignite.internal.processors.cache.persistence.wal.SegmentEofException +org.apache.ignite.internal.processors.cache.persistence.wal.WalSegmentTailReachedException org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException org.apache.ignite.internal.processors.cache.persistence.wal.reader.StandaloneWalRecordsIterator org.apache.ignite.internal.processors.cache.query.CacheQueryEntry @@ -919,8 +935,6 @@ org.apache.ignite.internal.processors.cache.query.GridCacheQueryDetailMetricsAda org.apache.ignite.internal.processors.cache.query.GridCacheQueryFutureAdapter$1 org.apache.ignite.internal.processors.cache.query.GridCacheQueryFutureAdapter$2 org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$10 
-org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$11 -org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$12$1 org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$4$1 org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$4$2 org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$5 @@ -943,10 +957,16 @@ org.apache.ignite.internal.processors.cache.query.GridCacheQueryMetricsKey org.apache.ignite.internal.processors.cache.query.GridCacheQueryRequest org.apache.ignite.internal.processors.cache.query.GridCacheQueryResponse org.apache.ignite.internal.processors.cache.query.GridCacheQueryResponseEntry +org.apache.ignite.internal.processors.cache.query.GridCacheQuerySqlMetadataJobV2 +org.apache.ignite.internal.processors.cache.query.GridCacheQuerySqlMetadataJobV2$1 +org.apache.ignite.internal.processors.cache.query.GridCacheQuerySqlMetadataJobV2$2 +org.apache.ignite.internal.processors.cache.query.GridCacheQuerySqlMetadataJobV2$3 +org.apache.ignite.internal.processors.cache.query.GridCacheQuerySqlMetadataV2 org.apache.ignite.internal.processors.cache.query.GridCacheQueryType org.apache.ignite.internal.processors.cache.query.GridCacheSqlIndexMetadata org.apache.ignite.internal.processors.cache.query.GridCacheSqlMetadata org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery +org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx org.apache.ignite.internal.processors.cache.query.continuous.CacheContinuousQueryBatchAck org.apache.ignite.internal.processors.cache.query.continuous.CacheContinuousQueryDeployableObject org.apache.ignite.internal.processors.cache.query.continuous.CacheContinuousQueryEntry @@ -973,6 +993,7 @@ org.apache.ignite.internal.processors.cache.ratemetrics.HitRateMetricsSandbox org.apache.ignite.internal.processors.cache.store.GridCacheStoreManagerAdapter$1 org.apache.ignite.internal.processors.cache.store.GridCacheStoreManagerAdapter$2 
org.apache.ignite.internal.processors.cache.store.GridCacheStoreManagerAdapter$3 +org.apache.ignite.internal.processors.cache.store.GridCacheStoreManagerAdapter$StoreOperation org.apache.ignite.internal.processors.cache.store.GridCacheWriteBehindStore$BatchingResult org.apache.ignite.internal.processors.cache.store.GridCacheWriteBehindStore$StatefulValue org.apache.ignite.internal.processors.cache.store.GridCacheWriteBehindStore$StoreOperation @@ -1264,6 +1285,7 @@ org.apache.ignite.internal.processors.marshaller.MappingProposedMessage$Proposal org.apache.ignite.internal.processors.marshaller.MarshallerMappingItem org.apache.ignite.internal.processors.marshaller.MissingMappingRequestMessage org.apache.ignite.internal.processors.marshaller.MissingMappingResponseMessage +org.apache.ignite.internal.processors.odbc.jdbc.JdbcStatementType org.apache.ignite.internal.processors.odbc.odbc.escape.OdbcEscapeType org.apache.ignite.internal.processors.platform.PlatformAbstractConfigurationClosure org.apache.ignite.internal.processors.platform.PlatformAbstractPredicate @@ -1287,6 +1309,7 @@ org.apache.ignite.internal.processors.platform.cache.query.PlatformContinuousQue org.apache.ignite.internal.processors.platform.cache.query.PlatformContinuousQueryFilter org.apache.ignite.internal.processors.platform.cache.query.PlatformContinuousQueryImpl org.apache.ignite.internal.processors.platform.cache.query.PlatformContinuousQueryRemoteFilter +org.apache.ignite.internal.processors.platform.client.IgniteClientException org.apache.ignite.internal.processors.platform.cluster.PlatformClusterNodeFilter org.apache.ignite.internal.processors.platform.cluster.PlatformClusterNodeFilterImpl org.apache.ignite.internal.processors.platform.compute.PlatformAbstractJob @@ -1323,6 +1346,7 @@ org.apache.ignite.internal.processors.platform.entityframework.PlatformDotNetEnt org.apache.ignite.internal.processors.platform.entityframework.PlatformDotNetEntityFrameworkCacheExtension$RemoveOldEntriesRunnable 
org.apache.ignite.internal.processors.platform.entityframework.PlatformDotNetEntityFrameworkIncreaseVersionProcessor org.apache.ignite.internal.processors.platform.events.PlatformEventFilterListenerImpl +org.apache.ignite.internal.processors.platform.events.PlatformLocalEventListener org.apache.ignite.internal.processors.platform.message.PlatformMessageFilter org.apache.ignite.internal.processors.platform.messaging.PlatformMessageFilterImpl org.apache.ignite.internal.processors.platform.messaging.PlatformMessageLocalFilter @@ -1361,7 +1385,9 @@ org.apache.ignite.internal.processors.query.schema.message.SchemaAbstractDiscove org.apache.ignite.internal.processors.query.schema.message.SchemaFinishDiscoveryMessage org.apache.ignite.internal.processors.query.schema.message.SchemaOperationStatusMessage org.apache.ignite.internal.processors.query.schema.message.SchemaProposeDiscoveryMessage +org.apache.ignite.internal.processors.query.schema.operation.SchemaAbstractAlterTableOperation org.apache.ignite.internal.processors.query.schema.operation.SchemaAbstractOperation +org.apache.ignite.internal.processors.query.schema.operation.SchemaAlterTableAddColumnOperation org.apache.ignite.internal.processors.query.schema.operation.SchemaIndexAbstractOperation org.apache.ignite.internal.processors.query.schema.operation.SchemaIndexCreateOperation org.apache.ignite.internal.processors.query.schema.operation.SchemaIndexDropOperation @@ -1465,6 +1491,8 @@ org.apache.ignite.internal.processors.task.GridTaskThreadContextKey org.apache.ignite.internal.processors.task.GridTaskWorker$3 org.apache.ignite.internal.processors.task.GridTaskWorker$5 org.apache.ignite.internal.processors.task.GridTaskWorker$State +org.apache.ignite.internal.sql.SqlLexerTokenType +org.apache.ignite.internal.sql.SqlParseException org.apache.ignite.internal.transactions.IgniteTxHeuristicCheckedException org.apache.ignite.internal.transactions.IgniteTxOptimisticCheckedException 
org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException @@ -1557,6 +1585,7 @@ org.apache.ignite.internal.util.IgniteUtils$7 org.apache.ignite.internal.util.IgniteUtils$8 org.apache.ignite.internal.util.IgniteUtils$9 org.apache.ignite.internal.util.StripedCompositeReadWriteLock$ReadLock +org.apache.ignite.internal.util.StripedExecutor$StealingStripe$1 org.apache.ignite.internal.util.UUIDCollectionMessage org.apache.ignite.internal.util.future.AsyncFutureListener org.apache.ignite.internal.util.future.GridCompoundFuture$1 @@ -1772,6 +1801,10 @@ org.apache.ignite.internal.visor.cache.VisorCacheMetrics org.apache.ignite.internal.visor.cache.VisorCacheMetricsCollectorTask org.apache.ignite.internal.visor.cache.VisorCacheMetricsCollectorTask$VisorCacheMetricsCollectorJob org.apache.ignite.internal.visor.cache.VisorCacheMetricsCollectorTaskArg +org.apache.ignite.internal.visor.cache.VisorCacheModifyTask +org.apache.ignite.internal.visor.cache.VisorCacheModifyTask$VisorCacheClearJob +org.apache.ignite.internal.visor.cache.VisorCacheModifyTaskArg +org.apache.ignite.internal.visor.cache.VisorCacheModifyTaskResult org.apache.ignite.internal.visor.cache.VisorCacheNearConfiguration org.apache.ignite.internal.visor.cache.VisorCacheNodesTask org.apache.ignite.internal.visor.cache.VisorCacheNodesTask$VisorCacheNodesJob @@ -1797,6 +1830,7 @@ org.apache.ignite.internal.visor.cache.VisorCacheStopTask$VisorCacheStopJob org.apache.ignite.internal.visor.cache.VisorCacheStopTaskArg org.apache.ignite.internal.visor.cache.VisorCacheStoreConfiguration org.apache.ignite.internal.visor.cache.VisorMemoryMetrics +org.apache.ignite.internal.visor.cache.VisorModifyCacheMode org.apache.ignite.internal.visor.cache.VisorPartitionMap org.apache.ignite.internal.visor.compute.VisorComputeCancelSessionsTask org.apache.ignite.internal.visor.compute.VisorComputeCancelSessionsTask$VisorComputeCancelSessionsJob @@ -1874,6 +1908,9 @@ 
org.apache.ignite.internal.visor.node.VisorBasicConfiguration org.apache.ignite.internal.visor.node.VisorBinaryConfiguration org.apache.ignite.internal.visor.node.VisorBinaryTypeConfiguration org.apache.ignite.internal.visor.node.VisorCacheKeyConfiguration +org.apache.ignite.internal.visor.node.VisorClientConnectorConfiguration +org.apache.ignite.internal.visor.node.VisorDataRegionConfiguration +org.apache.ignite.internal.visor.node.VisorDataStorageConfiguration org.apache.ignite.internal.visor.node.VisorExecutorConfiguration org.apache.ignite.internal.visor.node.VisorExecutorServiceConfiguration org.apache.ignite.internal.visor.node.VisorGridConfiguration @@ -2008,6 +2045,7 @@ org.apache.ignite.plugin.segmentation.SegmentationResolver org.apache.ignite.services.Service org.apache.ignite.services.ServiceConfiguration org.apache.ignite.services.ServiceContext +org.apache.ignite.services.ServiceDeploymentException org.apache.ignite.services.ServiceDescriptor org.apache.ignite.spi.IgnitePortProtocol org.apache.ignite.spi.IgniteSpiCloseableIterator @@ -2094,4 +2132,3 @@ org.apache.ignite.transactions.TransactionRollbackException org.apache.ignite.transactions.TransactionState org.apache.ignite.transactions.TransactionTimeoutException org.apache.ignite.util.AttributeNodeFilter -org.apache.ignite.internal.processors.cache.persistence.file.AsyncFileIO From cf1a73c7c1a11430f694f964efb3a76862fb5d88 Mon Sep 17 00:00:00 2001 From: alexdel Date: Tue, 21 Nov 2017 22:08:25 +0700 Subject: [PATCH 124/243] IGNITE-4394 Web Console: Fixed memory leak in Messages.service. 
(cherry picked from commit df48356) --- .../web-console/frontend/app/services/Messages.service.js | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/web-console/frontend/app/services/Messages.service.js b/modules/web-console/frontend/app/services/Messages.service.js index 854cf0b030ae7..523adaecc326f 100644 --- a/modules/web-console/frontend/app/services/Messages.service.js +++ b/modules/web-console/frontend/app/services/Messages.service.js @@ -44,8 +44,11 @@ export default ['IgniteMessages', ['$alert', ($alert) => { }; const hideAlert = () => { - if (msgModal) + if (msgModal) { msgModal.hide(); + msgModal.destroy(); + msgModal = null; + } }; const _showMessage = (message, err, type, duration) => { From b1ecbdd8db4006f60f6a159e7a5f51aed446e267 Mon Sep 17 00:00:00 2001 From: alexdel Date: Tue, 21 Nov 2017 22:25:47 +0700 Subject: [PATCH 125/243] IGNITE-5641 Web Console: Added "Copy to clipboard" action on Queries screen. (cherry picked from commit 08371f5) --- .../frontend/app/modules/sql/sql.controller.js | 15 +++++++++++---- .../web-console/frontend/views/sql/sql.tpl.pug | 6 ++++-- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/modules/web-console/frontend/app/modules/sql/sql.controller.js b/modules/web-console/frontend/app/modules/sql/sql.controller.js index ea0a06db5ff7d..a2ad912deec46 100644 --- a/modules/web-console/frontend/app/modules/sql/sql.controller.js +++ b/modules/web-console/frontend/app/modules/sql/sql.controller.js @@ -214,8 +214,8 @@ class Paragraph { } // Controller for SQL notebook screen. 
-export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', '$animate', '$location', '$anchorScroll', '$state', '$filter', '$modal', '$popover', 'IgniteLoading', 'IgniteLegacyUtils', 'IgniteMessages', 'IgniteConfirm', 'AgentManager', 'IgniteChartColors', 'IgniteNotebook', 'IgniteNodes', 'uiGridExporterConstants', 'IgniteVersion', 'IgniteActivitiesData', 'JavaTypes', - function($root, $scope, $http, $q, $timeout, $interval, $animate, $location, $anchorScroll, $state, $filter, $modal, $popover, Loading, LegacyUtils, Messages, Confirm, agentMgr, IgniteChartColors, Notebook, Nodes, uiGridExporterConstants, Version, ActivitiesData, JavaTypes) { +export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', '$animate', '$location', '$anchorScroll', '$state', '$filter', '$modal', '$popover', 'IgniteLoading', 'IgniteLegacyUtils', 'IgniteMessages', 'IgniteConfirm', 'AgentManager', 'IgniteChartColors', 'IgniteNotebook', 'IgniteNodes', 'uiGridExporterConstants', 'IgniteVersion', 'IgniteActivitiesData', 'JavaTypes', 'IgniteCopyToClipboard', + function($root, $scope, $http, $q, $timeout, $interval, $animate, $location, $anchorScroll, $state, $filter, $modal, $popover, Loading, LegacyUtils, Messages, Confirm, agentMgr, IgniteChartColors, Notebook, Nodes, uiGridExporterConstants, Version, ActivitiesData, JavaTypes, IgniteCopyToClipboard) { const $ctrl = this; // Define template urls. 
@@ -1613,7 +1613,7 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', .then(() => paragraph.ace && paragraph.ace.focus()); }; - const _export = (fileName, columnDefs, meta, rows) => { + const _export = (fileName, columnDefs, meta, rows, toClipBoard = false) => { let csvContent = ''; const cols = []; @@ -1652,7 +1652,10 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', csvContent += cols.join(';') + '\n'; }); - LegacyUtils.download('text/csv', fileName, csvContent); + if (toClipBoard) + IgniteCopyToClipboard.copy(csvContent); + else + LegacyUtils.download('text/csv', fileName, csvContent); }; /** @@ -1671,6 +1674,10 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', return `export-query-${paragraph.name}${all ? '-all' : ''}.csv`; }; + $scope.exportCsvToClipBoard = (paragraph) => { + _export(exportFileName(paragraph, false), paragraph.gridOptions.columnDefs, paragraph.meta, paragraph.rows, true); + }; + $scope.exportCsv = function(paragraph) { _export(exportFileName(paragraph, false), paragraph.gridOptions.columnDefs, paragraph.meta, paragraph.rows); diff --git a/modules/web-console/frontend/views/sql/sql.tpl.pug b/modules/web-console/frontend/views/sql/sql.tpl.pug index a53a082ca8095..7714235693042 100644 --- a/modules/web-console/frontend/views/sql/sql.tpl.pug +++ b/modules/web-console/frontend/views/sql/sql.tpl.pug @@ -171,7 +171,7 @@ mixin table-result-heading-query i.fa.fa-fw.fa-refresh.fa-spin(ng-if='paragraph.csvIsPreparing') span Export - -var options = [{ text: "Export", click: 'exportCsv(paragraph)' }, { text: 'Export all', click: 'exportCsvAll(paragraph)' }] + -var options = [{ text: 'Export', click: 'exportCsv(paragraph)' }, { text: 'Export all', click: 'exportCsvAll(paragraph)' }, { divider: true }, { text: 'Copy to clipboard', click: 'exportCsvToClipBoard(paragraph)' }] button.btn.dropdown-toggle.btn-primary( ng-disabled='paragraph.loading' @@ -180,6 +180,7 
@@ mixin table-result-heading-query data-toggle='dropdown' data-container='body' data-placement='bottom-right' + data-html='true' ) span.caret @@ -216,7 +217,7 @@ mixin table-result-heading-scan i.fa.fa-fw.fa-refresh.fa-spin(ng-if='paragraph.csvIsPreparing') span Export - -var options = [{ text: "Export", click: 'exportCsv(paragraph)' }, { text: 'Export all', click: 'exportCsvAll(paragraph)' }] + -var options = [{ text: "Export", click: 'exportCsv(paragraph)' }, { text: 'Export all', click: 'exportCsvAll(paragraph)' }, { divider: true }, { text: 'Copy to clipboard', click: 'exportCsvToClipBoard(paragraph)' }] button.btn.dropdown-toggle.btn-primary( ng-disabled='paragraph.loading || paragraph.csvIsPreparing' @@ -225,6 +226,7 @@ mixin table-result-heading-scan data-toggle='dropdown' data-container='body' data-placement='bottom-right' + data-html='true' ) span.caret From 305b1b3b0b80c3d867a091427a7e52ae249ce692 Mon Sep 17 00:00:00 2001 From: "Andrey V. Mashenkov" Date: Wed, 22 Nov 2017 11:19:32 +0300 Subject: [PATCH 126/243] IGNITE-6984: Make cache creation slightly more verbose. (cherry picked from commit bb89ad4) --- .../ignite/internal/processors/cache/GridCacheProcessor.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java index da90ad43eb23f..a993b30a05777 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java @@ -1129,10 +1129,12 @@ private void startCache(GridCacheAdapter cache, QuerySchema schema) throws if (log.isInfoEnabled()) { log.info("Started cache [name=" + cfg.getName() + + ", id="+cacheCtx.cacheId() + (cfg.getGroupName() != null ? 
", group=" + cfg.getGroupName() : "") + ", memoryPolicyName=" + memPlcName + ", mode=" + cfg.getCacheMode() + - ", atomicity=" + cfg.getAtomicityMode() + ']'); + ", atomicity=" + cfg.getAtomicityMode() + + ", backups=" + cfg.getBackups() +']'); } } From 9a92adfb159a7ddfa4a3299deae931270b123653 Mon Sep 17 00:00:00 2001 From: vd-pyatkov Date: Wed, 22 Nov 2017 10:39:58 +0300 Subject: [PATCH 127/243] IGNITE-6922 Class cannot undeploy from grid in some specific cases - Fixes #3045. Signed-off-by: Alexey Goncharuk (cherry picked from commit d205023) --- .../GridDeploymentPerVersionStore.java | 36 ++--- modules/core/src/test/config/tests.properties | 1 + .../ignite/p2p/SharedDeploymentTest.java | 128 ++++++++++++++++++ .../testsuites/IgniteP2PSelfTestSuite.java | 2 + .../tests/p2p/compute/ExternalCallable.java | 10 +- .../tests/p2p/compute/ExternalCallable1.java | 11 +- .../tests/p2p/compute/ExternalCallable2.java | 11 +- .../tests/p2p/compute/ExternalCallable.java | 59 ++++++++ .../tests/p2p/compute/ExternalCallable1.java | 59 ++++++++ .../tests/p2p/compute/ExternalCallable2.java | 59 ++++++++ 10 files changed, 348 insertions(+), 28 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/p2p/SharedDeploymentTest.java create mode 100644 modules/extdata/uri/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java create mode 100644 modules/extdata/uri/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java create mode 100644 modules/extdata/uri/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java index 070b3906c8b0f..8447c97743f73 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java @@ -376,31 +376,33 @@ else if (ctx.discovery().node(meta.senderNodeId()) == null) { // Find existing deployments that need to be checked // whether they should be reused for this request. - for (SharedDeployment d : deps) { - if (!d.pendingUndeploy() && !d.undeployed()) { - Map parties = d.participants(); + if (ctx.config().getDeploymentMode() == CONTINUOUS) { + for (SharedDeployment d : deps) { + if (!d.pendingUndeploy() && !d.undeployed()) { + Map parties = d.participants(); - if (parties != null) { - IgniteUuid ldrId = parties.get(meta.senderNodeId()); + if (parties != null) { + IgniteUuid ldrId = parties.get(meta.senderNodeId()); - if (ldrId != null) { - assert !ldrId.equals(meta.classLoaderId()); + if (ldrId != null) { + assert !ldrId.equals(meta.classLoaderId()); - if (log.isDebugEnabled()) - log.debug("Skipping deployment (loaders on remote node are different) " + - "[dep=" + d + ", meta=" + meta + ']'); + if (log.isDebugEnabled()) + log.debug("Skipping deployment (loaders on remote node are different) " + + "[dep=" + d + ", meta=" + meta + ']'); - continue; + continue; + } } - } - if (depsToCheck == null) - depsToCheck = new LinkedList<>(); + if (depsToCheck == null) + depsToCheck = new LinkedList<>(); - if (log.isDebugEnabled()) - log.debug("Adding deployment to check: " + d); + if (log.isDebugEnabled()) + log.debug("Adding deployment to check: " + d); - depsToCheck.add(d); + depsToCheck.add(d); + } } } diff --git a/modules/core/src/test/config/tests.properties b/modules/core/src/test/config/tests.properties index 1ea5b3daff042..718d66107f550 100644 --- a/modules/core/src/test/config/tests.properties +++ b/modules/core/src/test/config/tests.properties @@ -88,6 +88,7 @@ grid.comm.selftest.timeout=10000 #P2P tests #Overwrite this property. It should point to P2P module compilation directory. 
p2p.uri.cls=file://localhost/@{IGNITE_HOME}/modules/extdata/p2p/target/classes/ +p2p.uri.cls.second=file://localhost/@{IGNITE_HOME}/modules/extdata/uri/target/classes/ # AOP tests. # Connector port for RMI. diff --git a/modules/core/src/test/java/org/apache/ignite/p2p/SharedDeploymentTest.java b/modules/core/src/test/java/org/apache/ignite/p2p/SharedDeploymentTest.java new file mode 100644 index 0000000000000..cc0340e6c2bb8 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/p2p/SharedDeploymentTest.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.p2p; + +import org.apache.ignite.Ignite; +import org.apache.ignite.configuration.DeploymentMode; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.testframework.GridTestExternalClassLoader; +import org.apache.ignite.testframework.config.GridTestProperties; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import java.lang.reflect.Constructor; +import java.net.URL; +import java.util.Collection; + +/** + */ +public class SharedDeploymentTest extends GridCommonAbstractTest { + /** */ + private static final String RUN_CLS = "org.apache.ignite.tests.p2p.compute.ExternalCallable"; + + /** */ + private static final String RUN_CLS1 = "org.apache.ignite.tests.p2p.compute.ExternalCallable1"; + + /** */ + private static final String RUN_CLS2 = "org.apache.ignite.tests.p2p.compute.ExternalCallable2"; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + return super.getConfiguration(igniteInstanceName) + .setPeerClassLoadingEnabled(true) + .setDeploymentMode(DeploymentMode.SHARED); + } + + /** + * @throws Exception if failed. 
+ */ + public void testDeploymentFromSecondAndThird() throws Exception { + try { + startGrid(1); + + final Ignite ignite2 = startGrid(2); + Ignite ignite3 = startGrid(3); + + Collection res = runJob0(new GridTestExternalClassLoader(new URL[] { + new URL(GridTestProperties.getProperty("p2p.uri.cls"))}, RUN_CLS1/*, RUN_CLS2*/), ignite2, 10_000, 1); + + for (Object o: res) + assertEquals(o, 42); + + res = runJob1(new GridTestExternalClassLoader(new URL[] { + new URL(GridTestProperties.getProperty("p2p.uri.cls"))}, RUN_CLS, RUN_CLS2), ignite3, 10_000, 2); + + for (Object o: res) + assertEquals(o, 42); + + res = runJob2(new GridTestExternalClassLoader(new URL[] { + new URL(GridTestProperties.getProperty("p2p.uri.cls"))}, RUN_CLS, RUN_CLS1), ignite3, 10_000, 3); + + for (Object o: res) + assertEquals(o, 42); + + ignite3.close(); + + ignite3 = startGrid(3); + + res = runJob2(new GridTestExternalClassLoader(new URL[] { + new URL(GridTestProperties.getProperty("p2p.uri.cls.second"))}, RUN_CLS, RUN_CLS1), ignite3, 10_000, 4); + + for (Object o: res) + assertEquals(o, 43); + } + finally { + stopAllGrids(); + } + } + + /** + * @param ignite Ignite instance. + * @param timeout Timeout. + * @param param Parameter. + * @throws Exception If failed. + */ + private Collection runJob1(ClassLoader testClassLoader, Ignite ignite, long timeout, int param) throws Exception { + Constructor ctor = testClassLoader.loadClass(RUN_CLS1).getConstructor(int.class); + + return ignite.compute().withTimeout(timeout).broadcast((IgniteCallable)ctor.newInstance(param)); + } + + /** + * @param ignite Ignite instance. + * @param timeout Timeout. + * @param param Parameter. + * @throws Exception If failed. 
+ */ + private Collection runJob0(ClassLoader testClassLoader, Ignite ignite, long timeout, int param) throws Exception { + Constructor ctor = testClassLoader.loadClass(RUN_CLS).getConstructor(int.class); + + return ignite.compute().withTimeout(timeout).broadcast((IgniteCallable)ctor.newInstance(param)); + } + + /** + * @param ignite Ignite instance. + * @param timeout Timeout. + * @param param Parameter. + * @throws Exception If failed. + */ + private Collection runJob2(ClassLoader testClassLoader, Ignite ignite, long timeout, int param) throws Exception { + Constructor ctor = testClassLoader.loadClass(RUN_CLS2).getConstructor(int.class); + + return ignite.compute().withTimeout(timeout).broadcast((IgniteCallable)ctor.newInstance(param)); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java index abd99678f8be5..3c50bafca7d08 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteP2PSelfTestSuite.java @@ -35,6 +35,7 @@ import org.apache.ignite.p2p.GridP2PSameClassLoaderSelfTest; import org.apache.ignite.p2p.GridP2PTimeoutSelfTest; import org.apache.ignite.p2p.GridP2PUndeploySelfTest; +import org.apache.ignite.p2p.SharedDeploymentTest; import org.apache.ignite.testframework.GridTestUtils; /** @@ -72,6 +73,7 @@ public static TestSuite suite(Set ignoredTests) throws Exception { suite.addTest(new TestSuite(GridP2PMissedResourceCacheSizeSelfTest.class)); suite.addTest(new TestSuite(GridP2PContinuousDeploymentSelfTest.class)); suite.addTest(new TestSuite(DeploymentClassLoaderCallableTest.class)); + suite.addTest(new TestSuite(SharedDeploymentTest.class)); GridTestUtils.addTestIfNeeded(suite, GridDeploymentMessageCountSelfTest.class, ignoredTests); return suite; diff --git 
a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java index 25f1f3ea832d6..d24895c425aa2 100644 --- a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java +++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java @@ -18,8 +18,10 @@ package org.apache.ignite.tests.p2p.compute; import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteLogger; import org.apache.ignite.lang.IgniteCallable; import org.apache.ignite.resources.IgniteInstanceResource; +import org.apache.ignite.resources.LoggerResource; /** */ @@ -28,6 +30,10 @@ public class ExternalCallable implements IgniteCallable { @IgniteInstanceResource Ignite ignite; + /** Logger. */ + @LoggerResource + private IgniteLogger log; + /** */ private int param; @@ -46,10 +52,8 @@ public ExternalCallable(int param) { /** {@inheritDoc} */ @Override public Object call() { - System.err.println("!!!!! I am job " + param + " on " + ignite.name()); + log.info("!!!!! 
I am job " + param + " on " + ignite.name()); return 42; } } - - diff --git a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java index 6a6befc7d6265..b20f3b9556eb0 100644 --- a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java +++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java @@ -18,8 +18,10 @@ package org.apache.ignite.tests.p2p.compute; import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteLogger; import org.apache.ignite.lang.IgniteCallable; import org.apache.ignite.resources.IgniteInstanceResource; +import org.apache.ignite.resources.LoggerResource; /** */ @@ -28,11 +30,14 @@ public class ExternalCallable1 implements IgniteCallable { @IgniteInstanceResource Ignite ignite; + /** Logger. */ + @LoggerResource + private IgniteLogger log; + /** */ private int param; /** - * */ public ExternalCallable1() { // No-op. @@ -47,10 +52,8 @@ public ExternalCallable1(int param) { /** {@inheritDoc} */ @Override public Object call() { - System.err.println("!!!!! I am job_1 " + param + " on " + ignite.name()); + log.info("!!!!! 
I am job_1 " + param + " on " + ignite.name()); return 42; } } - - diff --git a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java index 7d1d0f78f6bd2..48d51bad47ed5 100644 --- a/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java +++ b/modules/extdata/p2p/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java @@ -18,8 +18,10 @@ package org.apache.ignite.tests.p2p.compute; import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteLogger; import org.apache.ignite.lang.IgniteCallable; import org.apache.ignite.resources.IgniteInstanceResource; +import org.apache.ignite.resources.LoggerResource; /** */ @@ -28,11 +30,14 @@ public class ExternalCallable2 implements IgniteCallable { @IgniteInstanceResource Ignite ignite; + /** Logger. */ + @LoggerResource + private IgniteLogger log; + /** */ private int param; /** - * */ public ExternalCallable2() { // No-op. @@ -47,10 +52,8 @@ public ExternalCallable2(int param) { /** {@inheritDoc} */ @Override public Object call() { - System.err.println("!!!!! I am job_2 " + param + " on " + ignite.name()); + log.info("!!!!! I am job_2 " + param + " on " + ignite.name()); return 42; } } - - diff --git a/modules/extdata/uri/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java b/modules/extdata/uri/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java new file mode 100644 index 0000000000000..092019920729f --- /dev/null +++ b/modules/extdata/uri/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.tests.p2p.compute; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.resources.IgniteInstanceResource; +import org.apache.ignite.resources.LoggerResource; + +/** + */ +public class ExternalCallable implements IgniteCallable { + /** */ + @IgniteInstanceResource + Ignite ignite; + + /** Logger. */ + @LoggerResource + private IgniteLogger log; + + /** */ + private int param; + + /** + */ + public ExternalCallable() { + // No-op. + } + + /** + * @param param Param. + */ + public ExternalCallable(int param) { + this.param = param; + } + + /** {@inheritDoc} */ + @Override public Object call() { + log.info("!!!!! I am modified job " + param + " on " + ignite.name()); + + return 43; + } +} diff --git a/modules/extdata/uri/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java b/modules/extdata/uri/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java new file mode 100644 index 0000000000000..fa48f0fc38b11 --- /dev/null +++ b/modules/extdata/uri/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable1.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.tests.p2p.compute; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.resources.IgniteInstanceResource; +import org.apache.ignite.resources.LoggerResource; + +/** + */ +public class ExternalCallable1 implements IgniteCallable { + /** */ + @IgniteInstanceResource + Ignite ignite; + + /** Logger. */ + @LoggerResource + private IgniteLogger log; + + /** */ + private int param; + + /** + */ + public ExternalCallable1() { + // No-op. + } + + /** + * @param param Param. + */ + public ExternalCallable1(int param) { + this.param = param; + } + + /** {@inheritDoc} */ + @Override public Object call() { + log.info("!!!!! 
I am modified job_1 " + param + " on " + ignite.name()); + + return 43; + } +} diff --git a/modules/extdata/uri/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java b/modules/extdata/uri/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java new file mode 100644 index 0000000000000..a1ab9c148a3cf --- /dev/null +++ b/modules/extdata/uri/src/main/java/org/apache/ignite/tests/p2p/compute/ExternalCallable2.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.tests.p2p.compute; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.resources.IgniteInstanceResource; +import org.apache.ignite.resources.LoggerResource; + +/** + */ +public class ExternalCallable2 implements IgniteCallable { + /** */ + @IgniteInstanceResource + Ignite ignite; + + /** Logger. */ + @LoggerResource + private IgniteLogger log; + + /** */ + private int param; + + /** + */ + public ExternalCallable2() { + // No-op. + } + + /** + * @param param Param. 
+ */ + public ExternalCallable2(int param) { + this.param = param; + } + + /** {@inheritDoc} */ + @Override public Object call() { + log.info("!!!!! I am modified job_2 " + param + " on " + ignite.name()); + + return 43; + } +} From b8c76637efd58925bb639c4810e0338e459a84b4 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Mon, 27 Nov 2017 10:31:34 +0700 Subject: [PATCH 128/243] master Fixed serialVersionUID for compatibility. (cherry picked from commit a3b044f) --- .../apache/ignite/internal/visor/VisorDataTransferObject.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/VisorDataTransferObject.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/VisorDataTransferObject.java index 4635c3e36561f..38d7a0ad0aef5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/VisorDataTransferObject.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/VisorDataTransferObject.java @@ -32,6 +32,9 @@ * Base class for data transfer objects. */ public abstract class VisorDataTransferObject implements Externalizable { + /** */ + private static final long serialVersionUID = 6920203681702514010L; + /** Magic number to detect correct transfer objects. */ private static final int MAGIC = 0x42BEEF00; From 89a7a272df5454a54a89da7409806671722fbf83 Mon Sep 17 00:00:00 2001 From: sboikov Date: Mon, 9 Jan 2017 11:42:39 +0300 Subject: [PATCH 129/243] Fixed IGNITE-6838. Restore EvictionPolicy 'maxSize' field default value. 
--- .../ignite/cache/eviction/AbstractEvictionPolicyFactory.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/cache/eviction/AbstractEvictionPolicyFactory.java b/modules/core/src/main/java/org/apache/ignite/cache/eviction/AbstractEvictionPolicyFactory.java index 012c7ee522ea9..aa7dea97728a4 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/eviction/AbstractEvictionPolicyFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/eviction/AbstractEvictionPolicyFactory.java @@ -20,12 +20,14 @@ import javax.cache.configuration.Factory; import org.apache.ignite.internal.util.typedef.internal.A; +import static org.apache.ignite.configuration.CacheConfiguration.DFLT_CACHE_SIZE; + /** * Common functionality implementation for eviction policies factories. */ public abstract class AbstractEvictionPolicyFactory implements Factory { /** */ - private int maxSize; + private int maxSize = DFLT_CACHE_SIZE; /** */ private int batchSize = 1; From 3f7d3bbc437ca6250ef9abab4c75be25ec94d05f Mon Sep 17 00:00:00 2001 From: Dmitriy Shabalin Date: Tue, 28 Nov 2017 10:43:33 +0700 Subject: [PATCH 130/243] IGNITE-7020 Web Console: fixed resize of pinned columns. (cherry picked from commit e4458d4) --- modules/web-console/frontend/app/primitives/ui-grid/index.scss | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/web-console/frontend/app/primitives/ui-grid/index.scss b/modules/web-console/frontend/app/primitives/ui-grid/index.scss index e86eec7a889d5..5caa57cd0b1bc 100644 --- a/modules/web-console/frontend/app/primitives/ui-grid/index.scss +++ b/modules/web-console/frontend/app/primitives/ui-grid/index.scss @@ -401,6 +401,7 @@ .ui-grid-column-resizer { right: -1px; opacity: 0; + z-index: 1000; } } } From 7b9d31d2d106948d5996218a59c071c1d49d05c7 Mon Sep 17 00:00:00 2001 From: alexdel Date: Tue, 28 Nov 2017 17:00:42 +0700 Subject: [PATCH 131/243] IGNITE-4454. Web Console: Minor UI changes. 
(cherry picked from commit 2261b32) --- modules/web-console/frontend/views/sql/sql.tpl.pug | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/web-console/frontend/views/sql/sql.tpl.pug b/modules/web-console/frontend/views/sql/sql.tpl.pug index 7714235693042..98b4d68f0eb3a 100644 --- a/modules/web-console/frontend/views/sql/sql.tpl.pug +++ b/modules/web-console/frontend/views/sql/sql.tpl.pug @@ -32,7 +32,7 @@ mixin result-toolbar mixin chart-settings .total.row - .col-xs-5 + .col-xs-7 .chart-settings-link(ng-show='paragraph.chart && paragraph.chartColumns.length > 0') a(title='Click to show chart settings dialog' ng-click='$event.stopPropagation()' bs-popover data-template-url='{{ $ctrl.chartSettingsTemplateUrl }}' data-placement='bottom' data-auto-close='1' data-trigger='click') i.fa.fa-bars @@ -143,7 +143,7 @@ mixin query-actions mixin table-result-heading-query .total.row - .col-xs-5 + .col-xs-7 grid-column-selector(grid-api='paragraph.gridOptions.api') .fa.fa-bars.icon label Page: #[b {{paragraph.page}}] @@ -153,7 +153,7 @@ mixin table-result-heading-query .col-xs-2 div(ng-if='paragraph.qryType === "query"') +result-toolbar - .col-xs-5 + .col-xs-3 .pull-right .btn-group.panel-tip-container button.btn.btn-primary.btn--with-icon( @@ -188,7 +188,7 @@ mixin table-result-heading-query mixin table-result-heading-scan .total.row - .col-xs-5 + .col-xs-7 grid-column-selector(grid-api='paragraph.gridOptions.api') .fa.fa-bars.icon label Page: #[b {{paragraph.page}}] @@ -198,7 +198,7 @@ mixin table-result-heading-scan .col-xs-2 div(ng-if='paragraph.qryType === "query"') +result-toolbar - .col-xs-5 + .col-xs-3 .pull-right .btn-group.panel-tip-container // TODO: replace this logic for exporting under one component From 601c90c9633e80ff33c30177292235cabf466dae Mon Sep 17 00:00:00 2001 From: alexdel Date: Tue, 28 Nov 2017 22:27:43 +0700 Subject: [PATCH 132/243] IGNITE-6919. Web Console: Minor fix of page title. 
(cherry picked from commit d2c21bc) --- .../frontend/app/modules/branding/branding.service.js | 2 +- modules/web-console/frontend/views/index.pug | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/web-console/frontend/app/modules/branding/branding.service.js b/modules/web-console/frontend/app/modules/branding/branding.service.js index 46bc3580b8ca1..564aa855a0603 100644 --- a/modules/web-console/frontend/app/modules/branding/branding.service.js +++ b/modules/web-console/frontend/app/modules/branding/branding.service.js @@ -19,7 +19,7 @@ export default class { static $inject = ['IgniteVersion']; constructor(Version) { - this.titleSuffix = ' – Apache Ignite Web Console'; + this.titleSuffix = ' - Apache Ignite Web Console'; this.headerLogo = '/images/ignite-logo.svg'; diff --git a/modules/web-console/frontend/views/index.pug b/modules/web-console/frontend/views/index.pug index 9565949120d86..e91af9bda889f 100644 --- a/modules/web-console/frontend/views/index.pug +++ b/modules/web-console/frontend/views/index.pug @@ -19,7 +19,7 @@ html(ng-app='ignite-console' id='app' ng-strict-di) head base(href='/') - meta(http-equiv='content-type' content='text/html; charset=UTF8') + meta(http-equiv='content-type' content='text/html; charset=utf-8') meta(http-equiv='content-language' content='en') meta(http-equiv='X-UA-Compatible' content='IE=Edge') From 9e5ea4394f344b6064f0abf616e932eddc08160d Mon Sep 17 00:00:00 2001 From: Alexey Popov Date: Tue, 31 Oct 2017 14:33:58 +0300 Subject: [PATCH 133/243] Backported IGNITE-6690 DiscoverySpi: Clientmode Ignite should not fail on handshake errors --- .../ignite/spi/discovery/tcp/ClientImpl.java | 14 +- .../ignite/spi/discovery/tcp/ServerImpl.java | 14 +- .../spi/discovery/tcp/TcpDiscoverySpi.java | 22 ++ .../tcp/TcpDiscoveryWithWrongServerTest.java | 332 ++++++++++++++++++ .../IgniteSpiDiscoverySelfTestSuite.java | 5 +- 5 files changed, 378 insertions(+), 9 deletions(-) create mode 100644 
modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryWithWrongServerTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java index 139c11049ecc5..c9a4a5a6ac393 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java @@ -702,11 +702,17 @@ else if (addrs.isEmpty()) { } if (X.hasCause(e, StreamCorruptedException.class)) { - if (--sslConnectAttempts == 0) - throw new IgniteSpiException("Unable to establish plain connection. " + - "Was remote cluster configured with SSL? [rmtAddr=" + addr + ", errMsg=\"" + e.getMessage() + "\"]", e); + // StreamCorruptedException could be caused by remote node failover + if (connectAttempts < 2) { + connectAttempts++; - continue; + continue; + } + + if (log.isDebugEnabled()) + log.debug("Connect failed with StreamCorruptedException, skip address: " + addr); + + break; } if (timeoutHelper.checkFailureTimeoutReached(e)) diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java index ada078012681e..d15fbde036023 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java @@ -1272,11 +1272,17 @@ else if (U.currentTimeMillis() - noResStart > spi.joinTimeout) } if (X.hasCause(e, StreamCorruptedException.class)) { - if (--sslConnectAttempts == 0) - throw new IgniteException("Unable to establish plain connection. " + - "Was remote cluster configured with SSL? 
[rmtAddr=" + addr + ", errMsg=\"" + e.getMessage() + "\"]", e); + // StreamCorruptedException could be caused by remote node failover + if (connectAttempts < 2) { + connectAttempts++; - continue; + continue; + } + + if (log.isDebugEnabled()) + log.debug("Connect failed with StreamCorruptedException, skip address: " + addr); + + break; } if (timeoutHelper.checkFailureTimeoutReached(e)) diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java index eb8ee30d77aee..acc7233b83f3b 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java @@ -23,6 +23,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.Serializable; +import java.io.StreamCorruptedException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Socket; @@ -40,7 +41,9 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.regex.Pattern; import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLException; import javax.net.ssl.SSLServerSocketFactory; import javax.net.ssl.SSLSocketFactory; import org.apache.ignite.Ignite; @@ -265,6 +268,9 @@ public class TcpDiscoverySpi extends IgniteSpiAdapter implements DiscoverySpi { /** Maximum ack timeout value for receiving message acknowledgement in milliseconds (value is 600,000ms). */ public static final long DFLT_MAX_ACK_TIMEOUT = 10 * 60 * 1000; + /** Ssl message pattern for StreamCorruptedException. */ + private static Pattern sslMsgPattern = Pattern.compile("invalid stream header: 150\\d0\\d00"); + /** Local address. 
*/ protected String locAddr; @@ -1598,6 +1604,22 @@ protected T readMessage(Socket sock, @Nullable InputStream in, long timeout) "long GC pauses on remote node) [curTimeout=" + timeout + ", rmtAddr=" + sock.getRemoteSocketAddress() + ", rmtPort=" + sock.getPort() + ']'); + StreamCorruptedException streamCorruptedCause = X.cause(e, StreamCorruptedException.class); + + if (streamCorruptedCause != null) { + // Lets check StreamCorruptedException for SSL Alert message + // Sample SSL Alert message: 15:03:03:00:02:02:0a + // 15 = Alert + // 03:03 = SSL version + // 00:02 = payload length + // 02:0a = critical (02) / unexpected message (0a) + // So, check message for "invalid stream header: 150X0X00" + + String msg = streamCorruptedCause.getMessage(); + + if (msg != null && sslMsgPattern.matcher(msg).matches()) + streamCorruptedCause.initCause(new SSLException("Detected SSL alert in StreamCorruptedException")); + } throw e; } finally { diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryWithWrongServerTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryWithWrongServerTest.java new file mode 100644 index 0000000000000..768f5f7f3d281 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryWithWrongServerTest.java @@ -0,0 +1,332 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.spi.discovery.tcp; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import org.apache.ignite.Ignite; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.spi.IgniteSpiException; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestThread; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +/** + * Client-based discovery SPI test with non-Ignite servers. + */ +public class TcpDiscoveryWithWrongServerTest extends GridCommonAbstractTest { + /** Non-Ignite Server port. */ + private final static int SERVER_PORT = 47500; + + /** Non-Ignite Server socket. */ + private ServerSocket srvSock; + + /** Count of accepted connections to non-Ignite Server. */ + private int connCnt; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder(); + + ipFinder.setAddresses(Collections.singleton("127.0.0.1:" + Integer.toString(SERVER_PORT) + ".." 
+ + Integer.toString(SERVER_PORT + 2))); + + cfg.setDiscoverySpi(new TcpDiscoverySpiWithOrderedIps().setIpFinder(ipFinder)); + + if (igniteInstanceName.startsWith("client")) + cfg.setClientMode(true); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopTcpThread(); + + stopAllGrids(); + + super.afterTest(); + } + + /** + * Starts tcp test thread + * @param workerFactory one of WorkerFactory + */ + private void startTcpThread(final WorkerFactory workerFactory) { + connCnt = 0; + + try { + srvSock = new ServerSocket(SERVER_PORT, 10, InetAddress.getByName("127.0.0.1")); + } + catch (Exception e) { + fail("Unexpected TcpServer exception " + e.getMessage()); + } + + new GridTestThread(new Runnable() { + @Override public void run() { + try { + while(!Thread.currentThread().isInterrupted()) { + Socket clientSock = srvSock.accept(); + + connCnt++; + + // Create a new thread for socket connection. + new GridTestThread(workerFactory.newWorker(clientSock)).start(); + } + } + catch (Exception e) { + if (!srvSock.isClosed()) + e.printStackTrace(); + } + } + }).start(); + } + + /** + * Stops tcp test thread + * @throws IOException IOException + */ + private void stopTcpThread() throws IOException { + if (srvSock != null && !srvSock.isClosed()) + srvSock.close(); + } + + /** + * Test that Client successfully ignores wrong responses during Discovery Handshake Procedure. + * + * @throws Exception in case of error. + */ + public void testWrongHandshakeResponse() throws Exception { + startTcpThread(new SomeResponseWorker()); + + simpleTest(); + } + + /** + * Test that Client successfully ignores wrong responses during Discovery Handshake Procedure. + * + * @throws Exception in case of error. + */ + public void testNoHandshakeResponse() throws Exception { + startTcpThread(new NoResponseWorker()); + + simpleTest(); + } + + /** + * Test that Client successfully ignores when server closes sockets after Discovery Handshake Request. 
+ * + * @throws Exception in case of error. + */ + public void testDisconnectOnRequest() throws Exception { + startTcpThread(new DisconnectOnRequestWorker()); + + simpleTest(); + } + + /** + * Test that Client successfully ignores when server closes sockets immediately. + * + * @throws Exception in case of error. + */ + public void testEarlyDisconnect() throws Exception { + startTcpThread(new EarlyDisconnectWorker()); + + simpleTest(); + } + + /** + * Some simple sanity check with the Server and Client + * It is expected that both client and server could successfully perform Discovery Procedure when there is + * unknown (test) server in the ipFinder list. + */ + private void simpleTest() { + try { + Ignite srv = startGrid("server"); + Ignite client = startGrid("client"); + + awaitPartitionMapExchange(); + + assertEquals(2, srv.cluster().nodes().size()); + assertEquals(2, client.cluster().nodes().size()); + assertTrue(connCnt >= 2); + + srv.getOrCreateCache(DEFAULT_CACHE_NAME).put(1, 1); + + assertEquals(1, client.getOrCreateCache(DEFAULT_CACHE_NAME).get(1)); + } + catch (Exception e) { + fail("Failed with unexpected exception: " + e.getMessage()); + } + } + + /** + * Just a factory for runnable workers + */ + private interface WorkerFactory { + /** + * Creates a new worker for socket + * @param clientSock socket for worker + * @return runnable Worker + */ + Runnable newWorker(Socket clientSock); + } + + /** + * SocketWorker + */ + private abstract class SocketWorker implements Runnable { + /** Client socket. */ + Socket clientSock; + + /** + * @param clientSock Client socket. 
+ */ + SocketWorker(Socket clientSock) { + this.clientSock = clientSock; + } + + /** {@inheritDoc} */ + @Override public void run() { + try { + InputStream input = clientSock.getInputStream(); + OutputStream output = clientSock.getOutputStream(); + byte[] buf = new byte[1024]; + + while (!clientSock.isClosed() && input.read(buf) > 0) + action(input, output); + + if (!clientSock.isClosed()) + clientSock.close(); + } + catch (IOException e) { + log.error("Unexpected error", e); + } + } + + /** + * @param input socket input stream + * @param output socket output stream + * @throws IOException IOException + */ + public abstract void action(InputStream input, OutputStream output) throws IOException; + } + + /** + * SomeResponseWorker. + */ + private class SomeResponseWorker implements WorkerFactory { + /** {@inheritDoc} */ + @Override public Runnable newWorker(Socket clientSock) { + return new SocketWorker(clientSock) { + @Override public void action(InputStream input, OutputStream output) throws IOException { + output.write("Some response".getBytes()); + + log.error("TEST: Some response was sent to " + clientSock.getRemoteSocketAddress()); + } + }; + } + } + + /** + * NoResponseWorker. + */ + private class NoResponseWorker implements WorkerFactory { + /** {@inheritDoc} */ + @Override public Runnable newWorker(Socket clientSock) { + return new SocketWorker(clientSock) { + @Override public void action(InputStream input, OutputStream output) throws IOException { + log.error("TEST: No response was sent to " + clientSock.getRemoteSocketAddress()); + } + }; + } + } + + /** + * DisconnectOnRequestWorker. 
+ */ + private class DisconnectOnRequestWorker implements WorkerFactory { + /** {@inheritDoc} */ + @Override public Runnable newWorker(Socket clientSock) { + return new SocketWorker(clientSock) { + @Override public void action(InputStream input, OutputStream output) throws IOException { + clientSock.close(); + + log.error("TEST: Socket closed for " + clientSock.getRemoteSocketAddress()); + } + }; + } + } + + /** + * EarlyDisconnectWorker. + */ + private class EarlyDisconnectWorker implements WorkerFactory { + /** {@inheritDoc} */ + @Override public Runnable newWorker(Socket clientSock) { + return new SocketWorker(clientSock) { + @Override public void action(InputStream input, OutputStream output) throws IOException { + // No-op + } + + @Override public void run() { + try { + clientSock.close(); + + log.error("TEST: Socket closed for " + clientSock.getRemoteSocketAddress()); + } + catch (IOException e) { + log.error("Unexpected error", e); + } + } + }; + } + } + + /** + * TcpDiscoverySpi with non-shuffled resolved IP addresses. 
We should ensure that in this test non-Ignite server + * is the first element of the addresses list + */ + class TcpDiscoverySpiWithOrderedIps extends TcpDiscoverySpi { + /** {@inheritDoc} */ + @Override protected Collection resolvedAddresses() throws IgniteSpiException { + Collection shuffled = super.resolvedAddresses(); + List res = new ArrayList<>(shuffled); + + Collections.sort(res, new Comparator() { + @Override public int compare(InetSocketAddress o1, InetSocketAddress o2) { + return o1.toString().compareTo(o2.toString()); + } + }); + + return res; + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiDiscoverySelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiDiscoverySelfTestSuite.java index 3335797a1222e..4a3f3f2e0a2db 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiDiscoverySelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiDiscoverySelfTestSuite.java @@ -42,6 +42,7 @@ import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySslSelfTest; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySslTrustedSelfTest; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySslTrustedUntrustedTest; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoveryWithWrongServerTest; import org.apache.ignite.spi.discovery.tcp.ipfinder.jdbc.TcpDiscoveryJdbcIpFinderSelfTest; import org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinderSelfTest; import org.apache.ignite.spi.discovery.tcp.ipfinder.sharedfs.TcpDiscoverySharedFsIpFinderSelfTest; @@ -94,7 +95,9 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(TcpDiscoveryNodeAttributesUpdateOnReconnectTest.class)); suite.addTest(new TestSuite(AuthenticationRestartTest.class)); - //Client connect + suite.addTest(new TestSuite(TcpDiscoveryWithWrongServerTest.class)); + + // Client connect. 
suite.addTest(new TestSuite(IgniteClientConnectTest.class)); suite.addTest(new TestSuite(IgniteClientReconnectMassiveShutdownTest.class)); From 0edbeb7a9aef7f59dced618a3c221e9a871d5dce Mon Sep 17 00:00:00 2001 From: Alexey Popov Date: Thu, 2 Nov 2017 13:12:03 +0300 Subject: [PATCH 134/243] Fixed TcpDiscoveryWithWrongServerTest. Signed-off-by: nikolay_tikhonov --- .../tcp/TcpDiscoveryWithWrongServerTest.java | 52 ++++++++++--------- 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryWithWrongServerTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryWithWrongServerTest.java index 768f5f7f3d281..ffd0d030c643e 100644 --- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryWithWrongServerTest.java +++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryWithWrongServerTest.java @@ -29,6 +29,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.ignite.Ignite; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.spi.IgniteSpiException; @@ -40,14 +41,17 @@ * Client-based discovery SPI test with non-Ignite servers. */ public class TcpDiscoveryWithWrongServerTest extends GridCommonAbstractTest { - /** Non-Ignite Server port. */ + /** Non-Ignite Server port #1. */ private final static int SERVER_PORT = 47500; - /** Non-Ignite Server socket. */ - private ServerSocket srvSock; + /** Non-Ignite Server port #2. */ + private final static int LAST_SERVER_PORT = SERVER_PORT + 5; + + /** Non-Ignite Server sockets. */ + private List srvSocks = new ArrayList<>(); /** Count of accepted connections to non-Ignite Server. 
*/ - private int connCnt; + private AtomicInteger connCnt = new AtomicInteger(0); /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { @@ -56,7 +60,7 @@ public class TcpDiscoveryWithWrongServerTest extends GridCommonAbstractTest { TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder(); ipFinder.setAddresses(Collections.singleton("127.0.0.1:" + Integer.toString(SERVER_PORT) + ".." + - Integer.toString(SERVER_PORT + 2))); + Integer.toString(LAST_SERVER_PORT))); cfg.setDiscoverySpi(new TcpDiscoverySpiWithOrderedIps().setIpFinder(ipFinder)); @@ -68,7 +72,7 @@ public class TcpDiscoveryWithWrongServerTest extends GridCommonAbstractTest { /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { - stopTcpThread(); + stopTcpThreads(); stopAllGrids(); @@ -79,15 +83,10 @@ public class TcpDiscoveryWithWrongServerTest extends GridCommonAbstractTest { * Starts tcp test thread * @param workerFactory one of WorkerFactory */ - private void startTcpThread(final WorkerFactory workerFactory) { - connCnt = 0; + private void startTcpThread(final WorkerFactory workerFactory, final int port) throws Exception { + final ServerSocket srvSock = new ServerSocket(port, 10, InetAddress.getByName("127.0.0.1")); - try { - srvSock = new ServerSocket(SERVER_PORT, 10, InetAddress.getByName("127.0.0.1")); - } - catch (Exception e) { - fail("Unexpected TcpServer exception " + e.getMessage()); - } + srvSocks.add(srvSock); new GridTestThread(new Runnable() { @Override public void run() { @@ -95,7 +94,7 @@ private void startTcpThread(final WorkerFactory workerFactory) { while(!Thread.currentThread().isInterrupted()) { Socket clientSock = srvSock.accept(); - connCnt++; + connCnt.getAndIncrement(); // Create a new thread for socket connection. 
new GridTestThread(workerFactory.newWorker(clientSock)).start(); @@ -103,7 +102,7 @@ private void startTcpThread(final WorkerFactory workerFactory) { } catch (Exception e) { if (!srvSock.isClosed()) - e.printStackTrace(); + log.error("Unexpected error", e); } } }).start(); @@ -113,9 +112,10 @@ private void startTcpThread(final WorkerFactory workerFactory) { * Stops tcp test thread * @throws IOException IOException */ - private void stopTcpThread() throws IOException { - if (srvSock != null && !srvSock.isClosed()) - srvSock.close(); + private void stopTcpThreads() throws IOException { + for (ServerSocket srvSock: srvSocks) + if (!srvSock.isClosed()) + srvSock.close(); } /** @@ -124,7 +124,8 @@ private void stopTcpThread() throws IOException { * @throws Exception in case of error. */ public void testWrongHandshakeResponse() throws Exception { - startTcpThread(new SomeResponseWorker()); + startTcpThread(new SomeResponseWorker(), SERVER_PORT); + startTcpThread(new SomeResponseWorker(), LAST_SERVER_PORT); simpleTest(); } @@ -135,7 +136,8 @@ public void testWrongHandshakeResponse() throws Exception { * @throws Exception in case of error. */ public void testNoHandshakeResponse() throws Exception { - startTcpThread(new NoResponseWorker()); + startTcpThread(new NoResponseWorker(), SERVER_PORT); + startTcpThread(new NoResponseWorker(), LAST_SERVER_PORT); simpleTest(); } @@ -146,7 +148,8 @@ public void testNoHandshakeResponse() throws Exception { * @throws Exception in case of error. */ public void testDisconnectOnRequest() throws Exception { - startTcpThread(new DisconnectOnRequestWorker()); + startTcpThread(new DisconnectOnRequestWorker(), SERVER_PORT); + startTcpThread(new DisconnectOnRequestWorker(), LAST_SERVER_PORT); simpleTest(); } @@ -157,7 +160,8 @@ public void testDisconnectOnRequest() throws Exception { * @throws Exception in case of error. 
*/ public void testEarlyDisconnect() throws Exception { - startTcpThread(new EarlyDisconnectWorker()); + startTcpThread(new EarlyDisconnectWorker(), SERVER_PORT); + startTcpThread(new EarlyDisconnectWorker(), LAST_SERVER_PORT); simpleTest(); } @@ -176,7 +180,7 @@ private void simpleTest() { assertEquals(2, srv.cluster().nodes().size()); assertEquals(2, client.cluster().nodes().size()); - assertTrue(connCnt >= 2); + assertTrue(connCnt.get() >= 2); srv.getOrCreateCache(DEFAULT_CACHE_NAME).put(1, 1); From 0e04b36e7edaf2350c0a4769c4cf5e7885f7e37e Mon Sep 17 00:00:00 2001 From: apopov Date: Thu, 9 Nov 2017 11:13:53 +0300 Subject: [PATCH 135/243] GG-13042 Backport GG-13035 changes to 1.9.x to keep libs in sync --- examples/pom-standalone-lgpl.xml | 8 +------- examples/pom-standalone.xml | 8 +------- examples/pom.xml | 14 +------------- modules/clients/pom.xml | 8 +------- modules/ignored-tests/pom.xml | 10 ++-------- 5 files changed, 6 insertions(+), 42 deletions(-) diff --git a/examples/pom-standalone-lgpl.xml b/examples/pom-standalone-lgpl.xml index 30b5b80349a9d..3577d9cdcc0cc 100644 --- a/examples/pom-standalone-lgpl.xml +++ b/examples/pom-standalone-lgpl.xml @@ -81,13 +81,7 @@ com.google.code.simple-spring-memcached spymemcached - 2.7.3 - - - commons-codec - commons-codec - - + 2.8.4 diff --git a/examples/pom-standalone.xml b/examples/pom-standalone.xml index cdef8e169811c..2c7cd1b59ce94 100644 --- a/examples/pom-standalone.xml +++ b/examples/pom-standalone.xml @@ -81,13 +81,7 @@ com.google.code.simple-spring-memcached spymemcached - 2.7.3 - - - commons-codec - commons-codec - - + 2.8.4 diff --git a/examples/pom.xml b/examples/pom.xml index 30d23ae26bb74..1fbeac98e7d9e 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -70,13 +70,7 @@ com.google.code.simple-spring-memcached spymemcached - 2.7.3 - - - commons-codec - commons-codec - - + 2.8.4 @@ -140,12 +134,6 @@ ignite-spark ${project.version} - - - org.jboss.netty - netty - 3.2.9.Final - diff --git 
a/modules/clients/pom.xml b/modules/clients/pom.xml index 002f16f8d69e3..39e17ecaae710 100644 --- a/modules/clients/pom.xml +++ b/modules/clients/pom.xml @@ -44,14 +44,8 @@ com.google.code.simple-spring-memcached spymemcached - 2.7.3 + 2.8.4 test - - - commons-codec - commons-codec - - diff --git a/modules/ignored-tests/pom.xml b/modules/ignored-tests/pom.xml index 00c7d554d7650..8e3993b29022f 100644 --- a/modules/ignored-tests/pom.xml +++ b/modules/ignored-tests/pom.xml @@ -164,14 +164,8 @@ com.google.code.simple-spring-memcached spymemcached - 2.7.3 - test - - - commons-codec - commons-codec - - + 2.8.4 + test From c2b8c9329916c7049fd43dab3f6a6bfb617bd8ef Mon Sep 17 00:00:00 2001 From: dkarachentsev Date: Mon, 13 Nov 2017 10:35:21 +0300 Subject: [PATCH 136/243] IGNITE-6818 Handle half open connection in communication. (cherry picked from commit 191295d) --- .../tcp/TcpCommunicationSpi.java | 37 +++-- ...municationSpiHalfOpenedConnectionTest.java | 142 ++++++++++++++++++ .../IgniteSpiCommunicationSelfTestSuite.java | 2 + 3 files changed, 168 insertions(+), 13 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpiHalfOpenedConnectionTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java index ec586b450fb13..6df7208315773 100755 --- a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java @@ -539,15 +539,7 @@ private void onFirstMessage(final GridNioSession ses, Message msg) { if (c.failed) { ses.send(new RecoveryLastReceivedMessage(ALREADY_CONNECTED)); - for (GridNioSession ses0 : nioSrvr.sessions()) { - ConnectionKey key0 = ses0.meta(CONN_IDX_META); - - if (ses0.accepted() && key0 != null && - 
key0.nodeId().equals(connKey.nodeId()) && - key0.connectionIndex() == connKey.connectionIndex() && - key0.connectCount() < connKey.connectCount()) - ses0.close(); - } + closeStaleConnections(connKey); } } } @@ -567,11 +559,13 @@ private void onFirstMessage(final GridNioSession ses, Message msg) { if (oldClient instanceof GridTcpNioCommunicationClient) { if (log.isInfoEnabled()) log.info("Received incoming connection when already connected " + - "to this node, rejecting [locNode=" + locNode.id() + - ", rmtNode=" + sndId + ']'); + "to this node, rejecting [locNode=" + locNode.id() + + ", rmtNode=" + sndId + ']'); ses.send(new RecoveryLastReceivedMessage(ALREADY_CONNECTED)); + closeStaleConnections(connKey); + return; } else { @@ -599,11 +593,13 @@ private void onFirstMessage(final GridNioSession ses, Message msg) { if (log.isInfoEnabled()) log.info("Received incoming connection when already connected " + - "to this node, rejecting [locNode=" + locNode.id() + - ", rmtNode=" + sndId + ']'); + "to this node, rejecting [locNode=" + locNode.id() + + ", rmtNode=" + sndId + ']'); ses.send(new RecoveryLastReceivedMessage(ALREADY_CONNECTED)); + closeStaleConnections(connKey); + fut.onDone(oldClient); return; @@ -658,6 +654,21 @@ private void onFirstMessage(final GridNioSession ses, Message msg) { } } + /** + * @param connKey Connection key. 
+ */ + private void closeStaleConnections(ConnectionKey connKey) { + for (GridNioSession ses0 : nioSrvr.sessions()) { + ConnectionKey key0 = ses0.meta(CONN_IDX_META); + + if (ses0.accepted() && key0 != null && + key0.nodeId().equals(connKey.nodeId()) && + key0.connectionIndex() == connKey.connectionIndex() && + key0.connectCount() < connKey.connectCount()) + ses0.close(); + } + } + @Override public void onMessage(final GridNioSession ses, Message msg) { ConnectionKey connKey = ses.meta(CONN_IDX_META); diff --git a/modules/core/src/test/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpiHalfOpenedConnectionTest.java b/modules/core/src/test/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpiHalfOpenedConnectionTest.java new file mode 100644 index 0000000000000..3e10f942c4459 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpiHalfOpenedConnectionTest.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.spi.communication.tcp; + +import java.io.IOException; +import java.util.Iterator; +import java.util.UUID; +import java.util.concurrent.ConcurrentMap; +import org.apache.ignite.Ignite; +import org.apache.ignite.cluster.ClusterGroup; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.util.nio.GridCommunicationClient; +import org.apache.ignite.internal.util.nio.GridNioRecoveryDescriptor; +import org.apache.ignite.internal.util.nio.GridNioServerListener; +import org.apache.ignite.internal.util.nio.GridTcpNioCommunicationClient; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.plugin.extensions.communication.Message; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +/** + * Tests case when connection is closed only for one side, when other is not notified. + */ +public class TcpCommunicationSpiHalfOpenedConnectionTest extends GridCommonAbstractTest { + /** Client spi. */ + private TcpCommunicationSpi clientSpi; + + /** Paired connections. */ + private boolean pairedConnections; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + if (igniteInstanceName.contains("client")) { + cfg.setClientMode(true); + + clientSpi = (TcpCommunicationSpi)cfg.getCommunicationSpi(); + } + + ((TcpCommunicationSpi)cfg.getCommunicationSpi()).setUsePairedConnections(pairedConnections); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(true); + } + + /** + * @throws Exception If failed. + */ + public void testReconnect() throws Exception { + pairedConnections = false; + + checkReconnect(); + } + + /** + * @throws Exception If failed. 
+ */ + public void testReconnectPaired() throws Exception { + pairedConnections = true; + + checkReconnect(); + } + + /** + * @throws Exception If failed. + */ + private void checkReconnect() throws Exception { + Ignite srv = startGrid("server"); + Ignite client = startGrid("client"); + + UUID nodeId = srv.cluster().localNode().id(); + + System.out.println(">> Server ID: " + nodeId); + + ClusterGroup srvGrp = client.cluster().forNodeId(nodeId); + + System.out.println(">> Send job"); + + // Establish connection + client.compute(srvGrp).run(F.noop()); + + ConcurrentMap clients = U.field(clientSpi, "clients"); + ConcurrentMap recoveryDescs = U.field(clientSpi, "recoveryDescs"); + ConcurrentMap outRecDescs = U.field(clientSpi, "outRecDescs"); + ConcurrentMap inRecDescs = U.field(clientSpi, "inRecDescs"); + GridNioServerListener lsnr = U.field(clientSpi, "srvLsnr"); + + Iterator it = F.concat( + recoveryDescs.values().iterator(), + outRecDescs.values().iterator(), + inRecDescs.values().iterator() + ); + + while (it.hasNext()) { + GridNioRecoveryDescriptor desc = it.next(); + + // Need to simulate connection close in GridNioServer as it + // releases descriptors on disconnect. + desc.release(); + } + + // Remove client to avoid calling close(), in that case server + // will close connection too, but we want to keep the server + // uninformed and force ping old connection. 
+ GridCommunicationClient[] clients0 = clients.remove(nodeId); + + for (GridCommunicationClient commClient : clients0) + lsnr.onDisconnected(((GridTcpNioCommunicationClient)commClient).session(), new IOException("Test exception")); + + info(">> Removed client"); + + // Reestablish connection + client.compute(srvGrp).run(F.noop()); + + info(">> Sent second job"); + } + + /** {@inheritDoc} */ + @Override protected long getTestTimeout() { + return 30_000; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiCommunicationSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiCommunicationSelfTestSuite.java index 77de3fcc54999..8e96a3f223604 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiCommunicationSelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiCommunicationSelfTestSuite.java @@ -38,6 +38,7 @@ import org.apache.ignite.spi.communication.tcp.IgniteTcpCommunicationRecoveryAckClosureSelfTest; import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpiDropNodesTest; import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpiFaultyClientTest; +import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpiHalfOpenedConnectionTest; /** * Test suite for all communication SPIs. @@ -78,6 +79,7 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(TcpCommunicationSpiFaultyClientTest.class)); suite.addTest(new TestSuite(TcpCommunicationSpiDropNodesTest.class)); + suite.addTest(new TestSuite(TcpCommunicationSpiHalfOpenedConnectionTest.class)); return suite; } From 2644b51cb861253e175032ecb82c9794c98a8e67 Mon Sep 17 00:00:00 2001 From: alexdel Date: Tue, 5 Dec 2017 11:40:35 +0700 Subject: [PATCH 137/243] IGNITE-4398. Prevent $http caching in IE11. 
(cherry picked from commit f418563) --- modules/web-console/backend/middlewares/api.js | 5 +++++ modules/web-console/frontend/app/helpers/jade/mixins.pug | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/web-console/backend/middlewares/api.js b/modules/web-console/backend/middlewares/api.js index aa599aea180ab..23fd7ae074683 100644 --- a/modules/web-console/backend/middlewares/api.js +++ b/modules/web-console/backend/middlewares/api.js @@ -25,6 +25,11 @@ module.exports = { module.exports.factory = () => { return (req, res, next) => { + // Set headers to avoid API caching in browser (esp. IE) + res.header('Cache-Control', 'must-revalidate'); + res.header('Expires', '-1'); + res.header('Last-Modified', new Date().toUTCString()); + res.api = { error(err) { if (err.name === 'MongoError') diff --git a/modules/web-console/frontend/app/helpers/jade/mixins.pug b/modules/web-console/frontend/app/helpers/jade/mixins.pug index 3e390778ed21a..7eb4d8946252d 100644 --- a/modules/web-console/frontend/app/helpers/jade/mixins.pug +++ b/modules/web-console/frontend/app/helpers/jade/mixins.pug @@ -40,7 +40,7 @@ mixin main-table(title, rows, focusId, click, rowTemplate, searchField) thead tr th - lable.labelHeader.labelFormField #{title}: + label.labelHeader.labelFormField #{title}: .col-sm-3.pull-right(style='padding: 0') input.form-control(type='text' st-search=`${searchField}` placeholder=`Filter ${title}...`) tbody From 248ad6a87cf3066330ae1c463ea565a05302076d Mon Sep 17 00:00:00 2001 From: Ilya Borisov Date: Tue, 5 Dec 2017 14:00:01 +0700 Subject: [PATCH 138/243] IGNITE-6873 Notify users about outdated browser. 
(cherry picked from commit ff3712c) --- .../frontend/app/browserUpdate/index.js | 34 ++++++++++++++++++ .../frontend/app/browserUpdate/style.scss | 36 +++++++++++++++++++ modules/web-console/frontend/package.json | 5 +-- .../frontend/webpack/webpack.common.js | 3 +- 4 files changed, 75 insertions(+), 3 deletions(-) create mode 100644 modules/web-console/frontend/app/browserUpdate/index.js create mode 100644 modules/web-console/frontend/app/browserUpdate/style.scss diff --git a/modules/web-console/frontend/app/browserUpdate/index.js b/modules/web-console/frontend/app/browserUpdate/index.js new file mode 100644 index 0000000000000..85df64e93ed78 --- /dev/null +++ b/modules/web-console/frontend/app/browserUpdate/index.js @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import browserUpdate from 'browser-update'; +import './style.scss'; + +browserUpdate({ + notify: { + i: 10, + f: '-18m', + s: 9, + c: '-18m', + o: '-18m', + e: '-6m' + }, + l: 'en', + mobile: false, + api: 5, + reminder: 0 +}); diff --git a/modules/web-console/frontend/app/browserUpdate/style.scss b/modules/web-console/frontend/app/browserUpdate/style.scss new file mode 100644 index 0000000000000..5842f5f80b271 --- /dev/null +++ b/modules/web-console/frontend/app/browserUpdate/style.scss @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +@import "./../primitives/btn/index.scss"; + +#buorg.buorg { + background-color: $brand-warning; + line-height: 16px; + font-family: Roboto, sans-serif; + + a { + @extend .btn-ignite; + + &#buorgul { + @extend .btn-ignite--success; + } + + &#buorgig { + @extend .btn-ignite--primary; + } + } +} diff --git a/modules/web-console/frontend/package.json b/modules/web-console/frontend/package.json index 18635f7e71c09..5b1734fa91cc1 100644 --- a/modules/web-console/frontend/package.json +++ b/modules/web-console/frontend/package.json @@ -61,6 +61,7 @@ "babel-runtime": "6.25.0", "bootstrap-sass": "3.3.7", "brace": "0.10.0", + "browser-update": "2.1.9", "copy-webpack-plugin": "4.0.1", "css-loader": "0.28.7", "eslint": "4.3.0", @@ -102,7 +103,6 @@ }, "devDependencies": { "chai": "4.1.0", - "type-detect": "4.0.3", "jasmine-core": "2.6.4", "karma": "1.7.0", "karma-babel-preprocessor": "6.0.1", @@ -115,6 +115,7 @@ "mocha": "3.4.2", "mocha-teamcity-reporter": "1.1.1", "phantomjs-prebuilt": "2.1.14", - "sinon": "2.3.8" + "sinon": "2.3.8", + "type-detect": "4.0.3" } } diff --git a/modules/web-console/frontend/webpack/webpack.common.js b/modules/web-console/frontend/webpack/webpack.common.js index 48e1e9b93593b..a0d6d0c8698c2 100644 --- a/modules/web-console/frontend/webpack/webpack.common.js +++ b/modules/web-console/frontend/webpack/webpack.common.js @@ -44,7 +44,8 @@ export default { entry: { polyfill: 'babel-polyfill', vendor: path.join(app, 'vendor.js'), - app: path.join(app, 'app.js') + app: path.join(app, 'app.js'), + browserUpdate: path.join(app, 'browserUpdate', 'index.js') }, // Output system. From 4f5c30ade399f55cecbf506bf9db7cf3e64e7656 Mon Sep 17 00:00:00 2001 From: Alexey Popov Date: Wed, 6 Dec 2017 16:21:20 +0300 Subject: [PATCH 139/243] IGNITE-6971 Ignite Logger type & logging file config indication. This closes #3095. 
Signed-off-by: nikolay_tikhonov --- .../ignite/internal/GridLoggerProxy.java | 9 +++++ .../apache/ignite/internal/IgniteKernal.java | 13 ++++++ .../logger/platform/PlatformLogger.java | 15 +++++++ .../org/apache/ignite/logger/NullLogger.java | 6 +++ .../apache/ignite/logger/java/JavaLogger.java | 17 ++++++++ .../ignite/logger/java/JavaLoggerTest.java | 5 +++ .../junits/logger/GridTestLog4jLogger.java | 21 +++++----- .../ignite/logger/log4j/Log4JLogger.java | 40 ++++++++++++++----- .../log4j/GridLog4jCorrectFileNameTest.java | 13 ++++++ .../log4j/GridLog4jInitializedTest.java | 7 +++- .../log4j/GridLog4jLoggingFileTest.java | 13 ++++-- .../log4j/GridLog4jLoggingPathTest.java | 12 +++++- .../logger/log4j/GridLog4jLoggingUrlTest.java | 15 +++++-- .../log4j/GridLog4jNotInitializedTest.java | 5 +++ .../testsuites/IgniteLog4jTestSuite.java | 6 +++ .../ignite/logger/log4j2/Log4J2Logger.java | 20 +++++++--- .../logger/log4j2/Log4j2LoggerSelfTest.java | 19 ++++++++- .../ignite/logger/slf4j/Slf4jLogger.java | 6 +++ 18 files changed, 207 insertions(+), 35 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridLoggerProxy.java b/modules/core/src/main/java/org/apache/ignite/internal/GridLoggerProxy.java index 9f5daaee6dac1..fc49b96d8c18f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridLoggerProxy.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridLoggerProxy.java @@ -168,6 +168,15 @@ public GridLoggerProxy(IgniteLogger impl, @Nullable Object ctgr, @Nullable Strin return impl.isQuiet(); } + /** + * Gets the class name and parameters of the Logger type used. 
+ * + * @return Logger information (name and parameters) + */ + public String getLoggerInfo() { + return impl.toString(); + } + /** * Enriches the log message with Ignite instance name if * {@link org.apache.ignite.IgniteSystemProperties#IGNITE_LOG_INSTANCE_NAME} or diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java index 8a71e1a15b0f4..ae3aa73e0fd9e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java @@ -810,6 +810,7 @@ public void start( ackOsInfo(); ackLanguageRuntime(); ackRemoteManagement(); + ackLogger(); ackVmArguments(rtBean); ackClassPaths(rtBean); ackSystemProperties(); @@ -1961,6 +1962,16 @@ private void ackConfigUrl() { log.info("Config URL: " + System.getProperty(IGNITE_CONFIG_URL, "n/a")); } + /** + * Acks Logger configuration. + */ + private void ackLogger() { + assert log != null; + + if (log.isInfoEnabled()) + log.info("Logger: " + log.getLoggerInfo() ); + } + /** * Acks ASCII-logo. 
Thanks to http://patorjk.com/software/taag */ @@ -2005,6 +2016,8 @@ private void ackAsciiLogo() { if (fileName != null) U.quiet(false, " ^-- Logging to file '" + fileName + '\''); + U.quiet(false, " ^-- Logging by '" + log.getLoggerInfo() + '\''); + U.quiet(false, " ^-- To see **FULL** console log here add -DIGNITE_QUIET=false or \"-v\" to ignite.{sh|bat}", ""); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/logger/platform/PlatformLogger.java b/modules/core/src/main/java/org/apache/ignite/internal/logger/platform/PlatformLogger.java index 0a0437e9eed89..c082126dbd4fa 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/logger/platform/PlatformLogger.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/logger/platform/PlatformLogger.java @@ -24,7 +24,10 @@ import org.apache.ignite.internal.processors.platform.callback.PlatformCallbackGateway; import org.apache.ignite.internal.processors.platform.memory.PlatformMemory; import org.apache.ignite.internal.processors.platform.memory.PlatformOutputStream; +import org.apache.ignite.internal.util.tostring.GridToStringExclude; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.S; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.IgniteSystemProperties.IGNITE_QUIET; @@ -49,24 +52,31 @@ public class PlatformLogger implements IgniteLogger { public static final int LVL_ERROR = 4; /** Callbacks. */ + @GridToStringExclude private volatile PlatformCallbackGateway gate; /** Context. */ + @GridToStringExclude private volatile PlatformContext ctx; /** Category. */ + @GridToStringExclude private final String category; /** Trace flag. */ + @GridToStringInclude private volatile boolean traceEnabled; /** Debug flag. */ + @GridToStringInclude private volatile boolean debugEnabled; /** Info flag. 
*/ + @GridToStringInclude private volatile boolean infoEnabled; /** Quiet flag. */ + @GridToStringInclude private static final boolean isQuiet = Boolean.valueOf(System.getProperty(IGNITE_QUIET, "true")); /** @@ -220,4 +230,9 @@ private static String getCategoryString(Object ctgr) { ? ((Class)ctgr).getName() : (ctgr == null ? null : String.valueOf(ctgr)); } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(PlatformLogger.class, this); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/logger/NullLogger.java b/modules/core/src/main/java/org/apache/ignite/logger/NullLogger.java index 71c7ba499d1ff..5b9c483848843 100644 --- a/modules/core/src/main/java/org/apache/ignite/logger/NullLogger.java +++ b/modules/core/src/main/java/org/apache/ignite/logger/NullLogger.java @@ -18,6 +18,7 @@ package org.apache.ignite.logger; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.util.typedef.internal.S; import org.jetbrains.annotations.Nullable; /** @@ -88,4 +89,9 @@ public class NullLogger implements IgniteLogger { @Nullable @Override public String fileName() { return null; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(NullLogger.class, this); + } } \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/logger/java/JavaLogger.java b/modules/core/src/main/java/org/apache/ignite/logger/java/JavaLogger.java index 6aa7d3879698a..78d3afad965e7 100644 --- a/modules/core/src/main/java/org/apache/ignite/logger/java/JavaLogger.java +++ b/modules/core/src/main/java/org/apache/ignite/logger/java/JavaLogger.java @@ -29,8 +29,10 @@ import java.util.logging.Logger; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.A; +import 
org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.logger.LoggerNodeIdAware; import org.jetbrains.annotations.Nullable; @@ -107,16 +109,23 @@ public class JavaLogger implements IgniteLogger, LoggerNodeIdAware { private static volatile boolean quiet0; /** Java Logging implementation proxy. */ + @GridToStringExclude @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized") private Logger impl; + /** Path to configuration file. */ + @GridToStringExclude + private String cfg; + /** Quiet flag. */ private final boolean quiet; /** Work directory. */ + @GridToStringExclude private volatile String workDir; /** Node ID. */ + @GridToStringExclude private volatile UUID nodeId; /** @@ -153,6 +162,8 @@ private void defaultConfiguration() { catch (IOException e) { error("Failed to read logging configuration: " + cfgUrl, e); } + + cfg = cfgUrl.getPath(); } /** @@ -216,6 +227,7 @@ private void configure(@Nullable Logger initImpl) { // User configured console appender, thus log is not quiet. 
quiet0 = !consoleHndFound; inited = true; + cfg = System.getProperty("java.util.logging.config.file"); return; } @@ -406,4 +418,9 @@ private static T findHandler(Logger log, Class cls) { return null; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(JavaLogger.class, this, "config", this.cfg); + } } diff --git a/modules/core/src/test/java/org/apache/ignite/logger/java/JavaLoggerTest.java b/modules/core/src/test/java/org/apache/ignite/logger/java/JavaLoggerTest.java index 5fd5b5e7ce2c3..d9ec810d32586 100644 --- a/modules/core/src/test/java/org/apache/ignite/logger/java/JavaLoggerTest.java +++ b/modules/core/src/test/java/org/apache/ignite/logger/java/JavaLoggerTest.java @@ -42,6 +42,11 @@ public void testLogInitialize() throws Exception { ((JavaLogger)log).setWorkDirectory(U.defaultWorkDirectory()); ((LoggerNodeIdAware)log).setNodeId(UUID.fromString("00000000-1111-2222-3333-444444444444")); + System.out.println(log.toString()); + + assertTrue(log.toString().contains("JavaLogger")); + assertTrue(log.toString().contains(JavaLogger.DFLT_CONFIG_PATH)); + if (log.isDebugEnabled()) log.debug("This is 'debug' message."); diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/logger/GridTestLog4jLogger.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/logger/GridTestLog4jLogger.java index 52813b997a132..2ac32cea4bd0e 100644 --- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/logger/GridTestLog4jLogger.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/logger/GridTestLog4jLogger.java @@ -96,12 +96,14 @@ public class GridTestLog4jLogger implements IgniteLogger, LoggerNodeIdAware { private Logger impl; /** Path to configuration file. */ - private final String path; + @GridToStringExclude + private final String cfg; /** Quiet flag. */ private final boolean quiet; /** Node ID. 
*/ + @GridToStringExclude private UUID nodeId; /** @@ -140,7 +142,7 @@ public GridTestLog4jLogger(boolean init) { else quiet = true; - path = null; + cfg = null; } /** @@ -148,11 +150,9 @@ public GridTestLog4jLogger(boolean init) { * * @param impl Log4j implementation to use. */ - public GridTestLog4jLogger(final Logger impl) { + protected GridTestLog4jLogger(final Logger impl) { assert impl != null; - path = null; - addConsoleAppenderIfNeeded(null, new C1() { @Override public Logger apply(Boolean init) { return impl; @@ -160,6 +160,7 @@ public GridTestLog4jLogger(final Logger impl) { }); quiet = quiet0; + cfg = null; } /** @@ -172,7 +173,7 @@ public GridTestLog4jLogger(String path) throws IgniteCheckedException { if (path == null) throw new IgniteCheckedException("Configuration XML file for Log4j must be specified."); - this.path = path; + this.cfg = path; final URL cfgUrl = U.resolveIgniteUrl(path); @@ -204,12 +205,12 @@ public GridTestLog4jLogger(File cfgFile) throws IgniteCheckedException { if (!cfgFile.exists() || cfgFile.isDirectory()) throw new IgniteCheckedException("Log4j configuration path was not found or is a directory: " + cfgFile); - path = cfgFile.getAbsolutePath(); + cfg = cfgFile.getAbsolutePath(); addConsoleAppenderIfNeeded(null, new C1() { @Override public Logger apply(Boolean init) { if (init) - DOMConfigurator.configure(path); + DOMConfigurator.configure(cfg); return Logger.getRootLogger(); } @@ -228,7 +229,7 @@ public GridTestLog4jLogger(final URL cfgUrl) throws IgniteCheckedException { if (cfgUrl == null) throw new IgniteCheckedException("Configuration XML file for Log4j must be specified."); - path = null; + cfg = cfgUrl.getPath(); addConsoleAppenderIfNeeded(null, new C1() { @Override public Logger apply(Boolean init) { @@ -519,6 +520,6 @@ public static Collection logFiles() { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(GridTestLog4jLogger.class, this); + return S.toString(GridTestLog4jLogger.class, this, 
"config", cfg); } } diff --git a/modules/log4j/src/main/java/org/apache/ignite/logger/log4j/Log4JLogger.java b/modules/log4j/src/main/java/org/apache/ignite/logger/log4j/Log4JLogger.java index 39a1008a6f153..02e7b35fac4be 100644 --- a/modules/log4j/src/main/java/org/apache/ignite/logger/log4j/Log4JLogger.java +++ b/modules/log4j/src/main/java/org/apache/ignite/logger/log4j/Log4JLogger.java @@ -96,12 +96,14 @@ public class Log4JLogger implements IgniteLogger, LoggerNodeIdAware, Log4jFileAw private Logger impl; /** Path to configuration file. */ - private final String path; + @GridToStringExclude + private final String cfg; /** Quiet flag. */ private final boolean quiet; /** Node ID. */ + @GridToStringExclude private UUID nodeId; /** @@ -140,7 +142,7 @@ public Log4JLogger(boolean init) { else quiet = true; - path = null; + cfg = null; } /** @@ -151,7 +153,24 @@ public Log4JLogger(boolean init) { public Log4JLogger(final Logger impl) { assert impl != null; - path = null; + addConsoleAppenderIfNeeded(null, new C1() { + @Override public Logger apply(Boolean init) { + return impl; + } + }); + + quiet = quiet0; + cfg = null; + } + + /** + * Creates new logger with given implementation. + * + * @param impl Log4j implementation to use. + * @param path Configuration file/url path. + */ + private Log4JLogger(final Logger impl, final String path) { + assert impl != null; addConsoleAppenderIfNeeded(null, new C1() { @Override public Logger apply(Boolean init) { @@ -160,6 +179,7 @@ public Log4JLogger(final Logger impl) { }); quiet = quiet0; + cfg = path; } /** @@ -168,11 +188,11 @@ public Log4JLogger(final Logger impl) { * @param path Path to log4j configuration XML file. * @throws IgniteCheckedException Thrown in case logger can't be created. 
*/ - public Log4JLogger(String path) throws IgniteCheckedException { + public Log4JLogger(final String path) throws IgniteCheckedException { if (path == null) throw new IgniteCheckedException("Configuration XML file for Log4j must be specified."); - this.path = path; + this.cfg = path; final URL cfgUrl = U.resolveIgniteUrl(path); @@ -204,12 +224,12 @@ public Log4JLogger(File cfgFile) throws IgniteCheckedException { if (!cfgFile.exists() || cfgFile.isDirectory()) throw new IgniteCheckedException("Log4j configuration path was not found or is a directory: " + cfgFile); - path = cfgFile.getAbsolutePath(); + cfg = cfgFile.getAbsolutePath(); addConsoleAppenderIfNeeded(null, new C1() { @Override public Logger apply(Boolean init) { if (init) - DOMConfigurator.configure(path); + DOMConfigurator.configure(cfg); return Logger.getRootLogger(); } @@ -228,7 +248,7 @@ public Log4JLogger(final URL cfgUrl) throws IgniteCheckedException { if (cfgUrl == null) throw new IgniteCheckedException("Configuration XML file for Log4j must be specified."); - path = null; + cfg = cfgUrl.getPath(); addConsoleAppenderIfNeeded(null, new C1() { @Override public Logger apply(Boolean init) { @@ -448,7 +468,7 @@ public static Collection logFiles() { @Override public Log4JLogger getLogger(Object ctgr) { return new Log4JLogger(ctgr == null ? Logger.getRootLogger() : ctgr instanceof Class ? 
Logger.getLogger(((Class)ctgr).getName()) : - Logger.getLogger(ctgr.toString())); + Logger.getLogger(ctgr.toString()), cfg); } /** {@inheritDoc} */ @@ -517,7 +537,7 @@ public static Collection logFiles() { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(Log4JLogger.class, this); + return S.toString(Log4JLogger.class, this, "config", this.cfg); } /** {@inheritDoc} */ diff --git a/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jCorrectFileNameTest.java b/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jCorrectFileNameTest.java index 4672963a1f93b..ac5b4983af799 100644 --- a/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jCorrectFileNameTest.java +++ b/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jCorrectFileNameTest.java @@ -18,12 +18,15 @@ package org.apache.ignite.logger.log4j; import java.io.File; +import java.util.Collections; import java.util.Enumeration; import junit.framework.TestCase; import org.apache.ignite.Ignite; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; import org.apache.ignite.testframework.junits.common.GridCommonTest; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -108,6 +111,16 @@ private static IgniteConfiguration getConfiguration(String igniteInstanceName) t cfg.setGridLogger(new Log4JLogger()); cfg.setConnectorConfiguration(null); + TcpDiscoverySpi disco = new TcpDiscoverySpi(); + + TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder(false); + + ipFinder.setAddresses(Collections.singleton("127.0.0.1:47500..47502")); + + disco.setIpFinder(ipFinder); + + cfg.setDiscoverySpi(disco); + return cfg; } diff --git 
a/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jInitializedTest.java b/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jInitializedTest.java index 94907f098b0c3..1fb9c34c0c21f 100644 --- a/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jInitializedTest.java +++ b/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jInitializedTest.java @@ -39,7 +39,12 @@ public class GridLog4jInitializedTest extends TestCase { public void testLogInitialize() { IgniteLogger log = new Log4JLogger(); - assert log.isInfoEnabled() == true; + System.out.println(log.toString()); + + assertTrue(log.toString().contains("Log4JLogger")); + assertTrue(log.toString().contains("config=null")); + + assertTrue(log.isInfoEnabled()); if (log.isDebugEnabled()) log.debug("This is 'debug' message."); diff --git a/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingFileTest.java b/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingFileTest.java index e64edd31263e9..d1b09d7542e0a 100644 --- a/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingFileTest.java +++ b/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingFileTest.java @@ -31,9 +31,12 @@ public class GridLog4jLoggingFileTest extends TestCase { /** */ private IgniteLogger log; + /** Logger config */ + private File xml; + /** {@inheritDoc} */ @Override protected void setUp() throws Exception { - File xml = GridTestUtils.resolveIgnitePath("modules/core/src/test/config/log4j-test.xml"); + xml = GridTestUtils.resolveIgnitePath("modules/core/src/test/config/log4j-test.xml"); assert xml != null; assert xml.exists() == true; @@ -45,8 +48,12 @@ public class GridLog4jLoggingFileTest extends TestCase { * Tests log4j logging SPI. 
*/ public void testLog() { - assert log.isDebugEnabled() == true; - assert log.isInfoEnabled() == true; + System.out.println(log.toString()); + + assertTrue(log.toString().contains("Log4JLogger")); + assertTrue(log.toString().contains(xml.getPath())); + + assertTrue(log.isInfoEnabled()); log.debug("This is 'debug' message."); log.info("This is 'info' message."); diff --git a/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingPathTest.java b/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingPathTest.java index 9ab56d5321bb1..867efbac45c68 100644 --- a/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingPathTest.java +++ b/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingPathTest.java @@ -29,16 +29,24 @@ public class GridLog4jLoggingPathTest extends TestCase { /** */ private IgniteLogger log; + /** Logger config */ + private String path = "modules/core/src/test/config/log4j-test.xml"; + /** {@inheritDoc} */ @Override protected void setUp() throws Exception { - log = new Log4JLogger("modules/core/src/test/config/log4j-test.xml").getLogger(getClass()); + log = new Log4JLogger(path).getLogger(getClass()); } /** * Tests log4j logging SPI. 
*/ public void testLog() { - assert log.isInfoEnabled() == true; + System.out.println(log.toString()); + + assertTrue(log.toString().contains("Log4JLogger")); + assertTrue(log.toString().contains(path)); + + assertTrue(log.isInfoEnabled()); if (log.isDebugEnabled()) log.debug("This is 'debug' message."); diff --git a/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingUrlTest.java b/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingUrlTest.java index 9ec695e6b94ba..1e2e8df82bf31 100644 --- a/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingUrlTest.java +++ b/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jLoggingUrlTest.java @@ -18,6 +18,7 @@ package org.apache.ignite.logger.log4j; import java.io.File; +import java.net.URL; import junit.framework.TestCase; import org.apache.ignite.IgniteLogger; import org.apache.ignite.testframework.GridTestUtils; @@ -31,6 +32,9 @@ public class GridLog4jLoggingUrlTest extends TestCase { /** */ private IgniteLogger log; + /** Logger config */ + private URL url; + /** {@inheritDoc} */ @Override protected void setUp() throws Exception { File xml = GridTestUtils.resolveIgnitePath("modules/core/src/test/config/log4j-test.xml"); @@ -38,15 +42,20 @@ public class GridLog4jLoggingUrlTest extends TestCase { assert xml != null; assert xml.exists(); - log = new Log4JLogger(xml.toURI().toURL()).getLogger(getClass()); + url = xml.toURI().toURL(); + log = new Log4JLogger(url).getLogger(getClass()); } /** * Tests log4j logging SPI. 
*/ public void testLog() { - assert log.isDebugEnabled(); - assert log.isInfoEnabled(); + System.out.println(log.toString()); + + assertTrue(log.toString().contains("Log4JLogger")); + assertTrue(log.toString().contains(url.getPath())); + + assertTrue(log.isInfoEnabled()); log.debug("This is 'debug' message."); log.info("This is 'info' message."); diff --git a/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jNotInitializedTest.java b/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jNotInitializedTest.java index 390fdcb0a66bc..d32e890686420 100644 --- a/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jNotInitializedTest.java +++ b/modules/log4j/src/test/java/org/apache/ignite/logger/log4j/GridLog4jNotInitializedTest.java @@ -30,6 +30,11 @@ public class GridLog4jNotInitializedTest extends TestCase { public void testLogInitialize() { IgniteLogger log = new Log4JLogger().getLogger(GridLog4jNotInitializedTest.class); + System.out.println(log.toString()); + + assertTrue(log.toString().contains("Log4JLogger")); + assertTrue(log.toString().contains("config=null")); + if (log.isDebugEnabled()) log.debug("This is 'debug' message."); else diff --git a/modules/log4j/src/test/java/org/apache/ignite/testsuites/IgniteLog4jTestSuite.java b/modules/log4j/src/test/java/org/apache/ignite/testsuites/IgniteLog4jTestSuite.java index f5f13d9e7ffb3..2c0af796ed22a 100644 --- a/modules/log4j/src/test/java/org/apache/ignite/testsuites/IgniteLog4jTestSuite.java +++ b/modules/log4j/src/test/java/org/apache/ignite/testsuites/IgniteLog4jTestSuite.java @@ -20,6 +20,9 @@ import junit.framework.TestSuite; import org.apache.ignite.logger.log4j.GridLog4jCorrectFileNameTest; import org.apache.ignite.logger.log4j.GridLog4jInitializedTest; +import org.apache.ignite.logger.log4j.GridLog4jLoggingFileTest; +import org.apache.ignite.logger.log4j.GridLog4jLoggingPathTest; +import org.apache.ignite.logger.log4j.GridLog4jLoggingUrlTest; import 
org.apache.ignite.logger.log4j.GridLog4jNotInitializedTest; /** @@ -36,6 +39,9 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(GridLog4jInitializedTest.class)); suite.addTest(new TestSuite(GridLog4jNotInitializedTest.class)); suite.addTest(new TestSuite(GridLog4jCorrectFileNameTest.class)); + suite.addTest(new TestSuite(GridLog4jLoggingFileTest.class)); + suite.addTest(new TestSuite(GridLog4jLoggingPathTest.class)); + suite.addTest(new TestSuite(GridLog4jLoggingUrlTest.class)); return suite; } diff --git a/modules/log4j2/src/main/java/org/apache/ignite/logger/log4j2/Log4J2Logger.java b/modules/log4j2/src/main/java/org/apache/ignite/logger/log4j2/Log4J2Logger.java index e39a9efcca031..58a68cd60cb05 100644 --- a/modules/log4j2/src/main/java/org/apache/ignite/logger/log4j2/Log4J2Logger.java +++ b/modules/log4j2/src/main/java/org/apache/ignite/logger/log4j2/Log4J2Logger.java @@ -26,6 +26,7 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.util.tostring.GridToStringExclude; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.C1; import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.S; @@ -100,10 +101,15 @@ public class Log4J2Logger implements IgniteLogger, LoggerNodeIdAware { @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized") private Logger impl; + /** Path to configuration file. */ + @GridToStringExclude + private final String cfg; + /** Quiet flag. */ private final boolean quiet; /** Node ID. */ + @GridToStringExclude private volatile UUID nodeId; /** @@ -111,7 +117,7 @@ public class Log4J2Logger implements IgniteLogger, LoggerNodeIdAware { * * @param impl Log4j implementation to use. 
*/ - private Log4J2Logger(final Logger impl) { + private Log4J2Logger(final Logger impl, String path) { assert impl != null; addConsoleAppenderIfNeeded(new C1() { @@ -121,6 +127,7 @@ private Log4J2Logger(final Logger impl) { }); quiet = quiet0; + cfg = path; } /** @@ -148,6 +155,7 @@ public Log4J2Logger(String path) throws IgniteCheckedException { }); quiet = quiet0; + cfg = path; } /** @@ -175,6 +183,7 @@ public Log4J2Logger(File cfgFile) throws IgniteCheckedException { }); quiet = quiet0; + cfg = cfgFile.getPath(); } /** @@ -197,6 +206,7 @@ public Log4J2Logger(final URL cfgUrl) throws IgniteCheckedException { }); quiet = quiet0; + cfg = cfgUrl.getPath(); } /** @@ -416,17 +426,17 @@ public Logger createConsoleLogger() { */ @Override public Log4J2Logger getLogger(Object ctgr) { if (ctgr == null) - return new Log4J2Logger((Logger)LogManager.getRootLogger()); + return new Log4J2Logger((Logger)LogManager.getRootLogger(), cfg); if (ctgr instanceof Class) { String name = ((Class)ctgr).getName(); - return new Log4J2Logger((Logger)LogManager.getLogger(name)); + return new Log4J2Logger((Logger)LogManager.getLogger(name), cfg); } String name = ctgr.toString(); - return new Log4J2Logger((Logger)LogManager.getLogger(name)); + return new Log4J2Logger((Logger)LogManager.getLogger(name), cfg); } /** {@inheritDoc} */ @@ -495,6 +505,6 @@ public Logger createConsoleLogger() { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(Log4J2Logger.class, this); + return S.toString(Log4J2Logger.class, this, "config", cfg); } } diff --git a/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/Log4j2LoggerSelfTest.java b/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/Log4j2LoggerSelfTest.java index a5564da01175a..5f3207ec52e80 100644 --- a/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/Log4j2LoggerSelfTest.java +++ b/modules/log4j2/src/test/java/org/apache/ignite/logger/log4j2/Log4j2LoggerSelfTest.java @@ -18,6 +18,7 @@ package 
org.apache.ignite.logger.log4j2; import java.io.File; +import java.net.URL; import java.util.Collections; import java.util.UUID; import junit.framework.TestCase; @@ -59,6 +60,11 @@ public void testFileConstructor() throws Exception { IgniteLogger log = new Log4J2Logger(xml).getLogger(getClass()); + System.out.println(log.toString()); + + assertTrue(log.toString().contains("Log4J2Logger")); + assertTrue(log.toString().contains(xml.getPath())); + ((LoggerNodeIdAware)log).setNodeId(UUID.randomUUID()); checkLog(log); @@ -73,7 +79,13 @@ public void testUrlConstructor() throws Exception { assert xml != null; assert xml.exists(); - IgniteLogger log = new Log4J2Logger(xml.toURI().toURL()).getLogger(getClass()); + URL url = xml.toURI().toURL(); + IgniteLogger log = new Log4J2Logger(url).getLogger(getClass()); + + System.out.println(log.toString()); + + assertTrue(log.toString().contains("Log4J2Logger")); + assertTrue(log.toString().contains(url.getPath())); ((LoggerNodeIdAware)log).setNodeId(UUID.randomUUID()); @@ -86,6 +98,11 @@ public void testUrlConstructor() throws Exception { public void testPathConstructor() throws Exception { IgniteLogger log = new Log4J2Logger(LOG_PATH_TEST).getLogger(getClass()); + System.out.println(log.toString()); + + assertTrue(log.toString().contains("Log4J2Logger")); + assertTrue(log.toString().contains(LOG_PATH_TEST)); + ((LoggerNodeIdAware)log).setNodeId(UUID.randomUUID()); checkLog(log); diff --git a/modules/slf4j/src/main/java/org/apache/ignite/logger/slf4j/Slf4jLogger.java b/modules/slf4j/src/main/java/org/apache/ignite/logger/slf4j/Slf4jLogger.java index 2b0e98041db39..245085090d138 100644 --- a/modules/slf4j/src/main/java/org/apache/ignite/logger/slf4j/Slf4jLogger.java +++ b/modules/slf4j/src/main/java/org/apache/ignite/logger/slf4j/Slf4jLogger.java @@ -18,6 +18,7 @@ package org.apache.ignite.logger.slf4j; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.util.typedef.internal.S; import 
org.jetbrains.annotations.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -143,4 +144,9 @@ public Slf4jLogger(Logger impl) { @Nullable @Override public String fileName() { return null; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Slf4jLogger.class, this); + } } \ No newline at end of file From b61b4d08069c2d33d0f7e8ecc1e6981badf0ddef Mon Sep 17 00:00:00 2001 From: Evgenii Zhuravlev Date: Thu, 7 Dec 2017 15:42:33 +0300 Subject: [PATCH 140/243] IGNITE-7088 Fix implementation of DIRECT comparator for ordering cache start operations. This closes #3136. Signed-off-by: nikolay_tikhonov (cherry picked from commit bbeb205) --- .../processors/cache/ClusterCachesInfo.java | 8 ++-- .../processors/cache/CacheComparatorTest.java | 48 +++++++++++++++++++ .../testsuites/IgniteCacheTestSuite2.java | 3 ++ 3 files changed, 54 insertions(+), 5 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheComparatorTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java index 69f1a274c30e7..49ba732d8cced 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java @@ -1755,16 +1755,14 @@ private boolean surviveReconnect(String cacheName) { * Use DIRECT comparator for ordering cache start operations. * Use REVERSE comparator for ordering cache stop operations. */ - private static class CacheComparators { + static class CacheComparators { /** * DIRECT comparator for cache descriptors (first system caches). 
*/ static Comparator DIRECT = new Comparator() { @Override public int compare(DynamicCacheDescriptor o1, DynamicCacheDescriptor o2) { - if (!o1.cacheType().userCache()) - return -1; - if (!o2.cacheType().userCache()) - return 1; + if (o1.cacheType().userCache() ^ o2.cacheType().userCache()) + return o2.cacheType().userCache() ? -1 : 1; return o1.cacheId().compareTo(o2.cacheId()); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheComparatorTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheComparatorTest.java new file mode 100644 index 0000000000000..0bd587de85d40 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheComparatorTest.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache; + +import junit.framework.TestCase; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.internal.processors.query.QuerySchema; + +/** + * Test for CacheComparators from ClusterCachesInfo + */ +public class CacheComparatorTest extends TestCase { + /** + * Test if comparator not violates its general contract + */ + public void testDirect() { + DynamicCacheDescriptor desc1 = new DynamicCacheDescriptor(null, + new CacheConfiguration().setName("1111"), CacheType.DATA_STRUCTURES, + null, true, null, true, + false, null, new QuerySchema()); + + DynamicCacheDescriptor desc2 = new DynamicCacheDescriptor(null, + new CacheConfiguration().setName("2222"), CacheType.INTERNAL, + null, true, null, true, + false, null, new QuerySchema()); + + assertEquals(-1, + ClusterCachesInfo.CacheComparators.DIRECT.compare(desc1, desc2)); + + assertEquals(1, + ClusterCachesInfo.CacheComparators.DIRECT.compare(desc2, desc1)); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java index 6f5b7108d49b0..69bb87806c6cf 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java @@ -22,6 +22,7 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunctionExcludeNeighborsSelfTest; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunctionFastPowerOfTwoHashSelfTest; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunctionStandardHashSelfTest; +import org.apache.ignite.internal.processors.cache.CacheComparatorTest; import org.apache.ignite.internal.processors.cache.CacheConcurrentReadThroughTest; import org.apache.ignite.internal.processors.cache.CacheConfigurationLeakTest; import 
org.apache.ignite.internal.processors.cache.CacheDhtLocalPartitionAfterRemoveSelfTest; @@ -293,6 +294,8 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(CachePartitionStateTest.class)); + suite.addTest(new TestSuite(CacheComparatorTest.class)); + return suite; } } From 73a6ab47f1f490a8c37299931a954b36a6b402ba Mon Sep 17 00:00:00 2001 From: "Andrey V. Mashenkov" Date: Thu, 7 Dec 2017 19:16:25 +0300 Subject: [PATCH 141/243] IGNITE-7008 TcpDiscoverySharedFsIpFinder fails with NPE if address can't be resolved. This closes #3087. Signed-off-by: nikolay_tikhonov (cherry picked from commit e39283e) --- .../TcpDiscoverySharedFsIpFinder.java | 34 ++++++++----------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/sharedfs/TcpDiscoverySharedFsIpFinder.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/sharedfs/TcpDiscoverySharedFsIpFinder.java index a30309ce2ed18..397af1a5d16ed 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/sharedfs/TcpDiscoverySharedFsIpFinder.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/sharedfs/TcpDiscoverySharedFsIpFinder.java @@ -186,29 +186,24 @@ private File initFolder() throws IgniteSpiException { Collection addrs = new LinkedList<>(); - for (String fileName : folder.list()) - if (!".svn".equals(fileName)) { - InetSocketAddress addr = null; + for (String fileName : folder.list()) { + StringTokenizer st = new StringTokenizer(fileName, DELIM); - StringTokenizer st = new StringTokenizer(fileName, DELIM); + if (st.countTokens() != 2) + continue; - if (st.countTokens() == 2) { - String addrStr = st.nextToken(); - String portStr = st.nextToken(); + String addrStr = st.nextToken(); + String portStr = st.nextToken(); - try { - int port = Integer.parseInt(portStr); - - addr = new InetSocketAddress(denormalizeAddress(addrStr), port); - } - 
catch (IllegalArgumentException e) { - U.error(log, "Failed to parse file entry: " + fileName, e); - } - } + try { + int port = Integer.parseInt(portStr); - if (addr != null) - addrs.add(addr); + addrs.add(new InetSocketAddress(denormalizeAddress(addrStr), port)); } + catch (IllegalArgumentException e) { + U.error(log, "Failed to parse file entry: " + fileName, e); + } + } return Collections.unmodifiableCollection(addrs); } @@ -277,7 +272,8 @@ private String name(InetSocketAddress addr) { SB sb = new SB(); - sb.a(normalizeAddress(addr.getAddress().getHostAddress())) + // There is no need to normalize hostname as DNS name specification doesn't allow ':' and '_' chars. + sb.a(addr.isUnresolved() ? addr.getHostName() : normalizeAddress(addr.getAddress().getHostAddress())) .a(DELIM) .a(addr.getPort()); From b0c8d955829f35459a8c832656b44975b6d7108b Mon Sep 17 00:00:00 2001 From: Alexander Fedotov Date: Fri, 8 Dec 2017 15:32:27 +0300 Subject: [PATCH 142/243] IGNITE-7085 When PDS is enabled and IGNITE_BINARY_SORT_OBJECT_FIELDS is set and IgniteCache#put is called node hangs - Fixes #3121. 
Signed-off-by: Alexey Goncharuk (cherry picked from commit 3c20b0e4e32443977cbc18b61ef03d94b1cd5407) --- .../binary/BinaryClassDescriptor.java | 3 +- .../ignite/internal/binary/BinaryContext.java | 1 + .../IgnitePdsBinarySortObjectFieldsTest.java | 150 ++++++++++++++++++ .../ignite/testframework/GridTestUtils.java | 41 +++++ .../IgnitePdsWithIndexingCoreTestSuite.java | 2 + 5 files changed, 196 insertions(+), 1 deletion(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsBinarySortObjectFieldsTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java index 935211eceda34..e00835acf0fc7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java @@ -741,7 +741,8 @@ void write(Object obj, BinaryWriterExImpl writer) throws BinaryObjectException { postWrite(writer); // Check whether we need to update metadata. - if (obj.getClass() != BinaryMetadata.class) { + // The reason for this check is described in https://issues.apache.org/jira/browse/IGNITE-7138. 
+ if (obj.getClass() != BinaryMetadata.class && obj.getClass() != BinaryTreeMap.class) { int schemaId = writer.schemaId(); if (schemaReg.schema(schemaId) == null) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java index dd192bf0e9e5f..5be1d39d9af8e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java @@ -342,6 +342,7 @@ public BinaryContext(BinaryMetadataHandler metaHnd, IgniteConfiguration igniteCf registerPredefinedType(BinaryMetadataKey.class, 0); registerPredefinedType(BinaryMetadata.class, 0); registerPredefinedType(BinaryEnumObjectImpl.class, 0); + registerPredefinedType(BinaryTreeMap.class, 0); registerPredefinedType(PlatformDotNetSessionData.class, 0); registerPredefinedType(PlatformDotNetSessionLockResult.class, 0); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsBinarySortObjectFieldsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsBinarySortObjectFieldsTest.java new file mode 100644 index 0000000000000..7c8981e5fb43d --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsBinarySortObjectFieldsTest.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import java.util.concurrent.TimeUnit; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.PersistentStoreConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +/** + * + */ +public class IgnitePdsBinarySortObjectFieldsTest extends GridCommonAbstractTest { + /** */ + private static final String CACHE_NAME = "ignitePdsBinarySortObjectFieldsTestCache"; + + /** + * Value. + */ + public static class Value { + /** */ + private Long val; + + /** + * Default constructor. + */ + public Value() { + // No-op. + } + + /** + * @param val Value. + */ + public Value(Long val) { + this.val = val; + } + + /** + * Returns the value. + * + * @return Value. + */ + public Long getVal() { + return val; + } + + /** + * Sets the value. + * + * @param val Value. 
+ */ + public void setVal(Long val) { + this.val = val; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return "Value [val=" + val + ']'; + } + } + + /** */ + private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + GridTestUtils.deleteDbFiles(); + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + GridTestUtils.deleteDbFiles(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + cfg.setConsistentId(gridName); + + ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER); + + cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration()); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + GridTestUtils.deleteDbFiles(); + + super.afterTest(); + } + + /** + * @throws Exception if failed. 
+ */ + public void testGivenCacheWithPojoValueAndPds_WhenPut_ThenNoHangup() throws Exception { + System.setProperty("IGNITE_BINARY_SORT_OBJECT_FIELDS", "true"); + + GridTestUtils.assertTimeout(5, TimeUnit.SECONDS, new Runnable() { + @Override public void run() { + IgniteEx ignite; + + try { + ignite = startGrid(0); + } + catch (Exception e) { + throw new RuntimeException(e); + } + + ignite.active(true); + + IgniteCache cache = ignite.getOrCreateCache( + new CacheConfiguration(CACHE_NAME) + ); + + cache.put(1L, new Value(1L)); + + assertEquals(1, cache.size()); + } + }); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java b/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java index 380284fea2b25..53ca516967201 100644 --- a/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java @@ -52,6 +52,11 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import javax.cache.CacheException; import javax.cache.configuration.Factory; @@ -452,6 +457,42 @@ public static

        Throwable assertThrowsWithCause(IgniteInClosure

        call, P p, throw new AssertionError("Exception has not been thrown."); } + /** + * Asserts that the specified runnable completes within the specified timeout. + * + * @param msg Assertion message in case of timeout. + * @param timeout Timeout. + * @param timeUnit Timeout {@link TimeUnit}. + * @param runnable {@link Runnable} to check. + * @throws Exception In case of any exception distinct from {@link TimeoutException}. + */ + public static void assertTimeout(String msg, long timeout, TimeUnit timeUnit, Runnable runnable) throws Exception { + ExecutorService executorSvc = Executors.newSingleThreadExecutor(); + Future fut = executorSvc.submit(runnable); + + try { + fut.get(timeout, timeUnit); + } + catch (TimeoutException ignored) { + fail(msg, null); + } + finally { + executorSvc.shutdownNow(); + } + } + + /** + * Asserts that the specified runnable completes within the specified timeout. + * + * @param timeout Timeout. + * @param timeUnit Timeout {@link TimeUnit}. + * @param runnable {@link Runnable} to check. + * @throws Exception In case of any exception distinct from {@link TimeoutException}. + */ + public static void assertTimeout(long timeout, TimeUnit timeUnit, Runnable runnable) throws Exception { + assertTimeout("Timeout occurred.", timeout, timeUnit, runnable); + } + /** * Throw assertion error with specified error message and initialized cause. 
* diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingCoreTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingCoreTestSuite.java index 114f6304e2742..96263f8b49c54 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingCoreTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingCoreTestSuite.java @@ -19,6 +19,7 @@ import junit.framework.TestSuite; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsAtomicCacheRebalancingTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsBinaryMetadataOnClusterRestartTest; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsBinarySortObjectFieldsTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsMarshallerMappingRestoreOnNodeStartTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsTxCacheRebalancingTest; import org.apache.ignite.internal.processors.cache.persistence.IgnitePersistentStoreCacheGroupsTest; @@ -58,6 +59,7 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(IgnitePdsBinaryMetadataOnClusterRestartTest.class); suite.addTestSuite(IgnitePdsMarshallerMappingRestoreOnNodeStartTest.class); suite.addTestSuite(IgnitePdsThreadInterruptionTest.class); + suite.addTestSuite(IgnitePdsBinarySortObjectFieldsTest.class); return suite; } From ea857ff9ecf32e76141114444ee6cb87ebf470f5 Mon Sep 17 00:00:00 2001 From: vsisko Date: Sun, 10 Dec 2017 20:13:05 +0700 Subject: [PATCH 143/243] IGNITE-6999 Visor CMD: Added support for "-quiet" option in batch mode. 
(cherry picked from commit 1e8b550) --- .../ignite/visor/commands/VisorConsole.scala | 25 +++++++++----- .../commands/open/VisorOpenCommand.scala | 34 +++++++++++++------ .../scala/org/apache/ignite/visor/visor.scala | 25 ++++++++------ 3 files changed, 54 insertions(+), 30 deletions(-) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala index d53a0d51f293c..ce8b313f6208d 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala @@ -84,7 +84,7 @@ class VisorConsole { if (hasArgFlag("?", argLst) || hasArgFlag("help", argLst)) { println("Usage:") - println(s" $progName [? | -help]|[{-v}{-np} {-cfg=}]|[{-b=} {-e=command1;command2;...}]") + println(s" $progName [? | -help]|[{-v}{-np} {-cfg=}]|[{-b=} {-e=command1;command2;...} -quiet]") println(" Where:") println(" ?, /help, -help - show this message.") println(" -v - verbose mode (quiet by default).") @@ -93,6 +93,7 @@ class VisorConsole { println(" -b= - batch mode with file.") println(" -e=cmd1;cmd2;... 
- batch mode with commands.") println(" -nq - batch mode will not quit after execution (useful for alerts monitoring).") + println(" -quiet - batch mode will not print inform message and node log.") visor.quit() } @@ -100,15 +101,19 @@ class VisorConsole { argLst } - protected def buildReader(argLst: ArgList) = { + protected def buildReader(argLst: ArgList): ConsoleReader = { val cfgFile = argValue("cfg", argLst) val batchFile = argValue("b", argLst) val batchCommand = argValue("e", argLst) val noBatchQuit = hasArgName("nq", argLst) + val quiet = hasArgName("quiet", argLst) if (noBatchQuit && batchFile.isEmpty && batchCommand.isEmpty) visor.warn("Option \"-nq\" will be ignored because batch mode options \"-b\" or \"-e\" were not specified.") + if (quiet && batchFile.isEmpty && batchCommand.isEmpty) + visor.warn("Option \"-quiet\" will be ignored because batch mode options \"-b\" or \"-e\" were not specified.") + cfgFile.foreach(cfg => { if (cfg.trim.isEmpty) { visor.warn("Expected path to configuration after \"-cfg\" option.") @@ -153,6 +158,7 @@ class VisorConsole { val inputStream = batchStream match { case Some(cmd) => visor.batchMode = true + visor.quiet = quiet val script = if (noBatchQuit) cmd else cmd + "\nquit\n" @@ -179,7 +185,8 @@ class VisorConsole { } protected def mainLoop(reader: ConsoleReader) { - welcomeMessage() + if (!visor.quiet) + welcomeMessage() var ok = true @@ -194,7 +201,7 @@ class VisorConsole { val buf = new StringBuilder while (ok) { - line = reader.readLine("visor> ") + line = reader.readLine(if (visor.quiet) null else "visor> ") ok = line != null @@ -253,11 +260,11 @@ class VisorConsole { * Print banner, hint message on start. 
*/ protected def welcomeMessage() { - println("___ _________________________ ________" + NL + - "__ | / /____ _/__ ___/__ __ \\___ __ \\" + NL + - "__ | / / __ / _____ \\ _ / / /__ /_/ /" + NL + - "__ |/ / __/ / ____/ / / /_/ / _ _, _/" + NL + - "_____/ /___/ /____/ \\____/ /_/ |_|" + NL + + println("___ _________________________ ________" + NL + + "__ | / /____ _/__ ___/__ __ \\___ __ \\" + NL + + "__ | / / __ / _____ \\ _ / / /__ /_/ /" + NL + + "__ |/ / __/ / ____/ / / /_/ / _ _, _/" + NL + + "_____/ /___/ /____/ \\____/ /_/ |_|" + NL + NL + "ADMIN CONSOLE" + NL + copyright()) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/open/VisorOpenCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/open/VisorOpenCommand.scala index 1cfbde4d6fff9..949aa0081165d 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/open/VisorOpenCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/open/VisorOpenCommand.scala @@ -19,7 +19,8 @@ package org.apache.ignite.visor.commands.open -import java.util.logging.{ConsoleHandler, Logger} +import java.net.URL +import java.util.logging.{ConsoleHandler, Level, Logger} import org.apache.ignite.IgniteSystemProperties._ import org.apache.ignite.configuration.IgniteConfiguration @@ -28,14 +29,13 @@ import org.apache.ignite.internal.IgniteEx import org.apache.ignite.internal.util.scala.impl import org.apache.ignite.internal.util.spring.IgniteSpringHelper import org.apache.ignite.internal.util.{IgniteUtils => U} +import org.apache.ignite.logger.NullLogger import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi import org.apache.ignite.visor.commands.common.{VisorConsoleCommand, VisorTextTable} import org.apache.ignite.visor.visor._ import org.apache.ignite.visor.{VisorTag, visor} import org.apache.ignite.{IgniteException, IgniteSystemProperties, Ignition} -import java.net.URL - import 
scala.language.{implicitConversions, reflectiveCalls} /** @@ -134,7 +134,15 @@ class VisorOpenCommand extends VisorConsoleCommand { // Add no-op logger to remove no-appender warning. val log4jTup = - if (classOf[Ignition].getClassLoader.getResource("org/apache/log4j/Appender.class") != null) + if (visor.quiet) { + val springLog = Logger.getLogger("org.springframework") + + if (springLog != null) + springLog.setLevel(Level.WARNING) + + null + } + else if (classOf[Ignition].getClassLoader.getResource("org/apache/log4j/Appender.class") != null) U.addLog4jNoOpLogger() else null @@ -147,7 +155,7 @@ class VisorOpenCommand extends VisorConsoleCommand { spring.loadConfigurations(url, "cacheConfiguration", "fileSystemConfiguration", "lifecycleBeans", "indexingSpi").get1() finally { - if (log4jTup != null) + if (log4jTup != null && !visor.quiet) U.removeLog4jNoOpLogger(log4jTup) } @@ -159,12 +167,16 @@ class VisorOpenCommand extends VisorConsoleCommand { val cfg = cfgs.iterator().next() - if (log4jTup != null) - System.setProperty(IgniteSystemProperties.IGNITE_CONSOLE_APPENDER, "false") - else - Logger.getGlobal.getHandlers.foreach({ - case handler: ConsoleHandler => Logger.getGlobal.removeHandler(handler) - }) + if (visor.quiet) + cfg.setGridLogger(new NullLogger) + else { + if (log4jTup != null) + System.setProperty(IgniteSystemProperties.IGNITE_CONSOLE_APPENDER, "false") + else + Logger.getGlobal.getHandlers.foreach({ + case handler: ConsoleHandler => Logger.getGlobal.removeHandler(handler) + }) + } // Setting up 'Config URL' for properly print in console. 
System.setProperty(IgniteSystemProperties.IGNITE_CONFIG_URL, url.getPath) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala index ffc7a00b64ae1..0f1e53a435d32 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala @@ -240,6 +240,9 @@ object visor extends VisorTag { var batchMode: Boolean = false + /** Quiet mode to disable node log and information messages output. */ + var quiet: Boolean = false + def reader(reader: ConsoleReader) { assert(reader != null) @@ -1640,20 +1643,22 @@ object visor extends VisorTag { nl() - val t = VisorTextTable() + if (!visor.quiet) { + val t = VisorTextTable() - // Print advise. - println("Some useful commands:") + // Print advise. + println("Some useful commands:") - t += ("Type 'top'", "to see full topology.") - t += ("Type 'node'", "to see node statistics.") - t += ("Type 'cache'", "to see cache statistics.") - t += ("Type 'tasks'", "to see tasks statistics.") - t += ("Type 'config'", "to see node configuration.") + t += ("Type 'top'", "to see full topology.") + t += ("Type 'node'", "to see node statistics.") + t += ("Type 'cache'", "to see cache statistics.") + t += ("Type 'tasks'", "to see tasks statistics.") + t += ("Type 'config'", "to see node configuration.") - t.render() + t.render() - println("\nType 'help' to get help.\n") + println("\nType 'help' to get help.\n") + } status() } From c1a474535c94dc157c3d785a70a0724e08c5018f Mon Sep 17 00:00:00 2001 From: Andrey Novikov Date: Fri, 17 Nov 2017 13:28:14 +0700 Subject: [PATCH 144/243] IGNITE-6920 Added direct-install build for Web Console. 
(cherry picked from commit 69eced6) --- modules/web-console/.gitignore | 11 +- modules/web-console/DEVNOTES.txt | 13 +- modules/web-console/backend/.gitignore | 6 - modules/web-console/backend/app/apiServer.js | 76 +-- .../backend/app/browsersHandler.js | 442 +++++++++--------- modules/web-console/backend/app/mongo.js | 73 ++- modules/web-console/backend/app/nconf.js | 44 +- modules/web-console/backend/app/routes.js | 26 +- modules/web-console/backend/app/settings.js | 84 ++-- modules/web-console/backend/index.js | 25 +- modules/web-console/backend/injector.js | 4 +- modules/web-console/backend/package.json | 22 +- .../compose/frontend/nginx/web-console.conf | 1 - .../docker/standalone/nginx/web-console.conf | 1 - modules/web-console/frontend/.gitignore | 8 +- .../generator/JavaTransformer.service.js | 2 + .../frontend/webpack/webpack.dev.babel.js | 5 +- 17 files changed, 466 insertions(+), 377 deletions(-) diff --git a/modules/web-console/.gitignore b/modules/web-console/.gitignore index 9ddddc4776d8c..8648ae5323960 100644 --- a/modules/web-console/.gitignore +++ b/modules/web-console/.gitignore @@ -1,6 +1,5 @@ -docker/standalone/backend/build -docker/standalone/frontend/build -docker/standalone/data -docker/compose/backend/build -docker/compose/frontend/build -docker/dev/data +.npmrc +package-lock.json +build/ +node_modules/ +data/ diff --git a/modules/web-console/DEVNOTES.txt b/modules/web-console/DEVNOTES.txt index aa8702e04c53c..4ff68a7f35625 100644 --- a/modules/web-console/DEVNOTES.txt +++ b/modules/web-console/DEVNOTES.txt @@ -1,8 +1,5 @@ -Ignite Web Console Instructions -====================================== - -How to deploy locally: - +Ignite Web Console Build Instructions +===================================== 1. Install locally MongoDB (version >=3.2.x) follow instructions from site http://docs.mongodb.org/manual/installation. 2. Install locally NodeJS (version >=6.5.x) using installer from site https://nodejs.org/en/download/current for your OS. 3. 
Change directory to '$IGNITE_HOME/modules/web-console/backend' and @@ -13,10 +10,10 @@ How to deploy locally: 6. Copy ignite-web-agent-.zip from '$IGNITE_HOME/modules/web-console/web-agent/target' to '$IGNITE_HOME/modules/web-console/backend/agent_dists' folder. -Steps 1 - 6 should be executed once. - -How to run console in development mode: +Steps 1 - 4 should be executed once. +Ignite Web Console Run In Development Mode +========================================== 1. Configure MongoDB to run as service or in terminal change dir to $MONGO_INSTALL_DIR/server/3.2/bin and start MongoDB by executing "mongod". diff --git a/modules/web-console/backend/.gitignore b/modules/web-console/backend/.gitignore index 0aa3ecebe2c90..e5b9047191ac9 100644 --- a/modules/web-console/backend/.gitignore +++ b/modules/web-console/backend/.gitignore @@ -1,8 +1,2 @@ -*.idea -*.log -.npmrc -node_modules agent_dists/*.zip config/*.json -yarn.lock -package-lock.json diff --git a/modules/web-console/backend/app/apiServer.js b/modules/web-console/backend/app/apiServer.js index affb9c9fccf84..cb097509e4b40 100644 --- a/modules/web-console/backend/app/apiServer.js +++ b/modules/web-console/backend/app/apiServer.js @@ -21,48 +21,64 @@ module.exports = { implements: 'api-server', - inject: ['require(express)', 'configure', 'routes'] -}; - -module.exports.factory = function(Express, configure, routes) { - /** - * Connected agents manager. - */ - class ApiServer { + inject: ['require(fs)', 'require(path)', 'require(express)', 'settings', 'configure', 'routes'], + factory(fs, path, Express, settings, configure, routes) { /** - * @param {Server} srv + * Connected agents manager. 
*/ - attach(srv) { - const app = new Express(); + class ApiServer { + /** + * @param {Server} srv + */ + attach(srv) { + const app = new Express(); + + configure.express(app); - configure.express(app); + routes.register(app); - routes.register(app); + if (settings.packaged) { + const staticDir = path.join(process.cwd(), 'libs/frontend'); - // Catch 404 and forward to error handler. - app.use((req, res, next) => { - const err = new Error('Not Found: ' + req.originalUrl); + try { + fs.accessSync(staticDir, fs.F_OK); - err.status = 404; + app.use('/', Express.static(staticDir)); - next(err); - }); + app.get('*', function(req, res) { + res.sendFile(path.join(staticDir, 'index.html')); + }); + } + catch (e) { + console.log(`Failed to find folder with frontend files: ${staticDir}`); + } + } - // Production error handler: no stacktraces leaked to user. - app.use((err, req, res) => { - res.status(err.status || 500); + // Catch 404 and forward to error handler. + app.use((req, res, next) => { + const err = new Error('Not Found: ' + req.originalUrl); - res.render('error', { - message: err.message, - error: {} + err.status = 404; + + next(err); }); - }); - srv.addListener('request', app); + // Production error handler: no stacktraces leaked to user. 
+ app.use((err, req, res) => { + res.status(err.status || 500); - return app; + res.render('error', { + message: err.message, + error: {} + }); + }); + + srv.addListener('request', app); + + return app; + } } - } - return new ApiServer(); + return new ApiServer(); + } }; diff --git a/modules/web-console/backend/app/browsersHandler.js b/modules/web-console/backend/app/browsersHandler.js index f4ff23c2534a9..8b1385db94491 100644 --- a/modules/web-console/backend/app/browsersHandler.js +++ b/modules/web-console/backend/app/browsersHandler.js @@ -24,291 +24,289 @@ */ module.exports = { implements: 'browsers-handler', - inject: ['require(lodash)', 'require(socket.io)', 'configure', 'errors', 'mongo'] -}; + inject: ['require(lodash)', 'require(socket.io)', 'configure', 'errors', 'mongo'], + factory: (_, socketio, configure, errors, mongo) => { + class BrowserSockets { + constructor() { + this.sockets = new Map(); + } -module.exports.factory = (_, socketio, configure, errors, mongo) => { - class BrowserSockets { - constructor() { - this.sockets = new Map(); - } + /** + * @param {Socket} sock + */ + add(sock) { + const token = sock.request.user.token; - /** - * @param {Socket} sock - */ - add(sock) { - const token = sock.request.user.token; + if (this.sockets.has(token)) + this.sockets.get(token).push(sock); + else + this.sockets.set(token, [sock]); - if (this.sockets.has(token)) - this.sockets.get(token).push(sock); - else - this.sockets.set(token, [sock]); + return this.sockets.get(token); + } - return this.sockets.get(token); - } + /** + * @param {Socket} sock + */ + remove(sock) { + const token = sock.request.user.token; - /** - * @param {Socket} sock - */ - remove(sock) { - const token = sock.request.user.token; + const sockets = this.sockets.get(token); - const sockets = this.sockets.get(token); + _.pull(sockets, sock); - _.pull(sockets, sock); + return sockets; + } - return sockets; - } + get(token) { + if (this.sockets.has(token)) + return this.sockets.get(token); 
- get(token) { - if (this.sockets.has(token)) - return this.sockets.get(token); + return []; + } - return []; + demo(token) { + return _.filter(this.sockets.get(token), (sock) => sock.request._query.IgniteDemoMode === 'true'); + } } - demo(token) { - return _.filter(this.sockets.get(token), (sock) => sock.request._query.IgniteDemoMode === 'true'); - } - } + class BrowsersHandler { + /** + * @constructor + */ + constructor() { + /** + * Connected browsers. + * @type {BrowserSockets} + */ + this._browserSockets = new BrowserSockets(); + + /** + * Registered Visor task. + * @type {Map} + */ + this._visorTasks = new Map(); + } - class BrowsersHandler { - /** - * @constructor - */ - constructor() { /** - * Connected browsers. - * @type {BrowserSockets} + * @param {Error} err + * @return {{code: number, message: *}} */ - this._browserSockets = new BrowserSockets(); + errorTransformer(err) { + return { + code: err.code || 1, + message: err.message || err + }; + } /** - * Registered Visor task. 
- * @type {Map} + * @param {String} token + * @param {Array.} [socks] */ - this._visorTasks = new Map(); - } + agentStats(token, socks = this._browserSockets.get(token)) { + return this._agentHnd.agents(token) + .then((agentSocks) => { + const stat = _.reduce(agentSocks, (acc, agentSock) => { + acc.count += 1; + acc.hasDemo |= _.get(agentSock, 'demo.enabled'); - /** - * @param {Error} err - * @return {{code: number, message: *}} - */ - errorTransformer(err) { - return { - code: err.code || 1, - message: err.message || err - }; - } + if (agentSock.cluster) + acc.clusters.push(agentSock.cluster); - /** - * @param {String} token - * @param {Array.} [socks] - */ - agentStats(token, socks = this._browserSockets.get(token)) { - return this._agentHnd.agents(token) - .then((agentSocks) => { - const stat = _.reduce(agentSocks, (acc, agentSock) => { - acc.count += 1; - acc.hasDemo |= _.get(agentSock, 'demo.enabled'); - - if (agentSock.cluster) - acc.clusters.push(agentSock.cluster); - - return acc; - }, {count: 0, hasDemo: false, clusters: []}); - - stat.clusters = _.uniqWith(stat.clusters, _.isEqual); - - return stat; - }) - .catch(() => ({count: 0, hasDemo: false, clusters: []})) - .then((stat) => _.forEach(socks, (sock) => sock.emit('agents:stat', stat))); - } + return acc; + }, {count: 0, hasDemo: false, clusters: []}); - emitNotification(sock) { - sock.emit('user:notifications', this.notification); - } + stat.clusters = _.uniqWith(stat.clusters, _.isEqual); - /** - * @param {String} notification Notification message. 
- */ - updateNotification(notification) { - this.notification = notification; + return stat; + }) + .catch(() => ({count: 0, hasDemo: false, clusters: []})) + .then((stat) => _.forEach(socks, (sock) => sock.emit('agents:stat', stat))); + } - for (const socks of this._browserSockets.sockets.values()) { - for (const sock of socks) - this.emitNotification(sock); + emitNotification(sock) { + sock.emit('user:notifications', this.notification); } - } - executeOnAgent(token, demo, event, ...args) { - const cb = _.last(args); + /** + * @param {String} notification Notification message. + */ + updateNotification(notification) { + this.notification = notification; - return this._agentHnd.agent(token, demo) - .then((agentSock) => agentSock.emitEvent(event, ..._.dropRight(args))) - .then((res) => cb(null, res)) - .catch((err) => cb(this.errorTransformer(err))); - } + for (const socks of this._browserSockets.sockets.values()) { + for (const sock of socks) + this.emitNotification(sock); + } + } - agentListeners(sock) { - const demo = sock.request._query.IgniteDemoMode === 'true'; - const token = () => sock.request.user.token; + executeOnAgent(token, demo, event, ...args) { + const cb = _.last(args); - // Return available drivers to browser. - sock.on('schemaImport:drivers', (...args) => { - this.executeOnAgent(token(), demo, 'schemaImport:drivers', ...args); - }); + return this._agentHnd.agent(token, demo) + .then((agentSock) => agentSock.emitEvent(event, ..._.dropRight(args))) + .then((res) => cb(null, res)) + .catch((err) => cb(this.errorTransformer(err))); + } - // Return schemas from database to browser. - sock.on('schemaImport:schemas', (...args) => { - this.executeOnAgent(token(), demo, 'schemaImport:schemas', ...args); - }); + agentListeners(sock) { + const demo = sock.request._query.IgniteDemoMode === 'true'; + const token = () => sock.request.user.token; - // Return tables from database to browser. 
- sock.on('schemaImport:metadata', (...args) => { - this.executeOnAgent(token(), demo, 'schemaImport:metadata', ...args); - }); - } + // Return available drivers to browser. + sock.on('schemaImport:drivers', (...args) => { + this.executeOnAgent(token(), demo, 'schemaImport:drivers', ...args); + }); - /** - * @param {Promise.} agent - * @param {Boolean} demo - * @param {Object.} params - * @return {Promise.} - */ - executeOnNode(agent, demo, params) { - return agent - .then((agentSock) => agentSock.emitEvent('node:rest', {uri: 'ignite', demo, params})) - .then((res) => { - if (res.status === 0) { - if (res.zipped) - return res; - - return JSON.parse(res.data); - } - - throw new Error(res.error); + // Return schemas from database to browser. + sock.on('schemaImport:schemas', (...args) => { + this.executeOnAgent(token(), demo, 'schemaImport:schemas', ...args); }); - } - registerVisorTask(taskId, taskCls, ...argCls) { - this._visorTasks.set(taskId, { - taskCls, - argCls - }); - } + // Return tables from database to browser. + sock.on('schemaImport:metadata', (...args) => { + this.executeOnAgent(token(), demo, 'schemaImport:metadata', ...args); + }); + } - nodeListeners(sock) { - // Return command result from grid to browser. 
- sock.on('node:rest', (clusterId, params, cb) => { - const demo = sock.request._query.IgniteDemoMode === 'true'; - const token = sock.request.user.token; + /** + * @param {Promise.} agent + * @param {Boolean} demo + * @param {Object.} params + * @return {Promise.} + */ + executeOnNode(agent, demo, params) { + return agent.then((agentSock) => agentSock.emitEvent('node:rest', {uri: 'ignite', demo, params})) + .then((res) => { + if (res.status === 0) { + if (res.zipped) + return res; - const agent = this._agentHnd.agent(token, demo, clusterId); + return JSON.parse(res.data); + } - this.executeOnNode(agent, demo, params) - .then((data) => cb(null, data)) - .catch((err) => cb(this.errorTransformer(err))); - }); + throw new Error(res.error); + }); + } - const internalVisor = (postfix) => `org.apache.ignite.internal.visor.${postfix}`; + registerVisorTask(taskId, taskCls, ...argCls) { + this._visorTasks.set(taskId, { + taskCls, + argCls + }); + } - this.registerVisorTask('querySql', internalVisor('query.VisorQueryTask'), internalVisor('query.VisorQueryArg')); - this.registerVisorTask('querySqlV2', internalVisor('query.VisorQueryTask'), internalVisor('query.VisorQueryArgV2')); - this.registerVisorTask('querySqlV3', internalVisor('query.VisorQueryTask'), internalVisor('query.VisorQueryArgV3')); - this.registerVisorTask('querySqlX2', internalVisor('query.VisorQueryTask'), internalVisor('query.VisorQueryTaskArg')); + nodeListeners(sock) { + // Return command result from grid to browser. 
+ sock.on('node:rest', (clusterId, params, cb) => { + const demo = sock.request._query.IgniteDemoMode === 'true'; + const token = sock.request.user.token; - this.registerVisorTask('queryScanX2', internalVisor('query.VisorScanQueryTask'), internalVisor('query.VisorScanQueryTaskArg')); + const agent = this._agentHnd.agent(token, demo, clusterId); - this.registerVisorTask('queryFetch', internalVisor('query.VisorQueryNextPageTask'), 'org.apache.ignite.lang.IgniteBiTuple', 'java.lang.String', 'java.lang.Integer'); - this.registerVisorTask('queryFetchX2', internalVisor('query.VisorQueryNextPageTask'), internalVisor('query.VisorQueryNextPageTaskArg')); + this.executeOnNode(agent, demo, params) + .then((data) => cb(null, data)) + .catch((err) => cb(this.errorTransformer(err))); + }); - this.registerVisorTask('queryClose', internalVisor('query.VisorQueryCleanupTask'), 'java.util.Map', 'java.util.UUID', 'java.util.Set'); - this.registerVisorTask('queryCloseX2', internalVisor('query.VisorQueryCleanupTask'), internalVisor('query.VisorQueryCleanupTaskArg')); + const internalVisor = (postfix) => `org.apache.ignite.internal.visor.${postfix}`; + this.registerVisorTask('querySql', internalVisor('query.VisorQueryTask'), internalVisor('query.VisorQueryArg')); + this.registerVisorTask('querySqlV2', internalVisor('query.VisorQueryTask'), internalVisor('query.VisorQueryArgV2')); + this.registerVisorTask('querySqlV3', internalVisor('query.VisorQueryTask'), internalVisor('query.VisorQueryArgV3')); + this.registerVisorTask('querySqlX2', internalVisor('query.VisorQueryTask'), internalVisor('query.VisorQueryTaskArg')); - // Return command result from grid to browser. 
- sock.on('node:visor', (clusterId, taskId, nids, ...args) => { - const demo = sock.request._query.IgniteDemoMode === 'true'; - const token = sock.request.user.token; + this.registerVisorTask('queryScanX2', internalVisor('query.VisorScanQueryTask'), internalVisor('query.VisorScanQueryTaskArg')); - const cb = _.last(args); - args = _.dropRight(args); + this.registerVisorTask('queryFetch', internalVisor('query.VisorQueryNextPageTask'), 'org.apache.ignite.lang.IgniteBiTuple', 'java.lang.String', 'java.lang.Integer'); + this.registerVisorTask('queryFetchX2', internalVisor('query.VisorQueryNextPageTask'), internalVisor('query.VisorQueryNextPageTaskArg')); - const desc = this._visorTasks.get(taskId); + this.registerVisorTask('queryClose', internalVisor('query.VisorQueryCleanupTask'), 'java.util.Map', 'java.util.UUID', 'java.util.Set'); + this.registerVisorTask('queryCloseX2', internalVisor('query.VisorQueryCleanupTask'), internalVisor('query.VisorQueryCleanupTaskArg')); - if (_.isNil(desc)) - return cb(this.errorTransformer(new errors.IllegalArgumentException(`Failed to find Visor task for id: ${taskId}`))); - const params = { - cmd: 'exe', - name: 'org.apache.ignite.internal.visor.compute.VisorGatewayTask', - p1: nids, - p2: desc.taskCls - }; + // Return command result from grid to browser. 
+ sock.on('node:visor', (clusterId, taskId, nids, ...args) => { + const demo = sock.request._query.IgniteDemoMode === 'true'; + const token = sock.request.user.token; - _.forEach(_.concat(desc.argCls, args), (param, idx) => { params[`p${idx + 3}`] = param; }); + const cb = _.last(args); + args = _.dropRight(args); - const agent = this._agentHnd.agent(token, demo, clusterId); + const desc = this._visorTasks.get(taskId); - this.executeOnNode(agent, demo, params) - .then((data) => { - if (data.zipped) - return cb(null, data); + if (_.isNil(desc)) + return cb(this.errorTransformer(new errors.IllegalArgumentException(`Failed to find Visor task for id: ${taskId}`))); - if (data.finished) - return cb(null, data.result); + const params = { + cmd: 'exe', + name: 'org.apache.ignite.internal.visor.compute.VisorGatewayTask', + p1: nids, + p2: desc.taskCls + }; - cb(this.errorTransformer(data.error)); - }) - .catch((err) => cb(this.errorTransformer(err))); - }); - } + _.forEach(_.concat(desc.argCls, args), (param, idx) => { params[`p${idx + 3}`] = param; }); - /** - * - * @param server - * @param {AgentsHandler} agentHnd - */ - attach(server, agentHnd) { - this._agentHnd = agentHnd; + const agent = this._agentHnd.agent(token, demo, clusterId); - if (this.io) - throw 'Browser server already started!'; + this.executeOnNode(agent, demo, params) + .then((data) => { + if (data.zipped) + return cb(null, data); - mongo.Notifications.findOne().sort('-date').exec() - .then((notification) => { - this.notification = notification; - }) - .then(() => { - const io = socketio(server); + if (data.finished) + return cb(null, data.result); - configure.socketio(io); + cb(this.errorTransformer(data.error)); + }) + .catch((err) => cb(this.errorTransformer(err))); + }); + } - // Handle browser connect event. 
- io.sockets.on('connection', (sock) => { - this._browserSockets.add(sock); + /** + * + * @param server + * @param {AgentsHandler} agentHnd + */ + attach(server, agentHnd) { + this._agentHnd = agentHnd; - // Handle browser disconnect event. - sock.on('disconnect', () => { - this._browserSockets.remove(sock); + if (this.io) + throw 'Browser server already started!'; - const demo = sock.request._query.IgniteDemoMode === 'true'; + mongo.Notifications.findOne().sort('-date').exec() + .then((notification) => { + this.notification = notification; + }) + .then(() => { + const io = socketio(server); - // Stop demo if latest demo tab for this token. - demo && agentHnd.tryStopDemo(sock); - }); + configure.socketio(io); - this.agentListeners(sock); - this.nodeListeners(sock); + // Handle browser connect event. + io.sockets.on('connection', (sock) => { + this._browserSockets.add(sock); - this.agentStats(sock.request.user.token, [sock]); - this.emitNotification(sock); + // Handle browser disconnect event. + sock.on('disconnect', () => { + this._browserSockets.remove(sock); + + const demo = sock.request._query.IgniteDemoMode === 'true'; + + // Stop demo if latest demo tab for this token. 
+ demo && agentHnd.tryStopDemo(sock); + }); + + this.agentListeners(sock); + this.nodeListeners(sock); + + this.agentStats(sock.request.user.token, [sock]); + this.emitNotification(sock); + }); }); - }); + } } - } - return new BrowsersHandler(); + return new BrowsersHandler(); + } }; diff --git a/modules/web-console/backend/app/mongo.js b/modules/web-console/backend/app/mongo.js index 7043fcdfd04b9..81076af5f25cb 100644 --- a/modules/web-console/backend/app/mongo.js +++ b/modules/web-console/backend/app/mongo.js @@ -24,16 +24,10 @@ */ module.exports = { implements: 'mongo', - inject: ['require(passport-local-mongoose)', 'settings', 'ignite_modules/mongo:*', 'mongoose'] + inject: ['require(passport-local-mongoose)', 'settings', 'mongoose'] }; -module.exports.factory = function(passportMongo, settings, pluginMongo, mongoose) { - // Use native promises - mongoose.Promise = global.Promise; - - // Connect to mongoDB database. - mongoose.connect(settings.mongoUrl, {server: {poolSize: 4}}); - +const defineSchema = (passportMongo, mongoose) => { const Schema = mongoose.Schema; const ObjectId = mongoose.Schema.Types.ObjectId; const result = { connection: mongoose.connection }; @@ -1147,12 +1141,6 @@ module.exports.factory = function(passportMongo, settings, pluginMongo, mongoose // Define Notifications model. 
result.Notifications = mongoose.model('Notifications', NotificationsSchema); - // Registering the routes of all plugin modules - for (const name in pluginMongo) { - if (pluginMongo.hasOwnProperty(name)) - pluginMongo[name].register(mongoose, result); - } - result.handleError = function(res, err) { // TODO IGNITE-843 Send error to admin res.status(err.code || 500).send(err.message); @@ -1160,3 +1148,60 @@ module.exports.factory = function(passportMongo, settings, pluginMongo, mongoose return result; }; + +module.exports.factory = function(passportMongo, settings, mongoose) { + // Use native promises + mongoose.Promise = global.Promise; + + console.log('Trying to connect to local MongoDB...'); + + // Connect to mongoDB database. + return mongoose.connect(settings.mongoUrl, {server: {poolSize: 4}}) + .catch(() => { + console.log('Failed to connect to local MongoDB, will try to download and start embedded MongoDB'); + + const {MongodHelper} = require('mongodb-prebuilt'); + const {MongoDBDownload} = require('mongodb-download'); + + const helper = new MongodHelper(['--port', '27017', '--dbpath', `${process.cwd()}/user_data`]); + + helper.mongoBin.mongoDBPrebuilt.mongoDBDownload = new MongoDBDownload({ + downloadDir: `${process.cwd()}/libs/mongodb`, + version: '3.4.7' + }); + + let mongodRun; + + if (settings.packaged) { + mongodRun = new Promise((resolve, reject) => { + helper.resolveLink = resolve; + helper.rejectLink = reject; + + helper.mongoBin.runCommand() + .then(() => { + helper.mongoBin.childProcess.removeAllListeners('close'); + + helper.mongoBin.childProcess.stderr.on('data', (data) => helper.stderrHandler(data)); + helper.mongoBin.childProcess.stdout.on('data', (data) => helper.stdoutHandler(data)); + helper.mongoBin.childProcess.on('close', (code) => helper.closeHandler(code)); + }); + }); + } + else + mongodRun = helper.run(); + + return mongodRun + .catch((err) => console.log('Failed to start embedded MongoDB', err)) + .then(() => { + console.log('Embedded 
MongoDB successfully started'); + + return mongoose.connect(settings.mongoUrl, {server: {poolSize: 4}}); + }) + .catch((err) => { + console.log('Failed to connect to embedded MongoDB', err); + + return Promise.reject(err); + }); + }) + .then(() => defineSchema(passportMongo, mongoose)); +}; diff --git a/modules/web-console/backend/app/nconf.js b/modules/web-console/backend/app/nconf.js index c585ac6cf410c..6813f0fd38656 100644 --- a/modules/web-console/backend/app/nconf.js +++ b/modules/web-console/backend/app/nconf.js @@ -24,25 +24,29 @@ */ module.exports = { implements: 'nconf', - inject: ['require(nconf)', 'require(fs)'] -}; - -module.exports.factory = function(nconf, fs) { - const default_config = './config/settings.json'; - const file = process.env.SETTINGS || default_config; - - nconf.env({separator: '_'}); - - try { - fs.accessSync(file, fs.F_OK); - - nconf.file({file}); - } catch (ignore) { - nconf.file({file: default_config}); + inject: ['require(nconf)', 'require(fs)'], + factory(nconf, fs) { + nconf.env({separator: '_'}).argv(); + + const dfltFile = 'config/settings.json'; + const customFile = nconf.get('settings') || dfltFile; + + try { + fs.accessSync(customFile, fs.F_OK); + + nconf.file({file: customFile}); + } + catch (ignored) { + try { + fs.accessSync(dfltFile, fs.F_OK); + + nconf.file({file: dfltFile}); + } + catch (ignored2) { + // No-op. 
+ } + } + + return nconf; } - - if (process.env.CONFIG_PATH && fs.existsSync(process.env.CONFIG_PATH)) - nconf.file({file: process.env.CONFIG_PATH}); - - return nconf; }; diff --git a/modules/web-console/backend/app/routes.js b/modules/web-console/backend/app/routes.js index 826407b8c03f9..aa6efae64b02e 100644 --- a/modules/web-console/backend/app/routes.js +++ b/modules/web-console/backend/app/routes.js @@ -44,22 +44,22 @@ module.exports.factory = function(publicRoute, adminRoute, profilesRoute, demoRo }; // Registering the standard routes - app.use('/', publicRoute); - app.use('/admin', _mustAuthenticated, _adminOnly, adminRoute); - app.use('/profile', _mustAuthenticated, profilesRoute); - app.use('/demo', _mustAuthenticated, demoRoute); + app.use('/api/v1/', publicRoute); + app.use('/api/v1/admin', _mustAuthenticated, _adminOnly, adminRoute); + app.use('/api/v1/profile', _mustAuthenticated, profilesRoute); + app.use('/api/v1/demo', _mustAuthenticated, demoRoute); - app.all('/configuration/*', _mustAuthenticated); + app.all('/api/v1/configuration/*', _mustAuthenticated); - app.use('/configuration', configurationsRoute); - app.use('/configuration/clusters', clustersRoute); - app.use('/configuration/domains', domainsRoute); - app.use('/configuration/caches', cachesRoute); - app.use('/configuration/igfs', igfssRoute); + app.use('/api/v1/configuration', configurationsRoute); + app.use('/api/v1/configuration/clusters', clustersRoute); + app.use('/api/v1/configuration/domains', domainsRoute); + app.use('/api/v1/configuration/caches', cachesRoute); + app.use('/api/v1/configuration/igfs', igfssRoute); - app.use('/notebooks', _mustAuthenticated, notebooksRoute); - app.use('/downloads', _mustAuthenticated, downloadsRoute); - app.use('/activities', _mustAuthenticated, activitiesRoute); + app.use('/api/v1/notebooks', _mustAuthenticated, notebooksRoute); + app.use('/api/v1/downloads', _mustAuthenticated, downloadsRoute); + app.use('/api/v1/activities', _mustAuthenticated, 
activitiesRoute); } }; }; diff --git a/modules/web-console/backend/app/settings.js b/modules/web-console/backend/app/settings.js index 05cb7f6903f1a..5032443582097 100644 --- a/modules/web-console/backend/app/settings.js +++ b/modules/web-console/backend/app/settings.js @@ -24,50 +24,56 @@ */ module.exports = { implements: 'settings', - inject: ['nconf', 'require(fs)'] -}; + inject: ['nconf', 'require(fs)'], + factory(nconf, fs) { + /** + * Normalize a port into a number, string, or false. + */ + const _normalizePort = function(val) { + const port = parseInt(val, 10); + + // named pipe + if (isNaN(port)) + return val; -module.exports.factory = function(nconf, fs) { - /** - * Normalize a port into a number, string, or false. - */ - const _normalizePort = function(val) { - const port = parseInt(val, 10); + // port number + if (port >= 0) + return port; - // named pipe - if (isNaN(port)) - return val; + return false; + }; - // port number - if (port >= 0) - return port; + const mail = nconf.get('mail') || {}; - return false; - }; + mail.address = (username, email) => username ? '"' + username + '" <' + email + '>' : email; - const mail = nconf.get('mail') || {}; + const packaged = __dirname.startsWith('/snapshot/') || __dirname.startsWith('C:\\snapshot\\'); - mail.address = (username, email) => username ? '"' + username + '" <' + email + '>' : email; + const dfltAgentDists = packaged ? 'libs/agent_dists' : 'agent_dists'; + const dfltHost = packaged ? '0.0.0.0' : '127.0.0.1'; + const dfltPort = packaged ? 
80 : 3000; - return { - agent: { - dists: 'agent_dists' - }, - server: { - host: nconf.get('server:host') || '127.0.0.1', - port: _normalizePort(nconf.get('server:port') || 3000), - SSLOptions: nconf.get('server:ssl') && { - enable301Redirects: true, - trustXFPHeader: true, - key: fs.readFileSync(nconf.get('server:key')), - cert: fs.readFileSync(nconf.get('server:cert')), - passphrase: nconf.get('server:keyPassphrase') - } - }, - mail, - mongoUrl: nconf.get('mongodb:url') || 'mongodb://127.0.0.1/console', - cookieTTL: 3600000 * 24 * 30, - sessionSecret: nconf.get('server:sessionSecret') || 'keyboard cat', - tokenLength: 20 - }; + return { + agent: { + dists: nconf.get('agent:dists') || dfltAgentDists + }, + packaged, + server: { + host: nconf.get('server:host') || dfltHost, + port: _normalizePort(nconf.get('server:port') || dfltPort), + SSLOptions: nconf.get('server:ssl') && { + enable301Redirects: true, + trustXFPHeader: true, + key: fs.readFileSync(nconf.get('server:key')), + cert: fs.readFileSync(nconf.get('server:cert')), + passphrase: nconf.get('server:keyPassphrase') + } + }, + mail, + mongoUrl: nconf.get('mongodb:url') || 'mongodb://127.0.0.1/console', + cookieTTL: 3600000 * 24 * 30, + sessionSecret: nconf.get('server:sessionSecret') || 'keyboard cat', + tokenLength: 20 + }; + } }; diff --git a/modules/web-console/backend/index.js b/modules/web-console/backend/index.js index 84547892da091..266fa54e9a775 100644 --- a/modules/web-console/backend/index.js +++ b/modules/web-console/backend/index.js @@ -17,14 +17,21 @@ 'use strict'; -const _ = require('lodash'); const fs = require('fs'); const path = require('path'); + +require('app-module-path').addPath(path.join(__dirname, 'node_modules')); + +const _ = require('lodash'); +const getos = require('getos'); const http = require('http'); const https = require('https'); const MigrateMongoose = require('migrate-mongoose'); -const igniteModules = process.env.IGNITE_MODULES ? 
+ +const packaged = __dirname.startsWith('/snapshot/') || __dirname.startsWith('C:\\snapshot\\'); + +const igniteModules = !packaged && process.env.IGNITE_MODULES ? path.join(path.normalize(process.env.IGNITE_MODULES), 'backend') : path.join(__dirname, 'ignite_modules'); let injector; @@ -34,6 +41,8 @@ try { fs.accessSync(igniteModulesInjector, fs.F_OK); + process.env.NODE_PATH = path.join(__dirname, 'node_modules'); + injector = require(igniteModulesInjector); } catch (ignore) { @@ -93,7 +102,7 @@ const init = ([settings, apiSrv, agentsHnd, browsersHnd]) => { /** * Run mongo model migration. - * + * * @param dbConnectionUri Mongo connection url. * @param group Migrations group. * @param migrationsPath Migrations path. @@ -122,6 +131,16 @@ const migrate = (dbConnectionUri, group, migrationsPath) => { }); }; +getos(function(e, os) { + if (e) + return console.log(e); + + console.log('Your OS is: ' + JSON.stringify(os)); +}); + +injector.log.info = () => {}; +injector.log.debug = () => {}; + Promise.all([injector('settings'), injector('mongo')]) .then(([{mongoUrl}]) => { return migrate(mongoUrl, 'Ignite', path.join(__dirname, 'migrations')) diff --git a/modules/web-console/backend/injector.js b/modules/web-console/backend/injector.js index a5996b39eb9b1..754967fa0a318 100644 --- a/modules/web-console/backend/injector.js +++ b/modules/web-console/backend/injector.js @@ -21,10 +21,10 @@ module.exports = fireUp.newInjector({ basePath: __dirname, modules: [ './app/**/*.js', - './config/**/*.js', './errors/**/*.js', './middlewares/**/*.js', './routes/**/*.js', - './services/**/*.js' + './services/**/*.js', + './ignite_modules/**/*.js' ] }); diff --git a/modules/web-console/backend/package.json b/modules/web-console/backend/package.json index 29aa7341e3688..f0b2b5ebb8f88 100644 --- a/modules/web-console/backend/package.json +++ b/modules/web-console/backend/package.json @@ -7,7 +7,8 @@ "ci-test": "cross-env NODE_ENV=test MOCHA_REPORTER=mocha-teamcity-reporter node 
./test/index.js", "test": "cross-env NODE_ENV=test CONFIG_PATH='./test/config/settings.json' node ./test/index.js", "eslint": "eslint --env node --format node_modules/eslint-friendly-formatter ./ -- --eff-by-issue", - "start": "node ./index.js" + "start": "node ./index.js", + "build": "pkg . --out-path build" }, "author": "", "contributors": [ @@ -30,7 +31,23 @@ "linux", "win32" ], + "bin": "index.js", + "pkg": { + "assets": [ + "app/*.js", + "errors/*.js", + "ignite_modules/*", + "injector.js", + "middlewares/*.js", + "migrations/*", + "node_modules/getos/logic/*.js", + "routes/*.js", + "routes/**/*.json", + "services/*.js" + ] + }, "dependencies": { + "app-module-path": "2.2.0", "body-parser": "1.17.2", "connect-mongo": "1.3.2", "cookie-parser": "1.4.3", @@ -38,9 +55,11 @@ "express-session": "1.15.4", "fire-up": "1.0.0", "glob": "7.1.2", + "getos": "3.1.0", "jszip": "3.1.3", "lodash": "4.17.4", "migrate-mongoose": "3.2.2", + "mongodb-prebuilt": "6.3.3", "mongoose": "4.11.4", "morgan": "1.8.2", "nconf": "0.8.4", @@ -59,6 +78,7 @@ "mocha": "3.4.2", "mocha-teamcity-reporter": "1.1.1", "mockgoose": "6.0.8", + "pkg": "4.2.4", "supertest": "3.0.0" } } diff --git a/modules/web-console/docker/compose/frontend/nginx/web-console.conf b/modules/web-console/docker/compose/frontend/nginx/web-console.conf index 323826e230c17..3f5157dab7b08 100644 --- a/modules/web-console/docker/compose/frontend/nginx/web-console.conf +++ b/modules/web-console/docker/compose/frontend/nginx/web-console.conf @@ -39,7 +39,6 @@ server { } location /api/v1 { - rewrite /api/v1/(.*) /$1 break; proxy_set_header Host $http_host; proxy_pass http://backend-api; } diff --git a/modules/web-console/docker/standalone/nginx/web-console.conf b/modules/web-console/docker/standalone/nginx/web-console.conf index 3d830758f502d..6e36eed30220d 100644 --- a/modules/web-console/docker/standalone/nginx/web-console.conf +++ b/modules/web-console/docker/standalone/nginx/web-console.conf @@ -34,7 +34,6 @@ server { } 
location /api/v1 { - rewrite /api/v1/(.*) /$1 break; proxy_set_header Host $http_host; proxy_pass http://backend-api; } diff --git a/modules/web-console/frontend/.gitignore b/modules/web-console/frontend/.gitignore index 138a2cf2896e6..4fc11f460210a 100644 --- a/modules/web-console/frontend/.gitignore +++ b/modules/web-console/frontend/.gitignore @@ -1,9 +1,3 @@ -*.idea -*.log *.log.* -.npmrc -build/* -node_modules public/stylesheets/*.css -yarn.lock -package-lock.json + diff --git a/modules/web-console/frontend/app/modules/configuration/generator/JavaTransformer.service.js b/modules/web-console/frontend/app/modules/configuration/generator/JavaTransformer.service.js index 4e6204faba43d..e5f4804100152 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/JavaTransformer.service.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/JavaTransformer.service.js @@ -1654,6 +1654,8 @@ export default class IgniteJavaTransformer extends AbstractTransformer { } if ((_.nonEmpty(clientNearCaches) || demo) && shortFactoryCls) { + imports.push('org.apache.ignite.Ignite'); + sb.append(`Ignite ignite = Ignition.start(${cfgRef});`); _.forEach(clientNearCaches, (cache, idx) => { diff --git a/modules/web-console/frontend/webpack/webpack.dev.babel.js b/modules/web-console/frontend/webpack/webpack.dev.babel.js index 56371cda0a169..88bf5c62e3eaf 100644 --- a/modules/web-console/frontend/webpack/webpack.dev.babel.js +++ b/modules/web-console/frontend/webpack/webpack.dev.babel.js @@ -77,10 +77,7 @@ export default merge(commonCfg, { ws: true }, '/api/v1/*': { - target: `http://localhost:${backendPort}`, - pathRewrite: { - '^/api/v1': '' - } + target: `http://localhost:${backendPort}` } }, watchOptions: { From 41f4373799aa1129b5d1853188bb845cd9900996 Mon Sep 17 00:00:00 2001 From: oleg-ostanin Date: Mon, 20 Nov 2017 14:52:25 +0700 Subject: [PATCH 145/243] IGNITE-6927 Added Web Console direct-install build. 
(cherry picked from commit 9a0e36b) --- modules/web-console/DEVNOTES.txt | 20 +++- modules/web-console/assembly/README.txt | 60 ++++++++++ .../web-console/assembly/direct-install.xml | 91 ++++++++++++++++ modules/web-console/pom.xml | 103 ++++++++++++++++-- 4 files changed, 256 insertions(+), 18 deletions(-) create mode 100644 modules/web-console/assembly/README.txt create mode 100644 modules/web-console/assembly/direct-install.xml diff --git a/modules/web-console/DEVNOTES.txt b/modules/web-console/DEVNOTES.txt index 4ff68a7f35625..6dbb15ad65883 100644 --- a/modules/web-console/DEVNOTES.txt +++ b/modules/web-console/DEVNOTES.txt @@ -2,13 +2,13 @@ Ignite Web Console Build Instructions ===================================== 1. Install locally MongoDB (version >=3.2.x) follow instructions from site http://docs.mongodb.org/manual/installation. 2. Install locally NodeJS (version >=6.5.x) using installer from site https://nodejs.org/en/download/current for your OS. -3. Change directory to '$IGNITE_HOME/modules/web-console/backend' and +3. Change directory to '/modules/web-console/backend' and run "npm install --no-optional" for download backend dependencies. -4. Change directory to '$IGNITE_HOME/modules/web-console/frontend' and +4. Change directory to '/modules/web-console/frontend' and run "npm install --no-optional" for download frontend dependencies. 5. Build ignite-web-agent module follow instructions from 'modules/web-console/web-agent/README.txt'. -6. Copy ignite-web-agent-.zip from '$IGNITE_HOME/modules/web-console/web-agent/target' - to '$IGNITE_HOME/modules/web-console/backend/agent_dists' folder. +6. Copy ignite-web-agent-.zip from '/modules/web-console/web-agent/target' + to '/modules/web-console/backend/agent_dists' folder. Steps 1 - 4 should be executed once. @@ -17,10 +17,10 @@ Ignite Web Console Run In Development Mode 1. 
Configure MongoDB to run as service or in terminal change dir to $MONGO_INSTALL_DIR/server/3.2/bin and start MongoDB by executing "mongod". -2. In new terminal change directory to '$IGNITE_HOME/modules/web-console/backend'. +2. In new terminal change directory to '/modules/web-console/backend'. If needed run "npm install --no-optional" (if dependencies changed) and run "npm start" to start backend. -3. In new terminal change directory to '$IGNITE_HOME/modules/web-console/frontend'. +3. In new terminal change directory to '/modules/web-console/frontend'. If needed run "npm install --no-optional" (if dependencies changed) and start webpack in development mode "npm run dev". 4. In browser open: http://localhost:9000 @@ -30,3 +30,11 @@ How to migrate model: 1. Model will be upgraded on first start. 2. To downgrade model execute in terminal following command: "./node_modules/.bin/migrate down -d ". Example: "./node_modules/.bin/migrate down add_index -d mongodb://localhost/console". + + +Ignite Web Console Direct-Install Maven Build Instructions +========================================================== +To build direct-install archive from sources run following command in Ignite project root folder: +"mvn clean package -pl :ignite-web-agent,:ignite-web-console -am -P web-console -DskipTests=true" + +Assembled archive can be found here: `/modules/web-console/target/ignite-web-console-direct-install-*.zip`. diff --git a/modules/web-console/assembly/README.txt b/modules/web-console/assembly/README.txt new file mode 100644 index 0000000000000..2b64a7306ed17 --- /dev/null +++ b/modules/web-console/assembly/README.txt @@ -0,0 +1,60 @@ +Requirements +------------------------------------- +1. JDK 7 bit for your platform, or newer. +2. Supported browsers: Chrome, Firefox, Safari, Edge. +3. Ignite cluster should be started with `ignite-rest-http` module in classpath. For this copy `ignite-rest-http` folder from `libs\optional` to `libs` folder. 
+ + +How to run +------------------------------------- +1. Unpack ignite-web-console-x.x.x.zip to some folder. +2. Start ignite-web-console-xxx executable for you platform: + For Linux: ignite-web-console-linux + For MacOS: ignite-web-console-macos + For Windows: ignite-web-console-win.exe + +Note: on Linux and Mac OS X `root` permission is required to bind to 80 port, but you may always start Web Console on another port if you don't have such permission. + +3. Open URL `localhost` in browser. +4. Login with user `admin@admin` and password `admin`. +5. Start web agent from folder `web agent`. For Web Agent settings see `web-agent\README.txt`. +Cluster URL should be specified in `web-agent\default.properties` in `node-uri` parameter. + +Technical details +------------------------------------- +1. Package content: + `libs` - this folder contains Web Console and MongoDB binaries. + `user_data` - this folder contains all Web Console data (registered users, created objects, ...) and should be preserved in case of update to new version. +2. Package already contains MongoDB for Mac OS X, Windows, RHEL, CentOs and Ubuntu on other platforms MongoDB will be downloaded on first start. MongoDB executables will be downloaded to `libs\mogodb` folder. +3. Web console will start on default HTTP port `80` and bind to all interfaces `0.0.0.0`. +3. To bind Web Console to specific network interface: + On Linux: `./ignite-web-console-linux --server:host 192.168.0.1` + On Windows: `ignite-web-console-win.exe --server:host 192.168.0.1` +4. 
To start Web Console on another port, for example `3000`: + On Linux: `sudo ./ignite-web-console-linux --server:port 3000` + On Windows: `ignite-web-console-win.exe --server:port 3000` + +All available parameters with defaults: + Web Console host: --server:host 0.0.0.0 + Web Console port: --server:port 80 + Enable HTTPS: --server:ssl false + HTTPS key: --server:key "serve/keys/test.key" + HTTPS cetificate: --server:cert "serve/keys/test.crt" + HTTPS passphrase: --server:keyPassphrase "password" + MongoDB URL: --mongodb:url mongodb://localhost/console + Mail service: --mail:service "gmail" + Signature text: --mail:sign "Kind regards, Apache Ignite Team" + Greeting text: --mail:greeting "Apache Ignite Web Console" + Mail FROM: --mail:from "Apache Ignite Web Console " + User to send e-mail: --mail:auth:user "someusername@somecompany.somedomain" + E-mail service password: --mail:auth:pass "" + +Troubleshooting +------------------------------------- +1. On Windows check that MongoDB is not blocked by Antivirus/Firewall/Smartscreen. +2. Root permission is required to bind to 80 port under Mac OS X and Linux, but you may always start Web Console on another port if you don't have such permission. +3. 
For extended debug output start Web Console as following: + On Linux execute command in terminal: `DEBUG=mongodb-* ./ignite-web-console-linux` + On Windows execute two commands in terminal: + `SET DEBUG=mongodb-*` + `ignite-web-console-win.exe` diff --git a/modules/web-console/assembly/direct-install.xml b/modules/web-console/assembly/direct-install.xml new file mode 100644 index 0000000000000..d34cd0757e460 --- /dev/null +++ b/modules/web-console/assembly/direct-install.xml @@ -0,0 +1,91 @@ + + + + + + release-ignite-web-agent + + + zip + + + + + ${project.basedir}/target + /libs/agent_dists + + **/* + + + + + ${project.basedir}/target + /libs/mongodb/mongodb-download + + **/* + + + + + ${project.basedir}/target + /user_data + + **/* + + + + + ${basedir}/assembly + / + + **/README* + + + + + ${basedir}/backend/build + / + + ignite-web-console-win.exe + + + + ${basedir}/backend/build + / + 0755 + + ignite-web-console-* + + + + + ${basedir}/web-agent/target + /libs/agent_dists + + ignite-web-agent-${project.version}.zip + + + + + ${basedir}/frontend/build + /libs/frontend + + + diff --git a/modules/web-console/pom.xml b/modules/web-console/pom.xml index 677b60a5299f6..f935ca6316688 100644 --- a/modules/web-console/pom.xml +++ b/modules/web-console/pom.xml @@ -34,54 +34,133 @@ 2.3.0-SNAPSHOT http://ignite.apache.org + + v8.9.0 + + + + + + org.apache.ignite + ignite-web-agent + ${project.version} + + + + + + + com.github.eirslett + frontend-maven-plugin + 1.6 + + ${node.version} + target + + + + + com.github.eirslett frontend-maven-plugin - 1.0 - - - frontend - - install node and npm + install node and npm for frontend install-node-and-npm + + + + download dependencies for frontend + + npm + + + + frontend + install --no-optional --prod + + + + + build frontend + + npm + + - v4.4.7 - 3.8.6 frontend + run build + + production + - npm install + download dependencies for backend npm + backend install --no-optional - gulp build + build backend - gulp + npm - build + 
backend + run build + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + 2.4 + false + + + + release-web-agent + package + + single + + + + assembly/direct-install.xml + + ignite-web-console-direct-install-${project.version} + target + false + + org.apache.maven.plugins + maven-jar-plugin + + true + + + org.apache.maven.plugins maven-deploy-plugin From 46cd6d915dac825660cbfa6c59ba2d839c4cec18 Mon Sep 17 00:00:00 2001 From: Dmitriy Shabalin Date: Wed, 6 Dec 2017 10:36:42 +0700 Subject: [PATCH 146/243] IGNITE-6390 Web Console: Added component for cluster selection. (cherry picked from commit 1367bc9) --- .../internal/visor/util/VisorTaskUtils.java | 139 ++++++++++++++ .../commands/tasks/VisorTasksCommand.scala | 1 + .../scala/org/apache/ignite/visor/visor.scala | 49 ----- .../web-console/backend/app/agentSocket.js | 2 +- .../web-console/backend/app/agentsHandler.js | 51 +++-- .../backend/app/browsersHandler.js | 7 + modules/web-console/backend/app/mongo.js | 1 + modules/web-console/backend/package.json | 3 +- modules/web-console/frontend/app/app.js | 8 +- .../app/components/bs-select-menu/style.scss | 4 +- .../cluster-select.controller.js | 64 ------- .../cluster-select/cluster-select.pug | 47 ----- .../component.js} | 19 +- .../components/cluster-selector/controller.js | 62 ++++++ .../index.js | 16 +- .../components/cluster-selector/style.scss | 66 +++++++ .../components/cluster-selector/template.pug | 75 ++++++++ .../components/list-editable/controller.js | 2 +- .../page-queries}/Notebook.data.js | 0 .../page-queries}/Notebook.service.js | 0 .../page-queries/controller.js} | 103 +++++++--- .../page-queries/index.js} | 25 +-- .../page-queries}/notebook.controller.js | 0 .../app/components/page-queries/style.scss | 36 ++++ .../components/page-queries/template.tpl.pug} | 6 +- .../app/modules/agent/AgentManager.service.js | 57 +++++- .../app/primitives/switcher/index.pug | 2 +- .../app/primitives/switcher/index.scss | 69 +++++-- 
.../frontend/views/includes/header-right.pug | 2 - .../agent/handlers/ClusterListener.java | 178 +++++++++++++----- .../console/agent/rest/RestExecutor.java | 63 ++++++- 31 files changed, 847 insertions(+), 310 deletions(-) delete mode 100644 modules/web-console/frontend/app/components/cluster-select/cluster-select.controller.js delete mode 100644 modules/web-console/frontend/app/components/cluster-select/cluster-select.pug rename modules/web-console/frontend/app/components/{cluster-select/cluster-select.scss => cluster-selector/component.js} (77%) create mode 100644 modules/web-console/frontend/app/components/cluster-selector/controller.js rename modules/web-console/frontend/app/components/{cluster-select => cluster-selector}/index.js (74%) create mode 100644 modules/web-console/frontend/app/components/cluster-selector/style.scss create mode 100644 modules/web-console/frontend/app/components/cluster-selector/template.pug rename modules/web-console/frontend/app/{modules/sql => components/page-queries}/Notebook.data.js (100%) rename modules/web-console/frontend/app/{modules/sql => components/page-queries}/Notebook.service.js (100%) rename modules/web-console/frontend/app/{modules/sql/sql.controller.js => components/page-queries/controller.js} (95%) rename modules/web-console/frontend/app/{modules/sql/sql.module.js => components/page-queries/index.js} (82%) rename modules/web-console/frontend/app/{modules/sql => components/page-queries}/notebook.controller.js (100%) create mode 100644 modules/web-console/frontend/app/components/page-queries/style.scss rename modules/web-console/frontend/{views/sql/sql.tpl.pug => app/components/page-queries/template.tpl.pug} (99%) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java index ace451c806b6d..fda801c460417 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java @@ -23,8 +23,10 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.RandomAccessFile; +import java.math.BigDecimal; import java.net.InetAddress; import java.net.URL; +import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.charset.CharacterCodingException; @@ -69,6 +71,7 @@ import org.apache.ignite.lang.IgniteClosure; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.spi.eventstorage.NoopEventStorageSpi; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static java.lang.System.getProperty; @@ -1113,4 +1116,140 @@ public static byte[] zipBytes(byte[] input, int initBufSize) throws IOException public static boolean joinTimedOut(String msg) { return msg != null && msg.startsWith("Join process timed out."); } + + /** + * Special wrapper over address that can be sorted in following order: + * IPv4, private IPv4, IPv4 local host, IPv6. + * Lower addresses first. + */ + private static class SortableAddress implements Comparable { + /** */ + private int type; + + /** */ + private BigDecimal bits; + + /** */ + private String addr; + + /** + * Constructor. + * + * @param addr Address as string. + */ + private SortableAddress(String addr) { + this.addr = addr; + + if (addr.indexOf(':') > 0) + type = 4; // IPv6 + else { + try { + InetAddress inetAddr = InetAddress.getByName(addr); + + if (inetAddr.isLoopbackAddress()) + type = 3; // localhost + else if (inetAddr.isSiteLocalAddress()) + type = 2; // private IPv4 + else + type = 1; // other IPv4 + } + catch (UnknownHostException ignored) { + type = 5; + } + } + + bits = BigDecimal.valueOf(0L); + + try { + String[] octets = addr.contains(".") ? 
addr.split(".") : addr.split(":"); + + int len = octets.length; + + for (int i = 0; i < len; i++) { + long oct = F.isEmpty(octets[i]) ? 0 : Long.valueOf( octets[i]); + long pow = Double.valueOf(Math.pow(256, octets.length - 1 - i)).longValue(); + + bits = bits.add(BigDecimal.valueOf(oct * pow)); + } + } + catch (Exception ignore) { + // No-op. + } + } + + /** {@inheritDoc} */ + @Override public int compareTo(@NotNull SortableAddress o) { + return (type == o.type ? bits.compareTo(o.bits) : Integer.compare(type, o.type)); + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object o) { + if (this == o) + return true; + + if (o == null || getClass() != o.getClass()) + return false; + + SortableAddress other = (SortableAddress)o; + + return addr != null ? addr.equals(other.addr) : other.addr == null; + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return addr != null ? addr.hashCode() : 0; + } + + /** + * @return Address. + */ + public String address() { + return addr; + } + } + + /** + * Sort addresses: IPv4 & real addresses first. + * + * @param addrs Addresses to sort. + * @return Sorted list. + */ + public static Collection sortAddresses(Collection addrs) { + if (F.isEmpty(addrs)) + return Collections.emptyList(); + + int sz = addrs.size(); + + List sorted = new ArrayList<>(sz); + + for (String addr : addrs) + sorted.add(new SortableAddress(addr)); + + Collections.sort(sorted); + + Collection res = new ArrayList<>(sz); + + for (SortableAddress sa : sorted) + res.add(sa.address()); + + return res; + } + + /** + * Split addresses. + * + * @param s String with comma separted addresses. + * @return Collection of addresses. 
+ */ + public static Collection splitAddresses(String s) { + if (F.isEmpty(s)) + return Collections.emptyList(); + + String[] addrs = s.split(","); + + for (int i = 0; i < addrs.length; i++) + addrs[i] = addrs[i].trim(); + + return Arrays.asList(addrs); + } } diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/tasks/VisorTasksCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/tasks/VisorTasksCommand.scala index 4d9b795aad776..0d6753eee5167 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/tasks/VisorTasksCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/tasks/VisorTasksCommand.scala @@ -32,6 +32,7 @@ import java.util.UUID import org.apache.ignite.internal.visor.event.{VisorGridEvent, VisorGridJobEvent, VisorGridTaskEvent} import org.apache.ignite.internal.visor.node.VisorNodeEventsCollectorTask import org.apache.ignite.internal.visor.node.VisorNodeEventsCollectorTaskArg +import org.apache.ignite.internal.visor.util.VisorTaskUtils._ import scala.collection.JavaConversions._ import scala.language.implicitConversions diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala index 0f1e53a435d32..d2bc4abcee9d8 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala @@ -2699,53 +2699,4 @@ object visor extends VisorTag { else Long.MaxValue } - - /** - * Sort addresses to properly display in Visor. - * - * @param addrs Addresses to sort. - * @return Sorted list. 
- */ - def sortAddresses(addrs: Iterable[String]) = { - def ipToLong(ip: String) = { - try { - val octets = if (ip.contains(".")) ip.split('.') else ip.split(':') - - var dec = BigDecimal.valueOf(0L) - - for (i <- octets.indices) dec += octets(i).toLong * math.pow(256, octets.length - 1 - i).toLong - - dec - } - catch { - case _: Exception => BigDecimal.valueOf(0L) - } - } - - /** - * Sort addresses to properly display in Visor. - * - * @param addr Address to detect type for. - * @return IP class type for sorting in order: public addresses IPv4 + private IPv4 + localhost + IPv6. - */ - def addrType(addr: String) = { - if (addr.contains(':')) - 4 // IPv6 - else { - try { - InetAddress.getByName(addr) match { - case ip if ip.isLoopbackAddress => 3 // localhost - case ip if ip.isSiteLocalAddress => 2 // private IPv4 - case _ => 1 // other IPv4 - } - } - catch { - case ignore: UnknownHostException => 5 - } - } - } - - addrs.map(addr => (addrType(addr), ipToLong(addr), addr)).toSeq. - sortWith((l, r) => if (l._1 == r._1) l._2.compare(r._2) < 0 else l._1 < r._1).map(_._3) - } } diff --git a/modules/web-console/backend/app/agentSocket.js b/modules/web-console/backend/app/agentSocket.js index 75dcd53449a42..6e4518a5b2c15 100644 --- a/modules/web-console/backend/app/agentSocket.js +++ b/modules/web-console/backend/app/agentSocket.js @@ -88,7 +88,7 @@ module.exports.factory = function(_) { class AgentSocket { /** * @param {Socket} socket Socket for interaction. - * @param {String} tokens Active tokens. + * @param {Array.} tokens Agent tokens. * @param {String} demoEnabled Demo enabled. */ constructor(socket, tokens, demoEnabled) { diff --git a/modules/web-console/backend/app/agentsHandler.js b/modules/web-console/backend/app/agentsHandler.js index 112793a52f151..844ce1edc690e 100644 --- a/modules/web-console/backend/app/agentsHandler.js +++ b/modules/web-console/backend/app/agentsHandler.js @@ -17,6 +17,8 @@ 'use strict'; +const uuid = require('uuid/v4'); + // Fire me up! 
/** @@ -82,19 +84,14 @@ module.exports.factory = function(_, fs, path, JSZip, socketio, settings, mongo, class Cluster { constructor(top) { - let d = new Date().getTime(); - - this.id = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => { - const r = (d + Math.random() * 16) % 16 | 0; - - d = Math.floor(d / 16); - - return (c === 'x' ? r : (r & 0x3 | 0x8)).toString(16); - }); + const clusterName = top.clusterName; + this.id = _.isEmpty(clusterName) ? `Cluster ${uuid().substring(0, 8).toUpperCase()}` : clusterName; this.nids = top.nids; - + this.addresses = top.addresses; + this.clients = top.clients; this.clusterVersion = top.clusterVersion; + this.active = top.active; } isSameCluster(top) { @@ -103,8 +100,18 @@ module.exports.factory = function(_, fs, path, JSZip, socketio, settings, mongo, update(top) { this.clusterVersion = top.clusterVersion; - this.nids = top.nids; + this.addresses = top.addresses; + this.clients = top.clients; + this.clusterVersion = top.clusterVersion; + this.active = top.active; + } + + same(top) { + return _.difference(this.nids, top.nids).length === 0 && + _.isEqual(this.addresses, top.addresses) && + this.clusterVersion === top.clusterVersion && + this.active === top.active; } } @@ -192,10 +199,13 @@ module.exports.factory = function(_, fs, path, JSZip, socketio, settings, mongo, } getOrCreateCluster(top) { - const cluster = _.find(this.clusters, (c) => c.isSameCluster(top)); + let cluster = _.find(this.clusters, (c) => c.isSameCluster(top)); + + if (_.isNil(cluster)) { + cluster = new Cluster(top); - if (_.isNil(cluster)) - this.clusters.push(new Cluster(top)); + this.clusters.push(cluster); + } return cluster; } @@ -230,8 +240,17 @@ module.exports.factory = function(_, fs, path, JSZip, socketio, settings, mongo, this._browsersHnd.agentStats(token); }); } - else - cluster.update(top); + else { + const changed = !cluster.same(top); + + if (changed) { + cluster.update(top); + + _.forEach(tokens, (token) => { + 
this._browsersHnd.clusterChanged(token, cluster); + }); + } + } }); sock.on('cluster:collector', (top) => { diff --git a/modules/web-console/backend/app/browsersHandler.js b/modules/web-console/backend/app/browsersHandler.js index 8b1385db94491..7ae247bb7c743 100644 --- a/modules/web-console/backend/app/browsersHandler.js +++ b/modules/web-console/backend/app/browsersHandler.js @@ -124,6 +124,12 @@ module.exports = { .then((stat) => _.forEach(socks, (sock) => sock.emit('agents:stat', stat))); } + clusterChanged(token, cluster) { + const socks = this._browserSockets.get(token); + + _.forEach(socks, (sock) => sock.emit('cluster:changed', cluster)); + } + emitNotification(sock) { sock.emit('user:notifications', this.notification); } @@ -224,6 +230,7 @@ module.exports = { this.registerVisorTask('queryClose', internalVisor('query.VisorQueryCleanupTask'), 'java.util.Map', 'java.util.UUID', 'java.util.Set'); this.registerVisorTask('queryCloseX2', internalVisor('query.VisorQueryCleanupTask'), internalVisor('query.VisorQueryCleanupTaskArg')); + this.registerVisorTask('toggleClusterState', internalVisor('misc.VisorChangeGridActiveStateTask'), internalVisor('misc.VisorChangeGridActiveStateTaskArg')); // Return command result from grid to browser. sock.on('node:visor', (clusterId, taskId, nids, ...args) => { diff --git a/modules/web-console/backend/app/mongo.js b/modules/web-console/backend/app/mongo.js index 81076af5f25cb..e0d0a0f8aa47a 100644 --- a/modules/web-console/backend/app/mongo.js +++ b/modules/web-console/backend/app/mongo.js @@ -79,6 +79,7 @@ const defineSchema = (passportMongo, mongoose) => { DUPLICATE_KEY_ERROR: 11000, DUPLICATE_KEY_UPDATE_ERROR: 11001 }; + // Define Account model. 
result.Account = mongoose.model('Account', AccountSchema); diff --git a/modules/web-console/backend/package.json b/modules/web-console/backend/package.json index f0b2b5ebb8f88..ba442f97f288e 100644 --- a/modules/web-console/backend/package.json +++ b/modules/web-console/backend/package.json @@ -68,7 +68,8 @@ "passport-local": "1.0.0", "passport-local-mongoose": "4.0.0", "passport.socketio": "3.7.0", - "socket.io": "1.7.3" + "socket.io": "1.7.3", + "uuid": "3.1.0" }, "devDependencies": { "chai": "4.1.0", diff --git a/modules/web-console/frontend/app/app.js b/modules/web-console/frontend/app/app.js index ca678fcacd295..f367d3e5d1381 100644 --- a/modules/web-console/frontend/app/app.js +++ b/modules/web-console/frontend/app/app.js @@ -22,7 +22,6 @@ import './app.config'; import './modules/form/form.module'; import './modules/agent/agent.module'; -import './modules/sql/sql.module'; import './modules/nodes/nodes.module'; import './modules/demo/Demo.module'; @@ -113,7 +112,6 @@ import resetPassword from './controllers/reset-password.controller'; // Components import igniteListOfRegisteredUsers from './components/list-of-registered-users'; import IgniteActivitiesUserDialog from './components/activities-user-dialog'; -import clusterSelect from './components/cluster-select'; import './components/input-dialog'; import webConsoleHeader from './components/web-console-header'; import webConsoleFooter from './components/web-console-footer'; @@ -123,12 +121,14 @@ import userNotifications from './components/user-notifications'; import pageConfigure from './components/page-configure'; import pageConfigureBasic from './components/page-configure-basic'; import pageConfigureAdvanced from './components/page-configure-advanced'; +import pageQueries from './components/page-queries'; import gridColumnSelector from './components/grid-column-selector'; import gridItemSelected from './components/grid-item-selected'; import bsSelectMenu from './components/bs-select-menu'; import 
protectFromBsSelectRender from './components/protect-from-bs-select-render'; import uiGridHovering from './components/ui-grid-hovering'; import listEditable from './components/list-editable'; +import clusterSelector from './components/cluster-selector'; import igniteServices from './services'; @@ -168,7 +168,6 @@ angular.module('ignite-console', [ 'ignite-console.branding', 'ignite-console.socket', 'ignite-console.agent', - 'ignite-console.sql', 'ignite-console.nodes', 'ignite-console.demo', // States. @@ -197,6 +196,7 @@ angular.module('ignite-console', [ pageConfigure.name, pageConfigureBasic.name, pageConfigureAdvanced.name, + pageQueries.name, gridColumnSelector.name, gridItemSelected.name, bsSelectMenu.name, @@ -205,6 +205,7 @@ angular.module('ignite-console', [ AngularStrapTooltip.name, AngularStrapSelect.name, listEditable.name, + clusterSelector.name, // Ignite modules. IgniteModules.name ]) @@ -231,7 +232,6 @@ angular.module('ignite-console', [ .directive('igniteOnFocusOut', igniteOnFocusOut) .directive('igniteRestoreInputFocus', igniteRestoreInputFocus) .directive('igniteListOfRegisteredUsers', igniteListOfRegisteredUsers) -.directive('igniteClusterSelect', clusterSelect) .directive('btnIgniteLinkDashedSuccess', btnIgniteLink) .directive('btnIgniteLinkDashedSecondary', btnIgniteLink) // Services. 
diff --git a/modules/web-console/frontend/app/components/bs-select-menu/style.scss b/modules/web-console/frontend/app/components/bs-select-menu/style.scss index 870b1bf19d7ed..ccf33a36110ef 100644 --- a/modules/web-console/frontend/app/components/bs-select-menu/style.scss +++ b/modules/web-console/frontend/app/components/bs-select-menu/style.scss @@ -88,7 +88,7 @@ } & > li > .bssm-item-button__active { - background-color: #eeeeee; + background-color: #e5f2f9; } } -} \ No newline at end of file +} diff --git a/modules/web-console/frontend/app/components/cluster-select/cluster-select.controller.js b/modules/web-console/frontend/app/components/cluster-select/cluster-select.controller.js deleted file mode 100644 index a2d8e1e664edd..0000000000000 --- a/modules/web-console/frontend/app/components/cluster-select/cluster-select.controller.js +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -export default class { - static $inject = ['AgentManager']; - - constructor(agentMgr) { - const ctrl = this; - - ctrl.counter = 1; - - ctrl.cluster = null; - ctrl.clusters = []; - - agentMgr.connectionSbj.subscribe({ - next: ({cluster, clusters}) => { - if (_.isEmpty(clusters)) - return ctrl.clusters.length = 0; - - const removed = _.differenceBy(ctrl.clusters, clusters, 'id'); - - if (_.nonEmpty(removed)) - _.pullAll(ctrl.clusters, removed); - - const added = _.differenceBy(clusters, ctrl.clusters, 'id'); - - _.forEach(added, (cluster) => { - ctrl.clusters.push({ - id: cluster.id, - connected: true, - click: () => { - if (cluster.id === _.get(ctrl, 'cluster.id')) - return; - - if (_.get(ctrl, 'cluster.connected')) { - agentMgr.saveToStorage(cluster); - - window.open(window.location.href, '_blank'); - } - else - ctrl.cluster = _.find(ctrl.clusters, {id: cluster.id}); - } - }); - }); - - ctrl.cluster = cluster; - } - }); - } -} diff --git a/modules/web-console/frontend/app/components/cluster-select/cluster-select.pug b/modules/web-console/frontend/app/components/cluster-select/cluster-select.pug deleted file mode 100644 index eb46e2625bdb4..0000000000000 --- a/modules/web-console/frontend/app/components/cluster-select/cluster-select.pug +++ /dev/null @@ -1,47 +0,0 @@ -//- - Licensed to the Apache Software Foundation (ASF) under one or more - contributor license agreements. See the NOTICE file distributed with - this work for additional information regarding copyright ownership. - The ASF licenses this file to You under the Apache License, Version 2.0 - (the "License"); you may not use this file except in compliance with - the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - --var clusterName = 'Cluster {{ ctrl.cluster.id | id8 }}' - -ul.nav - li.disabled(ng-if='ctrl.clusters.length === 0') - a(ng-if='!ctrl.cluster') - i.icon-cluster - label.padding-left-dflt(bs-tooltip='' data-placement='bottom' data-title='Check that Web Agent(s) started and connected to cluster(s)') No clusters available - a(ng-if='ctrl.cluster') - i.icon-danger - label.padding-left-dflt(bs-tooltip='' data-placement='bottom' data-title='Connection to cluster was lost') #{clusterName} - - li(ng-if='ctrl.clusters.length === 1 && ctrl.cluster.connected') - a - i.icon-cluster - label.padding-left-dflt #{clusterName} - - li(ng-if='ctrl.clusters.length > 1 || ctrl.clusters.length === 1 && !ctrl.cluster.connected') - a.dropdown-toggle(bs-dropdown='' data-placement='bottom-left' data-trigger='hover focus' data-container='self' ng-click='$event.stopPropagation()' aria-haspopup='true' aria-expanded='expanded') - i(ng-class='{"icon-cluster": ctrl.cluster.connected, "icon-danger": !ctrl.cluster.connected}') - label.padding-left-dflt #{clusterName} - span.caret - - ul.dropdown-menu(role='menu') - li(ng-repeat='item in ctrl.clusters' ng-class='{active: ctrl.cluster === item}') - div(ng-click='item.click()') - i.icon-cluster.pull-left(style='margin: 0; padding-left: 10px;') - div: a Cluster {{ item.id | id8 }} - -i.icon-help(bs-tooltip='' data-placement='bottom' data-html=true - data-title='Multi-Cluster Support
        \ - More info') diff --git a/modules/web-console/frontend/app/components/cluster-select/cluster-select.scss b/modules/web-console/frontend/app/components/cluster-selector/component.js similarity index 77% rename from modules/web-console/frontend/app/components/cluster-select/cluster-select.scss rename to modules/web-console/frontend/app/components/cluster-selector/component.js index 189ef50291171..f6141d9baf2b7 100644 --- a/modules/web-console/frontend/app/components/cluster-select/cluster-select.scss +++ b/modules/web-console/frontend/app/components/cluster-selector/component.js @@ -15,16 +15,11 @@ * limitations under the License. */ -ignite-cluster-select { - @import "./../../../public/stylesheets/variables.scss"; +import template from './template.pug'; +import controller from './controller'; +import './style.scss'; - display: flex; - flex-direction: row; - align-items: center; - - .icon-help { - margin-left: 4px; - - color: $text-color; - } -} +export default { + template, + controller +}; diff --git a/modules/web-console/frontend/app/components/cluster-selector/controller.js b/modules/web-console/frontend/app/components/cluster-selector/controller.js new file mode 100644 index 0000000000000..6a863579ad4df --- /dev/null +++ b/modules/web-console/frontend/app/components/cluster-selector/controller.js @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +export default class { + static $inject = ['$scope', 'AgentManager', 'IgniteConfirm']; + + constructor($scope, agentMgr, Confirm) { + Object.assign(this, { $scope, agentMgr, Confirm }); + + this.clusters = []; + this.isDemo = agentMgr.isDemoMode(); + } + + $onInit() { + this.clusters$ = this.agentMgr.connectionSbj + .do(({ cluster, clusters }) => { + this.cluster = cluster; + this.clusters = clusters; + }) + .subscribe(() => {}); + } + + $onDestroy() { + this.clusters$.unsubscribe(); + } + + change() { + this.agentMgr.switchCluster(this.cluster); + } + + toggle($event) { + $event.preventDefault(); + + const toggleClusterState = () => { + this.inProgress = true; + + return this.agentMgr.toggleClusterState() + .finally(() => this.inProgress = false); + }; + + if (this.cluster.active) { + return this.Confirm.confirm('Are you sure you want to deactivate cluster?') + .then(() => toggleClusterState()); + } + + return toggleClusterState(); + } +} diff --git a/modules/web-console/frontend/app/components/cluster-select/index.js b/modules/web-console/frontend/app/components/cluster-selector/index.js similarity index 74% rename from modules/web-console/frontend/app/components/cluster-select/index.js rename to modules/web-console/frontend/app/components/cluster-selector/index.js index 607b0db76dc47..2bdbe44cefdaf 100644 --- a/modules/web-console/frontend/app/components/cluster-select/index.js +++ b/modules/web-console/frontend/app/components/cluster-selector/index.js @@ -15,15 +15,9 @@ * limitations under the License. 
*/ -import template from './cluster-select.pug'; -import './cluster-select.scss'; -import controller from './cluster-select.controller'; +import angular from 'angular'; +import component from './component'; -export default [() => { - return { - restrict: 'E', - template, - controller, - controllerAs: 'ctrl' - }; -}]; +export default angular + .module('ignite-console.cluster-selector', []) + .component('clusterSelector', component); diff --git a/modules/web-console/frontend/app/components/cluster-selector/style.scss b/modules/web-console/frontend/app/components/cluster-selector/style.scss new file mode 100644 index 0000000000000..966be9991002c --- /dev/null +++ b/modules/web-console/frontend/app/components/cluster-selector/style.scss @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +cluster-selector { + @import "./../../../public/stylesheets/variables.scss"; + + position: relative; + top: 2px; + + display: flex; + align-items: center; + justify-content: space-between; + + & > .btn-ignite { + border-radius: 9px; + min-height: 0; + font-size: 12px; + font-weight: bold; + line-height: 17px; + padding-top: 0; + padding-bottom: 0; + + button { + font-weight: normal; + margin: 0 !important; + } + } + + .cluster-selector--state { + width: 85px; + } + + div { + margin: 0 10px 0 20px; + font-family: Roboto; + font-size: 12px; + } + + div:last-child { + margin-left: 10px; + color: #EE2B27; + } + + [ignite-icon='info'] { + margin-left: 7px; + color: $ignite-brand-success; + } + + .bs-select-menu { + color: $text-color; + } +} diff --git a/modules/web-console/frontend/app/components/cluster-selector/template.pug b/modules/web-console/frontend/app/components/cluster-selector/template.pug new file mode 100644 index 0000000000000..c97a69809de2d --- /dev/null +++ b/modules/web-console/frontend/app/components/cluster-selector/template.pug @@ -0,0 +1,75 @@ +//- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +include /app/helpers/jade/mixins + +button.btn-ignite.btn-ignite--success( + data-ng-if='$ctrl.isDemo' +) + | Demo cluster + +button.btn-ignite.btn-ignite--primary( + data-ng-if='!$ctrl.isDemo && $ctrl.clusters.length == 0' +) + | No clusters available + +button.btn-ignite.btn-ignite--primary( + data-ng-if='!$ctrl.isDemo && $ctrl.clusters.length == 1' +) + | {{ $ctrl.cluster.name }} + +div.btn-ignite.btn-ignite--primary( + data-ng-if='!$ctrl.isDemo && $ctrl.clusters.length > 1' + + data-ng-model='$ctrl.cluster' + + bs-select='' + bs-options='item as item.name for item in $ctrl.clusters' + data-trigger='hover focus' + data-container='self' + + data-ng-change='$ctrl.change()' + + protect-from-bs-select-render +) + span(ng-if='!$ctrl.cluster') No clusters available + span(ng-if='$ctrl.cluster') {{ $ctrl.cluster.name }} + span.icon-right.fa.fa-caret-down + +svg( + ng-if='!$ctrl.isDemo' + ignite-icon='info' + bs-tooltip='' + data-title='Multi-Cluster Support
        \ + More info' + data-placement='bottom' +) + +.cluster-selector--state(ng-if='!$ctrl.isDemo && $ctrl.cluster') + | Cluster {{ $ctrl.cluster.active ? 'active' : 'inactive' }} + ++switcher()( + ng-if='!$ctrl.isDemo && $ctrl.cluster' + ng-click='$ctrl.toggle($event)' + ng-checked='$ctrl.cluster.active' + ng-disabled='$ctrl.inProgress' + + tip='Toggle cluster active state' + is-in-progress='{{ $ctrl.inProgress }}' +) + +div(ng-if='$ctrl.inProgress') + | {{ !$ctrl.cluster.active ? 'Activating...' : 'Deactivating...' }} diff --git a/modules/web-console/frontend/app/components/list-editable/controller.js b/modules/web-console/frontend/app/components/list-editable/controller.js index bc864ce7cbb5e..7757d96eb8dd6 100644 --- a/modules/web-console/frontend/app/components/list-editable/controller.js +++ b/modules/web-console/frontend/app/components/list-editable/controller.js @@ -21,7 +21,7 @@ export default class { static $inject = ['$animate', '$element', '$transclude']; constructor($animate, $element, $transclude) { - $animate.enabled(false, $element); + $animate.enabled($element, false); this.hasItemView = $transclude.isSlotFilled('itemView'); diff --git a/modules/web-console/frontend/app/modules/sql/Notebook.data.js b/modules/web-console/frontend/app/components/page-queries/Notebook.data.js similarity index 100% rename from modules/web-console/frontend/app/modules/sql/Notebook.data.js rename to modules/web-console/frontend/app/components/page-queries/Notebook.data.js diff --git a/modules/web-console/frontend/app/modules/sql/Notebook.service.js b/modules/web-console/frontend/app/components/page-queries/Notebook.service.js similarity index 100% rename from modules/web-console/frontend/app/modules/sql/Notebook.service.js rename to modules/web-console/frontend/app/components/page-queries/Notebook.service.js diff --git a/modules/web-console/frontend/app/modules/sql/sql.controller.js b/modules/web-console/frontend/app/components/page-queries/controller.js similarity 
index 95% rename from modules/web-console/frontend/app/modules/sql/sql.controller.js rename to modules/web-console/frontend/app/components/page-queries/controller.js index a2ad912deec46..dba0269b96c5c 100644 --- a/modules/web-console/frontend/app/modules/sql/sql.controller.js +++ b/modules/web-console/frontend/app/components/page-queries/controller.js @@ -15,6 +15,16 @@ * limitations under the License. */ +import 'rxjs/add/operator/mergeMap'; +import 'rxjs/add/operator/merge'; +import 'rxjs/add/operator/switchMap'; +import 'rxjs/add/operator/exhaustMap'; +import 'rxjs/add/operator/distinctUntilChanged'; + +import { fromPromise } from 'rxjs/observable/fromPromise'; +import { timer } from 'rxjs/observable/timer'; +import { defer } from 'rxjs/observable/defer'; + import paragraphRateTemplateUrl from 'views/sql/paragraph-rate.tpl.pug'; import cacheMetadataTemplateUrl from 'views/sql/cache-metadata.tpl.pug'; import chartSettingsTemplateUrl from 'views/sql/chart-settings.tpl.pug'; @@ -211,33 +221,49 @@ class Paragraph { checkScanInProgress(showLocal = false) { return this.scanningInProgress && (this.localQueryMode === showLocal); } + + cancelRefresh($interval) { + if (this.rate && this.rate.stopTime) { + $interval.cancel(this.rate.stopTime); + + delete this.rate.stopTime; + } + } + + reset($interval) { + this.meta = []; + this.chartColumns = []; + this.chartKeyCols = []; + this.chartValCols = []; + this.error.root = {}; + this.error.message = ''; + this.rows = []; + this.duration = 0; + + this.cancelRefresh($interval); + } } // Controller for SQL notebook screen. 
-export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', '$animate', '$location', '$anchorScroll', '$state', '$filter', '$modal', '$popover', 'IgniteLoading', 'IgniteLegacyUtils', 'IgniteMessages', 'IgniteConfirm', 'AgentManager', 'IgniteChartColors', 'IgniteNotebook', 'IgniteNodes', 'uiGridExporterConstants', 'IgniteVersion', 'IgniteActivitiesData', 'JavaTypes', 'IgniteCopyToClipboard', - function($root, $scope, $http, $q, $timeout, $interval, $animate, $location, $anchorScroll, $state, $filter, $modal, $popover, Loading, LegacyUtils, Messages, Confirm, agentMgr, IgniteChartColors, Notebook, Nodes, uiGridExporterConstants, Version, ActivitiesData, JavaTypes, IgniteCopyToClipboard) { +export default class { + static $inject = ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', '$animate', '$location', '$anchorScroll', '$state', '$filter', '$modal', '$popover', 'IgniteLoading', 'IgniteLegacyUtils', 'IgniteMessages', 'IgniteConfirm', 'AgentManager', 'IgniteChartColors', 'IgniteNotebook', 'IgniteNodes', 'uiGridExporterConstants', 'IgniteVersion', 'IgniteActivitiesData', 'JavaTypes', 'IgniteCopyToClipboard']; + + constructor($root, $scope, $http, $q, $timeout, $interval, $animate, $location, $anchorScroll, $state, $filter, $modal, $popover, Loading, LegacyUtils, Messages, Confirm, agentMgr, IgniteChartColors, Notebook, Nodes, uiGridExporterConstants, Version, ActivitiesData, JavaTypes, IgniteCopyToClipboard) { const $ctrl = this; + Object.assign(this, { $root, $scope, $http, $q, $timeout, $interval, $animate, $location, $anchorScroll, $state, $filter, $modal, $popover, Loading, LegacyUtils, Messages, Confirm, agentMgr, IgniteChartColors, Notebook, Nodes, uiGridExporterConstants, Version, ActivitiesData, JavaTypes }); + // Define template urls. 
$ctrl.paragraphRateTemplateUrl = paragraphRateTemplateUrl; $ctrl.cacheMetadataTemplateUrl = cacheMetadataTemplateUrl; $ctrl.chartSettingsTemplateUrl = chartSettingsTemplateUrl; - $ctrl.demoStarted = false; - let stopTopology = null; - const _tryStopRefresh = function(paragraph) { - if (paragraph.rate && paragraph.rate.stopTime) { - $interval.cancel(paragraph.rate.stopTime); - - delete paragraph.rate.stopTime; - } + paragraph.cancelRefresh($interval); }; const _stopTopologyRefresh = () => { - $interval.cancel(stopTopology); - if ($scope.notebook && $scope.notebook.paragraphs) $scope.notebook.paragraphs.forEach((paragraph) => _tryStopRefresh(paragraph)); }; @@ -850,8 +876,8 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', /** * Update caches list. */ - const _refreshFn = () => - agentMgr.topology(true) + const _refreshFn = () => { + return agentMgr.topology(true) .then((nodes) => { $scope.caches = _.sortBy(_.reduce(nodes, (cachesAcc, node) => { _.forEach(node.caches, (cache) => { @@ -897,18 +923,35 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', } }) .catch((err) => Messages.showError(err)); + }; - const _startWatch = () => - agentMgr.startClusterWatch('Back to Configuration', 'base.configuration.tabs.advanced.clusters') - .then(() => Loading.start('sqlLoading')) - .then(_refreshFn) - .then(() => { - if (!$root.IgniteDemoMode) - Loading.finish('sqlLoading'); + const _startWatch = () => { + const awaitClusters$ = fromPromise( + agentMgr.startClusterWatch('Back to Configuration', 'base.configuration.tabs.advanced.clusters')); + + const currentCluster$ = agentMgr.connectionSbj + .distinctUntilChanged((n, o) => n.cluster === o.cluster); + + const finishLoading$ = defer(() => { + if (!$root.IgniteDemoMode) + Loading.finish('sqlLoading'); + }).take(1); + + const refreshCaches = (period) => { + return timer(0, period).exhaustMap(() => _refreshFn()).merge(finishLoading$); + }; + + this.refresh$ = 
awaitClusters$ + .mergeMap(() => currentCluster$) + .do(() => Loading.start('sqlLoading')) + .do(() => { + _.forEach($scope.notebook.paragraphs, (paragraph) => { + paragraph.reset($interval); + }); }) - .then(() => { - stopTopology = $interval(_refreshFn, 5000, 0, false); - }); + .switchMap(() => refreshCaches(5000)) + .subscribe(); + }; Notebook.find($state.params.noteId) .then((notebook) => { @@ -930,7 +973,7 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', else $scope.rebuildScrollParagraphs(); }) - .then(_startWatch) + .then(() => _startWatch()) .catch(() => { $scope.notebookLoadFailed = true; @@ -1884,4 +1927,12 @@ export default ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', } }; } -]; + + $onInit() { + + } + + $onDestroy() { + this.refresh$.unsubscribe(); + } +} diff --git a/modules/web-console/frontend/app/modules/sql/sql.module.js b/modules/web-console/frontend/app/components/page-queries/index.js similarity index 82% rename from modules/web-console/frontend/app/modules/sql/sql.module.js rename to modules/web-console/frontend/app/components/page-queries/index.js index da9955c163239..4b553ebafdfc4 100644 --- a/modules/web-console/frontend/app/modules/sql/sql.module.js +++ b/modules/web-console/frontend/app/components/page-queries/index.js @@ -15,18 +15,23 @@ * limitations under the License. 
*/ +import './style.scss'; import angular from 'angular'; +import templateUrl from './template.tpl.pug'; + import NotebookData from './Notebook.data'; import Notebook from './Notebook.service'; import notebook from './notebook.controller'; -import controller from './sql.controller'; - -import sqlTplUrl from 'app/../views/sql/sql.tpl.pug'; +import controller from './controller'; -angular.module('ignite-console.sql', [ +export default angular.module('ignite-console.sql', [ 'ui.router' ]) +.component('pageQueries', { + controller, + templateUrl +}) .config(['$stateProvider', ($stateProvider) => { // set up the states $stateProvider @@ -37,23 +42,19 @@ angular.module('ignite-console.sql', [ }) .state('base.sql.notebook', { url: '/notebook/{noteId}', - templateUrl: sqlTplUrl, + component: 'pageQueries', permission: 'query', tfMetaTags: { title: 'Query notebook' - }, - controller, - controllerAs: '$ctrl' + } }) .state('base.sql.demo', { url: '/demo', - templateUrl: sqlTplUrl, + component: 'pageQueries', permission: 'query', tfMetaTags: { title: 'SQL demo' - }, - controller, - controllerAs: '$ctrl' + } }); }]) .service('IgniteNotebookData', NotebookData) diff --git a/modules/web-console/frontend/app/modules/sql/notebook.controller.js b/modules/web-console/frontend/app/components/page-queries/notebook.controller.js similarity index 100% rename from modules/web-console/frontend/app/modules/sql/notebook.controller.js rename to modules/web-console/frontend/app/components/page-queries/notebook.controller.js diff --git a/modules/web-console/frontend/app/components/page-queries/style.scss b/modules/web-console/frontend/app/components/page-queries/style.scss new file mode 100644 index 0000000000000..70136fd55a64b --- /dev/null +++ b/modules/web-console/frontend/app/components/page-queries/style.scss @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +page-queries { + .docs-content > header { + margin: 0; + margin-bottom: 30px; + + display: flex; + flex-direction: row; + align-items: center; + + h1 { + margin: 0; + margin-right: 8px; + } + } + + .affix + .block-information { + margin-top: 90px; + } +} diff --git a/modules/web-console/frontend/views/sql/sql.tpl.pug b/modules/web-console/frontend/app/components/page-queries/template.tpl.pug similarity index 99% rename from modules/web-console/frontend/views/sql/sql.tpl.pug rename to modules/web-console/frontend/app/components/page-queries/template.tpl.pug index 98b4d68f0eb3a..b2173f7a69e4b 100644 --- a/modules/web-console/frontend/views/sql/sql.tpl.pug +++ b/modules/web-console/frontend/app/components/page-queries/template.tpl.pug @@ -357,10 +357,14 @@ mixin paragraph-query .row .docs-content + header + h1 Queries + cluster-selector + .row(ng-if='notebook' bs-affix style='margin-bottom: 20px;') +notebook-rename - ignite-information(data-title='With query notebook you can' style='margin-top: 0; margin-bottom: 30px') + ignite-information(data-title='With query notebook you can' style='margin-bottom: 30px') ul li Create any number of queries li Execute and explain SQL queries diff --git a/modules/web-console/frontend/app/modules/agent/AgentManager.service.js 
b/modules/web-console/frontend/app/modules/agent/AgentManager.service.js index 752b4f05a3124..7668132d4fde6 100644 --- a/modules/web-console/frontend/app/modules/agent/AgentManager.service.js +++ b/modules/web-console/frontend/app/modules/agent/AgentManager.service.js @@ -38,7 +38,18 @@ class ConnectionState { this.state = State.DISCONNECTED; } + updateCluster(cluster) { + this.cluster = cluster; + this.cluster.connected = !!_.find(this.clusters, {id: this.cluster.id}); + + return cluster; + } + update(demo, count, clusters) { + _.forEach(clusters, (cluster) => { + cluster.name = cluster.id; + }); + this.clusters = clusters; if (_.isNil(this.cluster)) @@ -142,6 +153,7 @@ export default class IgniteAgentManager { }; self.socket.on('connect_error', onDisconnect); + self.socket.on('disconnect', onDisconnect); self.socket.on('agents:stat', ({clusters, count}) => { @@ -152,6 +164,8 @@ export default class IgniteAgentManager { self.connectionSbj.next(conn); }); + self.socket.on('cluster:changed', (cluster) => this.updateCluster(cluster)); + self.socket.on('user:notifications', (notification) => this.UserNotifications.notification = notification); } @@ -163,6 +177,31 @@ export default class IgniteAgentManager { } } + updateCluster(newCluster) { + const state = this.connectionSbj.getValue(); + + const oldCluster = _.find(state.clusters, (cluster) => cluster.id === newCluster.id); + + if (!_.isNil(oldCluster)) { + oldCluster.nids = newCluster.nids; + oldCluster.addresses = newCluster.addresses; + oldCluster.clusterVersion = newCluster.clusterVersion; + oldCluster.active = newCluster.active; + + this.connectionSbj.next(state); + } + } + + switchCluster(cluster) { + const state = this.connectionSbj.getValue(); + + state.updateCluster(cluster); + + this.connectionSbj.next(state); + + this.saveToStorage(cluster); + } + /** * @param states * @returns {Promise} @@ -212,6 +251,8 @@ export default class IgniteAgentManager { self.connectionSbj.next(conn); + this.modalSubscription && 
this.modalSubscription.unsubscribe(); + self.modalSubscription = this.connectionSbj.subscribe({ next: ({state}) => { switch (state) { @@ -252,6 +293,8 @@ export default class IgniteAgentManager { self.connectionSbj.next(conn); + this.modalSubscription && this.modalSubscription.unsubscribe(); + self.modalSubscription = this.connectionSbj.subscribe({ next: ({state}) => { switch (state) { @@ -633,7 +676,6 @@ export default class IgniteAgentManager { } /** - /** * @param {String} nid Node id. * @param {String} cacheName Cache name. * @param {String} filter Filter text. @@ -664,4 +706,17 @@ export default class IgniteAgentManager { return this.queryScan(nid, cacheName, filter, regEx, caseSensitive, near, local, pageSz) .then(fetchResult); } + + /** + * Change cluster active state. + * + * @returns {Promise} + */ + toggleClusterState() { + const state = this.connectionSbj.getValue(); + const active = !state.cluster.active; + + return this.visorTask('toggleClusterState', null, active) + .then(() => state.updateCluster(Object.assign(state.cluster, { active }))); + } } diff --git a/modules/web-console/frontend/app/primitives/switcher/index.pug b/modules/web-console/frontend/app/primitives/switcher/index.pug index 568909404d605..8b7d009c5e067 100644 --- a/modules/web-console/frontend/app/primitives/switcher/index.pug +++ b/modules/web-console/frontend/app/primitives/switcher/index.pug @@ -17,4 +17,4 @@ mixin switcher() label.switcher--ignite input(type='checkbox')&attributes(attributes) - div(bs-tooltip=attributes.tip && '' data-title=attributes.tip data-trigger='hover') + div(bs-tooltip=attributes.tip && '' data-title=attributes.tip data-trigger='hover' data-placement='bottom') diff --git a/modules/web-console/frontend/app/primitives/switcher/index.scss b/modules/web-console/frontend/app/primitives/switcher/index.scss index 3e9cd49aa3563..fb2fd1b0d5cd8 100644 --- a/modules/web-console/frontend/app/primitives/switcher/index.scss +++ 
b/modules/web-console/frontend/app/primitives/switcher/index.scss @@ -15,31 +15,39 @@ * limitations under the License. */ -@import '../../../public/stylesheets/variables'; +@import 'public/stylesheets/variables'; label.switcher--ignite { - width: 34px; - max-width: 34px !important; - height: 20px; + $width: 34px; + $height: 20px; - line-height: 20px; + $color-inactive-primary: #c5c5c5; + $color-inactive-secondary: #ffffff; + $color-active-primary: $ignite-brand-primary; + $color-active-secondary: #ff8485; + + width: $width; + max-width: $width !important; + height: $height; + + line-height: $height; vertical-align: middle; cursor: pointer; - input[type="checkbox"] { + input[type='checkbox'] { position: absolute; opacity: 0.0; & + div { position: relative; - width: 34px; + width: $width; height: 14px; margin: 3px 0; border-radius: 8px; - background-color: #C5C5C5; + background-color: $color-inactive-primary; transition: background 0.2s ease; &:before { @@ -49,12 +57,14 @@ label.switcher--ignite { top: -3px; left: 0; - width: 20px; - height: 20px; + width: $height; + height: $height; - border: solid 1px #C5C5C5; + border-width: 1px; + border-style: solid; border-radius: 50%; - background-color: #FFF; + border-color: $color-inactive-primary; + background-color: $color-inactive-secondary; transition: all 0.12s ease; } @@ -64,17 +74,46 @@ label.switcher--ignite { } } + &[is-in-progress='true'] + div:before { + border-left-width: 2px; + border-left-color: $color-active-primary; + + animation-name: switcher--animation; + animation-duration: 1s; + animation-iteration-count: infinite; + animation-timing-function: linear; + } + &:checked + div { - background-color: #FF8485; + background-color: $color-active-secondary; &:before { content: ''; left: 14px; - border: 0; - background-color: #EE2B27; + border-color: $color-active-primary; + background-color: $color-active-primary; + } + } + + &[is-in-progress='true']:checked + div { + background-color: $color-inactive-primary; + + 
&:before { + border-color: $color-inactive-primary; + border-left-color: $color-active-primary; + background-color: $color-inactive-secondary; } } } } + +@keyframes switcher--animation { + from { + transform: rotate(0deg); + } + to { + transform: rotate(360deg); + } +} diff --git a/modules/web-console/frontend/views/includes/header-right.pug b/modules/web-console/frontend/views/includes/header-right.pug index 8eeb281d970fa..56fd1029e4056 100644 --- a/modules/web-console/frontend/views/includes/header-right.pug +++ b/modules/web-console/frontend/views/includes/header-right.pug @@ -20,8 +20,6 @@ ng-click='startDemo()' ) Start Demo -ignite-cluster-select.wch-nav-item(ng-if='!IgniteDemoMode') - .wch-nav-item(ignite-userbar) div( ng-class='{active: $state.includes("base.settings")}' diff --git a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/handlers/ClusterListener.java b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/handlers/ClusterListener.java index 8eed3dd6ed8d2..86b9ea5847d3e 100644 --- a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/handlers/ClusterListener.java +++ b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/handlers/ClusterListener.java @@ -22,44 +22,51 @@ import io.socket.client.Socket; import io.socket.emitter.Emitter; import java.net.ConnectException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.List; +import java.util.Map; import java.util.UUID; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; +import org.apache.ignite.IgniteLogger; import org.apache.ignite.console.agent.rest.RestExecutor; import org.apache.ignite.console.agent.rest.RestResult; import 
org.apache.ignite.internal.processors.rest.client.message.GridClientNodeBean; import org.apache.ignite.internal.processors.rest.protocols.http.jetty.GridJettyObjectMapper; import org.apache.ignite.internal.util.typedef.F; -import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteClosure; import org.apache.ignite.lang.IgniteProductVersion; -import org.slf4j.Logger; +import org.apache.ignite.logger.slf4j.Slf4jLogger; import org.slf4j.LoggerFactory; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_CLUSTER_NAME; import static org.apache.ignite.console.agent.AgentUtils.toJSON; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_BUILD_VER; +import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_CLIENT_MODE; +import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IPS; import static org.apache.ignite.internal.processors.rest.GridRestResponse.STATUS_SUCCESS; +import static org.apache.ignite.internal.visor.util.VisorTaskUtils.sortAddresses; +import static org.apache.ignite.internal.visor.util.VisorTaskUtils.splitAddresses; /** * API to transfer topology from Ignite cluster available by node-uri. 
*/ public class ClusterListener { /** */ - private static final Logger log = LoggerFactory.getLogger(ClusterListener.class); + private static final IgniteLogger log = new Slf4jLogger(LoggerFactory.getLogger(ClusterListener.class)); /** */ private static final String EVENT_CLUSTER_CONNECTED = "cluster:connected"; /** */ private static final String EVENT_CLUSTER_TOPOLOGY = "cluster:topology"; - + /** */ private static final String EVENT_CLUSTER_DISCONNECTED = "cluster:disconnected"; @@ -78,17 +85,6 @@ public class ClusterListener { /** */ private final BroadcastTask broadcastTask = new BroadcastTask(); - /** */ - private static final IgniteClosure NODE2ID = new IgniteClosure() { - @Override public UUID apply(GridClientNodeBean n) { - return n.getNodeId(); - } - - @Override public String toString() { - return "Node bean to node ID transformer closure."; - } - }; - /** */ private static final IgniteClosure ID2ID8 = new IgniteClosure() { @Override public String apply(UUID nid) { @@ -127,7 +123,7 @@ public ClusterListener(Socket client, RestExecutor restExecutor) { * @param nids Cluster nodes IDs. */ private void clusterConnect(Collection nids) { - log.info("Connection successfully established to cluster with nodes: {}", F.viewReadOnly(nids, ID2ID8)); + log.info("Connection successfully established to cluster with nodes: " + F.viewReadOnly(nids, ID2ID8)); client.emit(EVENT_CLUSTER_CONNECTED, toJSON(nids)); } @@ -171,7 +167,7 @@ public Emitter.Listener start() { @Override public void call(Object... args) { safeStopRefresh(); - final long timeout = args.length > 1 && args[1] instanceof Long ? (long)args[1] : DFLT_TIMEOUT; + final long timeout = args.length > 1 && args[1] instanceof Long ? 
(long)args[1] : DFLT_TIMEOUT; refreshTask = pool.scheduleWithFixedDelay(broadcastTask, 0L, timeout, TimeUnit.MILLISECONDS); } @@ -193,42 +189,108 @@ public Emitter.Listener stop() { /** */ private static class TopologySnapshot { + /** */ + private String clusterName; + /** */ private Collection nids; /** */ - private String clusterVer; + private Map addrs; + + /** */ + private Map clients; + + /** */ + private String clusterVerStr; + + /** */ + private IgniteProductVersion clusterVer; + + /** */ + private boolean active; + + /** + * Helper method to get attribute. + * + * @param attrs Map with attributes. + * @param name Attribute name. + * @return Attribute value. + */ + private static T attribute(Map attrs, String name) { + return (T)attrs.get(name); + } /** * @param nodes Nodes. */ TopologySnapshot(Collection nodes) { - nids = F.viewReadOnly(nodes, NODE2ID); + int sz = nodes.size(); + + nids = new ArrayList<>(sz); + addrs = U.newHashMap(sz); + clients = U.newHashMap(sz); + active = false; + + for (GridClientNodeBean node : nodes) { + UUID nid = node.getNodeId(); + + nids.add(nid); - Collection> vers = F.transform(nodes, - new IgniteClosure>() { - @Override public T2 apply(GridClientNodeBean bean) { - String ver = (String)bean.getAttributes().get(ATTR_BUILD_VER); + Map attrs = node.getAttributes(); - return new T2<>(ver, IgniteProductVersion.fromString(ver)); - } - }); + if (F.isEmpty(clusterName)) + clusterName = attribute(attrs, IGNITE_CLUSTER_NAME); - T2 min = Collections.min(vers, new Comparator>() { - @SuppressWarnings("ConstantConditions") - @Override public int compare(T2 o1, T2 o2) { - return o1.get2().compareTo(o2.get2()); + Boolean client = attribute(attrs, ATTR_CLIENT_MODE); + + clients.put(nid, client); + + Collection nodeAddrs = client + ? 
splitAddresses((String)attribute(attrs, ATTR_IPS)) + : node.getTcpAddresses(); + + String firstIP = F.first(sortAddresses(nodeAddrs)); + + addrs.put(nid, firstIP); + + String nodeVerStr = attribute(attrs, ATTR_BUILD_VER); + + IgniteProductVersion nodeVer = IgniteProductVersion.fromString(nodeVerStr); + + if (clusterVer == null || clusterVer.compareTo(nodeVer) > 0) { + clusterVer = nodeVer; + clusterVerStr = nodeVerStr; } - }); + } + } - clusterVer = min.get1(); + /** + * @return Cluster name. + */ + public String getClusterName() { + return clusterName; } /** * @return Cluster version. */ public String getClusterVersion() { - return clusterVer; + return clusterVerStr; + } + + /** + * @return Cluster active flag. + */ + public boolean isActive() { + return active; + } + + /** + * @param active New cluster active state. + */ + public void setActive(boolean active) { + this.active = active; } /** @@ -238,14 +300,40 @@ public Collection getNids() { return nids; } - /** */ + /** + * @return Cluster nodes with IPs. + */ + public Map getAddresses() { + return addrs; + } + + /** + * @return Cluster nodes with client mode flag. + */ + public Map getClients() { + return clients; + } + + /** + * @return Cluster version. + */ + public IgniteProductVersion clusterVersion() { + return clusterVer; + } + + /** + * @return Collection of short UUIDs. + */ Collection nid8() { return F.viewReadOnly(nids, ID2ID8); } - /** */ - boolean differentCluster(TopologySnapshot old) { - return old == null || F.isEmpty(old.nids) || Collections.disjoint(nids, old.nids); + /** + * @param prev Previous topology. + * @return {@code true} in case if current topology is a new cluster. 
+ */ + boolean differentCluster(TopologySnapshot prev) { + return prev == null || F.isEmpty(prev.nids) || Collections.disjoint(nids, prev.nids); } } @@ -264,7 +352,11 @@ private class WatchTask implements Runnable { TopologySnapshot newTop = new TopologySnapshot(nodes); if (newTop.differentCluster(top)) - log.info("Connection successfully established to cluster with nodes: {}", newTop.nid8()); + log.info("Connection successfully established to cluster with nodes: " + newTop.nid8()); + + boolean active = restExecutor.active(newTop.clusterVersion(), F.first(newTop.getNids())); + + newTop.setActive(active); top = newTop; @@ -273,7 +365,7 @@ private class WatchTask implements Runnable { break; default: - log.warn(res.getError()); + LT.warn(log, res.getError()); clusterDisconnect(); } @@ -288,7 +380,7 @@ private class WatchTask implements Runnable { } } } - + /** */ private class BroadcastTask implements Runnable { /** {@inheritDoc} */ @@ -306,7 +398,7 @@ private class BroadcastTask implements Runnable { if (top.differentCluster(newTop)) { clusterDisconnect(); - log.info("Connection successfully established to cluster with nodes: {}", newTop.nid8()); + log.info("Connection successfully established to cluster with nodes: " + newTop.nid8()); watch(); } @@ -318,7 +410,7 @@ private class BroadcastTask implements Runnable { break; default: - log.warn(res.getError()); + LT.warn(log, res.getError()); clusterDisconnect(); } diff --git a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/rest/RestExecutor.java b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/rest/RestExecutor.java index 36f38852d7735..7fbe6f922f5ea 100644 --- a/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/rest/RestExecutor.java +++ b/modules/web-console/web-agent/src/main/java/org/apache/ignite/console/agent/rest/RestExecutor.java @@ -30,6 +30,7 @@ import java.net.ConnectException; import java.util.HashMap; import java.util.Map; 
+import java.util.UUID; import java.util.concurrent.TimeUnit; import okhttp3.Dispatcher; import okhttp3.FormBody; @@ -44,6 +45,7 @@ import org.apache.ignite.internal.processors.rest.protocols.http.jetty.GridJettyObjectMapper; import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteProductVersion; import org.apache.ignite.logger.slf4j.Slf4jLogger; import org.slf4j.LoggerFactory; @@ -58,6 +60,18 @@ * API to translate REST requests to Ignite cluster. */ public class RestExecutor { + /** */ + private static final IgniteProductVersion IGNITE_2_1 = IgniteProductVersion.fromString("2.1.0"); + + /** */ + private static final IgniteProductVersion IGNITE_2_3 = IgniteProductVersion.fromString("2.3.0"); + + /** Unique Visor key to get events last order. */ + private static final String EVT_LAST_ORDER_KEY = "WEB_AGENT_" + UUID.randomUUID().toString(); + + /** Unique Visor key to get events throttle counter. */ + private static final String EVT_THROTTLE_CNTR_KEY = "WEB_AGENT_" + UUID.randomUUID().toString(); + /** */ private static final IgniteLogger log = new Slf4jLogger(LoggerFactory.getLogger(RestExecutor.class)); @@ -208,7 +222,9 @@ public RestResult execute(boolean demo, String path, Map params, } /** - * @param demo Is demo node request. + * @param demo {@code true} in case of demo mode. + * @param full Flag indicating whether to collect metrics or not. + * @throws IOException If failed to collect topology. */ public RestResult topology(boolean demo, boolean full) throws IOException { Map params = new HashMap<>(3); @@ -220,6 +236,51 @@ public RestResult topology(boolean demo, boolean full) throws IOException { return sendRequest(demo, "ignite", params, null, null); } + /** + * @param ver Cluster version. + * @param nid Node ID. + * @return Cluster active state. + * @throws IOException If failed to collect cluster active state. 
+ */ + public boolean active(IgniteProductVersion ver, UUID nid) throws IOException { + Map params = new HashMap<>(); + + boolean v23 = ver.compareTo(IGNITE_2_3) >= 0; + + if (v23) + params.put("cmd", "currentState"); + else { + params.put("cmd", "exe"); + params.put("name", "org.apache.ignite.internal.visor.compute.VisorGatewayTask"); + params.put("p1", nid); + params.put("p2", "org.apache.ignite.internal.visor.node.VisorNodeDataCollectorTask"); + params.put("p3", "org.apache.ignite.internal.visor.node.VisorNodeDataCollectorTaskArg"); + params.put("p4", false); + params.put("p5", EVT_LAST_ORDER_KEY); + params.put("p6", EVT_THROTTLE_CNTR_KEY); + + if (ver.compareTo(IGNITE_2_1) >= 0) + params.put("p7", false); + else { + params.put("p7", 10); + params.put("p8", false); + } + } + + RestResult res = sendRequest(false, "ignite", params, null, null); + + switch (res.getStatus()) { + case STATUS_SUCCESS: + if (v23) + return Boolean.valueOf(res.getData()); + + return res.getData().contains("\"active\":true"); + + default: + throw new IOException(res.getError()); + } + } + /** * REST response holder Java bean. */ From 3bf69d535f81b06e293108b1a7ce16ab3302dbf0 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Wed, 6 Dec 2017 21:33:34 +0700 Subject: [PATCH 147/243] IGNITE-7106 Optimized cache metrics collection. 
(cherry picked from commit d751010) --- .../internal/visor/cache/VisorCache.java | 94 ++++++++++++++----- .../visor/node/VisorNodeDataCollectorJob.java | 9 +- .../node/VisorNodeDataCollectorTaskArg.java | 47 +++++++++- .../visor/query/VisorQueryTaskArg.java | 2 +- 4 files changed, 125 insertions(+), 27 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCache.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCache.java index 5d7dfd3072e1f..63eb13c3cecc6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCache.java @@ -88,6 +88,9 @@ public class VisorCache extends VisorDataTransferObject { /** Cache metrics. */ private VisorCacheMetrics metrics; + /** Cache system state. */ + private boolean sys; + /** * Create data transfer object for given cache. */ @@ -99,9 +102,10 @@ public VisorCache() { * Create data transfer object for given cache. * * @param ca Internal cache. + * @param collectMetrics Collect cache metrics flag. * @throws IgniteCheckedException If failed to create data transfer object. */ - public VisorCache(IgniteEx ignite, GridCacheAdapter ca) throws IgniteCheckedException { + public VisorCache(IgniteEx ignite, GridCacheAdapter ca, boolean collectMetrics) throws IgniteCheckedException { assert ca != null; GridCacheContext cctx = ca.context(); @@ -119,28 +123,10 @@ public VisorCache(IgniteEx ignite, GridCacheAdapter ca) throws IgniteCheckedExce partitions = ca.affinity().partitions(); near = cctx.isNear(); - metrics = new VisorCacheMetrics(ignite, name); - } + if (collectMetrics) + metrics = new VisorCacheMetrics(ignite, name); - /** - * @return New instance suitable to store in history. 
- */ - public VisorCache history() { - VisorCache c = new VisorCache(); - - c.name = name; - c.mode = mode; - c.memorySize = memorySize; - c.indexesSize = indexesSize; - c.size = size; - c.nearSize = nearSize; - c.backupSize = backupSize; - c.primarySize = primarySize; - c.partitions = partitions; - c.metrics = metrics; - c.near = near; - - return c; + sys = ignite.context().cache().systemCache(name); } /** @@ -229,6 +215,13 @@ public VisorCacheMetrics getMetrics() { return metrics; } + /** + * @param metrics Cache metrics. + */ + public void setMetrics(VisorCacheMetrics metrics) { + this.metrics = metrics; + } + /** * @return {@code true} if cache has near cache. */ @@ -236,6 +229,60 @@ public boolean isNear() { return near; } + /** + * @return System cache flag. + */ + public boolean isSystem() { + return sys; + } + + /** + * @return Number of entries in cache in heap and off-heap. + */ + public long size() { + return size + (metrics != null ? metrics.getOffHeapEntriesCount() : 0L); + } + + /** + * @return Memory size allocated in off-heap. + */ + public long offHeapAllocatedSize() { + return metrics != null ? metrics.getOffHeapAllocatedSize() : 0L; + } + + /** + * @return Number of entries in heap memory. + */ + public long heapEntriesCount() { + return metrics != null ? metrics.getHeapEntriesCount() : 0L; + } + + /** + * @return Number of primary cache entries stored in off-heap memory. + */ + public long offHeapPrimaryEntriesCount() { + return metrics != null ? metrics.getOffHeapPrimaryEntriesCount() : 0L; + } + + /** + * @return Number of backup cache entries stored in off-heap memory. + */ + public long offHeapBackupEntriesCount() { + return metrics != null ? metrics.getOffHeapBackupEntriesCount() : 0L; + } + + /** + * @return Number of cache entries stored in off-heap memory. + */ + public long offHeapEntriesCount() { + return metrics != null ? 
metrics.getOffHeapEntriesCount() : 0L; + } + + /** {@inheritDoc} */ + @Override public byte getProtocolVersion() { + return V2; + } + /** {@inheritDoc} */ @Override protected void writeExternalData(ObjectOutput out) throws IOException { U.writeString(out, name); @@ -250,6 +297,7 @@ public boolean isNear() { out.writeInt(partitions); out.writeBoolean(near); out.writeObject(metrics); + out.writeBoolean(sys); } /** {@inheritDoc} */ @@ -266,6 +314,8 @@ public boolean isNear() { partitions = in.readInt(); near = in.readBoolean(); metrics = (VisorCacheMetrics)in.readObject(); + + sys = protoVer > V1 ? in.readBoolean() : metrics != null && metrics.isSystem(); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java index 99d113278b605..7e921ad9f0a7e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java @@ -157,8 +157,11 @@ protected void memoryMetrics(VisorNodeDataCollectorJobResult res) { try { List memoryMetrics = res.getMemoryMetrics(); - for (DataRegionMetrics m : ignite.dataRegionMetrics()) - memoryMetrics.add(new VisorMemoryMetrics(m)); + // TODO: Should be really fixed in IGNITE-7111. 
+ if (ignite.active()) { + for (DataRegionMetrics m : ignite.dataRegionMetrics()) + memoryMetrics.add(new VisorMemoryMetrics(m)); + } } catch (Exception e) { res.setMemoryMetricsEx(new VisorExceptionWrapper(e)); @@ -192,7 +195,7 @@ protected void caches(VisorNodeDataCollectorJobResult res, VisorNodeDataCollecto if (ca == null || !ca.context().started()) continue; - resCaches.add(new VisorCache(ignite, ca)); + resCaches.add(new VisorCache(ignite, ca, arg.isCollectCacheMetrics())); } catch(IllegalStateException | IllegalArgumentException e) { if (debug && ignite.log() != null) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskArg.java index c39318ab76afb..1876d06c1b11d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskArg.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskArg.java @@ -43,6 +43,9 @@ public class VisorNodeDataCollectorTaskArg extends VisorDataTransferObject { /** If {@code true} then collect information about system caches. */ private boolean sysCaches; + /** If {@code false} then cache metrics will not be collected. */ + private boolean collectCacheMetrics; + /** * Default constructor. */ @@ -57,17 +60,37 @@ public VisorNodeDataCollectorTaskArg() { * @param evtOrderKey Event order key, unique for Visor instance. * @param evtThrottleCntrKey Event throttle counter key, unique for Visor instance. * @param sysCaches If {@code true} then collect information about system caches. + * @param collectCacheMetrics If {@code false} then cache metrics will not be collected. 
*/ public VisorNodeDataCollectorTaskArg( boolean taskMonitoringEnabled, String evtOrderKey, String evtThrottleCntrKey, - boolean sysCaches + boolean sysCaches, + boolean collectCacheMetrics ) { this.taskMonitoringEnabled = taskMonitoringEnabled; this.evtOrderKey = evtOrderKey; this.evtThrottleCntrKey = evtThrottleCntrKey; this.sysCaches = sysCaches; + this.collectCacheMetrics = collectCacheMetrics; + } + + /** + * Create task arguments with given parameters. + * + * @param taskMonitoringEnabled If {@code true} then Visor should collect information about tasks. + * @param evtOrderKey Event order key, unique for Visor instance. + * @param evtThrottleCntrKey Event throttle counter key, unique for Visor instance. + * @param sysCaches If {@code true} then collect information about system caches. + */ + public VisorNodeDataCollectorTaskArg( + boolean taskMonitoringEnabled, + String evtOrderKey, + String evtThrottleCntrKey, + boolean sysCaches + ) { + this(taskMonitoringEnabled, evtOrderKey, evtThrottleCntrKey, sysCaches, true); } /** @@ -126,12 +149,32 @@ public void setSystemCaches(boolean sysCaches) { this.sysCaches = sysCaches; } + /** + * @return If {@code false} then cache metrics will not be collected. + */ + public boolean isCollectCacheMetrics() { + return collectCacheMetrics; + } + + /** + * @param collectCacheMetrics If {@code false} then cache metrics will not be collected. 
+ */ + public void setCollectCacheMetrics(boolean collectCacheMetrics) { + this.collectCacheMetrics = collectCacheMetrics; + } + + /** {@inheritDoc} */ + @Override public byte getProtocolVersion() { + return V2; + } + /** {@inheritDoc} */ @Override protected void writeExternalData(ObjectOutput out) throws IOException { out.writeBoolean(taskMonitoringEnabled); U.writeString(out, evtOrderKey); U.writeString(out, evtThrottleCntrKey); out.writeBoolean(sysCaches); + out.writeBoolean(collectCacheMetrics); } /** {@inheritDoc} */ @@ -140,6 +183,8 @@ public void setSystemCaches(boolean sysCaches) { evtOrderKey = U.readString(in); evtThrottleCntrKey = U.readString(in); sysCaches = in.readBoolean(); + + collectCacheMetrics = protoVer < V2 || in.readBoolean(); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTaskArg.java index dd38332f90915..e9428809d1128 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTaskArg.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTaskArg.java @@ -181,7 +181,7 @@ public boolean getLazy() { loc = in.readBoolean(); pageSize = in.readInt(); - if (protoVer == V2) + if (protoVer > V1) lazy = in.readBoolean(); } From b331fa51bea167bf01bd22ff27257e1d16ce8d5e Mon Sep 17 00:00:00 2001 From: vsisko Date: Wed, 6 Dec 2017 22:23:02 +0700 Subject: [PATCH 148/243] IGNITE-6987 Showed actual client connector configuration "Thread pool size" property. 
(cherry picked from commit 8132473) --- .../VisorExecutorServiceConfiguration.java | 18 +++++++++--------- .../config/VisorConfigurationCommand.scala | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorExecutorServiceConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorExecutorServiceConfiguration.java index 31a371386e2d6..bf2572b80d48a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorExecutorServiceConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorExecutorServiceConfiguration.java @@ -21,9 +21,9 @@ import java.io.ObjectInput; import java.io.ObjectOutput; import java.util.List; +import org.apache.ignite.configuration.ClientConnectorConfiguration; import org.apache.ignite.configuration.ConnectorConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.SqlConnectorConfiguration; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.VisorDataTransferObject; @@ -74,8 +74,8 @@ public class VisorExecutorServiceConfiguration extends VisorDataTransferObject { /** Utility cache pool size. */ private int utilityCachePoolSize; - /** SQL connector configuration pool size. */ - private int sqlConnCfgPoolSize; + /** Client connector configuration pool size. */ + private int cliConnCfgPoolSize; /** List of executor configurations. 
*/ private List executors; @@ -112,10 +112,10 @@ public VisorExecutorServiceConfiguration(IgniteConfiguration c) { svcPoolSize = c.getServiceThreadPoolSize(); utilityCachePoolSize = c.getUtilityCacheThreadPoolSize(); - SqlConnectorConfiguration scc = c.getSqlConnectorConfiguration(); + ClientConnectorConfiguration scc = c.getClientConnectorConfiguration(); if (scc != null) - sqlConnCfgPoolSize = scc.getThreadPoolSize(); + cliConnCfgPoolSize = scc.getThreadPoolSize(); executors = VisorExecutorConfiguration.list(c.getExecutorConfiguration()); } @@ -216,8 +216,8 @@ public int getUtilityCacheThreadPoolSize() { /** * @return Thread pool that is in charge of processing ODBC tasks. */ - public int getSqlConnectorConfigurationThreadPoolSize() { - return sqlConnCfgPoolSize; + public int getClientConnectorConfigurationThreadPoolSize() { + return cliConnCfgPoolSize; } /** @@ -242,7 +242,7 @@ public List getExecutors() { out.writeInt(stripedPoolSize); out.writeInt(svcPoolSize); out.writeInt(utilityCachePoolSize); - out.writeInt(sqlConnCfgPoolSize); + out.writeInt(cliConnCfgPoolSize); U.writeCollection(out, executors); } @@ -261,7 +261,7 @@ public List getExecutors() { stripedPoolSize = in.readInt(); svcPoolSize = in.readInt(); utilityCachePoolSize = in.readInt(); - sqlConnCfgPoolSize = in.readInt(); + cliConnCfgPoolSize = in.readInt(); executors = U.readList(in); } diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/config/VisorConfigurationCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/config/VisorConfigurationCommand.scala index b9b0af96f6a61..0185228877a85 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/config/VisorConfigurationCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/config/VisorConfigurationCommand.scala @@ -290,7 +290,7 @@ class VisorConfigurationCommand extends VisorConsoleCommand { execSvcT += ("Peer-to-Peer thread 
pool size", safe(execCfg.getPeerClassLoadingThreadPoolSize)) execSvcT += ("Rebalance Thread Pool size", execCfg.getRebalanceThreadPoolSize) execSvcT += ("REST thread pool size", safe(execCfg.getRestThreadPoolSize)) - execSvcT += ("SQL processor thread pool size", safe(execCfg.getSqlConnectorConfigurationThreadPoolSize)) + execSvcT += ("Client connector thread pool size", safe(execCfg.getClientConnectorConfigurationThreadPoolSize)) execSvcT.render() From a15abffcf00458fb0bb7de36e8bae77beaaa8420 Mon Sep 17 00:00:00 2001 From: vsisko Date: Wed, 6 Dec 2017 23:49:35 +0700 Subject: [PATCH 149/243] IGNITE-6897 Visor: Show valid message for caches when cluster is inactive. (cherry picked from commit 936dc95) --- .../commands/cache/VisorCacheCommand.scala | 5 + .../commands/common/VisorConsoleCommand.scala | 4 +- .../scala/org/apache/ignite/visor/visor.scala | 109 ++++++++++-------- 3 files changed, 70 insertions(+), 48 deletions(-) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala index dde3289ccb0f3..d67b65c707bf8 100755 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala @@ -209,6 +209,11 @@ class VisorCacheCommand { def cache(args: String) { if (!isConnected) adviseToConnect() + else if (!isActive) { + warn("Can not perform the operation because the cluster is inactive.", + "Note, that the cluster is considered inactive by default if Ignite Persistent Store is used to let all the nodes join the cluster.", + "To activate the cluster execute following command: top -active.") + } else { var argLst = parseArgs(args) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorConsoleCommand.scala 
b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorConsoleCommand.scala index 12e37235dd78d..8c361fb36cd9f 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorConsoleCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorConsoleCommand.scala @@ -41,8 +41,8 @@ trait VisorConsoleCommand { assert(warnMsgs != null) warnMsgs.foreach{ - case ex: Throwable => println(s"(wrn) : ${ex.getMessage}") - case line => println(s"(wrn) : $line") + case ex: Throwable => warn(ex.getMessage) + case line => warn(line) } } diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala index d2bc4abcee9d8..069e50f9f57c7 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala @@ -220,8 +220,8 @@ object visor extends VisorTag { /** Internal thread pool. */ @volatile var pool: ExecutorService = new IgniteThreadPoolExecutor( - Runtime.getRuntime().availableProcessors(), - Runtime.getRuntime().availableProcessors(), + Runtime.getRuntime.availableProcessors(), + Runtime.getRuntime.availableProcessors(), 0L, new LinkedBlockingQueue[Runnable](), new IgniteThreadFactory("visorInstance", "visor") @@ -276,7 +276,7 @@ object visor extends VisorTag { * @param cacheName Cache name to take cluster group for. * @return Cluster group with data nodes for specified cache or cluster group for specified node. 
*/ - def groupForDataNode(node: Option[ClusterNode], cacheName: String) = { + def groupForDataNode(node: Option[ClusterNode], cacheName: String): ClusterGroup = { val grp = node match { case Some(n) => ignite.cluster.forNode(n) case None => ignite.cluster.forNodeIds(executeRandom(classOf[VisorCacheNodesTask], @@ -294,7 +294,7 @@ object visor extends VisorTag { * @param cacheName Cache name. * @return Message about why node was not found. */ - def messageNodeNotFound(nodeOpt: Option[ClusterNode], cacheName: String) = nodeOpt match { + def messageNodeNotFound(nodeOpt: Option[ClusterNode], cacheName: String): String = nodeOpt match { case Some(node) => "Can't find node with specified id: " + node.id() case None => "Can't find nodes for cache: " + escapeName(cacheName) } @@ -309,7 +309,7 @@ object visor extends VisorTag { close() // This will stop the grid too if Visor is connection owner. } catch { - case ignore: Throwable => // ignore + case _: Throwable => // ignore } } }) @@ -670,14 +670,15 @@ object visor extends VisorTag { * * @param v Value to find by. */ - def mfind(@Nullable v: String) = mem.filter(t => t._2 == v).toSeq + def mfind(@Nullable v: String): Seq[(String, String)] = mem.filter(t => t._2 == v).toSeq /** * Finds variable by its value. * * @param v Value to find by. */ - def mfindHead(@Nullable v: String) = mfind(v).filterNot(entry => Seq("nl", "nr").contains(entry._1)).headOption + def mfindHead(@Nullable v: String): Option[(String, String)] = + mfind(v).filterNot(entry => Seq("nl", "nr").contains(entry._1)).headOption /** * Sets Visor console memory variable. Note that this method '''does not''' @@ -870,7 +871,7 @@ object visor extends VisorTag { * @param argLst Command arguments. * @return Error message or node ref. 
*/ - def parseNode(argLst: ArgList) = { + def parseNode(argLst: ArgList): Either[String, Option[ClusterNode]] = { val id8 = argValue("id8", argLst) val id = argValue("id", argLst) @@ -893,7 +894,7 @@ object visor extends VisorTag { Left("'id' does not match any node: " + id.get) } catch { - case e: IllegalArgumentException => Left("Invalid node 'id': " + id.get) + case _: IllegalArgumentException => Left("Invalid node 'id': " + id.get) } } else @@ -1026,7 +1027,7 @@ object visor extends VisorTag { * @param a Parameter. * @param dflt Value to return if `a` is `null`. */ - def safe(@Nullable a: Any, dflt: Any = NA) = { + def safe(@Nullable a: Any, dflt: Any = NA): String = { assert(dflt != null) if (a != null) a.toString else dflt.toString @@ -1039,7 +1040,7 @@ object visor extends VisorTag { * @param dflt Value to return if `arr` is `null` or empty. * @return String. */ - def arr2Str[T](arr: Array[T], dflt: Any = NA) = + def arr2Str[T](arr: Array[T], dflt: Any = NA): String = if (arr != null && arr.length > 0) U.compact(arr.mkString(", ")) else dflt.toString /** @@ -1048,7 +1049,7 @@ object visor extends VisorTag { * @param bool Boolean value. * @return String. */ - def bool2Str(bool: Boolean) = if (bool) "on" else "off" + def bool2Str(bool: Boolean): String = if (bool) "on" else "off" /** * Converts `java.lang.Boolean` to 'on'/'off' string. @@ -1057,7 +1058,7 @@ object visor extends VisorTag { * @param ifNull Default value in case if `bool` is `null`. * @return String. 
*/ - def javaBoolToStr(bool: JavaBoolean, ifNull: Boolean = false) = + def javaBoolToStr(bool: JavaBoolean, ifNull: Boolean = false): String = bool2Str(if (bool == null) ifNull else bool.booleanValue()) /** @@ -1132,7 +1133,7 @@ object visor extends VisorTag { null ) catch { - case e: Throwable => None + case _: Throwable => None } } @@ -1217,7 +1218,7 @@ object visor extends VisorTag { */ def has(m: Long): Boolean = toUnits(m) >= 1 - override def toString = name + override def toString: String = name } private[this] case object BYTES extends VisorMemoryUnit("b", 1) @@ -1285,29 +1286,46 @@ object visor extends VisorTag { * * @return `True` if Visor console is connected. */ - def isConnected = - isCon + def isConnected: Boolean = isCon + + /** + * Check cluster is active. + * + * @return `True` when cluster is active. + */ + def isActive: Boolean = ignite.active /** * Gets timestamp of Visor console connection. Returns `0` if Visor console is not connected. * * @return Timestamp of Visor console connection. */ - def connectTimestamp = - conTs + def connectTimestamp: Long = conTs /** * Prints properly formatted error message like: - * {{{ - * (wrn) : warning message - * }}} + * {{{ [WARN]: warning message }}} * - * @param warnMsgs Error messages to print. If `null` - this function is no-op. + * @param warnMsgs Warning messages to print */ def warn(warnMsgs: Any*) { - assert(warnMsgs != null) + if (warnMsgs != null) + warnMsgs.foreach(line => println(s"[WARN ] $line")) + else + println("[ERROR] Warning message is missing") + } - warnMsgs.foreach(line => println(s"(wrn) : $line")) + /** + * Prints properly formatted info message like: + * {{{ [INFO]: info message }}} + * + * @param infoMsgs Info messages to print. 
+ */ + def info(infoMsgs: Any*) { + if (infoMsgs != null) + infoMsgs.foreach(line => println(s"[INFO ] $line")) + else + println("[ERROR] Info message is missing") } /** @@ -1902,7 +1920,7 @@ object visor extends VisorTag { try Some(nodes(a.toInt).id) catch { - case e: Throwable => + case _: Throwable => warn("Invalid selection: " + a) None @@ -1985,7 +2003,7 @@ object visor extends VisorTag { try Some(ignite.cluster.forNodes(neighborhood(a.toInt))) catch { - case e: Throwable => + case _: Throwable => warn("Invalid selection: " + a) None @@ -2026,7 +2044,7 @@ object visor extends VisorTag { try Some(files(a.toInt).get3.getPath) catch { - case e: Throwable => + case _: Throwable => nl() warn("Invalid selection: " + a) @@ -2099,7 +2117,7 @@ object visor extends VisorTag { try Some(ids(idx.toInt - 1)) catch { - case e: Throwable => + case _: Throwable => if (idx.isEmpty) warn("Index can't be empty.") else @@ -2155,7 +2173,7 @@ object visor extends VisorTag { /** * Gets visor uptime. */ - def uptime = if (isCon) System.currentTimeMillis() - conTs else -1L + def uptime: Long = if (isCon) System.currentTimeMillis() - conTs else -1L /** * ==Command== @@ -2176,15 +2194,15 @@ object visor extends VisorTag { if (!pool.awaitTermination(5, TimeUnit.SECONDS)) pool.shutdownNow catch { - case e: InterruptedException => + case _: InterruptedException => pool.shutdownNow Thread.currentThread.interrupt() } pool = new IgniteThreadPoolExecutor( - Runtime.getRuntime().availableProcessors(), - Runtime.getRuntime().availableProcessors(), + Runtime.getRuntime.availableProcessors(), + Runtime.getRuntime.availableProcessors(), 0L, new LinkedBlockingQueue[Runnable](), new IgniteThreadFactory("visorInstance", "visor") @@ -2359,7 +2377,7 @@ object visor extends VisorTag { logStarted = false - println(": Log stopped: " + logFile.getAbsolutePath) + info("Log stopped: " + logFile.getAbsolutePath) } /** Unique Visor key to get events last order. 
*/ @@ -2404,7 +2422,7 @@ object visor extends VisorTag { try freq = freqOpt.getOrElse("10").toLong * 1000L catch { - case e: NumberFormatException => + case _: NumberFormatException => throw new IllegalArgumentException("Invalid frequency: " + freqOpt.get) } @@ -2419,7 +2437,7 @@ object visor extends VisorTag { try topFreq = topFreqOpt.getOrElse("20").toLong * 1000L catch { - case e: NumberFormatException => + case _: NumberFormatException => throw new IllegalArgumentException("Invalid topology frequency: " + topFreqOpt.get) } @@ -2489,7 +2507,7 @@ object visor extends VisorTag { } catch { case _: ClusterGroupEmptyCheckedException => // Ignore. - case e: Exception => logText("Failed to collect log.") + case _: Exception => logText("Failed to collect log.") } } } @@ -2507,7 +2525,7 @@ object visor extends VisorTag { logText("Log started.") - println(": Log started: " + logFile.getAbsolutePath) + info("Log started: " + logFile.getAbsolutePath) } /** @@ -2520,8 +2538,8 @@ object visor extends VisorTag { try drawBar(g.cluster.metrics()) catch { - case e: ClusterGroupEmptyCheckedException => logText("Topology is empty.") - case e: Exception => () + case _: ClusterGroupEmptyCheckedException => logText("Topology is empty.") + case _: Exception => () } } @@ -2577,7 +2595,7 @@ object visor extends VisorTag { ) } catch { - case e: IOException => () + case _: IOException => () } finally { U.close(out, null) @@ -2620,9 +2638,10 @@ object visor extends VisorTag { help() } - lazy val commands = cmdLst.map(_.name) ++ cmdLst.flatMap(_.aliases) + lazy val commands: Seq[String] = cmdLst.map(_.name) ++ cmdLst.flatMap(_.aliases) - def searchCmd(cmd: String) = cmdLst.find(c => c.name.equals(cmd) || (c.aliases != null && c.aliases.contains(cmd))) + def searchCmd(cmd: String): Option[VisorCommandHolder] = + cmdLst.find(c => c.name.equals(cmd) || (c.aliases != null && c.aliases.contains(cmd))) /** * Transform node ID to ID8 string. 
@@ -2650,9 +2669,7 @@ object visor extends VisorTag { * @param id8 Node ID in ID8 format. * @return Collection of nodes that has specified ID8. */ - def nodeById8(id8: String) = { - ignite.cluster.nodes().filter(n => id8.equalsIgnoreCase(nid8(n))) - } + def nodeById8(id8: String): Iterable[ClusterNode] = ignite.cluster.nodes().filter(n => id8.equalsIgnoreCase(nid8(n))) /** * Introduction of `^^` operator for `Any` type that will call `break`. From 375dfa50c5bccf1edc9f56f302b7e79931f39ddb Mon Sep 17 00:00:00 2001 From: Ilya Borisov Date: Thu, 7 Dec 2017 14:59:12 +0700 Subject: [PATCH 150/243] IGNITE-7133 Web Console: Implemented service for managing icons. (cherry picked from commit cab5cc5) --- modules/web-console/frontend/app/app.js | 4 ++- .../app/components/ignite-icon/directive.js | 10 +++--- .../app/components/ignite-icon/index.js | 2 ++ .../app/components/ignite-icon/service.js | 32 +++++++++++++++++++ 4 files changed, 41 insertions(+), 7 deletions(-) create mode 100644 modules/web-console/frontend/app/components/ignite-icon/service.js diff --git a/modules/web-console/frontend/app/app.js b/modules/web-console/frontend/app/app.js index f367d3e5d1381..dde6aa94391ad 100644 --- a/modules/web-console/frontend/app/app.js +++ b/modules/web-console/frontend/app/app.js @@ -136,6 +136,7 @@ import igniteServices from './services'; import IgniteModules from 'IgniteModules/index'; import baseTemplate from 'views/base.pug'; +import * as icons from '../public/images/icons'; angular.module('ignite-console', [ // Optional AngularJS modules. 
@@ -323,4 +324,5 @@ angular.module('ignite-console', [ .catch(Messages.showError); }; } -]); +]) +.run(['IgniteIcon', (IgniteIcon) => IgniteIcon.registerIcons(icons)]); diff --git a/modules/web-console/frontend/app/components/ignite-icon/directive.js b/modules/web-console/frontend/app/components/ignite-icon/directive.js index de087d869779b..4ce87a74f0b6d 100644 --- a/modules/web-console/frontend/app/components/ignite-icon/directive.js +++ b/modules/web-console/frontend/app/components/ignite-icon/directive.js @@ -15,16 +15,14 @@ * limitations under the License. */ -import * as icons from '../../../public/images/icons/index.js'; - export default function() { return { restrict: 'A', controller: class { - static $inject = ['$scope', '$attrs', '$sce', '$element', '$window']; + static $inject = ['$scope', '$attrs', '$sce', '$element', '$window', 'IgniteIcon']; - constructor($scope, $attrs, $sce, $element, $window) { - Object.assign(this, {$scope, $attrs, $sce, $element, $window}); + constructor($scope, $attrs, $sce, $element, $window, IgniteIcon) { + Object.assign(this, {$scope, $attrs, $sce, $element, $window, IgniteIcon}); } $onInit() { @@ -43,7 +41,7 @@ export default function() { $postLink() { this.name = this.$attrs.igniteIcon; - this.$element.attr('viewBox', icons[this.name].viewBox); + this.$element.attr('viewBox', this.IgniteIcon.getIcon(this.name).viewBox); this.render(this.getFragmentURL()); } diff --git a/modules/web-console/frontend/app/components/ignite-icon/index.js b/modules/web-console/frontend/app/components/ignite-icon/index.js index e12b5b0009538..30954f10ef833 100644 --- a/modules/web-console/frontend/app/components/ignite-icon/index.js +++ b/modules/web-console/frontend/app/components/ignite-icon/index.js @@ -18,8 +18,10 @@ import angular from 'angular'; import directive from './directive'; +import service from './service'; import './style.scss'; export default angular .module('ignite-console.ignite-icon', []) + .service('IgniteIcon', service) 
.directive('igniteIcon', directive); diff --git a/modules/web-console/frontend/app/components/ignite-icon/service.js b/modules/web-console/frontend/app/components/ignite-icon/service.js new file mode 100644 index 0000000000000..e142a2d480639 --- /dev/null +++ b/modules/web-console/frontend/app/components/ignite-icon/service.js @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +export default class IgniteIcon { + _icons = {}; + + registerIcons(icons) { + return Object.assign(this._icons, icons); + } + + getIcon(name) { + return this._icons[name]; + } + + getAllIcons() { + return this._icons; + } +} From 2f1b749363310ee8a63f5346aa573e45c633ec40 Mon Sep 17 00:00:00 2001 From: vsisko Date: Mon, 11 Dec 2017 11:55:06 +0700 Subject: [PATCH 151/243] WC-253 Web Console: Show user-friendly error message. 
(cherry picked from commit 592fb33) --- .../frontend/app/services/Messages.service.js | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/modules/web-console/frontend/app/services/Messages.service.js b/modules/web-console/frontend/app/services/Messages.service.js index 523adaecc326f..6a6b9d316cc4f 100644 --- a/modules/web-console/frontend/app/services/Messages.service.js +++ b/modules/web-console/frontend/app/services/Messages.service.js @@ -27,8 +27,13 @@ export default ['IgniteMessages', ['$alert', ($alert) => { if (err.hasOwnProperty('data')) err = err.data; - if (err.hasOwnProperty('message')) - return prefix + err.message; + if (err.hasOwnProperty('message')) { + const msg = err.message; + + const errIndex = msg.indexOf(' err='); + + return prefix + (errIndex >= 0 ? msg.substring(errIndex + 5, msg.length - 1) : msg); + } if (_.nonEmpty(err.className)) { if (_.isEmpty(prefix)) From aa04bd7de414328404e3b6383a3dcba862ef8f0a Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Mon, 11 Dec 2017 15:03:33 +0700 Subject: [PATCH 152/243] IGNITE-4835 Visor CMD: Added ability to start cache rebalance. 
(cherry picked from commit 0dd16a7) --- .../ignite/visor/commands/VisorConsole.scala | 1 + .../commands/cache/VisorCacheCommand.scala | 126 ++++++++-------- .../cache/VisorCacheRebalanceCommand.scala | 134 ++++++++++++++++++ 3 files changed, 203 insertions(+), 58 deletions(-) create mode 100644 modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheRebalanceCommand.scala diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala index ce8b313f6208d..0a7bcb09f2d04 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala @@ -63,6 +63,7 @@ class VisorConsole { org.apache.ignite.visor.commands.alert.VisorAlertCommand org.apache.ignite.visor.commands.cache.VisorCacheClearCommand org.apache.ignite.visor.commands.cache.VisorCacheResetCommand + org.apache.ignite.visor.commands.cache.VisorCacheRebalanceCommand org.apache.ignite.visor.commands.cache.VisorCacheCommand org.apache.ignite.visor.commands.config.VisorConfigurationCommand org.apache.ignite.visor.commands.deploy.VisorDeployCommand diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala index d67b65c707bf8..fec5a9647fe54 100755 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala @@ -40,25 +40,27 @@ import scala.language.{implicitConversions, reflectiveCalls} * * ==Help== * {{{ - * +-----------------------------------------------------------------------------------------+ - * | cache | Prints statistics 
about caches from specified node on the entire grid. | - * | | Output sorting can be specified in arguments. | - * | | | - * | | Output abbreviations: | - * | | # Number of nodes. | - * | | H/h Number of cache hits. | - * | | M/m Number of cache misses. | - * | | R/r Number of cache reads. | - * | | W/w Number of cache writes. | - * +-----------------------------------------------------------------------------------------+ - * | cache -clear | Clears all entries from cache on all nodes. | - * +-----------------------------------------------------------------------------------------+ - * | cache -scan | List all entries in cache with specified name. | - * +-----------------------------------------------------------------------------------------+ - * | cache -stop | Stop cache with specified name. | - * +-----------------------------------------------------------------------------------------+ - * | cache -reset | Reset metrics for cache with specified name. | - * +-----------------------------------------------------------------------------------------+ + * +-------------------------------------------------------------------------------------------+ + * | cache | Prints statistics about caches from specified node on the entire grid. | + * | | Output sorting can be specified in arguments. | + * | | | + * | | Output abbreviations: | + * | | # Number of nodes. | + * | | H/h Number of cache hits. | + * | | M/m Number of cache misses. | + * | | R/r Number of cache reads. | + * | | W/w Number of cache writes. | + * +-------------------------------------------------------------------------------------------+ + * | cache -clear | Clears all entries from cache on all nodes. | + * +-------------------------------------------------------------------------------------------+ + * | cache -scan | List all entries in cache with specified name. 
| + * +-------------------------------------------------------------------------------------------+ + * | cache -stop | Stop cache with specified name. | + * +-------------------------------------------------------------------------------------------+ + * | cache -reset | Reset metrics for cache with specified name. | + * +-------------------------------------------------------------------------------------------+ + * | cache -rebalance | Re-balance partitions for cache with specified name. | + * +-------------------------------------------------------------------------------------------+ * * }}} * @@ -71,6 +73,7 @@ import scala.language.{implicitConversions, reflectiveCalls} * cache -scan -c= {-id=|id8=} {-p=} {-system} * cache -stop -c= * cache -reset -c= + * cache -rebalance -c= * }}} * * ====Arguments==== @@ -115,6 +118,8 @@ import scala.language.{implicitConversions, reflectiveCalls} * Stop cache with specified name. * -reset * Reset metrics for cache with specified name. + * -rebalance + * Re-balance partitions for cache with specified name. * -p= * Number of object to fetch from cache at once. * Valid range from 1 to 100. @@ -151,6 +156,8 @@ import scala.language.{implicitConversions, reflectiveCalls} * Stops cache with name 'cache'. * cache -reset -c=cache * Reset metrics for cache with name 'cache'. + * cache -rebalance -c=cache + * Re-balance partitions for cache with name 'cache'. * * }}} */ @@ -212,7 +219,7 @@ class VisorCacheCommand { else if (!isActive) { warn("Can not perform the operation because the cluster is inactive.", "Note, that the cluster is considered inactive by default if Ignite Persistent Store is used to let all the nodes join the cluster.", - "To activate the cluster execute following command: top -active.") + "To activate the cluster execute following command: top -activate.") } else { var argLst = parseArgs(args) @@ -258,9 +265,10 @@ class VisorCacheCommand { // Get cache stats data from all nodes. 
val aggrData = cacheData(node, cacheName, showSystem) - if (hasArgFlagIn("clear", "scan", "stop", "reset")) { + if (hasArgFlagIn("clear", "scan", "stop", "reset", "rebalance")) { if (cacheName.isEmpty) - askForCache("Select cache from:", node, showSystem && !hasArgFlagIn("clear", "stop", "reset"), aggrData) match { + askForCache("Select cache from:", node, showSystem + && !hasArgFlagIn("clear", "stop", "reset", "rebalance"), aggrData) match { case Some(name) => argLst = argLst ++ Seq("c" -> name) @@ -270,25 +278,34 @@ class VisorCacheCommand { } cacheName.foreach(name => { - if (hasArgFlag("scan", argLst)) - VisorCacheScanCommand().scan(argLst, node) - else { - if (aggrData.nonEmpty && !aggrData.exists(cache => F.eq(cache.getName, name) && cache.isSystem)) { - if (hasArgFlag("clear", argLst)) - VisorCacheClearCommand().clear(argLst, node) - else if (hasArgFlag("stop", argLst)) - VisorCacheStopCommand().stop(argLst, node) - else if (hasArgFlag("reset", argLst)) - VisorCacheResetCommand().reset(argLst, node) - } - else { - if (hasArgFlag("clear", argLst)) - warn("Clearing of system cache is not allowed: " + name) - else if (hasArgFlag("stop", argLst)) - warn("Stopping of system cache is not allowed: " + name) - else if (hasArgFlag("reset", argLst)) - warn("Reset metrics of system cache is not allowed: " + name) - } + aggrData.find(cache => F.eq(cache.getName, name)) match { + case Some(cache) => + if (!cache.isSystem) { + if (hasArgFlag("scan", argLst)) + VisorCacheScanCommand().scan(argLst, node) + else if (hasArgFlag("clear", argLst)) + VisorCacheClearCommand().clear(argLst, node) + else if (hasArgFlag("stop", argLst)) + VisorCacheStopCommand().stop(argLst, node) + else if (hasArgFlag("reset", argLst)) + VisorCacheResetCommand().reset(argLst, node) + else if (hasArgFlag("rebalance", argLst)) + VisorCacheRebalanceCommand().rebalance(argLst, node) + } + else { + if (hasArgFlag("scan", argLst)) + warn("Scan of system cache is not allowed: " + name) + else if 
(hasArgFlag("clear", argLst)) + warn("Clearing of system cache is not allowed: " + name) + else if (hasArgFlag("stop", argLst)) + warn("Stopping of system cache is not allowed: " + name) + else if (hasArgFlag("reset", argLst)) + warn("Reset metrics of system cache is not allowed: " + name) + else if (hasArgFlag("rebalance", argLst)) + warn("Re-balance partitions of system cache is not allowed: " + name) + } + case None => + warn("Cache with specified name not found: " + name) } }) @@ -709,7 +726,8 @@ object VisorCacheCommand { "cache -clear {-c=} {-id=|id8=}", "cache -scan -c= {-id=|id8=} {-p=}", "cache -stop -c=", - "cache -reset -c=" + "cache -reset -c=", + "cache -rebalance -c=" ), args = Seq( "-id8=" -> Seq( @@ -729,21 +747,12 @@ object VisorCacheCommand { "Name of the cache.", "Note you can also use '@c0' ... '@cn' variables as shortcut to ." ), - "-clear" -> Seq( - "Clears cache." - ), - "-system" -> Seq( - "Enable showing of information about system caches." - ), - "-scan" -> Seq( - "Prints list of all entries from cache." - ), - "-stop" -> Seq( - "Stop cache with specified name." - ), - "-reset" -> Seq( - "Reset metrics of cache with specified name." - ), + "-clear" -> "Clears cache.", + "-system" -> "Enable showing of information about system caches.", + "-scan" -> "Prints list of all entries from cache.", + "-stop" -> "Stop cache with specified name.", + "-reset" -> "Reset metrics of cache with specified name.", + "-rebalance" -> "Re-balance partitions for cache with specified name.", "-s=hi|mi|rd|wr|cn" -> Seq( "Defines sorting type. Sorted by:", " hi Hits.", @@ -800,7 +809,8 @@ object VisorCacheCommand { " with page of 50 items from all nodes with this cache."), "cache -scan -c=cache -id8=12345678" -> "Prints list entries from cache with name 'cache' and node '12345678' ID8.", "cache -stop -c=@c0" -> "Stop cache with name taken from 'c0' memory variable.", - "cache -reset -c=@c0" -> "Reset metrics for cache with name taken from 'c0' memory variable." 
+ "cache -reset -c=@c0" -> "Reset metrics for cache with name taken from 'c0' memory variable.", + "cache -rebalance -c=cache" -> "Re-balance partitions for cache with name 'cache'." ), emptyArgs = cmd.cache, withArgs = cmd.cache diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheRebalanceCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheRebalanceCommand.scala new file mode 100644 index 0000000000000..b416e6af8dfc3 --- /dev/null +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheRebalanceCommand.scala @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.visor.commands.cache + +import java.util.{HashSet => JavaSet} + +import org.apache.ignite.cluster.{ClusterGroupEmptyException, ClusterNode} +import org.apache.ignite.internal.visor.cache.{VisorCacheRebalanceTask, VisorCacheRebalanceTaskArg} +import org.apache.ignite.internal.visor.util.VisorTaskUtils._ +import org.apache.ignite.visor.visor._ + +import scala.language.reflectiveCalls + +/** + * ==Overview== + * Visor 'rebalance' command implementation. 
+ * + * ====Specification==== + * {{{ + * cache -rebalance -c= + * }}} + * + * ====Arguments==== + * {{{ + * + * Name of the cache. + * }}} + * + * ====Examples==== + * {{{ + * cache -rebalance -c=@c0 + * Re-balance partitions for cache with name taken from 'c0' memory variable. + * }}} + */ +class VisorCacheRebalanceCommand { + /** + * Prints error message and advise. + * + * @param errMsgs Error messages. + */ + private def scold(errMsgs: Any*) { + assert(errMsgs != null) + + warn(errMsgs: _*) + warn("Type 'help cache' to see how to use this command.") + } + + private def error(e: Exception) { + var cause: Throwable = e + + while (cause.getCause != null) + cause = cause.getCause + + scold(cause.getMessage) + } + + /** + * ===Command=== + * Re-balance partitions for cache with specified name. + * + * ===Examples=== + * cache -c=cache -rebalance + * Re-balance partitions for cache with name 'cache'. + * + * @param argLst Command arguments. + */ + def rebalance(argLst: ArgList, node: Option[ClusterNode]) { + val cacheArg = argValue("c", argLst) + + val cacheName = cacheArg match { + case None => null // default cache. + + case Some(s) if s.startsWith("@") => + warn("Can't find cache variable with specified name: " + s, + "Type 'cache' to see available cache variables." + ) + + return + + case Some(name) => name + } + + val grp = try { + groupForDataNode(node, cacheName) + } + catch { + case _: ClusterGroupEmptyException => + scold(messageNodeNotFound(node, cacheName)) + + return + } + + try { + val cacheNames = new JavaSet[String]() + cacheNames.add(cacheName) + + executeRandom(grp, classOf[VisorCacheRebalanceTask], new VisorCacheRebalanceTaskArg(cacheNames)) + + println("Visor successfully re-balance partitions for cache: " + escapeName(cacheName)) + } + catch { + case _: ClusterGroupEmptyException => scold(messageNodeNotFound(node, cacheName)) + case e: Exception => error(e) + } + } +} + +/** + * Companion object that does initialization of the command. 
+ */ +object VisorCacheRebalanceCommand { + /** Singleton command. */ + private val cmd = new VisorCacheRebalanceCommand + + /** + * Singleton. + */ + def apply(): VisorCacheRebalanceCommand = cmd +} From b5782b0261c2f8221d132114854d8038bc94577d Mon Sep 17 00:00:00 2001 From: "Andrey V. Mashenkov" Date: Tue, 5 Dec 2017 10:22:50 +0300 Subject: [PATCH 153/243] ignite-1267 Fixed job stealing so that newly joined node is able to steal jobs (cherry picked from commit 24f9087) --- .../internal/GridJobExecuteRequest.java | 133 ++++++++----- .../ignite/internal/GridTaskSessionImpl.java | 18 ++ .../ignite/internal/IgniteComputeImpl.java | 8 +- .../processors/job/GridJobProcessor.java | 8 + .../session/GridTaskSessionProcessor.java | 6 + .../processors/task/GridTaskProcessor.java | 15 +- .../task/GridTaskThreadContextKey.java | 3 + .../processors/task/GridTaskWorker.java | 2 + .../internal/GridJobStealingSelfTest.java | 7 +- .../GridMultithreadedJobStealingSelfTest.java | 177 ++++++++++++++---- 10 files changed, 284 insertions(+), 93 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridJobExecuteRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/GridJobExecuteRequest.java index fe2d6d802b8be..4357d1da07dce 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridJobExecuteRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridJobExecuteRequest.java @@ -23,6 +23,7 @@ import java.util.Collection; import java.util.Map; import java.util.UUID; +import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.compute.ComputeJob; import org.apache.ignite.compute.ComputeJobSibling; import org.apache.ignite.configuration.DeploymentMode; @@ -31,6 +32,7 @@ import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgnitePredicate; import 
org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; @@ -136,6 +138,13 @@ public class GridJobExecuteRequest implements ExecutorAwareMessage { @GridDirectCollection(UUID.class) private Collection top; + /** */ + @GridDirectTransient + private IgnitePredicate topPred; + + /** */ + private byte[] topPredBytes; + /** */ private int[] idsOfCaches; @@ -166,6 +175,8 @@ public GridJobExecuteRequest() { * @param startTaskTime Task execution start time. * @param timeout Task execution timeout. * @param top Topology. + * @param topPred Topology predicate. + * @param topPredBytes Marshalled topology predicate. * @param siblingsBytes Serialized collection of split siblings. * @param siblings Collection of split siblings. * @param sesAttrsBytes Map of session attributes. @@ -197,6 +208,8 @@ public GridJobExecuteRequest( long startTaskTime, long timeout, @Nullable Collection top, + @Nullable IgnitePredicate topPred, + byte[] topPredBytes, byte[] siblingsBytes, Collection siblings, byte[] sesAttrsBytes, @@ -216,7 +229,6 @@ public GridJobExecuteRequest( int part, @Nullable AffinityTopologyVersion topVer, @Nullable String execName) { - this.top = top; assert sesId != null; assert jobId != null; assert taskName != null; @@ -224,6 +236,7 @@ public GridJobExecuteRequest( assert job != null || jobBytes != null; assert sesAttrs != null || sesAttrsBytes != null || !sesFullSup; assert jobAttrs != null || jobAttrsBytes != null; + assert top != null || topPred != null || topPredBytes != null; assert clsLdrId != null; assert userVer != null; assert depMode != null; @@ -238,6 +251,9 @@ public GridJobExecuteRequest( this.startTaskTime = startTaskTime; this.timeout = timeout; this.top = top; + this.topVer = topVer; + this.topPred = topPred; + this.topPredBytes = topPredBytes; this.siblingsBytes = siblingsBytes; this.siblings = siblings; this.sesAttrsBytes = 
sesAttrsBytes; @@ -424,6 +440,21 @@ public boolean isForceLocalDeployment() { @Nullable public Collection topology() { return top; } + + /** + * @return Topology predicate. + */ + public IgnitePredicate getTopologyPredicate() { + return topPred; + } + + /** + * @return Marshalled topology predicate. + */ + public byte[] getTopologyPredicateBytes() { + return topPredBytes; + } + /** * @return {@code True} if session attributes are enabled. */ @@ -513,127 +544,133 @@ public AffinityTopologyVersion getTopVer() { writer.incrementState(); case 4: - if (!writer.writeBoolean("forceLocDep", forceLocDep)) + if (!writer.writeString("execName", execName)) return false; writer.incrementState(); case 5: - if (!writer.writeIntArray("idsOfCaches", idsOfCaches)) + if (!writer.writeBoolean("forceLocDep", forceLocDep)) return false; writer.incrementState(); case 6: - if (!writer.writeBoolean("internal", internal)) + if (!writer.writeIntArray("idsOfCaches", idsOfCaches)) return false; writer.incrementState(); case 7: - if (!writer.writeByteArray("jobAttrsBytes", jobAttrsBytes)) + if (!writer.writeBoolean("internal", internal)) return false; writer.incrementState(); case 8: - if (!writer.writeByteArray("jobBytes", jobBytes)) + if (!writer.writeByteArray("jobAttrsBytes", jobAttrsBytes)) return false; writer.incrementState(); case 9: - if (!writer.writeIgniteUuid("jobId", jobId)) + if (!writer.writeByteArray("jobBytes", jobBytes)) return false; writer.incrementState(); case 10: - if (!writer.writeMap("ldrParticipants", ldrParticipants, MessageCollectionItemType.UUID, MessageCollectionItemType.IGNITE_UUID)) + if (!writer.writeIgniteUuid("jobId", jobId)) return false; writer.incrementState(); case 11: - if (!writer.writeInt("part", part)) + if (!writer.writeMap("ldrParticipants", ldrParticipants, MessageCollectionItemType.UUID, MessageCollectionItemType.IGNITE_UUID)) return false; writer.incrementState(); case 12: - if (!writer.writeByteArray("sesAttrsBytes", sesAttrsBytes)) + if 
(!writer.writeInt("part", part)) return false; writer.incrementState(); case 13: - if (!writer.writeBoolean("sesFullSup", sesFullSup)) + if (!writer.writeByteArray("sesAttrsBytes", sesAttrsBytes)) return false; writer.incrementState(); case 14: - if (!writer.writeIgniteUuid("sesId", sesId)) + if (!writer.writeBoolean("sesFullSup", sesFullSup)) return false; writer.incrementState(); case 15: - if (!writer.writeByteArray("siblingsBytes", siblingsBytes)) + if (!writer.writeIgniteUuid("sesId", sesId)) return false; writer.incrementState(); case 16: - if (!writer.writeLong("startTaskTime", startTaskTime)) + if (!writer.writeByteArray("siblingsBytes", siblingsBytes)) return false; writer.incrementState(); case 17: - if (!writer.writeUuid("subjId", subjId)) + if (!writer.writeLong("startTaskTime", startTaskTime)) return false; writer.incrementState(); case 18: - if (!writer.writeString("taskClsName", taskClsName)) + if (!writer.writeUuid("subjId", subjId)) return false; writer.incrementState(); case 19: - if (!writer.writeString("taskName", taskName)) + if (!writer.writeString("taskClsName", taskClsName)) return false; writer.incrementState(); case 20: - if (!writer.writeLong("timeout", timeout)) + if (!writer.writeString("taskName", taskName)) return false; writer.incrementState(); case 21: - if (!writer.writeCollection("top", top, MessageCollectionItemType.UUID)) + if (!writer.writeLong("timeout", timeout)) return false; writer.incrementState(); case 22: - if (!writer.writeMessage("topVer", topVer)) + if (!writer.writeCollection("top", top, MessageCollectionItemType.UUID)) return false; writer.incrementState(); case 23: - if (!writer.writeString("userVer", userVer)) + if (!writer.writeByteArray("topPredBytes", topPredBytes)) return false; writer.incrementState(); case 24: - if (!writer.writeString("executorName", execName)) + if (!writer.writeMessage("topVer", topVer)) + return false; + + writer.incrementState(); + + case 25: + if (!writer.writeString("userVer", 
userVer)) return false; writer.incrementState(); @@ -688,7 +725,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 4: - forceLocDep = reader.readBoolean("forceLocDep"); + execName = reader.readString("execName"); if (!reader.isLastRead()) return false; @@ -696,7 +733,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 5: - idsOfCaches = reader.readIntArray("idsOfCaches"); + forceLocDep = reader.readBoolean("forceLocDep"); if (!reader.isLastRead()) return false; @@ -704,7 +741,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 6: - internal = reader.readBoolean("internal"); + idsOfCaches = reader.readIntArray("idsOfCaches"); if (!reader.isLastRead()) return false; @@ -712,7 +749,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 7: - jobAttrsBytes = reader.readByteArray("jobAttrsBytes"); + internal = reader.readBoolean("internal"); if (!reader.isLastRead()) return false; @@ -720,7 +757,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 8: - jobBytes = reader.readByteArray("jobBytes"); + jobAttrsBytes = reader.readByteArray("jobAttrsBytes"); if (!reader.isLastRead()) return false; @@ -728,7 +765,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 9: - jobId = reader.readIgniteUuid("jobId"); + jobBytes = reader.readByteArray("jobBytes"); if (!reader.isLastRead()) return false; @@ -736,7 +773,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 10: - ldrParticipants = reader.readMap("ldrParticipants", MessageCollectionItemType.UUID, MessageCollectionItemType.IGNITE_UUID, false); + jobId = reader.readIgniteUuid("jobId"); if (!reader.isLastRead()) return false; @@ -744,7 +781,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 11: - part = reader.readInt("part"); + ldrParticipants = reader.readMap("ldrParticipants", MessageCollectionItemType.UUID, 
MessageCollectionItemType.IGNITE_UUID, false); if (!reader.isLastRead()) return false; @@ -752,7 +789,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 12: - sesAttrsBytes = reader.readByteArray("sesAttrsBytes"); + part = reader.readInt("part"); if (!reader.isLastRead()) return false; @@ -760,7 +797,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 13: - sesFullSup = reader.readBoolean("sesFullSup"); + sesAttrsBytes = reader.readByteArray("sesAttrsBytes"); if (!reader.isLastRead()) return false; @@ -768,7 +805,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 14: - sesId = reader.readIgniteUuid("sesId"); + sesFullSup = reader.readBoolean("sesFullSup"); if (!reader.isLastRead()) return false; @@ -776,7 +813,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 15: - siblingsBytes = reader.readByteArray("siblingsBytes"); + sesId = reader.readIgniteUuid("sesId"); if (!reader.isLastRead()) return false; @@ -784,7 +821,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 16: - startTaskTime = reader.readLong("startTaskTime"); + siblingsBytes = reader.readByteArray("siblingsBytes"); if (!reader.isLastRead()) return false; @@ -792,7 +829,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 17: - subjId = reader.readUuid("subjId"); + startTaskTime = reader.readLong("startTaskTime"); if (!reader.isLastRead()) return false; @@ -800,7 +837,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 18: - taskClsName = reader.readString("taskClsName"); + subjId = reader.readUuid("subjId"); if (!reader.isLastRead()) return false; @@ -808,7 +845,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 19: - taskName = reader.readString("taskName"); + taskClsName = reader.readString("taskClsName"); if (!reader.isLastRead()) return false; @@ -816,7 +853,7 @@ public 
AffinityTopologyVersion getTopVer() { reader.incrementState(); case 20: - timeout = reader.readLong("timeout"); + taskName = reader.readString("taskName"); if (!reader.isLastRead()) return false; @@ -824,7 +861,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 21: - top = reader.readCollection("top", MessageCollectionItemType.UUID); + timeout = reader.readLong("timeout"); if (!reader.isLastRead()) return false; @@ -832,7 +869,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 22: - topVer = reader.readMessage("topVer"); + top = reader.readCollection("top", MessageCollectionItemType.UUID); if (!reader.isLastRead()) return false; @@ -840,7 +877,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 23: - userVer = reader.readString("userVer"); + topPredBytes = reader.readByteArray("topPredBytes"); if (!reader.isLastRead()) return false; @@ -848,7 +885,15 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 24: - execName = reader.readString("executorName"); + topVer = reader.readMessage("topVer"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 25: + userVer = reader.readString("userVer"); if (!reader.isLastRead()) return false; @@ -867,7 +912,7 @@ public AffinityTopologyVersion getTopVer() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 25; + return 26; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridTaskSessionImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridTaskSessionImpl.java index 458ad361cb942..ce6e8313abdd0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridTaskSessionImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridTaskSessionImpl.java @@ -27,6 +27,7 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.ignite.IgniteCheckedException; import 
org.apache.ignite.IgniteException; +import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.compute.ComputeJobSibling; import org.apache.ignite.compute.ComputeTaskSessionAttributeListener; import org.apache.ignite.compute.ComputeTaskSessionScope; @@ -38,6 +39,7 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteFuture; +import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.lang.IgniteUuid; import org.jetbrains.annotations.Nullable; @@ -108,6 +110,9 @@ public class GridTaskSessionImpl implements GridTaskSessionInternal { /** */ private final Collection top; + /** */ + private final IgnitePredicate topPred; + /** */ private final UUID subjId; @@ -124,6 +129,7 @@ public class GridTaskSessionImpl implements GridTaskSessionInternal { * @param taskClsName Task class name. * @param sesId Task session ID. * @param top Topology. + * @param topPred Topology predicate. * @param startTime Task execution start time. * @param endTime Task execution end time. * @param siblings Collection of siblings. @@ -141,6 +147,7 @@ public GridTaskSessionImpl( String taskClsName, IgniteUuid sesId, @Nullable Collection top, + @Nullable IgnitePredicate topPred, long startTime, long endTime, Collection siblings, @@ -159,6 +166,7 @@ public GridTaskSessionImpl( this.taskName = taskName; this.dep = dep; this.top = top; + this.topPred = topPred; // Note that class name might be null here if task was not explicitly // deployed. @@ -772,8 +780,18 @@ protected boolean removeCheckpoint0(GridTaskSessionInternal ses, String key) thr return ctx.checkpoint().removeCheckpoint(ses, key); } + /** + * @return Topology predicate. 
+ */ + @Nullable public IgnitePredicate getTopologyPredicate() { + return topPred; + } + /** {@inheritDoc} */ @Override public Collection getTopology() { + if (topPred != null) + return F.viewReadOnly(ctx.discovery().allNodes(), F.node2id(), topPred); + return top != null ? top : F.nodeIds(ctx.discovery().allNodes()); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteComputeImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteComputeImpl.java index 06619f977907b..8d473e6599ede 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteComputeImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteComputeImpl.java @@ -52,7 +52,7 @@ import static org.apache.ignite.internal.GridClosureCallMode.BALANCE; import static org.apache.ignite.internal.GridClosureCallMode.BROADCAST; import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_NO_FAILOVER; -import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_SUBGRID; +import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_SUBGRID_PREDICATE; import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_SUBJ_ID; import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_TASK_NAME; import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_TIMEOUT; @@ -481,7 +481,7 @@ private IgniteInternalFuture executeAsync0(String taskName, @Nullable guard(); try { - ctx.task().setThreadContextIfNotNull(TC_SUBGRID, prj.nodes()); + ctx.task().setThreadContextIfNotNull(TC_SUBGRID_PREDICATE, prj.predicate()); ctx.task().setThreadContextIfNotNull(TC_SUBJ_ID, subjId); return ctx.task().execute(taskName, arg, execName); @@ -521,7 +521,7 @@ private IgniteInternalFuture executeAsync0(Class ComputeTaskInternalFuture executeAsync0(ComputeTask task, guard(); try { - ctx.task().setThreadContextIfNotNull(TC_SUBGRID, 
prj.nodes()); + ctx.task().setThreadContextIfNotNull(TC_SUBGRID_PREDICATE, prj.predicate()); ctx.task().setThreadContextIfNotNull(TC_SUBJ_ID, subjId); return ctx.task().execute(task, arg, execName); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java index 9052543cec135..a5add4e14cbf5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java @@ -1043,6 +1043,13 @@ public void processJobExecuteRequest(ClusterNode node, final GridJobExecuteReque U.resolveClassLoader(dep.classLoader(), ctx.config())); } + IgnitePredicate topologyPred = req.getTopologyPredicate(); + + if (topologyPred == null && req.getTopologyPredicateBytes() != null) { + topologyPred = U.unmarshal(marsh, req.getTopologyPredicateBytes(), + U.resolveClassLoader(dep.classLoader(), ctx.config())); + } + // Note that we unmarshal session/job attributes here with proper class loader. 
GridTaskSessionImpl taskSes = ctx.session().createTaskSession( req.getSessionId(), @@ -1051,6 +1058,7 @@ public void processJobExecuteRequest(ClusterNode node, final GridJobExecuteReque dep, req.getTaskClassName(), req.topology(), + topologyPred, req.getStartTaskTime(), endTime, siblings, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/session/GridTaskSessionProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/session/GridTaskSessionProcessor.java index 91ccf4a3a800c..765743c612b6b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/session/GridTaskSessionProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/session/GridTaskSessionProcessor.java @@ -22,12 +22,14 @@ import java.util.UUID; import java.util.concurrent.ConcurrentMap; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.compute.ComputeJobSibling; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.GridTaskSessionImpl; import org.apache.ignite.internal.managers.deployment.GridDeployment; import org.apache.ignite.internal.processors.GridProcessorAdapter; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.lang.IgniteUuid; import org.jetbrains.annotations.Nullable; import org.jsr166.ConcurrentHashMap8; @@ -69,6 +71,7 @@ public GridTaskSessionProcessor(GridKernalContext ctx) { * @param dep Deployment. * @param taskClsName Task class name. * @param top Topology. + * @param topPred Topology predicate. * @param startTime Execution start time. * @param endTime Execution end time. * @param siblings Collection of siblings. 
@@ -86,6 +89,7 @@ public GridTaskSessionImpl createTaskSession( @Nullable GridDeployment dep, String taskClsName, @Nullable Collection top, + @Nullable IgnitePredicate topPred, long startTime, long endTime, Collection siblings, @@ -102,6 +106,7 @@ public GridTaskSessionImpl createTaskSession( taskClsName, sesId, top, + topPred, startTime, endTime, siblings, @@ -126,6 +131,7 @@ public GridTaskSessionImpl createTaskSession( taskClsName, sesId, top, + topPred, startTime, endTime, siblings, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskProcessor.java index 4606b7c551419..25a38acc094fd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskProcessor.java @@ -70,6 +70,7 @@ import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteFuture; +import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.marshaller.Marshaller; import org.apache.ignite.plugin.security.SecurityPermission; @@ -85,6 +86,7 @@ import static org.apache.ignite.internal.managers.communication.GridIoPolicy.SYSTEM_POOL; import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_SKIP_AUTH; import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_SUBGRID; +import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_SUBGRID_PREDICATE; import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_SUBJ_ID; import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_TASK_NAME; import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_TIMEOUT; @@ 
-658,12 +660,18 @@ else if (task != null) { if (log.isDebugEnabled()) log.debug("Task deployment: " + dep); - boolean fullSup = dep != null && taskCls!= null && + boolean fullSup = dep != null && taskCls != null && dep.annotation(taskCls, ComputeTaskSessionFullSupport.class) != null; - Collection nodes = (Collection)map.get(TC_SUBGRID); + Collection top = null; - Collection top = nodes != null ? F.nodeIds(nodes) : null; + final IgnitePredicate topPred = (IgnitePredicate)map.get(TC_SUBGRID_PREDICATE); + + if (topPred == null) { + final Collection nodes = (Collection)map.get(TC_SUBGRID); + + top = nodes != null ? F.nodeIds(nodes) : null; + } UUID subjId = getThreadContext(TC_SUBJ_ID); @@ -685,6 +693,7 @@ else if (task != null) { dep, taskCls == null ? null : taskCls.getName(), top, + topPred, startTime, endTime, Collections.emptyList(), diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskThreadContextKey.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskThreadContextKey.java index f0e56c731fd2c..92bcd4156430f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskThreadContextKey.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskThreadContextKey.java @@ -30,6 +30,9 @@ public enum GridTaskThreadContextKey { /** Projection for the task. */ TC_SUBGRID, + /** Projection predicate for the task. */ + TC_SUBGRID_PREDICATE, + /** Timeout in milliseconds associated with the task. 
*/ TC_TIMEOUT, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java index b94a42727da01..25f3029228195 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java @@ -1381,6 +1381,8 @@ private void sendRequest(ComputeJobResult res) { ses.getStartTime(), timeout, ses.getTopology(), + loc ? ses.getTopologyPredicate() : null, + loc ? null : U.marshal(marsh, ses.getTopologyPredicate()), loc ? null : U.marshal(marsh, ses.getJobSiblings()), loc ? ses.getJobSiblings() : null, loc ? null : U.marshal(marsh, sesAttrs), diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridJobStealingSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridJobStealingSelfTest.java index 56683b6190143..f3a19aaeccb4f 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/GridJobStealingSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/GridJobStealingSelfTest.java @@ -187,10 +187,13 @@ public void testProjectionPredicate() throws Exception { public void testProjectionPredicateInternalStealing() throws Exception { final Ignite ignite3 = startGrid(3); + final UUID node1 = ignite1.cluster().localNode().id(); + final UUID node3 = ignite3.cluster().localNode().id(); + IgnitePredicate p = new P1() { @Override public boolean apply(ClusterNode e) { - return ignite1.cluster().localNode().id().equals(e.id()) || - ignite3.cluster().localNode().id().equals(e.id()); // Limit projection with only grid1 or grid3 node. + return node1.equals(e.id()) || + node3.equals(e.id()); // Limit projection with only grid1 or grid3 node. 
} }; diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridMultithreadedJobStealingSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridMultithreadedJobStealingSelfTest.java index b64a6ad895e9a..9c5d6949c6249 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/GridMultithreadedJobStealingSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/GridMultithreadedJobStealingSelfTest.java @@ -18,12 +18,17 @@ package org.apache.ignite.internal; import java.io.Serializable; +import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCompute; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cluster.ClusterNode; @@ -40,6 +45,7 @@ import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.apache.ignite.testframework.junits.common.GridCommonTest; +import org.eclipse.jetty.util.ConcurrentHashSet; import org.jetbrains.annotations.Nullable; /** @@ -50,6 +56,9 @@ public class GridMultithreadedJobStealingSelfTest extends GridCommonAbstractTest /** */ private Ignite ignite; + /** */ + private static volatile CountDownLatch jobExecutedLatch; + /** */ public GridMultithreadedJobStealingSelfTest() { super(false /* don't start grid*/); @@ -77,6 +86,7 @@ public void testTwoJobsMultithreaded() throws Exception { final AtomicInteger stolen = new AtomicInteger(0); final AtomicInteger noneStolen = new AtomicInteger(0); + final ConcurrentHashSet nodes = new ConcurrentHashSet(); int threadsNum = 10; @@ -84,28 +94,13 @@ public void testTwoJobsMultithreaded() throws Exception { /** */ @Override 
public void run() { try { - JobStealingResult res = ignite.compute().execute(JobStealingTask.class, null); + JobStealingResult res = ignite.compute().execute(new JobStealingTask(2), null); info("Task result: " + res); - switch(res) { - case NONE_STOLEN : { - noneStolen.addAndGet(2); - break; - } - case ONE_STOLEN : { - noneStolen.addAndGet(1); - stolen.addAndGet(1); - break; - } - case BOTH_STOLEN: { - stolen.addAndGet(2); - break; - } - default: { - assert false : "Result is: " + res; - } - } + stolen.addAndGet(res.stolen); + noneStolen.addAndGet(res.nonStolen); + nodes.addAll(res.nodes); } catch (IgniteException e) { log.error("Failed to execute task.", e); @@ -119,20 +114,91 @@ public void testTwoJobsMultithreaded() throws Exception { info("Metrics [nodeId=" + g.cluster().localNode().id() + ", metrics=" + g.cluster().localNode().metrics() + ']'); - assert fail.get() == null : "Test failed with exception: " + fail.get(); + assertNull("Test failed with exception: ",fail.get()); // Total jobs number is threadsNum * 2 - assert stolen.get() + noneStolen.get() == threadsNum * 2 : "Incorrect processed jobs number"; + assertEquals("Incorrect processed jobs number",threadsNum * 2, stolen.get() + noneStolen.get()); - assert stolen.get() != 0 : "No jobs were stolen."; + assertFalse( "No jobs were stolen.",stolen.get() == 0); + + for (Ignite g : G.allGrids()) + assertTrue("Node get no jobs.", nodes.contains(g.name())); // Under these circumstances we should not have more than 2 jobs // difference. //(but muted to 4 due to very rare fails and low priority of fix) - assert Math.abs(stolen.get() - noneStolen.get()) <= 4 : "Stats [stolen=" + stolen + - ", noneStolen=" + noneStolen + ']'; + assertTrue( "Stats [stolen=" + stolen + ", noneStolen=" + noneStolen + ']', + Math.abs(stolen.get() - noneStolen.get()) <= 4); + } + + /** + * Test newly joined node can steal jobs. + * + * @throws Exception If test failed. 
+ */ + public void testJoinedNodeCanStealJobs() throws Exception { + final AtomicReference fail = new AtomicReference<>(null); + + final AtomicInteger stolen = new AtomicInteger(0); + final AtomicInteger noneStolen = new AtomicInteger(0); + final ConcurrentHashSet nodes = new ConcurrentHashSet(); + + int threadsNum = 10; + + final int jobsPerTask = 4; + + jobExecutedLatch = new CountDownLatch(threadsNum); + + final IgniteInternalFuture future = GridTestUtils.runMultiThreadedAsync(new Runnable() { + /** */ + @Override public void run() { + try { + final IgniteCompute compute = ignite.compute().withAsync(); + + compute.execute(new JobStealingTask(jobsPerTask), null); + + JobStealingResult res = (JobStealingResult)compute.future().get(); + + info("Task result: " + res); + + stolen.addAndGet(res.stolen); + noneStolen.addAndGet(res.nonStolen); + nodes.addAll(res.nodes); + } + catch (IgniteException e) { + log.error("Failed to execute task.", e); + + fail.getAndSet(e); + } + } + }, threadsNum, "JobStealingThread"); + + //Wait for first job begin execution. 
+ jobExecutedLatch.await(); + + startGrid(2); + + for (Ignite g : G.allGrids()) + info("Metrics [nodeId=" + g.cluster().localNode().id() + + ", metrics=" + g.cluster().localNode().metrics() + ']'); + + future.get(); + + assertNull("Test failed with exception: ",fail.get()); + + // Total jobs number is threadsNum * 3 + assertEquals("Incorrect processed jobs number",threadsNum * jobsPerTask, stolen.get() + noneStolen.get()); + + assertFalse( "No jobs were stolen.",stolen.get() == 0); + + for (Ignite g : G.allGrids()) + assertTrue("Node get no jobs.", nodes.contains(g.name())); + + assertTrue( "Stats [stolen=" + stolen + ", noneStolen=" + noneStolen + ']', + Math.abs(stolen.get() - 2 * noneStolen.get()) <= 6); } + /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); @@ -166,38 +232,50 @@ private static class JobStealingTask extends ComputeTaskAdapter map(List subgrid, + @Override public Map map(List subgrid, @Nullable Object arg) { assert subgrid.size() == 2 : "Invalid subgrid size: " + subgrid.size(); Map map = new HashMap<>(subgrid.size()); // Put all jobs onto local node. 
- for (int i = 0; i < subgrid.size(); i++) - map.put(new GridJobStealingJob(2000L), ignite.cluster().localNode()); + for (int i = 0; i < jobsToRun; i++) + map.put(new GridJobStealingJob(3000L), ignite.cluster().localNode()); return map; } /** {@inheritDoc} */ @Override public JobStealingResult reduce(List results) { - assert results.size() == 2; + int stolen = 0; + int nonStolen = 0; + + Set nodes = new HashSet<>(results.size()); - for (ComputeJobResult res : results) - log.info("Job result: " + res.getData()); + for (ComputeJobResult res : results) { + String data = res.getData(); - Object obj0 = results.get(0).getData(); + log.info("Job result: " + data); - if (obj0.equals(results.get(1).getData())) { - if (obj0.equals(ignite.name())) - return JobStealingResult.NONE_STOLEN; + nodes.add(data); - return JobStealingResult.BOTH_STOLEN; + if (!data.equals(ignite.name())) + stolen++; + else + nonStolen++; } - return JobStealingResult.ONE_STOLEN; + return new JobStealingResult(stolen, nonStolen, nodes); } } @@ -219,6 +297,9 @@ private static final class GridJobStealingJob extends ComputeJobAdapter { /** {@inheritDoc} */ @Override public Serializable execute() { try { + if (jobExecutedLatch != null) + jobExecutedLatch.countDown(); + Long sleep = argument(0); assert sleep != null; @@ -236,14 +317,30 @@ private static final class GridJobStealingJob extends ComputeJobAdapter { /** * Job stealing result. 
*/ - private enum JobStealingResult { + private static class JobStealingResult { + /** */ + int stolen; + /** */ - BOTH_STOLEN, + int nonStolen; /** */ - ONE_STOLEN, + Set nodes; /** */ - NONE_STOLEN + public JobStealingResult(int stolen, int nonStolen, Set nodes) { + this.stolen = stolen; + this.nonStolen = nonStolen; + this.nodes = nodes; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return "JobStealingResult{" + + "stolen=" + stolen + + ", nonStolen=" + nonStolen + + ", nodes=" + Arrays.toString(nodes.toArray()) + + '}'; + } } } \ No newline at end of file From 352d59feae38fbfcaacde86faf109c4234955cad Mon Sep 17 00:00:00 2001 From: "Andrey V. Mashenkov" Date: Mon, 11 Dec 2017 17:23:37 +0300 Subject: [PATCH 154/243] IGNITE-7157: Fixed deadlock when partition eviction run in different threads. Signed-off-by: Andrey Gura (cherry picked from commit 0cf65ad) --- .../dht/GridDhtPartitionTopologyImpl.java | 475 ++++++++++-------- 1 file changed, 255 insertions(+), 220 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java index 7abe09b5a74c9..abe276f345762 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java @@ -809,56 +809,63 @@ private GridDhtLocalPartition localPartition0(int p, boolean created = false; - lock.writeLock().lock(); + ctx.database().checkpointReadLock(); try { - loc = locParts.get(p); + lock.writeLock().lock(); - state = loc != null ? loc.state() : null; + try { + loc = locParts.get(p); - boolean belongs = partitionLocalNode(p, topVer); + state = loc != null ? 
loc.state() : null; - if (loc != null && state == EVICTED) { - try { - loc.rent(false).get(); - } - catch (IgniteCheckedException ex) { - throw new IgniteException(ex); - } + boolean belongs = partitionLocalNode(p, topVer); + + if (loc != null && state == EVICTED) { + try { + loc.rent(false).get(); + } + catch (IgniteCheckedException ex) { + throw new IgniteException(ex); + } - locParts.set(p, loc = null); + locParts.set(p, loc = null); - if (!belongs) { - throw new GridDhtInvalidPartitionException(p, "Adding entry to evicted partition " + - "(often may be caused by inconsistent 'key.hashCode()' implementation) " + - "[part=" + p + ", topVer=" + topVer + ", this.topVer=" + this.readyTopVer + ']'); + if (!belongs) { + throw new GridDhtInvalidPartitionException(p, "Adding entry to evicted partition " + + "(often may be caused by inconsistent 'key.hashCode()' implementation) " + + "[part=" + p + ", topVer=" + topVer + ", this.topVer=" + this.readyTopVer + ']'); + } + } + else if (loc != null && state == RENTING && !showRenting) { + throw new GridDhtInvalidPartitionException(p, "Adding entry to partition that is concurrently " + + "evicted [part=" + p + ", shouldBeMoving=" + loc.reload() + ", belongs=" + belongs + + ", topVer=" + topVer + ", curTopVer=" + this.readyTopVer + "]"); } - } - else if (loc != null && state == RENTING && !showRenting) { - throw new GridDhtInvalidPartitionException(p, "Adding entry to partition that is concurrently " + - "evicted [part=" + p + ", shouldBeMoving=" + loc.reload() + ", belongs=" + belongs + - ", topVer=" + topVer + ", curTopVer=" + this.readyTopVer + "]"); - } - if (loc == null) { - if (!belongs) - throw new GridDhtInvalidPartitionException(p, "Creating partition which does not belong to " + - "local node (often may be caused by inconsistent 'key.hashCode()' implementation) " + - "[part=" + p + ", topVer=" + topVer + ", this.topVer=" + this.readyTopVer + ']'); + if (loc == null) { + if (!belongs) + throw new 
GridDhtInvalidPartitionException(p, "Creating partition which does not belong to " + + "local node (often may be caused by inconsistent 'key.hashCode()' implementation) " + + "[part=" + p + ", topVer=" + topVer + ", this.topVer=" + this.readyTopVer + ']'); - locParts.set(p, loc = new GridDhtLocalPartition(ctx, grp, p)); + locParts.set(p, loc = new GridDhtLocalPartition(ctx, grp, p)); - if (updateSeq) - this.updateSeq.incrementAndGet(); + if (updateSeq) + this.updateSeq.incrementAndGet(); - created = true; + created = true; - if (log.isDebugEnabled()) - log.debug("Created local partition: " + loc); + if (log.isDebugEnabled()) + log.debug("Created local partition: " + loc); + } + } + finally { + lock.writeLock().unlock(); } } finally { - lock.writeLock().unlock(); + ctx.database().checkpointReadUnlock(); } if (created && ctx.pageStore() != null) { @@ -1544,120 +1551,127 @@ private boolean isStaleUpdate(GridDhtPartitionMap currentMap, GridDhtPartitionMa return false; } - lock.writeLock().lock(); + ctx.database().checkpointReadLock(); try { - if (stopping) - return false; - - if (!force) { - if (lastTopChangeVer.initialized() && exchId != null && lastTopChangeVer.compareTo(exchId.topologyVersion()) > 0) { - U.warn(log, "Stale exchange id for single partition map update (will ignore) [" + - "lastTopChange=" + lastTopChangeVer + - ", readTopVer=" + readyTopVer + - ", exch=" + exchId.topologyVersion() + ']'); + lock.writeLock().lock(); + try { + if (stopping) return false; + + if (!force) { + if (lastTopChangeVer.initialized() && exchId != null && lastTopChangeVer.compareTo(exchId.topologyVersion()) > 0) { + U.warn(log, "Stale exchange id for single partition map update (will ignore) [" + + "lastTopChange=" + lastTopChangeVer + + ", readTopVer=" + readyTopVer + + ", exch=" + exchId.topologyVersion() + ']'); + + return false; + } } - } - if (node2part == null) - // Create invalid partition map. 
- node2part = new GridDhtPartitionFullMap(); + if (node2part == null) + // Create invalid partition map. + node2part = new GridDhtPartitionFullMap(); - GridDhtPartitionMap cur = node2part.get(parts.nodeId()); + GridDhtPartitionMap cur = node2part.get(parts.nodeId()); - if (force) { - if (cur != null && cur.topologyVersion().initialized()) - parts.updateSequence(cur.updateSequence(), cur.topologyVersion()); - } - else if (isStaleUpdate(cur, parts)) { - U.warn(log, "Stale update for single partition map update (will ignore) [exchId=" + exchId + - ", curMap=" + cur + - ", newMap=" + parts + ']'); + if (force) { + if (cur != null && cur.topologyVersion().initialized()) + parts.updateSequence(cur.updateSequence(), cur.topologyVersion()); + } + else if (isStaleUpdate(cur, parts)) { + U.warn(log, "Stale update for single partition map update (will ignore) [exchId=" + exchId + + ", curMap=" + cur + + ", newMap=" + parts + ']'); - return false; - } + return false; + } - long updateSeq = this.updateSeq.incrementAndGet(); + long updateSeq = this.updateSeq.incrementAndGet(); - node2part.newUpdateSequence(updateSeq); + node2part.newUpdateSequence(updateSeq); - boolean changed = false; + boolean changed = false; - if (cur == null || !cur.equals(parts)) - changed = true; + if (cur == null || !cur.equals(parts)) + changed = true; - node2part.put(parts.nodeId(), parts); + node2part.put(parts.nodeId(), parts); - // During exchange diff is calculated after all messages are received and affinity initialized. - if (exchId == null && !grp.isReplicated()) { - if (readyTopVer.initialized() && readyTopVer.compareTo(diffFromAffinityVer) >= 0) { - AffinityAssignment affAssignment = grp.affinity().readyAffinity(readyTopVer); + // During exchange diff is calculated after all messages are received and affinity initialized. 
+ if (exchId == null && !grp.isReplicated()) { + if (readyTopVer.initialized() && readyTopVer.compareTo(diffFromAffinityVer) >= 0) { + AffinityAssignment affAssignment = grp.affinity().readyAffinity(readyTopVer); - // Add new mappings. - for (Map.Entry e : parts.entrySet()) { - int p = e.getKey(); + // Add new mappings. + for (Map.Entry e : parts.entrySet()) { + int p = e.getKey(); - Set diffIds = diffFromAffinity.get(p); + Set diffIds = diffFromAffinity.get(p); - if ((e.getValue() == MOVING || e.getValue() == OWNING || e.getValue() == RENTING) - && !affAssignment.getIds(p).contains(parts.nodeId())) { - if (diffIds == null) - diffFromAffinity.put(p, diffIds = U.newHashSet(3)); + if ((e.getValue() == MOVING || e.getValue() == OWNING || e.getValue() == RENTING) + && !affAssignment.getIds(p).contains(parts.nodeId())) { + if (diffIds == null) + diffFromAffinity.put(p, diffIds = U.newHashSet(3)); - if (diffIds.add(parts.nodeId())) - changed = true; - } - else { - if (diffIds != null && diffIds.remove(parts.nodeId())) { - changed = true; + if (diffIds.add(parts.nodeId())) + changed = true; + } + else { + if (diffIds != null && diffIds.remove(parts.nodeId())) { + changed = true; - if (diffIds.isEmpty()) - diffFromAffinity.remove(p); + if (diffIds.isEmpty()) + diffFromAffinity.remove(p); + } } } - } - // Remove obsolete mappings. - if (cur != null) { - for (Integer p : F.view(cur.keySet(), F0.notIn(parts.keySet()))) { - Set ids = diffFromAffinity.get(p); + // Remove obsolete mappings. 
+ if (cur != null) { + for (Integer p : F.view(cur.keySet(), F0.notIn(parts.keySet()))) { + Set ids = diffFromAffinity.get(p); - if (ids != null && ids.remove(parts.nodeId())) { - changed = true; + if (ids != null && ids.remove(parts.nodeId())) { + changed = true; - if (ids.isEmpty()) - diffFromAffinity.remove(p); + if (ids.isEmpty()) + diffFromAffinity.remove(p); + } } } - } - diffFromAffinityVer = readyTopVer; + diffFromAffinityVer = readyTopVer; + } } - } - if (readyTopVer.initialized() && readyTopVer.equals(lastTopChangeVer)) { - AffinityAssignment aff = grp.affinity().readyAffinity(readyTopVer); + if (readyTopVer.initialized() && readyTopVer.equals(lastTopChangeVer)) { + AffinityAssignment aff = grp.affinity().readyAffinity(readyTopVer); - if (exchId == null) - changed |= checkEvictions(updateSeq, aff); + if (exchId == null) + changed |= checkEvictions(updateSeq, aff); - updateRebalanceVersion(aff.assignment()); - } + updateRebalanceVersion(aff.assignment()); + } - consistencyCheck(); + consistencyCheck(); - if (log.isDebugEnabled()) - log.debug("Partition map after single update: " + fullMapString()); + if (log.isDebugEnabled()) + log.debug("Partition map after single update: " + fullMapString()); - if (changed && exchId == null) - ctx.exchange().scheduleResendPartitions(); + if (changed && exchId == null) + ctx.exchange().scheduleResendPartitions(); - return changed; + return changed; + } + finally { + lock.writeLock().unlock(); + } } finally { - lock.writeLock().unlock(); + ctx.database().checkpointReadUnlock(); } } @@ -1775,124 +1789,138 @@ private void rebuildDiff(AffinityAssignment affAssignment) { /** {@inheritDoc} */ @Override public boolean detectLostPartitions(AffinityTopologyVersion resTopVer, DiscoveryEvent discoEvt) { - lock.writeLock().lock(); + ctx.database().checkpointReadLock(); try { - if (node2part == null) - return false; + lock.writeLock().lock(); - int parts = grp.affinity().partitions(); + try { + if (node2part == null) + return false; - 
Set lost = new HashSet<>(parts); + int parts = grp.affinity().partitions(); - for (int p = 0; p < parts; p++) - lost.add(p); + Set lost = new HashSet<>(parts); - for (GridDhtPartitionMap partMap : node2part.values()) { - for (Map.Entry e : partMap.entrySet()) { - if (e.getValue() == OWNING) { - lost.remove(e.getKey()); + for (int p = 0; p < parts; p++) + lost.add(p); - if (lost.isEmpty()) - break; + for (GridDhtPartitionMap partMap : node2part.values()) { + for (Map.Entry e : partMap.entrySet()) { + if (e.getValue() == OWNING) { + lost.remove(e.getKey()); + + if (lost.isEmpty()) + break; + } } } - } - boolean changed = false; + boolean changed = false; - if (!F.isEmpty(lost)) { - PartitionLossPolicy plc = grp.config().getPartitionLossPolicy(); + if (!F.isEmpty(lost)) { + PartitionLossPolicy plc = grp.config().getPartitionLossPolicy(); - assert plc != null; + assert plc != null; - // Update partition state on all nodes. - for (Integer part : lost) { - long updSeq = updateSeq.incrementAndGet(); + // Update partition state on all nodes. + for (Integer part : lost) { + long updSeq = updateSeq.incrementAndGet(); - GridDhtLocalPartition locPart = localPartition(part, resTopVer, false, true); + GridDhtLocalPartition locPart = localPartition(part, resTopVer, false, true); - if (locPart != null) { - boolean marked = plc == PartitionLossPolicy.IGNORE ? locPart.own() : locPart.markLost(); + if (locPart != null) { + boolean marked = plc == PartitionLossPolicy.IGNORE ? 
locPart.own() : locPart.markLost(); - if (!marked && locPart.state() == RENTING) - try { - //TODO https://issues.apache.org/jira/browse/IGNITE-6433 - locPart.tryEvict(); - locPart.rent(false).get(); - } - catch (IgniteCheckedException e) { - U.error(log, "Failed to wait for RENTING partition eviction after partition LOST event", - e); - } + if (!marked && locPart.state() == RENTING) + try { + //TODO https://issues.apache.org/jira/browse/IGNITE-6433 + locPart.tryEvict(); + locPart.rent(false).get(); + } + catch (IgniteCheckedException e) { + U.error(log, "Failed to wait for RENTING partition eviction after partition LOST event", + e); + } - if (marked) - updateLocal(locPart.id(), locPart.state(), updSeq, resTopVer); + if (marked) + updateLocal(locPart.id(), locPart.state(), updSeq, resTopVer); - changed |= marked; - } - // Update map for remote node. - else if (plc != PartitionLossPolicy.IGNORE) { - for (Map.Entry e : node2part.entrySet()) { - if (e.getKey().equals(ctx.localNodeId())) - continue; - - if (e.getValue().get(part) != EVICTED) - e.getValue().put(part, LOST); + changed |= marked; + } + // Update map for remote node. 
+ else if (plc != PartitionLossPolicy.IGNORE) { + for (Map.Entry e : node2part.entrySet()) { + if (e.getKey().equals(ctx.localNodeId())) + continue; + + if (e.getValue().get(part) != EVICTED) + e.getValue().put(part, LOST); + } } - } - if (grp.eventRecordable(EventType.EVT_CACHE_REBALANCE_PART_DATA_LOST)) { - grp.addRebalanceEvent(part, - EVT_CACHE_REBALANCE_PART_DATA_LOST, - discoEvt.eventNode(), - discoEvt.type(), - discoEvt.timestamp()); + if (grp.eventRecordable(EventType.EVT_CACHE_REBALANCE_PART_DATA_LOST)) { + grp.addRebalanceEvent(part, + EVT_CACHE_REBALANCE_PART_DATA_LOST, + discoEvt.eventNode(), + discoEvt.type(), + discoEvt.timestamp()); + } } + + if (plc != PartitionLossPolicy.IGNORE) + grp.needsRecovery(true); } - if (plc != PartitionLossPolicy.IGNORE) - grp.needsRecovery(true); + return changed; + } + finally { + lock.writeLock().unlock(); } - - return changed; } finally { - lock.writeLock().unlock(); + ctx.database().checkpointReadUnlock(); } } /** {@inheritDoc} */ @Override public void resetLostPartitions(AffinityTopologyVersion resTopVer) { - lock.writeLock().lock(); + ctx.database().checkpointReadLock(); try { - long updSeq = updateSeq.incrementAndGet(); + lock.writeLock().lock(); - for (Map.Entry e : node2part.entrySet()) { - for (Map.Entry e0 : e.getValue().entrySet()) { - if (e0.getValue() != LOST) - continue; + try { + long updSeq = updateSeq.incrementAndGet(); - e0.setValue(OWNING); + for (Map.Entry e : node2part.entrySet()) { + for (Map.Entry e0 : e.getValue().entrySet()) { + if (e0.getValue() != LOST) + continue; + + e0.setValue(OWNING); - GridDhtLocalPartition locPart = localPartition(e0.getKey(), resTopVer, false); + GridDhtLocalPartition locPart = localPartition(e0.getKey(), resTopVer, false); - if (locPart != null && locPart.state() == LOST) { - boolean marked = locPart.own(); + if (locPart != null && locPart.state() == LOST) { + boolean marked = locPart.own(); - if (marked) - updateLocal(locPart.id(), locPart.state(), updSeq, 
resTopVer); + if (marked) + updateLocal(locPart.id(), locPart.state(), updSeq, resTopVer); + } } } - } - checkEvictions(updSeq, grp.affinity().readyAffinity(resTopVer)); + checkEvictions(updSeq, grp.affinity().readyAffinity(resTopVer)); - grp.needsRecovery(false); + grp.needsRecovery(false); + } + finally { + lock.writeLock().unlock(); + } } finally { - lock.writeLock().unlock(); + ctx.database().checkpointReadUnlock(); } } @@ -1930,61 +1958,68 @@ else if (plc != PartitionLossPolicy.IGNORE) { @Override public Set setOwners(int p, Set owners, boolean haveHistory, boolean updateSeq) { Set result = haveHistory ? Collections.emptySet() : new HashSet(); - lock.writeLock().lock(); + ctx.database().checkpointReadLock(); try { - GridDhtLocalPartition locPart = locParts.get(p); + lock.writeLock().lock(); - if (locPart != null) { - if (locPart.state() == OWNING && !owners.contains(ctx.localNodeId())) { - if (haveHistory) - locPart.moving(); - else { - locPart.rent(false); + try { + GridDhtLocalPartition locPart = locParts.get(p); - locPart.reload(true); + if (locPart != null) { + if (locPart.state() == OWNING && !owners.contains(ctx.localNodeId())) { + if (haveHistory) + locPart.moving(); + else { + locPart.rent(false); - result.add(ctx.localNodeId()); - } + locPart.reload(true); + + result.add(ctx.localNodeId()); + } - U.warn(log, "Partition has been scheduled for rebalancing due to outdated update counter " + - "[nodeId=" + ctx.localNodeId() + ", cacheOrGroupName=" + grp.cacheOrGroupName() + - ", partId=" + locPart.id() + ", haveHistory=" + haveHistory + "]"); + U.warn(log, "Partition has been scheduled for rebalancing due to outdated update counter " + + "[nodeId=" + ctx.localNodeId() + ", cacheOrGroupName=" + grp.cacheOrGroupName() + + ", partId=" + locPart.id() + ", haveHistory=" + haveHistory + "]"); + } } - } - for (Map.Entry e : node2part.entrySet()) { - GridDhtPartitionMap partMap = e.getValue(); + for (Map.Entry e : node2part.entrySet()) { + GridDhtPartitionMap 
partMap = e.getValue(); - if (!partMap.containsKey(p)) - continue; + if (!partMap.containsKey(p)) + continue; - if (partMap.get(p) == OWNING && !owners.contains(e.getKey())) { - if (haveHistory) - partMap.put(p, MOVING); - else { - partMap.put(p, RENTING); + if (partMap.get(p) == OWNING && !owners.contains(e.getKey())) { + if (haveHistory) + partMap.put(p, MOVING); + else { + partMap.put(p, RENTING); - result.add(e.getKey()); - } + result.add(e.getKey()); + } - partMap.updateSequence(partMap.updateSequence() + 1, partMap.topologyVersion()); + partMap.updateSequence(partMap.updateSequence() + 1, partMap.topologyVersion()); - if (partMap.nodeId().equals(ctx.localNodeId())) - this.updateSeq.setIfGreater(partMap.updateSequence()); + if (partMap.nodeId().equals(ctx.localNodeId())) + this.updateSeq.setIfGreater(partMap.updateSequence()); - U.warn(log, "Partition has been scheduled for rebalancing due to outdated update counter " + - "[nodeId=" + e.getKey() + ", cacheOrGroupName=" + grp.cacheOrGroupName() + - ", partId=" + p + ", haveHistory=" + haveHistory + "]"); + U.warn(log, "Partition has been scheduled for rebalancing due to outdated update counter " + + "[nodeId=" + e.getKey() + ", cacheOrGroupName=" + grp.cacheOrGroupName() + + ", partId=" + p + ", haveHistory=" + haveHistory + "]"); + } } - } - if (updateSeq) - node2part = new GridDhtPartitionFullMap(node2part, this.updateSeq.incrementAndGet()); + if (updateSeq) + node2part = new GridDhtPartitionFullMap(node2part, this.updateSeq.incrementAndGet()); + } + finally { + lock.writeLock().unlock(); + } } finally { - lock.writeLock().unlock(); + ctx.database().checkpointReadUnlock(); } return result; From 96585f20cfc6f35aab760644169f346396f26aa7 Mon Sep 17 00:00:00 2001 From: vsisko Date: Mon, 11 Dec 2017 21:43:42 +0700 Subject: [PATCH 155/243] IGNITE-7145 Visor CMD: Fixed stack overflow for VisorConsoleCommand.warn() method. 
(cherry picked from commit 77e9a87) --- .../ignite/visor/commands/common/VisorConsoleCommand.scala | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorConsoleCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorConsoleCommand.scala index 8c361fb36cd9f..fb2b7165ad482 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorConsoleCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorConsoleCommand.scala @@ -20,6 +20,7 @@ package org.apache.ignite.visor.commands.common import org.apache.ignite.visor.visor.NA +import org.apache.ignite.visor.visor import scala.collection.JavaConversions._ @@ -41,8 +42,8 @@ trait VisorConsoleCommand { assert(warnMsgs != null) warnMsgs.foreach{ - case ex: Throwable => warn(ex.getMessage) - case line => warn(line) + case ex: Throwable => visor.warn(ex.getMessage) + case line => visor.warn(line) } } From cfa86abebbe7bbc7e2aa98c41e813a5a44febc94 Mon Sep 17 00:00:00 2001 From: vsisko Date: Tue, 12 Dec 2017 10:58:52 +0700 Subject: [PATCH 156/243] IGNITE-7121 Visor CMD: Fixed reading last command line in batch mode. 
(cherry picked from commit 7b7fb8c) --- .../scala/org/apache/ignite/visor/commands/VisorConsole.scala | 2 +- .../src/main/scala/org/apache/ignite/visor/visor.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala index 0a7bcb09f2d04..a43f9ff67b594 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala @@ -161,7 +161,7 @@ class VisorConsole { visor.batchMode = true visor.quiet = quiet - val script = if (noBatchQuit) cmd else cmd + "\nquit\n" + val script = cmd + (if (cmd.last == '\n') "" else "\n") + (if (noBatchQuit) "" else "quit\n") new ByteArrayInputStream(script.getBytes("UTF-8")) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala index 069e50f9f57c7..41a14d00a177b 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala @@ -240,7 +240,7 @@ object visor extends VisorTag { var batchMode: Boolean = false - /** Quiet mode to disable node log and information messages output. */ + /** Quiet mode to disable internal node log and information messages output. */ var quiet: Boolean = false def reader(reader: ConsoleReader) { From c0a9790b6fa3dff54cec7e1508f05383de6dff0b Mon Sep 17 00:00:00 2001 From: Dmitriy Shabalin Date: Tue, 12 Dec 2017 11:41:04 +0700 Subject: [PATCH 157/243] IGNITE-7147 Web Console: Added "Number of connected clusters" component. 
(cherry picked from commit 6762533) --- modules/web-console/frontend/app/app.js | 2 + .../connected-clusters/controller.js | 36 +++++++++++++++++ .../components/connected-clusters/index.js | 29 ++++++++++++++ .../components/connected-clusters/style.scss | 40 +++++++++++++++++++ .../connected-clusters/template.pug | 18 +++++++++ .../web-console-header/component.js | 11 +++++ .../web-console-header/template.pug | 2 + .../public/images/icons/connectedClusters.svg | 1 + .../frontend/public/images/icons/index.js | 1 + 9 files changed, 140 insertions(+) create mode 100644 modules/web-console/frontend/app/components/connected-clusters/controller.js create mode 100644 modules/web-console/frontend/app/components/connected-clusters/index.js create mode 100644 modules/web-console/frontend/app/components/connected-clusters/style.scss create mode 100644 modules/web-console/frontend/app/components/connected-clusters/template.pug create mode 100644 modules/web-console/frontend/public/images/icons/connectedClusters.svg diff --git a/modules/web-console/frontend/app/app.js b/modules/web-console/frontend/app/app.js index dde6aa94391ad..5a27ea2ec7ec9 100644 --- a/modules/web-console/frontend/app/app.js +++ b/modules/web-console/frontend/app/app.js @@ -129,6 +129,7 @@ import protectFromBsSelectRender from './components/protect-from-bs-select-rende import uiGridHovering from './components/ui-grid-hovering'; import listEditable from './components/list-editable'; import clusterSelector from './components/cluster-selector'; +import connectedClusters from './components/connected-clusters'; import igniteServices from './services'; @@ -207,6 +208,7 @@ angular.module('ignite-console', [ AngularStrapSelect.name, listEditable.name, clusterSelector.name, + connectedClusters.name, // Ignite modules. 
IgniteModules.name ]) diff --git a/modules/web-console/frontend/app/components/connected-clusters/controller.js b/modules/web-console/frontend/app/components/connected-clusters/controller.js new file mode 100644 index 0000000000000..377948ee67eff --- /dev/null +++ b/modules/web-console/frontend/app/components/connected-clusters/controller.js @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +export default class { + static $inject = ['AgentManager']; + + constructor(agentMgr) { + Object.assign(this, { agentMgr }); + + this.connectedClusters = 0; + } + + $onInit() { + this.connectedClusters$ = this.agentMgr.connectionSbj + .do(({ clusters }) => this.connectedClusters = clusters.length) + .subscribe(); + } + + $onDestroy() { + this.connectedClusters$.unsubscribe(); + } +} diff --git a/modules/web-console/frontend/app/components/connected-clusters/index.js b/modules/web-console/frontend/app/components/connected-clusters/index.js new file mode 100644 index 0000000000000..fd8dd115e276a --- /dev/null +++ b/modules/web-console/frontend/app/components/connected-clusters/index.js @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import angular from 'angular'; + +import './style.scss'; +import template from './template.pug'; +import controller from './controller'; + +export default angular + .module('ignite-console.connected-clusters', []) + .component('connectedClusters', { + template, + controller + }); diff --git a/modules/web-console/frontend/app/components/connected-clusters/style.scss b/modules/web-console/frontend/app/components/connected-clusters/style.scss new file mode 100644 index 0000000000000..b8af8d6202f36 --- /dev/null +++ b/modules/web-console/frontend/app/components/connected-clusters/style.scss @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +connected-clusters { + @import "./../../../public/stylesheets/variables.scss"; + + position: absolute; + top: 0; + right: 0; + + display: flex; + align-items: center; + padding: 3px 10px; + + color: white; + font-size: 12px; + line-height: 12px; + + background-color: $text-color; + + border-radius: 0 0 4px 4px; + + [ignite-icon] { + margin-right: 6px; + } +} diff --git a/modules/web-console/frontend/app/components/connected-clusters/template.pug b/modules/web-console/frontend/app/components/connected-clusters/template.pug new file mode 100644 index 0000000000000..8a77084657a97 --- /dev/null +++ b/modules/web-console/frontend/app/components/connected-clusters/template.pug @@ -0,0 +1,18 @@ +//- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +svg(ignite-icon='connectedClusters') + | Connected clusters: {{ $ctrl.connectedClusters }} diff --git a/modules/web-console/frontend/app/components/web-console-header/component.js b/modules/web-console/frontend/app/components/web-console-header/component.js index 66f09e9c71a2f..fbdbcfc4af307 100644 --- a/modules/web-console/frontend/app/components/web-console-header/component.js +++ b/modules/web-console/frontend/app/components/web-console-header/component.js @@ -29,6 +29,10 @@ export default { 'base.settings' ]; + static connectedClustersUnvisibleStates = [ + '403', '404' + ]; + constructor($rootScope, $scope, $state, branding, UserNotifications) { Object.assign(this, {$rootScope, $scope, $state, branding, UserNotifications}); } @@ -38,10 +42,17 @@ export default { this.constructor.webAgentDownloadVisibleStates.some((state) => this.$state.includes(state)); } + setConnectedClustersVisible() { + this.isConnectedClustersVisible = + !this.constructor.connectedClustersUnvisibleStates.some((state) => this.$state.includes(state)); + } + $onInit() { this.setWebAgentDownloadVisible(); + this.setConnectedClustersVisible(); this.$scope.$on('$stateChangeSuccess', () => this.setWebAgentDownloadVisible()); + this.$scope.$on('$stateChangeSuccess', () => this.setConnectedClustersVisible()); } }, transclude: { diff --git a/modules/web-console/frontend/app/components/web-console-header/template.pug b/modules/web-console/frontend/app/components/web-console-header/template.pug index 23fc81e52415c..41586b7e75cc0 100644 --- a/modules/web-console/frontend/app/components/web-console-header/template.pug +++ b/modules/web-console/frontend/app/components/web-console-header/template.pug @@ -24,6 +24,8 @@ | You are now in #[b Demo Mode]. #[a(ng-click='closeDemo();') Close Demo?] 
.wch-content.container + connected-clusters(ng-if='$ctrl.$rootScope.user && !$ctrl.$rootScope.IgniteDemoMode && $ctrl.isConnectedClustersVisible') + a(ui-sref='signin') img.wch-logo(ng-src='{{::$ctrl.branding.headerLogo}}') diff --git a/modules/web-console/frontend/public/images/icons/connectedClusters.svg b/modules/web-console/frontend/public/images/icons/connectedClusters.svg new file mode 100644 index 0000000000000..f4053171c6591 --- /dev/null +++ b/modules/web-console/frontend/public/images/icons/connectedClusters.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/modules/web-console/frontend/public/images/icons/index.js b/modules/web-console/frontend/public/images/icons/index.js index d802805ef2ed4..5ca63076361e2 100644 --- a/modules/web-console/frontend/public/images/icons/index.js +++ b/modules/web-console/frontend/public/images/icons/index.js @@ -26,3 +26,4 @@ export search from './search.svg'; export refresh from './refresh.svg'; export sort from './sort.svg'; export info from './info.svg'; +export connectedClusters from './connectedClusters.svg'; From 24ffe3ebb3a1a76a62efdc16c8ad330eab935c81 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Tue, 12 Dec 2017 13:45:08 +0700 Subject: [PATCH 158/243] IGNITE-7147 Minor fix. (cherry picked from commit 645d04d) --- .../frontend/app/components/connected-clusters/template.pug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/web-console/frontend/app/components/connected-clusters/template.pug b/modules/web-console/frontend/app/components/connected-clusters/template.pug index 8a77084657a97..f238eac04de3c 100644 --- a/modules/web-console/frontend/app/components/connected-clusters/template.pug +++ b/modules/web-console/frontend/app/components/connected-clusters/template.pug @@ -15,4 +15,4 @@ limitations under the License. 
svg(ignite-icon='connectedClusters') - | Connected clusters: {{ $ctrl.connectedClusters }} +| Connected clusters: {{ $ctrl.connectedClusters }} From e613692721883d0c419d0f98bb4fd16b5725c5fa Mon Sep 17 00:00:00 2001 From: Alexey Popov Date: Fri, 8 Dec 2017 18:42:08 +0300 Subject: [PATCH 159/243] IGNITE-7052 S3 IP finder: add an ability to provide endpoint address. This closes #3179. Signed-off-by: nikolay_tikhonov --- .../spi/checkpoint/s3/S3CheckpointSpi.java | 37 +++++++++++ .../checkpoint/s3/S3CheckpointSpiMBean.java | 6 ++ .../ipfinder/s3/TcpDiscoveryS3IpFinder.java | 28 ++++++++- .../s3/S3CheckpointManagerSelfTest.java | 2 +- .../s3/S3CheckpointSpiSelfTest.java | 26 +++++++- ...intSpiStartStopBucketEndpointSelfTest.java | 50 +++++++++++++++ .../s3/S3CheckpointSpiStartStopSelfTest.java | 2 +- .../s3/S3SessionCheckpointSelfTest.java | 2 +- ...cpDiscoveryS3IpFinderAbstractSelfTest.java | 48 +++++++++++++-- ...pFinderAwsCredentialsProviderSelfTest.java | 1 + ...overyS3IpFinderAwsCredentialsSelfTest.java | 1 + ...overyS3IpFinderBucketEndpointSelfTest.java | 61 +++++++++++++++++++ .../ignite/testsuites/IgniteS3TestSuite.java | 4 ++ 13 files changed, 257 insertions(+), 11 deletions(-) create mode 100644 modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiStartStopBucketEndpointSelfTest.java create mode 100644 modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderBucketEndpointSelfTest.java diff --git a/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java b/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java index 2330ef3626366..195e69ef23614 100644 --- a/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java +++ b/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java @@ -74,6 +74,7 @@ *

          *
        • {@link #setBucketNameSuffix(String)}
        • *
        • {@link #setClientConfiguration(ClientConfiguration)}
        • + *
        • {@link #setBucketEndpoint(String)}
        • *
        *

        Java Example

        * {@link S3CheckpointSpi} can be configured as follows: @@ -155,6 +156,9 @@ public class S3CheckpointSpi extends IgniteSpiAdapter implements CheckpointSpi { /** Bucket name (generated). */ private String bucketName; + /** Bucket endpoint (set by user). */ + private @Nullable String bucketEndpoint; + /** Amazon client configuration. */ private ClientConfiguration cfg; @@ -174,6 +178,15 @@ public String getBucketName() { return bucketName; } + /** + * Gets S3 bucket endpoint to use. + * + * @return S3 bucket endpoint to use. + */ + public @Nullable String getBucketEndpoint() { + return bucketEndpoint; + } + /** * Gets S3 access key. * @@ -241,6 +254,22 @@ public S3CheckpointSpi setBucketNameSuffix(String bucketNameSuffix) { return this; } + /** + * Sets bucket endpoint. + * If the endpoint is not set then S3CheckpointSpi will go to each region to find a corresponding bucket. + * For information about possible endpoint names visit + * docs.aws.amazon.com + * + * @param bucketEndpoint Bucket endpoint, for example, {@code }s3.us-east-2.amazonaws.com. + * @return {@code this} for chaining. + */ + @IgniteSpiConfiguration(optional = true) + public S3CheckpointSpi setBucketEndpoint(String bucketEndpoint) { + this.bucketEndpoint = bucketEndpoint; + + return this; + } + /** * Sets Amazon client configuration. *

        @@ -298,6 +327,9 @@ public S3CheckpointSpi setAwsCredentials(AWSCredentials cred) { s3 = cfg != null ? new AmazonS3Client(cred, cfg) : new AmazonS3Client(cred); + if (!F.isEmpty(bucketEndpoint)) + s3.setEndpoint(bucketEndpoint); + if (!s3.doesBucketExist(bucketName)) { try { s3.createBucket(bucketName); @@ -734,6 +766,11 @@ private class S3CheckpointSpiMBeanImpl extends IgniteSpiMBeanAdapter implements return S3CheckpointSpi.this.getBucketName(); } + /** {@inheritDoc} */ + @Override public String getBucketEndpoint() { + return S3CheckpointSpi.this.getBucketName(); + } + /** {@inheritDoc} */ @Override public String getAccessKey() { return S3CheckpointSpi.this.getAccessKey(); diff --git a/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiMBean.java b/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiMBean.java index 5c7c8a5d7bde7..032e066ba4aeb 100644 --- a/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiMBean.java +++ b/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiMBean.java @@ -33,6 +33,12 @@ public interface S3CheckpointSpiMBean extends IgniteSpiManagementMBean { @MXBeanDescription("S3 bucket name.") public String getBucketName(); + /** + * @return S3 bucket endpoint. + */ + @MXBeanDescription("S3 bucket endpoint.") + public String getBucketEndpoint(); + /** * @return S3 access key. 
*/ diff --git a/modules/aws/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinder.java b/modules/aws/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinder.java index 2307a6622d676..dd8c1a8bc7049 100644 --- a/modules/aws/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinder.java +++ b/modules/aws/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinder.java @@ -44,6 +44,7 @@ import org.apache.ignite.spi.IgniteSpiConfiguration; import org.apache.ignite.spi.IgniteSpiException; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinderAdapter; +import org.jetbrains.annotations.Nullable; /** * AWS S3-based IP finder. @@ -60,6 +61,7 @@ *

          *
        • Client configuration (see {@link #setClientConfiguration(ClientConfiguration)})
        • *
        • Shared flag (see {@link #setShared(boolean)})
        • + *
        • Bucket endpoint (see {@link #setBucketEndpoint(String)})
        • *
        *

        * The finder will create S3 bucket with configured name. The bucket will contain entries named @@ -98,6 +100,9 @@ public class TcpDiscoveryS3IpFinder extends TcpDiscoveryIpFinderAdapter { /** Bucket name. */ private String bucketName; + /** Bucket endpoint */ + private @Nullable String bucketEndpoint; + /** Init guard. */ @GridToStringExclude private final AtomicBoolean initGuard = new AtomicBoolean(); @@ -299,9 +304,14 @@ private void initClient() throws IgniteSpiException { * @return Client instance to use to connect to AWS. */ private AmazonS3Client createAmazonS3Client() { - return cfg != null + AmazonS3Client cln = cfg != null ? (cred != null ? new AmazonS3Client(cred, cfg) : new AmazonS3Client(credProvider, cfg)) : (cred != null ? new AmazonS3Client(cred) : new AmazonS3Client(credProvider)); + + if (!F.isEmpty(bucketEndpoint)) + cln.setEndpoint(bucketEndpoint); + + return cln; } /** @@ -317,6 +327,22 @@ public TcpDiscoveryS3IpFinder setBucketName(String bucketName) { return this; } + /** + * Sets bucket endpoint for IP finder. + * If the endpoint is not set then IP finder will go to each region to find a corresponding bucket. + * For information about possible endpoint names visit + * docs.aws.amazon.com + * + * @param bucketEndpoint Bucket endpoint, for example, s3.us-east-2.amazonaws.com. + * @return {@code this} for chaining. + */ + @IgniteSpiConfiguration(optional = true) + public TcpDiscoveryS3IpFinder setBucketEndpoint(String bucketEndpoint) { + this.bucketEndpoint = bucketEndpoint; + + return this; + } + /** * Sets Amazon client configuration. *

        diff --git a/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointManagerSelfTest.java b/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointManagerSelfTest.java index 529e4d0878e5e..acda385d5fb7e 100644 --- a/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointManagerSelfTest.java +++ b/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointManagerSelfTest.java @@ -41,7 +41,7 @@ public class S3CheckpointManagerSelfTest extends GridCheckpointManagerAbstractSe spi.setAwsCredentials(cred); - spi.setBucketNameSuffix("unit-test-bucket"); + spi.setBucketNameSuffix(S3CheckpointSpiSelfTest.getBucketNameSuffix()); cfg.setCheckpointSpi(spi); diff --git a/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiSelfTest.java b/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiSelfTest.java index 23abe0642136f..cb38083506e69 100644 --- a/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiSelfTest.java +++ b/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiSelfTest.java @@ -24,6 +24,9 @@ import com.amazonaws.services.s3.AmazonS3Client; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3ObjectSummary; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.concurrent.ThreadLocalRandom; import org.apache.ignite.GridTestIoUtils; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.IgniteInterruptedCheckedException; @@ -55,7 +58,7 @@ public class S3CheckpointSpiSelfTest extends GridSpiAbstractTest { + + /** {@inheritDoc} */ + @Override protected void spiConfigure(S3CheckpointSpi spi) throws Exception { + AWSCredentials cred = new BasicAWSCredentials(IgniteS3TestSuite.getAccessKey(), + IgniteS3TestSuite.getSecretKey()); + + spi.setAwsCredentials(cred); + 
spi.setBucketNameSuffix(S3CheckpointSpiSelfTest.getBucketNameSuffix() + "-e"); + spi.setBucketEndpoint("s3.us-east-2.amazonaws.com"); + + super.spiConfigure(spi); + } + + /** {@inheritDoc} */ + @IgniteIgnore("https://issues.apache.org/jira/browse/IGNITE-2420") + @Override public void testStartStop() throws Exception { + super.testStartStop(); + } +} \ No newline at end of file diff --git a/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiStartStopSelfTest.java b/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiStartStopSelfTest.java index c26897f9909a4..a062b51f28e3f 100644 --- a/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiStartStopSelfTest.java +++ b/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiStartStopSelfTest.java @@ -36,7 +36,7 @@ public class S3CheckpointSpiStartStopSelfTest extends GridSpiStartStopAbstractTe spi.setAwsCredentials(cred); - spi.setBucketNameSuffix("unit-test-bucket"); + spi.setBucketNameSuffix(S3CheckpointSpiSelfTest.getBucketNameSuffix()); super.spiConfigure(spi); } diff --git a/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3SessionCheckpointSelfTest.java b/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3SessionCheckpointSelfTest.java index c1290a4d07b3f..54a7910d030f3 100644 --- a/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3SessionCheckpointSelfTest.java +++ b/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3SessionCheckpointSelfTest.java @@ -43,7 +43,7 @@ public void testS3Checkpoint() throws Exception { spi.setAwsCredentials(cred); - spi.setBucketNameSuffix("unit-test-bucket"); + spi.setBucketNameSuffix(S3CheckpointSpiSelfTest.getBucketNameSuffix()); cfg.setCheckpointSpi(spi); diff --git a/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAbstractSelfTest.java 
b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAbstractSelfTest.java index d17416f7d4e36..89a44be0f0509 100644 --- a/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAbstractSelfTest.java +++ b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAbstractSelfTest.java @@ -19,7 +19,9 @@ import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.UnknownHostException; import java.util.Collection; +import java.util.concurrent.ThreadLocalRandom; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinderAbstractSelfTest; import org.apache.ignite.testsuites.IgniteIgnore; @@ -35,7 +37,7 @@ abstract class TcpDiscoveryS3IpFinderAbstractSelfTest * * @throws Exception If any error occurs. */ - protected TcpDiscoveryS3IpFinderAbstractSelfTest() throws Exception { + TcpDiscoveryS3IpFinderAbstractSelfTest() throws Exception { } /** {@inheritDoc} */ @@ -47,11 +49,8 @@ protected TcpDiscoveryS3IpFinderAbstractSelfTest() throws Exception { assert finder.isShared() : "Ip finder should be shared by default."; setAwsCredentials(finder); - - // Bucket name should be unique for the host to parallel test run on one bucket. - String bucketName = IgniteS3TestSuite.getBucketName( - "ip-finder-unit-test-bucket-" + InetAddress.getLocalHost().getAddress()[3]); - finder.setBucketName(bucketName); + setBucketEndpoint(finder); + setBucketName(finder); for (int i = 0; i < 5; i++) { Collection addrs = finder.getRegisteredAddresses(); @@ -81,4 +80,41 @@ protected TcpDiscoveryS3IpFinderAbstractSelfTest() throws Exception { * @param finder finder credentials to set into */ protected abstract void setAwsCredentials(TcpDiscoveryS3IpFinder finder); + + /** + * Set Bucket endpoint into the provided {@code finder}. + * @param finder finder endpoint to set into. 
+ */ + protected void setBucketEndpoint(TcpDiscoveryS3IpFinder finder) { + // No-op. + } + + /** + * Set Bucket endpoint into the provided {@code finder}. + * @param finder finder endpoint to set into. + */ + protected void setBucketName(TcpDiscoveryS3IpFinder finder) { + finder.setBucketName(getBucketName()); + } + + /** + * Gets Bucket name. + * Bucket name should be unique for the host to parallel test run on one bucket. + * Please note that the final bucket name should not exceed 63 chars. + * + * @return Bucket name. + */ + static String getBucketName() { + String bucketName; + try { + bucketName = IgniteS3TestSuite.getBucketName( + "ip-finder-unit-test-" + InetAddress.getLocalHost().getHostName().toLowerCase()); + } + catch (UnknownHostException e) { + bucketName = IgniteS3TestSuite.getBucketName( + "ip-finder-unit-test-rnd-" + ThreadLocalRandom.current().nextInt(100)); + } + + return bucketName; + } } \ No newline at end of file diff --git a/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAwsCredentialsProviderSelfTest.java b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAwsCredentialsProviderSelfTest.java index ea316c4a14818..9ff5571a26e5e 100644 --- a/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAwsCredentialsProviderSelfTest.java +++ b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAwsCredentialsProviderSelfTest.java @@ -40,6 +40,7 @@ public TcpDiscoveryS3IpFinderAwsCredentialsProviderSelfTest() throws Exception { new BasicAWSCredentials(IgniteS3TestSuite.getAccessKey(), IgniteS3TestSuite.getSecretKey()))); } + /** {@inheritDoc} */ @Override public void testIpFinder() throws Exception { super.testIpFinder(); } diff --git a/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAwsCredentialsSelfTest.java 
b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAwsCredentialsSelfTest.java index 7447378544b16..5bea2515530c5 100644 --- a/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAwsCredentialsSelfTest.java +++ b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAwsCredentialsSelfTest.java @@ -39,6 +39,7 @@ public TcpDiscoveryS3IpFinderAwsCredentialsSelfTest() throws Exception { IgniteS3TestSuite.getSecretKey())); } + /** {@inheritDoc} */ @Override public void testIpFinder() throws Exception { super.testIpFinder(); } diff --git a/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderBucketEndpointSelfTest.java b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderBucketEndpointSelfTest.java new file mode 100644 index 0000000000000..9eda35113e332 --- /dev/null +++ b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderBucketEndpointSelfTest.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.spi.discovery.tcp.ipfinder.s3; + +import com.amazonaws.auth.BasicAWSCredentials; +import org.apache.ignite.testsuites.IgniteS3TestSuite; + +/** + * TcpDiscoveryS3IpFinder test using AWS credentials and selected bucket endpoint + * Possible endpoints are here: http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region. + */ +public class TcpDiscoveryS3IpFinderBucketEndpointSelfTest extends TcpDiscoveryS3IpFinderAbstractSelfTest { + /** + * Constructor. + * + * @throws Exception If any error occurs. + */ + public TcpDiscoveryS3IpFinderBucketEndpointSelfTest() throws Exception { + // No-op. + } + + /** {@inheritDoc} */ + @Override protected void setAwsCredentials(TcpDiscoveryS3IpFinder finder) { + finder.setAwsCredentials(new BasicAWSCredentials(IgniteS3TestSuite.getAccessKey(), + IgniteS3TestSuite.getSecretKey())); + } + + /** {@inheritDoc} */ + @Override protected void setBucketEndpoint(TcpDiscoveryS3IpFinder finder) { + super.setBucketEndpoint(finder); + + finder.setBucketEndpoint("s3.us-east-2.amazonaws.com"); + } + + /** {@inheritDoc} */ + @Override protected void setBucketName(TcpDiscoveryS3IpFinder finder) { + super.setBucketName(finder); + + finder.setBucketName(getBucketName() + "-e"); + } + + /** {@inheritDoc} */ + @Override public void testIpFinder() throws Exception { + super.testIpFinder(); + } +} \ No newline at end of file diff --git a/modules/aws/src/test/java/org/apache/ignite/testsuites/IgniteS3TestSuite.java b/modules/aws/src/test/java/org/apache/ignite/testsuites/IgniteS3TestSuite.java index a703c6600b6fc..009916b111fff 100644 --- a/modules/aws/src/test/java/org/apache/ignite/testsuites/IgniteS3TestSuite.java +++ b/modules/aws/src/test/java/org/apache/ignite/testsuites/IgniteS3TestSuite.java @@ -21,10 +21,12 @@ import org.apache.ignite.spi.checkpoint.s3.S3CheckpointManagerSelfTest; import org.apache.ignite.spi.checkpoint.s3.S3CheckpointSpiConfigSelfTest; import 
org.apache.ignite.spi.checkpoint.s3.S3CheckpointSpiSelfTest; +import org.apache.ignite.spi.checkpoint.s3.S3CheckpointSpiStartStopBucketEndpointSelfTest; import org.apache.ignite.spi.checkpoint.s3.S3CheckpointSpiStartStopSelfTest; import org.apache.ignite.spi.checkpoint.s3.S3SessionCheckpointSelfTest; import org.apache.ignite.spi.discovery.tcp.ipfinder.s3.TcpDiscoveryS3IpFinderAwsCredentialsProviderSelfTest; import org.apache.ignite.spi.discovery.tcp.ipfinder.s3.TcpDiscoveryS3IpFinderAwsCredentialsSelfTest; +import org.apache.ignite.spi.discovery.tcp.ipfinder.s3.TcpDiscoveryS3IpFinderBucketEndpointSelfTest; import org.apache.ignite.testframework.IgniteTestSuite; /** @@ -44,10 +46,12 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(S3CheckpointSpiStartStopSelfTest.class); suite.addTestSuite(S3CheckpointManagerSelfTest.class); suite.addTestSuite(S3SessionCheckpointSelfTest.class); + suite.addTestSuite(S3CheckpointSpiStartStopBucketEndpointSelfTest.class); // S3 IP finder. suite.addTestSuite(TcpDiscoveryS3IpFinderAwsCredentialsSelfTest.class); suite.addTestSuite(TcpDiscoveryS3IpFinderAwsCredentialsProviderSelfTest.class); + suite.addTestSuite(TcpDiscoveryS3IpFinderBucketEndpointSelfTest.class); return suite; } From 71bd0961f59ef3c1a275164ab3d856fb09661a85 Mon Sep 17 00:00:00 2001 From: Alexey Popov Date: Mon, 11 Dec 2017 16:30:24 +0300 Subject: [PATCH 160/243] IGNITE-7053 S3 IP finder: support server side encryption. This close #3189. 
Signed-off-by: nikolay_tikhonov --- .../spi/checkpoint/s3/S3CheckpointSpi.java | 45 +++++++++++++++-- .../checkpoint/s3/S3CheckpointSpiMBean.java | 6 +++ .../ipfinder/s3/TcpDiscoveryS3IpFinder.java | 39 +++++++++++---- ...pointSpiStartStopSSEAlgorithmSelfTest.java | 49 +++++++++++++++++++ ...cpDiscoveryS3IpFinderAbstractSelfTest.java | 22 ++++++++- ...overyS3IpFinderBucketEndpointSelfTest.java | 16 ++---- ...scoveryS3IpFinderSSEAlgorithmSelfTest.java | 48 ++++++++++++++++++ .../ignite/testsuites/IgniteS3TestSuite.java | 4 ++ 8 files changed, 203 insertions(+), 26 deletions(-) create mode 100644 modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiStartStopSSEAlgorithmSelfTest.java create mode 100644 modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderSSEAlgorithmSelfTest.java diff --git a/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java b/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java index 195e69ef23614..270d518cfda0e 100644 --- a/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java +++ b/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java @@ -67,13 +67,15 @@ *

        Mandatory

        * This SPI has one mandatory configuration parameter: *
          - *
        • {@link #setAwsCredentials(AWSCredentials)}
        • + *
        • AWS credentials (see {@link #setAwsCredentials(AWSCredentials)} *
        *

        Optional

        * This SPI has following optional configuration parameters: *
          - *
        • {@link #setBucketNameSuffix(String)}
        • - *
        • {@link #setClientConfiguration(ClientConfiguration)}
        • + *
        • Bucket name suffix (see {@link #setBucketNameSuffix(String)})
        • + *
        • Client configuration (see {@link #setClientConfiguration(ClientConfiguration)})
        • + *
        • Bucket endpoint (see {@link #setBucketEndpoint(String)})
        • + *
        • Server side encryption algorithm (see {@link #setSSEAlgorithm(String)})
        • *
        • {@link #setBucketEndpoint(String)}
        • *
        *

        Java Example

        @@ -159,6 +161,9 @@ public class S3CheckpointSpi extends IgniteSpiAdapter implements CheckpointSpi { /** Bucket endpoint (set by user). */ private @Nullable String bucketEndpoint; + /** Server side encryption algorithm */ + private @Nullable String sseAlg; + /** Amazon client configuration. */ private ClientConfiguration cfg; @@ -187,6 +192,15 @@ public String getBucketName() { return bucketEndpoint; } + /** + * Gets S3 server-side encryption algorithm. + * + * @return S3 server-side encryption algorithm to use. + */ + public @Nullable String getSSEAlgorithm() { + return sseAlg; + } + /** * Gets S3 access key. * @@ -270,6 +284,21 @@ public S3CheckpointSpi setBucketEndpoint(String bucketEndpoint) { return this; } + /** + * Sets server-side encryption algorithm for Amazon S3-managed encryption keys. + * For information about possible S3-managed encryption keys visit + * docs.aws.amazon.com. + * + * @param sseAlg Server-side encryption algorithm, for example, AES256 or SSES3. + * @return {@code this} for chaining. + */ + @IgniteSpiConfiguration(optional = true) + public S3CheckpointSpi setSSEAlgorithm(String sseAlg) { + this.sseAlg = sseAlg; + + return this; + } + /** * Sets Amazon client configuration. *

        @@ -312,6 +341,8 @@ public S3CheckpointSpi setAwsCredentials(AWSCredentials cred) { log.debug(configInfo("awsCredentials", cred)); log.debug(configInfo("clientConfiguration", cfg)); log.debug(configInfo("bucketNameSuffix", bucketNameSuffix)); + log.debug(configInfo("bucketEndpoint", bucketEndpoint)); + log.debug(configInfo("SSEAlgorithm", sseAlg)); } if (cfg == null) @@ -560,6 +591,9 @@ private void write(S3CheckpointData data) throws IgniteCheckedException, AmazonC meta.setContentLength(buf.length); + if (!F.isEmpty(sseAlg)) + meta.setSSEAlgorithm(sseAlg); + s3.putObject(bucketName, data.getKey(), new ByteArrayInputStream(buf), meta); } @@ -771,6 +805,11 @@ private class S3CheckpointSpiMBeanImpl extends IgniteSpiMBeanAdapter implements return S3CheckpointSpi.this.getBucketName(); } + /** {@inheritDoc} */ + @Override public String getSSEAlgorithm() { + return S3CheckpointSpi.this.getSSEAlgorithm(); + } + /** {@inheritDoc} */ @Override public String getAccessKey() { return S3CheckpointSpi.this.getAccessKey(); diff --git a/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiMBean.java b/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiMBean.java index 032e066ba4aeb..4f8064923bb50 100644 --- a/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiMBean.java +++ b/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiMBean.java @@ -39,6 +39,12 @@ public interface S3CheckpointSpiMBean extends IgniteSpiManagementMBean { @MXBeanDescription("S3 bucket endpoint.") public String getBucketEndpoint(); + /** + * @return S3 server-side encryption algorithm. + */ + @MXBeanDescription("S3 server-side encryption algorithm.") + public String getSSEAlgorithm(); + /** * @return S3 access key. 
*/ diff --git a/modules/aws/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinder.java b/modules/aws/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinder.java index dd8c1a8bc7049..79559e8b6d86c 100644 --- a/modules/aws/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinder.java +++ b/modules/aws/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinder.java @@ -62,6 +62,7 @@ *

      • Client configuration (see {@link #setClientConfiguration(ClientConfiguration)})
      • *
      • Shared flag (see {@link #setShared(boolean)})
      • *
      • Bucket endpoint (see {@link #setBucketEndpoint(String)})
      • + *
      • Server side encryption algorithm (see {@link #setSSEAlgorithm(String)})
      • * *

        * The finder will create S3 bucket with configured name. The bucket will contain entries named @@ -80,14 +81,9 @@ public class TcpDiscoveryS3IpFinder extends TcpDiscoveryIpFinderAdapter { /** Entry content. */ private static final byte[] ENTRY_CONTENT = new byte[] {1}; - /** Entry metadata with content length set. */ - private static final ObjectMetadata ENTRY_METADATA; - - static { - ENTRY_METADATA = new ObjectMetadata(); - - ENTRY_METADATA.setContentLength(ENTRY_CONTENT.length); - } + /** Entry metadata. */ + @GridToStringExclude + private final ObjectMetadata objMetadata = new ObjectMetadata(); /** Grid logger. */ @LoggerResource @@ -103,6 +99,9 @@ public class TcpDiscoveryS3IpFinder extends TcpDiscoveryIpFinderAdapter { /** Bucket endpoint */ private @Nullable String bucketEndpoint; + /** Server side encryption algorithm */ + private @Nullable String sseAlg; + /** Init guard. */ @GridToStringExclude private final AtomicBoolean initGuard = new AtomicBoolean(); @@ -192,7 +191,7 @@ public TcpDiscoveryS3IpFinder() { String key = key(addr); try { - s3.putObject(bucketName, key, new ByteArrayInputStream(ENTRY_CONTENT), ENTRY_METADATA); + s3.putObject(bucketName, key, new ByteArrayInputStream(ENTRY_CONTENT), objMetadata); } catch (AmazonClientException e) { throw new IgniteSpiException("Failed to put entry [bucketName=" + bucketName + @@ -256,6 +255,11 @@ private void initClient() throws IgniteSpiException { if (F.isEmpty(bucketName)) throw new IgniteSpiException("Bucket name is null or empty (provide bucket name and restart)."); + objMetadata.setContentLength(ENTRY_CONTENT.length); + + if (!F.isEmpty(sseAlg)) + objMetadata.setSSEAlgorithm(sseAlg); + s3 = createAmazonS3Client(); if (!s3.doesBucketExist(bucketName)) { @@ -331,7 +335,7 @@ public TcpDiscoveryS3IpFinder setBucketName(String bucketName) { * Sets bucket endpoint for IP finder. * If the endpoint is not set then IP finder will go to each region to find a corresponding bucket. 
* For information about possible endpoint names visit - * docs.aws.amazon.com + * docs.aws.amazon.com. * * @param bucketEndpoint Bucket endpoint, for example, s3.us-east-2.amazonaws.com. * @return {@code this} for chaining. @@ -343,6 +347,21 @@ public TcpDiscoveryS3IpFinder setBucketEndpoint(String bucketEndpoint) { return this; } + /** + * Sets server-side encryption algorithm for Amazon S3-managed encryption keys. + * For information about possible S3-managed encryption keys visit + * docs.aws.amazon.com. + * + * @param sseAlg Server-side encryption algorithm, for example, AES256 or SSES3. + * @return {@code this} for chaining. + */ + @IgniteSpiConfiguration(optional = true) + public TcpDiscoveryS3IpFinder setSSEAlgorithm(String sseAlg) { + this.sseAlg = sseAlg; + + return this; + } + /** * Sets Amazon client configuration. *

        diff --git a/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiStartStopSSEAlgorithmSelfTest.java b/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiStartStopSSEAlgorithmSelfTest.java new file mode 100644 index 0000000000000..7bfb75dd7888e --- /dev/null +++ b/modules/aws/src/test/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpiStartStopSSEAlgorithmSelfTest.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.spi.checkpoint.s3; + +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.BasicAWSCredentials; +import org.apache.ignite.spi.GridSpiStartStopAbstractTest; +import org.apache.ignite.testframework.junits.spi.GridSpiTest; +import org.apache.ignite.testsuites.IgniteIgnore; +import org.apache.ignite.testsuites.IgniteS3TestSuite; + +/** + * Grid S3 checkpoint SPI start stop self test. 
+ */ +@GridSpiTest(spi = S3CheckpointSpi.class, group = "Checkpoint SPI") +public class S3CheckpointSpiStartStopSSEAlgorithmSelfTest extends GridSpiStartStopAbstractTest { + /** {@inheritDoc} */ + @Override protected void spiConfigure(S3CheckpointSpi spi) throws Exception { + AWSCredentials cred = new BasicAWSCredentials(IgniteS3TestSuite.getAccessKey(), + IgniteS3TestSuite.getSecretKey()); + + spi.setAwsCredentials(cred); + spi.setBucketNameSuffix(S3CheckpointSpiSelfTest.getBucketNameSuffix()); + spi.setSSEAlgorithm("AES256"); + + super.spiConfigure(spi); + } + + /** {@inheritDoc} */ + @IgniteIgnore("https://issues.apache.org/jira/browse/IGNITE-2420") + @Override public void testStartStop() throws Exception { + super.testStartStop(); + } +} \ No newline at end of file diff --git a/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAbstractSelfTest.java b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAbstractSelfTest.java index 89a44be0f0509..af4a47ab5d58d 100644 --- a/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAbstractSelfTest.java +++ b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderAbstractSelfTest.java @@ -26,12 +26,20 @@ import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinderAbstractSelfTest; import org.apache.ignite.testsuites.IgniteIgnore; import org.apache.ignite.testsuites.IgniteS3TestSuite; +import org.jetbrains.annotations.Nullable; /** * Abstract TcpDiscoveryS3IpFinder to test with different ways of setting AWS credentials. */ abstract class TcpDiscoveryS3IpFinderAbstractSelfTest extends TcpDiscoveryIpFinderAbstractSelfTest { + + /** Bucket endpoint */ + protected @Nullable String bucketEndpoint; + + /** Server-side encryption algorithm for Amazon S3-managed encryption keys. */ + protected @Nullable String SSEAlgorithm; + /** * Constructor. 
* @@ -51,6 +59,7 @@ abstract class TcpDiscoveryS3IpFinderAbstractSelfTest setAwsCredentials(finder); setBucketEndpoint(finder); setBucketName(finder); + setSSEAlgorithm(finder); for (int i = 0; i < 5; i++) { Collection addrs = finder.getRegisteredAddresses(); @@ -85,8 +94,17 @@ abstract class TcpDiscoveryS3IpFinderAbstractSelfTest * Set Bucket endpoint into the provided {@code finder}. * @param finder finder endpoint to set into. */ - protected void setBucketEndpoint(TcpDiscoveryS3IpFinder finder) { - // No-op. + private void setBucketEndpoint(TcpDiscoveryS3IpFinder finder) { + finder.setBucketEndpoint(bucketEndpoint); + } + + /** + * Set server-side encryption algorithm for Amazon S3-managed encryption keys into the provided {@code finder}. + * + * @param finder finder encryption algorithm to set into. + */ + private void setSSEAlgorithm(TcpDiscoveryS3IpFinder finder) { + finder.setSSEAlgorithm(SSEAlgorithm); } /** diff --git a/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderBucketEndpointSelfTest.java b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderBucketEndpointSelfTest.java index 9eda35113e332..07d4839d959d7 100644 --- a/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderBucketEndpointSelfTest.java +++ b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderBucketEndpointSelfTest.java @@ -21,8 +21,9 @@ import org.apache.ignite.testsuites.IgniteS3TestSuite; /** - * TcpDiscoveryS3IpFinder test using AWS credentials and selected bucket endpoint - * Possible endpoints are here: http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region. + * TcpDiscoveryS3IpFinder tests bucket endpoint for IP finder. + * For information about possible endpoint names visit + * docs.aws.amazon.com. 
*/ public class TcpDiscoveryS3IpFinderBucketEndpointSelfTest extends TcpDiscoveryS3IpFinderAbstractSelfTest { /** @@ -31,7 +32,7 @@ public class TcpDiscoveryS3IpFinderBucketEndpointSelfTest extends TcpDiscoveryS3 * @throws Exception If any error occurs. */ public TcpDiscoveryS3IpFinderBucketEndpointSelfTest() throws Exception { - // No-op. + bucketEndpoint = "s3.us-east-2.amazonaws.com"; } /** {@inheritDoc} */ @@ -40,13 +41,6 @@ public TcpDiscoveryS3IpFinderBucketEndpointSelfTest() throws Exception { IgniteS3TestSuite.getSecretKey())); } - /** {@inheritDoc} */ - @Override protected void setBucketEndpoint(TcpDiscoveryS3IpFinder finder) { - super.setBucketEndpoint(finder); - - finder.setBucketEndpoint("s3.us-east-2.amazonaws.com"); - } - /** {@inheritDoc} */ @Override protected void setBucketName(TcpDiscoveryS3IpFinder finder) { super.setBucketName(finder); @@ -58,4 +52,4 @@ public TcpDiscoveryS3IpFinderBucketEndpointSelfTest() throws Exception { @Override public void testIpFinder() throws Exception { super.testIpFinder(); } -} \ No newline at end of file +} diff --git a/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderSSEAlgorithmSelfTest.java b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderSSEAlgorithmSelfTest.java new file mode 100644 index 0000000000000..838a3c67f37c7 --- /dev/null +++ b/modules/aws/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/s3/TcpDiscoveryS3IpFinderSSEAlgorithmSelfTest.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.spi.discovery.tcp.ipfinder.s3; + +import com.amazonaws.auth.BasicAWSCredentials; +import org.apache.ignite.testsuites.IgniteS3TestSuite; + +/** + * TcpDiscoveryS3IpFinder tests server-side encryption algorithm for Amazon S3-managed encryption keys. + * For information about possible S3-managed encryption keys visit + * docs.aws.amazon.com. + */ +public class TcpDiscoveryS3IpFinderSSEAlgorithmSelfTest extends TcpDiscoveryS3IpFinderAbstractSelfTest { + /** + * Constructor. + * + * @throws Exception If any error occurs. + */ + public TcpDiscoveryS3IpFinderSSEAlgorithmSelfTest() throws Exception { + SSEAlgorithm = "AES256"; + } + + /** {@inheritDoc} */ + @Override protected void setAwsCredentials(TcpDiscoveryS3IpFinder finder) { + finder.setAwsCredentials(new BasicAWSCredentials(IgniteS3TestSuite.getAccessKey(), + IgniteS3TestSuite.getSecretKey())); + } + + /** {@inheritDoc} */ + @Override public void testIpFinder() throws Exception { + super.testIpFinder(); + } +} \ No newline at end of file diff --git a/modules/aws/src/test/java/org/apache/ignite/testsuites/IgniteS3TestSuite.java b/modules/aws/src/test/java/org/apache/ignite/testsuites/IgniteS3TestSuite.java index 009916b111fff..f96340e914f3c 100644 --- a/modules/aws/src/test/java/org/apache/ignite/testsuites/IgniteS3TestSuite.java +++ b/modules/aws/src/test/java/org/apache/ignite/testsuites/IgniteS3TestSuite.java @@ -22,11 +22,13 @@ import org.apache.ignite.spi.checkpoint.s3.S3CheckpointSpiConfigSelfTest; import 
org.apache.ignite.spi.checkpoint.s3.S3CheckpointSpiSelfTest; import org.apache.ignite.spi.checkpoint.s3.S3CheckpointSpiStartStopBucketEndpointSelfTest; +import org.apache.ignite.spi.checkpoint.s3.S3CheckpointSpiStartStopSSEAlgorithmSelfTest; import org.apache.ignite.spi.checkpoint.s3.S3CheckpointSpiStartStopSelfTest; import org.apache.ignite.spi.checkpoint.s3.S3SessionCheckpointSelfTest; import org.apache.ignite.spi.discovery.tcp.ipfinder.s3.TcpDiscoveryS3IpFinderAwsCredentialsProviderSelfTest; import org.apache.ignite.spi.discovery.tcp.ipfinder.s3.TcpDiscoveryS3IpFinderAwsCredentialsSelfTest; import org.apache.ignite.spi.discovery.tcp.ipfinder.s3.TcpDiscoveryS3IpFinderBucketEndpointSelfTest; +import org.apache.ignite.spi.discovery.tcp.ipfinder.s3.TcpDiscoveryS3IpFinderSSEAlgorithmSelfTest; import org.apache.ignite.testframework.IgniteTestSuite; /** @@ -47,11 +49,13 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(S3CheckpointManagerSelfTest.class); suite.addTestSuite(S3SessionCheckpointSelfTest.class); suite.addTestSuite(S3CheckpointSpiStartStopBucketEndpointSelfTest.class); + suite.addTestSuite(S3CheckpointSpiStartStopSSEAlgorithmSelfTest.class); // S3 IP finder. 
suite.addTestSuite(TcpDiscoveryS3IpFinderAwsCredentialsSelfTest.class); suite.addTestSuite(TcpDiscoveryS3IpFinderAwsCredentialsProviderSelfTest.class); suite.addTestSuite(TcpDiscoveryS3IpFinderBucketEndpointSelfTest.class); + suite.addTestSuite(TcpDiscoveryS3IpFinderSSEAlgorithmSelfTest.class); return suite; } From d87612383198b72bb60382df345ab6ae24138bbe Mon Sep 17 00:00:00 2001 From: apopov Date: Tue, 12 Dec 2017 11:06:59 +0300 Subject: [PATCH 161/243] S3CheckpointSpi javadoc merge error fixed --- .../org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java b/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java index 270d518cfda0e..8ea5ef7057818 100644 --- a/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java +++ b/modules/aws/src/main/java/org/apache/ignite/spi/checkpoint/s3/S3CheckpointSpi.java @@ -76,7 +76,6 @@ *

      • Client configuration (see {@link #setClientConfiguration(ClientConfiguration)})
      • *
      • Bucket endpoint (see {@link #setBucketEndpoint(String)})
      • *
      • Server side encryption algorithm (see {@link #setSSEAlgorithm(String)})
      • - *
      • {@link #setBucketEndpoint(String)}
      • * *

        Java Example

        * {@link S3CheckpointSpi} can be configured as follows: From 7fd70724cbb41145feca51fd50ed310f48fe71e3 Mon Sep 17 00:00:00 2001 From: Igor Sapego Date: Tue, 12 Dec 2017 16:19:37 +0300 Subject: [PATCH 162/243] IGNITE-7114: C++ node can start without example folder now (cherry picked from commit bb2d59b) --- .../include/ignite/common/platform_utils.h | 20 ++- .../os/linux/src/common/platform_utils.cpp | 34 +++-- .../os/win/src/common/platform_utils.cpp | 29 +++- .../cpp/core-test/src/test_utils.cpp | 8 +- modules/platforms/cpp/core/src/ignition.cpp | 10 +- .../cpp/jni/include/ignite/jni/utils.h | 17 +-- .../platforms/cpp/jni/os/linux/src/utils.cpp | 120 +++++++++------- .../platforms/cpp/jni/os/win/src/utils.cpp | 133 ++++++++++-------- .../cpp/odbc-test/src/test_utils.cpp | 8 +- 9 files changed, 220 insertions(+), 159 deletions(-) diff --git a/modules/platforms/cpp/common/include/ignite/common/platform_utils.h b/modules/platforms/cpp/common/include/ignite/common/platform_utils.h index 8674ce3232e19..b8c6aa63c11de 100644 --- a/modules/platforms/cpp/common/include/ignite/common/platform_utils.h +++ b/modules/platforms/cpp/common/include/ignite/common/platform_utils.h @@ -61,10 +61,18 @@ namespace ignite * Read system environment variable taking thread-safety in count. * * @param name Environment variable name. - * @param val Environment variable value. - * @return True if the environment variable with such name was found. + * @return Environment variable value if found and empty string otherwise. */ - IGNITE_IMPORT_EXPORT bool GetEnv(const std::string& name, std::string& val); + IGNITE_IMPORT_EXPORT std::string GetEnv(const std::string& name); + + /** + * Read system environment variable taking thread-safety in count. + * + * @param name Environment variable name. + * @param dflt Default value to return on fail. + * @return Environment variable value if found and @c dflt otherwise. 
+ */ + IGNITE_IMPORT_EXPORT std::string GetEnv(const std::string& name, const std::string& dflt); /** * Ensure that file on the given path exists in the system. @@ -73,6 +81,12 @@ namespace ignite * @return True if file exists, false otherwise. */ IGNITE_IMPORT_EXPORT bool FileExists(const std::string& path); + + /** + * Check if the provided path is the valid directory. + * @return @c true if the provided path is the valid directory. + */ + IGNITE_IMPORT_EXPORT bool IsValidDirectory(const std::string& path); } } diff --git a/modules/platforms/cpp/common/os/linux/src/common/platform_utils.cpp b/modules/platforms/cpp/common/os/linux/src/common/platform_utils.cpp index 3e8d0c9ef970b..b74f11c9fe01f 100644 --- a/modules/platforms/cpp/common/os/linux/src/common/platform_utils.cpp +++ b/modules/platforms/cpp/common/os/linux/src/common/platform_utils.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include @@ -51,25 +52,42 @@ namespace ignite return localtime_r(&in, &out) == 0; } - bool GetEnv(const std::string& name, std::string& val) + std::string GetEnv(const std::string& name) + { + static const std::string empty; + + return GetEnv(name, empty); + } + + std::string GetEnv(const std::string& name, const std::string& dflt) { char* val0 = std::getenv(name.c_str()); if (!val0) - return false; - - val = val0; + return dflt; - return true; + return std::string(val0); } bool FileExists(const std::string& path) { - struct stat s; + glob_t gs; + + int res = glob(path.c_str(), 0, 0, &gs); + + globfree(&gs); + + return res == 0; + } + + bool IsValidDirectory(const std::string& path) + { + if (path.empty()) + return false; - int res = stat(path.c_str(), &s); + struct stat pathStat; - return res != -1; + return stat(path.c_str(), &pathStat) != -1 && S_ISDIR(pathStat.st_mode); } } } diff --git a/modules/platforms/cpp/common/os/win/src/common/platform_utils.cpp b/modules/platforms/cpp/common/os/win/src/common/platform_utils.cpp index b8a445c5ce17e..a0f4505c46b71 100644 
--- a/modules/platforms/cpp/common/os/win/src/common/platform_utils.cpp +++ b/modules/platforms/cpp/common/os/win/src/common/platform_utils.cpp @@ -49,18 +49,23 @@ namespace ignite return localtime_s(&out, &in) == 0; } - bool GetEnv(const std::string& name, std::string& val) + std::string GetEnv(const std::string& name) { - char res0[32767]; + static const std::string empty; - DWORD envRes = GetEnvironmentVariableA(name.c_str(), res0, sizeof(res0) / sizeof(res0[0])); + return GetEnv(name, empty); + } - if (envRes == 0) - return false; + std::string GetEnv(const std::string& name, const std::string& dflt) + { + char res[32767]; - val.assign(res0); + DWORD envRes = GetEnvironmentVariableA(name.c_str(), res, sizeof(res) / sizeof(res[0])); - return true; + if (envRes == 0 || envRes > sizeof(res)) + return dflt; + + return std::string(res, static_cast(envRes)); } bool FileExists(const std::string& path) @@ -76,5 +81,15 @@ namespace ignite return true; } + + bool IsValidDirectory(const std::string& path) + { + if (path.empty()) + return false; + + DWORD attrs = GetFileAttributesA(path.c_str()); + + return attrs != INVALID_FILE_ATTRIBUTES && (attrs & FILE_ATTRIBUTE_DIRECTORY) != 0; + } } } diff --git a/modules/platforms/cpp/core-test/src/test_utils.cpp b/modules/platforms/cpp/core-test/src/test_utils.cpp index ded229e520134..749c581b94f1f 100644 --- a/modules/platforms/cpp/core-test/src/test_utils.cpp +++ b/modules/platforms/cpp/core-test/src/test_utils.cpp @@ -38,12 +38,8 @@ namespace ignite_test cfg.jvmOpts.push_back("-DIGNITE_UPDATE_NOTIFIER=false"); cfg.jvmOpts.push_back("-Duser.language=en"); - std::string home; - bool homeFound = jni::ResolveIgniteHome("", home); - - assert(homeFound); - - cfg.jvmClassPath = jni::CreateIgniteHomeClasspath(home, true); + cfg.igniteHome = jni::ResolveIgniteHome(); + cfg.jvmClassPath = jni::CreateIgniteHomeClasspath(cfg.igniteHome, true); #ifdef IGNITE_TESTS_32 cfg.jvmInitMem = 256; diff --git 
a/modules/platforms/cpp/core/src/ignition.cpp b/modules/platforms/cpp/core/src/ignition.cpp index 78ddc54d9b6bc..7d90a52c77682 100644 --- a/modules/platforms/cpp/core/src/ignition.cpp +++ b/modules/platforms/cpp/core/src/ignition.cpp @@ -226,16 +226,10 @@ namespace ignite } // 2. Resolve IGNITE_HOME. - std::string home; - bool homeFound = ResolveIgniteHome(cfg.igniteHome, home); + std::string home = ResolveIgniteHome(cfg.igniteHome); // 3. Create classpath. - std::string cp; - - if (homeFound) - cp = CreateIgniteClasspath(cfg.jvmClassPath, home); - else - cp = CreateIgniteClasspath(cfg.jvmClassPath); + std::string cp = CreateIgniteClasspath(cfg.jvmClassPath, home); if (cp.empty()) { diff --git a/modules/platforms/cpp/jni/include/ignite/jni/utils.h b/modules/platforms/cpp/jni/include/ignite/jni/utils.h index a6eb37a61b01b..285f58733e9c8 100644 --- a/modules/platforms/cpp/jni/include/ignite/jni/utils.h +++ b/modules/platforms/cpp/jni/include/ignite/jni/utils.h @@ -158,14 +158,6 @@ namespace ignite */ IGNITE_IMPORT_EXPORT std::string CreateIgniteHomeClasspath(const std::string& home, bool forceTest); - /** - * Create Ignite classpath based on user input directory. - * - * @param usrCp User's classpath. - * @return Classpath. - */ - IGNITE_IMPORT_EXPORT std::string CreateIgniteClasspath(const std::string& usrCp); - /** * Create Ignite classpath based on user input and home directory. * @@ -181,15 +173,14 @@ namespace ignite * 1) Check for path provided as argument. * 2) Check for environment variable. * 3) Check for current working directory. - * Result of these 3 checks are evaluated based on existence of certain - * predefined folders inside possible GG home. If they are found, + * Result of these checks are evaluated based on existence of certain + * predefined folders inside possible Ignite home. If they are found, * IGNITE_HOME is considered resolved. * * @param path Optional path to evaluate. - * @param home Resolved GG home. 
- * @return True if IGNITE_HOME home was found. + * @return Resolved Ignite home. */ - IGNITE_IMPORT_EXPORT bool ResolveIgniteHome(const std::string& path, std::string& home); + IGNITE_IMPORT_EXPORT std::string ResolveIgniteHome(const std::string& path = ""); } } diff --git a/modules/platforms/cpp/jni/os/linux/src/utils.cpp b/modules/platforms/cpp/jni/os/linux/src/utils.cpp index e74e4e072259b..52e4097b67f51 100644 --- a/modules/platforms/cpp/jni/os/linux/src/utils.cpp +++ b/modules/platforms/cpp/jni/os/linux/src/utils.cpp @@ -25,6 +25,7 @@ #include "ignite/common/utils.h" #include "ignite/common/fixed_size_array.h" + #include "ignite/jni/utils.h" #include "ignite/jni/java.h" @@ -40,9 +41,6 @@ namespace ignite const char* IGNITE_HOME = "IGNITE_HOME"; - const char* PROBE_BIN = "/bin"; - const char* PROBE_EXAMPLES = "/examples"; - const char* IGNITE_NATIVE_TEST_CLASSPATH = "IGNITE_NATIVE_TEST_CLASSPATH"; /** Key indicating that the thread is attached. */ @@ -77,55 +75,66 @@ namespace ignite } /** - * Helper function for GG home resolution. Checks whether certain folders - * exist in the path. Optionally goes upwards in directory hierarchy. - * - * @param path Path to evaluate. - * @param up Whether to go upwards. - * @param res Resolved directory. - * @return Resolution result. + * Checks if the path looks like binary release home directory. + * Internally checks for presence of some directories, that are + * @return @c true if the path looks like binary release home directory. */ - bool ResolveIgniteHome0(const std::string& path, bool up, std::string& res) + bool LooksLikeBinaryReleaseHome(const std::string& path) { - struct stat pathStat; + static const char* PROBE_CORE_LIB = "/libs/ignite-core*.jar"; - if (stat(path.c_str(), &pathStat) == -1 || !S_ISDIR(pathStat.st_mode)) - return false; - - // Remove trailing slashes, otherwise we will have an infinite loop. 
- std::string path0; + std::string coreLibProbe = path + PROBE_CORE_LIB; - size_t last = path.find_last_not_of("/\\ "); + return FileExists(coreLibProbe); + } - if (last != std::string::npos) - path0.assign(path, 0, last + 1); + /** + * Checks if the path looks like source release home directory. + * Internally checks for presence of core source directory. + * @return @c true if the path looks like binary release home directory. + */ + bool LooksLikeSourceReleaseHome(const std::string& path) + { + static const char* PROBE_CORE_SOURCE = "/modules/core/src/main/java/org/apache/ignite"; - std::string binStr = path0 + PROBE_BIN; - struct stat binStat; + std::string coreSourcePath = path + PROBE_CORE_SOURCE; - std::string examplesStr = path0 + PROBE_EXAMPLES; - struct stat examplesStat; + return IsValidDirectory(coreSourcePath); + } - if (stat(binStr.c_str(), &binStat) != -1 && S_ISDIR(binStat.st_mode) && - stat(examplesStr.c_str(), &examplesStat) != -1 && S_ISDIR(examplesStat.st_mode)) - { - res = path0; + /** + * Helper function for Ignite home resolution. + * Goes upwards in directory hierarchy and checks whether certain + * folders exist in the path. + * + * @param path Path to evaluate. + * @return res Resolved directory. Empty string if not found. + */ + std::string ResolveIgniteHome0(const std::string& path) + { + if (!IsValidDirectory(path)) + return std::string(); - return true; - } + // Remove trailing slashes, otherwise we will have an infinite loop. + size_t last = path.find_last_not_of("/ "); + + if (last == std::string::npos) + return std::string(); + + std::string path0(path, 0, last + 1); - if (!up) - return false; + if (LooksLikeBinaryReleaseHome(path0) || LooksLikeSourceReleaseHome(path0)) + return path0; // Evaluate parent directory. 
size_t slashPos = path0.find_last_of("/"); if (slashPos == std::string::npos) - return false; + return std::string(); std::string parent(path0, 0, slashPos); - return ResolveIgniteHome0(parent, true, res); + return ResolveIgniteHome0(parent); } /** @@ -298,9 +307,9 @@ namespace ignite if (!path.empty() && FileExists(path)) return path; - std::string javaEnv; + std::string javaEnv = GetEnv(JAVA_HOME); - if (GetEnv(JAVA_HOME, javaEnv)) + if (!javaEnv.empty()) { std::string javaDll = javaEnv + JAVA_DLL; @@ -351,7 +360,12 @@ namespace ignite return cp; } - std::string CreateIgniteClasspath(const std::string& usrCp) + /** + * Adds semicolon at the end of the path if needed. + * @param usrCp Classpath provided by user. + * @return Normalized classpath. + */ + std::string NormalizeClasspath(const std::string& usrCp) { if (usrCp.empty() || *usrCp.rbegin() == ';') return usrCp; @@ -362,33 +376,35 @@ namespace ignite std::string CreateIgniteClasspath(const std::string& usrCp, const std::string& home) { // 1. Append user classpath if it exists. - std::string cp = CreateIgniteClasspath(usrCp); + std::string cp = NormalizeClasspath(usrCp); // 2. Append home classpath - std::string env; - bool envFound = GetEnv(IGNITE_NATIVE_TEST_CLASSPATH, env); + if (!home.empty()) + { + std::string env = GetEnv(IGNITE_NATIVE_TEST_CLASSPATH, "false"); - bool forceTest = envFound && env.compare("true") == 0; + bool forceTest = ToLower(env) == "true"; - std::string homeCp = CreateIgniteHomeClasspath(home, forceTest); + std::string homeCp = CreateIgniteHomeClasspath(home, forceTest); - cp.append(homeCp); + cp.append(homeCp); + } // 3. Return. return cp; } - bool ResolveIgniteHome(const std::string& path, std::string& home) + std::string ResolveIgniteHome(const std::string& path) { - if (!path.empty()) - // 1. Check passed argument. - return ResolveIgniteHome0(path, false, home); + // 1. Check passed argument. + if (IsValidDirectory(path)) + return path; // 2. Check environment variable. 
- std::string env; + std::string home = GetEnv(IGNITE_HOME); - if (GetEnv(IGNITE_HOME, env)) - return ResolveIgniteHome0(env, false, home); + if (IsValidDirectory(home)) + return home; // 3. Check current work dir. FixedSizeArray curDir(1024 * 16); @@ -396,11 +412,11 @@ namespace ignite char* res = getcwd(curDir.GetData(), curDir.GetSize()); if (!res) - return false; + return std::string(); std::string curDirStr(curDir.GetData()); - return ResolveIgniteHome0(curDirStr, true, home); + return ResolveIgniteHome0(curDirStr); } } } diff --git a/modules/platforms/cpp/jni/os/win/src/utils.cpp b/modules/platforms/cpp/jni/os/win/src/utils.cpp index 17ab6c38a0a9f..4d0c197e0e6e2 100644 --- a/modules/platforms/cpp/jni/os/win/src/utils.cpp +++ b/modules/platforms/cpp/jni/os/win/src/utils.cpp @@ -33,6 +33,13 @@ namespace ignite { namespace jni { + const char* JAVA_HOME = "JAVA_HOME"; + const char* JAVA_DLL = "\\jre\\bin\\server\\jvm.dll"; + + const char* IGNITE_HOME = "IGNITE_HOME"; + + const char* IGNITE_NATIVE_TEST_CLASSPATH = "IGNITE_NATIVE_TEST_CLASSPATH"; + AttachHelper::~AttachHelper() { // No-op. @@ -43,66 +50,67 @@ namespace ignite // No-op. } - const char* JAVA_HOME = "JAVA_HOME"; - const char* JAVA_DLL = "\\jre\\bin\\server\\jvm.dll"; + /** + * Checks if the path looks like binary release home directory. + * Internally checks for presence of core library. + * @return @c true if the path looks like binary release home directory. + */ + bool LooksLikeBinaryReleaseHome(const std::string& path) + { + static const char* PROBE_CORE_LIB = "\\libs\\ignite-core*.jar"; - const char* IGNITE_HOME = "IGNITE_HOME"; + std::string coreLibProbe = path + PROBE_CORE_LIB; - const char* PROBE_BIN = "\\bin"; - const char* PROBE_EXAMPLES = "\\examples"; + return FileExists(coreLibProbe); + } - const char* IGNITE_NATIVE_TEST_CLASSPATH = "IGNITE_NATIVE_TEST_CLASSPATH"; + /** + * Checks if the path looks like source release home directory. 
+ * Internally checks for presence of core source directory. + * @return @c true if the path looks like binary release home directory. + */ + bool LooksLikeSourceReleaseHome(const std::string& path) + { + static const char* PROBE_CORE_SOURCE = "\\modules\\core\\src\\main\\java\\org\\apache\\ignite"; + + std::string coreSourcePath = path + PROBE_CORE_SOURCE; + + return IsValidDirectory(coreSourcePath); + } /** - * Helper function for GG home resolution. Checks whether certain folders - * exist in the path. Optionally goes upwards in directory hierarchy. + * Helper function for Ignite home resolution. + * Goes upwards in directory hierarchy and checks whether certain + * folders exist in the path. * * @param path Path to evaluate. - * @param up Whether to go upwards. - * @param res Resolved directory. - * @return Resolution result. + * @return res Resolved directory. Empty string if not found. */ - bool ResolveIgniteHome0(const std::string& path, bool up, std::string& res) + std::string ResolveIgniteHome0(const std::string& path) { - DWORD attrs = GetFileAttributesA(path.c_str()); - - if (attrs == INVALID_FILE_ATTRIBUTES || !(attrs & FILE_ATTRIBUTE_DIRECTORY)) - return false; + if (!IsValidDirectory(path)) + return std::string(); // Remove trailing slashes, otherwise we will have an infinite loop. 
- std::string path0; - size_t last = path.find_last_not_of("/\\ "); - if (last != std::string::npos) - path0.assign(path, 0, last + 1); - - std::string binStr = path0 + PROBE_BIN; - DWORD binAttrs = GetFileAttributesA(binStr.c_str()); - - std::string examplesStr = path0 + PROBE_EXAMPLES; - DWORD examplesAttrs = GetFileAttributesA(examplesStr.c_str()); + if (last == std::string::npos) + return std::string(); - if (binAttrs != INVALID_FILE_ATTRIBUTES && (binAttrs & FILE_ATTRIBUTE_DIRECTORY) && - examplesAttrs != INVALID_FILE_ATTRIBUTES && (examplesAttrs & FILE_ATTRIBUTE_DIRECTORY)) - { - res = path0; + std::string path0(path, 0, last + 1); - return true; - } - - if (!up) - return false; + if (LooksLikeBinaryReleaseHome(path0) || LooksLikeSourceReleaseHome(path0)) + return path0; // Evaluate parent directory. size_t slashPos = path0.find_last_of("/\\"); if (slashPos == std::string::npos) - return false; + return std::string(); std::string parent(path0, 0, slashPos); - return ResolveIgniteHome0(parent, true, res); + return ResolveIgniteHome0(parent); } /** @@ -271,9 +279,9 @@ namespace ignite if (!path.empty() && FileExists(path)) return path; - std::string javaEnv; + std::string javaEnv = GetEnv(JAVA_HOME); - if (GetEnv(JAVA_HOME, javaEnv)) + if (!javaEnv.empty()) { std::string javaDll = javaEnv + JAVA_DLL; @@ -324,7 +332,12 @@ namespace ignite return cp; } - std::string CreateIgniteClasspath(const std::string& usrCp) + /** + * Adds semicolon at the end of the path if needed. + * @param usrCp Classpath provided by user. + * @return Normalized classpath. + */ + std::string NormalizeClasspath(const std::string& usrCp) { if (usrCp.empty() || *usrCp.rbegin() == ';') return usrCp; @@ -335,44 +348,52 @@ namespace ignite std::string CreateIgniteClasspath(const std::string& usrCp, const std::string& home) { // 1. Append user classpath if it exists. - std::string cp = CreateIgniteClasspath(usrCp); + std::string cp = NormalizeClasspath(usrCp); // 2. 
Append home classpath - std::string env; - bool envFound = GetEnv(IGNITE_NATIVE_TEST_CLASSPATH, env); + if (!home.empty()) + { + std::string env = GetEnv(IGNITE_NATIVE_TEST_CLASSPATH, "false"); - bool forceTest = envFound && env.compare("true") == 0; + bool forceTest = ToLower(env) == "true"; - std::string homeCp = CreateIgniteHomeClasspath(home, forceTest); + std::string homeCp = CreateIgniteHomeClasspath(home, forceTest); - cp.append(homeCp); + cp.append(homeCp); + } // 3. Return. return cp; } - bool ResolveIgniteHome(const std::string& path, std::string& home) + std::string ResolveIgniteHome(const std::string& path) { - if (!path.empty()) - // 1. Check passed argument. - return ResolveIgniteHome0(path, false, home); + // 1. Check passed argument. + if (IsValidDirectory(path)) + return path; // 2. Check environment variable. - std::string env; + std::string home = GetEnv(IGNITE_HOME); - if (GetEnv(IGNITE_HOME, env)) - return ResolveIgniteHome0(env, false, home); + if (IsValidDirectory(home)) + return home; // 3. Check current work dir. 
- const DWORD curDirLen = GetCurrentDirectory(0, NULL); + DWORD curDirLen = GetCurrentDirectoryA(0, NULL); + + if (!curDirLen) + return std::string(); FixedSizeArray curDir(curDirLen); - GetCurrentDirectoryA(curDir.GetSize(), curDir.GetData()); + curDirLen = GetCurrentDirectoryA(curDir.GetSize(), curDir.GetData()); + + if (!curDirLen) + return std::string(); std::string curDirStr(curDir.GetData()); - return ResolveIgniteHome0(curDirStr, true, home); + return ResolveIgniteHome0(curDirStr); } } } diff --git a/modules/platforms/cpp/odbc-test/src/test_utils.cpp b/modules/platforms/cpp/odbc-test/src/test_utils.cpp index 6e8fe6a31d59f..3e1a9d7d68e19 100644 --- a/modules/platforms/cpp/odbc-test/src/test_utils.cpp +++ b/modules/platforms/cpp/odbc-test/src/test_utils.cpp @@ -65,12 +65,8 @@ namespace ignite_test cfg.jvmOpts.push_back("-DIGNITE_UPDATE_NOTIFIER=false"); cfg.jvmOpts.push_back("-Duser.language=en"); - std::string home; - bool homeFound = jni::ResolveIgniteHome("", home); - - assert(homeFound); - - cfg.jvmClassPath = jni::CreateIgniteHomeClasspath(home, true); + cfg.igniteHome = jni::ResolveIgniteHome(); + cfg.jvmClassPath = jni::CreateIgniteHomeClasspath(cfg.igniteHome, true); #ifdef IGNITE_TESTS_32 cfg.jvmInitMem = 256; From 1cd4d2562163d9cba335da6eb6643ce8af977823 Mon Sep 17 00:00:00 2001 From: Andrey Novikov Date: Fri, 24 Nov 2017 14:14:48 +0700 Subject: [PATCH 163/243] IGNITE-6920 Fixed dependencies resolving. 
(cherry picked from commit c10aa0c) --- .../web-console/backend/app/agentSocket.js | 8 ++-- .../web-console/backend/app/agentsHandler.js | 15 ++++---- modules/web-console/backend/app/apiServer.js | 9 ++++- .../backend/app/browsersHandler.js | 7 +++- modules/web-console/backend/app/configure.js | 14 +++++-- modules/web-console/backend/app/mongo.js | 10 +++-- modules/web-console/backend/app/mongoose.js | 8 ++-- modules/web-console/backend/app/nconf.js | 6 ++- modules/web-console/backend/app/settings.js | 6 ++- .../web-console/backend/routes/activities.js | 37 ++++++++++--------- modules/web-console/backend/routes/admin.js | 8 ++-- modules/web-console/backend/routes/caches.js | 6 ++- .../web-console/backend/routes/clusters.js | 6 ++- .../backend/routes/configuration.js | 6 ++- modules/web-console/backend/routes/demo.js | 7 +++- modules/web-console/backend/routes/domains.js | 6 ++- .../web-console/backend/routes/downloads.js | 6 ++- modules/web-console/backend/routes/igfss.js | 6 ++- .../web-console/backend/routes/notebooks.js | 7 +++- modules/web-console/backend/routes/profile.js | 10 ++--- modules/web-console/backend/routes/public.js | 9 +++-- .../backend/services/activities.js | 7 ++-- modules/web-console/backend/services/auth.js | 5 +-- .../web-console/backend/services/caches.js | 7 ++-- .../web-console/backend/services/clusters.js | 7 ++-- .../backend/services/configurations.js | 5 +-- .../web-console/backend/services/domains.js | 7 ++-- .../web-console/backend/services/downloads.js | 13 ++++--- modules/web-console/backend/services/igfss.js | 7 ++-- modules/web-console/backend/services/mails.js | 9 +++-- .../web-console/backend/services/notebooks.js | 7 ++-- .../backend/services/notifications.js | 5 +-- .../web-console/backend/services/sessions.js | 5 +-- modules/web-console/backend/services/users.js | 7 ++-- 34 files changed, 167 insertions(+), 121 deletions(-) diff --git a/modules/web-console/backend/app/agentSocket.js 
b/modules/web-console/backend/app/agentSocket.js index 6e4518a5b2c15..39d880fc16609 100644 --- a/modules/web-console/backend/app/agentSocket.js +++ b/modules/web-console/backend/app/agentSocket.js @@ -17,14 +17,15 @@ 'use strict'; +const _ = require('lodash'); + // Fire me up! /** * Module interaction with agents. */ module.exports = { - implements: 'agent-socket', - inject: ['require(lodash)'] + implements: 'agent-socket' }; /** @@ -78,10 +79,9 @@ class Command { } /** - * @param _ * @returns {AgentSocket} */ -module.exports.factory = function(_) { +module.exports.factory = function() { /** * Connected agent descriptor. */ diff --git a/modules/web-console/backend/app/agentsHandler.js b/modules/web-console/backend/app/agentsHandler.js index 844ce1edc690e..9ee64cee0c300 100644 --- a/modules/web-console/backend/app/agentsHandler.js +++ b/modules/web-console/backend/app/agentsHandler.js @@ -19,6 +19,12 @@ const uuid = require('uuid/v4'); +const fs = require('fs'); +const path = require('path'); +const JSZip = require('jszip'); +const socketio = require('socket.io'); +const _ = require('lodash'); + // Fire me up! 
/** @@ -26,21 +32,16 @@ const uuid = require('uuid/v4'); */ module.exports = { implements: 'agents-handler', - inject: ['require(lodash)', 'require(fs)', 'require(path)', 'require(jszip)', 'require(socket.io)', 'settings', 'mongo', 'agent-socket'] + inject: ['settings', 'mongo', 'agent-socket'] }; /** - * @param _ - * @param fs - * @param path - * @param JSZip - * @param socketio * @param settings * @param mongo * @param {AgentSocket} AgentSocket * @returns {AgentsHandler} */ -module.exports.factory = function(_, fs, path, JSZip, socketio, settings, mongo, AgentSocket) { +module.exports.factory = function(settings, mongo, AgentSocket) { class AgentSockets { constructor() { /** diff --git a/modules/web-console/backend/app/apiServer.js b/modules/web-console/backend/app/apiServer.js index cb097509e4b40..0030529da1b1e 100644 --- a/modules/web-console/backend/app/apiServer.js +++ b/modules/web-console/backend/app/apiServer.js @@ -17,12 +17,17 @@ 'use strict'; +const fs = require('fs'); +const path = require('path'); + // Fire me up! +const Express = require('express'); + module.exports = { implements: 'api-server', - inject: ['require(fs)', 'require(path)', 'require(express)', 'settings', 'configure', 'routes'], - factory(fs, path, Express, settings, configure, routes) { + inject: ['settings', 'configure', 'routes'], + factory(settings, configure, routes) { /** * Connected agents manager. */ diff --git a/modules/web-console/backend/app/browsersHandler.js b/modules/web-console/backend/app/browsersHandler.js index 7ae247bb7c743..198bc1250e005 100644 --- a/modules/web-console/backend/app/browsersHandler.js +++ b/modules/web-console/backend/app/browsersHandler.js @@ -17,6 +17,9 @@ 'use strict'; +const _ = require('lodash'); +const socketio = require('socket.io'); + // Fire me up! 
/** @@ -24,8 +27,8 @@ */ module.exports = { implements: 'browsers-handler', - inject: ['require(lodash)', 'require(socket.io)', 'configure', 'errors', 'mongo'], - factory: (_, socketio, configure, errors, mongo) => { + inject: ['configure', 'errors', 'mongo'], + factory: (configure, errors, mongo) => { class BrowserSockets { constructor() { this.sockets = new Map(); diff --git a/modules/web-console/backend/app/configure.js b/modules/web-console/backend/app/configure.js index 7624bdd4babbe..b7bdb49097d33 100644 --- a/modules/web-console/backend/app/configure.js +++ b/modules/web-console/backend/app/configure.js @@ -17,6 +17,15 @@ 'use strict'; +const _ = require('lodash'); +const logger = require('morgan'); +const cookieParser = require('cookie-parser'); +const bodyParser = require('body-parser'); +const session = require('express-session'); +const connectMongo = require('connect-mongo'); +const passport = require('passport'); +const passportSocketIo = require('passport.socketio'); + // Fire me up! 
/** @@ -24,11 +33,10 @@ */ module.exports = { implements: 'configure', - inject: ['require(lodash)', 'require(morgan)', 'require(cookie-parser)', 'require(body-parser)', - 'require(express-session)', 'require(connect-mongo)', 'require(passport)', 'require(passport.socketio)', 'settings', 'mongo', 'middlewares:*'] + inject: ['settings', 'mongo', 'middlewares:*'] }; -module.exports.factory = function(_, logger, cookieParser, bodyParser, session, connectMongo, passport, passportSocketIo, settings, mongo, apis) { +module.exports.factory = function(settings, mongo, apis) { const _sessionStore = new (connectMongo(session))({mongooseConnection: mongo.connection}); return { diff --git a/modules/web-console/backend/app/mongo.js b/modules/web-console/backend/app/mongo.js index e0d0a0f8aa47a..de9167696980a 100644 --- a/modules/web-console/backend/app/mongo.js +++ b/modules/web-console/backend/app/mongo.js @@ -17,6 +17,8 @@ 'use strict'; +const passportMongo = require('passport-local-mongoose'); + // Fire me up! 
/** @@ -24,10 +26,10 @@ */ module.exports = { implements: 'mongo', - inject: ['require(passport-local-mongoose)', 'settings', 'mongoose'] + inject: ['settings', 'mongoose'] }; -const defineSchema = (passportMongo, mongoose) => { +const defineSchema = (mongoose) => { const Schema = mongoose.Schema; const ObjectId = mongoose.Schema.Types.ObjectId; const result = { connection: mongoose.connection }; @@ -1150,7 +1152,7 @@ const defineSchema = (passportMongo, mongoose) => { return result; }; -module.exports.factory = function(passportMongo, settings, mongoose) { +module.exports.factory = function(settings, mongoose) { // Use native promises mongoose.Promise = global.Promise; @@ -1204,5 +1206,5 @@ module.exports.factory = function(passportMongo, settings, mongoose) { return Promise.reject(err); }); }) - .then(() => defineSchema(passportMongo, mongoose)); + .then(() => defineSchema(mongoose)); }; diff --git a/modules/web-console/backend/app/mongoose.js b/modules/web-console/backend/app/mongoose.js index 7b6e7f31e676a..94f9c608a1ab0 100644 --- a/modules/web-console/backend/app/mongoose.js +++ b/modules/web-console/backend/app/mongoose.js @@ -17,13 +17,11 @@ 'use strict'; +const mongoose = require('mongoose'); + // Fire me up! module.exports = { implements: 'mongoose', - inject: ['require(mongoose)'] -}; - -module.exports.factory = (mongoose) => { - return mongoose; + factory: () => mongoose }; diff --git a/modules/web-console/backend/app/nconf.js b/modules/web-console/backend/app/nconf.js index 6813f0fd38656..3f5d7d1724109 100644 --- a/modules/web-console/backend/app/nconf.js +++ b/modules/web-console/backend/app/nconf.js @@ -17,6 +17,9 @@ 'use strict'; +const fs = require('fs'); +const nconf = require('nconf'); + // Fire me up! 
/** @@ -24,8 +27,7 @@ */ module.exports = { implements: 'nconf', - inject: ['require(nconf)', 'require(fs)'], - factory(nconf, fs) { + factory() { nconf.env({separator: '_'}).argv(); const dfltFile = 'config/settings.json'; diff --git a/modules/web-console/backend/app/settings.js b/modules/web-console/backend/app/settings.js index 5032443582097..2d94eb56352de 100644 --- a/modules/web-console/backend/app/settings.js +++ b/modules/web-console/backend/app/settings.js @@ -17,6 +17,8 @@ 'use strict'; +const fs = require('fs'); + // Fire me up! /** @@ -24,8 +26,8 @@ */ module.exports = { implements: 'settings', - inject: ['nconf', 'require(fs)'], - factory(nconf, fs) { + inject: ['nconf'], + factory(nconf) { /** * Normalize a port into a number, string, or false. */ diff --git a/modules/web-console/backend/routes/activities.js b/modules/web-console/backend/routes/activities.js index ad0e46962821d..4db6ac2372ad0 100644 --- a/modules/web-console/backend/routes/activities.js +++ b/modules/web-console/backend/routes/activities.js @@ -17,29 +17,30 @@ 'use strict'; +const express = require('express'); + // Fire me up! module.exports = { implements: 'routes/activities', - inject: ['require(express)', 'services/activities'] -}; + inject: ['services/activities'], + /** + * @param express + * @param {ActivitiesService} activitiesService + * @returns {Promise} + */ + factory: (activitiesService) => { + return new Promise((factoryResolve) => { + const router = new express.Router(); -/** - * @param express - * @param {ActivitiesService} activitiesService - * @returns {Promise} - */ -module.exports.factory = function(express, activitiesService) { - return new Promise((factoryResolve) => { - const router = new express.Router(); + // Post user activities to page. + router.post('/page', (req, res) => { + activitiesService.merge(req.user._id, req.body) + .then(res.api.ok) + .catch(res.api.error); + }); - // Post user activities to page. 
- router.post('/page', (req, res) => { - activitiesService.merge(req.user._id, req.body) - .then(res.api.ok) - .catch(res.api.error); + factoryResolve(router); }); - - factoryResolve(router); - }); + } }; diff --git a/modules/web-console/backend/routes/admin.js b/modules/web-console/backend/routes/admin.js index 5ee41c8469ae5..a9efc8d6aaf7c 100644 --- a/modules/web-console/backend/routes/admin.js +++ b/modules/web-console/backend/routes/admin.js @@ -17,16 +17,16 @@ 'use strict'; +const express = require('express'); + // Fire me up! module.exports = { implements: 'routes/admin', - inject: ['require(lodash)', 'require(express)', 'settings', 'mongo', 'services/spaces', 'services/mails', 'services/sessions', 'services/users', 'services/notifications'] + inject: ['settings', 'mongo', 'services/spaces', 'services/mails', 'services/sessions', 'services/users', 'services/notifications'] }; /** - * @param _ - * @param express * @param settings * @param mongo * @param spacesService @@ -36,7 +36,7 @@ module.exports = { * @param {NotificationsService} notificationsService * @returns {Promise} */ -module.exports.factory = function(_, express, settings, mongo, spacesService, mailsService, sessionsService, usersService, notificationsService) { +module.exports.factory = function(settings, mongo, spacesService, mailsService, sessionsService, usersService, notificationsService) { return new Promise((factoryResolve) => { const router = new express.Router(); diff --git a/modules/web-console/backend/routes/caches.js b/modules/web-console/backend/routes/caches.js index e040fdab3578b..d7ed8b84dea4d 100644 --- a/modules/web-console/backend/routes/caches.js +++ b/modules/web-console/backend/routes/caches.js @@ -17,14 +17,16 @@ 'use strict'; +const express = require('express'); + // Fire me up! 
module.exports = { implements: 'routes/caches', - inject: ['require(lodash)', 'require(express)', 'mongo', 'services/caches'] + inject: ['mongo', 'services/caches'] }; -module.exports.factory = function(_, express, mongo, cachesService) { +module.exports.factory = function(mongo, cachesService) { return new Promise((factoryResolve) => { const router = new express.Router(); diff --git a/modules/web-console/backend/routes/clusters.js b/modules/web-console/backend/routes/clusters.js index 97a446a0e264e..24334c2e802b6 100644 --- a/modules/web-console/backend/routes/clusters.js +++ b/modules/web-console/backend/routes/clusters.js @@ -17,14 +17,16 @@ 'use strict'; +const express = require('express'); + // Fire me up! module.exports = { implements: 'routes/clusters', - inject: ['require(lodash)', 'require(express)', 'mongo', 'services/clusters'] + inject: ['mongo', 'services/clusters'] }; -module.exports.factory = function(_, express, mongo, clustersService) { +module.exports.factory = function(mongo, clustersService) { return new Promise((factoryResolve) => { const router = new express.Router(); diff --git a/modules/web-console/backend/routes/configuration.js b/modules/web-console/backend/routes/configuration.js index c3ff5d6a482b5..d9bde7516ec10 100644 --- a/modules/web-console/backend/routes/configuration.js +++ b/modules/web-console/backend/routes/configuration.js @@ -17,14 +17,16 @@ 'use strict'; +const express = require('express'); + // Fire me up! 
module.exports = { implements: 'routes/configurations', - inject: ['require(lodash)', 'require(express)', 'mongo', 'services/configurations'] + inject: ['mongo', 'services/configurations'] }; -module.exports.factory = function(_, express, mongo, configurationsService) { +module.exports.factory = function(mongo, configurationsService) { return new Promise((factoryResolve) => { const router = new express.Router(); /** diff --git a/modules/web-console/backend/routes/demo.js b/modules/web-console/backend/routes/demo.js index b200d83b0e804..a18fa7ac543c3 100644 --- a/modules/web-console/backend/routes/demo.js +++ b/modules/web-console/backend/routes/demo.js @@ -17,6 +17,9 @@ 'use strict'; +const express = require('express'); +const _ = require('lodash'); + // Fire me up! const clusters = require('./demo/clusters.json'); @@ -26,7 +29,7 @@ const igfss = require('./demo/igfss.json'); module.exports = { implements: 'routes/demo', - inject: ['require(lodash)', 'require(express)', 'errors', 'settings', 'mongo', 'services/spaces'] + inject: ['errors', 'settings', 'mongo', 'services/spaces'] }; /** @@ -39,7 +42,7 @@ module.exports = { * @param spacesService * @return {Promise} */ -module.exports.factory = (_, express, errors, settings, mongo, spacesService) => { +module.exports.factory = (errors, settings, mongo, spacesService) => { return new Promise((factoryResolve) => { const router = new express.Router(); diff --git a/modules/web-console/backend/routes/domains.js b/modules/web-console/backend/routes/domains.js index db1d8928ab6f4..caa9201ab13e9 100644 --- a/modules/web-console/backend/routes/domains.js +++ b/modules/web-console/backend/routes/domains.js @@ -17,14 +17,16 @@ 'use strict'; +const express = require('express'); + // Fire me up! 
module.exports = { implements: 'routes/domains', - inject: ['require(lodash)', 'require(express)', 'mongo', 'services/domains'] + inject: ['mongo', 'services/domains'] }; -module.exports.factory = (_, express, mongo, domainsService) => { +module.exports.factory = (mongo, domainsService) => { return new Promise((factoryResolve) => { const router = new express.Router(); diff --git a/modules/web-console/backend/routes/downloads.js b/modules/web-console/backend/routes/downloads.js index 88a1923b1e61f..a06bb27290d63 100644 --- a/modules/web-console/backend/routes/downloads.js +++ b/modules/web-console/backend/routes/downloads.js @@ -17,11 +17,13 @@ 'use strict'; +const express = require('express'); + // Fire me up! module.exports = { implements: 'routes/downloads', - inject: ['require(lodash)', 'require(express)', 'services/agents', 'services/activities'] + inject: ['services/agents', 'services/activities'] }; /** @@ -31,7 +33,7 @@ module.exports = { * @param {ActivitiesService} activitiesService * @returns {Promise} */ -module.exports.factory = function(_, express, downloadsService, activitiesService) { +module.exports.factory = function(downloadsService, activitiesService) { return new Promise((resolveFactory) => { const router = new express.Router(); diff --git a/modules/web-console/backend/routes/igfss.js b/modules/web-console/backend/routes/igfss.js index c88d6270a1803..b95f21fbc6be0 100644 --- a/modules/web-console/backend/routes/igfss.js +++ b/modules/web-console/backend/routes/igfss.js @@ -17,14 +17,16 @@ 'use strict'; +const express = require('express'); + // Fire me up! 
module.exports = { implements: 'routes/igfss', - inject: ['require(lodash)', 'require(express)', 'mongo', 'services/igfss'] + inject: ['mongo', 'services/igfss'] }; -module.exports.factory = function(_, express, mongo, igfssService) { +module.exports.factory = function(mongo, igfssService) { return new Promise((factoryResolve) => { const router = new express.Router(); diff --git a/modules/web-console/backend/routes/notebooks.js b/modules/web-console/backend/routes/notebooks.js index c33080947997b..0807db8ae9649 100644 --- a/modules/web-console/backend/routes/notebooks.js +++ b/modules/web-console/backend/routes/notebooks.js @@ -17,14 +17,17 @@ 'use strict'; +const express = require('express'); +const _ = require('lodash'); + // Fire me up! module.exports = { implements: 'routes/notebooks', - inject: ['require(lodash)', 'require(express)', 'mongo', 'services/spaces', 'services/notebooks'] + inject: ['mongo', 'services/spaces', 'services/notebooks'] }; -module.exports.factory = (_, express, mongo, spacesService, notebooksService) => { +module.exports.factory = (mongo, spacesService, notebooksService) => { return new Promise((factoryResolve) => { const router = new express.Router(); diff --git a/modules/web-console/backend/routes/profile.js b/modules/web-console/backend/routes/profile.js index 76edf721fde56..0ce2656b1f7f0 100644 --- a/modules/web-console/backend/routes/profile.js +++ b/modules/web-console/backend/routes/profile.js @@ -17,22 +17,22 @@ 'use strict'; +const express = require('express'); +const _ = require('lodash'); + // Fire me up! 
module.exports = { implements: 'routes/profiles', - inject: ['require(lodash)', 'require(express)', 'mongo', 'services/users'] + inject: ['mongo', 'services/users'] }; /** - * - * @param _ Lodash module - * @param express Express module * @param mongo * @param {UsersService} usersService * @returns {Promise} */ -module.exports.factory = function(_, express, mongo, usersService) { +module.exports.factory = function(mongo, usersService) { return new Promise((resolveFactory) => { const router = new express.Router(); diff --git a/modules/web-console/backend/routes/public.js b/modules/web-console/backend/routes/public.js index 860e2678c2fff..a643a00c46e9c 100644 --- a/modules/web-console/backend/routes/public.js +++ b/modules/web-console/backend/routes/public.js @@ -17,23 +17,24 @@ 'use strict'; +const express = require('express'); +const passport = require('passport'); + // Fire me up! module.exports = { implements: 'routes/public', - inject: ['require(express)', 'require(passport)', 'mongo', 'services/mails', 'services/users', 'services/auth'] + inject: ['mongo', 'services/mails', 'services/users', 'services/auth'] }; /** - * @param express - * @param passport * @param mongo * @param mailsService * @param {UsersService} usersService * @param {AuthService} authService * @returns {Promise} */ -module.exports.factory = function(express, passport, mongo, mailsService, usersService, authService) { +module.exports.factory = function(mongo, mailsService, usersService, authService) { return new Promise((factoryResolve) => { const router = new express.Router(); diff --git a/modules/web-console/backend/services/activities.js b/modules/web-console/backend/services/activities.js index a049f65f2c645..9399eb056d8a6 100644 --- a/modules/web-console/backend/services/activities.js +++ b/modules/web-console/backend/services/activities.js @@ -17,19 +17,20 @@ 'use strict'; +const _ = require('lodash'); + // Fire me up! 
module.exports = { implements: 'services/activities', - inject: ['require(lodash)', 'mongo'] + inject: ['mongo'] }; /** - * @param _ * @param mongo * @returns {ActivitiesService} */ -module.exports.factory = (_, mongo) => { +module.exports.factory = (mongo) => { class ActivitiesService { /** * Update page activities. diff --git a/modules/web-console/backend/services/auth.js b/modules/web-console/backend/services/auth.js index c3423da95687c..dde246071bb64 100644 --- a/modules/web-console/backend/services/auth.js +++ b/modules/web-console/backend/services/auth.js @@ -21,18 +21,17 @@ module.exports = { implements: 'services/auth', - inject: ['require(lodash)', 'mongo', 'settings', 'errors'] + inject: ['mongo', 'settings', 'errors'] }; /** - * @param _ * @param mongo * @param settings * @param errors * @returns {AuthService} */ -module.exports.factory = (_, mongo, settings, errors) => { +module.exports.factory = (mongo, settings, errors) => { class AuthService { /** * Generate token string. diff --git a/modules/web-console/backend/services/caches.js b/modules/web-console/backend/services/caches.js index 19aed8200a668..5c96ccd7a4dc0 100644 --- a/modules/web-console/backend/services/caches.js +++ b/modules/web-console/backend/services/caches.js @@ -17,21 +17,22 @@ 'use strict'; +const _ = require('lodash'); + // Fire me up! module.exports = { implements: 'services/caches', - inject: ['require(lodash)', 'mongo', 'services/spaces', 'errors'] + inject: ['mongo', 'services/spaces', 'errors'] }; /** - * @param _ * @param mongo * @param {SpacesService} spaceService * @param errors * @returns {CachesService} */ -module.exports.factory = (_, mongo, spaceService, errors) => { +module.exports.factory = (mongo, spaceService, errors) => { /** * Convert remove status operation to own presentation. 
* diff --git a/modules/web-console/backend/services/clusters.js b/modules/web-console/backend/services/clusters.js index 06e413a96c08b..9f50edefa3e40 100644 --- a/modules/web-console/backend/services/clusters.js +++ b/modules/web-console/backend/services/clusters.js @@ -17,21 +17,22 @@ 'use strict'; +const _ = require('lodash'); + // Fire me up! module.exports = { implements: 'services/clusters', - inject: ['require(lodash)', 'mongo', 'services/spaces', 'errors'] + inject: ['mongo', 'services/spaces', 'errors'] }; /** - * @param _ * @param mongo * @param {SpacesService} spacesService * @param errors * @returns {ClustersService} */ -module.exports.factory = (_, mongo, spacesService, errors) => { +module.exports.factory = (mongo, spacesService, errors) => { /** * Convert remove status operation to own presentation. * diff --git a/modules/web-console/backend/services/configurations.js b/modules/web-console/backend/services/configurations.js index 7eef8a230c5ea..36d99321b6b07 100644 --- a/modules/web-console/backend/services/configurations.js +++ b/modules/web-console/backend/services/configurations.js @@ -21,11 +21,10 @@ module.exports = { implements: 'services/configurations', - inject: ['require(lodash)', 'mongo', 'services/spaces', 'services/clusters', 'services/caches', 'services/domains', 'services/igfss'] + inject: ['mongo', 'services/spaces', 'services/clusters', 'services/caches', 'services/domains', 'services/igfss'] }; /** - * @param _ * @param mongo * @param {SpacesService} spacesService * @param {ClustersService} clustersService @@ -34,7 +33,7 @@ module.exports = { * @param {IgfssService} igfssService * @returns {ConfigurationsService} */ -module.exports.factory = (_, mongo, spacesService, clustersService, cachesService, domainsService, igfssService) => { +module.exports.factory = (mongo, spacesService, clustersService, cachesService, domainsService, igfssService) => { class ConfigurationsService { static list(userId, demo) { let spaces; diff --git 
a/modules/web-console/backend/services/domains.js b/modules/web-console/backend/services/domains.js index 22582c17f6e74..e25d0916e5374 100644 --- a/modules/web-console/backend/services/domains.js +++ b/modules/web-console/backend/services/domains.js @@ -17,22 +17,23 @@ 'use strict'; +const _ = require('lodash'); + // Fire me up! module.exports = { implements: 'services/domains', - inject: ['require(lodash)', 'mongo', 'services/spaces', 'services/caches', 'errors'] + inject: ['mongo', 'services/spaces', 'services/caches', 'errors'] }; /** - * @param _ * @param mongo * @param {SpacesService} spacesService * @param {CachesService} cachesService * @param errors * @returns {DomainsService} */ -module.exports.factory = (_, mongo, spacesService, cachesService, errors) => { +module.exports.factory = (mongo, spacesService, cachesService, errors) => { /** * Convert remove status operation to own presentation. * diff --git a/modules/web-console/backend/services/downloads.js b/modules/web-console/backend/services/downloads.js index 3dfc2bed1cf91..6a03fe29a597a 100644 --- a/modules/web-console/backend/services/downloads.js +++ b/modules/web-console/backend/services/downloads.js @@ -17,24 +17,25 @@ 'use strict'; +const fs = require('fs'); +const path = require('path'); +const _ = require('lodash'); +const JSZip = require('jszip'); + // Fire me up! module.exports = { implements: 'services/agents', - inject: ['require(lodash)', 'require(fs)', 'require(path)', 'require(jszip)', 'settings', 'agents-handler', 'errors'] + inject: ['settings', 'agents-handler', 'errors'] }; /** - * @param _ - * @param fs - * @param path - * @param JSZip * @param settings * @param agentsHnd * @param errors * @returns {DownloadsService} */ -module.exports.factory = (_, fs, path, JSZip, settings, agentsHnd, errors) => { +module.exports.factory = (settings, agentsHnd, errors) => { class DownloadsService { /** * Get agent archive with user agent configuration. 
diff --git a/modules/web-console/backend/services/igfss.js b/modules/web-console/backend/services/igfss.js index acb565b21a26b..e9ff38eb5a30b 100644 --- a/modules/web-console/backend/services/igfss.js +++ b/modules/web-console/backend/services/igfss.js @@ -17,21 +17,22 @@ 'use strict'; +const _ = require('lodash'); + // Fire me up! module.exports = { implements: 'services/igfss', - inject: ['require(lodash)', 'mongo', 'services/spaces', 'errors'] + inject: ['mongo', 'services/spaces', 'errors'] }; /** - * @param _ * @param mongo * @param {SpacesService} spacesService * @param errors * @returns {IgfssService} */ -module.exports.factory = (_, mongo, spacesService, errors) => { +module.exports.factory = (mongo, spacesService, errors) => { /** * Convert remove status operation to own presentation. * diff --git a/modules/web-console/backend/services/mails.js b/modules/web-console/backend/services/mails.js index 3c22a95a5fb3f..6a31e93466fff 100644 --- a/modules/web-console/backend/services/mails.js +++ b/modules/web-console/backend/services/mails.js @@ -17,20 +17,21 @@ 'use strict'; +const _ = require('lodash'); +const nodemailer = require('nodemailer'); + // Fire me up! module.exports = { implements: 'services/mails', - inject: ['require(lodash)', 'require(nodemailer)', 'settings'] + inject: ['settings'] }; /** - * @param _ - * @param nodemailer * @param settings * @returns {MailsService} */ -module.exports.factory = (_, nodemailer, settings) => { +module.exports.factory = (settings) => { /** * Send mail to user. * diff --git a/modules/web-console/backend/services/notebooks.js b/modules/web-console/backend/services/notebooks.js index 9aa2c386d8abe..5d8b57d2ec3f8 100644 --- a/modules/web-console/backend/services/notebooks.js +++ b/modules/web-console/backend/services/notebooks.js @@ -17,21 +17,22 @@ 'use strict'; +const _ = require('lodash'); + // Fire me up! 
module.exports = { implements: 'services/notebooks', - inject: ['require(lodash)', 'mongo', 'services/spaces', 'errors'] + inject: ['mongo', 'services/spaces', 'errors'] }; /** - * @param _ * @param mongo * @param {SpacesService} spacesService * @param errors * @returns {NotebooksService} */ -module.exports.factory = (_, mongo, spacesService, errors) => { +module.exports.factory = (mongo, spacesService, errors) => { /** * Convert remove status operation to own presentation. * diff --git a/modules/web-console/backend/services/notifications.js b/modules/web-console/backend/services/notifications.js index f1860e759ce2c..c7d803387d540 100644 --- a/modules/web-console/backend/services/notifications.js +++ b/modules/web-console/backend/services/notifications.js @@ -21,16 +21,15 @@ module.exports = { implements: 'services/notifications', - inject: ['require(lodash)', 'mongo', 'browsers-handler'] + inject: ['mongo', 'browsers-handler'] }; /** - * @param _ * @param mongo * @param browsersHnd * @returns {NotificationsService} */ -module.exports.factory = (_, mongo, browsersHnd) => { +module.exports.factory = (mongo, browsersHnd) => { class NotificationsService { /** * Update notifications. diff --git a/modules/web-console/backend/services/sessions.js b/modules/web-console/backend/services/sessions.js index 7f3fc7351b19b..0518ce2b46603 100644 --- a/modules/web-console/backend/services/sessions.js +++ b/modules/web-console/backend/services/sessions.js @@ -21,16 +21,15 @@ module.exports = { implements: 'services/sessions', - inject: ['require(lodash)', 'mongo', 'errors'] + inject: ['mongo', 'errors'] }; /** - * @param _ * @param mongo * @param errors * @returns {SessionsService} */ -module.exports.factory = (_, mongo, errors) => { +module.exports.factory = (mongo, errors) => { class SessionsService { /** * Become user. 
diff --git a/modules/web-console/backend/services/users.js b/modules/web-console/backend/services/users.js index 991928aa2da0a..620f7e95a1cf9 100644 --- a/modules/web-console/backend/services/users.js +++ b/modules/web-console/backend/services/users.js @@ -17,15 +17,16 @@ 'use strict'; +const _ = require('lodash'); + // Fire me up! module.exports = { implements: 'services/users', - inject: ['require(lodash)', 'errors', 'settings', 'mongo', 'services/spaces', 'services/mails', 'services/activities', 'agents-handler'] + inject: ['errors', 'settings', 'mongo', 'services/spaces', 'services/mails', 'services/activities', 'agents-handler'] }; /** - * @param _ * @param mongo * @param errors * @param settings @@ -35,7 +36,7 @@ module.exports = { * @param {AgentsHandler} agentHnd * @returns {UsersService} */ -module.exports.factory = (_, errors, settings, mongo, spacesService, mailsService, activitiesService, agentHnd) => { +module.exports.factory = (errors, settings, mongo, spacesService, mailsService, activitiesService, agentHnd) => { const _randomString = () => { const possible = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; const possibleLen = possible.length; From 95381a3f1196b3f106d9f54ffae5e24391d928a4 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Wed, 13 Dec 2017 10:58:22 +0700 Subject: [PATCH 164/243] IGNITE-6390 Web Console: Fixed cluster selector on topology changed. 
(cherry picked from commit e6489ea) --- .../frontend/app/components/page-queries/controller.js | 5 +---- .../frontend/app/modules/agent/AgentManager.service.js | 6 ++++++ 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/modules/web-console/frontend/app/components/page-queries/controller.js b/modules/web-console/frontend/app/components/page-queries/controller.js index dba0269b96c5c..bd03b222c1f52 100644 --- a/modules/web-console/frontend/app/components/page-queries/controller.js +++ b/modules/web-console/frontend/app/components/page-queries/controller.js @@ -929,9 +929,6 @@ export default class { const awaitClusters$ = fromPromise( agentMgr.startClusterWatch('Back to Configuration', 'base.configuration.tabs.advanced.clusters')); - const currentCluster$ = agentMgr.connectionSbj - .distinctUntilChanged((n, o) => n.cluster === o.cluster); - const finishLoading$ = defer(() => { if (!$root.IgniteDemoMode) Loading.finish('sqlLoading'); @@ -942,7 +939,7 @@ export default class { }; this.refresh$ = awaitClusters$ - .mergeMap(() => currentCluster$) + .mergeMap(() => agentMgr.currentCluster$) .do(() => Loading.start('sqlLoading')) .do(() => { _.forEach($scope.notebook.paragraphs, (paragraph) => { diff --git a/modules/web-console/frontend/app/modules/agent/AgentManager.service.js b/modules/web-console/frontend/app/modules/agent/AgentManager.service.js index 7668132d4fde6..7bc23a98b49ec 100644 --- a/modules/web-console/frontend/app/modules/agent/AgentManager.service.js +++ b/modules/web-console/frontend/app/modules/agent/AgentManager.service.js @@ -112,6 +112,12 @@ export default class IgniteAgentManager { this.connectionSbj = new BehaviorSubject(new ConnectionState(cluster)); + let prevCluster; + + this.currentCluster$ = this.connectionSbj + .distinctUntilChanged(({ cluster }) => prevCluster === cluster) + .do(({ cluster }) => prevCluster = cluster); + this.clusterVersion = '2.1.0'; if (!this.isDemoMode()) { From d7cb5b8926931b9479f577e3df524b9cf3b343f1 Mon Sep 17 
00:00:00 2001 From: Alexey Popov Date: Thu, 30 Nov 2017 15:45:44 +0300 Subject: [PATCH 165/243] IGNITE-6828 Fixed confusing messages SLF4J: Failed to load class at Ignite start. This closes #2980. Signed-off-by: nikolay_tikhonov --- modules/spring-data/pom.xml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/modules/spring-data/pom.xml b/modules/spring-data/pom.xml index 8a68559b67658..caa92261e6afc 100644 --- a/modules/spring-data/pom.xml +++ b/modules/spring-data/pom.xml @@ -58,6 +58,13 @@ org.springframework.data spring-data-commons ${spring.data.version} + + + + org.slf4j + jcl-over-slf4j + +
        From 428c9bf0fb0682ce0f2b0a062168d9593463a8ce Mon Sep 17 00:00:00 2001 From: "Andrey V. Mashenkov" Date: Wed, 13 Dec 2017 14:16:50 +0300 Subject: [PATCH 166/243] IGNITE-6423: PDS could be corrupted if partition have been evicted and owned again. This closes #3115. Fixed page memory update operations without checkpoint lock. Fixed page CRC calculation. Fixed outdated page handling. Added checkpoint lock hold assertions for memory update operations. Fixed incorrect tests. (cherry picked from commit e24d4d0) --- .../internal/pagemem/store/PageStore.java | 5 +- .../processors/cache/GridCacheTtlManager.java | 3 +- .../cache/IgniteCacheOffheapManagerImpl.java | 41 +++-- .../dht/GridDhtTxPrepareFuture.java | 5 + .../local/atomic/GridLocalAtomicCache.java | 171 +++++++++--------- .../GridCacheDatabaseSharedManager.java | 9 +- .../persistence/GridCacheOffheapManager.java | 4 + .../cache/persistence/RowStore.java | 2 + .../cache/persistence/file/FilePageStore.java | 32 +++- .../file/FilePageStoreManager.java | 7 +- .../persistence/pagemem/PageMemoryImpl.java | 56 ++++-- .../processors/cache/tree/CacheDataTree.java | 2 + .../cache/tree/PendingEntriesTree.java | 2 + .../impl/PageMemoryNoLoadSelfTest.java | 10 +- .../cache/persistence/DummyPageIO.java | 41 +++++ ...itePdsRecoveryAfterFileCorruptionTest.java | 83 +++++++-- ...pointSimulationWithRealCpDisabledTest.java | 53 +++++- .../db/file/IgnitePdsEvictionTest.java | 30 +++ .../query/h2/database/H2TreeIndex.java | 56 ++++-- 19 files changed, 437 insertions(+), 175 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/DummyPageIO.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStore.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStore.java index f6e577ce3916e..42d584d5eba1c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStore.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/PageStore.java @@ -71,10 +71,11 @@ public interface PageStore { * @param pageId Page ID. * @param pageBuf Page buffer to write. * @param tag Partition file version, 1-based incrementing counter. For outdated pages {@code tag} has lower value, - * and write does nothing + * and write does nothing. + * @param calculateCrc if {@code False} crc calculation will be forcibly skipped. * @throws IgniteCheckedException If page writing failed (IO error occurred). */ - public void write(long pageId, ByteBuffer pageBuf, int tag) throws IgniteCheckedException; + public void write(long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException; /** * Gets page offset within the store file. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheTtlManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheTtlManager.java index b006154de5069..9c013fc19e839 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheTtlManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheTtlManager.java @@ -23,7 +23,6 @@ import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheEntry; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; -import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; import org.apache.ignite.internal.util.GridConcurrentSkipListSet; import org.apache.ignite.internal.util.lang.IgniteInClosure2X; import org.apache.ignite.internal.util.typedef.X; @@ -145,7 +144,7 @@ public long pendingSize() throws IgniteCheckedException { try { X.println(">>>"); X.println(">>> TTL processor memory stats [igniteInstanceName=" + cctx.igniteInstanceName() + - ", cache=" + cctx.name() 
+ ']'); + ", cache=" + cctx.name() + ']'); X.println(">>> pendingEntriesSize: " + pendingSize()); } catch (IgniteCheckedException e) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java index 56879560384f7..4a810e75bd013 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java @@ -139,20 +139,20 @@ public class IgniteCacheOffheapManagerImpl implements IgniteCacheOffheapManager } /** {@inheritDoc} */ - public void onCacheStarted(GridCacheContext cctx) throws IgniteCheckedException{ + public void onCacheStarted(GridCacheContext cctx) throws IgniteCheckedException { if (cctx.affinityNode() && cctx.ttl().eagerTtlEnabled() && pendingEntries == null) { String name = "PendingEntries"; - long rootPage = allocateForTree(); + long rootPage = allocateForTree(); - pendingEntries = new PendingEntriesTree( - grp, - name, - grp.dataRegion().pageMemory(), - rootPage, - grp.reuseList(), - true); - } + pendingEntries = new PendingEntriesTree( + grp, + name, + grp.dataRegion().pageMemory(), + rootPage, + grp.reuseList(), + true); + } } /** @@ -196,6 +196,7 @@ private void removeCacheData(int cacheId) { try { if (grp.sharedGroup()) { assert cacheId != CU.UNDEFINED_CACHE_ID; + assert ctx.database().checkpointLockIsHeldByThread(); for (CacheDataStore store : cacheDataStores()) store.clear(cacheId); @@ -443,7 +444,7 @@ private Iterator cacheData(boolean primary, boolean backup, Affi while (it.hasNext()) { cctx.shared().database().checkpointReadLock(); - try{ + try { KeyCacheObject key = it.next().key(); try { @@ -1198,6 +1199,8 @@ private boolean canUpdateOldRow(GridCacheContext cctx, @Nullable CacheDataRow ol try { int cacheId = 
grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID; + assert cctx.shared().database().checkpointLockIsHeldByThread(); + dataTree.invoke(new SearchRow(cacheId, key), CacheDataRowAdapter.RowData.NO_KEY, c); switch (c.operationType()) { @@ -1238,8 +1241,7 @@ private boolean canUpdateOldRow(GridCacheContext cctx, @Nullable CacheDataRow ol CacheObject val, GridCacheVersion ver, long expireTime, - @Nullable CacheDataRow oldRow) throws IgniteCheckedException - { + @Nullable CacheDataRow oldRow) throws IgniteCheckedException { int cacheId = grp.storeCacheIdInDataPage() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID; DataRow dataRow = new DataRow(key, val, ver, partId, expireTime, cacheId); @@ -1264,8 +1266,9 @@ private boolean canUpdateOldRow(GridCacheContext cctx, @Nullable CacheDataRow ol } /** {@inheritDoc} */ - @Override public void update(GridCacheContext cctx,KeyCacheObject key, - + @Override public void update( + GridCacheContext cctx, + KeyCacheObject key, CacheObject val, GridCacheVersion ver, long expireTime, @@ -1291,6 +1294,8 @@ private boolean canUpdateOldRow(GridCacheContext cctx, @Nullable CacheDataRow ol CacheDataRow old; + assert cctx.shared().database().checkpointLockIsHeldByThread(); + if (canUpdateOldRow(cctx, oldRow, dataRow) && rowStore.updateRow(oldRow.link(), dataRow)) { old = oldRow; @@ -1411,6 +1416,8 @@ private void finishUpdate(GridCacheContext cctx, CacheDataRow newRow, @Nullable try { int cacheId = grp.sharedGroup() ? 
cctx.cacheId() : CU.UNDEFINED_CACHE_ID; + assert cctx.shared().database().checkpointLockIsHeldByThread(); + CacheDataRow oldRow = dataTree.remove(new SearchRow(cacheId, key)); finishRemove(cctx, key, oldRow); @@ -1480,8 +1487,7 @@ private void finishRemove(GridCacheContext cctx, KeyCacheObject key, @Nullable C return dataTree.find(null, null); } - /** {@inheritDoc} - * @param cacheId*/ + /** {@inheritDoc} */ @Override public GridCursor cursor(int cacheId) throws IgniteCheckedException { return cursor(cacheId, null, null); } @@ -1541,6 +1547,7 @@ private void finishRemove(GridCacheContext cctx, KeyCacheObject key, @Nullable C /** {@inheritDoc} */ @Override public void clear(int cacheId) throws IgniteCheckedException { assert cacheId != CU.UNDEFINED_CACHE_ID; + assert ctx.database().checkpointLockIsHeldByThread(); if (cacheSize(cacheId) == 0) return; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java index 6873890b5c397..0fb9ee43647ca 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java @@ -1819,6 +1819,8 @@ void onResult(GridDhtTxPrepareResponse res) { GridDrType drType = cacheCtx.isDrEnabled() ? 
GridDrType.DR_PRELOAD : GridDrType.DR_NONE; + cctx.database().checkpointReadLock(); + try { if (entry.initialValue(info.value(), info.version(), @@ -1850,6 +1852,9 @@ void onResult(GridDhtTxPrepareResponse res) { log.debug("Failed to set entry initial value (entry is obsolete, " + "will retry): " + entry); } + finally { + cctx.database().checkpointReadUnlock(); + } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java index 599a58c173177..1454e9647f622 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java @@ -830,113 +830,120 @@ private Object updateAllInternal(GridCacheOperation op, CacheEntryPredicate[] filters = CU.filterArray(filter); - ctx.shared().database().ensureFreeSpace(ctx.dataRegion()); - - if (writeThrough && keys.size() > 1) { - return updateWithBatch(op, - keys, - vals, - invokeArgs, - expiryPlc, - ver, - filters, - keepBinary, - subjId, - taskName); - } - - Iterator valsIter = vals != null ? vals.iterator() : null; - IgniteBiTuple res = null; CachePartialUpdateCheckedException err = null; - boolean intercept = ctx.config().getInterceptor() != null; + ctx.shared().database().checkpointReadLock(); - for (K key : keys) { - if (key == null) - throw new NullPointerException("Null key."); + try { + ctx.shared().database().ensureFreeSpace(ctx.dataRegion()); + + if (writeThrough && keys.size() > 1) { + return updateWithBatch(op, + keys, + vals, + invokeArgs, + expiryPlc, + ver, + filters, + keepBinary, + subjId, + taskName); + } - Object val = valsIter != null ? valsIter.next() : null; + Iterator valsIter = vals != null ? 
vals.iterator() : null; - if (val == null && op != DELETE) - throw new NullPointerException("Null value."); + boolean intercept = ctx.config().getInterceptor() != null; - KeyCacheObject cacheKey = ctx.toCacheKeyObject(key); + for (K key : keys) { + if (key == null) + throw new NullPointerException("Null key."); - if (op == UPDATE) { - val = ctx.toCacheObject(val); + Object val = valsIter != null ? valsIter.next() : null; - ctx.validateKeyAndValue(cacheKey, (CacheObject)val); - } - else if (op == TRANSFORM) - ctx.kernalContext().resource().inject(val, GridResourceIoc.AnnotationSet.ENTRY_PROCESSOR, ctx.name()); + if (val == null && op != DELETE) + throw new NullPointerException("Null value."); - while (true) { - GridCacheEntryEx entry = null; + KeyCacheObject cacheKey = ctx.toCacheKeyObject(key); - try { - entry = entryEx(cacheKey); - - GridTuple3> t = entry.innerUpdateLocal( - ver, - val == null ? DELETE : op, - val, - invokeArgs, - writeThrough, - readThrough, - retval, - keepBinary, - expiryPlc, - true, - true, - filters, - intercept, - subjId, - taskName); + if (op == UPDATE) { + val = ctx.toCacheObject(val); - if (op == TRANSFORM) { - if (t.get3() != null) { - Map computedMap; + ctx.validateKeyAndValue(cacheKey, (CacheObject)val); + } + else if (op == TRANSFORM) + ctx.kernalContext().resource().inject(val, GridResourceIoc.AnnotationSet.ENTRY_PROCESSOR, ctx.name()); - if (res == null) { - computedMap = U.newHashMap(keys.size()); + while (true) { + GridCacheEntryEx entry = null; - res = new IgniteBiTuple<>(true, computedMap); - } - else - computedMap = (Map)res.get2(); + try { + entry = entryEx(cacheKey); - computedMap.put(key, t.get3()); + GridTuple3> t = entry.innerUpdateLocal( + ver, + val == null ? 
DELETE : op, + val, + invokeArgs, + writeThrough, + readThrough, + retval, + keepBinary, + expiryPlc, + true, + true, + filters, + intercept, + subjId, + taskName); + + if (op == TRANSFORM) { + if (t.get3() != null) { + Map computedMap; + + if (res == null) { + computedMap = U.newHashMap(keys.size()); + + res = new IgniteBiTuple<>(true, computedMap); + } + else + computedMap = (Map)res.get2(); + + computedMap.put(key, t.get3()); + } } - } - else if (res == null) - res = new T2(t.get1(), t.get2()); + else if (res == null) + res = new T2(t.get1(), t.get2()); - break; // While. - } - catch (GridCacheEntryRemovedException ignored) { - if (log.isDebugEnabled()) - log.debug("Got removed entry while updating (will retry): " + key); + break; // While. + } + catch (GridCacheEntryRemovedException ignored) { + if (log.isDebugEnabled()) + log.debug("Got removed entry while updating (will retry): " + key); - entry = null; - } - catch (IgniteCheckedException e) { - if (err == null) - err = partialUpdateException(); + entry = null; + } + catch (IgniteCheckedException e) { + if (err == null) + err = partialUpdateException(); - err.add(F.asList(key), e); + err.add(F.asList(key), e); - U.error(log, "Failed to update key : " + key, e); + U.error(log, "Failed to update key : " + key, e); - break; - } - finally { - if (entry != null) - ctx.evicts().touch(entry, ctx.affinity().affinityTopologyVersion()); + break; + } + finally { + if (entry != null) + ctx.evicts().touch(entry, ctx.affinity().affinityTopologyVersion()); + } } } } + finally { + ctx.shared().database().checkpointReadUnlock(); + } if (err != null) throw err; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index ff15cc02d8ae2..d9c66209b1187 100755 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -1846,7 +1846,7 @@ private void finalizeCheckpointOnRecovery(long cpTs, UUID cpId, WALPointer walPt if (tag != null) { tmpWriteBuf.rewind(); - PageStore store = storeMgr.writeInternal(fullId.groupId(), fullId.pageId(), tmpWriteBuf, tag); + PageStore store = storeMgr.writeInternal(fullId.groupId(), fullId.pageId(), tmpWriteBuf, tag, true); tmpWriteBuf.rewind(); @@ -2621,6 +2621,9 @@ private WriteCheckpointPages( fullId, tmpWriteBuf, persStoreMetrics.metricsEnabled() ? tracker : null); if (tag != null) { + assert PageIO.getType(tmpWriteBuf) != 0 : "Invalid state. Type is 0! pageId = " + U.hexLong(fullId.pageId()); + assert PageIO.getVersion(tmpWriteBuf) != 0 : "Invalid state. Version is 0! pageId = " + U.hexLong(fullId.pageId()); + tmpWriteBuf.rewind(); if (persStoreMetrics.metricsEnabled()) { @@ -2642,9 +2645,7 @@ private WriteCheckpointPages( tmpWriteBuf.rewind(); - PageIO.setCrc(writeAddr, 0); - - PageStore store = storeMgr.writeInternal(grpId, fullId.pageId(), tmpWriteBuf, tag); + PageStore store = storeMgr.writeInternal(grpId, fullId.pageId(), tmpWriteBuf, tag, false); updStores.add(store); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java index 6ed62f86b1d44..1ea0e7d653126 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java @@ -922,6 +922,8 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException 
reuseRoot.pageId().pageId(), reuseRoot.isAllocated()) { @Override protected long allocatePageNoReuse() throws IgniteCheckedException { + assert grp.shared().database().checkpointLockIsHeldByThread(); + return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); } }; @@ -938,6 +940,8 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException treeRoot.pageId().pageId(), treeRoot.isAllocated()) { @Override protected long allocatePageNoReuse() throws IgniteCheckedException { + assert grp.shared().database().checkpointLockIsHeldByThread(); + return pageMem.allocatePage(grpId, partId, PageIdAllocator.FLAG_DATA); } }; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java index 20510216c9100..ad2f7313b6459 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java @@ -107,6 +107,8 @@ public void addRow(CacheDataRow row) throws IgniteCheckedException { * @return {@code True} if was able to update row. 
*/ public boolean updateRow(long link, CacheDataRow row) throws IgniteCheckedException { + assert !persistenceEnabled || ctx.database().checkpointLockIsHeldByThread(); + return freeList.updateDataRow(link, row); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java index 408240cab3ab2..47f1d4d3144c2 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java @@ -433,7 +433,7 @@ private void init() throws IgniteCheckedException { } /** {@inheritDoc} */ - @Override public void write(long pageId, ByteBuffer pageBuf, int tag) throws IgniteCheckedException { + @Override public void write(long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException { init(); lock.readLock().lock(); @@ -450,13 +450,20 @@ private void init() throws IgniteCheckedException { assert pageBuf.capacity() == pageSize; assert pageBuf.position() == 0; assert pageBuf.order() == ByteOrder.nativeOrder(); - assert PageIO.getCrc(pageBuf) == 0 : U.hexLong(pageId); + assert PageIO.getType(pageBuf) != 0 : "Invalid state. Type is 0! pageId = " + U.hexLong(pageId); + assert PageIO.getVersion(pageBuf) != 0 : "Invalid state. Version is 0! pageId = " + U.hexLong(pageId); - int crc32 = skipCrc ? 0 : PureJavaCrc32.calcCrc32(pageBuf, pageSize); + if (calculateCrc && !skipCrc) { + assert PageIO.getCrc(pageBuf) == 0 : U.hexLong(pageId); - PageIO.setCrc(pageBuf, crc32); + PageIO.setCrc(pageBuf, calcCrc32(pageBuf, pageSize)); + } - pageBuf.position(0); + // Check whether crc was calculated somewhere above the stack if it is forcibly skipped. 
+ assert skipCrc || PageIO.getCrc(pageBuf) != 0 || calcCrc32(pageBuf, pageSize) == 0 : + "CRC hasn't been calculated, crc=0"; + + assert pageBuf.position() == 0 : pageBuf.position(); int len = pageSize; @@ -480,6 +487,21 @@ private void init() throws IgniteCheckedException { } } + /** + * @param pageBuf Page buffer. + * @param pageSize Page size. + */ + private static int calcCrc32(ByteBuffer pageBuf, int pageSize) { + try { + pageBuf.position(0); + + return PureJavaCrc32.calcCrc32(pageBuf, pageSize); + } + finally { + pageBuf.position(0); + } + } + /** {@inheritDoc} */ @Override public long pageOffset(long pageId) { return (long) PageIdUtils.pageIndex(pageId) * pageSize + headerSize(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java index aadcee60626d9..99c5a5bf45cbf 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java @@ -290,7 +290,7 @@ public void read(int cacheId, long pageId, ByteBuffer pageBuf, boolean keepCrc) /** {@inheritDoc} */ @Override public void write(int grpId, long pageId, ByteBuffer pageBuf, int tag) throws IgniteCheckedException { - writeInternal(grpId, pageId, pageBuf, tag); + writeInternal(grpId, pageId, pageBuf, tag, true); } /** {@inheritDoc} */ @@ -305,15 +305,16 @@ public void read(int cacheId, long pageId, ByteBuffer pageBuf, boolean keepCrc) * @param pageId Page ID. * @param pageBuf Page buffer. * @param tag Partition tag (growing 1-based partition file version). Used to validate page is not outdated + * @param calculateCrc if {@code False} crc calculation will be forcibly skipped. * @return PageStore to which the page has been written. 
* @throws IgniteCheckedException If IO error occurred. */ - public PageStore writeInternal(int cacheId, long pageId, ByteBuffer pageBuf, int tag) throws IgniteCheckedException { + public PageStore writeInternal(int cacheId, long pageId, ByteBuffer pageBuf, int tag, boolean calculateCrc) throws IgniteCheckedException { int partId = PageIdUtils.partId(pageId); PageStore store = getStore(cacheId, partId); - store.write(pageId, pageBuf, tag); + store.write(pageId, pageBuf, tag, calculateCrc); return store; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java index 8c64e0e22b171..e54d8e61e8300 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java @@ -176,7 +176,7 @@ public class PageMemoryImpl implements PageMemoryEx { private final int sysPageSize; /** Shared context. */ - private final GridCacheSharedContext sharedCtx; + private final GridCacheSharedContext ctx; /** State checker. */ private final CheckpointLockStateChecker stateChecker; @@ -242,7 +242,7 @@ public class PageMemoryImpl implements PageMemoryEx { /** * @param directMemoryProvider Memory allocator to use. - * @param sharedCtx Cache shared context. + * @param ctx Cache shared context. * @param pageSize Page size. * @param flushDirtyPage Callback invoked when a dirty page is evicted. * @param changeTracker Callback invoked to track changes in pages. 
@@ -251,7 +251,7 @@ public class PageMemoryImpl implements PageMemoryEx { public PageMemoryImpl( DirectMemoryProvider directMemoryProvider, long[] sizes, - GridCacheSharedContext sharedCtx, + GridCacheSharedContext ctx, int pageSize, GridInClosure3X flushDirtyPage, GridInClosure3X changeTracker, @@ -259,11 +259,11 @@ public PageMemoryImpl( DataRegionMetricsImpl memMetrics, boolean throttleEnabled ) { - assert sharedCtx != null; + assert ctx != null; - log = sharedCtx.logger(PageMemoryImpl.class); + log = ctx.logger(PageMemoryImpl.class); - this.sharedCtx = sharedCtx; + this.ctx = ctx; this.directMemoryProvider = directMemoryProvider; this.sizes = sizes; this.flushDirtyPage = flushDirtyPage; @@ -271,8 +271,8 @@ public PageMemoryImpl( this.stateChecker = stateChecker; this.throttleEnabled = throttleEnabled; - storeMgr = sharedCtx.pageStore(); - walMgr = sharedCtx.wal(); + storeMgr = ctx.pageStore(); + walMgr = ctx.wal(); assert storeMgr != null; assert walMgr != null; @@ -342,15 +342,15 @@ public PageMemoryImpl( * */ private void initWriteThrottle() { - if (!(sharedCtx.database() instanceof GridCacheDatabaseSharedManager)) { + if (!(ctx.database() instanceof GridCacheDatabaseSharedManager)) { log.error("Write throttle can't start. 
Unexpected class of database manager: " + - sharedCtx.database().getClass()); + ctx.database().getClass()); throttleEnabled = false; } if (throttleEnabled) - writeThrottle = new PagesWriteThrottle(this, (GridCacheDatabaseSharedManager)sharedCtx.database()); + writeThrottle = new PagesWriteThrottle(this, (GridCacheDatabaseSharedManager)ctx.database()); } /** {@inheritDoc} */ @@ -435,6 +435,8 @@ private void initWriteThrottle() { flags == PageIdAllocator.FLAG_IDX && partId == PageIdAllocator.INDEX_PARTITION : "flags = " + flags + ", partId = " + partId; + assert ctx.database().checkpointLockIsHeldByThread(); + long pageId = storeMgr.allocatePage(cacheId, partId, flags); assert PageIdUtils.pageIndex(pageId) > 0; //it's crucial for tracking pages (zero page is super one) @@ -451,7 +453,19 @@ private void initWriteThrottle() { boolean isTrackingPage = trackingIO.trackingPageFor(pageId, pageSize()) == pageId; try { - long relPtr = seg.borrowOrAllocateFreePage(pageId); + long relPtr = seg.loadedPages.get( + cacheId, + PageIdUtils.effectivePageId(pageId), + seg.partTag(cacheId, partId), + INVALID_REL_PTR, + OUTDATED_REL_PTR + ); + + if (relPtr == OUTDATED_REL_PTR) + relPtr = refreshOutdatedPage(seg, cacheId, pageId, false); + + if (relPtr == INVALID_REL_PTR) + relPtr = seg.borrowOrAllocateFreePage(pageId); if (relPtr == INVALID_REL_PTR) relPtr = seg.evictPage(); @@ -480,8 +494,8 @@ private void initWriteThrottle() { if (PageIO.getType(pageAddr) == 0) { trackingIO.initNewPage(pageAddr, pageId, pageSize()); - if (!sharedCtx.wal().isAlwaysWriteFullPages()) - sharedCtx.wal().log( + if (!ctx.wal().isAlwaysWriteFullPages()) + ctx.wal().log( new InitNewPageRecord( cacheId, pageId, @@ -490,7 +504,7 @@ private void initWriteThrottle() { ) ); else - sharedCtx.wal().log(new PageSnapshot(fullId, absPtr + PAGE_OVERHEAD, pageSize())); + ctx.wal().log(new PageSnapshot(fullId, absPtr + PAGE_OVERHEAD, pageSize())); } } @@ -1003,6 +1017,9 @@ private boolean copyPageForCheckpoint( 
PageHeader.releasePage(absPtr); } + assert PageIO.getType(tmpBuf) != 0 : "Invalid state. Type is 0! pageId = " + U.hexLong(fullId.pageId()); + assert PageIO.getVersion(tmpBuf) != 0 : "Invalid state. Version is 0! pageId = " + U.hexLong(fullId.pageId()); + return true; } finally { @@ -1241,6 +1258,9 @@ private long postWriteLockPage(long absPtr, FullPageId fullId) { pageSize() ); + assert PageIO.getType(tmpAbsPtr + PAGE_OVERHEAD) != 0 : "Invalid state. Type is 0! pageId = " + U.hexLong(fullId.pageId()); + assert PageIO.getVersion(tmpAbsPtr + PAGE_OVERHEAD) != 0 : "Invalid state. Version is 0! pageId = " + U.hexLong(fullId.pageId()); + PageHeader.dirty(absPtr, false); PageHeader.tempBufferPointer(absPtr, tmpRelPtr); @@ -1281,6 +1301,10 @@ private void writeUnlockPage( long pageId = PageIO.getPageId(page + PAGE_OVERHEAD); + assert pageId != 0 : U.hexLong(PageHeader.readPageId(page)); + assert PageIO.getVersion(page + PAGE_OVERHEAD) != 0 : U.hexLong(pageId); + assert PageIO.getType(page + PAGE_OVERHEAD) != 0 : U.hexLong(pageId); + try { rwLock.writeUnlock(page + PAGE_LOCK_OFFSET, PageIdUtils.tag(pageId)); @@ -1402,6 +1426,8 @@ void setDirty(FullPageId pageId, long absPtr, boolean dirty, boolean forceAdd) { boolean wasDirty = PageHeader.dirty(absPtr, dirty); if (dirty) { + assert ctx.database().checkpointLockIsHeldByThread(); + if (!wasDirty || forceAdd) { boolean added = segment(pageId.groupId(), pageId.pageId()).dirtyPages.add(pageId); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java index afa3fd7a4bf6b..f2bfa414e87b2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java @@ -77,6 +77,8 @@ public CacheDataTree( this.rowStore = rowStore; this.grp = grp; + assert 
!grp.dataRegion().config().isPersistenceEnabled() || grp.shared().database().checkpointLockIsHeldByThread(); + initTree(initNew); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java index a6ec6e7b38ee7..0b1c931e4321a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java @@ -64,6 +64,8 @@ public PendingEntriesTree( this.grp = grp; + assert !grp.dataRegion().config().isPersistenceEnabled() || grp.shared().database().checkpointLockIsHeldByThread(); + initTree(initNew); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoLoadSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoLoadSelfTest.java index 3b9e3933ed980..0f3bf9cd6ebde 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoLoadSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoLoadSelfTest.java @@ -33,6 +33,7 @@ import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.PageUtils; import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.DummyPageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; @@ -47,6 +48,9 @@ public class PageMemoryNoLoadSelfTest extends GridCommonAbstractTest { /** */ private static final int MAX_MEMORY_SIZE = 10 * 1024 * 1024; + /** */ + private static final PageIO PAGE_IO = new DummyPageIO(); + /** 
{@inheritDoc} */ @Override protected void afterTest() throws Exception { deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "pagemem", false)); @@ -226,6 +230,8 @@ public void testPageIdRotation() throws Exception { assertNotNull(pageAddr); try { + PAGE_IO.initNewPage(pageAddr, id.pageId(), mem.pageSize()); + long updId = PageIdUtils.rotatePageId(id.pageId()); PageIO.setPageId(pageAddr, updId); @@ -334,7 +340,7 @@ private void writePage(PageMemory mem, long pageId, long page, int val) { long pageAddr = mem.writeLock(-1, pageId, page); try { - PageIO.setPageId(pageAddr, pageId); + PAGE_IO.initNewPage(pageAddr, pageId, mem.pageSize()); for (int i = PageIO.COMMON_HEADER_END; i < PAGE_SIZE; i++) PageUtils.putByte(pageAddr, i, (byte)val); @@ -355,7 +361,7 @@ private void readPage(PageMemory mem, long pageId, long page, int expVal) { long pageAddr = mem.readLock(-1, pageId, page); - assert(pageAddr != 0); + assert pageAddr != 0; try { for (int i = PageIO.COMMON_HEADER_END; i < PAGE_SIZE; i++) { diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/DummyPageIO.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/DummyPageIO.java new file mode 100644 index 0000000000000..1b36ac1cefe72 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/DummyPageIO.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.util.GridStringBuilder; + +/** + * Dummy PageIO implementation. For test purposes only. + */ +public class DummyPageIO extends PageIO { + /** */ + public DummyPageIO() { + super(2 * Short.MAX_VALUE, 1); + } + + /** {@inheritDoc} */ + @Override + protected void printPage(long addr, int pageSize, GridStringBuilder sb) throws IgniteCheckedException { + sb.a("DummyPageIO [\n"); + sb.a("addr=").a(addr).a(", "); + sb.a("pageSize=").a(addr); + sb.a("\n]"); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java index 936944317b694..8e205852bd0b4 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsRecoveryAfterFileCorruptionTest.java @@ -142,20 +142,33 @@ public void testPageRecoveryAfterFileCorruption() throws Exception { PageMemory mem = sharedCtx.database().dataRegion(policyName).pageMemory(); + DummyPageIO pageIO = new DummyPageIO(); + int cacheId = sharedCtx.cache().cache(cacheName).context().cacheId(); FullPageId[] 
pages = new FullPageId[totalPages]; - for (int i = 0; i < totalPages; i++) - pages[i] = new FullPageId(mem.allocatePage(cacheId, 0, PageIdAllocator.FLAG_DATA), cacheId); + // Get lock to prevent assertion. A new page should be allocated under checkpoint lock. + psMgr.checkpointReadLock(); - generateWal( - (PageMemoryImpl)mem, - sharedCtx.pageStore(), - sharedCtx.wal(), - cacheId, - pages - ); + try { + for (int i = 0; i < totalPages; i++) { + pages[i] = new FullPageId(mem.allocatePage(cacheId, 0, PageIdAllocator.FLAG_DATA), cacheId); + + initPage(mem, pageIO, pages[i]); + } + + generateWal( + (PageMemoryImpl)mem, + sharedCtx.pageStore(), + sharedCtx.wal(), + cacheId, + pages + ); + } + finally { + psMgr.checkpointReadUnlock(); + } eraseDataFromDisk(pageStore, cacheId, pages[0]); @@ -168,6 +181,31 @@ public void testPageRecoveryAfterFileCorruption() throws Exception { checkRestore(ig, pages); } + /** + * Initializes page. + * @param mem page memory implementation. + * @param pageIO page io implementation. + * @param fullId full page id. + * @throws IgniteCheckedException if error occurs. + */ + private void initPage(PageMemory mem, PageIO pageIO, FullPageId fullId) throws IgniteCheckedException { + long page = mem.acquirePage(fullId.groupId(), fullId.pageId()); + + try { + final long pageAddr = mem.writeLock(fullId.groupId(), fullId.pageId(), page); + + try { + pageIO.initNewPage(pageAddr, fullId.pageId(), mem.pageSize()); + } + finally { + mem.writeUnlock(fullId.groupId(), fullId.pageId(), page, null, true); + } + } + finally { + mem.releasePage(fullId.groupId(), fullId.pageId(), page); + } + } + /** * @param pageStore Page store. * @param cacheId Cache id. 
@@ -207,21 +245,28 @@ private void checkRestore(IgniteEx ig, FullPageId[] pages) throws IgniteCheckedE PageMemory mem = shared.database().dataRegion(null).pageMemory(); - for (FullPageId fullId : pages) { - long page = mem.acquirePage(fullId.groupId(), fullId.pageId()); + dbMgr.checkpointReadLock(); - try { - long pageAddr = mem.readLock(fullId.groupId(), fullId.pageId(), page); + try { + for (FullPageId fullId : pages) { + long page = mem.acquirePage(fullId.groupId(), fullId.pageId()); + + try { + long pageAddr = mem.readLock(fullId.groupId(), fullId.pageId(), page); - for (int j = PageIO.COMMON_HEADER_END; j < mem.pageSize(); j += 4) - assertEquals(j + (int)fullId.pageId(), PageUtils.getInt(pageAddr, j)); + for (int j = PageIO.COMMON_HEADER_END; j < mem.pageSize(); j += 4) + assertEquals(j + (int)fullId.pageId(), PageUtils.getInt(pageAddr, j)); - mem.readUnlock(fullId.groupId(), fullId.pageId(), page); - } - finally { - mem.releasePage(fullId.groupId(), fullId.pageId(), page); + mem.readUnlock(fullId.groupId(), fullId.pageId(), page); + } + finally { + mem.releasePage(fullId.groupId(), fullId.pageId(), page); + } } } + finally { + dbMgr.checkpointReadUnlock(); + } } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java index 5ae8969358496..0dd915374d165 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsCheckpointSimulationWithRealCpDisabledTest.java @@ -47,7 +47,9 @@ import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageMemory; import 
org.apache.ignite.internal.pagemem.PageUtils; +import org.apache.ignite.internal.processors.cache.persistence.DummyPageIO; import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO; @@ -519,10 +521,12 @@ public void testDirtyFlag() throws Exception { PageMemoryEx mem = (PageMemoryEx) dbMgr.dataRegion(null).pageMemory(); - ig.context().cache().context().database().checkpointReadLock(); - FullPageId[] pageIds = new FullPageId[100]; + DummyPageIO pageIO = new DummyPageIO(); + + ig.context().cache().context().database().checkpointReadLock(); + try { for (int i = 0; i < pageIds.length; i++) pageIds[i] = new FullPageId(mem.allocatePage(cacheId, 0, PageIdAllocator.FLAG_DATA), cacheId); @@ -535,9 +539,9 @@ public void testDirtyFlag() throws Exception { long pageAddr = mem.writeLock(fullId.groupId(), fullId.pageId(), page); - PageIO.setPageId(pageAddr, fullId.pageId()); - try { + pageIO.initNewPage(pageAddr, fullId.pageId(), mem.pageSize()); + assertTrue(mem.isDirty(fullId.groupId(), fullId.pageId(), page)); } finally { @@ -737,8 +741,22 @@ private IgniteBiTuple, WALPointer> runCheckpointing( Set allocated = new HashSet<>(); + IgniteCacheDatabaseSharedManager db = ig.context().cache().context().database(); + + PageIO pageIO = new DummyPageIO(); + for (int i = 0; i < TOTAL_PAGES; i++) { - FullPageId fullId = new FullPageId(mem.allocatePage(cacheId, 0, PageIdAllocator.FLAG_DATA), cacheId); + FullPageId fullId; + + db.checkpointReadLock(); + try { + fullId = new FullPageId(mem.allocatePage(cacheId, 0, PageIdAllocator.FLAG_DATA), cacheId); + + initPage(mem, pageIO, fullId); + } + finally { + 
db.checkpointReadUnlock(); + } resMap.put(fullId, -1); @@ -982,4 +1000,29 @@ private IgniteBiTuple, WALPointer> runCheckpointing( private void deleteWorkFiles() throws IgniteCheckedException { deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false)); } + + /** + * Initializes page. + * @param mem page memory implementation. + * @param pageIO page io implementation. + * @param fullId full page id. + * @throws IgniteCheckedException if error occurs. + */ + private void initPage(PageMemory mem, PageIO pageIO, FullPageId fullId) throws IgniteCheckedException { + long page = mem.acquirePage(fullId.groupId(), fullId.pageId()); + + try { + final long pageAddr = mem.writeLock(fullId.groupId(), fullId.pageId(), page); + + try { + pageIO.initNewPage(pageAddr, fullId.pageId(), mem.pageSize()); + } + finally { + mem.writeUnlock(fullId.groupId(), fullId.pageId(), page, null, true); + } + } + finally { + mem.releasePage(fullId.groupId(), fullId.pageId(), page); + } + } } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsEvictionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsEvictionTest.java index 47a4b7b7f935b..1b86e3da056bf 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsEvictionTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsEvictionTest.java @@ -35,6 +35,7 @@ import org.apache.ignite.internal.pagemem.PageUtils; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.DummyPageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import 
org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; @@ -138,6 +139,8 @@ private void writeData(final IgniteEx ignite, final PageMemory memory, final int IgniteCacheDatabaseSharedManager db = ignite.context().cache().context().database(); + PageIO pageIO = new DummyPageIO(); + // Allocate. for (int i = 0; i < size; i++) { db.checkpointReadLock(); @@ -145,6 +148,8 @@ private void writeData(final IgniteEx ignite, final PageMemory memory, final int final FullPageId fullId = new FullPageId(memory.allocatePage(cacheId, i % 256, PageMemory.FLAG_DATA), cacheId); + initPage(memory, pageIO, fullId); + pageIds.add(fullId); } finally { @@ -179,6 +184,31 @@ private void writeData(final IgniteEx ignite, final PageMemory memory, final int System.out.println("Read pages: " + pageIds.size()); } + /** + * Initializes page. + * @param mem page memory implementation. + * @param pageIO page io implementation. + * @param fullId full page id. + * @throws IgniteCheckedException if error occurs. + */ + private void initPage(PageMemory mem, PageIO pageIO, FullPageId fullId) throws IgniteCheckedException { + long page = mem.acquirePage(fullId.groupId(), fullId.pageId()); + + try { + final long pageAddr = mem.writeLock(fullId.groupId(), fullId.pageId(), page); + + try { + pageIO.initNewPage(pageAddr, fullId.pageId(), mem.pageSize()); + } + finally { + mem.writeUnlock(fullId.groupId(), fullId.pageId(), page, null, true); + } + } + finally { + mem.releasePage(fullId.groupId(), fullId.pageId(), page); + } + } + /** * @param start Start index. * @param end End index. 
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java index b3307d09802f2..6915e3ce75e2e 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java @@ -24,6 +24,7 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.RootPage; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; @@ -103,26 +104,35 @@ public H2TreeIndex( segments = new H2Tree[segmentsCnt]; + IgniteCacheDatabaseSharedManager db = cctx.shared().database(); + for (int i = 0; i < segments.length; i++) { - RootPage page = getMetaPage(name, i); - - segments[i] = new H2Tree( - name, - cctx.offheap().reuseListForIndex(name), - cctx.groupId(), - cctx.dataRegion().pageMemory(), - cctx.shared().wal(), - cctx.offheap().globalRemoveId(), - tbl.rowFactory(), - page.pageId().pageId(), - page.isAllocated(), - cols, - inlineIdxs, - computeInlineSize(inlineIdxs, inlineSize)) { - @Override public int compareValues(Value v1, Value v2) { - return v1 == v2 ? 
0 : table.compareTypeSafe(v1, v2); - } - }; + db.checkpointReadLock(); + + try { + RootPage page = getMetaPage(name, i); + + segments[i] = new H2Tree( + name, + cctx.offheap().reuseListForIndex(name), + cctx.groupId(), + cctx.dataRegion().pageMemory(), + cctx.shared().wal(), + cctx.offheap().globalRemoveId(), + tbl.rowFactory(), + page.pageId().pageId(), + page.isAllocated(), + cols, + inlineIdxs, + computeInlineSize(inlineIdxs, inlineSize)) { + @Override public int compareValues(Value v1, Value v2) { + return v1 == v2 ? 0 : table.compareTypeSafe(v1, v2); + } + }; + } + finally { + db.checkpointReadUnlock(); + } } } else { @@ -194,6 +204,8 @@ private List getAvailableInlineColumns(IndexColumn[] cols) { H2Tree tree = treeForRead(seg); + assert cctx.shared().database().checkpointLockIsHeldByThread(); + return tree.put(row); } catch (IgniteCheckedException e) { @@ -213,6 +225,8 @@ private List getAvailableInlineColumns(IndexColumn[] cols) { H2Tree tree = treeForRead(seg); + assert cctx.shared().database().checkpointLockIsHeldByThread(); + return tree.remove(row); } catch (IgniteCheckedException e) { @@ -232,6 +246,8 @@ private List getAvailableInlineColumns(IndexColumn[] cols) { H2Tree tree = treeForRead(seg); + assert cctx.shared().database().checkpointLockIsHeldByThread(); + tree.removex(row); } catch (IgniteCheckedException e) { @@ -295,6 +311,8 @@ private List getAvailableInlineColumns(IndexColumn[] cols) { @Override public void destroy(boolean rmvIndex) { try { if (cctx.affinityNode() && rmvIndex) { + assert cctx.shared().database().checkpointLockIsHeldByThread(); + for (int i = 0; i < segments.length; i++) { H2Tree tree = segments[i]; From 2209f35b6c1e8f5e856d088f3d764f0129818ac4 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Wed, 13 Dec 2017 23:02:40 +0700 Subject: [PATCH 167/243] IGNITE-4454 Minor fix for duration filter. 
(cherry picked from commit 9f7bc54) --- modules/web-console/frontend/app/filters/duration.filter.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/web-console/frontend/app/filters/duration.filter.js b/modules/web-console/frontend/app/filters/duration.filter.js index 703b6a471dfbe..55ec7e0b7db3a 100644 --- a/modules/web-console/frontend/app/filters/duration.filter.js +++ b/modules/web-console/frontend/app/filters/duration.filter.js @@ -36,6 +36,6 @@ export default [() => { const s = Math.floor((t - d * cd - h * ch - m * cm) / cs); const ms = Math.round(t % 1000); - return a(d, 'd') + a(h, 'h') + a(m, 'm') + a(s, 's') + (t === 0 || (t < cm && ms !== 0) ? ms + 'ms' : ''); + return a(d, 'd') + a(h, 'h') + a(m, 'm') + a(s, 's') + (t < 1000 || (t < cm && ms !== 0) ? ms + 'ms' : ''); }; }]; From 4546eb37eb0a5d1d5fc585572955fbd1fc35eccc Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Wed, 13 Dec 2017 23:33:05 +0700 Subject: [PATCH 168/243] IGNITE-6995 Visor CMD: Updated eviction policy factory in configs. 
(cherry picked from commit 47e7daf) --- .../VisorCacheEvictionConfiguration.java | 4 +- .../cache/VisorCacheNearConfiguration.java | 7 +- .../internal/visor/util/VisorTaskUtils.java | 14 ++-- .../generator/AbstractTransformer.js | 8 +-- .../generator/ConfigurationGenerator.js | 66 ++++++++++++------- .../generator/PlatformGenerator.js | 6 +- .../generator/defaults/Cache.service.js | 14 +--- .../frontend/app/services/Version.service.js | 4 ++ 8 files changed, 66 insertions(+), 57 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheEvictionConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheEvictionConfiguration.java index 7792d8e542efd..496c609c8a8ba 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheEvictionConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheEvictionConfiguration.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; -import org.apache.ignite.cache.eviction.EvictionPolicy; +import javax.cache.configuration.Factory; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; @@ -58,7 +58,7 @@ public VisorCacheEvictionConfiguration() { * @param ccfg Cache configuration. 
*/ public VisorCacheEvictionConfiguration(CacheConfiguration ccfg) { - final EvictionPolicy evictionPlc = ccfg.getEvictionPolicy(); + final Factory evictionPlc = ccfg.getEvictionPolicyFactory(); plc = compactClass(evictionPlc); plcMaxSize = evictionPolicyMaxSize(evictionPlc); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheNearConfiguration.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheNearConfiguration.java index ae55f83e42a68..a7b431c27c0c3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheNearConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheNearConfiguration.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.io.ObjectInput; import java.io.ObjectOutput; +import javax.cache.configuration.Factory; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.processors.cache.GridCacheUtils; @@ -68,9 +69,11 @@ public VisorCacheNearConfiguration(CacheConfiguration ccfg) { if (nearEnabled) { NearCacheConfiguration nccfg = ccfg.getNearConfiguration(); + final Factory nearEvictionPlc = nccfg.getNearEvictionPolicyFactory(); + nearStartSize = nccfg.getNearStartSize(); - nearEvictPlc = compactClass(nccfg.getNearEvictionPolicy()); - nearEvictMaxSize = evictionPolicyMaxSize(nccfg.getNearEvictionPolicy()); + nearEvictPlc = compactClass(nearEvictionPlc); + nearEvictMaxSize = evictionPolicyMaxSize(nearEvictionPlc); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java index fda801c460417..0da7a34836c02 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/visor/util/VisorTaskUtils.java @@ -50,13 +50,12 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; +import javax.cache.configuration.Factory; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteFileSystem; import org.apache.ignite.IgniteLogger; -import org.apache.ignite.cache.eviction.EvictionPolicy; -import org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicyMBean; -import org.apache.ignite.cache.eviction.lru.LruEvictionPolicyMBean; +import org.apache.ignite.cache.eviction.AbstractEvictionPolicyFactory; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.events.Event; import org.apache.ignite.internal.processors.igfs.IgfsEx; @@ -759,12 +758,9 @@ else if (igfs == null) * @param plc Eviction policy. * @return Extracted max size. */ - public static Integer evictionPolicyMaxSize(@Nullable EvictionPolicy plc) { - if (plc instanceof LruEvictionPolicyMBean) - return ((LruEvictionPolicyMBean)plc).getMaxSize(); - - if (plc instanceof FifoEvictionPolicyMBean) - return ((FifoEvictionPolicyMBean)plc).getMaxSize(); + public static Integer evictionPolicyMaxSize(@Nullable Factory plc) { + if (plc instanceof AbstractEvictionPolicyFactory) + return ((AbstractEvictionPolicyFactory) plc).getMaxSize(); return null; } diff --git a/modules/web-console/frontend/app/modules/configuration/generator/AbstractTransformer.js b/modules/web-console/frontend/app/modules/configuration/generator/AbstractTransformer.js index af799bd20f456..339dbea58d6df 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/AbstractTransformer.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/AbstractTransformer.js @@ -284,13 +284,13 @@ export default class AbstractTransformer { } // Generate server near cache group. 
- static cacheNearServer(cache) { - return this.toSection(this.generator.cacheNearServer(cache)); + static cacheNearServer(cache, available) { + return this.toSection(this.generator.cacheNearServer(cache, available)); } // Generate client near cache group. - static cacheNearClient(cache) { - return this.toSection(this.generator.cacheNearClient(cache)); + static cacheNearClient(cache, available) { + return this.toSection(this.generator.cacheNearClient(cache, available)); } // Generate cache statistics group. diff --git a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js index 16202f809e833..0c40890f4f0cd 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js @@ -1887,34 +1887,52 @@ export default class IgniteConfigurationGenerator { * @returns {Object} Parent configuration. 
* @private */ - static _evictionPolicy(ccfg, name, src, dflt) { - let bean; + static _evictionPolicy(ccfg, available, near, src, dflt) { + let propName; + let beanProps; - switch (_.get(src, 'kind')) { - case 'LRU': - bean = new Bean('org.apache.ignite.cache.eviction.lru.LruEvictionPolicy', 'evictionPlc', - src.LRU, dflt.LRU); + if (available('2.4.0')) { + switch (_.get(src, 'kind')) { + case 'LRU': beanProps = {cls: 'org.apache.ignite.cache.eviction.lru.LruEvictionPolicyFactory', src: src.LRU }; + break; - break; - case 'FIFO': - bean = new Bean('org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicy', 'evictionPlc', - src.FIFO, dflt.FIFO); + case 'FIFO': beanProps = {cls: 'org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicyFactory', src: src.FIFO }; + break; - break; - case 'SORTED': - bean = new Bean('org.apache.ignite.cache.eviction.sorted.SortedEvictionPolicy', 'evictionPlc', - src.SORTED, dflt.SORTED); + case 'SORTED': beanProps = {cls: 'org.apache.ignite.cache.eviction.sorted.SortedEvictionPolicyFactory', src: src.SORTED }; + break; - break; - default: - return ccfg; + default: + return ccfg; + } + + propName = (near ? 'nearEviction' : 'eviction') + 'PolicyFactory'; } + else { + switch (_.get(src, 'kind')) { + case 'LRU': beanProps = {cls: 'org.apache.ignite.cache.eviction.lru.LruEvictionPolicy', src: src.LRU }; + break; + + case 'FIFO': beanProps = {cls: 'org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicy', src: src.FIFO }; + break; + + case 'SORTED': beanProps = {cls: 'org.apache.ignite.cache.eviction.sorted.SortedEvictionPolicy', src: src.SORTED }; + break; + + default: + return ccfg; + } + + propName = (near ? 
'nearEviction' : 'eviction') + 'Policy'; + } + + const bean = new Bean(beanProps.cls, 'evictionPlc', beanProps.src, dflt); bean.intProperty('batchSize') .intProperty('maxMemorySize') .intProperty('maxSize'); - ccfg.beanProperty(name, bean); + ccfg.beanProperty(propName, bean); return ccfg; } @@ -2013,7 +2031,7 @@ export default class IgniteConfigurationGenerator { .emptyBeanProperty('evictionFilter'); } - this._evictionPolicy(ccfg, 'evictionPolicy', cache.evictionPolicy, cacheDflts.evictionPolicy); + this._evictionPolicy(ccfg, available, false, cache.evictionPolicy, cacheDflts.evictionPolicy); // Removed in ignite 2.0 if (available(['1.0.0', '2.0.0'])) { @@ -2249,15 +2267,14 @@ export default class IgniteConfigurationGenerator { } // Generate server near cache group. - static cacheNearServer(cache, ccfg = this.cacheConfigurationBean(cache)) { + static cacheNearServer(cache, available, ccfg = this.cacheConfigurationBean(cache)) { if (ccfg.valueOf('cacheMode') === 'PARTITIONED' && _.get(cache, 'nearConfiguration.enabled')) { const bean = new Bean('org.apache.ignite.configuration.NearCacheConfiguration', 'nearConfiguration', cache.nearConfiguration, cacheDflts.nearConfiguration); bean.intProperty('nearStartSize'); - this._evictionPolicy(bean, 'nearEvictionPolicy', - bean.valueOf('nearEvictionPolicy'), cacheDflts.evictionPolicy); + this._evictionPolicy(bean, available, true, bean.valueOf('nearEvictionPolicy'), cacheDflts.evictionPolicy); ccfg.beanProperty('nearConfiguration', bean); } @@ -2266,7 +2283,7 @@ export default class IgniteConfigurationGenerator { } // Generate client near cache group. 
- static cacheNearClient(cache, ccfg = this.cacheConfigurationBean(cache)) { + static cacheNearClient(cache, available, ccfg = this.cacheConfigurationBean(cache)) { if (ccfg.valueOf('cacheMode') === 'PARTITIONED' && _.get(cache, 'clientNearConfiguration.enabled')) { const bean = new Bean('org.apache.ignite.configuration.NearCacheConfiguration', javaTypes.toJavaName('nearConfiguration', ccfg.valueOf('name')), @@ -2274,8 +2291,7 @@ export default class IgniteConfigurationGenerator { bean.intProperty('nearStartSize'); - this._evictionPolicy(bean, 'nearEvictionPolicy', - bean.valueOf('nearEvictionPolicy'), cacheDflts.evictionPolicy); + this._evictionPolicy(bean, available, true, bean.valueOf('nearEvictionPolicy'), cacheDflts.evictionPolicy); return bean; } diff --git a/modules/web-console/frontend/app/modules/configuration/generator/PlatformGenerator.js b/modules/web-console/frontend/app/modules/configuration/generator/PlatformGenerator.js index 234c7ecebfb56..2f652d41345dc 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/PlatformGenerator.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/PlatformGenerator.js @@ -291,13 +291,13 @@ export default ['JavaTypes', 'igniteClusterPlatformDefaults', 'igniteCachePlatfo } // Generate cache memory group. 
- static cacheMemory(cache, ccfg = this.cacheConfigurationBean(cache)) { + static cacheMemory(cache, available, ccfg = this.cacheConfigurationBean(cache)) { ccfg.enumProperty('memoryMode'); if (ccfg.valueOf('memoryMode') !== 'OFFHEAP_VALUES') ccfg.intProperty('offHeapMaxMemory'); - // this._evictionPolicy(ccfg, 'evictionPolicy', cache.evictionPolicy, cacheDflts.evictionPolicy); + // this._evictionPolicy(ccfg, available, false, cache.evictionPolicy, cacheDflts.evictionPolicy); ccfg.intProperty('startSize') .boolProperty('swapEnabled', 'EnableSwap'); @@ -483,7 +483,7 @@ export default ['JavaTypes', 'igniteClusterPlatformDefaults', 'igniteCachePlatfo bean.intProperty('nearStartSize'); - this._evictionPolicy(bean, 'nearEvictionPolicy', + this._evictionPolicy(bean, true, bean.valueOf('nearEvictionPolicy'), cacheDflts.evictionPolicy); ccfg.beanProperty('nearConfiguration', bean); diff --git a/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cache.service.js b/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cache.service.js index ad5b88a1b6cb4..3f87b8a35c315 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cache.service.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/defaults/Cache.service.js @@ -85,18 +85,8 @@ const DFLT_CACHE = { nearStartSize: 375000 }, evictionPolicy: { - LRU: { - batchSize: 1, - maxSize: 100000 - }, - FIFO: { - batchSize: 1, - maxSize: 100000 - }, - SORTED: { - batchSize: 1, - maxSize: 100000 - } + batchSize: 1, + maxSize: 100000 }, queryMetadata: 'Configuration', queryDetailMetricsSize: 0, diff --git a/modules/web-console/frontend/app/services/Version.service.js b/modules/web-console/frontend/app/services/Version.service.js index 8b67eb7d5a206..22d0732cff835 100644 --- a/modules/web-console/frontend/app/services/Version.service.js +++ b/modules/web-console/frontend/app/services/Version.service.js @@ -76,6 +76,10 @@ export default class 
IgniteVersion { this.webConsole = '2.2.0'; this.supportedVersions = [ + { + label: 'Ignite 2.4', + ignite: '2.4.0' + }, { label: 'Ignite 2.3', ignite: '2.3.0' From 394e831cdb9be2290e7c248a7660a899c3e6b6ab Mon Sep 17 00:00:00 2001 From: vsisko Date: Thu, 14 Dec 2017 10:37:39 +0700 Subject: [PATCH 169/243] IGNITE-4943 Web Console: Improved tooltips on "Admin panel" screen. (cherry picked from commit c9d6dd5) --- .../list-of-registered-users.column-defs.js | 49 ++++++++++--------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.column-defs.js b/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.column-defs.js index 493c23995d5fc..83382e0f4083c 100644 --- a/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.column-defs.js +++ b/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.column-defs.js @@ -17,7 +17,8 @@ const ICON_SORT = ''; -const USER_TEMPLATE = '
         {{ COL_FIELD }}
        '; +const USER_TEMPLATE = '
        ' + + ' 
        '; const CLUSTER_HEADER_TEMPLATE = `
        ${ICON_SORT}
        `; const MODEL_HEADER_TEMPLATE = `
        ${ICON_SORT}
        `; @@ -46,35 +47,37 @@ const ACTIONS_TEMPLATE = ` `; -const EMAIL_TEMPLATE = ''; +const EMAIL_TEMPLATE = ''; +const DATE_WITH_TITLE = '
        '; +const VALUE_WITH_TITLE = '
        '; export default [ {name: 'actions', enableHiding: false, displayName: 'Actions', categoryDisplayName: 'Actions', cellTemplate: ACTIONS_TEMPLATE, field: 'actions', minWidth: 70, width: 70, enableFiltering: false, enableSorting: false, visible: false}, {name: 'user', enableHiding: false, displayName: 'User', categoryDisplayName: 'User', field: 'userName', cellTemplate: USER_TEMPLATE, minWidth: 160, enableFiltering: true, pinnedLeft: true, filter: { placeholder: 'Filter by name...' }}, {name: 'email', displayName: 'Email', categoryDisplayName: 'Email', field: 'email', cellTemplate: EMAIL_TEMPLATE, minWidth: 160, enableFiltering: true, filter: { placeholder: 'Filter by email...' }}, - {name: 'company', displayName: 'Company', categoryDisplayName: 'Company', field: 'company', minWidth: 180, enableFiltering: true, filter: { placeholder: 'Filter by company...' }}, - {name: 'country', displayName: 'Country', categoryDisplayName: 'Country', field: 'countryCode', minWidth: 160, enableFiltering: true, filter: { placeholder: 'Filter by country...' }}, - {name: 'lastlogin', displayName: 'Last login', categoryDisplayName: 'Last login', field: 'lastLogin', cellFilter: 'date:"M/d/yy HH:mm"', minWidth: 135, width: 135, enableFiltering: false, visible: false}, - {name: 'lastactivity', displayName: 'Last activity', categoryDisplayName: 'Last activity', field: 'lastActivity', cellFilter: 'date:"M/d/yy HH:mm"', minWidth: 135, width: 135, enableFiltering: false, visible: true, sort: { direction: 'desc', priority: 0 }}, + {name: 'company', displayName: 'Company', categoryDisplayName: 'Company', field: 'company', cellTemplate: VALUE_WITH_TITLE, minWidth: 180, enableFiltering: true, filter: { placeholder: 'Filter by company...' }}, + {name: 'country', displayName: 'Country', categoryDisplayName: 'Country', field: 'countryCode', cellTemplate: VALUE_WITH_TITLE, minWidth: 160, enableFiltering: true, filter: { placeholder: 'Filter by country...' 
}}, + {name: 'lastlogin', displayName: 'Last login', categoryDisplayName: 'Last login', field: 'lastLogin', cellTemplate: DATE_WITH_TITLE, minWidth: 135, width: 135, enableFiltering: false, visible: false}, + {name: 'lastactivity', displayName: 'Last activity', categoryDisplayName: 'Last activity', field: 'lastActivity', cellTemplate: DATE_WITH_TITLE, minWidth: 135, width: 135, enableFiltering: false, visible: true, sort: { direction: 'desc', priority: 0 }}, // Configurations - {name: 'cfg_clusters', displayName: 'Clusters count', categoryDisplayName: 'Configurations', headerCellTemplate: CLUSTER_HEADER_TEMPLATE, field: 'counters.clusters', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Clusters count', minWidth: 65, width: 65, enableFiltering: false, visible: false}, - {name: 'cfg_models', displayName: 'Models count', categoryDisplayName: 'Configurations', headerCellTemplate: MODEL_HEADER_TEMPLATE, field: 'counters.models', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Models count', minWidth: 65, width: 65, enableFiltering: false, visible: false}, - {name: 'cfg_caches', displayName: 'Caches count', categoryDisplayName: 'Configurations', headerCellTemplate: CACHE_HEADER_TEMPLATE, field: 'counters.caches', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Caches count', minWidth: 65, width: 65, enableFiltering: false, visible: false}, - {name: 'cfg_igfs', displayName: 'IGFS count', categoryDisplayName: 'Configurations', headerCellTemplate: IGFS_HEADER_TEMPLATE, field: 'counters.igfs', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'IGFS count', minWidth: 65, width: 65, enableFiltering: false, visible: false}, + {name: 'cfg_clusters', displayName: 'Clusters count', categoryDisplayName: 'Configurations', headerCellTemplate: CLUSTER_HEADER_TEMPLATE, field: 'counters.clusters', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Clusters count', 
minWidth: 65, width: 65, enableFiltering: false, visible: false}, + {name: 'cfg_models', displayName: 'Models count', categoryDisplayName: 'Configurations', headerCellTemplate: MODEL_HEADER_TEMPLATE, field: 'counters.models', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Models count', minWidth: 65, width: 65, enableFiltering: false, visible: false}, + {name: 'cfg_caches', displayName: 'Caches count', categoryDisplayName: 'Configurations', headerCellTemplate: CACHE_HEADER_TEMPLATE, field: 'counters.caches', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Caches count', minWidth: 65, width: 65, enableFiltering: false, visible: false}, + {name: 'cfg_igfs', displayName: 'IGFS count', categoryDisplayName: 'Configurations', headerCellTemplate: IGFS_HEADER_TEMPLATE, field: 'counters.igfs', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'IGFS count', minWidth: 65, width: 65, enableFiltering: false, visible: false}, // Activities Total - {name: 'cfg', displayName: 'Cfg', categoryDisplayName: 'Total activities', field: 'activitiesTotal["configuration"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Total count of configuration usages', minWidth: 70, width: 70, enableFiltering: false}, - {name: 'qry', displayName: 'Qry', categoryDisplayName: 'Total activities', field: 'activitiesTotal["queries"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Total count of queries usages', minWidth: 70, width: 70, enableFiltering: false}, - {name: 'demo', displayName: 'Demo', categoryDisplayName: 'Total activities', field: 'activitiesTotal["demo"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Total count of demo startup', minWidth: 85, width: 85, enableFiltering: false}, - {name: 'dnld', displayName: 'Dnld', categoryDisplayName: 'Total activities', field: 
'activitiesDetail["/agent/download"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Total count of agent downloads', minWidth: 80, width: 80, enableFiltering: false}, - {name: 'starts', displayName: 'Starts', categoryDisplayName: 'Total activities', field: 'activitiesDetail["/agent/start"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Total count of agent startup', minWidth: 87, width: 87, enableFiltering: false}, + {name: 'cfg', displayName: 'Cfg', categoryDisplayName: 'Total activities', field: 'activitiesTotal["configuration"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Total count of configuration usages', minWidth: 70, width: 70, enableFiltering: false}, + {name: 'qry', displayName: 'Qry', categoryDisplayName: 'Total activities', field: 'activitiesTotal["queries"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Total count of queries usages', minWidth: 70, width: 70, enableFiltering: false}, + {name: 'demo', displayName: 'Demo', categoryDisplayName: 'Total activities', field: 'activitiesTotal["demo"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Total count of demo startup', minWidth: 85, width: 85, enableFiltering: false}, + {name: 'dnld', displayName: 'Dnld', categoryDisplayName: 'Total activities', field: 'activitiesDetail["/agent/download"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Total count of agent downloads', minWidth: 80, width: 80, enableFiltering: false}, + {name: 'starts', displayName: 'Starts', categoryDisplayName: 'Total activities', field: 'activitiesDetail["/agent/start"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Total count of agent startup', minWidth: 87, width: 87, enableFiltering: false}, 
// Activities Configuration - {name: 'clusters', displayName: 'Clusters', categoryDisplayName: 'Configuration\'s activities', field: 'activitiesDetail["/configuration/clusters"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Configuration clusters', minWidth: 100, width: 100, enableFiltering: false, visible: false}, - {name: 'model', displayName: 'Model', categoryDisplayName: 'Configuration\'s activities', field: 'activitiesDetail["/configuration/domains"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Configuration model', minWidth: 87, width: 87, enableFiltering: false, visible: false}, - {name: 'caches', displayName: 'Caches', categoryDisplayName: 'Configuration\'s activities', field: 'activitiesDetail["/configuration/caches"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Configuration caches', minWidth: 96, width: 96, enableFiltering: false, visible: false}, - {name: 'igfs', displayName: 'IGFS', categoryDisplayName: 'Configuration\'s activities', field: 'activitiesDetail["/configuration/igfs"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Configuration IGFS', minWidth: 85, width: 85, enableFiltering: false, visible: false}, - {name: 'summary', displayName: 'Summary', categoryDisplayName: 'Configuration\'s activities', field: 'activitiesDetail["/configuration/summary"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Configuration summary', minWidth: 111, width: 111, enableFiltering: false, visible: false}, + {name: 'clusters', displayName: 'Clusters', categoryDisplayName: 'Configuration\'s activities', field: 'activitiesDetail["/configuration/clusters"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Configuration clusters', minWidth: 100, width: 100, enableFiltering: false, visible: false}, + {name: 'model', displayName: 'Model', categoryDisplayName: 'Configuration\'s 
activities', field: 'activitiesDetail["/configuration/domains"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Configuration model', minWidth: 87, width: 87, enableFiltering: false, visible: false}, + {name: 'caches', displayName: 'Caches', categoryDisplayName: 'Configuration\'s activities', field: 'activitiesDetail["/configuration/caches"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Configuration caches', minWidth: 96, width: 96, enableFiltering: false, visible: false}, + {name: 'igfs', displayName: 'IGFS', categoryDisplayName: 'Configuration\'s activities', field: 'activitiesDetail["/configuration/igfs"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Configuration IGFS', minWidth: 85, width: 85, enableFiltering: false, visible: false}, + {name: 'summary', displayName: 'Summary', categoryDisplayName: 'Configuration\'s activities', field: 'activitiesDetail["/configuration/summary"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Configuration summary', minWidth: 111, width: 111, enableFiltering: false, visible: false}, // Activities Queries - {name: 'execute', displayName: 'Execute', categoryDisplayName: 'Queries\' activities', field: 'activitiesDetail["/queries/execute"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Query executions', minWidth: 98, width: 98, enableFiltering: false, visible: false}, - {name: 'explain', displayName: 'Explain', categoryDisplayName: 'Queries\' activities', field: 'activitiesDetail["/queries/explain"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Query explain executions', minWidth: 95, width: 95, enableFiltering: false, visible: false}, - {name: 'scan', displayName: 'Scan', categoryDisplayName: 'Queries\' activities', field: 
'activitiesDetail["/queries/scan"] || 0', type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Scan query executions', minWidth: 80, width: 80, enableFiltering: false, visible: false} + {name: 'execute', displayName: 'Execute', categoryDisplayName: 'Queries\' activities', field: 'activitiesDetail["/queries/execute"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Query executions', minWidth: 98, width: 98, enableFiltering: false, visible: false}, + {name: 'explain', displayName: 'Explain', categoryDisplayName: 'Queries\' activities', field: 'activitiesDetail["/queries/explain"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Query explain executions', minWidth: 95, width: 95, enableFiltering: false, visible: false}, + {name: 'scan', displayName: 'Scan', categoryDisplayName: 'Queries\' activities', field: 'activitiesDetail["/queries/scan"] || 0', cellTemplate: VALUE_WITH_TITLE, type: 'number', cellClass: 'ui-grid-number-cell', headerTooltip: 'Scan query executions', minWidth: 80, width: 80, enableFiltering: false, visible: false} ]; From abb925419bb03e18e6bd5d3fcbb7d048650e7754 Mon Sep 17 00:00:00 2001 From: Andrey Novikov Date: Thu, 14 Dec 2017 11:32:11 +0700 Subject: [PATCH 170/243] IGNITE-6920 Fixed rhel detection in web console direct-install. 
(cherry picked from commit 3325876) --- modules/web-console/DEVNOTES.txt | 2 +- modules/web-console/pom.xml | 111 +++++++++++++++++++++++++++++-- 2 files changed, 106 insertions(+), 7 deletions(-) diff --git a/modules/web-console/DEVNOTES.txt b/modules/web-console/DEVNOTES.txt index 6dbb15ad65883..dfb017eb3efb8 100644 --- a/modules/web-console/DEVNOTES.txt +++ b/modules/web-console/DEVNOTES.txt @@ -35,6 +35,6 @@ How to migrate model: Ignite Web Console Direct-Install Maven Build Instructions ========================================================== To build direct-install archive from sources run following command in Ignite project root folder: -"mvn clean package -pl :ignite-web-agent,:ignite-web-console -am -P web-console -DskipTests=true" +"mvn clean package -pl :ignite-web-agent,:ignite-web-console -am -P web-console -DskipTests=true -DskipClientDocs -Dmaven.javadoc.skip=true" Assembled archive can be found here: `/modules/web-console/target/ignite-web-console-direct-install-*.zip`. 
diff --git a/modules/web-console/pom.xml b/modules/web-console/pom.xml index f935ca6316688..c8f5478ecc07d 100644 --- a/modules/web-console/pom.xml +++ b/modules/web-console/pom.xml @@ -87,12 +87,26 @@ + + download dependencies for backend + + npm + + + + backend + install --no-optional + + + build frontend npm + compile + frontend run build @@ -103,26 +117,111 @@ - download dependencies for backend + build backend npm + compile + backend - install --no-optional + run build + + + + + org.apache.maven.plugins + maven-clean-plugin + 2.5 + - build backend + clean-frontend-build - npm + clean + + process-resources + + true + + + ${project.basedir}/frontend/build + + + + + + + clean-backend-build + + clean + process-resources + + true + + + ${project.basedir}/backend/build + + + + + + remove-old-getos + + clean + + process-resources - backend - run build + true + + + ${project.basedir}/backend/node_modules/mongodb-download/node_modules + + + + + + + + + org.apache.maven.plugins + maven-antrun-plugin + 1.7 + + + fixed-rhel-detection + + run + + process-resources + + + + + + + + + + + fixed-download-url + + run + + process-resources + + + + + + From b9436ebea955e8378fdc6c0cc3b07cde982c2db7 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Thu, 14 Dec 2017 12:07:40 +0700 Subject: [PATCH 171/243] IGNITE-7199 Web console: several minor improvements. 
(cherry picked from commit e08d19c) --- modules/web-console/backend/.eslintrc | 185 ------------------ .../web-console/backend/app/agentsHandler.js | 20 +- .../backend/app/browsersHandler.js | 8 +- modules/web-console/backend/app/mongo.js | 4 + .../backend/config/settings.json.sample | 8 +- .../1508395969410-init-registered-date.js | 33 ++++ modules/web-console/backend/package.json | 2 + modules/web-console/backend/services/Utils.js | 51 +++++ modules/web-console/backend/services/auth.js | 26 +-- .../web-console/backend/services/caches.js | 4 + .../web-console/backend/services/clusters.js | 4 + .../web-console/backend/services/domains.js | 4 + modules/web-console/backend/services/igfss.js | 4 + modules/web-console/backend/services/mails.js | 80 ++++---- .../web-console/backend/services/notebooks.js | 4 + modules/web-console/backend/services/users.js | 23 +-- modules/web-console/frontend/app/app.js | 6 + .../app/components/bs-select-menu/style.scss | 10 +- .../components/bs-select-menu/template.pug | 15 +- .../app/components/grid-export/component.js | 52 +++++ .../app/components/grid-export/index.js | 24 +++ .../app/components/grid-export/template.pug | 18 ++ .../app/components/grid-no-data/component.js | 33 ++++ .../app/components/grid-no-data/controller.js | 50 +++++ .../app/components/grid-no-data/index.js | 24 +++ .../app/components/grid-no-data/style.scss | 31 +++ .../list-editable-cols/cols.template.pug | 1 - .../list-editable-cols/row.directive.js | 2 +- .../app/components/list-editable/style.scss | 12 +- .../app/components/list-editable/template.pug | 6 +- .../list-of-registered-users.tpl.pug | 2 +- .../components/ui-grid-filters/directive.js | 62 ++++++ .../app/components/ui-grid-filters/index.js | 43 ++++ .../app/components/ui-grid-filters/style.scss | 36 ++++ .../components/ui-grid-filters/template.pug | 47 +++++ .../web-console-header-extension/component.js | 22 +++ .../web-console-header-extension/template.pug | 15 ++ 
.../components/web-console-header/index.js | 4 +- .../components/web-console-header/style.scss | 2 +- .../web-console-header/template.pug | 6 +- .../app/modules/form/field/input/text.scss | 1 + .../frontend/app/modules/user/permissions.js | 4 +- .../frontend/app/primitives/btn/index.scss | 17 ++ .../app/primitives/form-field/index.scss | 2 +- .../primitives/ui-grid-settings/index.scss | 12 ++ .../app/primitives/ui-grid/index.scss | 2 + modules/web-console/frontend/package.json | 3 +- .../public/images/checkbox-active.svg | 25 +++ .../frontend/public/images/checkbox.svg | 22 +++ .../frontend/public/images/icons/alert.svg | 1 + .../public/images/icons/checkmark.svg | 3 + .../frontend/public/images/icons/index.js | 3 + .../frontend/public/images/icons/sort.svg | 2 +- .../frontend/views/includes/header-left.pug | 8 +- .../frontend/views/includes/header-right.pug | 4 +- .../frontend/views/settings/profile.tpl.pug | 9 +- 56 files changed, 790 insertions(+), 311 deletions(-) delete mode 100644 modules/web-console/backend/.eslintrc create mode 100644 modules/web-console/backend/migrations/1508395969410-init-registered-date.js create mode 100644 modules/web-console/backend/services/Utils.js create mode 100644 modules/web-console/frontend/app/components/grid-export/component.js create mode 100644 modules/web-console/frontend/app/components/grid-export/index.js create mode 100644 modules/web-console/frontend/app/components/grid-export/template.pug create mode 100644 modules/web-console/frontend/app/components/grid-no-data/component.js create mode 100644 modules/web-console/frontend/app/components/grid-no-data/controller.js create mode 100644 modules/web-console/frontend/app/components/grid-no-data/index.js create mode 100644 modules/web-console/frontend/app/components/grid-no-data/style.scss create mode 100644 modules/web-console/frontend/app/components/ui-grid-filters/directive.js create mode 100644 modules/web-console/frontend/app/components/ui-grid-filters/index.js create 
mode 100644 modules/web-console/frontend/app/components/ui-grid-filters/style.scss create mode 100644 modules/web-console/frontend/app/components/ui-grid-filters/template.pug create mode 100644 modules/web-console/frontend/app/components/web-console-header/components/web-console-header-extension/component.js create mode 100644 modules/web-console/frontend/app/components/web-console-header/components/web-console-header-extension/template.pug create mode 100644 modules/web-console/frontend/public/images/checkbox-active.svg create mode 100644 modules/web-console/frontend/public/images/checkbox.svg create mode 100644 modules/web-console/frontend/public/images/icons/alert.svg create mode 100644 modules/web-console/frontend/public/images/icons/checkmark.svg diff --git a/modules/web-console/backend/.eslintrc b/modules/web-console/backend/.eslintrc deleted file mode 100644 index 1915518ab17b8..0000000000000 --- a/modules/web-console/backend/.eslintrc +++ /dev/null @@ -1,185 +0,0 @@ -env: - es6: true - node: true - mocha: true - -parserOptions: - sourceType: module - ecmaFeatures: - arrowFunctions: true - blockBindings: true - classes: true - defaultParams: true - destructuring: true - module: true - objectLiteralComputedProperties: true - objectLiteralShorthandMethods: true - objectLiteralShorthandProperties: true - spread: true - templateStrings: true - experimentalObjectRestSpread: true - -globals: - _: true - io: true - -rules: - arrow-parens: [1, "always"] - arrow-spacing: [1, { "before": true, "after": true }] - accessor-pairs: 2 - block-scoped-var: 2 - brace-style: [0, "1tbs"] - comma-dangle: [2, "never"] - comma-spacing: [2, {"before": false, "after": true}] - comma-style: [2, "last"] - complexity: [1, 40] - computed-property-spacing: [2, "never"] - consistent-return: 0 - consistent-this: [0, "that"] - constructor-super: 2 - curly: [2, "multi-or-nest"] - default-case: 2 - dot-location: 0 - dot-notation: [2, { "allowKeywords": true }] - eol-last: 2 - eqeqeq: 2 - 
func-names: 0 - func-style: [0, "declaration"] - generator-star-spacing: 0 - guard-for-in: 1 - handle-callback-err: 0 - id-length: [2, {"min": 1, "max": 60}] - indent: [2, 4, {"SwitchCase": 1, "MemberExpression": "off", "CallExpression": {"arguments": "off"}}] - key-spacing: [2, { "beforeColon": false, "afterColon": true }] - lines-around-comment: 0 - linebreak-style: [0, "unix"] - max-depth: [0, 4] - max-len: [0, 120, 4] - max-nested-callbacks: [1, 4] - max-params: [0, 3] - max-statements: [0, 10] - new-cap: 2 - new-parens: 2 - no-alert: 2 - no-array-constructor: 2 - no-bitwise: 0 - no-caller: 2 - no-catch-shadow: 2 - no-cond-assign: 2 - no-console: 0 - no-constant-condition: 2 - no-continue: 0 - no-class-assign: 2 - no-const-assign: 2 - no-control-regex: 2 - no-debugger: 2 - no-delete-var: 2 - no-div-regex: 0 - no-dupe-keys: 2 - no-dupe-args: 2 - no-duplicate-case: 2 - no-else-return: 2 - no-empty: 2 - no-empty-character-class: 2 - no-eq-null: 2 - no-eval: 2 - no-ex-assign: 2 - no-extend-native: 2 - no-extra-bind: 2 - no-extra-boolean-cast: 2 - no-extra-parens: 0 - no-extra-semi: 2 - no-fallthrough: 2 - no-floating-decimal: 1 - no-func-assign: 2 - no-implied-eval: 2 - no-inline-comments: 0 - no-inner-declarations: [2, "functions"] - no-invalid-regexp: 2 - no-irregular-whitespace: 2 - no-iterator: 2 - no-label-var: 2 - no-labels: 2 - no-lone-blocks: 2 - no-lonely-if: 2 - no-implicit-coercion: [2, {"boolean": false, "number": true, "string": true}] - no-loop-func: 2 - no-mixed-requires: [0, false] - no-mixed-spaces-and-tabs: [2, true] - no-multi-spaces: ["error", {"exceptions": { "VariableDeclarator": true }}] - no-multi-str: 2 - no-multiple-empty-lines: [0, {"max": 2}] - no-native-reassign: 2 - no-negated-in-lhs: 2 - no-nested-ternary: 0 - no-new: 2 - no-new-func: 2 - no-new-object: 2 - no-new-require: 0 - no-new-wrappers: 2 - no-obj-calls: 2 - no-octal: 2 - no-octal-escape: 2 - no-param-reassign: 0 - no-path-concat: 0 - no-plusplus: 0 - no-process-env: 0 - 
no-process-exit: 0 - no-proto: 2 - no-redeclare: 2 - no-regex-spaces: 1 - no-restricted-modules: 0 - no-script-url: 0 - no-self-compare: 2 - no-sequences: 2 - no-shadow: 0 - no-shadow-restricted-names: 2 - no-spaced-func: 2 - no-sparse-arrays: 1 - no-sync: 0 - no-ternary: 0 - no-trailing-spaces: 2 - no-throw-literal: 0 - no-this-before-super: 2 - no-unexpected-multiline: 2 - no-undef: 2 - no-undef-init: 2 - no-undefined: 2 - no-unneeded-ternary: 2 - no-unreachable: 2 - no-unused-expressions: [2, { allowShortCircuit: true }] - no-unused-vars: [2, {"vars": "all", "args": "after-used"}] - no-use-before-define: 2 - no-useless-call: 2 - no-void: 0 - no-var: 2 - no-warning-comments: 0 - no-with: 2 - newline-after-var: 0 - object-shorthand: [2, "always"] - one-var: [2, "never"] - operator-assignment: [2, "always"] - operator-linebreak: 0 - padded-blocks: 0 - prefer-const: 1 - prefer-spread: 2 - quote-props: [2, "as-needed"] - quotes: [2, "single", {"allowTemplateLiterals": true}] - radix: 1 - semi: [2, "always"] - semi-spacing: [2, {"before": false, "after": true}] - sort-vars: 0 - keyword-spacing: 2 - space-before-blocks: [2, "always"] - space-before-function-paren: [2, "never"] - space-in-parens: 0 - space-infix-ops: 2 - space-unary-ops: [2, { "words": true, "nonwords": false }] - spaced-comment: [1, "always"] - use-isnan: 2 - valid-jsdoc: 0 - valid-typeof: 2 - vars-on-top: 2 - wrap-iife: 0 - wrap-regex: 0 - yoda: [2, "never"] diff --git a/modules/web-console/backend/app/agentsHandler.js b/modules/web-console/backend/app/agentsHandler.js index 9ee64cee0c300..eb3a1e0cd416a 100644 --- a/modules/web-console/backend/app/agentsHandler.js +++ b/modules/web-console/backend/app/agentsHandler.js @@ -131,6 +131,7 @@ module.exports.factory = function(settings, mongo, AgentSocket) { this._agentSockets = new AgentSockets(); this.clusters = []; + this.topLsnrs = []; } /** @@ -211,6 +212,15 @@ module.exports.factory = function(settings, mongo, AgentSocket) { return cluster; } + /** + 
* Add topology listener. + * + * @param lsnr + */ + addTopologyListener(lsnr) { + this.topLsnrs.push(lsnr); + } + /** * Link agent with browsers by account. * @@ -234,7 +244,9 @@ module.exports.factory = function(settings, mongo, AgentSocket) { sock.on('cluster:topology', (top) => { const cluster = this.getOrCreateCluster(top); - if (_.isNil(agentSocket.cluster)) { + _.forEach(this.topLsnrs, (lsnr) => lsnr(agentSocket, cluster, top)); + + if (agentSocket.cluster !== cluster) { agentSocket.cluster = cluster; _.forEach(tokens, (token) => { @@ -254,11 +266,11 @@ module.exports.factory = function(settings, mongo, AgentSocket) { } }); - sock.on('cluster:collector', (top) => { + sock.on('cluster:disconnected', () => { + const newTop = _.assign({}, agentSocket.cluster, {nids: []}); - }); + _.forEach(this.topLsnrs, (lsnr) => lsnr(agentSocket, agentSocket.cluster, newTop)); - sock.on('cluster:disconnected', () => { agentSocket.cluster = null; _.forEach(tokens, (token) => { diff --git a/modules/web-console/backend/app/browsersHandler.js b/modules/web-console/backend/app/browsersHandler.js index 198bc1250e005..a8ca7067b6f79 100644 --- a/modules/web-console/backend/app/browsersHandler.js +++ b/modules/web-console/backend/app/browsersHandler.js @@ -133,6 +133,10 @@ module.exports = { _.forEach(socks, (sock) => sock.emit('cluster:changed', cluster)); } + pushInitialData(sock) { + // Send initial data. 
+ } + emitNotification(sock) { sock.emit('user:notifications', this.notification); } @@ -185,7 +189,8 @@ module.exports = { * @return {Promise.} */ executeOnNode(agent, demo, params) { - return agent.then((agentSock) => agentSock.emitEvent('node:rest', {uri: 'ignite', demo, params})) + return agent + .then((agentSock) => agentSock.emitEvent('node:rest', {uri: 'ignite', demo, params})) .then((res) => { if (res.status === 0) { if (res.zipped) @@ -310,6 +315,7 @@ module.exports = { this.agentListeners(sock); this.nodeListeners(sock); + this.pushInitialData(sock); this.agentStats(sock.request.user.token, [sock]); this.emitNotification(sock); }); diff --git a/modules/web-console/backend/app/mongo.js b/modules/web-console/backend/app/mongo.js index de9167696980a..955cd47296c26 100644 --- a/modules/web-console/backend/app/mongo.js +++ b/modules/web-console/backend/app/mongo.js @@ -41,8 +41,10 @@ const defineSchema = (mongoose) => { firstName: String, lastName: String, email: String, + phone: String, company: String, country: String, + registered: Date, lastLogin: Date, lastActivity: Date, admin: Boolean, @@ -60,12 +62,14 @@ const defineSchema = (mongoose) => { return { _id: ret._id, email: ret.email, + phone: ret.phone, firstName: ret.firstName, lastName: ret.lastName, company: ret.company, country: ret.country, admin: ret.admin, token: ret.token, + registered: ret.registered, lastLogin: ret.lastLogin, lastActivity: ret.lastActivity }; diff --git a/modules/web-console/backend/config/settings.json.sample b/modules/web-console/backend/config/settings.json.sample index 71a64eabb8614..aa93e07c57eba 100644 --- a/modules/web-console/backend/config/settings.json.sample +++ b/modules/web-console/backend/config/settings.json.sample @@ -12,11 +12,11 @@ }, "mail": { "service": "", - "sign": "Kind regards,
        Apache Ignite Team", - "greeting": "Apache Ignite Web Console", - "from": "Apache Ignite Web Console ", + "from": "Some Company Web Console ", + "greeting": "Some Company Web Console", + "sign": "Kind regards,
        Some Company Team", "auth": { - "user": "someusername@somecompany.tld", + "user": "some_username@some_company.com", "pass": "" } } diff --git a/modules/web-console/backend/migrations/1508395969410-init-registered-date.js b/modules/web-console/backend/migrations/1508395969410-init-registered-date.js new file mode 100644 index 0000000000000..b994cac3d415f --- /dev/null +++ b/modules/web-console/backend/migrations/1508395969410-init-registered-date.js @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +const _ = require('lodash'); + +exports.up = function up(done) { + const accounts = this('Account'); + + accounts.find({}).lean().exec() + .then((data) => _.forEach(data, (acc) => accounts.update({_id: acc._id}, {$set: {registered: acc.lastLogin}}, {upsert: true}).exec())) + .then(() => done()) + .catch(done); +}; + +exports.down = function down(done) { + this('Account').update({}, {$unset: {registered: 1}}, {multi: true}).exec() + .then(() => done()) + .catch(done); +}; diff --git a/modules/web-console/backend/package.json b/modules/web-console/backend/package.json index ba442f97f288e..889c40d7d4a31 100644 --- a/modules/web-console/backend/package.json +++ b/modules/web-console/backend/package.json @@ -49,6 +49,7 @@ "dependencies": { "app-module-path": "2.2.0", "body-parser": "1.17.2", + "common-tags": "1.4.0", "connect-mongo": "1.3.2", "cookie-parser": "1.4.3", "express": "4.15.3", @@ -62,6 +63,7 @@ "mongodb-prebuilt": "6.3.3", "mongoose": "4.11.4", "morgan": "1.8.2", + "nexmo": "2.0.2", "nconf": "0.8.4", "nodemailer": "4.0.1", "passport": "0.3.2", diff --git a/modules/web-console/backend/services/Utils.js b/modules/web-console/backend/services/Utils.js new file mode 100644 index 0000000000000..ec97e4efac0f2 --- /dev/null +++ b/modules/web-console/backend/services/Utils.js @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use strict'; + +// Fire me up! + +module.exports = { + implements: 'services/utils' +}; + +/** + * @returns {UtilsService} + */ +module.exports.factory = () => { + class UtilsService { + /** + * Generate token string. + * + * @param len length of string + * @returns {String} Random string. + */ + static randomString(len) { + const possible = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; + const possibleLen = possible.length; + + let res = ''; + + for (let i = 0; i < len; i++) + res += possible.charAt(Math.floor(Math.random() * possibleLen)); + + return res; + } + } + + return UtilsService; +}; diff --git a/modules/web-console/backend/services/auth.js b/modules/web-console/backend/services/auth.js index dde246071bb64..986ed9555d0df 100644 --- a/modules/web-console/backend/services/auth.js +++ b/modules/web-console/backend/services/auth.js @@ -21,37 +21,19 @@ module.exports = { implements: 'services/auth', - inject: ['mongo', 'settings', 'errors'] + inject: ['mongo', 'settings', 'errors', 'services/utils'] }; /** * @param mongo * @param settings * @param errors + * @param {UtilsService} utilsService * @returns {AuthService} */ -module.exports.factory = (mongo, settings, errors) => { +module.exports.factory = (mongo, settings, errors, utilsService) => { class AuthService { - /** - * Generate token string. 
- * - * @param length - length of string - * @returns {string} - generated token - */ - static generateResetToken(length) { - length = length || settings.tokenLength; - const possible = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; - const possibleLen = possible.length; - - let res = ''; - - for (let i = 0; i < length; i++) - res += possible.charAt(Math.floor(Math.random() * possibleLen)); - - return res; - } - /** * Reset password reset token for user. * @@ -64,7 +46,7 @@ module.exports.factory = (mongo, settings, errors) => { if (!user) throw new errors.MissingResourceException('Account with that email address does not exists!'); - user.resetPasswordToken = AuthService.generateResetToken(settings.tokenLength); + user.resetPasswordToken = utilsService.randomString(settings.tokenLength); return user.save(); }); diff --git a/modules/web-console/backend/services/caches.js b/modules/web-console/backend/services/caches.js index 5c96ccd7a4dc0..9cb65a19527c0 100644 --- a/modules/web-console/backend/services/caches.js +++ b/modules/web-console/backend/services/caches.js @@ -58,6 +58,8 @@ module.exports.factory = (mongo, spaceService, errors) => { .catch((err) => { if (err.code === mongo.errCodes.DUPLICATE_KEY_UPDATE_ERROR || err.code === mongo.errCodes.DUPLICATE_KEY_ERROR) throw new errors.DuplicateKeyException('Cache with name: "' + cache.name + '" already exist.'); + else + throw err; }); }; @@ -77,6 +79,8 @@ module.exports.factory = (mongo, spaceService, errors) => { .catch((err) => { if (err.code === mongo.errCodes.DUPLICATE_KEY_ERROR) throw new errors.DuplicateKeyException('Cache with name: "' + cache.name + '" already exist.'); + else + throw err; }); }; diff --git a/modules/web-console/backend/services/clusters.js b/modules/web-console/backend/services/clusters.js index 9f50edefa3e40..49e6f09af49e3 100644 --- a/modules/web-console/backend/services/clusters.js +++ b/modules/web-console/backend/services/clusters.js @@ -58,6 +58,8 @@ 
module.exports.factory = (mongo, spacesService, errors) => { .catch((err) => { if (err.code === mongo.errCodes.DUPLICATE_KEY_UPDATE_ERROR || err.code === mongo.errCodes.DUPLICATE_KEY_ERROR) throw new errors.DuplicateKeyException('Cluster with name: "' + cluster.name + '" already exist.'); + else + throw err; }); }; @@ -77,6 +79,8 @@ module.exports.factory = (mongo, spacesService, errors) => { .catch((err) => { if (err.code === mongo.errCodes.DUPLICATE_KEY_ERROR) throw new errors.DuplicateKeyException('Cluster with name: "' + cluster.name + '" already exist.'); + else + throw err; }); }; diff --git a/modules/web-console/backend/services/domains.js b/modules/web-console/backend/services/domains.js index e25d0916e5374..986991d55dfd4 100644 --- a/modules/web-console/backend/services/domains.js +++ b/modules/web-console/backend/services/domains.js @@ -65,6 +65,8 @@ module.exports.factory = (mongo, spacesService, cachesService, errors) => { .catch((err) => { if (err.code === mongo.errCodes.DUPLICATE_KEY_UPDATE_ERROR || err.code === mongo.errCodes.DUPLICATE_KEY_ERROR) throw new errors.DuplicateKeyException('Domain model with value type: "' + domain.valueType + '" already exist.'); + else + throw err; }); }; @@ -86,6 +88,8 @@ module.exports.factory = (mongo, spacesService, cachesService, errors) => { .catch((err) => { if (err.code === mongo.errCodes.DUPLICATE_KEY_ERROR) throw new errors.DuplicateKeyException('Domain model with value type: "' + domain.valueType + '" already exist.'); + else + throw err; }); }; diff --git a/modules/web-console/backend/services/igfss.js b/modules/web-console/backend/services/igfss.js index e9ff38eb5a30b..5296f1613b274 100644 --- a/modules/web-console/backend/services/igfss.js +++ b/modules/web-console/backend/services/igfss.js @@ -56,6 +56,8 @@ module.exports.factory = (mongo, spacesService, errors) => { .catch((err) => { if (err.code === mongo.errCodes.DUPLICATE_KEY_UPDATE_ERROR || err.code === mongo.errCodes.DUPLICATE_KEY_ERROR) throw new 
errors.DuplicateKeyException('IGFS with name: "' + igfs.name + '" already exist.'); + else + throw err; }); }; @@ -74,6 +76,8 @@ module.exports.factory = (mongo, spacesService, errors) => { .catch((err) => { if (err.code === mongo.errCodes.DUPLICATE_KEY_ERROR) throw new errors.DuplicateKeyException('IGFS with name: "' + igfs.name + '" already exist.'); + else + throw err; }); }; diff --git a/modules/web-console/backend/services/mails.js b/modules/web-console/backend/services/mails.js index 6a31e93466fff..c16db3b18e2e3 100644 --- a/modules/web-console/backend/services/mails.js +++ b/modules/web-console/backend/services/mails.js @@ -32,44 +32,44 @@ module.exports = { * @returns {MailsService} */ module.exports.factory = (settings) => { - /** - * Send mail to user. - * - * @param {Account} user - * @param {String} subject - * @param {String} html - * @param {String} sendErr - * @throws {Error} - * @return {Promise} - */ - const send = (user, subject, html, sendErr) => { - return new Promise((resolve, reject) => { - const transportConfig = settings.mail; - - if (_.isEmpty(transportConfig.service) || _.isEmpty(transportConfig.auth.user) || _.isEmpty(transportConfig.auth.pass)) - throw new Error('Failed to send email. SMTP server is not configured. Please ask webmaster to setup SMTP server!'); - - const mailer = nodemailer.createTransport(transportConfig); - - const sign = settings.mail.sign ? `

        --------------
        ${settings.mail.sign}
        ` : ''; - - const mail = { - from: settings.mail.from, - to: settings.mail.address(`${user.firstName} ${user.lastName}`, user.email), - subject, - html: html + sign - }; - - mailer.sendMail(mail, (err) => { - if (err) - return reject(sendErr ? new Error(sendErr) : err); - - resolve(user); + class MailsService { + /** + * Send mail to user. + * + * @param {Account} user + * @param {String} subject + * @param {String} html + * @param {String} sendErr + * @throws {Error} + * @return {Promise} + */ + static send(user, subject, html, sendErr) { + return new Promise((resolve, reject) => { + const transportConfig = settings.mail; + + if (_.isEmpty(transportConfig.service) || _.isEmpty(transportConfig.auth.user) || _.isEmpty(transportConfig.auth.pass)) + throw new Error('Failed to send email. SMTP server is not configured. Please ask webmaster to setup SMTP server!'); + + const mailer = nodemailer.createTransport(transportConfig); + + const sign = settings.mail.sign ? `

        --------------
        ${settings.mail.sign}
        ` : ''; + + const mail = { + from: settings.mail.from, + to: settings.mail.address(`${user.firstName} ${user.lastName}`, user.email), + subject, + html: html + sign + }; + + mailer.sendMail(mail, (err) => { + if (err) + return reject(sendErr ? new Error(sendErr) : err); + + resolve(user); + }); }); - }); - }; + } - class MailsService { /** * Send email to user for password reset. * @param host @@ -78,7 +78,7 @@ module.exports.factory = (settings) => { static emailUserSignUp(host, user) { const resetLink = `${host}/password/reset?token=${user.resetPasswordToken}`; - return send(user, `Thanks for signing up for ${settings.mail.greeting}.`, + return MailsService.send(user, `Thanks for signing up for ${settings.mail.greeting}.`, `Hello ${user.firstName} ${user.lastName}!

        ` + `You are receiving this email because you have signed up to use ${settings.mail.greeting}.

        ` + 'If you have not done the sign up and do not know what this email is about, please ignore it.
        ' + @@ -94,7 +94,7 @@ module.exports.factory = (settings) => { static emailUserResetLink(host, user) { const resetLink = `${host}/password/reset?token=${user.resetPasswordToken}`; - return send(user, 'Password Reset', + return MailsService.send(user, 'Password Reset', `Hello ${user.firstName} ${user.lastName}!

        ` + 'You are receiving this because you (or someone else) have requested the reset of the password for your account.

        ' + 'Please click on the following link, or paste this into your browser to complete the process:

        ' + @@ -109,7 +109,7 @@ module.exports.factory = (settings) => { * @param user */ static emailPasswordChanged(host, user) { - return send(user, 'Your password has been changed', + return MailsService.send(user, 'Your password has been changed', `Hello ${user.firstName} ${user.lastName}!

        ` + `This is a confirmation that the password for your account on ${settings.mail.greeting} has just been changed.

        `, 'Password was changed, but failed to send confirmation email!'); @@ -121,7 +121,7 @@ module.exports.factory = (settings) => { * @param user */ static emailUserDeletion(host, user) { - return send(user, 'Your account was removed', + return MailsService.send(user, 'Your account was removed', `Hello ${user.firstName} ${user.lastName}!

        ` + `You are receiving this email because your account for ${settings.mail.greeting} was removed.`, 'Account was removed, but failed to send email notification to user!'); diff --git a/modules/web-console/backend/services/notebooks.js b/modules/web-console/backend/services/notebooks.js index 5d8b57d2ec3f8..4ae89d258ff96 100644 --- a/modules/web-console/backend/services/notebooks.js +++ b/modules/web-console/backend/services/notebooks.js @@ -51,6 +51,8 @@ module.exports.factory = (mongo, spacesService, errors) => { .catch((err) => { if (err.code === mongo.errCodes.DUPLICATE_KEY_UPDATE_ERROR || err.code === mongo.errCodes.DUPLICATE_KEY_ERROR) throw new errors.DuplicateKeyException('Notebook with name: "' + notebook.name + '" already exist.'); + else + throw err; }); }; @@ -65,6 +67,8 @@ module.exports.factory = (mongo, spacesService, errors) => { .catch((err) => { if (err.code === mongo.errCodes.DUPLICATE_KEY_ERROR) throw new errors.DuplicateKeyException('Notebook with name: "' + notebook.name + '" already exist.'); + else + throw err; }); }; diff --git a/modules/web-console/backend/services/users.js b/modules/web-console/backend/services/users.js index 620f7e95a1cf9..f804840e98c02 100644 --- a/modules/web-console/backend/services/users.js +++ b/modules/web-console/backend/services/users.js @@ -23,7 +23,7 @@ const _ = require('lodash'); module.exports = { implements: 'services/users', - inject: ['errors', 'settings', 'mongo', 'services/spaces', 'services/mails', 'services/activities', 'agents-handler'] + inject: ['errors', 'settings', 'mongo', 'services/spaces', 'services/mails', 'services/activities', 'services/utils', 'agents-handler'] }; /** @@ -33,22 +33,11 @@ module.exports = { * @param {SpacesService} spacesService * @param {MailsService} mailsService * @param {ActivitiesService} activitiesService + * @param {UtilsService} utilsService * @param {AgentsHandler} agentHnd * @returns {UsersService} */ -module.exports.factory = (errors, settings, mongo, 
spacesService, mailsService, activitiesService, agentHnd) => { - const _randomString = () => { - const possible = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; - const possibleLen = possible.length; - - let res = ''; - - for (let i = 0; i < settings.tokenLength; i++) - res += possible.charAt(Math.floor(Math.random() * possibleLen)); - - return res; - }; - +module.exports.factory = (errors, settings, mongo, spacesService, mailsService, activitiesService, utilsService, agentHnd) => { class UsersService { /** * Save profile information. @@ -61,8 +50,8 @@ module.exports.factory = (errors, settings, mongo, spacesService, mailsService, return mongo.Account.count().exec() .then((cnt) => { user.admin = cnt === 0; - - user.token = _randomString(); + user.registered = new Date(); + user.token = utilsService.randomString(settings.tokenLength); return new mongo.Account(user); }) @@ -80,7 +69,7 @@ module.exports.factory = (errors, settings, mongo, spacesService, mailsService, }); }) .then((registered) => { - registered.resetPasswordToken = _randomString(); + registered.resetPasswordToken = utilsService.randomString(settings.tokenLength); return registered.save() .then(() => mongo.Space.create({name: 'Personal space', owner: registered._id})) diff --git a/modules/web-console/frontend/app/app.js b/modules/web-console/frontend/app/app.js index 5a27ea2ec7ec9..a1cd6eb8c6980 100644 --- a/modules/web-console/frontend/app/app.js +++ b/modules/web-console/frontend/app/app.js @@ -124,9 +124,12 @@ import pageConfigureAdvanced from './components/page-configure-advanced'; import pageQueries from './components/page-queries'; import gridColumnSelector from './components/grid-column-selector'; import gridItemSelected from './components/grid-item-selected'; +import gridNoData from './components/grid-no-data'; +import gridExport from './components/grid-export'; import bsSelectMenu from './components/bs-select-menu'; import protectFromBsSelectRender from 
'./components/protect-from-bs-select-render'; import uiGridHovering from './components/ui-grid-hovering'; +import uiGridFilters from './components/ui-grid-filters'; import listEditable from './components/list-editable'; import clusterSelector from './components/cluster-selector'; import connectedClusters from './components/connected-clusters'; @@ -201,8 +204,11 @@ angular.module('ignite-console', [ pageQueries.name, gridColumnSelector.name, gridItemSelected.name, + gridNoData.name, + gridExport.name, bsSelectMenu.name, uiGridHovering.name, + uiGridFilters.name, protectFromBsSelectRender.name, AngularStrapTooltip.name, AngularStrapSelect.name, diff --git a/modules/web-console/frontend/app/components/bs-select-menu/style.scss b/modules/web-console/frontend/app/components/bs-select-menu/style.scss index ccf33a36110ef..d82bf19e053a6 100644 --- a/modules/web-console/frontend/app/components/bs-select-menu/style.scss +++ b/modules/web-console/frontend/app/components/bs-select-menu/style.scss @@ -31,7 +31,6 @@ overflow-y: auto; overflow-x: hidden; max-height: $max-visible-items * $item-height; - max-width: 280px; box-shadow: 0 2px 5px 0 rgba(0, 0, 0, 0.3); border-radius: $ignite-button-border-radius; border: 1px solid #c5c5c5; @@ -76,6 +75,15 @@ } } + .bssm-click-overlay { + position: fixed; + top: 0; + right: 0; + bottom: 0; + left: 0; + z-index: -1; + } + &.bssm-multiple { .bssm-active-indicator { display: initial; diff --git a/modules/web-console/frontend/app/components/bs-select-menu/template.pug b/modules/web-console/frontend/app/components/bs-select-menu/template.pug index a9c1c2881727a..c8c6eaac09099 100644 --- a/modules/web-console/frontend/app/components/bs-select-menu/template.pug +++ b/modules/web-console/frontend/app/components/bs-select-menu/template.pug @@ -25,11 +25,8 @@ ul.bs-select-menu( ng-click='$ctrl.areAllSelected() ? 
$selectNone() : $selectAll()' type='button' ) - span.bssm-active-indicator.icon.icon-left.fa( - ng-class=`{ - 'fa-check-square bssm-active-indicator__active': $ctrl.areAllSelected(), - 'fa-square-o': !$ctrl.areAllSelected() - }` + img.bssm-active-indicator.icon-left( + ng-src='{{ $ctrl.areAllSelected() ? "/images/checkbox-active.svg" : "/images/checkbox.svg" }}' ) | All li(role='presentation' ng-repeat='match in $matches') @@ -42,10 +39,8 @@ ul.bs-select-menu( data-placement='right auto' title='{{ ::match.label }}' ) - span.bssm-active-indicator.icon.icon-left.fa( - ng-class=`{ - 'fa-check-square bssm-active-indicator__active': $isActive($index), - 'fa-square-o': !$isActive($index) - }` + img.bssm-active-indicator.icon-left( + ng-src='{{ $isActive($index) ? "/images/checkbox-active.svg" : "/images/checkbox.svg" }}' ) span.bssm-item-text(ng-bind-html='match.label') + .bssm-click-overlay(ng-click='$hide()') diff --git a/modules/web-console/frontend/app/components/grid-export/component.js b/modules/web-console/frontend/app/components/grid-export/component.js new file mode 100644 index 0000000000000..9c239a6340b6b --- /dev/null +++ b/modules/web-console/frontend/app/components/grid-export/component.js @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import template from './template.pug'; + +export default { + template, + controller: class { + static $inject = ['$scope', 'uiGridGroupingConstants', 'uiGridExporterService', 'uiGridExporterConstants']; + + constructor($scope, uiGridGroupingConstants, uiGridExporterService, uiGridExporterConstants) { + Object.assign(this, { uiGridGroupingConstants, uiGridExporterService, uiGridExporterConstants }); + } + + export() { + const data = []; + const columnHeaders = this.uiGridExporterService.getColumnHeaders(this.gridApi.grid, this.uiGridExporterConstants.VISIBLE); + + _.forEach(this.gridApi.grid.rows, (row) => { + if (!row.visible) + return; + + const values = []; + _.forEach(columnHeaders, ({ name }) => { + values.push({ value: row.entity[name] }); + }); + + data.push(values); + }); + + const csvContent = this.uiGridExporterService.formatAsCsv(columnHeaders, data, this.gridApi.grid.options.exporterCsvColumnSeparator); + this.uiGridExporterService.downloadFile(this.gridApi.grid.options.exporterCsvFilename, csvContent, this.gridApi.grid.options.exporterOlderExcelCompatibility); + } + }, + bindings: { + gridApi: '<' + } +}; diff --git a/modules/web-console/frontend/app/components/grid-export/index.js b/modules/web-console/frontend/app/components/grid-export/index.js new file mode 100644 index 0000000000000..9fa8835b8197f --- /dev/null +++ b/modules/web-console/frontend/app/components/grid-export/index.js @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import angular from 'angular'; + +import component from './component'; + +export default angular + .module('ignite-console.grid-export', []) + .component('gridExport', component); diff --git a/modules/web-console/frontend/app/components/grid-export/template.pug b/modules/web-console/frontend/app/components/grid-export/template.pug new file mode 100644 index 0000000000000..99780ee8461e3 --- /dev/null +++ b/modules/web-console/frontend/app/components/grid-export/template.pug @@ -0,0 +1,18 @@ +//- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +button.btn-ignite.btn-ignite--primary-outline(ng-click='$ctrl.export()' bs-tooltip='' data-title='Export filtered rows to CSV' data-placement='top') + svg(ignite-icon='csv') diff --git a/modules/web-console/frontend/app/components/grid-no-data/component.js b/modules/web-console/frontend/app/components/grid-no-data/component.js new file mode 100644 index 0000000000000..aa219dfad35eb --- /dev/null +++ b/modules/web-console/frontend/app/components/grid-no-data/component.js @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import './style.scss'; +import controller from './controller.js'; + +export default { + template: ` +
        +
        + `, + controller, + bindings: { + gridApi: '<' + }, + transclude: { + noDataFiltered: '?gridNoDataFiltered' + } +}; diff --git a/modules/web-console/frontend/app/components/grid-no-data/controller.js b/modules/web-console/frontend/app/components/grid-no-data/controller.js new file mode 100644 index 0000000000000..95e8a5e150948 --- /dev/null +++ b/modules/web-console/frontend/app/components/grid-no-data/controller.js @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import filter from 'lodash/fp/filter'; + +const rowsFiltered = filter(({ visible }) => visible); + +export default class { + static $inject = ['$scope', 'uiGridConstants']; + + constructor($scope, uiGridConstants) { + Object.assign(this, {$scope, uiGridConstants}); + + this.noData = true; + } + + $onChanges(changes) { + if (changes && 'gridApi' in changes && changes.gridApi.currentValue) { + this.gridApi.core.on.rowsVisibleChanged(this.$scope, () => { + this.applyValues(); + }); + } + } + + applyValues() { + if (!this.gridApi.grid.rows.length) { + this.noData = true; + return; + } + + this.noData = false; + + const filtered = rowsFiltered(this.gridApi.grid.rows); + this.noDataFiltered = !filtered.length; + } +} diff --git a/modules/web-console/frontend/app/components/grid-no-data/index.js b/modules/web-console/frontend/app/components/grid-no-data/index.js new file mode 100644 index 0000000000000..2acecf9981a6e --- /dev/null +++ b/modules/web-console/frontend/app/components/grid-no-data/index.js @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import angular from 'angular'; + +import component from './component'; + +export default angular + .module('ignite-console.grid-no-data', []) + .component('gridNoData', component); diff --git a/modules/web-console/frontend/app/components/grid-no-data/style.scss b/modules/web-console/frontend/app/components/grid-no-data/style.scss new file mode 100644 index 0000000000000..0a51ac2214573 --- /dev/null +++ b/modules/web-console/frontend/app/components/grid-no-data/style.scss @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +grid-no-data { + position: relative; + display: block; + padding: 0 51px; + + border-radius: 0 0 4px 4px; + + font-style: italic; + line-height: 16px; + + [ng-transclude] { + padding: 16px 0; + } +} diff --git a/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.template.pug b/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.template.pug index 23311737c772f..f1607070c2202 100644 --- a/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.template.pug +++ b/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.template.pug @@ -15,7 +15,6 @@ limitations under the License. .list-editable-cols__header( - ng-show='$ctrl.ngModel.$viewValue.length' ng-class='::$ctrl.rowClass' ) .list-editable-cols__header-cell(ng-repeat='col in ::$ctrl.colDefs' ng-class='::col.cellClass') diff --git a/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/row.directive.js b/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/row.directive.js index e427ab5d7bb62..32d75f9496eb3 100644 --- a/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/row.directive.js +++ b/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/row.directive.js @@ -33,7 +33,7 @@ export default function() { el.addClass(ctrl.rowClass); ctrl.colDefs.forEach(({ cellClass }, index) => { - children[index].classList.add(...(Array.isArray(cellClass) ? cellClass : [cellClass])); + _.forEach((Array.isArray(cellClass) ? 
cellClass : [cellClass]), (item) => children[index].classList.add(item)); }); } }; diff --git a/modules/web-console/frontend/app/components/list-editable/style.scss b/modules/web-console/frontend/app/components/list-editable/style.scss index 0f3f8aecbf34b..83ce0d49fb52d 100644 --- a/modules/web-console/frontend/app/components/list-editable/style.scss +++ b/modules/web-console/frontend/app/components/list-editable/style.scss @@ -33,7 +33,8 @@ list-editable { display: flex; align-items: center; min-height: $min-height; - padding: 5px 10px; + padding: 5px 20px; + margin: -6px 0; font-style: italic; } @@ -76,6 +77,10 @@ list-editable { justify-content: center; } + &-sort { + display: none; + } + &-cross { [ignite-icon] { width: 12px; @@ -105,11 +110,6 @@ list-editable { } &:not(.le-row--has-item-view) { - & > .le-row-index, - & > .le-row-cross { - margin-top: 18px; - } - align-items: flex-start; } } diff --git a/modules/web-console/frontend/app/components/list-editable/template.pug b/modules/web-console/frontend/app/components/list-editable/template.pug index a713188607273..29006026585e4 100644 --- a/modules/web-console/frontend/app/components/list-editable/template.pug +++ b/modules/web-console/frontend/app/components/list-editable/template.pug @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. 
-.le-body(ng-if='$ctrl.ngModel.$viewValue.length') +.le-body() .le-row( + ng-if='$ctrl.ngModel.$viewValue.length' ng-repeat='item in $ctrl.ngModel.$viewValue track by $ctrl.$index(item, $index)' ng-class=`{ 'le-row--editable': $ctrl.isEditView($index), @@ -46,4 +47,5 @@ button.btn-ignite.btn-ignite--link-dashed-secondary(type='button' ng-click='$ctrl.remove($index)') svg(ignite-icon='cross') -div(ng-transclude='noItems' ng-if='!$ctrl.ngModel.$viewValue.length') + .le-row(ng-if='!$ctrl.ngModel.$viewValue.length') + div(ng-transclude='noItems') diff --git a/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.tpl.pug b/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.tpl.pug index e311246e160d0..11ff7bc6fffa9 100644 --- a/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.tpl.pug +++ b/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.tpl.pug @@ -45,7 +45,7 @@ include /app/helpers/jade/mixins required: false, options: '$ctrl.actionOptions' }) - button.btn-ignite.btn-ignite--primary-outline(ng-click='$ctrl.exportCsv()' bs-tooltip='' data-title='Export table to csv' data-placement='top') + button.btn-ignite.btn-ignite--primary-outline(ng-click='$ctrl.exportCsv()' bs-tooltip='' data-title='Export table to CSV' data-placement='top') svg(ignite-icon='csv') form.ui-grid-settings-dateperiod(name=form novalidate) -var form = 'admin' diff --git a/modules/web-console/frontend/app/components/ui-grid-filters/directive.js b/modules/web-console/frontend/app/components/ui-grid-filters/directive.js new file mode 100644 index 0000000000000..2e18933edd690 --- /dev/null +++ b/modules/web-console/frontend/app/components/ui-grid-filters/directive.js @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import template from './template.pug'; +import './style.scss'; + +export default function uiGridFilters(uiGridConstants) { + return { + require: 'uiGrid', + link: { + pre(scope, el, attr, grid) { + if (!grid.grid.options.enableFiltering) return; + grid.grid.options.columnDefs.filter((cd) => cd.multiselectFilterOptions).forEach((cd) => { + cd.headerCellTemplate = template; + cd.filter = { + type: uiGridConstants.filter.SELECT, + term: cd.multiselectFilterOptions.map((t) => t.value), + condition(searchTerm, cellValue) { + return searchTerm.includes(cellValue); + }, + selectOptions: cd.multiselectFilterOptions, + $$selectOptionsMapping: cd.multiselectFilterOptions.reduce((a, v) => Object.assign(a, {[v.value]: v.label}), {}), + $$multiselectFilterTooltip() { + const prefix = 'Active filter'; + switch (this.term.length) { + case 0: + return `${prefix}: show none`; + default: + return `${prefix}: ${this.term.map((t) => this.$$selectOptionsMapping[t]).join(', ')}`; + case this.selectOptions.length: + return `${prefix}: show all`; + } + } + }; + if (!cd.cellTemplate) { + cd.cellTemplate = ` +
        + {{ col.colDef.filter.$$selectOptionsMapping[row.entity[col.field]] }} +
        + `; + } + }); + } + } + }; +} + +uiGridFilters.$inject = ['uiGridConstants']; diff --git a/modules/web-console/frontend/app/components/ui-grid-filters/index.js b/modules/web-console/frontend/app/components/ui-grid-filters/index.js new file mode 100644 index 0000000000000..0f05b779f2318 --- /dev/null +++ b/modules/web-console/frontend/app/components/ui-grid-filters/index.js @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import angular from 'angular'; +import directive from './directive'; +import flow from 'lodash/flow'; + +export default angular + .module('ignite-console.ui-grid-filters', ['ui.grid']) + .decorator('$tooltip', ['$delegate', ($delegate) => { + return function(el, config) { + const instance = $delegate(el, config); + instance.$referenceElement = el; + instance.destroy = flow(instance.destroy, () => instance.$referenceElement = null); + instance.$applyPlacement = flow(instance.$applyPlacement, () => { + if (!instance.$element) return; + const refWidth = instance.$referenceElement[0].getBoundingClientRect().width; + const elWidth = instance.$element[0].getBoundingClientRect().width; + if (refWidth > elWidth) { + instance.$element.css({ + width: refWidth, + maxWidth: 'initial' + }); + } + }); + return instance; + }; + }]) + .directive('uiGridFilters', directive); diff --git a/modules/web-console/frontend/app/components/ui-grid-filters/style.scss b/modules/web-console/frontend/app/components/ui-grid-filters/style.scss new file mode 100644 index 0000000000000..629cbadafd27e --- /dev/null +++ b/modules/web-console/frontend/app/components/ui-grid-filters/style.scss @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +.ui-grid-filters[role="columnheader"] { + display: flex; + + // Decrease horizontal padding because multiselect button already has it + padding-left: 8px !important; + padding-right: 8px !important; + + & > div:first-child { + flex: auto !important; + } + + .ui-grid-cell-contents[role="button"] { + flex: auto !important; + flex-basis: 100% !important; + + padding: 0 !important; + overflow: visible !important; + } +} diff --git a/modules/web-console/frontend/app/components/ui-grid-filters/template.pug b/modules/web-console/frontend/app/components/ui-grid-filters/template.pug new file mode 100644 index 0000000000000..c898078cb2396 --- /dev/null +++ b/modules/web-console/frontend/app/components/ui-grid-filters/template.pug @@ -0,0 +1,47 @@ +//- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +.ui-grid-filter-container.ui-grid-filters(role='columnheader') + div(ng-style='col.extraStyle' + ng-repeat='colFilter in col.filters' + ng-class="{'ui-grid-filter-cancel-button-hidden' : colFilter.disableCancelFilterButton === true }" + ng-switch='colFilter.type') + div(ng-switch-when='select') + button.btn-ignite.btn-ignite--link-dashed-success( + ng-class=`{ + 'bold': colFilter.term.length !== colFilter.selectOptions.length + }` + type='button' + title='{{ colFilter.$$multiselectFilterTooltip() }}' + ng-model='colFilter.term' + bs-select + bs-options='option.value as option.label for option in colFilter.selectOptions' + data-multiple='true' + data-trigger='click' + data-placement='bottom-left' + protect-from-bs-select-render + ) {{ col.displayName }} + + .ui-grid-cell-contents(role='button') + button.btn-ignite.btn-ignite--link-dashed-success( + ui-grid-one-bind-id-grid="col.uid + '-sortdir-text'" + ui-grid-visible="col.sort.direction" + aria-label="Sort Descending") + i(ng-class="{\ + 'ui-grid-icon-up-dir': col.sort.direction == 'asc',\ + 'ui-grid-icon-down-dir': col.sort.direction == 'desc',\ + 'ui-grid-icon-blank': !col.sort.direction\ + }" title="" aria-hidden="true") diff --git a/modules/web-console/frontend/app/components/web-console-header/components/web-console-header-extension/component.js b/modules/web-console/frontend/app/components/web-console-header/components/web-console-header-extension/component.js new file mode 100644 index 0000000000000..1621adccceee4 --- /dev/null +++ b/modules/web-console/frontend/app/components/web-console-header/components/web-console-header-extension/component.js @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import template from './template.pug'; + +export default { + template +}; diff --git a/modules/web-console/frontend/app/components/web-console-header/components/web-console-header-extension/template.pug b/modules/web-console/frontend/app/components/web-console-header/components/web-console-header-extension/template.pug new file mode 100644 index 0000000000000..0545db18b96a7 --- /dev/null +++ b/modules/web-console/frontend/app/components/web-console-header/components/web-console-header-extension/template.pug @@ -0,0 +1,15 @@ +//- + Licensed to the Apache Software Foundation (ASF) under one or more + contributor license agreements. See the NOTICE file distributed with + this work for additional information regarding copyright ownership. + The ASF licenses this file to You under the Apache License, Version 2.0 + (the "License"); you may not use this file except in compliance with + the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/modules/web-console/frontend/app/components/web-console-header/index.js b/modules/web-console/frontend/app/components/web-console-header/index.js index e41e1ccc273e6..3fc5c66579fc1 100644 --- a/modules/web-console/frontend/app/components/web-console-header/index.js +++ b/modules/web-console/frontend/app/components/web-console-header/index.js @@ -17,7 +17,9 @@ import angular from 'angular'; import component from './component'; +import componentExtension from './components/web-console-header-extension/component'; export default angular .module('ignite-console.web-console-header', []) - .component('webConsoleHeader', component); + .component('webConsoleHeader', component) + .component('webConsoleHeaderExtension', componentExtension); diff --git a/modules/web-console/frontend/app/components/web-console-header/style.scss b/modules/web-console/frontend/app/components/web-console-header/style.scss index f221a00fedd90..db49c3e1b7aaf 100644 --- a/modules/web-console/frontend/app/components/web-console-header/style.scss +++ b/modules/web-console/frontend/app/components/web-console-header/style.scss @@ -18,7 +18,7 @@ web-console-header { @import "./../../../public/stylesheets/variables.scss"; - $nav-item-margin: 40px; + $nav-item-margin: 30px; $bottom-border-width: 4px; display: block; diff --git a/modules/web-console/frontend/app/components/web-console-header/template.pug b/modules/web-console/frontend/app/components/web-console-header/template.pug index 41586b7e75cc0..660874e12e2ec 100644 --- a/modules/web-console/frontend/app/components/web-console-header/template.pug +++ b/modules/web-console/frontend/app/components/web-console-header/template.pug @@ -16,10 +16,10 @@ .wch-notification(ng-show='$ctrl.UserNotifications.message' ng-bind-html='$ctrl.UserNotifications.message') -.wch-notification(ng-show='$ctrl.$rootScope.user.becomeUsed') - | You are currently viewing user #[strong {{$ctrl.$rootScope.user.firstName}} {{$ctrl.$rootScope.user.lastName}}] as 
administrator. #[a(ng-click='$ctrl.$rootScope.revertIdentity()') Revert to your identity?] +.wch-notification(ng-show='$root.user.becomeUsed') + | You are currently viewing user #[strong {{$root.user.firstName}} {{$root.user.lastName}}] as administrator. #[a(ng-click='$root.revertIdentity()') Revert to your identity?] -.wch-notification.wch-notification--demo(ng-if='$ctrl.$rootScope.IgniteDemoMode') +.wch-notification.wch-notification--demo(ng-if='$root.IgniteDemoMode') .container(ng-controller='demoController') | You are now in #[b Demo Mode]. #[a(ng-click='closeDemo();') Close Demo?] diff --git a/modules/web-console/frontend/app/modules/form/field/input/text.scss b/modules/web-console/frontend/app/modules/form/field/input/text.scss index 6882469de8ab7..658d740b8ebd2 100644 --- a/modules/web-console/frontend/app/modules/form/field/input/text.scss +++ b/modules/web-console/frontend/app/modules/form/field/input/text.scss @@ -17,6 +17,7 @@ .checkbox label .input-tip { position: initial; + overflow: visible; } .input-tip .fa-floppy-o { diff --git a/modules/web-console/frontend/app/modules/user/permissions.js b/modules/web-console/frontend/app/modules/user/permissions.js index b6f7c3a30cf42..616226abdbba0 100644 --- a/modules/web-console/frontend/app/modules/user/permissions.js +++ b/modules/web-console/frontend/app/modules/user/permissions.js @@ -16,8 +16,8 @@ */ const guest = ['login']; -const becomed = ['profile', 'configuration', 'query', 'demo']; -const user = becomed.concat(['logout']); +const becomed = ['profile', 'configuration']; +const user = becomed.concat(['logout', 'query', 'demo']); const admin = user.concat(['admin_page']); export default { diff --git a/modules/web-console/frontend/app/primitives/btn/index.scss b/modules/web-console/frontend/app/primitives/btn/index.scss index 94b5bd2a3a63c..2870cff49e646 100644 --- a/modules/web-console/frontend/app/primitives/btn/index.scss +++ b/modules/web-console/frontend/app/primitives/btn/index.scss @@ -242,6 
+242,14 @@ $btn-content-padding-with-border: 9px 11px; @include btn-ignite--link-dashed($color, $activeHover, $disabled); } +.btn-ignite--link-dashed-primary { + $color: $ignite-brand-primary; + $activeHover: change-color($color, $lightness: 26%); + $disabled: change-color($color, $saturation: 57%, $lightness: 68%); + + @include btn-ignite--link-dashed($color, $activeHover, $disabled); +} + .btn-ignite--link-dashed-secondary { $activeHover: change-color($ignite-brand-success, $lightness: 26%); @include btn-ignite--link-dashed($text-color, $activeHover, $gray-light); @@ -319,3 +327,12 @@ $btn-content-padding-with-border: 9px 11px; $color-hover: change-color($ignite-brand-success, $lightness: 26%) ); } + +.btn-ignite--link { + background: transparent; + + @include ignite-link( + $color: $ignite-brand-success, + $color-hover: change-color($ignite-brand-success, $lightness: 26%) + ); +} diff --git a/modules/web-console/frontend/app/primitives/form-field/index.scss b/modules/web-console/frontend/app/primitives/form-field/index.scss index f6d849661b4ac..01dd941c0f9e2 100644 --- a/modules/web-console/frontend/app/primitives/form-field/index.scss +++ b/modules/web-console/frontend/app/primitives/form-field/index.scss @@ -26,7 +26,7 @@ width: auto; } - .ignite-form-field__label { + &__label { float: left; width: 100%; margin: 0 10px 4px; diff --git a/modules/web-console/frontend/app/primitives/ui-grid-settings/index.scss b/modules/web-console/frontend/app/primitives/ui-grid-settings/index.scss index e0cf13907709a..56bab220a1e56 100644 --- a/modules/web-console/frontend/app/primitives/ui-grid-settings/index.scss +++ b/modules/web-console/frontend/app/primitives/ui-grid-settings/index.scss @@ -92,6 +92,18 @@ &--heading { cursor: default; + + sub { + bottom: 0; + height: 12px; + margin-left: 22px; + + font-family: Roboto; + font-size: 12px; + line-height: 1; + text-align: left; + color: $gray-light; + } } &--heading > span { diff --git 
a/modules/web-console/frontend/app/primitives/ui-grid/index.scss b/modules/web-console/frontend/app/primitives/ui-grid/index.scss index 5caa57cd0b1bc..a83cb579fecb8 100644 --- a/modules/web-console/frontend/app/primitives/ui-grid/index.scss +++ b/modules/web-console/frontend/app/primitives/ui-grid/index.scss @@ -157,6 +157,7 @@ .ui-grid-filter-container { padding-left: 20px; padding-right: 20px; + font-weight: normal; } .ng-hide + .ui-grid-header-cell-row .ui-grid-header-cell { @@ -519,6 +520,7 @@ } } +// Obsoleted, use grid-no-data. .ui-grid--ignite.no-data { position: relative; diff --git a/modules/web-console/frontend/package.json b/modules/web-console/frontend/package.json index 5b1734fa91cc1..7f7671a826923 100644 --- a/modules/web-console/frontend/package.json +++ b/modules/web-console/frontend/package.json @@ -49,7 +49,7 @@ "angular-strap": "2.3.12", "angular-translate": "2.16.0", "angular-tree-control": "0.2.28", - "angular-ui-grid": "4.0.7", + "angular-ui-grid": "4.0.11", "babel-core": "6.25.0", "babel-eslint": "7.2.3", "babel-loader": "7.1.1", @@ -62,6 +62,7 @@ "bootstrap-sass": "3.3.7", "brace": "0.10.0", "browser-update": "2.1.9", + "bson-objectid": "1.1.5", "copy-webpack-plugin": "4.0.1", "css-loader": "0.28.7", "eslint": "4.3.0", diff --git a/modules/web-console/frontend/public/images/checkbox-active.svg b/modules/web-console/frontend/public/images/checkbox-active.svg new file mode 100644 index 0000000000000..82e59c6da3258 --- /dev/null +++ b/modules/web-console/frontend/public/images/checkbox-active.svg @@ -0,0 +1,25 @@ + + + + 1F50951A-D0DF-4DE9-B464-5A57049A9426 + Created with sketchtool. 
+ + + + + + + + + + + + + + + + + + + + diff --git a/modules/web-console/frontend/public/images/checkbox.svg b/modules/web-console/frontend/public/images/checkbox.svg new file mode 100644 index 0000000000000..82264a97000d8 --- /dev/null +++ b/modules/web-console/frontend/public/images/checkbox.svg @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/modules/web-console/frontend/public/images/icons/alert.svg b/modules/web-console/frontend/public/images/icons/alert.svg new file mode 100644 index 0000000000000..6d1b3e274b44e --- /dev/null +++ b/modules/web-console/frontend/public/images/icons/alert.svg @@ -0,0 +1 @@ + diff --git a/modules/web-console/frontend/public/images/icons/checkmark.svg b/modules/web-console/frontend/public/images/icons/checkmark.svg new file mode 100644 index 0000000000000..a7896c78b9bde --- /dev/null +++ b/modules/web-console/frontend/public/images/icons/checkmark.svg @@ -0,0 +1,3 @@ + + + diff --git a/modules/web-console/frontend/public/images/icons/index.js b/modules/web-console/frontend/public/images/icons/index.js index 5ca63076361e2..48d9227833b88 100644 --- a/modules/web-console/frontend/public/images/icons/index.js +++ b/modules/web-console/frontend/public/images/icons/index.js @@ -26,4 +26,7 @@ export search from './search.svg'; export refresh from './refresh.svg'; export sort from './sort.svg'; export info from './info.svg'; +export checkmark from './checkmark.svg'; +export alert from './alert.svg'; +export attention from './attention.svg'; export connectedClusters from './connectedClusters.svg'; diff --git a/modules/web-console/frontend/public/images/icons/sort.svg b/modules/web-console/frontend/public/images/icons/sort.svg index 7e4bb523d7666..8195c79ae68c2 100644 --- a/modules/web-console/frontend/public/images/icons/sort.svg +++ b/modules/web-console/frontend/public/images/icons/sort.svg @@ -1 +1 @@ - \ No newline at end of file + diff --git a/modules/web-console/frontend/views/includes/header-left.pug 
b/modules/web-console/frontend/views/includes/header-left.pug index 6b7fe6aa8d6aa..42d4dcb6e41c6 100644 --- a/modules/web-console/frontend/views/includes/header-left.pug +++ b/modules/web-console/frontend/views/includes/header-left.pug @@ -18,7 +18,7 @@ a(ui-sref='base.configuration.tabs' ui-sref-active='active') | Configure -.wch-nav-item(ng-controller='notebookController') +.wch-nav-item(ng-if='!$root.user.becomeUsed' ng-controller='notebookController') a(ng-if='IgniteDemoMode' ui-sref='base.sql.demo' ui-sref-active='active') | Queries @@ -41,7 +41,7 @@ .wch-content(ignite-navbar) .wch-nav-item(ng-repeat='item in navbar.items') - div(ng-if='item.children' ng-click='$event.stopPropagation()' + div(ng-if='$root.user.becomeUsed ? item.canBecomed : item.children' ng-click='$event.stopPropagation()' ng-class='{active: $state.includes(item.sref)}' bs-dropdown='item.children' data-placement='bottom-left' @@ -53,5 +53,5 @@ span {{::item.text}} span.caret - a(ng-if='!item.children' ui-sref='{{item.sref}}' ui-sref-active='active') - | {{::item.text}} \ No newline at end of file + a(ng-if='$root.user.becomeUsed ? item.canBecomed : !item.children' ui-sref='{{item.sref}}' ui-sref-active='active') + | {{::item.text}} diff --git a/modules/web-console/frontend/views/includes/header-right.pug b/modules/web-console/frontend/views/includes/header-right.pug index 56fd1029e4056..b223f2e68422a 100644 --- a/modules/web-console/frontend/views/includes/header-right.pug +++ b/modules/web-console/frontend/views/includes/header-right.pug @@ -14,12 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. 
-.wch-demo-toggle(ng-controller='demoController') +.wch-demo-toggle(ng-if='!$root.user.becomeUsed' ng-controller='demoController') button.btn-ignite.btn-ignite--success( ng-if='!IgniteDemoMode' ng-click='startDemo()' ) Start Demo +web-console-header-extension + .wch-nav-item(ignite-userbar) div( ng-class='{active: $state.includes("base.settings")}' diff --git a/modules/web-console/frontend/views/settings/profile.tpl.pug b/modules/web-console/frontend/views/settings/profile.tpl.pug index 54c8105b6aa7d..0dc35e40861c2 100644 --- a/modules/web-console/frontend/views/settings/profile.tpl.pug +++ b/modules/web-console/frontend/views/settings/profile.tpl.pug @@ -17,6 +17,9 @@ mixin lbl(txt) label.col-sm-2.required.labelFormField #{txt} +mixin lbl-not-required(txt) + label.col-sm-2.labelFormField #{txt} + .row(ng-controller='profileController') .docs-content .docs-header @@ -36,7 +39,11 @@ mixin lbl(txt) .details-row +lbl('Email:') .col-xs-5.col-sm-4 - input#profile-email.form-control(ignite-on-enter-focus-move='profile-company' type='email' ng-model='user.email' placeholder='Input email' required) + input#profile-email.form-control(ignite-on-enter-focus-move='profile-phone' type='email' ng-model='user.email' placeholder='Input email' required) + .details-row + +lbl-not-required('Phone:') + .col-xs-5.col-sm-4 + input#profile-phone.form-control(ignite-on-enter-focus-move='profile-company' type='tel' ng-model='user.phone' placeholder='Input phone') .details-row +lbl('Company:') .col-xs-5.col-sm-4 From 0089fd7789abaed6155e4192c096d4b4c7e8b22a Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Thu, 14 Dec 2017 12:08:58 +0700 Subject: [PATCH 172/243] IGNITE-7199 Visor CMD: minor refactoring. 
(cherry picked from commit cd19564) --- .../commands/tasks/VisorTasksCommand.scala | 34 ++++++++----------- .../scala/org/apache/ignite/visor/visor.scala | 1 - 2 files changed, 15 insertions(+), 20 deletions(-) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/tasks/VisorTasksCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/tasks/VisorTasksCommand.scala index 0d6753eee5167..966bd6484f3ca 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/tasks/VisorTasksCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/tasks/VisorTasksCommand.scala @@ -17,23 +17,21 @@ package org.apache.ignite.visor.commands.tasks +import java.util.UUID + import org.apache.ignite._ import org.apache.ignite.events.EventType._ import org.apache.ignite.internal.util.scala.impl import org.apache.ignite.internal.util.typedef.X import org.apache.ignite.internal.util.{IgniteUtils => U} +import org.apache.ignite.internal.visor.event.{VisorGridEvent, VisorGridJobEvent, VisorGridTaskEvent} +import org.apache.ignite.internal.visor.node.{VisorNodeEventsCollectorTask, VisorNodeEventsCollectorTaskArg} +import org.apache.ignite.internal.visor.util.{VisorTaskUtils => TU} import org.apache.ignite.lang.IgniteUuid import org.apache.ignite.visor.VisorTag import org.apache.ignite.visor.commands.common.{VisorConsoleCommand, VisorTextTable} import org.apache.ignite.visor.visor._ -import java.util.UUID - -import org.apache.ignite.internal.visor.event.{VisorGridEvent, VisorGridJobEvent, VisorGridTaskEvent} -import org.apache.ignite.internal.visor.node.VisorNodeEventsCollectorTask -import org.apache.ignite.internal.visor.node.VisorNodeEventsCollectorTaskArg -import org.apache.ignite.internal.visor.util.VisorTaskUtils._ - import scala.collection.JavaConversions._ import scala.language.implicitConversions import scala.util.control.Breaks._ @@ -1217,15 +1215,13 @@ class VisorTasksCommand 
extends VisorConsoleCommand { eLst.foreach(e => { e.nodeIds.foreach(id => { - val host = sortAddresses(ignite.cluster.node(id).addresses).headOption - - if (host.isDefined) { - var eSet = hMap.getOrElse(host.get, Set.empty[VisorExecution]) + TU.sortAddresses(ignite.cluster.node(id).addresses).headOption.foreach(host => { + var eSet = hMap.getOrElse(host, Set.empty[VisorExecution]) eSet += e - hMap += (host.get -> eSet) - } + hMap += (host -> eSet) + }) }) }) @@ -1239,11 +1235,11 @@ class VisorTasksCommand extends VisorConsoleCommand { tasksT.maxCellWidth = 55 - tasksT #=( + tasksT #= ( "Task Name(@), Oldest/Latest & Rate", "Duration", "Executions" - ) + ) println("Tasks executed on host " + host + ":") @@ -1266,7 +1262,7 @@ class VisorTasksCommand extends VisorConsoleCommand { val n = t.execs.size - tasksT +=( + tasksT += ( ( t.taskNameVar, " ", @@ -1274,12 +1270,12 @@ class VisorTasksCommand extends VisorConsoleCommand { "Latest: " + formatDateTime(t.latest), " ", "Exec. Rate: " + n + " in " + X.timeSpan2HMSM(t.timeframe) - ), + ), ( "min: " + X.timeSpan2HMSM(t.minDuration), "avg: " + X.timeSpan2HMSM(t.avgDuration), "max: " + X.timeSpan2HMSM(t.maxDuration) - ), + ), ( "Total: " + n, " ", @@ -1288,8 +1284,8 @@ class VisorTasksCommand extends VisorConsoleCommand { "Fa: " + eE + " (" + formatInt(100 * eE / n) + "%)", "Un: " + uE + " (" + formatInt(100 * uE / n) + "%)", "Ti: " + tE + " (" + formatInt(100 * tE / n) + "%)" - ) ) + ) }) tasksT.render() diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala index 41a14d00a177b..5765579989e85 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala @@ -35,7 +35,6 @@ import jline.console.ConsoleReader import org.jetbrains.annotations.Nullable import java.io._ import java.lang.{Boolean => JavaBoolean} -import 
java.net._ import java.text._ import java.util.concurrent._ import java.util.{Collection => JavaCollection, HashSet => JavaHashSet, _} From 37f4bf032e97385c4fb1143c955e831d6a39789a Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Thu, 14 Dec 2017 16:12:03 +0700 Subject: [PATCH 173/243] IGNITE-7172 Minor fix. (cherry picked from commit 802a166) --- .../components/page-queries/template.tpl.pug | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/modules/web-console/frontend/app/components/page-queries/template.tpl.pug b/modules/web-console/frontend/app/components/page-queries/template.tpl.pug index b2173f7a69e4b..1574874e8cc26 100644 --- a/modules/web-console/frontend/app/components/page-queries/template.tpl.pug +++ b/modules/web-console/frontend/app/components/page-queries/template.tpl.pug @@ -364,12 +364,37 @@ mixin paragraph-query .row(ng-if='notebook' bs-affix style='margin-bottom: 20px;') +notebook-rename - ignite-information(data-title='With query notebook you can' style='margin-bottom: 30px') + -var example = `CREATE TABLE Person(ID INTEGER PRIMARY KEY, NAME VARCHAR(100));\nINSERT INTO Person(ID, NAME) VALUES (1, 'Ed'), (2, 'Ann'), (3, 'Emma');\nSELECT * FROM Person;`; + + ignite-information( + data-title='With query notebook you can' + style='margin-bottom: 30px' + ng-init=`example = '${example}'` + ) ul li Create any number of queries li Execute and explain SQL queries li Execute scan queries li View data in tabular form and as charts + .example + .group + .group-legend + label Examples: + .group-content + .sql-editor(ignite-ace='{\ + onLoad: aceInit({}),\ + theme: "chrome",\ + mode: "sql",\ + require: ["ace/ext/language_tools"],\ + showGutter: false,\ + advanced: {\ + enableSnippets: false,\ + enableBasicAutocompletion: true,\ + enableLiveAutocompletion: true\ + }}' + ng-model='example' + readonly='true' + ) div(ng-if='notebookLoadFailed' style='text-align: center') +notebook-error From 
61f6d178712e2c04aa620ac7da9cde3bc17759b0 Mon Sep 17 00:00:00 2001 From: Dmitriy Shabalin Date: Tue, 12 Dec 2017 21:05:37 +0700 Subject: [PATCH 174/243] IGNITE-7172 Added examples on queries page. (cherry picked from commit 156e167) --- .../app/components/page-queries/style.scss | 51 +++++++++++++++++++ .../directives/information/information.pug | 2 +- .../directives/information/information.scss | 5 +- .../public/images/icons/attention.svg | 3 ++ .../frontend/public/images/icons/index.js | 1 + 5 files changed, 59 insertions(+), 3 deletions(-) create mode 100644 modules/web-console/frontend/public/images/icons/attention.svg diff --git a/modules/web-console/frontend/app/components/page-queries/style.scss b/modules/web-console/frontend/app/components/page-queries/style.scss index 70136fd55a64b..1f315e81c970d 100644 --- a/modules/web-console/frontend/app/components/page-queries/style.scss +++ b/modules/web-console/frontend/app/components/page-queries/style.scss @@ -33,4 +33,55 @@ page-queries { .affix + .block-information { margin-top: 90px; } + + .block-information { + padding: 18px 10px 18px 36px; + + [ng-transclude] { + display: flex; + } + + [ignite-icon] { + top: 18px; + left: 14px; + width: 14px; + } + + ul { + min-width: 300px; + } + + .example { + flex-basis: 100%; + + .group { + background: white; + + .group-legend { + label { + @import "public/stylesheets/variables.scss"; + + font-size: 12px; + color: $gray-light; + background: none; + padding-left: 0; + } + } + + .group-content { + height: 66px; + + margin: 0; + padding: 5px 0; + + overflow: hidden; + } + } + } + + .sql-editor { + width: 100%; + margin: 0; + } + } } diff --git a/modules/web-console/frontend/app/directives/information/information.pug b/modules/web-console/frontend/app/directives/information/information.pug index b805d4a3d7d69..aa4d0e98e8d74 100644 --- a/modules/web-console/frontend/app/directives/information/information.pug +++ 
b/modules/web-console/frontend/app/directives/information/information.pug @@ -15,6 +15,6 @@ limitations under the License. .block-information - span.icon.fa.fa-info-circle(ng-if='title') + svg(ignite-icon='attention' ng-if='title') h3(ng-if='title') {{::title}} div(ng-transclude='') diff --git a/modules/web-console/frontend/app/directives/information/information.scss b/modules/web-console/frontend/app/directives/information/information.scss index 39f3c05143ba7..1992e13874629 100644 --- a/modules/web-console/frontend/app/directives/information/information.scss +++ b/modules/web-console/frontend/app/directives/information/information.scss @@ -30,12 +30,13 @@ $ignite-block-information-icon: #4a6785; margin: 20px 0; padding: 10px 10px 0 30px; + font-family: Roboto; + > h3 { - font-weight: bold; margin-bottom: 10px; } - > .icon { + > [ignite-icon] { cursor: default; color: $ignite-block-information-icon; diff --git a/modules/web-console/frontend/public/images/icons/attention.svg b/modules/web-console/frontend/public/images/icons/attention.svg new file mode 100644 index 0000000000000..cd8a3a15c206e --- /dev/null +++ b/modules/web-console/frontend/public/images/icons/attention.svg @@ -0,0 +1,3 @@ + + + diff --git a/modules/web-console/frontend/public/images/icons/index.js b/modules/web-console/frontend/public/images/icons/index.js index 48d9227833b88..1ccb32e3900a9 100644 --- a/modules/web-console/frontend/public/images/icons/index.js +++ b/modules/web-console/frontend/public/images/icons/index.js @@ -30,3 +30,4 @@ export checkmark from './checkmark.svg'; export alert from './alert.svg'; export attention from './attention.svg'; export connectedClusters from './connectedClusters.svg'; +export attention from './attention.svg'; From 24f0e47687f73e1e0646c5b18eef8dd018de8ff8 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Thu, 14 Dec 2017 17:31:14 +0700 Subject: [PATCH 175/243] IGNITE-7172 Minor fix. 
--- modules/web-console/frontend/public/images/icons/index.js | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/web-console/frontend/public/images/icons/index.js b/modules/web-console/frontend/public/images/icons/index.js index 1ccb32e3900a9..48d9227833b88 100644 --- a/modules/web-console/frontend/public/images/icons/index.js +++ b/modules/web-console/frontend/public/images/icons/index.js @@ -30,4 +30,3 @@ export checkmark from './checkmark.svg'; export alert from './alert.svg'; export attention from './attention.svg'; export connectedClusters from './connectedClusters.svg'; -export attention from './attention.svg'; From b4b528ec6577fea6bcdea57f63fab2ad416ab973 Mon Sep 17 00:00:00 2001 From: Ilya Borisov Date: Wed, 13 Dec 2017 11:19:10 +0700 Subject: [PATCH 176/243] IGNITE-7172 Fix minor CSS issues. (cherry picked from commit 24412f5) --- .../frontend/app/components/page-queries/style.scss | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/modules/web-console/frontend/app/components/page-queries/style.scss b/modules/web-console/frontend/app/components/page-queries/style.scss index 1f315e81c970d..a937abff8e981 100644 --- a/modules/web-console/frontend/app/components/page-queries/style.scss +++ b/modules/web-console/frontend/app/components/page-queries/style.scss @@ -45,6 +45,7 @@ page-queries { top: 18px; left: 14px; width: 14px; + color: inherit; } ul { @@ -56,6 +57,7 @@ page-queries { .group { background: white; + border-style: solid; .group-legend { label { @@ -63,13 +65,15 @@ page-queries { font-size: 12px; color: $gray-light; - background: none; - padding-left: 0; + background: #fcfcfc; + padding: 0; + vertical-align: 1px; } } .group-content { height: 66px; + border-radius: 5px; margin: 0; padding: 5px 0; From bb55707ff6197ac4ab550b6732829d6cd9a1ea7f Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Fri, 15 Dec 2017 10:19:22 +0700 Subject: [PATCH 177/243] IGNITE-7208 Fixed pug template. 
(cherry picked from commit 061ec6a) --- .../frontend/app/components/page-queries/template.tpl.pug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/web-console/frontend/app/components/page-queries/template.tpl.pug b/modules/web-console/frontend/app/components/page-queries/template.tpl.pug index 1574874e8cc26..4419d5cd13881 100644 --- a/modules/web-console/frontend/app/components/page-queries/template.tpl.pug +++ b/modules/web-console/frontend/app/components/page-queries/template.tpl.pug @@ -369,7 +369,7 @@ mixin paragraph-query ignite-information( data-title='With query notebook you can' style='margin-bottom: 30px' - ng-init=`example = '${example}'` + ng-init=`example = "${example}"` ) ul li Create any number of queries From 89652301b1c3423ff313d53cfb7f023ce56558e0 Mon Sep 17 00:00:00 2001 From: dpavlov Date: Mon, 23 Oct 2017 17:14:03 +0300 Subject: [PATCH 178/243] IGNITE-5741 Enable test after fix - Fixes #2865. Signed-off-by: Alexey Goncharuk (cherry picked from commit 1294bef) --- .../cache/persistence/db/IgnitePdsWholeClusterRestartTest.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsWholeClusterRestartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsWholeClusterRestartTest.java index 91380f07a47cf..f52f4467557a1 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsWholeClusterRestartTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsWholeClusterRestartTest.java @@ -108,8 +108,6 @@ private void deleteWorkFiles() throws IgniteCheckedException { * @throws Exception if failed. 
*/ public void testRestarts() throws Exception { - fail("https://issues.apache.org/jira/browse/IGNITE-5741"); - startGrids(GRID_CNT); ignite(0).active(true); From 16ab241fca1069c2094c039c8d64914f24a357ef Mon Sep 17 00:00:00 2001 From: "Andrey V. Mashenkov" Date: Wed, 13 Dec 2017 14:16:50 +0300 Subject: [PATCH 179/243] IGNITE-6423: PDS could be corrupted if partition have been evicted and owned again. This closes #3115. Fixed page memory update operations without checkpoint lock. Fixed page CRC calculation. Fixed outdated page handling. Added checkpoint lock hold assertions for memory update operations. Fixed incorrect tests. (cherry picked from commit e24d4d0) --- .../cache/persistence/IgnitePdsContinuousRestartTest.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsContinuousRestartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsContinuousRestartTest.java index 27b19501de8a9..fa89bf2afc64a 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsContinuousRestartTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsContinuousRestartTest.java @@ -221,11 +221,6 @@ public void testRebalncingDuringLoad_10_500_8_16() throws Exception { checkRebalancingDuringLoad(10, 500, 8, 16); } - /** {@inheritDoc} */ - @Override protected long getTestTimeout() { - return TimeUnit.MINUTES.toMillis(3); - } - /** * @throws Exception if failed. */ From 0781f1982d14994fd419087f9de0687d082b0c0f Mon Sep 17 00:00:00 2001 From: dkarachentsev Date: Fri, 8 Dec 2017 13:36:28 +0300 Subject: [PATCH 180/243] IGNITE-7086 - Backups are not updated when ReadFromBackup=true and ReadThrough happens. 
(cherry picked from commit c769838) --- .../processors/cache/GridCacheUtils.java | 105 ++++++++ .../distributed/dht/GridDhtCacheAdapter.java | 1 + .../dht/GridPartitionedGetFuture.java | 30 ++- .../dht/GridPartitionedSingleGetFuture.java | 27 +- .../distributed/near/GridNearGetFuture.java | 32 ++- .../store/CacheStoreReadFromBackupTest.java | 238 ++++++++++++++++++ ...eTransactionalStoreReadFromBackupTest.java | 32 +++ .../testsuites/IgniteCacheTestSuite.java | 4 + 8 files changed, 460 insertions(+), 9 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/cache/store/CacheStoreReadFromBackupTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/cache/store/CacheTransactionalStoreReadFromBackupTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java index 248f2aada0a52..4bf54bf2626e1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java @@ -66,10 +66,14 @@ import org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException; +import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter; +import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException; +import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal; import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry; 
import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import org.apache.ignite.internal.processors.dr.GridDrType; import org.apache.ignite.internal.processors.igfs.IgfsUtils; import org.apache.ignite.internal.processors.query.QueryUtils; import org.apache.ignite.internal.processors.query.schema.SchemaOperationException; @@ -84,6 +88,7 @@ import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteBiInClosure; import org.apache.ignite.lang.IgniteClosure; import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.lang.IgnitePredicate; @@ -1677,6 +1682,99 @@ else if (cfg.getCacheMode() == REPLICATED) { } } + /** + * Creates closure that saves initial value to backup partition. + *

        + * Useful only when store with readThrough is used. In a situation when + * get() on backup node returns successful result, it's expected that + * localPeek() will be successful as well. But it isn't true when + * primary node loaded value from local store; in this case backups + * will remain non-initialized. + *
        + * To meet that requirement the value requested from primary should + * be saved on backup during get(). + *

        + * + * @param topVer Topology version. + * @param log Logger. + * @param cctx Cache context. + * @param key Key. + * @param readThrough Read through. + * @param skipVals Skip values. + */ + @Nullable public static BackupPostProcessingClosure createBackupPostProcessingClosure( + final AffinityTopologyVersion topVer, + final IgniteLogger log, + final GridCacheContext cctx, + final @Nullable KeyCacheObject key, + boolean readThrough, + boolean skipVals + ) { + if (!readThrough || skipVals || + (key != null && !cctx.affinity().backupsByKey(key, topVer).contains(cctx.localNode()))) + return null; + + return new BackupPostProcessingClosure() { + private void process(KeyCacheObject key, CacheObject val, GridCacheVersion ver, GridDhtCacheAdapter colocated) { + while (true) { + GridCacheEntryEx entry = null; + + try { + entry = colocated.entryEx(key, topVer); + + entry.initialValue( + val, + ver, + 0, + 0, + false, + topVer, + GridDrType.DR_BACKUP, + true); + + break; + } + catch (GridCacheEntryRemovedException ignore) { + if (log.isDebugEnabled()) + log.debug("Got removed entry during postprocessing (will retry): " + + entry); + } + catch (IgniteCheckedException e) { + U.error(log, "Error saving backup value: " + entry, e); + } + catch (GridDhtInvalidPartitionException ignored) { + break; + } + finally { + if (entry != null) + cctx.evicts().touch(entry, topVer); + } + } + } + + @Override public void apply(CacheObject val, GridCacheVersion ver) { + process(key, val, ver, cctx.dht()); + } + + @Override public void apply(Collection infos) { + if (!F.isEmpty(infos)) { + GridCacheAffinityManager aff = cctx.affinity(); + ClusterNode locNode = cctx.localNode(); + + GridDhtCacheAdapter colocated = cctx.cache().isNear() + ? ((GridNearCacheAdapter)cctx.cache()).dht() + : cctx.dht(); + + for (GridCacheEntryInfo info : infos) { + // Save backup value. 
+ if (aff.backupsByKey(info.key(), topVer).contains(locNode)) + process(info.key(), info.value(), info.version(), colocated); + } + } + } + }; + } + /** * Checks if cache configuration belongs to persistent cache. * @@ -1752,4 +1850,11 @@ public static boolean isPersistenceEnabled(DataStorageConfiguration cfg) { return false; } + + /** + * + */ + public interface BackupPostProcessingClosure extends IgniteInClosure>, + IgniteBiInClosure{ + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java index bbb2c5b4983d3..28f9c7627ef5d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java @@ -287,6 +287,7 @@ public void dumpDebugInfo() { } } + /** {@inheritDoc} */ @Override public void onKernalStop() { super.onKernalStop(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java index 015eb82411560..3954cf97e0ff9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java @@ -41,9 +41,9 @@ import org.apache.ignite.internal.processors.cache.GridCacheMessage; import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy; import org.apache.ignite.internal.processors.cache.KeyCacheObject; -import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import 
org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetRequest; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetResponse; +import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.util.GridLeanMap; import org.apache.ignite.internal.util.future.GridFinishedFuture; @@ -57,6 +57,7 @@ import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.lang.IgniteUuid; import org.jetbrains.annotations.Nullable; @@ -224,7 +225,7 @@ private boolean isMini(IgniteInternalFuture f) { private void map( Collection keys, Map> mapped, - AffinityTopologyVersion topVer + final AffinityTopologyVersion topVer ) { Collection cacheNodes = CU.affinityNodes(cctx, topVer); @@ -331,7 +332,8 @@ private void map( })); } else { - MiniFuture fut = new MiniFuture(n, mappedKeys, topVer); + MiniFuture fut = new MiniFuture(n, mappedKeys, topVer, + CU.createBackupPostProcessingClosure(topVer, log, cctx, null, readThrough, skipVals)); GridCacheMessage req = new GridNearGetRequest( cctx.cacheId(), @@ -647,6 +649,9 @@ private class MiniFuture extends GridFutureAdapter> { /** Topology version on which this future was mapped. */ private final AffinityTopologyVersion topVer; + /** Post processing closure. */ + private final IgniteInClosure> postProcessingClos; + /** {@code True} if remapped after node left. */ private boolean remapped; @@ -654,11 +659,14 @@ private class MiniFuture extends GridFutureAdapter> { * @param node Node. * @param keys Keys. * @param topVer Topology version. + * @param postProcessingClos Post processing closure. 
*/ - MiniFuture(ClusterNode node, LinkedHashMap keys, AffinityTopologyVersion topVer) { + MiniFuture(ClusterNode node, LinkedHashMap keys, AffinityTopologyVersion topVer, + @Nullable IgniteInClosure> postProcessingClos) { this.node = node; this.keys = keys; this.topVer = topVer; + this.postProcessingClos = postProcessingClos; } /** @@ -774,6 +782,8 @@ void onResult(final GridNearGetResponse res) { } }), F.t(node, keys), topVer); + postProcessResult(res); + onDone(createResultMap(res.entries())); return; @@ -795,12 +805,16 @@ void onResult(final GridNearGetResponse res) { } }), F.t(node, keys), topVer); + postProcessResult(res); + onDone(createResultMap(res.entries())); } }); } else { try { + postProcessResult(res); + onDone(createResultMap(res.entries())); } catch (Exception e) { @@ -809,6 +823,14 @@ void onResult(final GridNearGetResponse res) { } } + /** + * @param res Response. + */ + private void postProcessResult(final GridNearGetResponse res) { + if (postProcessingClos != null) + postProcessingClos.apply(res.entries()); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(MiniFuture.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java index 61489e5ba3b6b..f761b9c42d2e6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java @@ -39,18 +39,20 @@ import org.apache.ignite.internal.processors.cache.GridCacheFuture; import org.apache.ignite.internal.processors.cache.GridCacheFutureAdapter; import org.apache.ignite.internal.processors.cache.GridCacheMessage; +import 
org.apache.ignite.internal.processors.cache.GridCacheUtils.BackupPostProcessingClosure; import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy; import org.apache.ignite.internal.processors.cache.KeyCacheObject; -import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.distributed.near.CacheVersionedValue; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetResponse; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearSingleGetRequest; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearSingleGetResponse; +import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.CI1; import org.apache.ignite.internal.util.typedef.CIX1; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteUuid; @@ -122,6 +124,9 @@ public class GridPartitionedSingleGetFuture extends GridCacheFutureAdapter> { /** Topology version on which this future was mapped. */ private AffinityTopologyVersion topVer; + /** Post processing closure. */ + private final BackupPostProcessingClosure postProcessingClos; + /** {@code True} if remapped after node left. */ private boolean remapped; @@ -842,17 +849,19 @@ private class MiniFuture extends GridFutureAdapter> { * @param keys Keys. * @param savedEntries Saved entries. * @param topVer Topology version. + * @param postProcessingClos Post processing closure. 
*/ MiniFuture( ClusterNode node, LinkedHashMap keys, Map savedEntries, - AffinityTopologyVersion topVer - ) { + AffinityTopologyVersion topVer, + BackupPostProcessingClosure postProcessingClos) { this.node = node; this.keys = keys; this.savedEntries = savedEntries; this.topVer = topVer; + this.postProcessingClos = postProcessingClos; } /** @@ -977,6 +986,8 @@ void onResult(final GridNearGetResponse res) { } }), F.t(node, keys), topVer); + postProcessResult(res); + // It is critical to call onDone after adding futures to compound list. onDone(loadEntries(node.id(), keys.keySet(), res.entries(), savedEntries, topVer)); @@ -998,13 +1009,26 @@ void onResult(final GridNearGetResponse res) { } }), F.t(node, keys), readyTopVer); + postProcessResult(res); + // It is critical to call onDone after adding futures to compound list. onDone(loadEntries(node.id(), keys.keySet(), res.entries(), savedEntries, topVer)); } }); } - else + else { + postProcessResult(res); + onDone(loadEntries(node.id(), keys.keySet(), res.entries(), savedEntries, topVer)); + } + } + + /** + * @param res Response. + */ + private void postProcessResult(final GridNearGetResponse res) { + if (postProcessingClos != null) + postProcessingClos.apply(res.entries()); } /** {@inheritDoc} */ diff --git a/modules/core/src/test/java/org/apache/ignite/cache/store/CacheStoreReadFromBackupTest.java b/modules/core/src/test/java/org/apache/ignite/cache/store/CacheStoreReadFromBackupTest.java new file mode 100644 index 0000000000000..d8913dcf1992f --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/cache/store/CacheStoreReadFromBackupTest.java @@ -0,0 +1,238 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.cache.store; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import javax.cache.Cache; +import javax.cache.configuration.FactoryBuilder; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.CacheWriteSynchronizationMode; +import org.apache.ignite.cache.affinity.Affinity; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.NearCacheConfiguration; +import org.apache.ignite.lang.IgniteBiInClosure; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; +import static org.apache.ignite.cache.CacheMode.PARTITIONED; +import static org.apache.ignite.cache.CacheMode.REPLICATED; +import static org.apache.ignite.cache.CachePeekMode.BACKUP; +import static org.apache.ignite.cache.CachePeekMode.PRIMARY; + +/** + * Checks that once value is read from store, it will be loaded in + * backups as well. 
+ */ +public class CacheStoreReadFromBackupTest extends GridCommonAbstractTest { + /** */ + public static final String CACHE_NAME = "cache"; + + /** */ + private static final Map storeMap = new ConcurrentHashMap<>(); + + /** */ + private CacheMode cacheMode = REPLICATED; + + /** */ + private int backups; + + /** Near. */ + private boolean near; + + /** */ + @SuppressWarnings("unchecked") + private CacheConfiguration cacheConfig(String cacheName) { + CacheConfiguration ccfg = new CacheConfiguration<>(cacheName); + + ccfg.setCacheMode(cacheMode); + ccfg.setBackups(backups); + ccfg.setAtomicityMode(atomicityMode()); + ccfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC); + ccfg.setAffinity(new RendezvousAffinityFunction(false, 1)); + ccfg.setReadThrough(true); + ccfg.setReadFromBackup(true); + ccfg.setCacheStoreFactory(FactoryBuilder.factoryOf(TestStore.class)); + + if (near) + ccfg.setNearConfiguration(new NearCacheConfiguration()); + + return ccfg; + } + + /** */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + return super.getConfiguration(gridName).setCacheConfiguration(cacheConfig(CACHE_NAME)); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + } + + /** + * @return Atomicity mode. + */ + protected CacheAtomicityMode atomicityMode() { + return ATOMIC; + } + + /** + * @throws Exception If failed. + */ + public void testReplicated() throws Exception { + cacheMode = REPLICATED; + backups = 0; + near = false; + + checkReadFromBackup(); + } + + /** + * @throws Exception If failed. + */ + public void testPartitioned() throws Exception { + cacheMode = PARTITIONED; + backups = 1; + near = false; + + checkReadFromBackup(); + } + + /** + * @throws Exception If failed. 
+ */ + public void testNearReplicated() throws Exception { + cacheMode = REPLICATED; + backups = 0; + near = true; + + checkReadFromBackup(); + } + + /** + * @throws Exception If failed. + */ + public void testNearPartitioned() throws Exception { + cacheMode = PARTITIONED; + backups = 1; + near = true; + + checkReadFromBackup(); + } + + /** + * @throws Exception If failed. + */ + private void checkReadFromBackup() throws Exception { + startGridsMultiThreaded(2, true); + + checkReadSingleFromBackup(); + checkReadAllFromBackup(); + } + + /** + * @throws Exception If failed. + */ + private void checkReadSingleFromBackup() throws Exception { + storeMap.put(1, "val-1"); + + IgniteCache cache0 = grid(0).cache(CACHE_NAME); + IgniteCache cache1 = grid(1).cache(CACHE_NAME); + + // Load value on primary and backup. + assertNotNull(cache0.get(1)); + assertNotNull(cache1.get(1)); + + if (cache0.localPeek(1, PRIMARY) != null) + assertNotNull(cache1.localPeek(1, BACKUP)); + else { + assertNotNull(cache0.localPeek(1, BACKUP)); + assertNotNull(cache1.localPeek(1, PRIMARY)); + } + } + + /** + * @throws Exception If failed. 
+ */ + private void checkReadAllFromBackup() throws Exception { + for (int i = 0; i < 100; i++) + storeMap.put(i, String.valueOf(i)); + + IgniteCache cache0 = grid(0).cache(CACHE_NAME); + IgniteCache cache1 = grid(1).cache(CACHE_NAME); + + assertEquals(storeMap.size(), cache0.getAll(storeMap.keySet()).size()); + assertEquals(storeMap.size(), cache1.getAll(storeMap.keySet()).size()); + + Affinity aff = grid(0).affinity(CACHE_NAME); + ClusterNode node0 = grid(0).cluster().localNode(); + + for (Integer key : storeMap.keySet()) { + if (aff.isPrimary(node0, key)) { + assertNotNull(cache0.localPeek(key, PRIMARY)); + assertNotNull(cache1.localPeek(key, BACKUP)); + } + else { + assertNotNull(cache0.localPeek(key, BACKUP)); + assertNotNull(cache1.localPeek(key, PRIMARY)); + } + } + } + + /** + * + */ + public static class TestStore extends CacheStoreAdapter { + /** */ + public TestStore() { + } + + /** {@inheritDoc} */ + @Override public void loadCache(IgniteBiInClosure clo, Object... args) { + for (Map.Entry e : storeMap.entrySet()) + clo.apply(e.getKey(), e.getValue()); + } + + /** {@inheritDoc} */ + @Override public String load(Integer key) { + return storeMap.get(key); + } + + /** {@inheritDoc} */ + @Override public void write(Cache.Entry entry) { + storeMap.put(entry.getKey(), entry.getValue()); + } + + /** {@inheritDoc} */ + @SuppressWarnings("SuspiciousMethodCalls") + @Override public void delete(Object key) { + storeMap.remove(key); + } + + /** {@inheritDoc} */ + @Override public void sessionEnd(boolean commit) { + // No-op. 
+ } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/cache/store/CacheTransactionalStoreReadFromBackupTest.java b/modules/core/src/test/java/org/apache/ignite/cache/store/CacheTransactionalStoreReadFromBackupTest.java new file mode 100644 index 0000000000000..4837936621f46 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/cache/store/CacheTransactionalStoreReadFromBackupTest.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.cache.store; + +import org.apache.ignite.cache.CacheAtomicityMode; + +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; + +/** + * + */ +public class CacheTransactionalStoreReadFromBackupTest extends CacheStoreReadFromBackupTest { + /** {@inheritDoc} */ + @Override protected CacheAtomicityMode atomicityMode() { + return TRANSACTIONAL; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java index e3ebbc16e00fe..b309b0af3db47 100755 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java @@ -20,6 +20,8 @@ import java.util.Set; import junit.framework.TestSuite; import org.apache.ignite.cache.IgniteWarmupClosureSelfTest; +import org.apache.ignite.cache.store.CacheStoreReadFromBackupTest; +import org.apache.ignite.cache.store.CacheTransactionalStoreReadFromBackupTest; import org.apache.ignite.cache.store.GridCacheBalancingStoreSelfTest; import org.apache.ignite.cache.store.GridCacheLoadOnlyStoreAdapterSelfTest; import org.apache.ignite.cache.store.GridStoreLoadCacheTest; @@ -318,6 +320,8 @@ public static TestSuite suite(Set ignoredTests) throws Exception { suite.addTestSuite(IgniteIncompleteCacheObjectSelfTest.class); suite.addTestSuite(GridStoreLoadCacheTest.class); + suite.addTestSuite(CacheStoreReadFromBackupTest.class); + suite.addTestSuite(CacheTransactionalStoreReadFromBackupTest.class); return suite; } From 1c32e658fa4517f07a674c15859f99cbb9dfadaa Mon Sep 17 00:00:00 2001 From: nikolay_tikhonov Date: Mon, 27 Nov 2017 13:08:00 +0300 Subject: [PATCH 181/243] IGNITE-2766 Ensure that cache is available after client ID changes. This closes #3077. 
Signed-off-by: nikolay_tikhonov (cherry picked from commit 5e7a556) --- .../internal/IgniteClientReconnectCacheTest.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectCacheTest.java b/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectCacheTest.java index 2e1f2f3e00d11..1c10bf1b86771 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectCacheTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectCacheTest.java @@ -85,6 +85,7 @@ import static org.apache.ignite.transactions.TransactionConcurrency.OPTIMISTIC; import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC; import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ; +import static org.junit.Assert.assertNotEquals; /** * @@ -1284,13 +1285,15 @@ static class TestClass5 implements Serializable { * @param c Cache operation closure. * @throws Exception If failed. 
*/ - private void checkOperationInProgressFails(IgniteEx client, + private void checkOperationInProgressFails(final IgniteEx client, final CacheConfiguration ccfg, Class msgToBlock, final IgniteInClosure> c) throws Exception { Ignite srv = clientRouter(client); + final UUID id = client.localNode().id(); + TestTcpDiscoverySpi srvSpi = spi(srv); final IgniteCache cache = client.getOrCreateCache(ccfg); @@ -1306,6 +1309,8 @@ private void checkOperationInProgressFails(IgniteEx client, IgniteClientDisconnectedException e0 = null; try { + assertEquals(id, client.localNode().id()); + c.apply(cache); fail(); @@ -1329,6 +1334,8 @@ private void checkOperationInProgressFails(IgniteEx client, e0.reconnectFuture().get(); + assertNotEquals(id, client.localNode().id()); + c.apply(cache); return null; @@ -1351,6 +1358,8 @@ private void checkOperationInProgressFails(IgniteEx client, ((TestCommunicationSpi)grid(i).configuration().getCommunicationSpi()).stopBlock(false); } + assertNotEquals(id, client.localNode().id()); + cache.put(1, 1); GridTestUtils.waitForCondition(new GridAbsPredicate() { @@ -1508,4 +1517,4 @@ void stopBlock(boolean snd) { } } } -} \ No newline at end of file +} From e19cf5030930ecdb0ef6cbfc6790e46e18bf9207 Mon Sep 17 00:00:00 2001 From: dpavlov Date: Wed, 1 Nov 2017 03:17:00 +0300 Subject: [PATCH 182/243] ignite-6788 Ignite WAL reader fails on Tx marker record for persistent store with new style folder naming Signed-off-by: agura (cherry picked from commit 5542c70) --- .../reader/StandaloneGridKernalContext.java | 20 +- .../db/wal/reader/IgniteWalReaderTest.java | 283 +++++++++++------- 2 files changed, 195 insertions(+), 108 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java index c0c3650fb0fe1..485458b83e93b 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java @@ -18,7 +18,6 @@ package org.apache.ignite.internal.processors.cache.persistence.wal.reader; import java.io.File; -import java.io.Serializable; import java.lang.reflect.Field; import java.util.Iterator; import java.util.List; @@ -49,8 +48,8 @@ import org.apache.ignite.internal.processors.affinity.GridAffinityProcessor; import org.apache.ignite.internal.processors.cache.GridCacheProcessor; import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl; -import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFoldersResolver; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; +import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFoldersResolver; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.processors.closure.GridClosureProcessor; import org.apache.ignite.internal.processors.cluster.ClusterProcessor; @@ -95,6 +94,9 @@ * Dummy grid kernal context */ public class StandaloneGridKernalContext implements GridKernalContext { + /** Binary metadata file store folder. */ + public static final String BINARY_META_FOLDER = "binary_meta"; + /** Config for fake Ignite instance. */ private final IgniteConfiguration cfg; @@ -123,13 +125,12 @@ public class StandaloneGridKernalContext implements GridKernalContext { * Providing {@code null} will disable unmarshall for non primitive objects, BinaryObjects will be provided
        */ StandaloneGridKernalContext(IgniteLogger log, - @Nullable final File binaryMetadataFileStoreDir, - @Nullable final File marshallerMappingFileStoreDir) throws IgniteCheckedException { + @Nullable File binaryMetadataFileStoreDir, + @Nullable File marshallerMappingFileStoreDir) throws IgniteCheckedException { this.log = log; try { - pluginProc = new StandaloneIgnitePluginProcessor( - this, config()); + pluginProc = new StandaloneIgnitePluginProcessor(this, config()); } catch (IgniteCheckedException e) { throw new IllegalStateException("Must not fail on empty providers list.", e); @@ -137,7 +138,12 @@ public class StandaloneGridKernalContext implements GridKernalContext { this.marshallerCtx = new MarshallerContextImpl(null); this.cfg = prepareIgniteConfiguration(); - this.cacheObjProcessor = binaryMetadataFileStoreDir != null ? binaryProcessor(this, binaryMetadataFileStoreDir) : null; + + // Fake folder provided to perform processor startup on empty folder. + if (binaryMetadataFileStoreDir == null) + binaryMetadataFileStoreDir = new File(BINARY_META_FOLDER).getAbsoluteFile(); + + this.cacheObjProcessor = binaryProcessor(this, binaryMetadataFileStoreDir); if (marshallerMappingFileStoreDir != null) { marshallerCtx.setMarshallerMappingFileStoreDir(marshallerMappingFileStoreDir); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java index 90f6ef5d4b5c7..1d11820a24a36 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java @@ -31,6 +31,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.TreeMap; import java.util.UUID; 
import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -40,6 +41,7 @@ import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteEvents; +import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheRebalanceMode; @@ -68,8 +70,12 @@ import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteBiInClosure; import org.apache.ignite.lang.IgniteBiTuple; +import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.lang.IgnitePredicate; +import org.apache.ignite.logger.NullLogger; +import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.apache.ignite.transactions.Transaction; import org.jetbrains.annotations.NotNull; @@ -77,6 +83,7 @@ import org.junit.Assert; import static org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_ARCHIVED; +import static org.apache.ignite.internal.processors.cache.GridCacheOperation.CREATE; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.DELETE; import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; import static org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor.genNewStyleSubfolderName; @@ -94,12 +101,6 @@ public class IgniteWalReaderTest extends GridCommonAbstractTest { /** additional cache for testing different combinations of types in WAL */ private static final String CACHE_ADDL_NAME = "cache1"; - /** Delete DB dir before test. */ - private static final boolean deleteBefore = true; - - /** Delete DB dir after test. 
*/ - private static final boolean deleteAfter = true; - /** Dump records to logger. Should be false for non local run */ private static final boolean dumpRecords = false; @@ -115,6 +116,9 @@ public class IgniteWalReaderTest extends GridCommonAbstractTest { /** Custom wal mode. */ private WALMode customWalMode; + /** Clear properties in afterTest method() */ + private boolean clearProperties; + /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { final IgniteConfiguration cfg = super.getConfiguration(gridName); @@ -151,23 +155,22 @@ public class IgniteWalReaderTest extends GridCommonAbstractTest { @Override protected void beforeTest() throws Exception { stopAllGrids(); - if (deleteBefore) - deleteWorkFiles(); + deleteWorkFiles(); } /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { stopAllGrids(); - if (deleteAfter) - deleteWorkFiles(); + if (clearProperties) + System.clearProperty(IgniteSystemProperties.IGNITE_WAL_LOG_TX_RECORDS); } /** * @throws IgniteCheckedException If failed. 
*/ - private void deleteWorkFiles() throws IgniteCheckedException { - deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false)); + private void deleteWorkFiles() throws Exception { + GridTestUtils.deleteDbFiles(); } /** @@ -197,17 +200,12 @@ public void testFillWalAndReadRecords() throws Exception { final int cntUsingMockIter = iterateAndCount(it, false); log.info("Total records loaded " + cntUsingMockIter); - assert cntUsingMockIter > 0; - assert cntUsingMockIter > cacheObjectsToWrite; + assertTrue(cntUsingMockIter > 0); + assertTrue(cntUsingMockIter > cacheObjectsToWrite); final File walArchiveDirWithConsistentId = new File(walArchive, subfolderName); final File walWorkDirWithConsistentId = new File(wal, subfolderName); - - final File binaryMeta = U.resolveWorkDirectory(workDir, "binary_meta", false); - final File binaryMetaWithConsId = new File(binaryMeta, subfolderName); - final File marshaller = U.resolveWorkDirectory(workDir, "marshaller", false); - - final IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log, PAGE_SIZE, binaryMetaWithConsId, marshaller); + final IgniteWalIteratorFactory factory = createWalIteratorFactory(workDir, subfolderName); final int cntArchiveDir = iterateAndCount(factory.iteratorArchiveDirectory(walArchiveDirWithConsistentId)); log.info("Total records loaded using directory : " + cntArchiveDir); @@ -218,12 +216,13 @@ public void testFillWalAndReadRecords() throws Exception { log.info("Total records loaded using archive directory (file-by-file): " + cntArchiveFileByFile); - assert cntArchiveFileByFile > cacheObjectsToWrite; - assert cntArchiveDir > cacheObjectsToWrite; - assert cntArchiveDir == cntArchiveFileByFile; + assertTrue(cntArchiveFileByFile > cacheObjectsToWrite); + assertTrue(cntArchiveDir > cacheObjectsToWrite); + assertTrue(cntArchiveDir == cntArchiveFileByFile); //really count2 may be less because work dir correct loading is not supported yet - assert cntUsingMockIter >= 
cntArchiveDir - : "Mock based reader loaded " + cntUsingMockIter + " records but standalone has loaded only " + cntArchiveDir; + assertTrue("Mock based reader loaded " + cntUsingMockIter + " records " + + "but standalone has loaded only " + cntArchiveDir, + cntUsingMockIter >= cntArchiveDir); final File[] workFiles = walWorkDirWithConsistentId.listFiles(FileWriteAheadLogManager.WAL_SEGMENT_FILE_FILTER); @@ -231,10 +230,10 @@ public void testFillWalAndReadRecords() throws Exception { log.info("Total records loaded from work: " + cntWork); - assert cntWork + cntArchiveFileByFile == cntUsingMockIter - : "Work iterator loaded [" + cntWork + "] " + - "Archive iterator loaded [" + cntArchiveFileByFile + "]; " + - "mock iterator [" + cntUsingMockIter + "]"; + assertTrue("Work iterator loaded [" + cntWork + "] " + + "Archive iterator loaded [" + cntArchiveFileByFile + "]; " + + "mock iterator [" + cntUsingMockIter + "]", + cntWork + cntArchiveFileByFile == cntUsingMockIter); } /** @@ -312,7 +311,7 @@ public void testArchiveCompletedEventFired() throws Exception { putDummyRecords(ignite, 500); stopGrid("node0"); - assert evtRecorded.get(); + assertTrue(evtRecorded.get()); } /** @@ -407,7 +406,7 @@ public void testArchiveIncompleteSegmentAfterInactivity() throws Exception { archiveSegmentForInactivity.await(archiveIncompleteSegmentAfterInactivityMs + 1001, TimeUnit.MILLISECONDS); stopGrid("node0"); - assert recordedAfterSleep; + assertTrue(recordedAfterSleep); } /** @@ -460,8 +459,8 @@ public void testTxFillWalAndExtractDataRecords() throws Exception { binaryMetaWithConsId, marshallerMapping); - final BiConsumer objConsumer = new BiConsumer() { - @Override public void accept(Object key, Object val) { + final IgniteBiInClosure objConsumer = new IgniteBiInClosure() { + @Override public void apply(Object key, Object val) { boolean rmv = remove(ctrlMap, key, val); if (!rmv) log.error("Unable to remove Key and value from control Map K:[" + key + "] V: [" + val + "]"); @@ -471,14 
+470,14 @@ public void testTxFillWalAndExtractDataRecords() throws Exception { assertEquals(indexedObj.iVal, indexedObj.jVal); assertEquals(indexedObj.iVal, key); for (byte datum : indexedObj.getData()) { - assert datum >= 'A' && datum <= 'A' + 10; + assertTrue(datum >= 'A' && datum <= 'A' + 10); } } } }; scanIterateAndCount(factory, workDir, subfolderName, cntEntries, txCnt, objConsumer, null); - assert ctrlMap.isEmpty() : " Control Map is not empty after reading entries " + ctrlMap; + assertTrue(" Control Map is not empty after reading entries " + ctrlMap, ctrlMap.isEmpty()); } /** @@ -510,8 +509,8 @@ private void scanIterateAndCount( final String subfolderName, final int minCntEntries, final int minTxCnt, - @Nullable final BiConsumer objConsumer, - @Nullable final Consumer dataRecordHnd) throws IgniteCheckedException { + @Nullable final IgniteBiInClosure objConsumer, + @Nullable final IgniteInClosure dataRecordHnd) throws IgniteCheckedException { final File db = U.resolveWorkDirectory(workDir, DFLT_STORE_DIR, false); final File wal = new File(db, "wal"); @@ -544,8 +543,12 @@ private void scanIterateAndCount( final int entriesWork = valuesSum(cntWork.values()); log.info("Archive directory: Tx found " + txCntObservedWork + " entries " + entriesWork); - assert entriesArch + entriesWork >= minCntEntries; - assert txCntObservedWork + txCntObservedArch >= minTxCnt; + assertTrue("entriesArch=" + entriesArch + " + entriesWork=" + entriesWork + + " >= minCntEntries=" + minCntEntries, + entriesArch + entriesWork >= minCntEntries); + assertTrue("txCntObservedWork=" + txCntObservedWork + " + txCntObservedArch=" + txCntObservedArch + + " >= minTxCnt=" + minTxCnt, + txCntObservedWork + txCntObservedArch >= minTxCnt); } /** @@ -611,11 +614,9 @@ public void testFillWalWithDifferentTypes() throws Exception { final File binaryMetaWithNodeSubfolder = new File(binaryMeta, subfolderName); final File marshallerMapping = U.resolveWorkDirectory(workDir, "marshaller", false); - final 
IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log, PAGE_SIZE, - binaryMetaWithNodeSubfolder, - marshallerMapping); - final BiConsumer objConsumer = new BiConsumer() { - @Override public void accept(Object key, Object val) { + final IgniteWalIteratorFactory factory = createWalIteratorFactory(workDir, subfolderName); + final IgniteBiInClosure objConsumer = new IgniteBiInClosure() { + @Override public void apply(Object key, Object val) { log.info("K: [" + key + ", " + (key != null ? key.getClass().getName() : "?") + "]" + " V: [" + val + ", " + @@ -625,12 +626,12 @@ public void testFillWalWithDifferentTypes() throws Exception { String msg = "Unable to remove pair from control map " + "K: [" + key + "] V: [" + val + "]"; log.error(msg); } - assert !(val instanceof BinaryObject); + assertFalse(val instanceof BinaryObject); } }; - final Consumer toStrChecker = new Consumer() { - @Override public void accept(DataRecord record) { + final IgniteInClosure toStrChecker = new IgniteInClosure() { + @Override public void apply(DataRecord record) { String strRepresentation = record.toString(); for (Iterator iter = ctrlStringsToSearch.iterator(); iter.hasNext(); ) { final String next = iter.next(); @@ -643,17 +644,17 @@ public void testFillWalWithDifferentTypes() throws Exception { }; scanIterateAndCount(factory, workDir, subfolderName, cntEntries, 0, objConsumer, toStrChecker); - assert ctrlMap.isEmpty() : " Control Map is not empty after reading entries: " + ctrlMap; - assert ctrlStringsToSearch.isEmpty() : " Control Map for strings in entries is not empty after" + - " reading records: " + ctrlStringsToSearch; + assertTrue(" Control Map is not empty after reading entries: " + ctrlMap, ctrlMap.isEmpty()); + assertTrue(" Control Map for strings in entries is not empty after" + + " reading records: " + ctrlStringsToSearch, ctrlStringsToSearch.isEmpty()); //Validate same WAL log with flag binary objects only final IgniteWalIteratorFactory keepBinFactory = new 
IgniteWalIteratorFactory(log, PAGE_SIZE, binaryMetaWithNodeSubfolder, marshallerMapping, true); - final BiConsumer binObjConsumer = new BiConsumer() { - @Override public void accept(Object key, Object val) { + final IgniteBiInClosure binObjConsumer = new IgniteBiInClosure() { + @Override public void apply(Object key, Object val) { log.info("K(KeepBinary): [" + key + ", " + (key != null ? key.getClass().getName() : "?") + "]" + " V(KeepBinary): [" + val + ", " + @@ -693,15 +694,15 @@ else if (val instanceof BinaryObject) { byte data[] = binaryObj.field("data"); for (byte datum : data) { - assert datum >= 'A' && datum <= 'A' + 10; + assertTrue(datum >= 'A' && datum <= 'A' + 10); } } } } }; - final Consumer binObjToStrChecker = new Consumer() { - @Override public void accept(DataRecord record) { + final IgniteInClosure binObjToStrChecker = new IgniteInClosure() { + @Override public void apply(DataRecord record) { String strRepresentation = record.toString(); for (Iterator iter = ctrlStringsForBinaryObjSearch.iterator(); iter.hasNext(); ) { final String next = iter.next(); @@ -714,9 +715,11 @@ else if (val instanceof BinaryObject) { }; scanIterateAndCount(keepBinFactory, workDir, subfolderName, cntEntries, 0, binObjConsumer, binObjToStrChecker); - assert ctrlMapForBinaryObjects.isEmpty() : " Control Map is not empty after reading entries: " + ctrlMapForBinaryObjects; - assert ctrlStringsForBinaryObjSearch.isEmpty() : " Control Map for strings in entries is not empty after" + - " reading records: " + ctrlStringsForBinaryObjSearch; + assertTrue(" Control Map is not empty after reading entries: " + ctrlMapForBinaryObjects, + ctrlMapForBinaryObjects.isEmpty()); + assertTrue(" Control Map for strings in entries is not empty after" + + " reading records: " + ctrlStringsForBinaryObjSearch, + ctrlStringsForBinaryObjSearch.isEmpty()); } @@ -764,7 +767,7 @@ public void testFillWalForExactSegmentsCount() throws Exception { stopGrid("node0"); final String workDir = 
U.defaultWorkDirectory(); - final IgniteWalIteratorFactory factory = createWalIteratorFactory(subfolderName, workDir); + final IgniteWalIteratorFactory factory = createWalIteratorFactory(workDir, subfolderName); scanIterateAndCount(factory, workDir, subfolderName, totalEntries, 0, null, null); } @@ -787,7 +790,7 @@ public void testReadEmptyWal() throws Exception { stopGrid("node0"); final String workDir = U.defaultWorkDirectory(); - final IgniteWalIteratorFactory factory = createWalIteratorFactory(subfolderName, workDir); + final IgniteWalIteratorFactory factory = createWalIteratorFactory(workDir, subfolderName); scanIterateAndCount(factory, workDir, subfolderName, 0, 0, null, null); } @@ -859,13 +862,13 @@ private void runRemoveOperationTest(CacheAtomicityMode mode) throws Exception { stopGrid("node0"); final String workDir = U.defaultWorkDirectory(); - final IgniteWalIteratorFactory factory = createWalIteratorFactory(subfolderName, workDir); + final IgniteWalIteratorFactory factory = createWalIteratorFactory(workDir, subfolderName); final StringBuilder builder = new StringBuilder(); final Map operationsFound = new EnumMap<>(GridCacheOperation.class); - scanIterateAndCount(factory, workDir, subfolderName, 0, 0, null, new Consumer() { - @Override public void accept(DataRecord dataRecord) { + scanIterateAndCount(factory, workDir, subfolderName, 0, 0, null, new IgniteInClosure() { + @Override public void apply(DataRecord dataRecord) { final List entries = dataRecord.writeEntries(); builder.append("{"); @@ -902,14 +905,121 @@ private void runRemoveOperationTest(CacheAtomicityMode mode) throws Exception { } /** - * @param subfolderName Subfolder name. + * Tests transaction generation and WAL for putAll cache operation. + * @throws Exception if failed. 
+ */ + public void testPutAllTxIntoTwoNodes() throws Exception { + final Ignite ignite = startGrid("node0"); + final Ignite ignite1 = startGrid(1); + + ignite.active(true); + + final Map map = new TreeMap<>(); + + final int cntEntries = 1000; + for (int i = 0; i < cntEntries; i++) + map.put(i, new IndexedObject(i)); + + ignite.cache(CACHE_NAME).putAll(map); + + ignite.active(false); + + final String subfolderName = genDbSubfolderName(ignite, 0); + final String subfolderName1 = genDbSubfolderName(ignite1, 1); + + stopAllGrids(); + + final String workDir = U.defaultWorkDirectory(); + final IgniteWalIteratorFactory factory = createWalIteratorFactory(workDir, subfolderName); + + final StringBuilder builder = new StringBuilder(); + final Map operationsFound = new EnumMap<>(GridCacheOperation.class); + + final IgniteInClosure drHnd = new IgniteInClosure() { + @Override public void apply(DataRecord dataRecord) { + final List entries = dataRecord.writeEntries(); + + builder.append("{"); + for (DataEntry entry : entries) { + final GridCacheOperation op = entry.op(); + final Integer cnt = operationsFound.get(op); + + operationsFound.put(op, cnt == null ? 
1 : (cnt + 1)); + + if (entry instanceof UnwrapDataEntry) { + final UnwrapDataEntry entry1 = (UnwrapDataEntry)entry; + + builder.append(entry1.op()).append(" for ").append(entry1.unwrappedKey()); + final GridCacheVersion ver = entry.nearXidVersion(); + + builder.append(", "); + + if (ver != null) + builder.append("tx=").append(ver).append(", "); + } + } + + builder.append("}\n"); + } + }; + scanIterateAndCount(factory, workDir, subfolderName, 1, 1, null, drHnd); + scanIterateAndCount(factory, workDir, subfolderName1, 1, 1, null, drHnd); + + final Integer createsFound = operationsFound.get(CREATE); + + if (log.isInfoEnabled()) + log.info(builder.toString()); + + assertTrue("Create operations should be found in log: " + operationsFound, + createsFound != null && createsFound > 0); + + assertTrue("Create operations count should be at least " + cntEntries + " in log: " + operationsFound, + createsFound != null && createsFound >= cntEntries); + + } + + /** + * Tests transaction generation and WAL for putAll cache operation. + * @throws Exception if failed. + */ + public void testTxRecordsReadWoBinaryMeta() throws Exception { + clearProperties = true; + System.setProperty(IgniteSystemProperties.IGNITE_WAL_LOG_TX_RECORDS, "true"); + + final Ignite ignite = startGrid("node0"); + ignite.active(true); + + final Map map = new TreeMap<>(); + + for (int i = 0; i < 1000; i++) + map.put(i, new IndexedObject(i)); + + ignite.cache(CACHE_NAME).putAll(map); + + ignite.active(false); + + final String workDir = U.defaultWorkDirectory(); + final String subfolderName = genDbSubfolderName(ignite, 0); + stopAllGrids(); + + IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(new NullLogger(), + PAGE_SIZE, + null, + null, + false); + + scanIterateAndCount(factory, workDir, subfolderName, 1000, 1, null, null); + } + + /** * @param workDir Work directory. + * @param subfolderName Subfolder name. * @return WAL iterator factory. * @throws IgniteCheckedException If failed. 
*/ @NotNull private IgniteWalIteratorFactory createWalIteratorFactory( - String subfolderName, - String workDir + final String workDir, + final String subfolderName ) throws IgniteCheckedException { final File binaryMeta = U.resolveWorkDirectory(workDir, "binary_meta", false); final File binaryMetaWithConsId = new File(binaryMeta, subfolderName); @@ -918,7 +1028,8 @@ private void runRemoveOperationTest(CacheAtomicityMode mode) throws Exception { return new IgniteWalIteratorFactory(log, PAGE_SIZE, binaryMetaWithConsId, - marshallerMapping); + marshallerMapping, + false); } /** @@ -943,8 +1054,8 @@ private int valuesSum(Iterable values) { */ private Map iterateAndCountDataRecord( final WALIterator walIter, - @Nullable final BiConsumer cacheObjHnd, - @Nullable final Consumer dataRecordHnd) throws IgniteCheckedException { + @Nullable final IgniteBiInClosure cacheObjHnd, + @Nullable final IgniteInClosure dataRecordHnd) throws IgniteCheckedException { final Map entriesUnderTxFound = new HashMap<>(); @@ -957,7 +1068,7 @@ private Map iterateAndCountDataRecord( final DataRecord dataRecord = (DataRecord)walRecord; if (dataRecordHnd != null) - dataRecordHnd.accept(dataRecord); + dataRecordHnd.apply(dataRecord); final List entries = dataRecord.writeEntries(); for (DataEntry entry : entries) { @@ -990,7 +1101,7 @@ else if (entry instanceof LazyDataEntry) { "; Value: " + unwrappedValObj); if (cacheObjHnd != null && (unwrappedKeyObj != null || unwrappedValObj != null)) - cacheObjHnd.accept(unwrappedKeyObj, unwrappedValObj); + cacheObjHnd.apply(unwrappedKeyObj, unwrappedValObj); final Integer entriesUnderTx = entriesUnderTxFound.get(globalTxId); entriesUnderTxFound.put(globalTxId, entriesUnderTx == null ? 1 : entriesUnderTx + 1); @@ -1008,36 +1119,6 @@ else if (walRecord.type() == WALRecord.RecordType.TX_RECORD && walRecord instanc return entriesUnderTxFound; } - /** - * Represents an operation that accepts a single input argument and returns no - * result. 
- * - * @param - */ - private interface Consumer { - /** - * Performs this operation on the given argument. - * - * @param t the input argument - */ - public void accept(T t); - } - - /** - * Represents an operation that accepts two input arguments and returns no - * result. - * - * @param - */ - private interface BiConsumer { - /** - * Performs this operation on the given argument. - * - * @param t the input argument - */ - public void accept(T t, U u); - } - /** Enum for cover binaryObject enum save/load */ enum TestEnum { /** */A, /** */B, /** */C From 36f2aab41f274dbe9c9dc37bb3876ac65176368f Mon Sep 17 00:00:00 2001 From: dpavlov Date: Wed, 4 Oct 2017 15:05:48 +0300 Subject: [PATCH 183/243] IGNITE-6839 Delete binary meta before tests, PDS compatibility tests improved - Fixes #2990. Signed-off-by: Alexey Goncharuk (cherry picked from commit 20ec6c9) --- .../DummyPersistenceCompatibilityTest.java | 225 +++++++++++++++++- .../FoldersReuseCompatibilityTest.java | 48 +++- .../reader/StandaloneGridKernalContext.java | 2 +- .../db/wal/reader/IgniteWalReaderTest.java | 110 ++++----- 4 files changed, 318 insertions(+), 67 deletions(-) diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java index 655da522b16fe..b05d5a673f32d 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java @@ -17,6 +17,11 @@ package org.apache.ignite.compatibility.persistence; +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.io.Serializable; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import 
org.apache.ignite.cache.CacheAtomicityMode; @@ -28,14 +33,24 @@ import org.apache.ignite.configuration.PersistentStoreConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.cache.GridCacheAbstractFullApiSelfTest; +import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; -/** */ +/** + * Saves data using previous version of ignite and then load this data using actual version + */ public class DummyPersistenceCompatibilityTest extends IgnitePersistenceCompatibilityAbstractTest { /** */ private static final String TEST_CACHE_NAME = DummyPersistenceCompatibilityTest.class.getSimpleName(); + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "binary_meta", false)); + } + /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); @@ -52,11 +67,41 @@ public class DummyPersistenceCompatibilityTest extends IgnitePersistenceCompatib } /** + * Tests opportunity to read data from previous Ignite DB version. + * + * @throws Exception If failed. + */ + public void testNodeStartByOldVersionPersistenceData_2_2() throws Exception { + doTestStartupWithOldVersion("2.2.0"); + } + + /** + * Tests opportunity to read data from previous Ignite DB version. + * + * @throws Exception If failed. + */ + public void testNodeStartByOldVersionPersistenceData_2_1() throws Exception { + doTestStartupWithOldVersion("2.1.0"); + } + + /** + * Tests opportunity to read data from previous Ignite DB version. + * + * @throws Exception If failed. 
+ */ + public void testNodeStartByOldVersionPersistenceData_2_3() throws Exception { + doTestStartupWithOldVersion("2.3.0"); + } + + /** + * Tests opportunity to read data from previous Ignite DB version. + * + * @param ver 3-digits version of ignite * @throws Exception If failed. */ - public void testNodeStartByOldVersionPersistenceData() throws Exception { + private void doTestStartupWithOldVersion(String ver) throws Exception { try { - startGrid(1, "2.2.0", new ConfigurationClosure(), new PostStartupClosure()); + startGrid(1, ver, new ConfigurationClosure(), new PostStartupClosure()); stopAllGrids(); @@ -66,10 +111,23 @@ public void testNodeStartByOldVersionPersistenceData() throws Exception { ignite.active(true); - IgniteCache cache = ignite.getOrCreateCache(TEST_CACHE_NAME); + IgniteCache cache = ignite.getOrCreateCache(TEST_CACHE_NAME); for (int i = 0; i < 10; i++) assertEquals("data" + i, cache.get(i)); + + assertEquals(cache.get("1"), "2"); + assertEquals(cache.get(12), 2); + assertEquals(cache.get(13L), 2L); + assertEquals(cache.get(TestEnum.A), "Enum_As_Key"); + assertEquals(cache.get("Enum_As_Value"), TestEnum.B); + assertEquals(cache.get(TestEnum.C), TestEnum.C); + assertEquals(cache.get("Serializable"), new TestSerializable(42)); + assertEquals(cache.get(new TestSerializable(42)), "Serializable_As_Key"); + assertEquals(cache.get("Externalizable"), new TestExternalizable(42)); + assertEquals(cache.get(new TestExternalizable(42)), "Externalizable_As_Key"); + assertEquals(cache.get("testStringContainer"), + new TestStringContainerToBePrinted("testStringContainer")); } finally { stopAllGrids(); @@ -82,16 +140,28 @@ private static class PostStartupClosure implements IgniteInClosure { @Override public void apply(Ignite ignite) { ignite.active(true); - CacheConfiguration cacheCfg = new CacheConfiguration<>(); + CacheConfiguration cacheCfg = new CacheConfiguration<>(); cacheCfg.setName(TEST_CACHE_NAME); 
cacheCfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL); cacheCfg.setBackups(1); cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC); - IgniteCache cache = ignite.createCache(cacheCfg); + IgniteCache cache = ignite.createCache(cacheCfg); for (int i = 0; i < 10; i++) cache.put(i, "data" + i); + + cache.put("1", "2"); + cache.put(12, 2); + cache.put(13L, 2L); + cache.put(TestEnum.A, "Enum_As_Key"); + cache.put("Enum_As_Value", TestEnum.B); + cache.put(TestEnum.C, TestEnum.C); + cache.put("Serializable", new TestSerializable(42)); + cache.put(new TestSerializable(42), "Serializable_As_Key"); + cache.put("Externalizable", new TestExternalizable(42)); + cache.put(new TestExternalizable(42), "Externalizable_As_Key"); + cache.put("testStringContainer", new TestStringContainerToBePrinted("testStringContainer")); } } @@ -111,4 +181,147 @@ private static class ConfigurationClosure implements IgniteInClosure { /** {@inheritDoc} */ @Override public void apply(Ignite ignite) { ignite.active(true); - ignite.getOrCreateCache(CACHE_NAME).put(KEY, VAL); + + final IgniteCache cache = ignite.getOrCreateCache(CACHE_NAME); + cache.put(KEY, VAL); + cache.put("1", "2"); + cache.put(1, 2); + cache.put(1L, 2L); + cache.put(DummyPersistenceCompatibilityTest.TestEnum.A, "Enum_As_Key"); + cache.put("Enum_As_Value", DummyPersistenceCompatibilityTest.TestEnum.B); + cache.put(DummyPersistenceCompatibilityTest.TestEnum.C, DummyPersistenceCompatibilityTest.TestEnum.C); + + cache.put("Serializable", new DummyPersistenceCompatibilityTest.TestSerializable(42)); + cache.put(new DummyPersistenceCompatibilityTest.TestSerializable(42), "Serializable_As_Key"); + cache.put("Externalizable", new DummyPersistenceCompatibilityTest.TestExternalizable(42)); + cache.put(new DummyPersistenceCompatibilityTest.TestExternalizable(42), "Externalizable_As_Key"); + cache.put(KEY_OBJ, new DummyPersistenceCompatibilityTest.TestStringContainerToBePrinted(VAL)); + } } diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java index 485458b83e93b..80dfc5bc69b43 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java @@ -94,7 +94,7 @@ * Dummy grid kernal context */ public class StandaloneGridKernalContext implements GridKernalContext { - /** Binary metadata file store folderю */ + /** Binary metadata file store folder. */ public static final String BINARY_META_FOLDER = "binary_meta"; /** Config for fake Ignite instance. */ diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java index 1d11820a24a36..1844bfecc1a04 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java @@ -98,17 +98,17 @@ public class IgniteWalReaderTest extends GridCommonAbstractTest { /** Cache name. */ private static final String CACHE_NAME = "cache0"; - /** additional cache for testing different combinations of types in WAL */ + /** additional cache for testing different combinations of types in WAL. */ private static final String CACHE_ADDL_NAME = "cache1"; - /** Dump records to logger. Should be false for non local run */ + /** Dump records to logger. Should be false for non local run. 
*/ private static final boolean dumpRecords = false; - /** Page size to set */ + /** Page size to set. */ public static final int PAGE_SIZE = 4 * 1024; /** - * Field for transferring setting from test to getConfig method + * Field for transferring setting from test to getConfig method. * Archive incomplete segment after inactivity milliseconds. */ private int archiveIncompleteSegmentAfterInactivityMs; @@ -116,7 +116,7 @@ public class IgniteWalReaderTest extends GridCommonAbstractTest { /** Custom wal mode. */ private WALMode customWalMode; - /** Clear properties in afterTest method() */ + /** Clear properties in afterTest() method. */ private boolean clearProperties; /** {@inheritDoc} */ @@ -237,23 +237,23 @@ public void testFillWalAndReadRecords() throws Exception { } /** - * Iterates on records and closes iterator + * Iterates on records and closes iterator. * - * @param walIter iterator to count, will be closed - * @return count of records - * @throws IgniteCheckedException if failed to iterate + * @param walIter iterator to count, will be closed. + * @return count of records. + * @throws IgniteCheckedException if failed to iterate. */ private int iterateAndCount(WALIterator walIter) throws IgniteCheckedException { return iterateAndCount(walIter, true); } /** - * Iterates on records and closes iterator + * Iterates on records and closes iterator. * - * @param walIter iterator to count, will be closed - * @param touchEntries access data within entries - * @return count of records - * @throws IgniteCheckedException if failed to iterate + * @param walIter iterator to count, will be closed. + * @param touchEntries access data within entries. + * @return count of records. + * @throws IgniteCheckedException if failed to iterate. 
*/ private int iterateAndCount(WALIterator walIter, boolean touchEntries) throws IgniteCheckedException { int cnt = 0; @@ -280,9 +280,9 @@ private int iterateAndCount(WALIterator walIter, boolean touchEntries) throws Ig } /** - * Tests archive completed event is fired + * Tests archive completed event is fired. * - * @throws Exception if failed + * @throws Exception if failed. */ public void testArchiveCompletedEventFired() throws Exception { final AtomicBoolean evtRecorded = new AtomicBoolean(); @@ -315,10 +315,10 @@ public void testArchiveCompletedEventFired() throws Exception { } /** - * Puts provided number of records to fill WAL + * Puts provided number of records to fill WAL. * - * @param ignite ignite instance - * @param recordsToWrite count + * @param ignite ignite instance. + * @param recordsToWrite count. */ private void putDummyRecords(Ignite ignite, int recordsToWrite) { IgniteCache cache0 = ignite.cache(CACHE_NAME); @@ -328,10 +328,10 @@ private void putDummyRecords(Ignite ignite, int recordsToWrite) { } /** - * Puts provided number of records to fill WAL + * Puts provided number of records to fill WAL. * - * @param ignite ignite instance - * @param recordsToWrite count + * @param ignite ignite instance. + * @param recordsToWrite count. */ private void putAllDummyRecords(Ignite ignite, int recordsToWrite) { IgniteCache cache0 = ignite.cache(CACHE_NAME); @@ -345,11 +345,11 @@ private void putAllDummyRecords(Ignite ignite, int recordsToWrite) { } /** - * Puts provided number of records to fill WAL under transactions + * Puts provided number of records to fill WAL under transactions. * - * @param ignite ignite instance - * @param recordsToWrite count - * @param txCnt transactions to run. If number is less then records count, txCnt records will be written + * @param ignite ignite instance. + * @param recordsToWrite count. + * @param txCnt transactions to run. If number is less then records count, txCnt records will be written. 
*/ private IgniteCache txPutDummyRecords(Ignite ignite, int recordsToWrite, int txCnt) { IgniteCache cache0 = ignite.cache(CACHE_NAME); @@ -368,9 +368,9 @@ private IgniteCache txPutDummyRecords(Ignite ignite, int records } /** - * Tests time out based WAL segment archiving + * Tests time out based WAL segment archiving. * - * @throws Exception if failure occurs + * @throws Exception if failure occurs. */ public void testArchiveIncompleteSegmentAfterInactivity() throws Exception { final AtomicBoolean waitingForEvt = new AtomicBoolean(); @@ -410,12 +410,12 @@ public void testArchiveIncompleteSegmentAfterInactivity() throws Exception { } /** - * Removes entry by key and value from map (java 8 map method copy) + * Removes entry by key and value from map (java 8 map method copy). * * @param m map to remove from. * @param key key to remove. * @param val value to remove. - * @return true if remove was successful + * @return true if remove was successful. */ private boolean remove(Map m, Object key, Object val) { Object curVal = m.get(key); @@ -427,7 +427,7 @@ private boolean remove(Map m, Object key, Object val) { } /** - * Places records under transaction, checks its value using WAL + * Places records under transaction, checks its value using WAL. * * @throws Exception if failed. */ @@ -481,11 +481,11 @@ public void testTxFillWalAndExtractDataRecords() throws Exception { } /** - * Generates DB subfolder name for provided node index (local) and UUID (consistent ID) + * Generates DB subfolder name for provided node index (local) and UUID (consistent ID). * * @param ignite ignite instance. * @param nodeIdx node index. - * @return folder file name + * @return folder file name. 
*/ @NotNull private String genDbSubfolderName(Ignite ignite, int nodeIdx) { return genNewStyleSubfolderName(nodeIdx, (UUID)ignite.cluster().localNode().consistentId()); @@ -500,7 +500,7 @@ public void testTxFillWalAndExtractDataRecords() throws Exception { * @param minCntEntries minimum expected entries count to find. * @param minTxCnt minimum expected transaction count to find. * @param objConsumer object handler, called for each object found in logical data records. - * @param dataRecordHnd data handler record + * @param dataRecordHnd data handler record. * @throws IgniteCheckedException if failed. */ private void scanIterateAndCount( @@ -600,9 +600,9 @@ public void testFillWalWithDifferentTypes() throws Exception { ctrlMap.put(next.getKey(), next.getValue()); } - for (Cache.Entry next : addlCache) { - ctrlMapForBinaryObjects.put(next.getKey(), next.getValue()); - } + for (Cache.Entry next : addlCache) { + ctrlMapForBinaryObjects.put(next.getKey(), next.getValue()); + } final String subfolderName = genDbSubfolderName(ignite0, 0); @@ -724,9 +724,9 @@ else if (val instanceof BinaryObject) { } /** - * Tests archive completed event is fired + * Tests archive completed event is fired. * - * @throws Exception if failed + * @throws Exception if failed. */ public void testFillWalForExactSegmentsCount() throws Exception { customWalMode = WALMode.DEFAULT; @@ -762,7 +762,7 @@ public void testFillWalForExactSegmentsCount() throws Exception { Assert.assertTrue("Too much entries generated, but segments was not become available", totalEntries < 10000); } - final String subfolderName = U.maskForFileName(ignite.cluster().localNode().consistentId().toString()); + final String subfolderName = genDbSubfolderName(ignite, 0); stopGrid("node0"); @@ -773,7 +773,7 @@ public void testFillWalForExactSegmentsCount() throws Exception { } /** - * Tests reading of empty WAL from non filled cluster + * Tests reading of empty WAL from non filled cluster. * * @throws Exception if failed. 
*/ @@ -785,7 +785,7 @@ public void testReadEmptyWal() throws Exception { ignite.active(true); ignite.active(false); - final String subfolderName = U.maskForFileName(ignite.cluster().localNode().consistentId().toString()); + final String subfolderName = genDbSubfolderName(ignite, 0); stopGrid("node0"); @@ -1033,8 +1033,8 @@ public void testTxRecordsReadWoBinaryMeta() throws Exception { } /** - * @param values collection with numbers - * @return sum of numbers + * @param values collection with numbers. + * @return sum of numbers. */ private int valuesSum(Iterable values) { int sum = 0; @@ -1046,11 +1046,11 @@ private int valuesSum(Iterable values) { } /** - * Iterates over data records, checks each DataRecord and its entries, finds out all transactions in WAL + * Iterates over data records, checks each DataRecord and its entries, finds out all transactions in WAL. * - * @param walIter iterator to use - * @return count of data records observed for each global TX ID. Contains null for non tx updates - * @throws IgniteCheckedException if failure + * @param walIter iterator to use. + * @return count of data records observed for each global TX ID. Contains null for non tx updates. + * @throws IgniteCheckedException if failure. */ private Map iterateAndCountDataRecord( final WALIterator walIter, @@ -1119,12 +1119,12 @@ else if (walRecord.type() == WALRecord.RecordType.TX_RECORD && walRecord instanc return entriesUnderTxFound; } - /** Enum for cover binaryObject enum save/load */ + /** Enum for cover binaryObject enum save/load. */ enum TestEnum { /** */A, /** */B, /** */C } - /** Special class to test WAL reader resistance to Serializable interface */ + /** Special class to test WAL reader resistance to Serializable interface. 
*/ static class TestSerializable implements Serializable { /** */ private static final long serialVersionUID = 0L; @@ -1133,7 +1133,7 @@ static class TestSerializable implements Serializable { private int iVal; /** - * Creates test object + * Creates test object. * * @param iVal I value. */ @@ -1166,7 +1166,7 @@ static class TestSerializable implements Serializable { } } - /** Special class to test WAL reader resistance to Serializable interface */ + /** Special class to test WAL reader resistance to Serializable interface. */ static class TestExternalizable implements Externalizable { /** */ private static final long serialVersionUID = 0L; @@ -1180,7 +1180,7 @@ public TestExternalizable() { } /** - * Creates test object with provided value + * Creates test object with provided value. * * @param iVal I value. */ @@ -1223,7 +1223,7 @@ public TestExternalizable(int iVal) { } } - /** Container class to test toString of data records */ + /** Container class to test toString of data records. */ static class TestStringContainerToBePrinted { /** */ private String data; @@ -1262,7 +1262,7 @@ public TestStringContainerToBePrinted(String data) { } } - /** Test class for storing in ignite */ + /** Test class for storing in ignite. */ private static class Organization { /** Key. */ private final int key; From e38ecc9de2bf7ef863694ffde4a13b11ced2ecf7 Mon Sep 17 00:00:00 2001 From: dpavlov Date: Mon, 4 Dec 2017 09:31:56 +0300 Subject: [PATCH 184/243] IGNITE-7091 Fixing assertion raising because of serializer switch - Fixes #3128. 
Signed-off-by: Alexey Goncharuk (cherry picked from commit 6101fde) --- .../DummyPersistenceCompatibilityTest.java | 2 +- .../wal/FileWriteAheadLogManager.java | 27 +++++++++++++------ 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java index b05d5a673f32d..226694e722229 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java @@ -42,7 +42,7 @@ */ public class DummyPersistenceCompatibilityTest extends IgnitePersistenceCompatibilityAbstractTest { /** */ - private static final String TEST_CACHE_NAME = DummyPersistenceCompatibilityTest.class.getSimpleName(); + protected static final String TEST_CACHE_NAME = DummyPersistenceCompatibilityTest.class.getSimpleName(); /** {@inheritDoc} */ @Override protected void beforeTest() throws Exception { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java index 53fe77e3a9933..83ccc7904df81 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java @@ -1303,13 +1303,19 @@ private long nextAbsoluteSegmentIndex(long curIdx) throws IgniteCheckedException /** * @param absIdx Segment absolute index. - * @return {@code True} if can read, {@code false} if work segment + * @return
        • {@code True} if can read, no lock is held,
        • {@code false} if work segment, need + * release segment later, use {@link #releaseWorkSegment} for unlock
        */ @SuppressWarnings("NonPrivateFieldAccessedInSynchronizedContext") private boolean checkCanReadArchiveOrReserveWorkSegment(long absIdx) { synchronized (this) { - if (lastAbsArchivedIdx >= absIdx) + if (lastAbsArchivedIdx >= absIdx) { + if (log.isDebugEnabled()) + log.debug("Not needed to reserve WAL segment: absIdx=" + absIdx + ";" + + " lastAbsArchivedIdx=" + lastAbsArchivedIdx); + return true; + } Integer cur = locked.get(absIdx); @@ -1332,7 +1338,8 @@ private void releaseWorkSegment(long absIdx) { synchronized (this) { Integer cur = locked.get(absIdx); - assert cur != null && cur > 0; + assert cur != null && cur > 0 : "WAL Segment with Index " + absIdx + " is not locked;" + + " lastAbsArchivedIdx = " + lastAbsArchivedIdx; if (cur == 1) { locked.remove(absIdx); @@ -2641,10 +2648,13 @@ private void init() throws IgniteCheckedException { nextHandle = null; } - if (nextHandle != null) - nextHandle.workDir = !readArchive; + if (nextHandle == null) { + if (!readArchive) + releaseWorkSegment(curWalSegmIdx); + } else - releaseWorkSegment(curWalSegmIdx); + nextHandle.workDir = !readArchive; + curRec = null; return nextHandle; @@ -2652,8 +2662,9 @@ private void init() throws IgniteCheckedException { /** * @param absIdx Absolute index to check. - * @return {@code True} if we can safely read the archive, {@code false} if the segment has not been archived - * yet. In this case the corresponding work segment is reserved (will not be deleted until release). + * @return
        • {@code True} if we can safely read the archive,
        • {@code false} if the segment has + * not been archived yet. In this case the corresponding work segment is reserved (will not be deleted until + * release). Use {@link #releaseWorkSegment} for unlock
        */ private boolean canReadArchiveOrReserveWork(long absIdx) { return archiver != null && archiver.checkCanReadArchiveOrReserveWorkSegment(absIdx); From ea0085129b1d9aba063cc49e8a32d5143a2941b8 Mon Sep 17 00:00:00 2001 From: Evgeny Stanilovskiy Date: Wed, 22 Nov 2017 10:53:15 +0300 Subject: [PATCH 185/243] IGNITE-6916: node joining with enabled pds and empty disc space causes exchange to hang - Fixes #3036. Signed-off-by: Alexey Goncharuk (cherry picked from commit 99bbb53) --- .../wal/FileWriteAheadLogManager.java | 4 +- .../wal/IgniteWalFlushBackgroundSelfTest.java | 35 +++ .../db/wal/IgniteWalFlushDefaultSelfTest.java | 35 +++ .../db/wal/IgniteWalFlushLogOnlySelfTest.java | 35 +++ ...lushMultiNodeFailoverAbstractSelfTest.java | 246 ++++++++++++++++++ .../testsuites/IgnitePdsTestSuite2.java | 9 + 6 files changed, 362 insertions(+), 2 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushBackgroundSelfTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushDefaultSelfTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushLogOnlySelfTest.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushMultiNodeFailoverAbstractSelfTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java index 83ccc7904df81..6f209ebb871e4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java @@ -50,6 +50,7 @@ import org.apache.ignite.events.WalSegmentArchivedEvent; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.internal.IgnitionEx; import org.apache.ignite.internal.managers.eventstorage.GridEventStorageManager; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.pagemem.wal.StorageException; @@ -75,7 +76,6 @@ import org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor; import org.apache.ignite.internal.util.GridUnsafe; import org.apache.ignite.internal.util.typedef.F; -import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; @@ -2399,7 +2399,7 @@ private void invalidateEnvironmentLocked(Throwable e) { new Thread() { @Override public void run() { - G.stop(gridName, true); + IgnitionEx.stop(gridName, true, true); } }.start(); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushBackgroundSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushBackgroundSelfTest.java new file mode 100644 index 0000000000000..d359a5ce40f59 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushBackgroundSelfTest.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.db.wal; + +import org.apache.ignite.configuration.WALMode; + +/** + * + */ +public class IgniteWalFlushBackgroundSelfTest extends IgniteWalFlushMultiNodeFailoverAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected int gridCount() { + return 1; + } + + /** {@inheritDoc} */ + @Override protected WALMode walMode() { + return WALMode.BACKGROUND; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushDefaultSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushDefaultSelfTest.java new file mode 100644 index 0000000000000..d60241e0f3894 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushDefaultSelfTest.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.db.wal; + +import org.apache.ignite.configuration.WALMode; + +/** + * + */ +public class IgniteWalFlushDefaultSelfTest extends IgniteWalFlushMultiNodeFailoverAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected int gridCount() { + return 1; + } + + /** {@inheritDoc} */ + @Override protected WALMode walMode() { + return WALMode.DEFAULT; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushLogOnlySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushLogOnlySelfTest.java new file mode 100644 index 0000000000000..95ffa02e4404d --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushLogOnlySelfTest.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.db.wal; + +import org.apache.ignite.configuration.WALMode; + +/** + * + */ +public class IgniteWalFlushLogOnlySelfTest extends IgniteWalFlushMultiNodeFailoverAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected int gridCount() { + return 1; + } + + /** {@inheritDoc} */ + @Override protected WALMode walMode() { + return WALMode.LOG_ONLY; + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushMultiNodeFailoverAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushMultiNodeFailoverAbstractSelfTest.java new file mode 100644 index 0000000000000..057e082a5717b --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFlushMultiNodeFailoverAbstractSelfTest.java @@ -0,0 +1,246 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.db.wal; + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.WALMode; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIODecorator; +import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory; +import org.apache.ignite.internal.util.lang.GridAbsPredicate; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.apache.ignite.transactions.Transaction; +import org.apache.ignite.transactions.TransactionConcurrency; +import org.apache.ignite.transactions.TransactionIsolation; + +import java.nio.file.OpenOption; +import java.util.concurrent.atomic.AtomicBoolean; + +import static java.nio.file.StandardOpenOption.CREATE; +import static 
java.nio.file.StandardOpenOption.READ; +import static java.nio.file.StandardOpenOption.WRITE; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR; + +/** + * Tests error recovery while node flushing + */ +public abstract class IgniteWalFlushMultiNodeFailoverAbstractSelfTest extends GridCommonAbstractTest { + /** */ + private static final String TEST_CACHE = "testCache"; + + /** */ + private static final int ITRS = 1000; + + /** */ + private AtomicBoolean canFail = new AtomicBoolean(); + + /** + * @return Node count. + */ + protected abstract int gridCount(); + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + stopAllGrids(); + + deleteWorkFiles(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + deleteWorkFiles(); + } + + /** {@inheritDoc} */ + @Override protected long getTestTimeout() { + return 30_000; + } + + /** {@inheritDoc} */ + protected abstract WALMode walMode(); + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + CacheConfiguration cacheCfg = new CacheConfiguration(TEST_CACHE) + .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL) + .setBackups(1); + + cfg.setCacheConfiguration(cacheCfg); + + DataStorageConfiguration memCfg = new DataStorageConfiguration() + .setDefaultDataRegionConfiguration( + new DataRegionConfiguration().setMaxSize(2048L * 1024 * 1024).setPersistenceEnabled(true)) + .setWalMode(this.walMode()) + .setWalSegmentSize(50_000); + + if (gridName.endsWith(String.valueOf(gridCount()))) + memCfg.setFileIOFactory(new FailingFileIOFactory(canFail)); + + cfg.setDataStorageConfiguration(memCfg); + + return cfg; + } + + /** + * Test flushing error recovery when flush is triggered while node starting + * + * @throws Exception In case of fail + */ + public void 
testFailWhileStart() throws Exception { + failWhilePut(true); + } + + /** + * Test flushing error recovery when flush is triggered after node started + * + * @throws Exception In case of fail + */ + public void testFailAfterStart() throws Exception { + failWhilePut(false); + } + + /** + * @throws Exception if failed. + */ + public void failWhilePut(boolean failWhileStart) throws Exception { + + final Ignite grid = startGridsMultiThreaded(gridCount()); + + grid.active(true); + + IgniteCache cache = grid.cache(TEST_CACHE); + + for (int i = 0; i < ITRS; i++) { + while (true) { + try (Transaction tx = grid.transactions().txStart( + TransactionConcurrency.PESSIMISTIC, TransactionIsolation.READ_COMMITTED)) { + cache.put(i, "testValue" + i); + + tx.commit(); + + break; + } catch (Exception expected) { + // Expected exception. + } + } + + if (i == ITRS / 4) { + try { + if (failWhileStart) + canFail.set(true); + + startGrid(gridCount()); + + waitForRebalancing(); + } catch (Exception expected) { + // There can be any exception. Do nothing. + } + } + + if (i == ITRS / 2) + canFail.set(true); + } + + + // We should await successful stop of node. 
+ GridTestUtils.waitForCondition(new GridAbsPredicate() { + @Override + public boolean apply() { + return grid.cluster().nodes().size() == gridCount(); + } + }, getTestTimeout()); + + stopAllGrids(); + + Ignite grid0 = startGrids(gridCount() + 1); + + grid0.active(true); + + cache = grid0.cache(TEST_CACHE); + + for (int i = 0; i < ITRS; i++) + assertEquals(cache.get(i), "testValue" + i); + } + + + /** + * @throws IgniteCheckedException + */ + private void deleteWorkFiles() throws IgniteCheckedException { + deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false)); + } + + /** + * Create File I/O which fails after second attempt to write to File + */ + private static class FailingFileIOFactory implements FileIOFactory { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private AtomicBoolean fail; + + /** */ + private final FileIOFactory delegateFactory = new RandomAccessFileIOFactory(); + + /** */ + FailingFileIOFactory(AtomicBoolean fail) { + this.fail = fail; + } + + /** {@inheritDoc} */ + @Override public FileIO create(File file) throws IOException { + return create(file, CREATE, READ, WRITE); + } + + /** {@inheritDoc} */ + @Override public FileIO create(File file, OpenOption... 
modes) throws IOException { + FileIO delegate = delegateFactory.create(file, modes); + + return new FileIODecorator(delegate) { + int writeAttempts = 2; + + @Override public int write(ByteBuffer sourceBuffer) throws IOException { + + if (--writeAttempts == 0 && fail!= null && fail.get()) + throw new IOException("No space left on device"); + + return super.write(sourceBuffer); + } + }; + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java index b1e80eaabdfbc..adfdb2c03d057 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java @@ -30,7 +30,10 @@ import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsTransactionsHangTest; import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsWholeClusterRestartTest; import org.apache.ignite.internal.processors.cache.persistence.db.filename.IgniteUidAsConsistentIdMigrationTest; +import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushBackgroundSelfTest; +import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushDefaultSelfTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushFailoverTest; +import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushLogOnlySelfTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalHistoryReservationsTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalSerializerVersionTest; import org.apache.ignite.internal.processors.cache.persistence.db.wal.crc.IgniteDataIntegrityTests; @@ -75,6 +78,12 @@ public static TestSuite suite() throws Exception { // Failover test suite.addTestSuite(IgniteWalFlushFailoverTest.class); + 
suite.addTestSuite(IgniteWalFlushDefaultSelfTest.class); + + suite.addTestSuite(IgniteWalFlushBackgroundSelfTest.class); + + suite.addTestSuite(IgniteWalFlushLogOnlySelfTest.class); + suite.addTestSuite(IgniteWalReaderTest.class); suite.addTestSuite(IgnitePdsExchangeDuringCheckpointTest.class); From 325f837591467595249151a860f17ec02cdd1643 Mon Sep 17 00:00:00 2001 From: dpavlov Date: Thu, 30 Nov 2017 16:21:30 +0300 Subject: [PATCH 186/243] IGNITE-7070 Ignite PDS compatibility framework improvements - Fixes #3106. Signed-off-by: Alexey Goncharuk (cherry picked from commit 83049d7) --- modules/compatibility/pom.xml | 8 ++ .../DummyPersistenceCompatibilityTest.java | 108 ++++++++++------ .../testframework/junits/Dependency.java | 117 ++++++++++++++++++ .../IgniteCompatibilityAbstractTest.java | 72 +++++++++-- .../junits/IgniteCompatibilityNodeRunner.java | 110 ++++++++++++---- .../testframework/util/MavenUtils.java | 30 ++--- 6 files changed, 351 insertions(+), 94 deletions(-) create mode 100644 modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/Dependency.java diff --git a/modules/compatibility/pom.xml b/modules/compatibility/pom.xml index 845d0cd332dfe..7dfbe68e3c5b0 100644 --- a/modules/compatibility/pom.xml +++ b/modules/compatibility/pom.xml @@ -37,6 +37,10 @@ http://ignite.apache.org + + org.apache.ignite ignite-core @@ -90,6 +94,10 @@ ${spring.version} test + + diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java index 226694e722229..64a1cbf1e78c9 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java @@ -22,10 +22,12 @@ import 
java.io.ObjectInput; import java.io.ObjectOutput; import java.io.Serializable; +import javax.cache.Cache; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheWriteSynchronizationMode; +import org.apache.ignite.configuration.BinaryConfiguration; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; @@ -96,12 +98,12 @@ public void testNodeStartByOldVersionPersistenceData_2_3() throws Exception { /** * Tests opportunity to read data from previous Ignite DB version. * - * @param ver 3-digits version of ignite + * @param igniteVer 3-digits version of ignite * @throws Exception If failed. */ - private void doTestStartupWithOldVersion(String ver) throws Exception { + protected void doTestStartupWithOldVersion(String igniteVer, boolean compactFooter) throws Exception { try { - startGrid(1, ver, new ConfigurationClosure(), new PostStartupClosure()); + startGrid(1, igniteVer, new ConfigurationClosure(compactFooter), new PostStartupClosure()); stopAllGrids(); @@ -111,31 +113,67 @@ private void doTestStartupWithOldVersion(String ver) throws Exception { ignite.active(true); - IgniteCache cache = ignite.getOrCreateCache(TEST_CACHE_NAME); - - for (int i = 0; i < 10; i++) - assertEquals("data" + i, cache.get(i)); - - assertEquals(cache.get("1"), "2"); - assertEquals(cache.get(12), 2); - assertEquals(cache.get(13L), 2L); - assertEquals(cache.get(TestEnum.A), "Enum_As_Key"); - assertEquals(cache.get("Enum_As_Value"), TestEnum.B); - assertEquals(cache.get(TestEnum.C), TestEnum.C); - assertEquals(cache.get("Serializable"), new TestSerializable(42)); - assertEquals(cache.get(new TestSerializable(42)), "Serializable_As_Key"); - assertEquals(cache.get("Externalizable"), new TestExternalizable(42)); - assertEquals(cache.get(new TestExternalizable(42)), 
"Externalizable_As_Key"); - assertEquals(cache.get("testStringContainer"), - new TestStringContainerToBePrinted("testStringContainer")); + validateResultingCacheData(ignite.getOrCreateCache(TEST_CACHE_NAME)); } finally { stopAllGrids(); } } + /** + * Tests opportunity to read data from previous Ignite DB version. + * + * @param igniteVer 3-digits version of ignite + * @throws Exception If failed. + */ + protected void doTestStartupWithOldVersion(String igniteVer) throws Exception { + doTestStartupWithOldVersion(igniteVer, true); + } + + /** + * @param cache to be filled by different keys and values. Results may be validated in {@link + * #validateResultingCacheData(Cache)}. + */ + public static void saveCacheData(Cache cache) { + for (int i = 0; i < 10; i++) + cache.put(i, "data" + i); + + cache.put("1", "2"); + cache.put(12, 2); + cache.put(13L, 2L); + cache.put(TestEnum.A, "Enum_As_Key"); + cache.put("Enum_As_Value", TestEnum.B); + cache.put(TestEnum.C, TestEnum.C); + cache.put("Serializable", new TestSerializable(42)); + cache.put(new TestSerializable(42), "Serializable_As_Key"); + cache.put("Externalizable", new TestExternalizable(42)); + cache.put(new TestExternalizable(42), "Externalizable_As_Key"); + cache.put("testStringContainer", new TestStringContainerToBePrinted("testStringContainer")); + } + + /** + * Asserts cache contained all expected values as it was saved before. + * @param cache cache should be filled using {@link #saveCacheData(Cache)}. 
+ */ + public static void validateResultingCacheData(Cache cache) { + for (int i = 0; i < 10; i++) + assertEquals(cache.get(i), "data" + i); + + assertEquals("2", cache.get("1")); + assertEquals(2, cache.get(12)); + assertEquals(2L, cache.get(13L)); + assertEquals("Enum_As_Key", cache.get(TestEnum.A)); + assertEquals(TestEnum.B, cache.get("Enum_As_Value")); + assertEquals(TestEnum.C, cache.get(TestEnum.C)); + assertEquals(new TestSerializable(42), cache.get("Serializable")); + assertEquals("Serializable_As_Key", cache.get(new TestSerializable(42))); + assertEquals(new TestExternalizable(42), cache.get("Externalizable")); + assertEquals("Externalizable_As_Key", cache.get(new TestExternalizable(42))); + assertEquals(new TestStringContainerToBePrinted("testStringContainer"), cache.get("testStringContainer")); + } + /** */ - private static class PostStartupClosure implements IgniteInClosure { + public static class PostStartupClosure implements IgniteInClosure { /** {@inheritDoc} */ @Override public void apply(Ignite ignite) { ignite.active(true); @@ -148,25 +186,18 @@ private static class PostStartupClosure implements IgniteInClosure { IgniteCache cache = ignite.createCache(cacheCfg); - for (int i = 0; i < 10; i++) - cache.put(i, "data" + i); - - cache.put("1", "2"); - cache.put(12, 2); - cache.put(13L, 2L); - cache.put(TestEnum.A, "Enum_As_Key"); - cache.put("Enum_As_Value", TestEnum.B); - cache.put(TestEnum.C, TestEnum.C); - cache.put("Serializable", new TestSerializable(42)); - cache.put(new TestSerializable(42), "Serializable_As_Key"); - cache.put("Externalizable", new TestExternalizable(42)); - cache.put(new TestExternalizable(42), "Externalizable_As_Key"); - cache.put("testStringContainer", new TestStringContainerToBePrinted("testStringContainer")); + saveCacheData(cache); } } /** */ - private static class ConfigurationClosure implements IgniteInClosure { + public static class ConfigurationClosure implements IgniteInClosure { + private boolean compactFooter; + + 
public ConfigurationClosure(boolean compactFooter) { + this.compactFooter = compactFooter; + } + /** {@inheritDoc} */ @Override public void apply(IgniteConfiguration cfg) { cfg.setLocalHost("127.0.0.1"); @@ -179,6 +210,9 @@ private static class ConfigurationClosure implements IgniteInClosure dependencies = getDependencies(ver); + StringBuilder pathBuilder = new StringBuilder(); for (URL url : ldr.getURLs()) { String path = url.getPath(); - if (!path.contains(corePathTemplate) && !path.contains(coreTestsPathTemplate)) + boolean excluded = false; + for (Dependency next : dependencies) { + if (path.contains(next.localPathTemplate())) { + excluded = true; + break; + } + } + if (!excluded) pathBuilder.append(path).append(File.pathSeparator); } - String pathToArtifact = MavenUtils.getPathToIgniteCoreArtifact(ver); + for (Dependency next : dependencies) { + final String artifactVer = next.version() != null ? next.version() : ver; + final String grpName = next.groupName() != null ? next.groupName() : "org.apache.ignite"; + String pathToArtifact = MavenUtils.getPathToIgniteArtifact(grpName, next.artifactName(), + artifactVer, next.classifier()); - pathBuilder.append(pathToArtifact).append(File.pathSeparator); - - String pathToTestsArtifact = MavenUtils.getPathToIgniteCoreArtifact(ver, "tests"); - - pathBuilder.append(pathToTestsArtifact).append(File.pathSeparator); + pathBuilder.append(pathToArtifact).append(File.pathSeparator); + } filteredJvmArgs.add("-cp"); filteredJvmArgs.add(pathBuilder.toString()); + final Collection jvmParms = getJvmParms(); + + if (jvmParms != null) + filteredJvmArgs.addAll(jvmParms); + return filteredJvmArgs; } }; @@ -201,7 +213,11 @@ protected IgniteEx startGrid(final String igniteInstanceName, final String ver, log.addListener(nodeId, new LoggedJoinNodeClosure(nodeJoinedLatch, nodeId)); - assert nodeJoinedLatch.await(NODE_JOIN_TIMEOUT, TimeUnit.MILLISECONDS) : "Node has not joined [id=" + nodeId + "]"; + final long nodeJoinTimeout = 
getNodeJoinTimeout(); + final boolean joined = nodeJoinedLatch.await(nodeJoinTimeout, TimeUnit.MILLISECONDS); + + assertTrue("Node has not joined [id=" + nodeId + "]/" + + "or does not completed its startup during timeout: " + nodeJoinTimeout + " ms.", joined); log.removeListener(nodeId); } @@ -212,6 +228,36 @@ protected IgniteEx startGrid(final String igniteInstanceName, final String ver, return ignite; } + /** + * Total amount of milliseconds. + * + * @return timeout in ms. + */ + protected long getNodeJoinTimeout() { + return NODE_JOIN_TIMEOUT; + } + + /** + * @return list of actual module dependencies from pom.xml + */ + @NotNull protected Collection getDependencies(String igniteVer) { + final Collection dependencies = new ArrayList<>(); + + dependencies.add(new Dependency("core", "ignite-core")); + dependencies.add(new Dependency("core", "ignite-core", true)); + + return dependencies; + } + + /** + * Allows to setup JVM arguments for standalone JVM + * + * @return additional JVM arguments + */ + protected Collection getJvmParms() { + return new ArrayList<>(); + } + /** {@inheritDoc} */ @Override protected Ignite startGrid(String igniteInstanceName, IgniteConfiguration cfg, GridSpringResourceContext ctx) throws Exception { diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityNodeRunner.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityNodeRunner.java index 3256d06de73d4..e1ed39a70b0f5 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityNodeRunner.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityNodeRunner.java @@ -22,6 +22,8 @@ import java.io.BufferedWriter; import java.io.File; import java.io.IOException; +import java.net.URL; +import java.net.URLClassLoader; import 
java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; @@ -64,43 +66,105 @@ public class IgniteCompatibilityNodeRunner extends IgniteNodeRunner { * @throws Exception In case of an error. */ public static void main(String[] args) throws Exception { - X.println(GridJavaProcess.PID_MSG_PREFIX + U.jvmPid()); + try { + X.println(GridJavaProcess.PID_MSG_PREFIX + U.jvmPid()); - X.println("Starting Ignite Node... Args=" + Arrays.toString(args)); + X.println("Starting Ignite Node... Args=" + Arrays.toString(args)); - if (args.length < 3) { - throw new IllegalArgumentException("At least four arguments expected:" + - " [path/to/closure/file] [ignite-instance-name] [node-id] [sync-node-id] [optional/path/to/closure/file]"); - } + if (args.length < 3) { + throw new IllegalArgumentException("At least four arguments expected:" + + " [path/to/closure/file] [ignite-instance-name] [node-id] [sync-node-id] [optional/path/to/closure/file]"); + } + + final Thread watchdog = delayedDumpClasspath(); - IgniteConfiguration cfg = CompatibilityTestsFacade.getConfiguration(); + IgniteConfiguration cfg = CompatibilityTestsFacade.getConfiguration(); - IgniteInClosure cfgClo = readClosureFromFileAndDelete(args[0]); + IgniteInClosure cfgClo = readClosureFromFileAndDelete(args[0]); - cfgClo.apply(cfg); + cfgClo.apply(cfg); - final UUID nodeId = UUID.fromString(args[2]); - final UUID syncNodeId = UUID.fromString(args[3]); + final UUID nodeId = UUID.fromString(args[2]); + final UUID syncNodeId = UUID.fromString(args[3]); - // Ignite instance name and id must be set according to arguments - // it's used for nodes managing: start, stop etc. - cfg.setIgniteInstanceName(args[1]); - cfg.setNodeId(nodeId); + // Ignite instance name and id must be set according to arguments + // it's used for nodes managing: start, stop etc. 
+ cfg.setIgniteInstanceName(args[1]); + cfg.setNodeId(nodeId); - final Ignite ignite = Ignition.start(cfg); + final Ignite ignite = Ignition.start(cfg); - assert ignite.cluster().node(syncNodeId) != null : "Node has not joined [id=" + nodeId + "]"; + assert ignite.cluster().node(syncNodeId) != null : "Node has not joined [id=" + nodeId + "]"; - // It needs to set private static field 'ignite' of the IgniteNodeRunner class via reflection - GridTestUtils.setFieldValue(new IgniteNodeRunner(), "ignite", ignite); + // It needs to set private static field 'ignite' of the IgniteNodeRunner class via reflection + GridTestUtils.setFieldValue(new IgniteNodeRunner(), "ignite", ignite); - if (args.length == 5) { - IgniteInClosure clo = readClosureFromFileAndDelete(args[4]); + if (args.length == 5) { + IgniteInClosure clo = readClosureFromFileAndDelete(args[4]); - clo.apply(ignite); + clo.apply(ignite); + } + + X.println(IgniteCompatibilityAbstractTest.SYNCHRONIZATION_LOG_MESSAGE_PREPARED + nodeId); + watchdog.interrupt(); + } + catch (Throwable e) { + X.println("Dumping classpath, error occurred: " + e); + dumpClasspath(); + throw e; } + } - X.println(IgniteCompatibilityAbstractTest.SYNCHRONIZATION_LOG_MESSAGE_PREPARED + nodeId); + /** + * Starts background watchdog thread which will dump main thread stacktrace and classpath dump if main thread + * will not respond with node startup finished. + * + * @return Thread to be interrupted. 
+ */ + private static Thread delayedDumpClasspath() { + final Thread mainThread = Thread.currentThread(); + final Runnable target = new Runnable() { + @Override public void run() { + try { + final int timeout = IgniteCompatibilityAbstractTest.NODE_JOIN_TIMEOUT - 1_000; + if (timeout > 0) + Thread.sleep(timeout); + } + catch (InterruptedException ignored) { + //interrupt is correct behaviour + return; + } + + X.println("Ignite startup/Init closure/post configuration closure is probably hanging at"); + + for (StackTraceElement ste : mainThread.getStackTrace()) { + X.println("\t" + ste.toString()); + } + + X.println("\nDumping classpath"); + dumpClasspath(); + } + }; + + final Thread thread = new Thread(target); + thread.setDaemon(true); + thread.start(); + + return thread; + } + + /** + * Dumps classpath to output stream. + */ + private static void dumpClasspath() { + final ClassLoader clsLdr = IgniteCompatibilityNodeRunner.class.getClassLoader(); + if (clsLdr instanceof URLClassLoader) { + URLClassLoader ldr = (URLClassLoader)clsLdr; + + for (URL url : ldr.getURLs()) { + X.println("Classpath url: [" + url.getPath() + "]"); + } + } } /** diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/util/MavenUtils.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/util/MavenUtils.java index fe73e48555c1d..b2c798d1d9af6 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/util/MavenUtils.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/util/MavenUtils.java @@ -40,35 +40,23 @@ public class MavenUtils { private static String locRepPath = null; /** - * Gets a path to an artifact with given version and groupId=org.apache.ignite and artifactId=ignite-core. - * + * Gets a path to an artifact with given version and groupId=org.apache.ignite and artifactId={@code artifactName}. + *
        * At first, artifact is looked for in the Maven local repository, if it isn't exists there, it will be downloaded * and stored via Maven. - * - * @param ver Version of ignite-core artifact. - * @return Path to the artifact. - * @throws Exception In case of an error. - * @see #getPathToArtifact(String) - */ - public static String getPathToIgniteCoreArtifact(@NotNull String ver) throws Exception { - return getPathToIgniteCoreArtifact(ver, null); - } - - /** - * Gets a path to an artifact with given version and groupId=org.apache.ignite and artifactId=ignite-core. - * - * At first, artifact is looked for in the Maven local repository, if it isn't exists there, it will be downloaded - * and stored via Maven. - * - * @param ver Version of ignite-core artifact. + *
        + * @param groupName group name, e.g. 'org.apache.ignite'. + * @param ver Version of ignite or 3rd party library artifact. * @param classifier Artifact classifier. * @return Path to the artifact. * @throws Exception In case of an error. * @see #getPathToArtifact(String) */ - public static String getPathToIgniteCoreArtifact(@NotNull String ver, + public static String getPathToIgniteArtifact(@NotNull String groupName, + @NotNull String artifactName, @NotNull String ver, @Nullable String classifier) throws Exception { - String artifact = "org.apache.ignite:ignite-core:" + ver; + String artifact = groupName + + ":" + artifactName + ":" + ver; if (classifier != null) artifact += ":jar:" + classifier; From c71da0c7af2803c4622faf07eba1f218f84472dd Mon Sep 17 00:00:00 2001 From: oleg-ostanin Date: Tue, 5 Dec 2017 15:32:17 +0300 Subject: [PATCH 187/243] IGNITW-7093 added plugin to build test.jar to ignite-compatibility pom.xml - Fixes #3142. Signed-off-by: Alexey Goncharuk (cherry picked from commit 0a1651c) --- modules/compatibility/pom.xml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/modules/compatibility/pom.xml b/modules/compatibility/pom.xml index 7dfbe68e3c5b0..d7031262d578f 100644 --- a/modules/compatibility/pom.xml +++ b/modules/compatibility/pom.xml @@ -102,6 +102,17 @@ + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + org.apache.maven.plugins maven-deploy-plugin From 46769a16ed6187d18c6eb1fbb9eb199f32dfde7f Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Tue, 19 Dec 2017 15:33:03 +0300 Subject: [PATCH 188/243] IGNITE-7244 Added missing README.txt for development utils (cherry picked from commit 6e8cfe3) --- modules/dev-utils/README.txt | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 modules/dev-utils/README.txt diff --git a/modules/dev-utils/README.txt b/modules/dev-utils/README.txt new file mode 100644 index 0000000000000..f839c4fe32638 --- /dev/null +++ b/modules/dev-utils/README.txt @@ -0,0 
+1,5 @@ +Apache Ignite Development Utils +------------------------ + +Special module contains Ignite development utilities. These utilities may be useful when analyzing PDS artifacts, +such as WAL or page storage, in field deployments. From bc1329ab77216175d88611f208604031750dd475 Mon Sep 17 00:00:00 2001 From: Alexey Popov Date: Tue, 12 Dec 2017 14:53:08 +0300 Subject: [PATCH 189/243] IGNITE-7170 Fix javadoc MemoryConfiguration (20% instead of 80%). This closes #3203. Signed-off-by: nikolay_tikhonov (cherry picked from commit 9656929) --- .../org/apache/ignite/configuration/MemoryConfiguration.java | 2 +- .../src/main/java/org/apache/ignite/internal/IgniteKernal.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/MemoryConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/MemoryConfiguration.java index c3d4e744ed7ea..f6f4e550c906d 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/MemoryConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/MemoryConfiguration.java @@ -267,7 +267,7 @@ public long getDefaultMemoryPolicySize() { * Overrides size of default memory policy which is created automatically. * * If user doesn't specify any memory policy configuration, a default one with default size - * (80% of available RAM) is created by Ignite. + * (20% of available RAM) is created by Ignite. * * This property allows user to specify desired size of default memory policy * without having to use more verbose syntax of MemoryPolicyConfiguration elements. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java index ae3aa73e0fd9e..9889c5c696ed9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java @@ -1431,7 +1431,7 @@ private void checkPhysicalRam() { long safeToUse = ram - Math.max(4L << 30, (long)(ram * 0.2)); if (total > safeToUse) { - U.quietAndWarn(log, "Nodes started on local machine require more than 80% of physical RAM what can " + + U.quietAndWarn(log, "Nodes started on local machine require more than 20% of physical RAM what can " + "lead to significant slowdown due to swapping (please decrease JVM heap size, data region " + "size or checkpoint buffer size) [required=" + (total >> 20) + "MB, available=" + (ram >> 20) + "MB]"); From d929c84e51fe28779d93b4127219c376045370a4 Mon Sep 17 00:00:00 2001 From: dkarachentsev Date: Wed, 20 Dec 2017 16:40:55 +0300 Subject: [PATCH 190/243] IGNITE-7206 Stop pings if current node is stopping. 
(cherry picked from commit db78735) --- .../org/apache/ignite/spi/discovery/tcp/ServerImpl.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java index d15fbde036023..e87c00ef2c06d 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java @@ -729,6 +729,13 @@ private boolean pingNode(TcpDiscoveryNode node) { break; else if (!spi.failureDetectionTimeoutEnabled() && reconCnt == spi.getReconnectCount()) break; + + if (spi.isNodeStopping0()) { + if (log.isDebugEnabled()) + log.debug("Stop pinging node, because node is stopping: [rmtNodeId=" + nodeId + ']'); + + break; + } } finally { U.closeQuiet(sock); From 0abfc3409a46b04d1c79a5c8760a8a6cdc1c65f7 Mon Sep 17 00:00:00 2001 From: vsisko Date: Thu, 21 Dec 2017 16:17:14 +0700 Subject: [PATCH 191/243] IGNITE-6976 Visor CMD: Implemented simple cache put/get/remove operations. 
(cherry picked from commit 35622cb) --- .../rest/request/RestQueryRequest.java | 26 +- .../ignite/visor/commands/VisorConsole.scala | 1 + .../visor/commands/ack/VisorAckCommand.scala | 5 +- .../commands/alert/VisorAlertCommand.scala | 4 +- .../commands/cache/VisorCacheCommand.scala | 29 +- .../cache/VisorCacheModifyCommand.scala | 413 ++++++++++++++++++ .../commands/common/VisorConsoleCommand.scala | 45 +- .../commands/common/VisorTextTable.scala | 26 +- .../config/VisorConfigurationCommand.scala | 57 ++- .../disco/VisorDiscoveryCommand.scala | 4 +- .../commands/events/VisorEventsCommand.scala | 9 +- .../visor/commands/gc/VisorGcCommand.scala | 4 +- .../commands/kill/VisorKillCommand.scala | 4 +- .../commands/node/VisorNodeCommand.scala | 12 +- .../commands/open/VisorOpenCommand.scala | 28 +- .../commands/ping/VisorPingCommand.scala | 4 +- .../commands/start/VisorStartCommand.scala | 4 +- .../commands/tasks/VisorTasksCommand.scala | 4 +- .../commands/top/VisorTopologyCommand.scala | 4 +- .../visor/commands/vvm/VisorVvmCommand.scala | 4 +- .../scala/org/apache/ignite/visor/visor.scala | 174 ++++---- 21 files changed, 632 insertions(+), 229 deletions(-) create mode 100644 modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheModifyCommand.scala diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/request/RestQueryRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/request/RestQueryRequest.java index 75c74db557f37..ca24a27424829 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/request/RestQueryRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/request/RestQueryRequest.java @@ -1,20 +1,18 @@ /* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * * Licensed to the Apache Software Foundation (ASF) under one or more - * * contributor license agreements. See the NOTICE file distributed with - * * this work for additional information regarding copyright ownership. - * * The ASF licenses this file to You under the Apache License, Version 2.0 - * * (the "License"); you may not use this file except in compliance with - * * the License. You may obtain a copy of the License at - * * - * * http://www.apache.org/licenses/LICENSE-2.0 - * * - * * Unless required by applicable law or agreed to in writing, software - * * distributed under the License is distributed on an "AS IS" BASIS, - * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * * See the License for the specific language governing permissions and - * * limitations under the License. + * http://www.apache.org/licenses/LICENSE-2.0 * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.ignite.internal.processors.rest.request; diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala index a43f9ff67b594..8bf64b7b7dfe8 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/VisorConsole.scala @@ -65,6 +65,7 @@ class VisorConsole { org.apache.ignite.visor.commands.cache.VisorCacheResetCommand org.apache.ignite.visor.commands.cache.VisorCacheRebalanceCommand org.apache.ignite.visor.commands.cache.VisorCacheCommand + org.apache.ignite.visor.commands.cache.VisorCacheModifyCommand org.apache.ignite.visor.commands.config.VisorConfigurationCommand org.apache.ignite.visor.commands.deploy.VisorDeployCommand org.apache.ignite.visor.commands.disco.VisorDiscoveryCommand diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/ack/VisorAckCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/ack/VisorAckCommand.scala index 13c343aea88de..eadf64b23e762 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/ack/VisorAckCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/ack/VisorAckCommand.scala @@ -88,9 +88,7 @@ class VisorAckCommand extends VisorConsoleCommand { * @param msg Optional command argument. If `null` this function is no-op. 
*/ def ack(msg: String) { - if (!isConnected) - adviseToConnect() - else + if (checkConnected()) { try { executeMulti(classOf[VisorAckTask], new VisorAckTaskArg(msg)) } @@ -98,6 +96,7 @@ class VisorAckCommand extends VisorConsoleCommand { case _: ClusterGroupEmptyException => scold("Topology is empty.") case e: Exception => scold("System error: " + e.getMessage) } + } } } diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/alert/VisorAlertCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/alert/VisorAlertCommand.scala index 254dc2de85706..ffa6a949079f2 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/alert/VisorAlertCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/alert/VisorAlertCommand.scala @@ -260,9 +260,7 @@ class VisorAlertCommand extends VisorConsoleCommand { breakable { assert(args != null) - if (!isConnected) - adviseToConnect() - else { + if (checkConnected()) { var name: Option[String] = None var script: Option[String] = None val conditions = mutable.ArrayBuffer.empty[VisorAlertCondition] diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala index fec5a9647fe54..3571efbaafa04 100755 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala @@ -17,17 +17,18 @@ package org.apache.ignite.visor.commands.cache -import java.util.{Collection => JavaCollection, List => JavaList, Collections, UUID} +import java.util.{Collections, UUID, Collection => JavaCollection, List => JavaList} import org.apache.ignite._ import org.apache.ignite.cluster.ClusterNode import org.apache.ignite.internal.util.lang.{GridFunc => F} 
+import org.apache.ignite.internal.util.scala.impl import org.apache.ignite.internal.util.typedef.X import org.apache.ignite.internal.visor.cache._ import org.apache.ignite.internal.visor.util.VisorTaskUtils._ import org.apache.ignite.visor.VisorTag import org.apache.ignite.visor.commands.cache.VisorCacheCommand._ -import org.apache.ignite.visor.commands.common.VisorTextTable +import org.apache.ignite.visor.commands.common.{VisorConsoleCommand, VisorTextTable} import org.apache.ignite.visor.visor._ import org.jetbrains.annotations._ @@ -161,18 +162,8 @@ import scala.language.{implicitConversions, reflectiveCalls} * * }}} */ -class VisorCacheCommand { - /** - * Prints error message and advise. - * - * @param errMsgs Error messages. - */ - private def scold(errMsgs: Any*) { - assert(errMsgs != null) - - warn(errMsgs: _*) - warn("Type 'help cache' to see how to use this command.") - } +class VisorCacheCommand extends VisorConsoleCommand { + @impl protected val name: String = "cache" /** * ===Command=== @@ -214,14 +205,7 @@ class VisorCacheCommand { * @param args Command arguments. 
*/ def cache(args: String) { - if (!isConnected) - adviseToConnect() - else if (!isActive) { - warn("Can not perform the operation because the cluster is inactive.", - "Note, that the cluster is considered inactive by default if Ignite Persistent Store is used to let all the nodes join the cluster.", - "To activate the cluster execute following command: top -activate.") - } - else { + if (checkConnected() && checkActiveState()) { var argLst = parseArgs(args) if (hasArgFlag("i", argLst)) { @@ -639,7 +623,6 @@ class VisorCacheCommand { def askForCache(title: String, node: Option[ClusterNode], showSystem: Boolean = false, aggrData: Seq[VisorCacheAggregatedMetrics]): Option[String] = { assert(title != null) - assert(visor.visor.isConnected) if (aggrData.isEmpty) { scold("No caches found.") diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheModifyCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheModifyCommand.scala new file mode 100644 index 0000000000000..7461889f316c0 --- /dev/null +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheModifyCommand.scala @@ -0,0 +1,413 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.visor.commands.cache + +import java.util.{Calendar, Date, GregorianCalendar, UUID} + +import org.apache.ignite.internal.util.lang.{GridFunc => F} +import org.apache.ignite.internal.util.scala.impl +import org.apache.ignite.internal.visor.cache._ +import org.apache.ignite.internal.visor.util.VisorTaskUtils._ +import org.apache.ignite.visor.commands.cache.VisorCacheModifyCommand._ +import org.apache.ignite.visor.commands.common.VisorConsoleCommand +import org.apache.ignite.visor.visor._ + +/** + * ==Overview== + * Visor 'modify' command implementation. + * + * ==Help== + * {{{ + * +-----------------------------------------------------------------------------------------+ + * | modify -put | Put custom value into cache. | + * +-----------------------------------------------------------------------------------------+ + * | modify -get | Get value with specified key from cache. | + * +-----------------------------------------------------------------------------------------+ + * | modify -remove | Remove value with specified key from cache. | + * +-----------------------------------------------------------------------------------------+ + * + * }}} + * + * ====Specification==== + * {{{ + * modify -put -c= {-kt=} {-kv=} {-vt=} {-v=} + * modify -get -c= {-kt=} {-kv=} + * modify -remove -c= {-kt=} {-kv=} + * }}} + * + * ====Arguments==== + * {{{ + * -c= + * Name of the cache. + * -kt= + * Type of key. Default value is java.lang.String. Short type name can be specified. + * -kv= + * Key. Asked in interactive mode when it is not specified. + * -vt=. + * Type of value. Default value is java.lang.String. Short type name can be specified. + * Value type is equals to key type when value is not specified. + * -v= + * Value. Equals to key when it is not specified. + * Asked in interactive mode when key and value are not specified. 
+ * }}} + * + * ====Examples==== + * {{{ + * modify -put -c=@c0 + * Put value into cache in interactive mode. + * modify -get -c=@c0 + * Get value from cache in interactive mode. + * modify -remove -c=@c0 + * Remove value from cache in interactive mode. + * modify -put -c=cache -kv=key1 + * Put value into cache with name cache with key of default String type equal to key1 + * and value equal to key. + * modify -put -c=cache -kt=java.lang.String -kv=key1 -vt=lava.lang.String -v=value1 + * Put value into cache with name cache with key of String type equal to key1 + * and value of String type equal to value1 + * modify -get -c=cache -kt=java.lang.String -kv=key1 + * Get value from cache with name cache with key of String type equal to key1 + * modify -remove -c=cache -kt=java.lang.String -kv=key1 + * Remove value from cache with name cache with key of String type equal to key1. + * + * }}} + */ +class VisorCacheModifyCommand extends VisorConsoleCommand { + @impl protected val name = "modify" + + /** + * ===Command=== + * Modify cache value in specified cache. + * + * ===Examples=== + * modify -put -c=@c0 + * Put value into cache with name taken from 'c0' memory variable in interactive mode. + *
        + * modify -get + * Get value from cache with name taken from 'c0' memory variable in interactive mode. + *
        + * modify -remove + * Remove value from cache with name taken from 'c0' memory variable in interactive mode. + *
        + * modify -put -c=cache -kt=java.lang.String -k=key1 -vt=lava.lang.String -v=value1 + * Put value into cache with name 'cache' with key of String type equal to 'key1' + * and value of String type equal to 'value1' + *
        + * modify -get -c=cache -kt=java.lang.String -k=key1 + * Get value from cache with name 'cache' with key of String type equal to 'key1' + *
        + * modify -remove -c=cache -kt=java.lang.String -k=key1 + * Remove value from cache with name 'cache' with key of String type equal to 'key1'. + * + * @param args Command arguments. + */ + def modify(args: String) { + if (checkConnected() && checkActiveState()) { + def argNonEmpty(argLst: ArgList, arg: Option[String], key: String): Boolean = { + if (hasArgName(key, argLst) && arg.forall((a) => F.isEmpty(a))) { + warn(s"Argument $key is specified and can not be empty") + + false + } + else + true + } + + var argLst = parseArgs(args) + + val put = hasArgFlag("put", argLst) + val get = hasArgFlag("get", argLst) + val remove = hasArgFlag("remove", argLst) + + if (!put && !get && !remove) { + warn("Put, get, or remove operation should be specified") + + return + } + + if (put && get || get && remove || get && remove) { + warn("Only one operation put, get or remove allowed in one command invocation") + + return + } + + if (!hasArgName("c", argLst)) { + warn("Cache name should be specified") + + return + } + + val cacheName = argValue("c", argLst) match { + case Some(dfltName) if dfltName == DFLT_CACHE_KEY || dfltName == DFLT_CACHE_NAME => + argLst = argLst.filter(_._1 != "c") ++ Seq("c" -> null) + + Some(null) + + case cn => cn + } + + if (cacheName.isEmpty) { + warn("Cache with specified name is not found") + + return + } + + val keyTypeStr = argValue("kt", argLst) + val keyStr = argValue("k", argLst) + var key: Object = null + + if (keyTypeStr.nonEmpty && keyStr.isEmpty) { + warn("Key should be specified when key type is specified") + + return + } + + val valueTypeStr = argValue("vt", argLst) + val valueStr = argValue("v", argLst) + var value: Object = null + + if (valueTypeStr.nonEmpty && valueStr.isEmpty) { + warn("Value should be specified when value type is specified") + + return + } + + if (!argNonEmpty(argLst, keyTypeStr, "kt") + || !argNonEmpty(argLst, keyStr, "k") + || !argNonEmpty(argLst, valueTypeStr, "vt") + || !argNonEmpty(argLst, valueStr, "v")) + 
return + + keyTypeStr match { + case Some(clsStr) => + try { + INPUT_TYPES.find(_._3.getName.indexOf(clsStr) >= 0) match { + case Some(t) => key = t._2(keyStr.get) + case None => + warn("Specified type is not allowed") + + return + } + } + catch { + case e: Throwable => + warn("Failed to read key: " + e.getMessage) + + return + } + + case None if keyStr.nonEmpty => + key = keyStr.get + + case None if put && valueStr.nonEmpty => // No-op. + + case None => + askTypedValue("key") match { + case Some(k) if k.toString.nonEmpty => key = k + case _ => + warn("Key can not be empty.") + + return + } + } + + if (put) { + valueTypeStr match { + case Some(clsStr) => + try { + INPUT_TYPES.find(_._3.getName.indexOf(clsStr) >= 0) match { + case Some(t) => value = t._2(valueStr.get) + case None => warn("Specified type is not allowed") + + return + } + } + catch { + case e: Throwable => + warn("Failed to read value: " + e.getMessage) + + return + } + case None if valueStr.nonEmpty => + value = valueStr.get + + case None => + askTypedValue("value") match { + case Some(v) if v.toString.nonEmpty => value = v + case _ => + warn("Value can not be empty.") + + return + } + } + + if (key == null) + key = value + } + + if ((get || remove) && valueTypeStr.nonEmpty) + warn("Specified value is not used by selected operation and will be ignored") + + val arg = new VisorCacheModifyTaskArg(cacheName.get, + if (put) VisorModifyCacheMode.PUT else if (get) VisorModifyCacheMode.GET else VisorModifyCacheMode.REMOVE, + key, value + ) + + try { + val taskResult = executeRandom(classOf[VisorCacheModifyTask], arg) + val resultObj = taskResult.getResult match { + case d: Date => + val cal = new GregorianCalendar() + cal.setTime(d) + + if (cal.get(Calendar.HOUR_OF_DAY) == 0 && cal.get(Calendar.MINUTE) == 0 + && cal.get(Calendar.SECOND) == 0) + formatDate(d) + else + formatDateTime(d) + + case v => v + } + val affinityNode = taskResult.getAffinityNode + + if (put) { + println("Put operation success" + "; 
Affinity node: " + nid8(affinityNode)) + + if (resultObj != null) + println("Previous value is: " + resultObj) + } + + if (get) { + if (resultObj != null) + println("Value with specified key: " + resultObj + "; Affinity node: " + nid8(affinityNode)) + else + println("Value with specified key not found") + } + + if (remove) { + if (resultObj != null) + println("Removed value: " + resultObj + "; Affinity node: " + nid8(affinityNode)) + else + println("Value with specified key not found") + } + } + catch { + case e: Throwable => + warn("Failed to execute cache modify operation: " + e.getMessage) + } + } + } + + /** + * ===Command=== + * Modify cache data by execution of put/get/remove command. + * + * ===Examples=== + * modify -put -c=@c0 + * Put entity in cache with name taken from 'c0' memory variable in interactive mode + */ + def modify() { + this.modify("") + } +} + +/** + * Companion object that does initialization of the command. + */ +object VisorCacheModifyCommand { + /** Singleton command */ + private val cmd = new VisorCacheModifyCommand + + /** Default cache name to show on screen. */ + private final val DFLT_CACHE_NAME = escapeName(null) + + /** Default cache key. */ + protected val DFLT_CACHE_KEY: String = DFLT_CACHE_NAME + "-" + UUID.randomUUID().toString + + addHelp( + name = "modify", + shortInfo = "Modify cache by put/get/remove value.", + longInfo = Seq( + "Execute modification of cache data:", + " ", + "Put new value into cache.", + " ", + "Get value from cache.", + " ", + "Remove value from cache." 
+ ), + spec = Seq( + "modify -put -c= {-kt=} {-k=} {-vt=} {-v=}", + "modify -get -c= {-kt=} {-k=}", + "modify -remove -c= {-kt=} {-k=}" + ), + args = Seq( + "-c=" -> + "Name of the cache", + "-put" -> Seq( + "Put value into cache and show its affinity node.", + "If the cache previously contained a mapping for the key, the old value is shown", + "Key and value are asked in interactive mode when they are not specified.", + "Key is equals to value when key is not specified." + ), + "-get" -> Seq( + "Get value from cache and show its affinity node.", + "Key is asked in interactive mode when it is not specified." + ), + "-remove" -> Seq( + "Remove value from cache and show its affinity node.", + "Key is asked in interactive mode when it is not specified." + ), + "-kt=" -> + "Type of key. Default type is java.lang.String. Type name can be specified without package.", + "-k=" -> + "Key. Must be specified when key type is specified.", + "-vt=" -> + "Type of value. Default type is java.lang.String. Type name can be specified without package.", + "-v=" -> + "Value. Must be specified when value type is specified." 
+ ), + examples = Seq( + "modify -put -c=@c0" -> + "Put value into cache with name taken from 'c0' memory variable in interactive mode.", + "modify -get -c=@c0" -> + "Get value from cache with name taken from 'c0' memory variable in interactive mode.", + "modify -remove -c=@c0" -> + "Remove value from cache with name taken from 'c0' memory variable in interactive mode.", + "modify -put -c=cache -v=value1" -> Seq( + "Put the value 'value1' into the cache 'cache'.", + "Other params have default values: -kt = java.lang.String , -k = value1, -vt = java.lang.String" + ), + "modify -put -c=@c0 -kt=java.lang.String -k=key1 -vt=lava.lang.String -v=value1" -> Seq( + "Put value into cache with name taken from 'c0' memory variable", + "with key of String type equal to 'key1' and value of String type equal to 'value1'" + ), + "modify -get -c=@c0 -kt=java.lang.String -k=key1" -> + "Get value from cache with name taken from 'c0' memory variable with key of String type equal to key1", + "modify -remove -c=@c0 -kt=java.lang.String -k=key1" -> + "Remove value from cache with name taken from 'c0' memory variable with key of String type equal to key1." + ), + emptyArgs = cmd.modify, + withArgs = cmd.modify + ) + + /** + * Singleton. + */ + def apply(): VisorCacheModifyCommand = cmd +} diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorConsoleCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorConsoleCommand.scala index fb2b7165ad482..0658ad47a5233 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorConsoleCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorConsoleCommand.scala @@ -1,20 +1,18 @@ /* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * * Licensed to the Apache Software Foundation (ASF) under one or more - * * contributor license agreements. See the NOTICE file distributed with - * * this work for additional information regarding copyright ownership. - * * The ASF licenses this file to You under the Apache License, Version 2.0 - * * (the "License"); you may not use this file except in compliance with - * * the License. You may obtain a copy of the License at - * * - * * http://www.apache.org/licenses/LICENSE-2.0 - * * - * * Unless required by applicable law or agreed to in writing, software - * * distributed under the License is distributed on an "AS IS" BASIS, - * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * * See the License for the specific language governing permissions and - * * limitations under the License. + * http://www.apache.org/licenses/LICENSE-2.0 * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.ignite.visor.commands.common @@ -57,6 +55,21 @@ trait VisorConsoleCommand { ) } + /** + * Check cluster active state and show inform message when cluster has inactive state. + * + * @return `True` when cluster is active. 
+ */ + protected def checkActiveState(): Boolean = { + visor.isActive || { + warn("Can not perform the operation because the cluster is inactive.", + "Note, that the cluster is considered inactive by default if Ignite Persistent Store is used to let all the nodes join the cluster.", + "To activate the cluster execute following command: top -activate.") + + false + } + } + /** * Prints warn message and advise. * @@ -75,14 +88,14 @@ trait VisorConsoleCommand { * @param lines Lines to join together. * @return Joined line. */ - protected def join(lines: java.lang.Iterable[_ <: Any]) = { + protected def join(lines: java.lang.Iterable[_ <: Any]): String = { if (lines == null || lines.isEmpty) NA else lines.mkString("[", ", ", "]") } - protected def join(lines: Array[_ <: Any]) = { + protected def join(lines: Array[_ <: Any]): String = { if (lines == null || lines.isEmpty) NA else diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorTextTable.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorTextTable.scala index e6fe35f8d9b7b..58e3f21764186 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorTextTable.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/common/VisorTextTable.scala @@ -1,20 +1,18 @@ /* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * * Licensed to the Apache Software Foundation (ASF) under one or more - * * contributor license agreements. 
See the NOTICE file distributed with - * * this work for additional information regarding copyright ownership. - * * The ASF licenses this file to You under the Apache License, Version 2.0 - * * (the "License"); you may not use this file except in compliance with - * * the License. You may obtain a copy of the License at - * * - * * http://www.apache.org/licenses/LICENSE-2.0 - * * - * * Unless required by applicable law or agreed to in writing, software - * * distributed under the License is distributed on an "AS IS" BASIS, - * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * * See the License for the specific language governing permissions and - * * limitations under the License. + * http://www.apache.org/licenses/LICENSE-2.0 * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.ignite.visor.commands.common diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/config/VisorConfigurationCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/config/VisorConfigurationCommand.scala index 0185228877a85..67d9c142cf68f 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/config/VisorConfigurationCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/config/VisorConfigurationCommand.scala @@ -83,13 +83,12 @@ class VisorConfigurationCommand extends VisorConsoleCommand { * Starts command in interactive mode. 
*/ def config() { - if (isConnected) + if (checkConnected()) { askForNode("Select node from:") match { case Some(id) => config("-id=" + id) case None => () } - else - adviseToConnect() + } } /** @@ -103,44 +102,40 @@ class VisorConfigurationCommand extends VisorConsoleCommand { * @param args Command arguments. */ def config(args: String) { - if (!isConnected) { - adviseToConnect() - - return - } - - val argLst = parseArgs(args) + if (checkConnected()) { + val argLst = parseArgs(args) - val nid = parseNode(argLst) match { - case Left(msg) => - scold(msg) + val nid = parseNode(argLst) match { + case Left(msg) => + scold(msg) - return + return - case Right(None) => - scold("One of -id8 or -id is required.") + case Right(None) => + scold("One of -id8 or -id is required.") - return + return - case Right(Some(n)) => - assert(n != null) + case Right(Some(n)) => + assert(n != null) - n.id() - } + n.id() + } - try { - val cfg = collectConfiguration(nid) + try { + val cfg = collectConfiguration(nid) - printConfiguration(cfg) + printConfiguration(cfg) - cacheConfigurations(nid).foreach(ccfg => { - println() + cacheConfigurations(nid).foreach(ccfg => { + println() - printCacheConfiguration(s"Cache '${escapeName(ccfg.getName)}':", ccfg) - }) - } - catch { - case e: Throwable => scold(e) + printCacheConfiguration(s"Cache '${escapeName(ccfg.getName)}':", ccfg) + }) + } + catch { + case e: Throwable => scold(e) + } } } diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/disco/VisorDiscoveryCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/disco/VisorDiscoveryCommand.scala index de69cd7e86525..3b4c90f94c7c6 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/disco/VisorDiscoveryCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/disco/VisorDiscoveryCommand.scala @@ -117,9 +117,7 @@ class VisorDiscoveryCommand extends VisorConsoleCommand { * Prints 
discovery events fired during last two minutes. */ def disco(args: String) { - if (!isConnected) - adviseToConnect() - else { + if (checkConnected()) { val argLst = parseArgs(args) val fs = argValue("t", argLst) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/events/VisorEventsCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/events/VisorEventsCommand.scala index c41eca1c88b61..e1d36c78d1791 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/events/VisorEventsCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/events/VisorEventsCommand.scala @@ -127,9 +127,7 @@ class VisorEventsCommand extends VisorConsoleCommand { * Starts command in interactive mode. */ def events() { - if (!isConnected) - adviseToConnect() - else + if (checkConnected()) { askForNode("Select node from:") match { case Some(id) => ask("Sort [c]ronologically or by [e]vent type (c/e) [c]: ", "c") match { case "c" | "C" => nl(); events("-s=t -id=" + id) @@ -138,6 +136,7 @@ class VisorEventsCommand extends VisorConsoleCommand { } case None => () } + } } /** @@ -208,7 +207,7 @@ class VisorEventsCommand extends VisorConsoleCommand { * @param args Command parameters. 
*/ def events(args: String) { - if (isConnected) { + if (checkConnected()) { val argLst = parseArgs(args) parseNode(argLst) match { @@ -328,8 +327,6 @@ class VisorEventsCommand extends VisorConsoleCommand { all.render() } } - else - adviseToConnect() } /** diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/gc/VisorGcCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/gc/VisorGcCommand.scala index 5f642d76ff71c..6f2ddb772fb77 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/gc/VisorGcCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/gc/VisorGcCommand.scala @@ -83,7 +83,7 @@ class VisorGcCommand extends VisorConsoleCommand { def gc(args: String) { assert(args != null) - if (isConnected) { + if (checkConnected()) { val argLst = parseArgs(args) try { @@ -123,8 +123,6 @@ class VisorGcCommand extends VisorConsoleCommand { case e: IgniteException => scold(e) } } - else - adviseToConnect() } /** diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/kill/VisorKillCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/kill/VisorKillCommand.scala index c705e210bc6ff..059c38761ffba 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/kill/VisorKillCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/kill/VisorKillCommand.scala @@ -127,9 +127,7 @@ class VisorKillCommand extends VisorConsoleCommand { * @param args Command arguments. 
*/ def kill(args: String) = breakable { - if (!isConnected) - adviseToConnect() - else { + if (checkConnected()) { val argLst = parseArgs(args) val iNodes = hasArgFlag("in", argLst) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/node/VisorNodeCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/node/VisorNodeCommand.scala index 93cf2334db909..b240aa5187723 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/node/VisorNodeCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/node/VisorNodeCommand.scala @@ -93,9 +93,7 @@ class VisorNodeCommand extends VisorConsoleCommand { * Starts command in interactive mode. */ def node() { - if (!isConnected) - adviseToConnect() - else + if (checkConnected()) { askForNode("Select node from:") match { case Some(id) => ask("Detailed statistics (y/n) [n]: ", "n") match { case "n" | "N" => nl(); node("-id=" + id) @@ -104,6 +102,7 @@ class VisorNodeCommand extends VisorConsoleCommand { } case None => () } + } } /** @@ -120,9 +119,7 @@ class VisorNodeCommand extends VisorConsoleCommand { * @param args Command arguments. 
*/ def node(@Nullable args: String) = breakable { - if (!isConnected) - adviseToConnect() - else + if (checkConnected()) { try { val argLst = parseArgs(args) @@ -264,7 +261,7 @@ class VisorNodeCommand extends VisorConsoleCommand { t += ("Cur/avg CPU load %", formatDouble(m.getCurrentCpuLoad * 100) + "/" + formatDouble(m.getAverageCpuLoad * 100) + "%") t += ("Heap memory used/max", formatMemory(m.getHeapMemoryUsed) + - "/" + formatMemory(m.getHeapMemoryMaximum)) + "/" + formatMemory(m.getHeapMemoryMaximum)) } println("Time of the snapshot: " + formatDateTime(System.currentTimeMillis)) @@ -279,6 +276,7 @@ class VisorNodeCommand extends VisorConsoleCommand { catch { case e: Exception => scold(e) } + } } } diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/open/VisorOpenCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/open/VisorOpenCommand.scala index 949aa0081165d..f62ba3c4afc7f 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/open/VisorOpenCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/open/VisorOpenCommand.scala @@ -1,20 +1,18 @@ /* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * * Licensed to the Apache Software Foundation (ASF) under one or more - * * contributor license agreements. See the NOTICE file distributed with - * * this work for additional information regarding copyright ownership. 
- * * The ASF licenses this file to You under the Apache License, Version 2.0 - * * (the "License"); you may not use this file except in compliance with - * * the License. You may obtain a copy of the License at - * * - * * http://www.apache.org/licenses/LICENSE-2.0 - * * - * * Unless required by applicable law or agreed to in writing, software - * * distributed under the License is distributed on an "AS IS" BASIS, - * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * * See the License for the specific language governing permissions and - * * limitations under the License. - * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.ignite.visor.commands.open diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/ping/VisorPingCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/ping/VisorPingCommand.scala index 97be127b7381d..307b78d672430 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/ping/VisorPingCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/ping/VisorPingCommand.scala @@ -113,9 +113,7 @@ class VisorPingCommand extends VisorConsoleCommand { * @param args List of node ID8s. If empty or null - pings all nodes in the topology. 
*/ def ping(args: String) = breakable { - if (!isConnected) - adviseToConnect() - else { + if (checkConnected()) { val argLst = parseArgs(args) val res = new Result() diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/start/VisorStartCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/start/VisorStartCommand.scala index 936559227377a..619ab5d06a861 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/start/VisorStartCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/start/VisorStartCommand.scala @@ -151,9 +151,7 @@ class VisorStartCommand extends VisorConsoleCommand { def start(args: String) = breakable { assert(args != null) - if (!isConnected) - adviseToConnect() - else { + if (checkConnected()) { val argLst = parseArgs(args) val fileOpt = argValue("f", argLst) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/tasks/VisorTasksCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/tasks/VisorTasksCommand.scala index 966bd6484f3ca..91868662781e5 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/tasks/VisorTasksCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/tasks/VisorTasksCommand.scala @@ -380,9 +380,7 @@ class VisorTasksCommand extends VisorConsoleCommand { * @param args Command arguments. 
*/ def tasks(args: String) { - if (!isConnected) - adviseToConnect() - else { + if (checkConnected()) { val argLst = parseArgs(args) if (hasArgFlag("l", argLst)) { diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/top/VisorTopologyCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/top/VisorTopologyCommand.scala index b75afc67897aa..79858eed63c2e 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/top/VisorTopologyCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/top/VisorTopologyCommand.scala @@ -145,9 +145,7 @@ class VisorTopologyCommand extends VisorConsoleCommand { def top(args: String) = breakable { assert(args != null) - if (!isConnected) - adviseToConnect() - else { + if (checkConnected()) { val argLst = parseArgs(args) if (hasArgFlag("activate", argLst)) diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/vvm/VisorVvmCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/vvm/VisorVvmCommand.scala index e935256ed6484..350adc3d0f717 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/vvm/VisorVvmCommand.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/vvm/VisorVvmCommand.scala @@ -101,9 +101,7 @@ class VisorVvmCommand extends VisorConsoleCommand { * @param args Command parameters. 
*/ def vvm(@Nullable args: String) = breakable { - if (!isConnected) - adviseToConnect() - else { + if (checkConnected()) { val argLst = parseArgs(args) val vvmHome = argValue("home", argLst) getOrElse IgniteSystemProperties.getString("VVM_HOME") diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala index 5765579989e85..c24bc25e3ca8a 100644 --- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala +++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/visor.scala @@ -17,6 +17,13 @@ package org.apache.ignite.visor +import java.io._ +import java.lang.{Boolean => JavaBoolean, Byte => JavaByte, Character => JavaCharacter, Double => JavaDouble, Float => JavaFloat, Integer => JavaInteger, Long => JavaLong, Short => JavaShort} +import java.text._ +import java.util.concurrent._ +import java.util.{Collection => JavaCollection, HashSet => JavaHashSet, _} + +import jline.console.ConsoleReader import org.apache.ignite.IgniteSystemProperties.IGNITE_UPDATE_NOTIFIER import org.apache.ignite._ import org.apache.ignite.cluster.{ClusterGroup, ClusterGroupEmptyException, ClusterMetrics, ClusterNode} @@ -28,23 +35,15 @@ import org.apache.ignite.internal.cluster.ClusterGroupEmptyCheckedException import org.apache.ignite.internal.util.lang.{GridFunc => F} import org.apache.ignite.internal.util.typedef._ import org.apache.ignite.internal.util.{GridConfigurationFinder, IgniteUtils => U} -import org.apache.ignite.lang._ -import org.apache.ignite.thread.{IgniteThreadFactory, IgniteThreadPoolExecutor} -import org.apache.ignite.visor.commands.common.VisorTextTable -import jline.console.ConsoleReader -import org.jetbrains.annotations.Nullable -import java.io._ -import java.lang.{Boolean => JavaBoolean} -import java.text._ -import java.util.concurrent._ -import java.util.{Collection => JavaCollection, HashSet => JavaHashSet, _} - import 
org.apache.ignite.internal.visor.cache._ -import org.apache.ignite.internal.visor.node.VisorNodeEventsCollectorTaskArg -import org.apache.ignite.internal.visor.node._ +import org.apache.ignite.internal.visor.node.{VisorNodeEventsCollectorTaskArg, _} import org.apache.ignite.internal.visor.util.VisorEventMapper import org.apache.ignite.internal.visor.util.VisorTaskUtils._ import org.apache.ignite.internal.visor.{VisorMultiNodeTask, VisorTaskArgument} +import org.apache.ignite.lang._ +import org.apache.ignite.thread.{IgniteThreadFactory, IgniteThreadPoolExecutor} +import org.apache.ignite.visor.commands.common.VisorTextTable +import org.jetbrains.annotations.Nullable import scala.collection.JavaConversions._ import scala.collection.immutable @@ -133,6 +132,32 @@ object visor extends VisorTag { /** Type alias for general event filter. */ type EventFilter = Event => Boolean + private final val LOC = Locale.US + + /** Date format. */ + private final val dateFmt = new SimpleDateFormat("yyyy-MM-dd", LOC) + + /** Date time format. 
*/ + private final val timeFmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", LOC) + + final val INPUT_TYPES: Seq[(String, (String => Object), Class[_])] = Seq( + ("java.lang.String", (value: String) => value, classOf[String]), + ("java.lang.Character", (value: String) => JavaCharacter.valueOf(value.head), classOf[JavaCharacter]), + ("java.lang.Integer", (value: String) => JavaInteger.valueOf(value), classOf[JavaInteger]), + ("java.lang.Long", (value: String) => JavaLong.valueOf(value), classOf[JavaLong]), + ("java.lang.Short", (value: String) => JavaShort.valueOf(value), classOf[JavaShort]), + ("java.lang.Byte", (value: String) => JavaByte.valueOf(value), classOf[JavaByte]), + ("java.lang.Float", (value: String) => JavaFloat.valueOf(value), classOf[JavaFloat]), + ("java.lang.Double", (value: String) => JavaDouble.valueOf(value), classOf[JavaDouble]), + ("java.lang.Boolean", (value: String) => JavaBoolean.valueOf(value), classOf[JavaBoolean]), + ("java.util.Date - Value in format yyyy-MM-dd {HH:mm:ss}", + (value: String) => try + timeFmt.parse(value) + catch { + case e: ParseException => dateFmt.parse(value) + }, classOf[Date]), + ("java.util.UUID - Value like this: CC03C3B0-C03D-4B02-82AF-3E0F85414BA6", (value: String) => UUID.fromString(value), classOf[UUID])) + /** `Nil` is for empty list, `Til` is for empty tuple. */ val Til: Arg = (null, null) @@ -172,14 +197,6 @@ object visor extends VisorTag { /** */ @volatile private var conTs: Long = 0 - private final val LOC = Locale.US - - /** Date time format. */ - private final val dtFmt = new SimpleDateFormat("MM/dd/yy, HH:mm:ss", LOC) - - /** Date format. */ - private final val dFmt = new SimpleDateFormat("dd MMMM yyyy", LOC) - private final val DEC_FMT_SYMS = new DecimalFormatSymbols(LOC) /** Number format. 
*/ @@ -651,7 +668,7 @@ object visor extends VisorTag { private def clearNamespace(namespace: String) { assert(namespace != null) - mem.keySet.foreach(k => { + mem.keys().foreach(k => { if (k.matches(s"$namespace\\d+")) mem.remove(k) }) @@ -1165,7 +1182,7 @@ object visor extends VisorTag { * @param ts Timestamp. */ def formatDateTime(ts: Long): String = - dtFmt.format(ts) + timeFmt.format(ts) /** * Returns string representation of the date provided. Result formatted using @@ -1174,7 +1191,7 @@ object visor extends VisorTag { * @param date Date. */ def formatDateTime(date: Date): String = - dtFmt.format(date) + timeFmt.format(date) /** * Returns string representation of the timestamp provided. Result formatted @@ -1183,7 +1200,7 @@ object visor extends VisorTag { * @param ts Timestamp. */ def formatDate(ts: Long): String = - dFmt.format(ts) + dateFmt.format(ts) /** * Returns string representation of the date provided. Result formatted using @@ -1192,7 +1209,7 @@ object visor extends VisorTag { * @param date Date. */ def formatDate(date: Date): String = - dFmt.format(date) + dateFmt.format(date) /** * Base class for memory units. @@ -1328,13 +1345,19 @@ object visor extends VisorTag { } /** - * Prints standard 'not connected' error message. + * Check connection state and show inform message when Visor console is not connected to cluster. + * + * @return `True` when Visor console is connected to cluster. */ - def adviseToConnect() { - warn( - "Visor is disconnected.", - "Type 'open' to connect Visor console or 'help open' to get help." - ) + def checkConnected(): Boolean = { + isCon || { + warn( + "Visor is disconnected.", + "Type 'open' to connect Visor console or 'help open' to get help." 
+ ) + + false + } } /** @@ -2075,6 +2098,50 @@ object visor extends VisorTag { } } + def askTypedValue(name: String): Option[Object] = { + val t = VisorTextTable() + + t #= ("#", "Type description") + + INPUT_TYPES.indices.foreach(i => t += (i, INPUT_TYPES(i)._1)) + + println("Available " + name + " types:") + + t.render() + + val a = ask("\nChoose " + name + " type ('c' to cancel) [0]: ", "0") + + if (a.toLowerCase == "c") + None + else { + try { + val parser = INPUT_TYPES(a.toInt)._2 + + try { + val input = readLineOpt("Input " + name + ": ") + + input.map(parser) + } + catch { + case e: Throwable => + nl() + + warn("Failed to parse value to specified type") + + None + } + } + catch { + case e: Throwable => + nl() + + warn("Invalid selection: " + a) + + None + } + } + } + /** * Safe `readLine` version. * @@ -2092,41 +2159,6 @@ object visor extends VisorTag { } } - /** - * Asks user to choose node id8. - * - * @return `Option` for node id8. - */ - def askNodeId(): Option[String] = { - assert(isConnected) - - val ids = ignite.cluster.forRemotes().nodes().map(nid8).toList - - ids.indices.foreach(i => println((i + 1) + ": " + ids(i))) - - nl() - - println("C: Cancel") - - nl() - - readLineOpt("Choose node: ") match { - case Some("c") | Some("C") | None => None - case Some(idx) => - try - Some(ids(idx.toInt - 1)) - catch { - case _: Throwable => - if (idx.isEmpty) - warn("Index can't be empty.") - else - warn("Invalid index: " + idx + ".") - - None - } - } - } - /** * Adds close callback. Added function will be called every time * command `close` is called. @@ -2183,9 +2215,7 @@ object visor extends VisorTag { * Disconnects from the grid. 
*/ def close() { - if (!isConnected) - adviseToConnect() - else { + if (checkConnected()) { if (pool != null) { pool.shutdown() @@ -2320,9 +2350,7 @@ object visor extends VisorTag { def log(args: String) { assert(args != null) - if (!isConnected) - adviseToConnect() - else { + if (checkConnected()) { def scold(errMsgs: Any*) { assert(errMsgs != null) From 23f5e814a25058448b05bdb35d77253e39e207eb Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Thu, 21 Dec 2017 17:29:28 +0700 Subject: [PATCH 192/243] IGNITE-7106 Improved collecting of rebalance metrics. (cherry picked from commit fa55a6e) --- .../visor/node/VisorNodeDataCollectorJob.java | 11 +++++++++ .../node/VisorNodeDataCollectorJobResult.java | 24 +++++++++++++++++++ .../node/VisorNodeDataCollectorTask.java | 13 ++++++---- .../VisorNodeDataCollectorTaskResult.java | 20 ++++++++++++++++ 4 files changed, 63 insertions(+), 5 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java index 7e921ad9f0a7e..fda23a2a917c6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java @@ -22,6 +22,7 @@ import java.util.concurrent.ConcurrentMap; import org.apache.ignite.IgniteFileSystem; import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.cache.CacheMetrics; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.FileSystemConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; @@ -182,6 +183,9 @@ protected void caches(VisorNodeDataCollectorJobResult res, VisorNodeDataCollecto List resCaches = res.getCaches(); + double total = 0; + double moving = 0; + for (String cacheName : cacheProc.cacheNames()) { if (proxyCache(cacheName)) continue; @@ -195,6 
+199,11 @@ protected void caches(VisorNodeDataCollectorJobResult res, VisorNodeDataCollecto if (ca == null || !ca.context().started()) continue; + CacheMetrics cm = ca.localMetrics(); + + total += cm.getTotalPartitionsCount(); + moving += cm.getRebalancingPartitionsCount(); + resCaches.add(new VisorCache(ignite, ca, arg.isCollectCacheMetrics())); } catch(IllegalStateException | IllegalArgumentException e) { @@ -207,6 +216,8 @@ protected void caches(VisorNodeDataCollectorJobResult res, VisorNodeDataCollecto } } } + + res.setRebalance(total > 0 ? (total - moving) / total : -1); } catch (Exception e) { res.setCachesEx(new VisorExceptionWrapper(e)); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJobResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJobResult.java index 0612c5ea6f221..e3501572a65fc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJobResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJobResult.java @@ -90,6 +90,9 @@ public class VisorNodeDataCollectorJobResult extends VisorDataTransferObject { /** Exception while collecting persistence metrics. */ private VisorExceptionWrapper persistenceMetricsEx; + /** Rebalance percent. */ + private double rebalance; + /** * Default constructor. */ @@ -302,6 +305,25 @@ public void setPersistenceMetricsEx(VisorExceptionWrapper persistenceMetricsEx) this.persistenceMetricsEx = persistenceMetricsEx; } + /** + * @return Rebalance progress. + */ + public double getRebalance() { + return rebalance; + } + + /** + * @param rebalance Rebalance progress. 
+ */ + public void setRebalance(double rebalance) { + this.rebalance = rebalance; + } + + /** {@inheritDoc} */ + @Override public byte getProtocolVersion() { + return V2; + } + /** {@inheritDoc} */ @Override protected void writeExternalData(ObjectOutput out) throws IOException { U.writeString(out, gridName); @@ -321,6 +343,7 @@ public void setPersistenceMetricsEx(VisorExceptionWrapper persistenceMetricsEx) out.writeBoolean(hasPendingExchange); out.writeObject(persistenceMetrics); out.writeObject(persistenceMetricsEx); + out.writeDouble(rebalance); } /** {@inheritDoc} */ @@ -342,6 +365,7 @@ public void setPersistenceMetricsEx(VisorExceptionWrapper persistenceMetricsEx) hasPendingExchange = in.readBoolean(); persistenceMetrics = (VisorPersistenceMetrics)in.readObject(); persistenceMetricsEx = (VisorExceptionWrapper)in.readObject(); + rebalance = (protoVer > V1) ? in.readDouble() : -1; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTask.java index abd9ce28156af..fffc3bfcdbd7b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTask.java @@ -23,6 +23,7 @@ import org.apache.ignite.cluster.ClusterGroupEmptyException; import org.apache.ignite.compute.ComputeJobResult; import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.visor.VisorMultiNodeTask; import org.apache.ignite.internal.visor.util.VisorExceptionWrapper; import org.jetbrains.annotations.Nullable; @@ -93,28 +94,28 @@ protected void reduceJobResult(VisorNodeDataCollectorTaskResult taskRes, taskRes.getErrorCounts().put(nid, jobRes.getErrorCount()); - if (!jobRes.getEvents().isEmpty()) + if 
(!F.isEmpty(jobRes.getEvents())) taskRes.getEvents().addAll(jobRes.getEvents()); if (jobRes.getEventsEx() != null) taskRes.getEventsEx().put(nid, jobRes.getEventsEx()); - if (!jobRes.getMemoryMetrics().isEmpty()) + if (!F.isEmpty(jobRes.getMemoryMetrics())) taskRes.getMemoryMetrics().put(nid, jobRes.getMemoryMetrics()); if (jobRes.getMemoryMetricsEx() != null) taskRes.getMemoryMetricsEx().put(nid, jobRes.getMemoryMetricsEx()); - if (!jobRes.getCaches().isEmpty()) + if (!F.isEmpty(jobRes.getCaches())) taskRes.getCaches().put(nid, jobRes.getCaches()); if (jobRes.getCachesEx() != null) taskRes.getCachesEx().put(nid, jobRes.getCachesEx()); - if (!jobRes.getIgfss().isEmpty()) + if (!F.isEmpty(jobRes.getIgfss())) taskRes.getIgfss().put(nid, jobRes.getIgfss()); - if (!jobRes.getIgfsEndpoints().isEmpty()) + if (!F.isEmpty(jobRes.getIgfsEndpoints())) taskRes.getIgfsEndpoints().put(nid, jobRes.getIgfsEndpoints()); if (jobRes.getIgfssEx() != null) @@ -129,5 +130,7 @@ protected void reduceJobResult(VisorNodeDataCollectorTaskResult taskRes, taskRes.getReadyAffinityVersions().put(nid, jobRes.getReadyAffinityVersion()); taskRes.getPendingExchanges().put(nid, jobRes.isHasPendingExchange()); + + taskRes.getRebalance().put(nid, jobRes.getRebalance()); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskResult.java index ace964cdc75b5..eb161f82c6638 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskResult.java @@ -100,6 +100,9 @@ public class VisorNodeDataCollectorTaskResult extends VisorDataTransferObject { /** Exceptions caught during collecting persistence metrics from nodes. */ private Map persistenceMetricsEx = new HashMap<>(); + /** Rebalance state on nodes. 
*/ + private Map rebalance = new HashMap<>(); + /** * Default constructor. */ @@ -271,6 +274,18 @@ public Map getPersistenceMetricsEx() { return persistenceMetricsEx; } + /** + * @return Rebalance on nodes. + */ + public Map getRebalance() { + return rebalance; + } + + /** {@inheritDoc} */ + @Override public byte getProtocolVersion() { + return V2; + } + /** * Add specified results. * @@ -298,6 +313,7 @@ public void add(VisorNodeDataCollectorTaskResult res) { pendingExchanges.putAll(res.getPendingExchanges()); persistenceMetrics.putAll(res.getPersistenceMetrics()); persistenceMetricsEx.putAll(res.getPersistenceMetricsEx()); + rebalance.putAll(res.getRebalance()); } /** {@inheritDoc} */ @@ -321,6 +337,7 @@ public void add(VisorNodeDataCollectorTaskResult res) { U.writeMap(out, pendingExchanges); U.writeMap(out, persistenceMetrics); U.writeMap(out, persistenceMetricsEx); + U.writeMap(out, rebalance); } /** {@inheritDoc} */ @@ -344,6 +361,9 @@ public void add(VisorNodeDataCollectorTaskResult res) { pendingExchanges = U.readMap(in); persistenceMetrics = U.readMap(in); persistenceMetricsEx = U.readMap(in); + + if (protoVer > V1) + rebalance = U.readMap(in); } /** {@inheritDoc} */ From fed30164a3a8dc79304b8702e7ef4c9a2f407a38 Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Thu, 21 Dec 2017 10:35:20 +0300 Subject: [PATCH 193/243] master - Added missing license file (cherry picked from commit 47c8e26) --- modules/dev-utils/licenses/apache-2.0.txt | 202 ++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 modules/dev-utils/licenses/apache-2.0.txt diff --git a/modules/dev-utils/licenses/apache-2.0.txt b/modules/dev-utils/licenses/apache-2.0.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/modules/dev-utils/licenses/apache-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
From ebb20d5aab88277c620553782864dab36cf7d6d2 Mon Sep 17 00:00:00 2001 From: dpavlov Date: Wed, 4 Oct 2017 15:05:48 +0300 Subject: [PATCH 194/243] IGNITE-7244 Added missing README.txt for development utils (cherry picked from commit 6e8cfe3) --- .../development/utils/package-info.java | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 modules/dev-utils/src/main/java/org/apache/ignite/development/utils/package-info.java diff --git a/modules/dev-utils/src/main/java/org/apache/ignite/development/utils/package-info.java b/modules/dev-utils/src/main/java/org/apache/ignite/development/utils/package-info.java new file mode 100644 index 0000000000000..b3119dd79abca --- /dev/null +++ b/modules/dev-utils/src/main/java/org/apache/ignite/development/utils/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * + * Contains development utilities for Ignite useful for PDS artifacts analysis. 
+ */ +package org.apache.ignite.development.utils; \ No newline at end of file From ac918c8553d4ce3188fd0cdbc6b1d51460ad2f49 Mon Sep 17 00:00:00 2001 From: Ilya Kasnacheev Date: Wed, 20 Dec 2017 13:05:24 +0300 Subject: [PATCH 195/243] IGNITE-7197 Avoid NPE in services() by waiting on latch --- .../service/GridServiceProcessor.java | 53 ++++-- .../processors/task/GridTaskProcessor.java | 2 + .../internal/GridJobServicesAddNodeTest.java | 172 ++++++++++++++++++ .../IgniteComputeGridTestSuite.java | 2 + 4 files changed, 216 insertions(+), 13 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/GridJobServicesAddNodeTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java index 6f1dfc7679af9..e0d19a772ddb6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java @@ -30,6 +30,7 @@ import java.util.UUID; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; @@ -159,11 +160,14 @@ public class GridServiceProcessor extends GridProcessorAdapter implements Ignite private ThreadLocal svcName = new ThreadLocal<>(); /** Service cache. */ - private IgniteInternalCache cache; + private volatile IgniteInternalCache serviceCache; /** Topology listener. */ private DiscoveryEventListener topLsnr = new TopologyListener(); + /** */ + private final CountDownLatch startLatch = new CountDownLatch(1); + /** * @param ctx Kernal context. 
*/ @@ -222,19 +226,21 @@ CU.UTILITY_CACHE_NAME, new ServiceEntriesListener(), null, null * @throws IgniteCheckedException If failed. */ private void onKernalStart0() throws IgniteCheckedException { - updateUtilityCache(); - if (!ctx.clientNode()) ctx.event().addDiscoveryEventListener(topLsnr, EVTS); + updateUtilityCache(); + + startLatch.countDown(); + try { if (ctx.deploy().enabled()) ctx.cache().context().deploy().ignoreOwnership(true); if (!ctx.clientNode()) { - assert cache.context().affinityNode(); + assert serviceCache.context().affinityNode(); - cache.context().continuousQueries().executeInternalQuery( + serviceCache.context().continuousQueries().executeInternalQuery( new ServiceEntriesListener(), null, true, true, false ); } @@ -245,7 +251,7 @@ private void onKernalStart0() throws IgniteCheckedException { @Override public void run() { try { Iterable> entries = - cache.context().continuousQueries().existingEntries(false, null); + serviceCache.context().continuousQueries().existingEntries(false, null); onSystemCacheUpdated(entries); } @@ -281,7 +287,17 @@ private void onKernalStart0() throws IgniteCheckedException { * */ public void updateUtilityCache() { - cache = ctx.cache().utilityCache(); + serviceCache = ctx.cache().utilityCache(); + } + + /** + * @return Service cache. 
+ */ + private IgniteInternalCache serviceCache() { + if (serviceCache == null) + U.awaitQuiet(startLatch); + + return serviceCache; } /** {@inheritDoc} */ @@ -298,6 +314,8 @@ public void updateUtilityCache() { this.busyLock = null; } + startLatch.countDown(); + U.shutdownNow(GridServiceProcessor.class, depExe, log); if (!ctx.clientNode()) @@ -598,7 +616,7 @@ public IgniteInternalFuture deployAll(Collection cfgs) if (cfgsCp.size() == 1) writeServiceToCache(res, cfgsCp.get(0)); else if (cfgsCp.size() > 1) { - try (Transaction tx = cache.txStart(PESSIMISTIC, READ_COMMITTED)) { + try (Transaction tx = serviceCache().txStart(PESSIMISTIC, READ_COMMITTED)) { for (ServiceConfiguration cfg : cfgsCp) { try { writeServiceToCache(res, cfg); @@ -690,7 +708,7 @@ private void writeServiceToCache(GridServiceDeploymentCompoundFuture res, Servic GridServiceDeploymentKey key = new GridServiceDeploymentKey(name); - GridServiceDeployment dep = (GridServiceDeployment)cache.getAndPutIfAbsent(key, + GridServiceDeployment dep = (GridServiceDeployment)serviceCache().getAndPutIfAbsent(key, new GridServiceDeployment(ctx.localNodeId(), cfg)); if (dep != null) { @@ -800,7 +818,7 @@ public IgniteInternalFuture cancelAll(Collection svcNames) { List toRollback = new ArrayList<>(); - try (Transaction tx = cache.txStart(PESSIMISTIC, READ_COMMITTED)) { + try (Transaction tx = serviceCache().txStart(PESSIMISTIC, READ_COMMITTED)) { for (String name : svcNames) { if (res == null) res = new GridCompoundFuture<>(); @@ -873,7 +891,7 @@ private CancelResult removeServiceFromCache(String name) throws IgniteCheckedExc GridServiceDeploymentKey key = new GridServiceDeploymentKey(name); try { - if (cache.getAndRemove(key) == null) { + if (serviceCache().getAndRemove(key) == null) { // Remove future from local map if service was not deployed. undepFuts.remove(name, fut); @@ -901,6 +919,8 @@ private CancelResult removeServiceFromCache(String name) throws IgniteCheckedExc * @throws IgniteCheckedException On error. 
*/ public Map serviceTopology(String name, long timeout) throws IgniteCheckedException { + IgniteInternalCache cache = serviceCache(); + ClusterNode node = cache.affinity().mapKeyToNode(name); final ServiceTopologyCallable call = new ServiceTopologyCallable(name); @@ -943,7 +963,7 @@ public Collection serviceDescriptors() { ServiceDescriptorImpl desc = new ServiceDescriptorImpl(dep); try { - GridServiceAssignments assigns = (GridServiceAssignments)cache.getForcePrimary( + GridServiceAssignments assigns = (GridServiceAssignments)serviceCache().getForcePrimary( new GridServiceAssignmentsKey(dep.configuration().getName())); if (assigns != null) { @@ -1108,8 +1128,9 @@ public Collection services(String name) { * @throws IgniteCheckedException If failed. */ private void reassign(GridServiceDeployment dep, AffinityTopologyVersion topVer) throws IgniteCheckedException { - ServiceConfiguration cfg = dep.configuration(); + IgniteInternalCache cache = serviceCache(); + ServiceConfiguration cfg = dep.configuration(); Object nodeFilter = cfg.getNodeFilter(); if (nodeFilter != null) @@ -1466,6 +1487,8 @@ private void cancel(Iterable ctxs, int cancelCnt) { @SuppressWarnings("unchecked") private Iterator> serviceEntries(IgniteBiPredicate p) { try { + IgniteInternalCache cache = serviceCache(); + GridCacheQueryManager qryMgr = cache.context().queries(); CacheQuery> qry = qryMgr.createScanQuery(p, null, false); @@ -1606,6 +1629,8 @@ private void processDeployment(CacheEntryEvent cache = serviceCache(); + if (cache.cache().affinity().isPrimary(ctx.discovery().localNode(), key)) { try { cache.getAndRemove(key); @@ -1774,6 +1799,8 @@ else if (msg instanceof DynamicCacheChangeBatch) { Iterator> it = serviceEntries(ServiceAssignmentsPredicate.INSTANCE); // Clean up zombie assignments. 
+ IgniteInternalCache cache = serviceCache(); + while (it.hasNext()) { Cache.Entry e = it.next(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskProcessor.java index 25a38acc094fd..871d945021514 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskProcessor.java @@ -198,6 +198,8 @@ private IgniteClientDisconnectedCheckedException disconnectedError(@Nullable Ign lock.writeUnlock(); } + startLatch.countDown(); + int size = tasks.size(); if (size > 0) { diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridJobServicesAddNodeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridJobServicesAddNodeTest.java new file mode 100644 index 0000000000000..4b8b494022001 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/GridJobServicesAddNodeTest.java @@ -0,0 +1,172 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.processors.service.DummyService; +import org.apache.ignite.internal.util.IgniteUtils; +import org.apache.ignite.internal.util.typedef.CAX; +import org.apache.ignite.internal.util.typedef.CIX1; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.lang.IgniteCallable; +import org.apache.ignite.lang.IgniteFuture; +import org.apache.ignite.resources.IgniteInstanceResource; +import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.apache.ignite.testframework.junits.common.GridCommonTest; + +/** + * Tests multiple parallel jobs execution, accessing services(), while starting new nodes. + */ +@GridCommonTest(group = "Kernal Self") +public class GridJobServicesAddNodeTest extends GridCommonAbstractTest { + /** */ + private static final int LOG_MOD = 100; + + /** */ + private static final int MAX_ADD_NODES = 64; + + /** IP finder. 
*/ + private static final TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true); + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + startGrid(1); + startGrid(2); + + assertEquals(2, grid(1).cluster().nodes().size()); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + } + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration c = super.getConfiguration(igniteInstanceName); + + TcpDiscoverySpi disco = new TcpDiscoverySpi(); + + disco.setIpFinder(ipFinder); + + c.setDiscoverySpi(disco); + + TcpCommunicationSpi commSpi = new TcpCommunicationSpi(); + + commSpi.setSharedMemoryPort(-1); + + c.setCommunicationSpi(commSpi); + + return c; + } + + /** + * @throws Exception If test failed. + */ + public void testServiceDescriptorsJob() throws Exception { + final int tasks = 5000; + final int threads = 10; + + final Ignite ignite1 = grid(1); + final CountDownLatch latch = new CountDownLatch(tasks); + final AtomicInteger jobsCnt = new AtomicInteger(); + final AtomicInteger resCnt = new AtomicInteger(); + + ignite1.services().deployClusterSingleton("jobsSvc", new DummyService()); + + GridTestUtils.runMultiThreadedAsync(new CAX() { + @Override public void applyx() throws IgniteCheckedException { + while (true) { + int cnt = jobsCnt.incrementAndGet(); + + if (cnt > 5000) + break; + + IgniteCallable job; + + job = new ServiceDescriptorsJob(); + + IgniteFuture fut = ignite1.compute().callAsync(job); + + if (cnt % LOG_MOD == 0) + X.println("Submitted jobs: " + cnt); + + fut.listen(new CIX1>() { + @Override public void applyx(IgniteFuture f) { + try { + assert f.get(); + + long cnt = resCnt.incrementAndGet(); + + if (cnt % LOG_MOD == 0) + X.println("Results count: " + cnt); + } + finally { + latch.countDown(); + } + } + }); + + IgniteUtils.sleep(5); + } + } + }, threads, 
"TEST-THREAD"); + + int additionalNodesStarted = 0; + while (!latch.await(threads, TimeUnit.MILLISECONDS)) { + if (additionalNodesStarted++ <= MAX_ADD_NODES) { + startGrid(2 + additionalNodesStarted); + } + } + + assertEquals("Jobs cnt != Results cnt", jobsCnt.get() - threads, resCnt.get()); + } + + /** + * Test service enumerating job. + */ + @SuppressWarnings({"PublicInnerClass"}) + public static class ServiceDescriptorsJob implements IgniteCallable { + /** */ + @IgniteInstanceResource + private Ignite ignite; + + /** {@inheritDoc} */ + @Override public Boolean call() throws Exception { + try { + return ignite.services().serviceDescriptors().iterator().hasNext(); + } catch (Exception e) { + e.printStackTrace(); + + return false; + } finally { + Thread.sleep(10); + } + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteComputeGridTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteComputeGridTestSuite.java index ac3de73439ad7..2ffa11ebf2849 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteComputeGridTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteComputeGridTestSuite.java @@ -40,6 +40,7 @@ import org.apache.ignite.internal.GridJobCollisionCancelSelfTest; import org.apache.ignite.internal.GridJobContextSelfTest; import org.apache.ignite.internal.GridJobMasterLeaveAwareSelfTest; +import org.apache.ignite.internal.GridJobServicesAddNodeTest; import org.apache.ignite.internal.GridJobStealingSelfTest; import org.apache.ignite.internal.GridJobStealingZeroActiveJobsSelfTest; import org.apache.ignite.internal.GridJobSubjectIdSelfTest; @@ -160,6 +161,7 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(IgniteRoundRobinErrorAfterClientReconnectTest.class); suite.addTestSuite(PublicThreadpoolStarvationTest.class); suite.addTestSuite(StripedExecutorTest.class); + suite.addTestSuite(GridJobServicesAddNodeTest.class); 
suite.addTestSuite(IgniteComputeCustomExecutorConfigurationSelfTest.class); suite.addTestSuite(IgniteComputeCustomExecutorSelfTest.class); From 0bb705b097fa237d35f47d1f9ea7e65c53bbe9d1 Mon Sep 17 00:00:00 2001 From: mcherkasov Date: Fri, 22 Dec 2017 16:38:00 +0300 Subject: [PATCH 196/243] IGNITE-7021 IgniteOOM is not propogated to client in case of implicit transaction Signed-off-by: Andrey Gura (cherry picked from commit 21d43d6) --- .../dht/GridDhtTxPrepareFuture.java | 3 ++ .../IgniteOutOfMemoryPropagationTest.java | 41 ++++++++++--------- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java index 0fb9ee43647ca..a1f3984ca0472 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java @@ -717,6 +717,9 @@ private boolean mapIfLocked() { CIX1> resClo = new CIX1>() { @Override public void applyx(IgniteInternalFuture fut) { + if(res.error() == null && fut.error() != null) + res.error(fut.error()); + if (REPLIED_UPD.compareAndSet(GridDhtTxPrepareFuture.this, 0, 1)) sendPrepareResponse(res); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteOutOfMemoryPropagationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteOutOfMemoryPropagationTest.java index a13cbd478ab0c..24ff3fc7ff218 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteOutOfMemoryPropagationTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteOutOfMemoryPropagationTest.java @@ -43,7 +43,6 @@ * */ public class 
IgniteOutOfMemoryPropagationTest extends GridCommonAbstractTest { - /** */ public static final int NODES = 3; @@ -54,7 +53,7 @@ public class IgniteOutOfMemoryPropagationTest extends GridCommonAbstractTest { private CacheMode mode; /** */ - private int backupsCount; + private int backupsCnt; /** */ private CacheWriteSynchronizationMode writeSyncMode; @@ -89,7 +88,7 @@ private void testOOMPropagation(boolean useStreamer) throws Exception { for (CacheAtomicityMode atomicityMode : CacheAtomicityMode.values()) { for (CacheMode cacheMode : CacheMode.values()) { for (CacheWriteSynchronizationMode writeSyncMode : CacheWriteSynchronizationMode.values()) { - for (int backupsCount = 0; backupsCount < 1; backupsCount++) { + for (int backupsCnt = 0; backupsCnt <= 1; backupsCnt++) { if (writeSyncMode == CacheWriteSynchronizationMode.FULL_ASYNC || cacheMode == CacheMode.REPLICATED) continue; @@ -102,14 +101,14 @@ private void testOOMPropagation(boolean useStreamer) throws Exception { CacheAtomicityMode.TRANSACTIONAL, cacheMode, writeSyncMode, - backupsCount, + backupsCnt, concurrency, isolation); } } } - else - checkOOMPropagation(useStreamer, atomicityMode, cacheMode, writeSyncMode, backupsCount); + + checkOOMPropagation(useStreamer, atomicityMode, cacheMode, writeSyncMode, backupsCnt); } } } @@ -118,21 +117,21 @@ private void testOOMPropagation(boolean useStreamer) throws Exception { /** */ private void checkOOMPropagation(boolean useStreamer, CacheAtomicityMode atomicityMode, CacheMode cacheMode, - CacheWriteSynchronizationMode writeSyncMode, int backupsCount) throws Exception { - checkOOMPropagation(useStreamer, atomicityMode, cacheMode, writeSyncMode, backupsCount, null, null); + CacheWriteSynchronizationMode writeSyncMode, int backupsCnt) throws Exception { + checkOOMPropagation(useStreamer, atomicityMode, cacheMode, writeSyncMode, backupsCnt, null, null); } /** */ private void checkOOMPropagation(boolean useStreamer, CacheAtomicityMode atomicityMode, CacheMode cacheMode, - 
CacheWriteSynchronizationMode writeSyncMode, int backupsCount, + CacheWriteSynchronizationMode writeSyncMode, int backupsCnt, TransactionConcurrency concurrency, TransactionIsolation isolation) throws Exception { Throwable t = null; System.out.println("Checking conf: CacheAtomicityMode." + atomicityMode + - " CacheMode." + mode + " CacheWriteSynchronizationMode." + writeSyncMode + " backupsCount = " + backupsCount + " CacheMode." + cacheMode + " CacheWriteSynchronizationMode." + writeSyncMode + " backupsCount = " + backupsCnt + " TransactionConcurrency." + concurrency + " TransactionIsolation." + isolation); - initGrid(atomicityMode, cacheMode, writeSyncMode, backupsCount); + initGrid(atomicityMode, cacheMode, writeSyncMode, backupsCnt); try { forceOOM(useStreamer, concurrency, isolation); } @@ -156,15 +155,15 @@ private void checkOOMPropagation(boolean useStreamer, CacheAtomicityMode atomici * @param atomicityMode atomicity mode * @param mode cache mode * @param writeSyncMode cache write synchronization mode - * @param backupsCount backups count - * @throws Exception + * @param backupsCnt backups count + * @throws Exception If failed. */ private void initGrid(CacheAtomicityMode atomicityMode, CacheMode mode, - CacheWriteSynchronizationMode writeSyncMode, int backupsCount) throws Exception { + CacheWriteSynchronizationMode writeSyncMode, int backupsCnt) throws Exception { this.atomicityMode = atomicityMode; this.mode = mode; - this.backupsCount = backupsCount; + this.backupsCnt = backupsCnt; this.writeSyncMode = writeSyncMode; Ignition.setClientMode(false); @@ -183,9 +182,13 @@ private void initGrid(CacheAtomicityMode atomicityMode, CacheMode mode, } - /** */ - public void forceOOM(boolean useStreamer, TransactionConcurrency concurrency, - TransactionIsolation isolation) throws Exception { + + /** + * @param useStreamer Use streamer. + * @param concurrency Concurrency. + * @param isolation Isolation. 
+ */ + public void forceOOM(boolean useStreamer, TransactionConcurrency concurrency, TransactionIsolation isolation) { final IgniteCache cache = client.cache(DEFAULT_CACHE_NAME); IgniteDataStreamer streamer = client.dataStreamer(DEFAULT_CACHE_NAME); @@ -241,7 +244,7 @@ public void forceOOM(boolean useStreamer, TransactionConcurrency concurrency, baseCfg.setAtomicityMode(this.atomicityMode); baseCfg.setCacheMode(this.mode); - baseCfg.setBackups(this.backupsCount); + baseCfg.setBackups(this.backupsCnt); baseCfg.setWriteSynchronizationMode(this.writeSyncMode); cfg.setCacheConfiguration(baseCfg); From 0c575b2b797eb7d110463e421c69291955af122e Mon Sep 17 00:00:00 2001 From: Aleksei Scherbakov Date: Thu, 26 Oct 2017 17:13:18 +0300 Subject: [PATCH 197/243] ignite-6667 Reuse DiscoCache when possible. (cherry picked from commit 087479c) --- .../managers/discovery/DiscoCache.java | 45 ++++++---- .../discovery/DiscoveryCustomMessage.java | 13 ++- .../cache/CacheAffinityChangeMessage.java | 8 ++ .../ClientCacheChangeDiscoveryMessage.java | 9 ++ ...lientCacheChangeDummyDiscoveryMessage.java | 9 ++ .../cache/DynamicCacheChangeBatch.java | 9 ++ .../binary/MetadataUpdateAcceptedMessage.java | 9 ++ .../binary/MetadataUpdateProposedMessage.java | 9 ++ .../ChangeGlobalStateFinishMessage.java | 9 ++ .../cluster/ChangeGlobalStateMessage.java | 11 ++- .../continuous/AbstractContinuousMessage.java | 10 +++ .../StartRoutineAckDiscoveryMessage.java | 2 +- .../StartRoutineDiscoveryMessage.java | 2 +- .../StopRoutineAckDiscoveryMessage.java | 2 +- .../StopRoutineDiscoveryMessage.java | 2 +- .../marshaller/MappingAcceptedMessage.java | 9 ++ .../marshaller/MappingProposedMessage.java | 9 ++ .../SchemaAbstractDiscoveryMessage.java | 10 +++ .../SchemaProposeDiscoveryMessage.java | 3 +- .../IgniteDiscoveryCacheReuseSelfTest.java | 89 +++++++++++++++++++ .../IgniteSpiDiscoverySelfTestSuite.java | 4 + 21 files changed, 248 insertions(+), 25 deletions(-) create mode 100644 
modules/core/src/test/java/org/apache/ignite/spi/discovery/IgniteDiscoveryCacheReuseSelfTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoCache.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoCache.java index 4b57eb8140602..9ed70aa7afae1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoCache.java @@ -19,7 +19,6 @@ import java.util.Collection; import java.util.Collections; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -71,10 +70,10 @@ public class DiscoCache { private final Map> cacheGrpAffNodes; /** Node map. */ - private final Map nodeMap; + final Map nodeMap; /** Alive nodes. */ - private final Set alives = new GridConcurrentHashSet<>(); + final Set alives = new GridConcurrentHashSet<>(); /** */ private final IgniteProductVersion minNodeVer; @@ -95,6 +94,7 @@ public class DiscoCache { * @param cacheGrpAffNodes Affinity nodes by cache group ID. * @param nodeMap Node map. * @param alives Alive nodes. + * @param minNodeVer Minimum node version. 
*/ DiscoCache( AffinityTopologyVersion topVer, @@ -108,7 +108,8 @@ public class DiscoCache { Map> allCacheNodes, Map> cacheGrpAffNodes, Map nodeMap, - Set alives) { + Set alives, + IgniteProductVersion minNodeVer) { this.topVer = topVer; this.state = state; this.loc = loc; @@ -121,19 +122,7 @@ public class DiscoCache { this.cacheGrpAffNodes = cacheGrpAffNodes; this.nodeMap = nodeMap; this.alives.addAll(alives); - - IgniteProductVersion minVer = null; - - for (int i = 0; i < allNodes.size(); i++) { - ClusterNode node = allNodes.get(i); - - if (minVer == null) - minVer = node.version(); - else if (node.version().compareTo(minVer) < 0) - minVer = node.version(); - } - - minNodeVer = minVer; + this.minNodeVer = minNodeVer; } /** @@ -326,6 +315,28 @@ private List emptyIfNull(List nodes) { return nodes == null ? Collections.emptyList() : nodes; } + /** + * @param ver Topology version. + * @param state Not {@code null} state if need override state, otherwise current state is used. + * @return Copy of discovery cache with new version. + */ + public DiscoCache copy(AffinityTopologyVersion ver, @Nullable DiscoveryDataClusterState state) { + return new DiscoCache( + ver, + state == null ? 
this.state : state, + loc, + rmtNodes, + allNodes, + srvNodes, + daemonNodes, + rmtNodesWithCaches, + allCacheNodes, + cacheGrpAffNodes, + nodeMap, + alives, + minNodeVer); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(DiscoCache.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoveryCustomMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoveryCustomMessage.java index f908b59d21575..c708c6247b2c2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoveryCustomMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoveryCustomMessage.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.managers.discovery; import java.io.Serializable; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.spi.discovery.tcp.messages.TcpDiscoveryNodeAddFinishedMessage; import org.apache.ignite.spi.discovery.tcp.messages.TcpDiscoveryNodeAddedMessage; @@ -89,4 +90,14 @@ public interface DiscoveryCustomMessage extends Serializable { * @return {@code true} if message can be modified during listener notification. Changes will be send to next nodes. */ public boolean isMutable(); -} \ No newline at end of file + + /** + * Creates new discovery cache if message caused topology version change. + * + * @param mgr Discovery manager. + * @param topVer New topology version. + * @param discoCache Current discovery cache. + * @return Reused discovery cache. 
+ */ + public DiscoCache createDiscoCache(GridDiscoveryManager mgr, AffinityTopologyVersion topVer, DiscoCache discoCache); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinityChangeMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinityChangeMessage.java index 8cff65eb72540..fe1014cf6e010 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinityChangeMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinityChangeMessage.java @@ -20,7 +20,9 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionExchangeId; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage; @@ -153,6 +155,12 @@ public AffinityTopologyVersion topologyVersion() { return false; } + /** {@inheritDoc} */ + @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, + AffinityTopologyVersion topVer, DiscoCache discoCache) { + return discoCache.copy(topVer, null); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(CacheAffinityChangeMessage.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDiscoveryMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDiscoveryMessage.java index 3d120f73082a9..e35d80e5c1c6f 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDiscoveryMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDiscoveryMessage.java @@ -22,7 +22,10 @@ import java.util.Iterator; import java.util.Map; import java.util.Set; +import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.S; @@ -169,6 +172,12 @@ public void updateTimeoutObject(ClientCacheUpdateTimeout updateTimeoutObj) { return false; } + /** {@inheritDoc} */ + @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, + AffinityTopologyVersion topVer, DiscoCache discoCache) { + throw new UnsupportedOperationException(); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(ClientCacheChangeDiscoveryMessage.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDummyDiscoveryMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDummyDiscoveryMessage.java index 44f6002c38291..6ed3ecc505f25 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDummyDiscoveryMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDummyDiscoveryMessage.java @@ -20,7 +20,10 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import org.apache.ignite.internal.managers.discovery.DiscoCache; import 
org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.lang.IgniteUuid; @@ -101,6 +104,12 @@ Set cachesToClose() { throw new UnsupportedOperationException(); } + /** {@inheritDoc} */ + @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, + AffinityTopologyVersion topVer, DiscoCache discoCache) { + throw new UnsupportedOperationException(); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(ClientCacheChangeDummyDiscoveryMessage.class, this, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeBatch.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeBatch.java index d5c820f981e39..83459a5c03589 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeBatch.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeBatch.java @@ -19,7 +19,10 @@ import java.util.Collection; import java.util.Set; +import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.S; @@ -73,6 +76,12 @@ public DynamicCacheChangeBatch(Collection reqs) { return false; } + /** {@inheritDoc} */ + @Override public DiscoCache 
createDiscoCache(GridDiscoveryManager mgr, AffinityTopologyVersion topVer, + DiscoCache discoCache) { + return mgr.createDiscoCacheOnCacheChange(topVer, discoCache); + } + /** * @return Collection of change requests. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/MetadataUpdateAcceptedMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/MetadataUpdateAcceptedMessage.java index ef5370eee6b19..0416746a04b68 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/MetadataUpdateAcceptedMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/MetadataUpdateAcceptedMessage.java @@ -16,7 +16,10 @@ */ package org.apache.ignite.internal.processors.cache.binary; +import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.lang.IgniteUuid; import org.jetbrains.annotations.Nullable; @@ -67,6 +70,12 @@ public class MetadataUpdateAcceptedMessage implements DiscoveryCustomMessage { return true; } + /** {@inheritDoc} */ + @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, + AffinityTopologyVersion topVer, DiscoCache discoCache) { + throw new UnsupportedOperationException(); + } + /** */ int acceptedVersion() { return acceptedVer; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/MetadataUpdateProposedMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/MetadataUpdateProposedMessage.java index 715e66843591f..f9bd66078cba2 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/MetadataUpdateProposedMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/MetadataUpdateProposedMessage.java @@ -20,7 +20,10 @@ import org.apache.ignite.binary.BinaryObjectException; import org.apache.ignite.internal.binary.BinaryMetadata; import org.apache.ignite.internal.binary.BinaryMetadataHandler; +import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.lang.IgniteUuid; import org.jetbrains.annotations.Nullable; @@ -130,6 +133,12 @@ public MetadataUpdateProposedMessage(BinaryMetadata metadata, UUID origNodeId) { return true; } + /** {@inheritDoc} */ + @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, + AffinityTopologyVersion topVer, DiscoCache discoCache) { + throw new UnsupportedOperationException(); + } + /** * @param err Error caused this update to be rejected. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/ChangeGlobalStateFinishMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/ChangeGlobalStateFinishMessage.java index 07711984372a2..a1fbacf36488b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/ChangeGlobalStateFinishMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/ChangeGlobalStateFinishMessage.java @@ -18,7 +18,10 @@ package org.apache.ignite.internal.processors.cluster; import java.util.UUID; +import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.lang.IgniteUuid; import org.jetbrains.annotations.Nullable; @@ -79,6 +82,12 @@ public boolean clusterActive() { return false; } + /** {@inheritDoc} */ + @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, + AffinityTopologyVersion topVer, DiscoCache discoCache) { + throw new UnsupportedOperationException(); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(ChangeGlobalStateFinishMessage.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/ChangeGlobalStateMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/ChangeGlobalStateMessage.java index 6579399c72f6b..6a642bcc7811a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/ChangeGlobalStateMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/ChangeGlobalStateMessage.java @@ -19,7 +19,10 @@ import java.util.List; import java.util.UUID; 
+import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.ExchangeActions; import org.apache.ignite.internal.processors.cache.StoredCacheData; import org.apache.ignite.internal.util.tostring.GridToStringExclude; @@ -112,7 +115,13 @@ void exchangeActions(ExchangeActions exchangeActions) { return false; } - /** + /** {@inheritDoc} */ + @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, AffinityTopologyVersion topVer, + DiscoCache discoCache) { + return mgr.createDiscoCacheOnCacheChange(topVer, discoCache); + } + + /** * @return Node initiated state change. */ public UUID initiatorNodeId() { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/AbstractContinuousMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/AbstractContinuousMessage.java index 01a95dfa89240..e9754d12cd966 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/AbstractContinuousMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/AbstractContinuousMessage.java @@ -18,8 +18,12 @@ package org.apache.ignite.internal.processors.continuous; import java.util.UUID; +import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.lang.IgniteUuid; +import org.jetbrains.annotations.Nullable; /** * @@ -57,4 +61,10 @@ public UUID routineId() { @Override public boolean isMutable() { return false; } + + 
/** {@inheritDoc} */ + @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, + AffinityTopologyVersion topVer, DiscoCache discoCache) { + throw new UnsupportedOperationException(); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StartRoutineAckDiscoveryMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StartRoutineAckDiscoveryMessage.java index 1765f2c36f4df..4063e05c61820 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StartRoutineAckDiscoveryMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StartRoutineAckDiscoveryMessage.java @@ -92,4 +92,4 @@ public Map errs() { @Override public String toString() { return S.toString(StartRoutineAckDiscoveryMessage.class, this, "routineId", routineId()); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StartRoutineDiscoveryMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StartRoutineDiscoveryMessage.java index 320226bcabeef..82996d4bd7103 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StartRoutineDiscoveryMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StartRoutineDiscoveryMessage.java @@ -132,4 +132,4 @@ public boolean keepBinary() { @Override public String toString() { return S.toString(StartRoutineDiscoveryMessage.class, this, "routineId", routineId()); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StopRoutineAckDiscoveryMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StopRoutineAckDiscoveryMessage.java index e6305c79c2838..79d8b29a24081 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StopRoutineAckDiscoveryMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StopRoutineAckDiscoveryMessage.java @@ -45,4 +45,4 @@ public StopRoutineAckDiscoveryMessage(UUID routineId) { @Override public String toString() { return S.toString(StopRoutineAckDiscoveryMessage.class, this, "routineId", routineId()); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StopRoutineDiscoveryMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StopRoutineDiscoveryMessage.java index 30d12d17d16a2..f6b18fd1aaa76 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StopRoutineDiscoveryMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/StopRoutineDiscoveryMessage.java @@ -45,4 +45,4 @@ public StopRoutineDiscoveryMessage(UUID routineId) { @Override public String toString() { return S.toString(StopRoutineDiscoveryMessage.class, this, "routineId", routineId()); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/marshaller/MappingAcceptedMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/marshaller/MappingAcceptedMessage.java index 23c285865914d..7af0559752b3c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/marshaller/MappingAcceptedMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/marshaller/MappingAcceptedMessage.java @@ -17,7 +17,10 @@ package org.apache.ignite.internal.processors.marshaller; +import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import 
org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.lang.IgniteUuid; import org.jetbrains.annotations.Nullable; @@ -59,6 +62,12 @@ public class MappingAcceptedMessage implements DiscoveryCustomMessage { return false; } + /** {@inheritDoc} */ + @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, + AffinityTopologyVersion topVer, DiscoCache discoCache) { + throw new UnsupportedOperationException(); + } + /** */ MarshallerMappingItem getMappingItem() { return item; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/marshaller/MappingProposedMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/marshaller/MappingProposedMessage.java index 33a2168c2262c..b4e13fba14bf0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/marshaller/MappingProposedMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/marshaller/MappingProposedMessage.java @@ -18,7 +18,10 @@ package org.apache.ignite.internal.processors.marshaller; import java.util.UUID; +import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.lang.IgniteUuid; @@ -94,6 +97,12 @@ private enum ProposalStatus { return true; } + /** {@inheritDoc} */ + @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, + AffinityTopologyVersion topVer, DiscoCache discoCache) { + throw new UnsupportedOperationException(); + } + /** */ MarshallerMappingItem mappingItem() { return mappingItem; 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/message/SchemaAbstractDiscoveryMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/message/SchemaAbstractDiscoveryMessage.java index 9fdc6c336a6aa..f55eae0922f46 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/message/SchemaAbstractDiscoveryMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/message/SchemaAbstractDiscoveryMessage.java @@ -17,11 +17,15 @@ package org.apache.ignite.internal.processors.query.schema.message; +import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.query.schema.operation.SchemaAbstractOperation; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.lang.IgniteUuid; +import org.jetbrains.annotations.Nullable; /** * Abstract discovery message for schema operations. @@ -51,6 +55,12 @@ protected SchemaAbstractDiscoveryMessage(SchemaAbstractOperation op) { return id; } + /** {@inheritDoc} */ + @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, + AffinityTopologyVersion topVer, DiscoCache discoCache) { + throw new UnsupportedOperationException(); + } + /** * @return Operation. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/message/SchemaProposeDiscoveryMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/message/SchemaProposeDiscoveryMessage.java index 5fd2606eae58a..0e1270b17b623 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/message/SchemaProposeDiscoveryMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/message/SchemaProposeDiscoveryMessage.java @@ -99,9 +99,8 @@ public boolean initialized() { * @param err Error. */ public void onError(SchemaOperationException err) { - if (!hasError()) { + if (!hasError()) this.err = err; - } } /** diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/IgniteDiscoveryCacheReuseSelfTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/IgniteDiscoveryCacheReuseSelfTest.java new file mode 100644 index 0000000000000..c238a9a36f2b9 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/IgniteDiscoveryCacheReuseSelfTest.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.spi.discovery; + +import org.apache.ignite.Ignite; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.managers.discovery.DiscoCache; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashMap; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +/** + * Tests discovery cache reuse between topology events. + */ +public class IgniteDiscoveryCacheReuseSelfTest extends GridCommonAbstractTest { + /** */ + private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER); + + return cfg; + } + + /** + * Tests correct reuse of discovery cache. + * + * @throws Exception If failed. + */ + public void testDiscoCacheReuseOnNodeJoin() throws Exception { + startGridsMultiThreaded(2); + + assertDiscoCacheReuse(new AffinityTopologyVersion(2, 0), new AffinityTopologyVersion(2, 1)); + } + + /** + * Assert disco cache reuse. + * + * @param v1 First version. + * @param v2 Next version. 
+ */ + private void assertDiscoCacheReuse(AffinityTopologyVersion v1, AffinityTopologyVersion v2) { + for (Ignite ignite : G.allGrids()) { + GridBoundedConcurrentLinkedHashMap discoCacheHist = + U.field(((IgniteEx) ignite).context().discovery(), "discoCacheHist"); + + DiscoCache discoCache1 = discoCacheHist.get(v1); + DiscoCache discoCache2 = discoCacheHist.get(v2); + + assertEquals(v1, discoCache1.version()); + assertEquals(v2, discoCache2.version()); + + String[] props = new String[] { + "state", "loc", "rmtNodes", "allNodes", "srvNodes", "daemonNodes", "rmtNodesWithCaches", + "allCacheNodes", "allCacheNodes", "cacheGrpAffNodes", "nodeMap", "minNodeVer" + }; + + for (String prop : props) + assertSame(U.field(discoCache1, prop), U.field(discoCache2, prop)); + + assertNotSame(U.field(discoCache1, "alives"), U.field(discoCache2, "alives")); + assertEquals(U.field(discoCache1, "alives"), U.field(discoCache2, "alives")); + } + } +} \ No newline at end of file diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiDiscoverySelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiDiscoverySelfTestSuite.java index 4a3f3f2e0a2db..626875c57ceeb 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiDiscoverySelfTestSuite.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiDiscoverySelfTestSuite.java @@ -20,6 +20,7 @@ import junit.framework.TestSuite; import org.apache.ignite.spi.GridTcpSpiForwardingSelfTest; import org.apache.ignite.spi.discovery.AuthenticationRestartTest; +import org.apache.ignite.spi.discovery.IgniteDiscoveryCacheReuseSelfTest; import org.apache.ignite.spi.discovery.tcp.IgniteClientConnectTest; import org.apache.ignite.spi.discovery.tcp.IgniteClientReconnectMassiveShutdownTest; import org.apache.ignite.spi.discovery.tcp.TcpClientDiscoveryMarshallerCheckSelfTest; @@ -107,6 +108,9 @@ public static TestSuite suite() throws Exception { suite.addTest(new 
TestSuite(TcpDiscoverySslSecuredUnsecuredTest.class)); suite.addTest(new TestSuite(TcpDiscoverySslTrustedUntrustedTest.class)); + // Disco cache reuse. + suite.addTest(new TestSuite(IgniteDiscoveryCacheReuseSelfTest.class)); + return suite; } } From 20a89456ac693f232fb8c92389f40ec5e95fc8ea Mon Sep 17 00:00:00 2001 From: ascherbakoff Date: Thu, 16 Nov 2017 23:42:57 +0300 Subject: [PATCH 198/243] ignite-6667 Reuse DiscoCache when possible (cherry picked from commit e561ed5) --- .../discovery/GridDiscoveryManager.java | 186 ++++++++++-------- .../IgniteDiscoveryCacheReuseSelfTest.java | 7 + 2 files changed, 116 insertions(+), 77 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java index 77b06221f7c6c..0bff3bd0ef86f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java @@ -71,6 +71,7 @@ import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; import org.apache.ignite.internal.processors.cache.ClientCacheChangeDummyDiscoveryMessage; +import org.apache.ignite.internal.processors.cache.DynamicCacheChangeBatch; import org.apache.ignite.internal.processors.cache.DynamicCacheChangeRequest; import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; import org.apache.ignite.internal.processors.cache.GridCacheAdapter; @@ -102,6 +103,7 @@ import org.apache.ignite.lang.IgniteFuture; import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.lang.IgnitePredicate; +import org.apache.ignite.lang.IgniteProductVersion; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.plugin.security.SecurityCredentials; import 
org.apache.ignite.plugin.segmentation.SegmentationPolicy; @@ -623,23 +625,11 @@ private void onDiscovery0( updateClientNodes(node.id()); } - DiscoCache discoCache = null; - boolean locJoinEvt = type == EVT_NODE_JOINED && node.id().equals(locNode.id()); - IgniteInternalFuture transitionWaitFut = null; - ChangeGlobalStateFinishMessage stateFinishMsg = null; - if (locJoinEvt) { - discoCache = createDiscoCache(new AffinityTopologyVersion(topVer, minorTopVer), - ctx.state().clusterState(), - locNode, - topSnapshot); - - transitionWaitFut = ctx.state().onLocalJoin(discoCache); - } - else if (type == EVT_NODE_FAILED || type == EVT_NODE_LEFT) + if (type == EVT_NODE_FAILED || type == EVT_NODE_LEFT) stateFinishMsg = ctx.state().onNodeLeft(node); final AffinityTopologyVersion nextTopVer; @@ -658,12 +648,12 @@ else if (type == EVT_NODE_FAILED || type == EVT_NODE_LEFT) else if (customMsg instanceof ChangeGlobalStateFinishMessage) { ctx.state().onStateFinishMessage((ChangeGlobalStateFinishMessage)customMsg); - discoCache = createDiscoCache(topSnap.get().topVer, - ctx.state().clusterState(), - locNode, - topSnapshot); + Snapshot snapshot = topSnap.get(); + + // Topology version does not change, but need create DiscoCache with new state. + DiscoCache discoCache = snapshot.discoCache.copy(snapshot.topVer, ctx.state().clusterState()); - topSnap.set(new Snapshot(topSnap.get().topVer, discoCache)); + topSnap.set(new Snapshot(snapshot.topVer, discoCache)); incMinorTopVer = false; } @@ -704,24 +694,30 @@ else if (customMsg instanceof ChangeGlobalStateFinishMessage) { } } + DiscoCache discoCache; + // Put topology snapshot into discovery history. // There is no race possible between history maintenance and concurrent discovery // event notifications, since SPI notifies manager about all events from this listener. 
if (verChanged) { - if (discoCache == null) { + Snapshot snapshot = topSnap.get(); + + if (customMsg == null) { discoCache = createDiscoCache(nextTopVer, ctx.state().clusterState(), locNode, topSnapshot); } + else + discoCache = customMsg.createDiscoCache(GridDiscoveryManager.this, nextTopVer, snapshot.discoCache); discoCacheHist.put(nextTopVer, discoCache); - boolean set = updateTopologyVersionIfGreater(nextTopVer, discoCache); - - assert set || topVer == 0 : "Topology version has not been updated [this.topVer=" + - topSnap + ", topVer=" + topVer + ", node=" + node + + assert snapshot.topVer.compareTo(nextTopVer) < 0: "Topology version out of order [this.topVer=" + + topSnap + ", topVer=" + topVer + ", node=" + node + ", nextTopVer=" + nextTopVer + ", evt=" + U.gridEventName(type) + ']'; + + topSnap.set(new Snapshot(nextTopVer, discoCache)); } else // Current version. @@ -734,8 +730,7 @@ else if (customMsg instanceof ChangeGlobalStateFinishMessage) { if (gridStartTime == 0) gridStartTime = getSpi().getGridStartTime(); - updateTopologyVersionIfGreater(new AffinityTopologyVersion(locNode.order()), - discoCache); + topSnap.set(new Snapshot(nextTopVer, discoCache)); startLatch.countDown(); @@ -752,6 +747,8 @@ else if (customMsg instanceof ChangeGlobalStateFinishMessage) { if (!isLocDaemon && !ctx.clientDisconnected()) ctx.cache().context().exchange().onLocalJoin(discoEvt, discoCache); + IgniteInternalFuture transitionWaitFut = ctx.state().onLocalJoin(discoCache); + locJoin.onDone(new DiscoveryLocalJoinData(discoEvt, discoCache, transitionWaitFut, @@ -784,7 +781,9 @@ else if (type == EVT_CLIENT_NODE_DISCONNECTED) { topHist.clear(); topSnap.set(new Snapshot(AffinityTopologyVersion.ZERO, - createDiscoCache(AffinityTopologyVersion.ZERO, ctx.state().clusterState(), locNode, Collections.singleton(locNode)))); + createDiscoCache(AffinityTopologyVersion.ZERO, ctx.state().clusterState(), locNode, + Collections.singleton(locNode)) + )); } else if (type == 
EVT_CLIENT_NODE_RECONNECTED) { assert locNode.isClient() : locNode; @@ -2219,6 +2218,8 @@ public void reconnect() { ArrayList rmtNodes = new ArrayList<>(topSnapshot.size()); ArrayList allNodes = new ArrayList<>(topSnapshot.size()); + IgniteProductVersion minVer = null; + for (ClusterNode node : topSnapshot) { if (alive(node)) alives.add(node.id()); @@ -2236,6 +2237,11 @@ public void reconnect() { } nodeMap.put(node.id(), node); + + if (minVer == null) + minVer = node.version(); + else if (node.version().compareTo(minVer) < 0) + minVer = node.version(); } assert !rmtNodes.contains(loc) : "Remote nodes collection shouldn't contain local node" + @@ -2246,36 +2252,7 @@ public void reconnect() { Set rmtNodesWithCaches = new TreeSet<>(GridNodeOrderComparator.INSTANCE); - for (ClusterNode node : allNodes) { - assert node.order() != 0 : "Invalid node order [locNode=" + loc + ", node=" + node + ']'; - assert !node.isDaemon(); - - for (Map.Entry e : registeredCacheGrps.entrySet()) { - CacheGroupAffinity grpAff = e.getValue(); - Integer grpId = e.getKey(); - - if (CU.affinityNode(node, grpAff.cacheFilter)) { - List nodes = cacheGrpAffNodes.get(grpId); - - if (nodes == null) - cacheGrpAffNodes.put(grpId, nodes = new ArrayList<>()); - - nodes.add(node); - } - } - - for (Map.Entry entry : registeredCaches.entrySet()) { - String cacheName = entry.getKey(); - CachePredicate filter = entry.getValue(); - - if (filter.cacheNode(node)) { - if (!node.isLocal()) - rmtNodesWithCaches.add(node); - - addToMap(allCacheNodes, cacheName, node); - } - } - } + fillAffinityNodeCaches(allNodes, allCacheNodes, cacheGrpAffNodes, rmtNodesWithCaches); return new DiscoCache( topVer, @@ -2289,7 +2266,8 @@ public void reconnect() { Collections.unmodifiableMap(allCacheNodes), Collections.unmodifiableMap(cacheGrpAffNodes), Collections.unmodifiableMap(nodeMap), - alives); + alives, + minVer); } /** @@ -2311,26 +2289,6 @@ private void addToMap(Map> cacheMap, String cacheName cacheNodes.add(rich); } - /** - 
* Updates topology version if current version is smaller than updated. - * - * @param updated Updated topology version. - * @param discoCache Discovery cache. - * @return {@code True} if topology was updated. - */ - private boolean updateTopologyVersionIfGreater(AffinityTopologyVersion updated, DiscoCache discoCache) { - while (true) { - Snapshot cur = topSnap.get(); - - if (updated.compareTo(cur.topVer) >= 0) { - if (topSnap.compareAndSet(cur, new Snapshot(updated, discoCache))) - return true; - } - else - return false; - } - } - /** Stops local node. */ private void stopNode() { new Thread( @@ -3061,4 +3019,78 @@ private Boolean cacheClientNode(ClusterNode node) { return clientNodes.get(node.id()); } } + + /** + * Fills affinity node caches. + * + * @param allNodes All nodes. + * @param allCacheNodes All cache nodes. + * @param cacheGrpAffNodes Cache group aff nodes. + * @param rmtNodesWithCaches Rmt nodes with caches. + */ + private void fillAffinityNodeCaches(List allNodes, Map> allCacheNodes, + Map> cacheGrpAffNodes, Set rmtNodesWithCaches) { + for (ClusterNode node : allNodes) { + assert node.order() != 0 : "Invalid node order [locNode=" + localNode() + ", node=" + node + ']'; + assert !node.isDaemon(); + + for (Map.Entry e : registeredCacheGrps.entrySet()) { + CacheGroupAffinity grpAff = e.getValue(); + Integer grpId = e.getKey(); + + if (CU.affinityNode(node, grpAff.cacheFilter)) { + List nodes = cacheGrpAffNodes.get(grpId); + + if (nodes == null) + cacheGrpAffNodes.put(grpId, nodes = new ArrayList<>()); + + nodes.add(node); + } + } + + for (Map.Entry entry : registeredCaches.entrySet()) { + String cacheName = entry.getKey(); + CachePredicate filter = entry.getValue(); + + if (filter.cacheNode(node)) { + if (!node.isLocal()) + rmtNodesWithCaches.add(node); + + addToMap(allCacheNodes, cacheName, node); + } + } + } + } + + /** + * Creates discovery cache after {@link DynamicCacheChangeBatch} received. + * + * @param topVer Topology version. 
+ * @param discoCache Current disco cache. + * @return New discovery cache. + */ + public DiscoCache createDiscoCacheOnCacheChange(AffinityTopologyVersion topVer, + DiscoCache discoCache) { + List allNodes = discoCache.allNodes(); + Map> allCacheNodes = U.newHashMap(allNodes.size()); + Map> cacheGrpAffNodes = U.newHashMap(allNodes.size()); + Set rmtNodesWithCaches = new TreeSet<>(GridNodeOrderComparator.INSTANCE); + + fillAffinityNodeCaches(allNodes, allCacheNodes, cacheGrpAffNodes, rmtNodesWithCaches); + + return new DiscoCache( + topVer, + discoCache.state(), + discoCache.localNode(), + discoCache.remoteNodes(), + allNodes, + discoCache.serverNodes(), + discoCache.daemonNodes(), + U.sealList(rmtNodesWithCaches), + allCacheNodes, + cacheGrpAffNodes, + discoCache.nodeMap, + discoCache.alives, + discoCache.minimumNodeVersion()); + } } diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/IgniteDiscoveryCacheReuseSelfTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/IgniteDiscoveryCacheReuseSelfTest.java index c238a9a36f2b9..b5cd9e1e72a9b 100644 --- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/IgniteDiscoveryCacheReuseSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/IgniteDiscoveryCacheReuseSelfTest.java @@ -46,6 +46,13 @@ public class IgniteDiscoveryCacheReuseSelfTest extends GridCommonAbstractTest { return cfg; } + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + super.afterTest(); + } + /** * Tests correct reuse of discovery cache. 
* From 72da7b22093fec367aebccf2e8e4f0d911fc010f Mon Sep 17 00:00:00 2001 From: sboikov Date: Wed, 22 Nov 2017 13:18:31 +0300 Subject: [PATCH 199/243] ignite-6973 Fixed cache deploymentId generation for cache started on activation (cherry picked from commit 39cc9b7) --- .../processors/cache/ClusterCachesInfo.java | 4 +- ...itePdsCacheAssignmentNodeRestartsTest.java | 259 ++++++++++++++++++ .../testsuites/IgniteCacheTestSuite6.java | 3 +- 3 files changed, 263 insertions(+), 3 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCacheAssignmentNodeRestartsTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java index 49ba732d8cced..2c32bf297d787 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java @@ -1214,7 +1214,7 @@ public ExchangeActions onStateChangeRequest(ChangeGlobalStateMessage msg, Affini if (storedCfgs != null) { List reqs = new ArrayList<>(); - IgniteUuid deplymentId = IgniteUuid.fromUuid(msg.requestId()); + IgniteUuid deploymentId = msg.id(); for (StoredCacheData storedCfg : storedCfgs) { CacheConfiguration ccfg = storedCfg.config(); @@ -1224,7 +1224,7 @@ public ExchangeActions onStateChangeRequest(ChangeGlobalStateMessage msg, Affini ccfg.getName(), msg.initiatorNodeId()); - req.deploymentId(deplymentId); + req.deploymentId(deploymentId); req.startCacheConfiguration(ccfg); req.cacheType(ctx.cache().cacheType(ccfg.getName())); req.schema(new QuerySchema(storedCfg.queryEntities())); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCacheAssignmentNodeRestartsTest.java 
b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCacheAssignmentNodeRestartsTest.java new file mode 100644 index 0000000000000..b3f9b2bb9e64f --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCacheAssignmentNodeRestartsTest.java @@ -0,0 +1,259 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicInteger; +import org.apache.ignite.Ignite; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.WALMode; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.IgniteKernal; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; +import org.apache.ignite.internal.processors.cache.IgniteInternalCache; +import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteUuid; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; +import static org.apache.ignite.cache.CacheMode.PARTITIONED; +import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; +import static 
org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.IGNITE_PDS_CHECKPOINT_TEST_SKIP_SYNC; + +/** + * The test validates assignment after nodes restart with enabled persistence. + */ +public class IgnitePdsCacheAssignmentNodeRestartsTest extends GridCommonAbstractTest { + /** */ + private static TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true); + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setDataStorageConfiguration( + new DataStorageConfiguration().setDefaultDataRegionConfiguration( + new DataRegionConfiguration() + .setPersistenceEnabled(true) + .setInitialSize(50 * 1024 * 1024L) + .setMaxSize(50 * 1024 * 1024L)) + .setWalMode(WALMode.LOG_ONLY) + .setPageSize(1024)); + + ((TcpDiscoverySpi) cfg.getDiscoverySpi()).setIpFinder(ipFinder); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false)); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + deleteRecursively(U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false)); + + super.afterTest(); + } + + /** + * @param name Name. + * @param atomicityMode Atomicity mode. + * @param cacheMode Cache mode. + * @param backups Backups. + * @param grp Group. + * @return Cache configuration. 
+ */ + private CacheConfiguration cacheConfiguration(String name, + CacheAtomicityMode atomicityMode, + CacheMode cacheMode, + int backups, + String grp) { + CacheConfiguration ccfg = new CacheConfiguration(name); + + ccfg.setAtomicityMode(atomicityMode); + ccfg.setWriteSynchronizationMode(FULL_SYNC); + ccfg.setCacheMode(cacheMode); + ccfg.setGroupName(grp); + + ccfg.setAffinity(new RendezvousAffinityFunction(false, 128)); + + if (cacheMode == PARTITIONED) + ccfg.setBackups(backups); + + return ccfg; + } + + /** + * @throws Exception If failed. + */ + public void testAssignmentAfterRestarts() throws Exception { + try { + System.setProperty(IGNITE_PDS_CHECKPOINT_TEST_SKIP_SYNC, "true"); + + final int gridsCnt = 5; + + final int groupsCnt = 2; + + final IgniteEx node = (IgniteEx) startGridsMultiThreaded(gridsCnt); + + final List cfgs = Arrays.asList( + cacheConfiguration("g1c1", TRANSACTIONAL, PARTITIONED, gridsCnt, "testGrp1"), + cacheConfiguration("g1c2", TRANSACTIONAL, PARTITIONED, gridsCnt, "testGrp1"), + cacheConfiguration("g2c1", TRANSACTIONAL, PARTITIONED, gridsCnt, "testGrp2"), + cacheConfiguration("g2c2", TRANSACTIONAL, PARTITIONED, gridsCnt, "testGrp2")); + + node.getOrCreateCaches(cfgs); + + validateDepIds(groupsCnt); + + stopAllGrids(); + + IgniteEx node2 = (IgniteEx) startGridsMultiThreaded(gridsCnt); + + validateDepIds(groupsCnt); // Deployment ids must be the same on all nodes. 
+ + final int restartIdxFrom = 2; + + final AtomicInteger idx = new AtomicInteger(restartIdxFrom); + + IgniteInternalFuture fut = GridTestUtils.runMultiThreadedAsync(new Callable() { + @Override public Void call() throws Exception { + int nodeIdx = idx.getAndIncrement(); + + stopGrid(nodeIdx); + + return null; + } + }, gridsCnt - restartIdxFrom, "stop-node"); + + fut.get(); + + awaitPartitionMapExchange(); + + checkAffinity(); + + idx.set(restartIdxFrom); + + fut = GridTestUtils.runMultiThreadedAsync(new Callable() { + @Override public Void call() throws Exception { + int nodeIdx = idx.getAndIncrement(); + + startGrid(nodeIdx); + + return null; + } + }, gridsCnt - restartIdxFrom, "start-node"); + + fut.get(); + + awaitPartitionMapExchange(); + + AffinityTopologyVersion topVer = node2.context().cache().context().exchange().readyAffinityVersion(); + + log.info("Using version: " + topVer); + + checkAffinity(); + } + finally { + System.clearProperty(IGNITE_PDS_CHECKPOINT_TEST_SKIP_SYNC); + } + } + + /** + * @param grpCnt Group count. + */ + private void validateDepIds(int grpCnt) { + Map depIds = new HashMap<>(); + + for (Ignite ignite : G.allGrids()) { + final Map descMap = ((IgniteEx) ignite).context().cache().cacheGroupDescriptors(); + + for (Map.Entry entry : descMap.entrySet()) { + final IgniteUuid u = entry.getValue().deploymentId(); + + final IgniteUuid u0 = depIds.get(entry.getKey()); + + if (u0 == null) + depIds.put(entry.getKey(), u); + else + assertEquals("Descriptors do not match", u0, u); + } + } + + assertEquals(grpCnt + 1, depIds.size()); + } + + /** + * @throws Exception If failed. + */ + private void checkAffinity() throws Exception { + List nodes = G.allGrids(); + + ClusterNode crdNode = null; + + for (Ignite node : nodes) { + ClusterNode locNode = node.cluster().localNode(); + + if (crdNode == null || locNode.order() < crdNode.order()) + crdNode = locNode; + } + + AffinityTopologyVersion topVer = ((IgniteKernal) grid(crdNode)). 
+ context().cache().context().exchange().readyAffinityVersion(); + + Map>> affMap = new HashMap<>(); + + for (Ignite node : nodes) { + IgniteKernal node0 = (IgniteKernal) node; + + for (IgniteInternalCache cache : node0.context().cache().caches()) { + List> aff = affMap.get(cache.name()); + List> aff0 = cache.context().affinity().assignments(topVer); + + if (aff != null) + assertEquals(aff, aff0); + else + affMap.put(cache.name(), aff0); + } + } + } +} \ No newline at end of file diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite6.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite6.java index 8a2d6a066f840..8ad2e38406b1d 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite6.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite6.java @@ -18,7 +18,6 @@ package org.apache.ignite.testsuites; import junit.framework.TestSuite; -import org.apache.ignite.internal.processors.cache.IgniteOutOfMemoryPropagationTest; import org.apache.ignite.internal.processors.cache.distributed.CacheExchangeMergeTest; import org.apache.ignite.internal.processors.cache.distributed.CachePartitionStateTest; import org.apache.ignite.internal.processors.cache.distributed.GridCachePartitionEvictionDuringReadThroughSelfTest; @@ -26,6 +25,7 @@ import org.apache.ignite.internal.processors.cache.distributed.IgniteOptimisticTxSuspendResumeMultiServerTest; import org.apache.ignite.internal.processors.cache.distributed.IgniteOptimisticTxSuspendResumeTest; import org.apache.ignite.internal.processors.cache.distributed.IgnitePessimisticTxSuspendResumeTest; +import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsCacheAssignmentNodeRestartsTest; import org.apache.ignite.internal.processors.cache.transactions.TxRollbackOnTimeoutNearCacheTest; import org.apache.ignite.internal.processors.cache.transactions.TxRollbackOnTimeoutNoDeadlockDetectionTest; 
import org.apache.ignite.internal.processors.cache.transactions.TxRollbackOnTimeoutTest; @@ -55,6 +55,7 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(TxRollbackOnTimeoutNearCacheTest.class); suite.addTestSuite(IgniteCacheThreadLocalTxTest.class); + suite.addTestSuite(IgnitePdsCacheAssignmentNodeRestartsTest.class); // TODO enable this test after IGNITE-6753, now it takes too long // suite.addTestSuite(IgniteOutOfMemoryPropagationTest.class); From c5f3491fe7f4da59e2f28e27412452498afa8de1 Mon Sep 17 00:00:00 2001 From: Tim Onyschak Date: Mon, 13 Nov 2017 10:47:30 +0300 Subject: [PATCH 200/243] IGNITE-6767 Reset non-valid ner cache entry (cherry picked from commit 85027e7) --- .../distributed/near/GridNearCacheEntry.java | 2 +- .../near/GridCacheNearClientHitTest.java | 154 ++++++++++++++++++ .../testsuites/IgniteCacheTestSuite2.java | 3 +- 3 files changed, 157 insertions(+), 2 deletions(-) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearClientHitTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheEntry.java index ea52766c6346e..baf117b95e686 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheEntry.java @@ -379,7 +379,7 @@ public boolean loadedValue(@Nullable IgniteInternalTx tx, CacheObject old = this.val; boolean hasVal = hasValueUnlocked(); - if (this.dhtVer == null || this.dhtVer.compareTo(dhtVer) < 0) { + if (this.dhtVer == null || this.dhtVer.compareTo(dhtVer) < 0 || !valid(topVer)) { primaryNode(primaryNodeId, topVer); update(val, expireTime, ttl, ver, true); diff --git 
a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearClientHitTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearClientHitTest.java new file mode 100644 index 0000000000000..1dd62e49863be --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearClientHitTest.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.distributed.near; + +import java.util.UUID; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.eviction.lru.LruEvictionPolicy; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.configuration.NearCacheConfiguration; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +import static org.apache.ignite.cache.CachePeekMode.NEAR; + +/** + * + */ +public class GridCacheNearClientHitTest extends GridCommonAbstractTest { + /** Ip finder. */ + private final static TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true); + + /** */ + private final static String CACHE_NAME = "test-near-cache"; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(final String igniteInstanceName) throws Exception { + final IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(ipFinder); + + return cfg; + } + + /** + * @param igniteInstanceName Node name. + * @return Configuration. + * @throws Exception If failed. + */ + private IgniteConfiguration getClientConfiguration(final String igniteInstanceName) throws Exception { + final IgniteConfiguration cfg = getConfiguration(igniteInstanceName); + + cfg.setClientMode(true); + + return cfg; + } + + /** + * @return Cache configuration. 
+ */ + private CacheConfiguration cacheConfiguration() { + CacheConfiguration cfg = new CacheConfiguration<>(); + + cfg.setAtomicityMode(CacheAtomicityMode.ATOMIC); + + cfg.setCacheMode(CacheMode.PARTITIONED); + + cfg.setBackups(1); + + cfg.setCopyOnRead(false); + + cfg.setName(CACHE_NAME); + + return cfg; + } + + /** + * @return Near cache configuration. + */ + private NearCacheConfiguration nearCacheConfiguration() { + NearCacheConfiguration cfg = new NearCacheConfiguration<>(); + + cfg.setNearEvictionPolicy(new LruEvictionPolicy<>(25000)); + + return cfg; + } + + /** + * @throws Exception If failed. + */ + public void testLocalPeekAfterPrimaryNodeLeft() throws Exception { + try { + Ignite crd = startGrid("coordinator", getConfiguration("coordinator")); + + Ignite client = startGrid("client", getClientConfiguration("client")); + + Ignite srvNode = startGrid("server", getConfiguration("server")); + + awaitPartitionMapExchange(); + + IgniteCache cache = srvNode.getOrCreateCache(cacheConfiguration()); + + IgniteCache nearCache = client.createNearCache(CACHE_NAME, nearCacheConfiguration()); + + UUID serverNodeId = srvNode.cluster().localNode().id(); + + int remoteKey = 0; + for (; ; remoteKey++) { + if (crd.affinity(CACHE_NAME).mapKeyToNode(remoteKey).id().equals(serverNodeId)) + break; + } + + cache.put(remoteKey, remoteKey); + + Object value = nearCache.localPeek(remoteKey, NEAR); + + assertNull("The value should not be loaded from a remote node.", value); + + nearCache.get(remoteKey); + + value = nearCache.localPeek(remoteKey, NEAR); + + assertNotNull("The returned value should not be null.", value); + + srvNode.close(); + + awaitPartitionMapExchange(); + + value = nearCache.localPeek(remoteKey, NEAR); + + assertNull("The value should not be loaded from a remote node.", value); + + value = nearCache.get(remoteKey); + + assertNotNull("The value should be loaded from a remote node.", value); + + value = nearCache.localPeek(remoteKey, NEAR); + + assertNotNull("The 
returned value should not be null.", value); + } + finally { + stopAllGrids(); + } + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java index 69bb87806c6cf..65cbcc03f27ba 100644 --- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java +++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite2.java @@ -88,6 +88,7 @@ import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheAtomicNearEvictionEventSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheAtomicNearMultiNodeSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheAtomicNearReadersSelfTest; +import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheNearClientHitTest; import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheNearEvictionEventSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheNearJobExecutionSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.GridCacheNearMultiGetSelfTest; @@ -243,7 +244,7 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(GridCacheNearPartitionedClearSelfTest.class)); suite.addTest(new TestSuite(GridCacheOffheapUpdateSelfTest.class)); - + suite.addTest(new TestSuite(GridCacheNearClientHitTest.class)); suite.addTest(new TestSuite(GridCacheNearPrimarySyncSelfTest.class)); suite.addTest(new TestSuite(GridCacheColocatedPrimarySyncSelfTest.class)); From e4dd0a612d535c534297d51f35b24e9b6f479eb7 Mon Sep 17 00:00:00 2001 From: EdShangGG Date: Tue, 19 Dec 2017 15:17:27 +0300 Subject: [PATCH 201/243] fixing issue with checkpoint lock (cherry picked from commit 8a4eadd) --- .../distributed/dht/GridPartitionedSingleGetFuture.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java index f761b9c42d2e6..a89c3ad09884b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java @@ -658,6 +658,8 @@ private void setSkipValueResult(boolean res, @Nullable GridCacheVersion ver) { * @param ver Version. */ private void setResult(@Nullable CacheObject val, @Nullable GridCacheVersion ver) { + cctx.shared().database().checkpointReadLock(); + try { assert !skipVals; @@ -679,6 +681,9 @@ private void setResult(@Nullable CacheObject val, @Nullable GridCacheVersion ver catch (Exception e) { onDone(e); } + finally { + cctx.shared().database().checkpointReadUnlock(); + } } /** From 282103eb5050796f92fb07ce1eb898d52da5bc1d Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Fri, 22 Dec 2017 15:37:16 +0300 Subject: [PATCH 202/243] Fixing checkpoint lock acquisition (cherry picked from commit 144c666) (cherry picked from commit b00be67) (cherry picked from commit 003f033) (cherry picked from commit 956092f) (cherry picked from commit db4fa5b) --- .../processors/cache/GridCacheAdapter.java | 20 +++++++++ .../processors/cache/GridCacheUtils.java | 4 ++ .../distributed/dht/GridDhtCacheAdapter.java | 4 ++ .../dht/GridDhtTxPrepareFuture.java | 5 +++ .../GridCacheDatabaseSharedManager.java | 41 +++++++++++-------- .../cache/transactions/IgniteTxManager.java | 5 +++ 6 files changed, 61 insertions(+), 18 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java index 
92e40b01fb176..660a89fae6076 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java @@ -809,6 +809,8 @@ public String toString() { } if (e != null) { + ctx.shared().database().checkpointReadLock(); + try { cacheVal = e.peek(modes.heap, modes.offheap, topVer, plc); } @@ -820,6 +822,8 @@ public String toString() { } finally { ctx0.evicts().touch(e, null); + + ctx.shared().database().checkpointReadUnlock(); } } @@ -2024,7 +2028,15 @@ else if (storeEnabled) try { ctx.shared().database().ensureFreeSpace(ctx.dataRegion()); + } + catch (IgniteCheckedException e) { + // Wrap errors (will be unwrapped). + throw new GridClosureException(e); + } + ctx.shared().database().checkpointReadLock(); + + try { entry = entryEx(key); entry.unswap(); @@ -2069,6 +2081,9 @@ else if (storeEnabled) // Wrap errors (will be unwrapped). throw new GridClosureException(e); } + finally { + ctx.shared().database().checkpointReadUnlock(); + } } } }); @@ -4421,6 +4436,8 @@ private boolean clearLocally0(K key, boolean readers) { GridCacheVersion obsoleteVer = ctx.versions().next(); + ctx.shared().database().checkpointReadLock(); + try { KeyCacheObject cacheKey = ctx.toCacheKeyObject(key); @@ -4435,6 +4452,9 @@ private boolean clearLocally0(K key, boolean readers) { catch (IgniteCheckedException ex) { U.error(log, "Failed to clearLocally entry for key: " + key, ex); } + finally { + ctx.shared().database().checkpointReadUnlock(); + } return false; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java index 4bf54bf2626e1..a147f6e86c3cf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java @@ -1719,6 +1719,8 @@ private void process(KeyCacheObject key, CacheObject val, GridCacheVersion ver, while (true) { GridCacheEntryEx entry = null; + cctx.shared().database().checkpointReadLock(); + try { entry = colocated.entryEx(key, topVer); @@ -1748,6 +1750,8 @@ private void process(KeyCacheObject key, CacheObject val, GridCacheVersion ver, finally { if (entry != null) cctx.evicts().touch(entry, topVer); + + cctx.shared().database().checkpointReadUnlock(); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java index 28f9c7627ef5d..953b47dd4a35e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java @@ -647,6 +647,8 @@ private void loadEntry(KeyCacheObject key, if (part.reserve()) { GridCacheEntryEx entry = null; + ctx.shared().database().checkpointReadLock(); + try { long ttl = CU.ttlForLoad(plc); @@ -678,6 +680,8 @@ private void loadEntry(KeyCacheObject key, entry.context().evicts().touch(entry, topVer); part.release(); + + ctx.shared().database().checkpointReadUnlock(); } } else if (log.isDebugEnabled()) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java index a1f3984ca0472..bde15c254fc9a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java @@ -347,6 +347,8 @@ private void onEntriesLocked() { ExpiryPolicy expiry = cacheCtx.expiryForTxEntry(txEntry); + cctx.database().checkpointReadLock(); + try { if ((txEntry.op() == CREATE || txEntry.op() == UPDATE) && txEntry.conflictExpireTime() == CU.EXPIRE_TIME_CALCULATE) { @@ -509,6 +511,9 @@ else if (retVal) catch (GridCacheEntryRemovedException e) { assert false : "Got entry removed exception while holding transactional lock on entry [e=" + e + ", cached=" + cached + ']'; } + finally { + cctx.database().checkpointReadUnlock(); + } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index d9c66209b1187..dd56ed3bf154a 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -1694,45 +1694,50 @@ private void restorePartitionState( // TODO: https://issues.apache.org/jira/browse/IGNITE-6097 grp.offheap().onPartitionInitialCounterUpdated(i, 0); - long partMetaId = pageMem.partitionMetaPageId(grpId, i); - long partMetaPage = pageMem.acquirePage(grpId, partMetaId); + checkpointReadLock(); try { - long pageAddr = pageMem.writeLock(grpId, partMetaId, partMetaPage); - - boolean changed = false; + long partMetaId = pageMem.partitionMetaPageId(grpId, i); + long partMetaPage = pageMem.acquirePage(grpId, partMetaId); try { - PagePartitionMetaIO io = PagePartitionMetaIO.VERSIONS.forPage(pageAddr); + long pageAddr = pageMem.writeLock(grpId, partMetaId, partMetaPage); + + boolean changed = false; - T2 fromWal = partStates.get(new T2<>(grpId, i)); + try { + 
PagePartitionMetaIO io = PagePartitionMetaIO.VERSIONS.forPage(pageAddr); - if (fromWal != null) { + T2 fromWal = partStates.get(new T2<>(grpId, i));if (fromWal != null) { int stateId = fromWal.get1(); - io.setPartitionState(pageAddr, (byte)stateId); + io.setPartitionState(pageAddr, (byte)stateId); - changed = updateState(part, stateId); + changed = updateState(part, stateId); - if (stateId == GridDhtPartitionState.OWNING.ordinal()) { - grp.offheap().onPartitionInitialCounterUpdated(i, fromWal.get2()); + if (stateId == GridDhtPartitionState.OWNING.ordinal()) {grp.offheap().onPartitionInitialCounterUpdated(i, fromWal.get2()); + + if (part.initialUpdateCounter() < fromWal.get2() ) { - if (part.initialUpdateCounter() < fromWal.get2()) { part.initialUpdateCounter(fromWal.get2()); - changed = true; + changed = true; + } } } + else + changed = updateState(part, (int)io.getPartitionState(pageAddr)); + } + finally { + pageMem.writeUnlock(grpId, partMetaId, partMetaPage, null, changed); } - else - changed = updateState(part, (int)io.getPartitionState(pageAddr)); } finally { - pageMem.writeUnlock(grpId, partMetaId, partMetaPage, null, changed); + pageMem.releasePage(grpId, partMetaId, partMetaPage); } } finally { - pageMem.releasePage(grpId, partMetaId, partMetaPage); + checkpointReadUnlock(); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxManager.java index 77634bdccb23f..c6b1bd76ac099 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxManager.java @@ -1589,6 +1589,8 @@ private boolean lockMultiple(IgniteInternalTx tx, Iterable entrie GridCacheContext cacheCtx = txEntry1.context(); while (true) { + cctx.database().checkpointReadLock(); + try { 
GridCacheEntryEx entry1 = txEntry1.cached(); @@ -1641,6 +1643,9 @@ private boolean lockMultiple(IgniteInternalTx tx, Iterable entrie throw new IgniteCheckedException("Entry lock has been cancelled for transaction: " + tx); } + finally { + cctx.database().checkpointReadUnlock(); + } } } From 42e37371114d426fbaa9878597df8d769c10cd8b Mon Sep 17 00:00:00 2001 From: Dmitriy Shabalin Date: Wed, 27 Dec 2017 14:41:03 +0700 Subject: [PATCH 203/243] IGNITE-7257 Web Console: Fixed reconnection after change profile. (cherry picked from commit 48a76f6) --- modules/web-console/frontend/app/app.js | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/modules/web-console/frontend/app/app.js b/modules/web-console/frontend/app/app.js index a1cd6eb8c6980..332bc24b2875f 100644 --- a/modules/web-console/frontend/app/app.js +++ b/modules/web-console/frontend/app/app.js @@ -300,7 +300,16 @@ angular.module('ignite-console', [ $root.gettingStarted = gettingStarted; }]) .run(['$rootScope', 'AgentManager', ($root, agentMgr) => { - $root.$on('user', () => agentMgr.connect()); + let lastUser; + + $root.$on('user', (e, user) => { + if (lastUser) + return; + + lastUser = user; + + agentMgr.connect(); + }); }]) .run(['$transitions', ($transitions) => { $transitions.onSuccess({ }, (trans) => { From 62432a6fb09a7677ad0c01e3fa56745af63e5a7e Mon Sep 17 00:00:00 2001 From: Ilya Borisov Date: Wed, 27 Dec 2017 10:58:37 +0700 Subject: [PATCH 204/243] IGNITE-7168 Remove remains of web agent download feature from web-console-header. 
(cherry picked from commit 30027df) --- .../app/components/web-console-header/component.js | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/modules/web-console/frontend/app/components/web-console-header/component.js b/modules/web-console/frontend/app/components/web-console-header/component.js index fbdbcfc4af307..3b1f2f2bd6979 100644 --- a/modules/web-console/frontend/app/components/web-console-header/component.js +++ b/modules/web-console/frontend/app/components/web-console-header/component.js @@ -23,12 +23,6 @@ export default { controller: class { static $inject = ['$rootScope', '$scope', '$state', 'IgniteBranding', 'UserNotifications']; - static webAgentDownloadVisibleStates = [ - 'base.configuration', - 'base.sql', - 'base.settings' - ]; - static connectedClustersUnvisibleStates = [ '403', '404' ]; @@ -37,21 +31,14 @@ export default { Object.assign(this, {$rootScope, $scope, $state, branding, UserNotifications}); } - setWebAgentDownloadVisible() { - this.isWebAgentDownloadVisible = - this.constructor.webAgentDownloadVisibleStates.some((state) => this.$state.includes(state)); - } - setConnectedClustersVisible() { this.isConnectedClustersVisible = !this.constructor.connectedClustersUnvisibleStates.some((state) => this.$state.includes(state)); } $onInit() { - this.setWebAgentDownloadVisible(); this.setConnectedClustersVisible(); - this.$scope.$on('$stateChangeSuccess', () => this.setWebAgentDownloadVisible()); this.$scope.$on('$stateChangeSuccess', () => this.setConnectedClustersVisible()); } }, From 06fafb6f6c4d52a84dd59da5f687cca90197aaa6 Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Thu, 28 Dec 2017 17:07:58 +0700 Subject: [PATCH 205/243] IGNITE-6647 Web Console: Support recreate index in migrations. 
(cherry picked from commit a1b1f6c) --- modules/web-console/backend/index.js | 6 ++-- .../backend/migrations/recreate-index.js | 30 +++++++++++++++++++ 2 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 modules/web-console/backend/migrations/recreate-index.js diff --git a/modules/web-console/backend/index.js b/modules/web-console/backend/index.js index 266fa54e9a775..013de47ebc2bf 100644 --- a/modules/web-console/backend/index.js +++ b/modules/web-console/backend/index.js @@ -106,11 +106,13 @@ const init = ([settings, apiSrv, agentsHnd, browsersHnd]) => { * @param dbConnectionUri Mongo connection url. * @param group Migrations group. * @param migrationsPath Migrations path. + * @param collectionName Name of collection where migrations write info about applied scripts. */ -const migrate = (dbConnectionUri, group, migrationsPath) => { +const migrate = (dbConnectionUri, group, migrationsPath, collectionName) => { const migrator = new MigrateMongoose({ migrationsPath, dbConnectionUri, + collectionName, autosync: true }); @@ -144,7 +146,7 @@ injector.log.debug = () => {}; Promise.all([injector('settings'), injector('mongo')]) .then(([{mongoUrl}]) => { return migrate(mongoUrl, 'Ignite', path.join(__dirname, 'migrations')) - .then(() => migrate(mongoUrl, 'Ignite Modules', path.join(igniteModules, 'migrations'))); + .then(() => migrate(mongoUrl, 'Ignite Modules', path.join(igniteModules, 'migrations'), 'migrationsModules')); }) .then(() => Promise.all([injector('settings'), injector('api-server'), injector('agents-handler'), injector('browsers-handler')])) .then(init) diff --git a/modules/web-console/backend/migrations/recreate-index.js b/modules/web-console/backend/migrations/recreate-index.js new file mode 100644 index 0000000000000..328ed431b217b --- /dev/null +++ b/modules/web-console/backend/migrations/recreate-index.js @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use strict'; + +module.exports = function(done, model, oldIdxName, oldIdx, newIdx) { + model.indexExists(oldIdxName) + .then((exists) => { + if (exists) { + return model.dropIndex(oldIdx) + .then(() => model.createIndex(newIdx, {unique: true})); + } + }) + .then(() => done()) + .catch(done); +}; From 4d3701d4b491e11e02ded0fc9f7cc0037b8268a6 Mon Sep 17 00:00:00 2001 From: vsisko Date: Wed, 10 Jan 2018 15:44:54 +0700 Subject: [PATCH 206/243] IGNITE-7034 Web Console: Hide connected clusters in "Become this user" mode. (cherry picked from commit f2edb7e) --- .../frontend/app/components/web-console-header/template.pug | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/web-console/frontend/app/components/web-console-header/template.pug b/modules/web-console/frontend/app/components/web-console-header/template.pug index 660874e12e2ec..c342090063413 100644 --- a/modules/web-console/frontend/app/components/web-console-header/template.pug +++ b/modules/web-console/frontend/app/components/web-console-header/template.pug @@ -24,10 +24,10 @@ | You are now in #[b Demo Mode]. #[a(ng-click='closeDemo();') Close Demo?] 
.wch-content.container - connected-clusters(ng-if='$ctrl.$rootScope.user && !$ctrl.$rootScope.IgniteDemoMode && $ctrl.isConnectedClustersVisible') + connected-clusters(ng-if='$ctrl.$rootScope.user && !$ctrl.$rootScope.IgniteDemoMode && $ctrl.isConnectedClustersVisible && !$root.user.becomeUsed') a(ui-sref='signin') img.wch-logo(ng-src='{{::$ctrl.branding.headerLogo}}') - + .wch-slot.wch-slot-left(ng-transclude='slotLeft') .wch-slot.wch-slot-right(ng-transclude='slotRight') From 2330b2cf7ec636ce085b1d9c7287a62af5082ba8 Mon Sep 17 00:00:00 2001 From: Aleksei Scherbakov Date: Thu, 16 Nov 2017 12:45:11 +0300 Subject: [PATCH 207/243] ignite-6858 Fail query if thread has is cache lock and exchange is in progress (cherry picked from commit caad1e9) --- .../distributed/near/GridNearTxLocal.java | 7 +- .../processors/query/h2/IgniteH2Indexing.java | 18 +++++ .../h2/twostep/GridReduceQueryExecutor.java | 11 ++- .../IgniteCacheQueryNodeRestartSelfTest2.java | 75 ++++++++++++++++--- ...IgniteCacheQueryNodeRestartTxSelfTest.java | 36 +++++++++ .../IgniteCacheQuerySelfTestSuite2.java | 2 + 6 files changed, 134 insertions(+), 15 deletions(-) create mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheQueryNodeRestartTxSelfTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java index 085f0b76a09e7..a3fddafbe31c0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java @@ -4122,14 +4122,13 @@ public boolean addTimeoutHandler() { /** {@inheritDoc} */ @Override public void onTimeout() { if (state(MARKED_ROLLBACK, true) || (state() == MARKED_ROLLBACK)) { - if 
(log.isDebugEnabled()) - log.debug("Will rollback tx on timeout: " + this); - cctx.kernalContext().closure().runLocalSafe(new Runnable() { @Override public void run() { - // Note: if rollback asynchonously on timeout should not clear thread map + // Note: if rollback asynchronously on timeout should not clear thread map // since thread started tx still should be able to see this tx. rollbackNearTxLocalAsync(true); + + U.warn(log, "Transaction was rolled back because the timeout is reached: " + GridNearTxLocal.this); } }); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 690b9ff13c9a6..8390ae800fba3 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -72,6 +72,8 @@ import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture; +import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.query.CacheQueryPartitionInfo; import org.apache.ignite.internal.processors.cache.query.GridCacheQueryMarshallable; @@ -2421,6 +2423,22 @@ public AffinityTopologyVersion readyTopologyVersion() { return ctx.cache().context().exchange().readyAffinityVersion(); } + /** + * @param readyVer Ready topology version. + * + * @return {@code true} If pending distributed exchange exists because server topology is changed. 
+ */ + public boolean serverTopologyChanged(AffinityTopologyVersion readyVer) { + GridDhtPartitionsExchangeFuture fut = ctx.cache().context().exchange().lastTopologyFuture(); + + if (fut.isDone()) + return false; + + AffinityTopologyVersion initVer = fut.initialVersion(); + + return initVer.compareTo(readyVer) > 0 && !CU.clientNode(fut.firstEvent().node()); + } + /** * @param topVer Topology version. * @throws IgniteCheckedException If failed. diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java index f85cd94a10d62..8e994aa88f810 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java @@ -90,6 +90,7 @@ import org.apache.ignite.lang.IgniteBiClosure; import org.apache.ignite.lang.IgniteFuture; import org.apache.ignite.plugin.extensions.communication.Message; +import org.apache.ignite.transactions.TransactionException; import org.h2.command.ddl.CreateTableData; import org.h2.engine.Session; import org.h2.index.Index; @@ -560,9 +561,15 @@ public Iterator> query( AffinityTopologyVersion topVer = h2.readyTopologyVersion(); + // Check if topology is changed while retrying on locked topology. + if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) { + throw new CacheException(new TransactionException("Server topology is changed during query " + + "execution inside a transaction. It's recommended to rollback and retry transaction.")); + } + List cacheIds = qry.cacheIds(); - Collection nodes = null; + Collection nodes; // Explicit partition mapping for unstable topology. 
Map partsMap = null; @@ -1737,4 +1744,4 @@ Map queryPartitionsMap() { return qryMap; } } -} \ No newline at end of file +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheQueryNodeRestartSelfTest2.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheQueryNodeRestartSelfTest2.java index 627b3eb34ea0d..bda503ee3aad6 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheQueryNodeRestartSelfTest2.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheQueryNodeRestartSelfTest2.java @@ -32,20 +32,25 @@ import org.apache.ignite.cache.query.QueryCancelledException; import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.cache.query.annotations.QuerySqlField; +import org.apache.ignite.cluster.ClusterTopologyException; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.IgniteInterruptedCheckedException; import org.apache.ignite.internal.util.GridRandom; import org.apache.ignite.internal.util.typedef.CAX; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import 
org.apache.ignite.transactions.TransactionException; +import org.apache.ignite.transactions.TransactionTimeoutException; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; import static org.apache.ignite.cache.CacheMode.PARTITIONED; @@ -224,7 +229,7 @@ public void testRestarts() throws Exception { IgniteInternalFuture fut1 = multithreadedAsync(new CAX() { @Override public void applyx() throws IgniteCheckedException { - GridRandom rnd = new GridRandom(); + final GridRandom rnd = new GridRandom(); while (!qrysDone.get()) { int g; @@ -235,28 +240,43 @@ public void testRestarts() throws Exception { while (!locks.compareAndSet(g, 0, 1)); try { + final IgniteEx grid = grid(g); + if (rnd.nextBoolean()) { // Partitioned query. - IgniteCache cache = grid(g).cache("pu"); + final IgniteCache cache = grid.cache("pu"); - SqlFieldsQuery qry = new SqlFieldsQuery(PARTITIONED_QRY); + final SqlFieldsQuery qry = new SqlFieldsQuery(PARTITIONED_QRY); boolean smallPageSize = rnd.nextBoolean(); if (smallPageSize) qry.setPageSize(3); + final IgniteCache co = grid.cache("co"); + try { - assertEquals(pRes, cache.query(qry).getAll()); + runQuery(grid, new Runnable() { + @Override public void run() { + if (rnd.nextBoolean()) + co.get(rnd.nextInt(COMPANY_CNT)); // Get lock run test with open transaction. + + assertEquals(pRes, cache.query(qry).getAll()); + } + }); } catch (CacheException e) { // Interruptions are expected here. 
- if (e.getCause() instanceof IgniteInterruptedCheckedException) + if (e.getCause() instanceof IgniteInterruptedCheckedException || + e.getCause() instanceof InterruptedException || + e.getCause() instanceof ClusterTopologyException || + e.getCause() instanceof TransactionTimeoutException || + e.getCause() instanceof TransactionException) continue; if (e.getCause() instanceof QueryCancelledException) fail("Retry is expected"); if (!smallPageSize) - e.printStackTrace(); + U.error(grid.log(), "On large page size must retry.", e); assertTrue("On large page size must retry.", smallPageSize); @@ -286,13 +306,13 @@ public void testRestarts() throws Exception { continue; if (!failedOnRemoteFetch) { - e.printStackTrace(); + U.error(grid.log(), "Must fail inside of GridResultPage.fetchNextPage or subclass.", e); fail("Must fail inside of GridResultPage.fetchNextPage or subclass."); } } } else { // Replicated query. - IgniteCache cache = grid(g).cache("co"); + IgniteCache cache = grid.cache("co"); assertEquals(rRes, cache.query(new SqlFieldsQuery(REPLICATED_QRY)).getAll()); } @@ -358,7 +378,14 @@ public void testRestarts() throws Exception { restartsDone.set(true); - fut2.get(); + try { + fut2.get(20_000); + } + catch (IgniteFutureTimeoutCheckedException e) { + U.dumpThreads(log); + + fail("Stopping restarts timeout."); + } info("Restarts stopped."); @@ -379,13 +406,27 @@ public void testRestarts() throws Exception { stopAllGrids(); } + /** + * Run query closure. + * + * @param grid Grid. + * @param qryRunnable Query runnable. + */ + protected void runQuery(IgniteEx grid, Runnable qryRunnable) { + qryRunnable.run(); + } + /** * */ private static class Person implements Serializable { + /** */ @QuerySqlField(index = true) int id; + /** + * @param id Person ID. 
+ */ Person(int id) { this.id = id; } @@ -395,12 +436,18 @@ private static class Person implements Serializable { * */ private static class Purchase implements Serializable { + /** */ @QuerySqlField(index = true) int personId; + /** */ @QuerySqlField(index = true) int productId; + /** + * @param personId Person ID. + * @param productId Product ID. + */ Purchase(int personId, int productId) { this.personId = personId; this.productId = productId; @@ -411,9 +458,13 @@ private static class Purchase implements Serializable { * */ private static class Company implements Serializable { + /** */ @QuerySqlField(index = true) int id; + /** + * @param id ID. + */ Company(int id) { this.id = id; } @@ -423,12 +474,18 @@ private static class Company implements Serializable { * */ private static class Product implements Serializable { + /** */ @QuerySqlField(index = true) int id; + /** */ @QuerySqlField(index = true) int companyId; + /** + * @param id ID. + * @param companyId Company ID. + */ Product(int id, int companyId) { this.id = id; this.companyId = companyId; diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheQueryNodeRestartTxSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheQueryNodeRestartTxSelfTest.java new file mode 100644 index 0000000000000..ae06c4237783a --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheQueryNodeRestartTxSelfTest.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.distributed.near; + +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.transactions.Transaction; + +import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC; +import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ; + +/** + * Test for distributed queries with node restarts inside transactions. + */ +public class IgniteCacheQueryNodeRestartTxSelfTest extends IgniteCacheQueryNodeRestartSelfTest2 { + /** {@inheritDoc} */ + @Override protected void runQuery(IgniteEx grid, Runnable qryRunnable) { + try(Transaction tx = grid.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) { + qryRunnable.run(); + } + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java index 91e447881b88e..abe06ec9c61c9 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java @@ -31,6 +31,7 @@ import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryNodeRestartDistributedJoinSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryNodeRestartSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryNodeRestartSelfTest2; +import 
org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryNodeRestartTxSelfTest; import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryStopOnCancelOrTimeoutDistributedJoinSelfTest; import org.apache.ignite.internal.processors.cache.index.DynamicColumnsConcurrentAtomicPartitionedSelfTest; import org.apache.ignite.internal.processors.cache.index.DynamicColumnsConcurrentAtomicReplicatedSelfTest; @@ -85,6 +86,7 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(IgniteCacheQueryNodeFailTest.class); suite.addTestSuite(IgniteCacheQueryNodeRestartSelfTest.class); suite.addTestSuite(IgniteCacheQueryNodeRestartSelfTest2.class); + suite.addTestSuite(IgniteCacheQueryNodeRestartTxSelfTest.class); suite.addTestSuite(IgniteCacheSqlQueryMultiThreadedSelfTest.class); suite.addTestSuite(IgniteCachePartitionedQueryMultiThreadedSelfTest.class); suite.addTestSuite(CacheScanPartitionQueryFallbackSelfTest.class); From d1b6334099eb66fd9132d811474256ba910f0a66 Mon Sep 17 00:00:00 2001 From: Dmitriy Shabalin Date: Thu, 11 Jan 2018 16:22:33 +0700 Subject: [PATCH 208/243] IGNITE-7036 Web Console: Improved export of grouped data on Admin panel. 
(cherry picked from commit fdc130c) --- .../list-of-registered-users.controller.js | 4 ---- .../list-of-registered-users/list-of-registered-users.tpl.pug | 3 +-- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.controller.js b/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.controller.js index f82ccbea26fe6..6c5575302c57e 100644 --- a/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.controller.js +++ b/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.controller.js @@ -261,10 +261,6 @@ export default class IgniteListOfRegisteredUsersCtrl { } } - exportCsv() { - this.gridApi.exporter.csvExport('visible', 'visible'); - } - groupByUser() { this.groupBy = 'user'; diff --git a/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.tpl.pug b/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.tpl.pug index 11ff7bc6fffa9..6aa4e31d8a93a 100644 --- a/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.tpl.pug +++ b/modules/web-console/frontend/app/components/list-of-registered-users/list-of-registered-users.tpl.pug @@ -45,8 +45,7 @@ include /app/helpers/jade/mixins required: false, options: '$ctrl.actionOptions' }) - button.btn-ignite.btn-ignite--primary-outline(ng-click='$ctrl.exportCsv()' bs-tooltip='' data-title='Export table to CSV' data-placement='top') - svg(ignite-icon='csv') + grid-export(grid-api='$ctrl.gridApi') form.ui-grid-settings-dateperiod(name=form novalidate) -var form = 'admin' +ignite-form-field-datepicker('Period: from', '$ctrl.params.startDate', '"startDate"', null, '$ctrl.params.endDate') From 84cb9078ae00fa7b895ba26004fd120466ec3cea Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Thu, 11 
Jan 2018 16:30:07 +0700 Subject: [PATCH 209/243] ignite-2.3.2 Version for release. --- modules/web-console/frontend/app/services/Version.service.js | 4 ---- 1 file changed, 4 deletions(-) diff --git a/modules/web-console/frontend/app/services/Version.service.js b/modules/web-console/frontend/app/services/Version.service.js index 22d0732cff835..8b67eb7d5a206 100644 --- a/modules/web-console/frontend/app/services/Version.service.js +++ b/modules/web-console/frontend/app/services/Version.service.js @@ -76,10 +76,6 @@ export default class IgniteVersion { this.webConsole = '2.2.0'; this.supportedVersions = [ - { - label: 'Ignite 2.4', - ignite: '2.4.0' - }, { label: 'Ignite 2.3', ignite: '2.3.0' From 142322a6727453e661f77cd431cf2527c0f4bd2b Mon Sep 17 00:00:00 2001 From: Vasiliy Sisko Date: Thu, 11 Jan 2018 21:22:59 +0700 Subject: [PATCH 210/243] IGNITE-6995 Web Console: Fixed code generation for near cache for server and client node. (cherry picked from commit e361cb6) --- .../generator/ConfigurationGenerator.js | 5 +++-- .../generator/JavaTransformer.service.js | 11 ++++++++--- .../generator/SpringTransformer.service.js | 12 +++++++++--- 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js index 0c40890f4f0cd..39538c2801bb9 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js @@ -1881,7 +1881,8 @@ export default class IgniteConfigurationGenerator { /** * Generate eviction policy object. * @param {Object} ccfg Parent configuration. - * @param {String} name Property name. + * @param {Function} available Function to check feature is supported in Ignite current version. + * @param {Boolean} near Near cache flag. 
* @param {Object} src Source. * @param {Object} dflt Default. * @returns {Object} Parent configuration. @@ -2335,7 +2336,7 @@ export default class IgniteConfigurationGenerator { this.cacheNodeFilter(cache, igfs ? [igfs] : [], ccfg); this.cacheConcurrency(cache, available, ccfg); this.cacheRebalance(cache, ccfg); - this.cacheNearServer(cache, ccfg); + this.cacheNearServer(cache, available, ccfg); this.cacheStatistics(cache, ccfg); this.cacheDomains(cache.domains, available, ccfg); diff --git a/modules/web-console/frontend/app/modules/configuration/generator/JavaTransformer.service.js b/modules/web-console/frontend/app/modules/configuration/generator/JavaTransformer.service.js index e5f4804100152..74b3fc68c4f07 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/JavaTransformer.service.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/JavaTransformer.service.js @@ -17,7 +17,9 @@ import AbstractTransformer from './AbstractTransformer'; import StringBuilder from './StringBuilder'; +import VersionService from 'app/services/Version.service'; +const versionService = new VersionService(); const STORE_FACTORY = ['org.apache.ignite.cache.store.jdbc.CacheJdbcPojoStoreFactory']; // Descriptors for generation of demo data. @@ -884,12 +886,13 @@ export default class IgniteJavaTransformer extends AbstractTransformer { * Build Java startup class with configuration. * * @param {Bean} cfg + * @param available Function to check target version of generated source to appropriate for generation. * @param pkg Package name. * @param {String} clsName Class name for generate factory class otherwise generate code snippet. * @param {Array.} clientNearCaches Is client node. 
* @returns {StringBuilder} */ - static igniteConfiguration(cfg, pkg, clsName, clientNearCaches) { + static igniteConfiguration(cfg, available, pkg, clsName, clientNearCaches) { const sb = new StringBuilder(); sb.append(`package ${pkg};`); @@ -903,7 +906,7 @@ export default class IgniteJavaTransformer extends AbstractTransformer { imports.push('org.apache.ignite.configuration.NearCacheConfiguration'); _.forEach(clientNearCaches, (cache) => { - const nearCacheBean = this.generator.cacheNearClient(cache); + const nearCacheBean = this.generator.cacheNearClient(cache, available); nearCacheBean.cacheName = cache.name; @@ -1045,7 +1048,9 @@ export default class IgniteJavaTransformer extends AbstractTransformer { const clientNearCaches = client ? _.filter(cluster.caches, (cache) => cache.cacheMode === 'PARTITIONED' && _.get(cache, 'clientNearConfiguration.enabled')) : []; - return this.igniteConfiguration(cfg, pkg, clsName, clientNearCaches); + const available = versionService.since.bind(versionService, targetVer.ignite); + + return this.igniteConfiguration(cfg, available, pkg, clsName, clientNearCaches); } /** diff --git a/modules/web-console/frontend/app/modules/configuration/generator/SpringTransformer.service.js b/modules/web-console/frontend/app/modules/configuration/generator/SpringTransformer.service.js index f0bfdb65a61ff..9e35e5dba5424 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/SpringTransformer.service.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/SpringTransformer.service.js @@ -19,6 +19,9 @@ import _ from 'lodash'; import AbstractTransformer from './AbstractTransformer'; import StringBuilder from './StringBuilder'; +import VersionService from 'app/services/Version.service'; + +const versionService = new VersionService(); export default class IgniteSpringTransformer extends AbstractTransformer { static escapeXml(str) { @@ -252,10 +255,11 @@ export default class IgniteSpringTransformer extends 
AbstractTransformer { * Build final XML. * * @param {Bean} cfg Ignite configuration. + * @param available Function to check target version of generated source to appropriate for generation. * @param {Boolean} clientNearCaches * @returns {StringBuilder} */ - static igniteConfiguration(cfg, clientNearCaches) { + static igniteConfiguration(cfg, available, clientNearCaches) { const sb = new StringBuilder(); // 0. Add header. @@ -302,7 +306,7 @@ export default class IgniteSpringTransformer extends AbstractTransformer { _.forEach(clientNearCaches, (cache) => { this.commentBlock(sb, `Configuration of near cache for cache "${cache.name}"`); - this.appendBean(sb, this.generator.cacheNearClient(cache), true); + this.appendBean(sb, this.generator.cacheNearClient(cache, available), true); sb.emptyLine(); }); @@ -322,6 +326,8 @@ export default class IgniteSpringTransformer extends AbstractTransformer { const clientNearCaches = client ? _.filter(cluster.caches, (cache) => cache.cacheMode === 'PARTITIONED' && _.get(cache, 'clientNearConfiguration.enabled')) : []; - return this.igniteConfiguration(cfg, clientNearCaches); + const available = versionService.since.bind(versionService, targetVer.ignite); + + return this.igniteConfiguration(cfg, available, clientNearCaches); } } From 698cdfae6368a1add30fa32fc8972ed481df1f57 Mon Sep 17 00:00:00 2001 From: Vasiliy Sisko Date: Fri, 12 Jan 2018 16:12:46 +0700 Subject: [PATCH 211/243] IGNITE-7391 Web Console: Fixed code generation. 
(cherry picked from commit fe87476) --- .../configuration/generator/JavaTransformer.service.js | 10 +++++----- .../generator/SpringTransformer.service.js | 10 +++++----- .../states/configuration/summary/summary.worker.js | 8 ++++---- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/modules/web-console/frontend/app/modules/configuration/generator/JavaTransformer.service.js b/modules/web-console/frontend/app/modules/configuration/generator/JavaTransformer.service.js index 74b3fc68c4f07..a8d6d2d56ca95 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/JavaTransformer.service.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/JavaTransformer.service.js @@ -886,13 +886,15 @@ export default class IgniteJavaTransformer extends AbstractTransformer { * Build Java startup class with configuration. * * @param {Bean} cfg - * @param available Function to check target version of generated source to appropriate for generation. + * @param {Object} targetVer Version of Ignite for generated project. * @param pkg Package name. * @param {String} clsName Class name for generate factory class otherwise generate code snippet. * @param {Array.} clientNearCaches Is client node. * @returns {StringBuilder} */ - static igniteConfiguration(cfg, available, pkg, clsName, clientNearCaches) { + static igniteConfiguration(cfg, targetVer, pkg, clsName, clientNearCaches) { + const available = versionService.since.bind(versionService, targetVer.ignite); + const sb = new StringBuilder(); sb.append(`package ${pkg};`); @@ -1048,9 +1050,7 @@ export default class IgniteJavaTransformer extends AbstractTransformer { const clientNearCaches = client ? 
_.filter(cluster.caches, (cache) => cache.cacheMode === 'PARTITIONED' && _.get(cache, 'clientNearConfiguration.enabled')) : []; - const available = versionService.since.bind(versionService, targetVer.ignite); - - return this.igniteConfiguration(cfg, available, pkg, clsName, clientNearCaches); + return this.igniteConfiguration(cfg, targetVer, pkg, clsName, clientNearCaches); } /** diff --git a/modules/web-console/frontend/app/modules/configuration/generator/SpringTransformer.service.js b/modules/web-console/frontend/app/modules/configuration/generator/SpringTransformer.service.js index 9e35e5dba5424..a4b616c30a8f1 100644 --- a/modules/web-console/frontend/app/modules/configuration/generator/SpringTransformer.service.js +++ b/modules/web-console/frontend/app/modules/configuration/generator/SpringTransformer.service.js @@ -255,11 +255,13 @@ export default class IgniteSpringTransformer extends AbstractTransformer { * Build final XML. * * @param {Bean} cfg Ignite configuration. - * @param available Function to check target version of generated source to appropriate for generation. + * @param {Object} targetVer Version of Ignite for generated project. * @param {Boolean} clientNearCaches * @returns {StringBuilder} */ - static igniteConfiguration(cfg, available, clientNearCaches) { + static igniteConfiguration(cfg, targetVer, clientNearCaches) { + const available = versionService.since.bind(versionService, targetVer.ignite); + const sb = new StringBuilder(); // 0. Add header. @@ -326,8 +328,6 @@ export default class IgniteSpringTransformer extends AbstractTransformer { const clientNearCaches = client ? 
_.filter(cluster.caches, (cache) => cache.cacheMode === 'PARTITIONED' && _.get(cache, 'clientNearConfiguration.enabled')) : []; - const available = versionService.since.bind(versionService, targetVer.ignite); - - return this.igniteConfiguration(cfg, available, clientNearCaches); + return this.igniteConfiguration(cfg, targetVer, clientNearCaches); } } diff --git a/modules/web-console/frontend/app/modules/states/configuration/summary/summary.worker.js b/modules/web-console/frontend/app/modules/states/configuration/summary/summary.worker.js index 11f19ee61e320..1939906961bba 100644 --- a/modules/web-console/frontend/app/modules/states/configuration/summary/summary.worker.js +++ b/modules/web-console/frontend/app/modules/states/configuration/summary/summary.worker.js @@ -90,13 +90,13 @@ onmessage = function(e) { if (cluster.discovery.kind === 'Kubernetes') zip.file(`${metaPath}/ignite-service.yaml`, kubernetesConfig(cluster)); - zip.file(`${metaPath}/${serverXml}`, spring.igniteConfiguration(cfg).asString()); - zip.file(`${metaPath}/${clientXml}`, spring.igniteConfiguration(clientCfg, clientNearCaches).asString()); + zip.file(`${metaPath}/${serverXml}`, spring.igniteConfiguration(cfg, targetVer).asString()); + zip.file(`${metaPath}/${clientXml}`, spring.igniteConfiguration(clientCfg, targetVer, clientNearCaches).asString()); const cfgPath = `${srcPath}/config`; - zip.file(`${cfgPath}/ServerConfigurationFactory.java`, java.igniteConfiguration(cfg, 'config', 'ServerConfigurationFactory').asString()); - zip.file(`${cfgPath}/ClientConfigurationFactory.java`, java.igniteConfiguration(clientCfg, 'config', 'ClientConfigurationFactory', clientNearCaches).asString()); + zip.file(`${cfgPath}/ServerConfigurationFactory.java`, java.igniteConfiguration(cfg, targetVer, 'config', 'ServerConfigurationFactory').asString()); + zip.file(`${cfgPath}/ClientConfigurationFactory.java`, java.igniteConfiguration(clientCfg, targetVer, 'config', 'ClientConfigurationFactory', 
clientNearCaches).asString()); if (java.isDemoConfigured(cluster, demo)) { zip.file(`${srcPath}/demo/DemoStartup.java`, java.nodeStartup(cluster, 'demo.DemoStartup', From b924697dd3d07461eae63b803fe0a22839eca77e Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Tue, 12 Dec 2017 19:11:46 +0300 Subject: [PATCH 212/243] IGNITE-7177 Correctly handle custom messages which do not change affinity (cherry picked from commit 7cf049e) --- .../cache/CacheAffinitySharedManager.java | 31 ++++++++++++++----- .../GridDhtPartitionsExchangeFuture.java | 14 +++++++-- 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java index a67aeec0648a0..4f475dd025581 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java @@ -682,6 +682,28 @@ private void scheduleClientChangeMessage(Map startedCaches, Se cctx.time().addTimeoutObject(timeoutObj); } + /** + * @param fut Exchange future. + * @param crd Coordinator flag. + * @param exchActions Exchange actions. + */ + public void onCustomMessageNoAffinityChange( + GridDhtPartitionsExchangeFuture fut, + boolean crd, + @Nullable final ExchangeActions exchActions + ) { + final ExchangeDiscoveryEvents evts = fut.context().events(); + + forAllCacheGroups(crd, new IgniteInClosureX() { + @Override public void applyx(GridAffinityAssignmentCache aff) { + if (exchActions != null && exchActions.cacheGroupStopping(aff.groupId())) + return; + + aff.clientEventTopologyChange(evts.lastEvent(), evts.topologyVersion()); + } + }); + } + /** * Called on exchange initiated for cache start/stop request. 
* @@ -702,14 +724,7 @@ public void onCacheChangeRequest( caches.updateCachesInfo(exchActions); // Affinity did not change for existing caches. - forAllCacheGroups(crd, new IgniteInClosureX() { - @Override public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException { - if (exchActions.cacheGroupStopping(aff.groupId())) - return; - - aff.clientEventTopologyChange(evts.lastEvent(), evts.topologyVersion()); - } - }); + onCustomMessageNoAffinityChange(fut, crd, exchActions); for (ExchangeActions.CacheActionData action : exchActions.cacheStartRequests()) { DynamicCacheDescriptor cacheDesc = action.descriptor(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java index 7468b25a56248..96d8b7e495da9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java @@ -588,9 +588,7 @@ else if (msg instanceof DynamicCacheChangeBatch) { exchange = onCacheChangeRequest(crdNode); } else if (msg instanceof SnapshotDiscoveryMessage) { - exchange = CU.clientNode(firstDiscoEvt.eventNode()) ? - onClientNodeEvent(crdNode) : - onServerNodeEvent(crdNode); + exchange = onCustomMessageNoAffinityChange(crdNode); } else { assert affChangeMsg != null : this; @@ -892,6 +890,16 @@ private ExchangeType onCacheChangeRequest(boolean crd) throws IgniteCheckedExcep return cctx.kernalContext().clientNode() ? ExchangeType.CLIENT : ExchangeType.ALL; } + /** + * @param crd Coordinator flag. + * @return Exchange type. 
+ */ + private ExchangeType onCustomMessageNoAffinityChange(boolean crd) { + cctx.affinity().onCustomMessageNoAffinityChange(this, crd, exchActions); + + return cctx.kernalContext().clientNode() ? ExchangeType.CLIENT : ExchangeType.ALL; + } + /** * @param crd Coordinator flag. * @throws IgniteCheckedException If failed. From ca72c8212984a45d20d84f84b14178f5ac219bd7 Mon Sep 17 00:00:00 2001 From: sboikov Date: Tue, 24 Oct 2017 11:19:43 +0300 Subject: [PATCH 213/243] Fixed affinityReadyFuture (lastFinishedFuture could be null for compute jobs). (cherry picked from commit 62cb4fb) --- .../internal/processors/cache/GridCacheAffinityManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAffinityManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAffinityManager.java index f0cdd225568ef..c9ee38cf898d6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAffinityManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAffinityManager.java @@ -99,7 +99,7 @@ public IgniteInternalFuture affinityReadyFuture(Affinit IgniteInternalFuture fut = aff.readyFuture(topVer); - return fut != null ? fut : cctx.shared().exchange().lastFinishedFuture(); + return fut != null ? 
fut : new GridFinishedFuture<>(aff.lastVersion()); } /** From 4b74a68cfd9b34f37985227ffc2c3100f0be9cab Mon Sep 17 00:00:00 2001 From: Evgeny Stanilovskiy Date: Tue, 21 Nov 2017 13:12:21 +0300 Subject: [PATCH 214/243] IGNITE-6437: DataStructure can not be obtained on client if it is created on server node (cherry picked from commit ed4616e) --- .../DataStructuresProcessor.java | 18 ++- .../IgniteClientReconnectCollectionsTest.java | 131 ++++++++---------- 2 files changed, 69 insertions(+), 80 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java index 33a2fd2133362..b26acdd694de7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java @@ -55,6 +55,7 @@ import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.processors.GridProcessorAdapter; import org.apache.ignite.internal.processors.cache.CacheType; +import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; import org.apache.ignite.internal.processors.cache.GridCacheAdapter; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheInternal; @@ -952,6 +953,7 @@ public void removeQueue(final String name, final GridCacheContext cctx) throws I assert name != null; assert type.isCollection() : type; + assert !create || cfg != null; if (grpName == null) { if (cfg != null && cfg.getGroupName() != null) @@ -960,17 +962,23 @@ public void removeQueue(final String name, final GridCacheContext cctx) throws I grpName = DEFAULT_DS_GROUP_NAME; } - assert !create || cfg != null; - final String metaCacheName = 
ATOMICS_CACHE_NAME + "@" + grpName; IgniteInternalCache metaCache0 = ctx.cache().cache(metaCacheName); if (metaCache0 == null) { - if (!create) - return null; + CacheConfiguration ccfg = null; + + if (!create) { + DynamicCacheDescriptor desc = ctx.cache().cacheDescriptor(metaCacheName); + + if (desc == null) + return null; + } + else + ccfg = metaCacheConfiguration(cfg, metaCacheName, grpName); - ctx.cache().dynamicStartCache(metaCacheConfiguration(cfg, metaCacheName, grpName), + ctx.cache().dynamicStartCache(ccfg, metaCacheName, null, CacheType.DATA_STRUCTURES, diff --git a/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectCollectionsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectCollectionsTest.java index 8ee669ce35194..3f0e33d3046ab 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectCollectionsTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectCollectionsTest.java @@ -38,6 +38,20 @@ * */ public class IgniteClientReconnectCollectionsTest extends IgniteClientReconnectAbstractTest { + /** */ + private static final CollectionConfiguration TX_CFGS = new CollectionConfiguration(); + + /** */ + private static final CollectionConfiguration ATOMIC_CONF = new CollectionConfiguration(); + + static { + TX_CFGS.setCacheMode(PARTITIONED); + TX_CFGS.setAtomicityMode(TRANSACTIONAL); + + ATOMIC_CONF.setCacheMode(PARTITIONED); + ATOMIC_CONF.setAtomicityMode(ATOMIC); + } + /** {@inheritDoc} */ @Override protected int serverCount() { return 1; @@ -52,17 +66,12 @@ public class IgniteClientReconnectCollectionsTest extends IgniteClientReconnectA * @throws Exception If failed. 
*/ public void testCollectionsReconnectClusterRestart() throws Exception { - CollectionConfiguration colCfg = new CollectionConfiguration(); - - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(TRANSACTIONAL); - Ignite client = grid(serverCount()); assertTrue(client.cluster().localNode().isClient()); - final IgniteQueue queue = client.queue("q", 0, colCfg); - final IgniteSet set = client.set("s", colCfg); + final IgniteQueue queue = client.queue("q", 0, TX_CFGS); + final IgniteSet set = client.set("s", TX_CFGS); Ignite srv = grid(0); @@ -88,12 +97,16 @@ public void testCollectionsReconnectClusterRestart() throws Exception { } }, IllegalStateException.class, null); - try (IgniteQueue queue2 = client.queue("q", 0, colCfg)) { + try (IgniteQueue queue2 = client.queue("q", 0, TX_CFGS)) { queue2.add(1); + + assert queue2.size() == 1 : queue2.size(); } - try (IgniteSet set2 = client.set("s", colCfg)) { + try (IgniteSet set2 = client.set("s", TX_CFGS)) { set2.add(1); + + assert set2.size() == 1 : set2.size(); } } @@ -101,114 +114,82 @@ public void testCollectionsReconnectClusterRestart() throws Exception { * @throws Exception If failed. */ public void testQueueReconnect() throws Exception { - CollectionConfiguration colCfg = new CollectionConfiguration(); - - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(TRANSACTIONAL); + queueReconnect(TX_CFGS); - queueReconnect(colCfg); - - colCfg = new CollectionConfiguration(); - - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(ATOMIC); - - queueReconnect(colCfg); + queueReconnect(ATOMIC_CONF); } /** * @throws Exception If failed. 
*/ public void testQueueReconnectRemoved() throws Exception { - CollectionConfiguration colCfg = new CollectionConfiguration(); - - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(TRANSACTIONAL); - - queueReconnectRemoved(colCfg); + queueReconnectRemoved(TX_CFGS); - colCfg = new CollectionConfiguration(); - - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(ATOMIC); - - queueReconnectRemoved(colCfg); + queueReconnectRemoved(ATOMIC_CONF); } /** * @throws Exception If failed. */ public void testQueueReconnectInProgress() throws Exception { - CollectionConfiguration colCfg = new CollectionConfiguration(); - - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(TRANSACTIONAL); - - queueReconnectInProgress(colCfg); + queueReconnectInProgress(TX_CFGS); - colCfg = new CollectionConfiguration(); - - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(ATOMIC); - - queueReconnectInProgress(colCfg); + queueReconnectInProgress(ATOMIC_CONF); } /** * @throws Exception If failed. */ public void testSetReconnect() throws Exception { - CollectionConfiguration colCfg = new CollectionConfiguration(); - - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(TRANSACTIONAL); - - setReconnect(colCfg); - - colCfg = new CollectionConfiguration(); + setReconnect(TX_CFGS); - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(ATOMIC); - - setReconnect(colCfg); + setReconnect(ATOMIC_CONF); } /** * @throws Exception If failed. */ public void testSetReconnectRemoved() throws Exception { - CollectionConfiguration colCfg = new CollectionConfiguration(); + setReconnectRemove(TX_CFGS); - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(ATOMIC); + setReconnectRemove(ATOMIC_CONF); + } - setReconnectRemove(colCfg); + /** + * @throws Exception If failed. 
+ */ + public void testSetReconnectInProgress() throws Exception { + setReconnectInProgress(TX_CFGS); - colCfg = new CollectionConfiguration(); + setReconnectInProgress(ATOMIC_CONF); + } - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(TRANSACTIONAL); + /** + * @throws Exception If failed. + */ + public void testServerReconnect() throws Exception { + serverNodeReconnect(TX_CFGS); - setReconnectRemove(colCfg); + serverNodeReconnect(ATOMIC_CONF); } /** + * @param colCfg Collection configuration. * @throws Exception If failed. */ - public void testSetReconnectInProgress() throws Exception { - CollectionConfiguration colCfg = new CollectionConfiguration(); + private void serverNodeReconnect(CollectionConfiguration colCfg) throws Exception { + final Ignite client = grid(serverCount()); - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(ATOMIC); + final Ignite srv = clientRouter(client); - setReconnectInProgress(colCfg); + assertNotNull(srv.queue("q", 0, colCfg)); + assertNotNull(srv.set("s", colCfg)); - colCfg = new CollectionConfiguration(); + reconnectClientNode(client, srv, null); - colCfg.setCacheMode(PARTITIONED); - colCfg.setAtomicityMode(TRANSACTIONAL); + IgniteQueue q = client.queue("q", 0, null); - setReconnectInProgress(colCfg); + assertNotNull(q); } /** @@ -495,4 +476,4 @@ private void queueReconnectInProgress(final CollectionConfiguration colCfg) thro assertEquals("1", clientQueue.poll()); } -} \ No newline at end of file +} From 29876c22fe15d957e6746eaee57281872b461ea8 Mon Sep 17 00:00:00 2001 From: sboikov Date: Wed, 29 Nov 2017 11:34:23 +0300 Subject: [PATCH 215/243] Minor: moved custom events processing in GridContinuousProcessor's methods. 
(cherry picked from commit eef5afd) --- .../continuous/GridContinuousProcessor.java | 132 +++++++++++------- 1 file changed, 85 insertions(+), 47 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/GridContinuousProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/GridContinuousProcessor.java index fa52be2bf4883..571d654a19be6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/GridContinuousProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/GridContinuousProcessor.java @@ -176,8 +176,10 @@ public GridContinuousProcessor(GridKernalContext ctx) { @Override public void onCustomEvent(AffinityTopologyVersion topVer, ClusterNode snd, StartRoutineDiscoveryMessage msg) { - if (!snd.id().equals(ctx.localNodeId()) && !ctx.isStopping()) - processStartRequest(snd, msg); + if (ctx.isStopping()) + return; + + processStartRequest(snd, msg); } }); @@ -186,39 +188,10 @@ public GridContinuousProcessor(GridKernalContext ctx) { @Override public void onCustomEvent(AffinityTopologyVersion topVer, ClusterNode snd, StartRoutineAckDiscoveryMessage msg) { - StartFuture fut = startFuts.remove(msg.routineId()); - - if (fut != null) { - if (msg.errs().isEmpty()) { - LocalRoutineInfo routine = locInfos.get(msg.routineId()); - - // Update partition counters. - if (routine != null && routine.handler().isQuery()) { - Map>> cntrsPerNode = msg.updateCountersPerNode(); - Map> cntrs = msg.updateCounters(); - - GridCacheAdapter interCache = - ctx.cache().internalCache(routine.handler().cacheName()); - - GridCacheContext cctx = interCache != null ? 
interCache.context() : null; - - if (cctx != null && cntrsPerNode != null && !cctx.isLocal() && cctx.affinityNode()) - cntrsPerNode.put(ctx.localNodeId(), - toCountersMap(cctx.topology().localUpdateCounters(false))); - - routine.handler().updateCounters(topVer, cntrsPerNode, cntrs); - } - - fut.onRemoteRegistered(); - } - else { - IgniteCheckedException firstEx = F.first(msg.errs().values()); - - fut.onDone(firstEx); + if (ctx.isStopping()) + return; - stopRoutine(msg.routineId()); - } - } + processStartAckRequest(topVer, msg); } }); @@ -227,16 +200,10 @@ public GridContinuousProcessor(GridKernalContext ctx) { @Override public void onCustomEvent(AffinityTopologyVersion topVer, ClusterNode snd, StopRoutineDiscoveryMessage msg) { - if (!snd.id().equals(ctx.localNodeId())) { - UUID routineId = msg.routineId(); - - unregisterRemote(routineId); - } + if (ctx.isStopping()) + return; - for (Map clientInfo : clientInfos.values()) { - if (clientInfo.remove(msg.routineId()) != null) - break; - } + processStopRequest(snd, msg); } }); @@ -245,10 +212,10 @@ public GridContinuousProcessor(GridKernalContext ctx) { @Override public void onCustomEvent(AffinityTopologyVersion topVer, ClusterNode snd, StopRoutineAckDiscoveryMessage msg) { - StopFuture fut = stopFuts.remove(msg.routineId()); + if (ctx.isStopping()) + return; - if (fut != null) - fut.onDone(); + processStopAckRequest(msg); } }); @@ -459,7 +426,7 @@ private Map copyLocalInfos(Map l /** {@inheritDoc} */ @Override public void onJoiningNodeDataReceived(JoiningNodeDiscoveryData data) { if (log.isDebugEnabled()) { - log.info("onJoiningNodeDataReceived [joining=" + data.joiningNodeId() + + log.debug("onJoiningNodeDataReceived [joining=" + data.joiningNodeId() + ", loc=" + ctx.localNodeId() + ", data=" + data.joiningNodeData() + ']'); @@ -975,12 +942,83 @@ private void sendNotification(UUID nodeId, ackC); } + /** + * @param msg Message. 
+ */ + private void processStopAckRequest(StopRoutineAckDiscoveryMessage msg) { + StopFuture fut = stopFuts.remove(msg.routineId()); + + if (fut != null) + fut.onDone(); + } + + /** + * @param snd Sender node. + * @param msg Message/ + */ + private void processStopRequest(ClusterNode snd, StopRoutineDiscoveryMessage msg) { + if (!snd.id().equals(ctx.localNodeId())) { + UUID routineId = msg.routineId(); + + unregisterRemote(routineId); + } + + for (Map clientInfo : clientInfos.values()) { + if (clientInfo.remove(msg.routineId()) != null) + break; + } + } + + /** + * @param topVer Topology version. + * @param msg Message. + */ + private void processStartAckRequest(AffinityTopologyVersion topVer, + StartRoutineAckDiscoveryMessage msg) { + StartFuture fut = startFuts.remove(msg.routineId()); + + if (fut != null) { + if (msg.errs().isEmpty()) { + LocalRoutineInfo routine = locInfos.get(msg.routineId()); + + // Update partition counters. + if (routine != null && routine.handler().isQuery()) { + Map>> cntrsPerNode = msg.updateCountersPerNode(); + Map> cntrs = msg.updateCounters(); + + GridCacheAdapter interCache = + ctx.cache().internalCache(routine.handler().cacheName()); + + GridCacheContext cctx = interCache != null ? interCache.context() : null; + + if (cctx != null && cntrsPerNode != null && !cctx.isLocal() && cctx.affinityNode()) + cntrsPerNode.put(ctx.localNodeId(), + toCountersMap(cctx.topology().localUpdateCounters(false))); + + routine.handler().updateCounters(topVer, cntrsPerNode, cntrs); + } + + fut.onRemoteRegistered(); + } + else { + IgniteCheckedException firstEx = F.first(msg.errs().values()); + + fut.onDone(firstEx); + + stopRoutine(msg.routineId()); + } + } + } + /** * @param node Sender. * @param req Start request. 
*/ private void processStartRequest(ClusterNode node, StartRoutineDiscoveryMessage req) { UUID routineId = req.routineId(); + if (node.id().equals(ctx.localNodeId())) + return; + StartRequestData data = req.startRequestData(); GridContinuousHandler hnd = data.handler(); From be91de45756b93366b31b0b8cc3567efdce82384 Mon Sep 17 00:00:00 2001 From: Andrey Kuznetsov Date: Fri, 22 Dec 2017 11:52:45 +0300 Subject: [PATCH 216/243] IGNITE-6015: Rollback on exception in commitIfLocked. (cherry picked from commit 9f98ca1) --- .../GridDistributedTxRemoteAdapter.java | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java index 7a10c10ca38b5..f84fb99d1bb78 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java @@ -324,18 +324,24 @@ private void doneRemote(IgniteTxEntry txEntry, /** {@inheritDoc} */ @Override public boolean onOwnerChanged(GridCacheEntryEx entry, GridCacheMvccCandidate owner) { + if (!hasWriteKey(entry.txKey())) + return false; + try { - if (hasWriteKey(entry.txKey())) { - commitIfLocked(); + commitIfLocked(); - return true; - } + return true; } catch (IgniteCheckedException e) { U.error(log, "Failed to commit remote transaction: " + this, e); - } - return false; + invalidate(true); + systemInvalidate(true); + + rollbackRemoteTx(); + + return false; + } } /** {@inheritDoc} */ From 8acf356ca01e79198f8456369c59f4889bb8ad35 Mon Sep 17 00:00:00 2001 From: Evgeny Stanilovskiy Date: Mon, 25 Dec 2017 13:32:01 +0300 Subject: [PATCH 217/243] ignite-5955 Skip disconnected clients in 
GridAbstractTest.awaitTopologyChange (cherry picked from commit 100bf0b) --- .../apache/ignite/testframework/junits/GridAbstractTest.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java index 4965d166c06fd..7e4b501a3ffe2 100755 --- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java @@ -119,6 +119,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_DISCO_FAILED_CLIENT_RECONNECT_DELAY; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; +import static org.apache.ignite.internal.GridKernalState.DISCONNECTED; import static org.apache.ignite.testframework.config.GridTestProperties.BINARY_MARSHALLER_USE_SIMPLE_NAME_MAPPER; /** @@ -2102,7 +2103,7 @@ private void awaitTopologyChange() throws IgniteInterruptedCheckedException { for (Ignite g : G.allGrids()) { final GridKernalContext ctx = ((IgniteKernal)g).context(); - if (ctx.isStopping() || !g.active()) + if (ctx.isStopping() || ctx.gateway().getState() == DISCONNECTED || !g.active()) continue; AffinityTopologyVersion topVer = ctx.discovery().topologyVersionEx(); From 6a9c177391bb5efd5864c7ef08e6712024b96c5a Mon Sep 17 00:00:00 2001 From: Aleksei Scherbakov Date: Sat, 2 Dec 2017 20:36:29 +0300 Subject: [PATCH 218/243] ignite-7049 Fixed error in tx timeout processing for optimistic/serializable tx (cherry picked from commit cd0d2eb) --- ...OptimisticSerializableTxPrepareFuture.java | 2 +- .../transactions/TxRollbackOnTimeoutTest.java | 186 +++++++++++++++++- 2 files changed, 186 insertions(+), 2 deletions(-) diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java index 1da05899c8701..beb1e160c7ad6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java @@ -184,7 +184,7 @@ private void onError(@Nullable GridDistributedTxMapping m, Throwable e) { } } - if (e instanceof IgniteTxOptimisticCheckedException || e instanceof IgniteTxTimeoutCheckedException) { + if (e instanceof IgniteTxOptimisticCheckedException) { if (m != null) tx.removeMapping(m.primary().id()); } diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackOnTimeoutTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackOnTimeoutTest.java index e1c6c1079666c..6aa3bdd0f15d9 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackOnTimeoutTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackOnTimeoutTest.java @@ -18,24 +18,31 @@ package org.apache.ignite.internal.processors.cache.transactions; import java.util.Collection; +import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicBoolean; import javax.cache.CacheException; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; +import org.apache.ignite.cluster.ClusterNode; import 
org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.TestRecordingCommunicationSpi; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareResponse; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteInClosure; +import org.apache.ignite.plugin.extensions.communication.Message; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; import org.apache.ignite.testframework.GridTestUtils; @@ -44,8 +51,11 @@ import org.apache.ignite.transactions.TransactionConcurrency; import org.apache.ignite.transactions.TransactionDeadlockException; import org.apache.ignite.transactions.TransactionIsolation; +import org.apache.ignite.transactions.TransactionOptimisticException; import org.apache.ignite.transactions.TransactionTimeoutException; +import org.jsr166.LongAdder8; +import static java.lang.Thread.sleep; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC; @@ -55,6 +65,9 @@ * Tests an ability to eagerly rollback timed out transactions. 
*/ public class TxRollbackOnTimeoutTest extends GridCommonAbstractTest { + /** */ + private static final long DURATION = 60 * 1000L; + /** */ private static final long TX_MIN_TIMEOUT = 1; @@ -73,6 +86,8 @@ public class TxRollbackOnTimeoutTest extends GridCommonAbstractTest { ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER); + cfg.setCommunicationSpi(new TestRecordingCommunicationSpi()); + boolean client = "client".equals(igniteInstanceName); cfg.setClientMode(client); @@ -372,6 +387,175 @@ public void testSimple() throws Exception { } } + /** + * Test timeouts with random values and different tx configurations. + */ + public void testRandomMixedTxConfigurations() throws Exception { + final Ignite client = startClient(); + + final AtomicBoolean stop = new AtomicBoolean(); + + final long seed = System.currentTimeMillis(); + + final Random r = new Random(seed); + + log.info("Using seed: " + seed); + + final int threadsCnt = Runtime.getRuntime().availableProcessors() * 2; + + for (int k = 0; k < threadsCnt; k++) + grid(0).cache(CACHE_NAME).put(k, (long)0); + + final TransactionConcurrency[] TC_VALS = TransactionConcurrency.values(); + final TransactionIsolation[] TI_VALS = TransactionIsolation.values(); + + final LongAdder8 cntr0 = new LongAdder8(); + final LongAdder8 cntr1 = new LongAdder8(); + final LongAdder8 cntr2 = new LongAdder8(); + final LongAdder8 cntr3 = new LongAdder8(); + + final IgniteInternalFuture fut = multithreadedAsync(new Runnable() { + @Override public void run() { + while (!stop.get()) { + int nodeId = r.nextInt(GRID_CNT + 1); + + Ignite node = nodeId == GRID_CNT || nearCacheEnabled() ? client : grid(nodeId); + + TransactionConcurrency conc = TC_VALS[r.nextInt(TC_VALS.length)]; + TransactionIsolation isolation = TI_VALS[r.nextInt(TI_VALS.length)]; + + int k = r.nextInt(threadsCnt); + + long timeout = r.nextInt(200) + 50; + + // Roughly 50% of transactions should time out. 
+ try (Transaction tx = node.transactions().txStart(conc, isolation, timeout, 1)) { + cntr0.add(1); + + final Long v = (Long)node.cache(CACHE_NAME).get(k); + + final int delay = r.nextInt(400); + + if (delay > 0) + sleep(delay); + + node.cache(CACHE_NAME).put(k, v + 1); + + tx.commit(); + + cntr1.add(1); + } + catch (TransactionOptimisticException | InterruptedException e) { + // Expected. + cntr3.add(1); + } + catch (TransactionTimeoutException e) { + cntr2.add(1); + } + catch (CacheException e) { + assertEquals(TransactionTimeoutException.class, X.getCause(e).getClass()); + + cntr2.add(1); + } + } + } + }, threadsCnt, "tx-async-thread"); + + sleep(DURATION); + + stop.set(true); + + fut.get(10_000); + + log.info("Tx test stats: started=" + cntr0.sum() + + ", completed=" + cntr1.sum() + + ", failed=" + cntr3.sum() + + ", timedOut=" + cntr2.sum()); + + assertEquals("Expected finished count same as started count", cntr0.sum(), cntr1.sum() + cntr2.sum() + cntr3.sum()); + } + + /** + * Tests timeout on DHT primary node for all tx configurations. + * + * @throws Exception If failed. + */ + public void testTimeoutOnPrimaryDHTNode() throws Exception { + final ClusterNode n0 = grid(0).affinity(CACHE_NAME).mapKeyToNode(0); + + final Ignite prim = G.ignite(n0.id()); + + for (TransactionConcurrency concurrency : TransactionConcurrency.values()) { + for (TransactionIsolation isolation : TransactionIsolation.values()) + testTimeoutOnPrimaryDhtNode0(prim, concurrency, isolation); + } + } + + /** + * + * @param prim Primary node. + * @param conc Concurrency. + * @param isolation Isolation. + + * @throws Exception If failed. + */ + private void testTimeoutOnPrimaryDhtNode0(final Ignite prim, final TransactionConcurrency conc, + final TransactionIsolation isolation) + throws Exception { + + log.info("concurrency=" + conc + ", isolation=" + isolation); + + // Force timeout on primary DHT node by blocking DHT prepare response. 
+ toggleBlocking(GridDhtTxPrepareResponse.class, prim, true); + + final int val = 0; + + try { + multithreaded(new Runnable() { + @Override public void run() { + try (Transaction txOpt = prim.transactions().txStart(conc, isolation, 300, 1)) { + + prim.cache(CACHE_NAME).put(val, val); + + txOpt.commit(); + } + } + }, 1, "tx-async-thread"); + + fail(); + } + catch (TransactionTimeoutException e) { + // Expected. + } + + toggleBlocking(GridDhtTxPrepareResponse.class, prim, false); + + AffinityTopologyVersion topVer = new AffinityTopologyVersion(GRID_CNT + 1, 0); + + for (Ignite ignite : G.allGrids()) + ((IgniteEx)ignite).context().cache().context().partitionReleaseFuture(topVer).get(10_000); + } + + /** + * @param cls Message class. + * @param nodeToBlock Node to block. + * @param block Block. + */ + private void toggleBlocking(Class cls, Ignite nodeToBlock, boolean block) { + for (Ignite ignite : G.allGrids()) { + if (ignite == nodeToBlock) + continue; + + final TestRecordingCommunicationSpi spi = + (TestRecordingCommunicationSpi)ignite.configuration().getCommunicationSpi(); + + if (block) + spi.blockMessages(cls, nodeToBlock.name()); + else + spi.stopBlock(true); + } + } + /** * @param concurrency Concurrency. * @param isolation Isolation. 
@@ -652,4 +836,4 @@ private void waitingTxUnblockedOnThreadDeath0(final Ignite near, fut2.get(); } -} \ No newline at end of file +} From 9f60a89334c4ede54aaae98040c9a2b5c15e72e2 Mon Sep 17 00:00:00 2001 From: sboikov Date: Thu, 11 Jan 2018 13:15:07 +0300 Subject: [PATCH 219/243] ignite-7377 Correctly re-create cache entry for tx entry (cherry picked from commit ba9c110) --- .../cache/transactions/IgniteTxManager.java | 29 ++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxManager.java index c6b1bd76ac099..932acf668b007 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxManager.java @@ -1612,7 +1612,7 @@ private boolean lockMultiple(IgniteInternalTx tx, Iterable entrie if (txEntry2 == txEntry1) break; - txEntry2.cached().txUnlock(tx); + txUnlock(tx, txEntry2); } return false; @@ -1652,6 +1652,33 @@ private boolean lockMultiple(IgniteInternalTx tx, Iterable entrie return true; } + /** + * @param tx Transaction. + * @param txEntry Entry to unlock. + */ + private void txUnlock(IgniteInternalTx tx, IgniteTxEntry txEntry) { + while (true) { + try { + txEntry.cached().txUnlock(tx); + + break; + } + catch (GridCacheEntryRemovedException ignored) { + if (log.isDebugEnabled()) + log.debug("Got removed entry in TM txUnlock(..) method (will retry): " + txEntry); + + try { + txEntry.cached(txEntry.context().cache().entryEx(txEntry.key(), tx.topologyVersion())); + } + catch (GridDhtInvalidPartitionException e) { + tx.addInvalidPartition(txEntry.context(), e.partition()); + + break; + } + } + } + } + /** * @param tx Owning transaction. * @param entries Entries to unlock. 
From 250c0df85bc66e50133eb52ad5bbcb07744de15d Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Fri, 17 Nov 2017 15:48:12 +0300 Subject: [PATCH 220/243] IGNITE-6947 Abandon remap after single map if future is done (fixes NPE) (cherry picked from commit 38f66c7) --- .../near/GridNearOptimisticTxPrepareFuture.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java index 6d7a86215c462..6cf076b2708b8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java @@ -368,13 +368,6 @@ private void prepareSingle(IgniteTxEntry write, boolean topLocked, boolean remap GridDistributedTxMapping mapping = map(write, topVer, null, topLocked, remap); - if (mapping.primary().isLocal()) { - if (write.context().isNear()) - tx.nearLocallyMapped(true); - else if (write.context().isColocated()) - tx.colocatedLocallyMapped(true); - } - if (isDone()) { if (log.isDebugEnabled()) log.debug("Abandoning (re)map because future is done: " + this); @@ -382,6 +375,13 @@ else if (write.context().isColocated()) return; } + if (mapping.primary().isLocal()) { + if (write.context().isNear()) + tx.nearLocallyMapped(true); + else if (write.context().isColocated()) + tx.colocatedLocallyMapped(true); + } + if (keyLockFut != null) keyLockFut.onAllKeysAdded(); From f495c6df498f8ccbc1878064495f09fcd721e399 Mon Sep 17 00:00:00 2001 From: dkarachentsev Date: Thu, 11 Jan 2018 16:48:43 +0300 Subject: [PATCH 221/243] ignite-7340 Fix flaky GridServiceProcessorMultiNodeConfigSelfTest#checkDeployOnEachNodeUpdateTopology 
Signed-off-by: Andrey Gura (cherry picked from commit 6136ecd) --- .../internal/processors/service/GridServiceProcessor.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java index e0d19a772ddb6..6dd71dd685f6e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java @@ -1280,6 +1280,14 @@ private void reassign(GridServiceDeployment dep, AffinityTopologyVersion topVer) * @param assigns Assignments. */ private void redeploy(GridServiceAssignments assigns) { + if (assigns.topologyVersion() < ctx.discovery().topologyVersion()) { + if (log.isDebugEnabled()) + log.debug("Skip outdated assignment [assigns=" + assigns + + ", topVer=" + ctx.discovery().topologyVersion() + ']'); + + return; + } + String svcName = assigns.name(); Integer assignCnt = assigns.assigns().get(ctx.localNodeId()); From 9580c5327326114c3f0ed93f927bc62e6f3297ff Mon Sep 17 00:00:00 2001 From: Ilya Kasnacheev Date: Wed, 11 Oct 2017 15:29:04 +0300 Subject: [PATCH 222/243] IGNITE-6542 Reliably close SocketChannel in TcpCommunicationSpi. Also fix forceClose() in GridTcpNioCommunicationClient which became wrong when migrated from int to bool. - Fixes #2787. 
Signed-off-by: Alexey Goncharuk (cherry picked from commit b0158fb) --- .../nio/GridAbstractCommunicationClient.java | 2 +- .../tcp/TcpCommunicationSpi.java | 378 +++++++++--------- 2 files changed, 192 insertions(+), 188 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridAbstractCommunicationClient.java b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridAbstractCommunicationClient.java index 6302d843b0be0..ed7e929145bd3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridAbstractCommunicationClient.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridAbstractCommunicationClient.java @@ -59,7 +59,7 @@ protected GridAbstractCommunicationClient(int connIdx, @Nullable GridNioMetricsL /** {@inheritDoc} */ @Override public void forceClose() { - closed.set(false); + closed.set(true); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java index 6df7208315773..6bc0794cdb828 100755 --- a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java @@ -2905,12 +2905,7 @@ else if (log.isDebugEnabled()) } try { - safeHandshake(client, - null, - node.id(), - timeoutHelper.nextTimeoutChunk(connTimeout0), - null, - null); + safeShmemHandshake(client, node.id(), timeoutHelper.nextTimeoutChunk(connTimeout0)); } catch (HandshakeTimeoutException | IgniteSpiOperationTimeoutException e) { client.forceClose(); @@ -3078,7 +3073,6 @@ private LinkedHashSet nodeAddresses(ClusterNode node) throws protected GridCommunicationClient createTcpClient(ClusterNode node, int connIdx) throws IgniteCheckedException { LinkedHashSet addrs = nodeAddresses(node); - boolean conn = false; 
GridCommunicationClient client = null; IgniteCheckedException errs = null; @@ -3094,7 +3088,9 @@ protected GridCommunicationClient createTcpClient(ClusterNode node, int connIdx) int lastWaitingTimeout = 1; - while (!conn) { // Reconnection on handshake timeout. + while (client == null) { // Reconnection on handshake timeout. + boolean needWait = false; + if (addr.getAddress().isLoopbackAddress() && addr.getPort() == boundTcpPort) { if (log.isDebugEnabled()) log.debug("Skipping local address [addr=" + addr + @@ -3134,7 +3130,7 @@ protected GridCommunicationClient createTcpClient(ClusterNode node, int connIdx) return null; } - Long rcvCnt = null; + Long rcvCnt; Map meta = new HashMap<>(); @@ -3155,7 +3151,7 @@ protected GridCommunicationClient createTcpClient(ClusterNode node, int connIdx) Integer handshakeConnIdx = connIdx; - rcvCnt = safeHandshake(ch, + rcvCnt = safeTcpHandshake(ch, recoveryDesc, node.id(), timeoutHelper.nextTimeoutChunk(connTimeout0), @@ -3163,34 +3159,17 @@ protected GridCommunicationClient createTcpClient(ClusterNode node, int connIdx) handshakeConnIdx); if (rcvCnt == ALREADY_CONNECTED) { - recoveryDesc.release(); - return null; } else if (rcvCnt == NODE_STOPPING) { - recoveryDesc.release(); - throw new ClusterTopologyCheckedException("Remote node started stop procedure: " + node.id()); } else if (rcvCnt == NEED_WAIT) { - recoveryDesc.release(); - - U.closeQuiet(ch); - - if (lastWaitingTimeout < 60000) - lastWaitingTimeout *= 2; - - U.sleep(lastWaitingTimeout); + needWait = true; continue; } - } - finally { - if (recoveryDesc != null && rcvCnt == null) - recoveryDesc.release(); - } - try { meta.put(CONN_IDX_META, connKey); if (recoveryDesc != null) { @@ -3202,13 +3181,20 @@ else if (rcvCnt == NEED_WAIT) { GridNioSession ses = nioSrvr.createSession(ch, meta, false, null).get(); client = new GridTcpNioCommunicationClient(connIdx, ses, log); - - conn = true; } finally { - if (!conn) { + if (client == null) { + U.closeQuiet(ch); + if (recoveryDesc 
!= null) recoveryDesc.release(); + + if (needWait) { + if (lastWaitingTimeout < 60000) + lastWaitingTimeout *= 2; + + U.sleep(lastWaitingTimeout); + } } } } @@ -3330,7 +3316,7 @@ else if (X.hasCause(e, SocketTimeoutException.class)) } } - if (conn) + if (client != null) break; } @@ -3386,6 +3372,42 @@ private static boolean connectionError(IgniteCheckedException errs) { * Performs handshake in timeout-safe way. * * @param client Client. + * @param rmtNodeId Remote node. + * @param timeout Timeout for handshake. + * @throws IgniteCheckedException If handshake failed or wasn't completed withing timeout. + */ + @SuppressWarnings("ThrowFromFinallyBlock") + private void safeShmemHandshake( + GridCommunicationClient client, + UUID rmtNodeId, + long timeout + ) throws IgniteCheckedException { + HandshakeTimeoutObject obj = new HandshakeTimeoutObject<>(client, + U.currentTimeMillis() + timeout); + + addTimeoutObject(obj); + + try { + client.doHandshake(new HandshakeClosure(rmtNodeId)); + } + finally { + boolean cancelled = obj.cancel(); + + if (cancelled) + removeTimeoutObject(obj); + + // Ignoring whatever happened after timeout - reporting only timeout event. + if (!cancelled) + throw new HandshakeTimeoutException( + new IgniteSpiOperationTimeoutException("Failed to perform handshake due to timeout " + + "(consider increasing 'connectionTimeout' configuration property).")); + } + } + + /** + * Performs handshake in timeout-safe way. + * + * @param ch Socket channel. * @param recovery Recovery descriptor if use recovery handshake, otherwise {@code null}. * @param rmtNodeId Remote node. * @param timeout Timeout for handshake. @@ -3395,233 +3417,215 @@ private static boolean connectionError(IgniteCheckedException errs) { * @return Handshake response. 
*/ @SuppressWarnings("ThrowFromFinallyBlock") - private long safeHandshake( - T client, + private long safeTcpHandshake( + SocketChannel ch, @Nullable GridNioRecoveryDescriptor recovery, UUID rmtNodeId, long timeout, GridSslMeta sslMeta, @Nullable Integer handshakeConnIdx ) throws IgniteCheckedException { - HandshakeTimeoutObject obj = new HandshakeTimeoutObject<>(client, U.currentTimeMillis() + timeout); + HandshakeTimeoutObject obj = new HandshakeTimeoutObject<>(ch, U.currentTimeMillis() + timeout); addTimeoutObject(obj); long rcvCnt = 0; try { - if (client instanceof GridCommunicationClient) - ((GridCommunicationClient)client).doHandshake(new HandshakeClosure(rmtNodeId)); - else { - SocketChannel ch = (SocketChannel)client; + BlockingSslHandler sslHnd = null; - boolean success = false; + ByteBuffer buf; - try { - BlockingSslHandler sslHnd = null; + if (isSslEnabled()) { + assert sslMeta != null; - ByteBuffer buf; + sslHnd = new BlockingSslHandler(sslMeta.sslEngine(), ch, directBuf, ByteOrder.nativeOrder(), log); - if (isSslEnabled()) { - assert sslMeta != null; + if (!sslHnd.handshake()) + throw new HandshakeException("SSL handshake is not completed."); - sslHnd = new BlockingSslHandler(sslMeta.sslEngine(), ch, directBuf, ByteOrder.nativeOrder(), log); + ByteBuffer handBuff = sslHnd.applicationBuffer(); - if (!sslHnd.handshake()) - throw new HandshakeException("SSL handshake is not completed."); + if (handBuff.remaining() < NodeIdMessage.MESSAGE_FULL_SIZE) { + buf = ByteBuffer.allocate(1000); - ByteBuffer handBuff = sslHnd.applicationBuffer(); + int read = ch.read(buf); - if (handBuff.remaining() < NodeIdMessage.MESSAGE_FULL_SIZE) { - buf = ByteBuffer.allocate(1000); + if (read == -1) + throw new HandshakeException("Failed to read remote node ID (connection closed)."); - int read = ch.read(buf); + buf.flip(); - if (read == -1) - throw new HandshakeException("Failed to read remote node ID (connection closed)."); - - buf.flip(); - - buf = sslHnd.decode(buf); - } - 
else - buf = handBuff; - } - else { - buf = ByteBuffer.allocate(NodeIdMessage.MESSAGE_FULL_SIZE); + buf = sslHnd.decode(buf); + } + else + buf = handBuff; + } + else { + buf = ByteBuffer.allocate(NodeIdMessage.MESSAGE_FULL_SIZE); - for (int i = 0; i < NodeIdMessage.MESSAGE_FULL_SIZE; ) { - int read = ch.read(buf); + for (int i = 0; i < NodeIdMessage.MESSAGE_FULL_SIZE; ) { + int read = ch.read(buf); - if (read == -1) - throw new HandshakeException("Failed to read remote node ID (connection closed)."); + if (read == -1) + throw new HandshakeException("Failed to read remote node ID (connection closed)."); - i += read; - } - } + i += read; + } + } - UUID rmtNodeId0 = U.bytesToUuid(buf.array(), Message.DIRECT_TYPE_SIZE); + UUID rmtNodeId0 = U.bytesToUuid(buf.array(), Message.DIRECT_TYPE_SIZE); - if (!rmtNodeId.equals(rmtNodeId0)) - throw new HandshakeException("Remote node ID is not as expected [expected=" + rmtNodeId + - ", rcvd=" + rmtNodeId0 + ']'); - else if (log.isDebugEnabled()) - log.debug("Received remote node ID: " + rmtNodeId0); + if (!rmtNodeId.equals(rmtNodeId0)) + throw new HandshakeException("Remote node ID is not as expected [expected=" + rmtNodeId + + ", rcvd=" + rmtNodeId0 + ']'); + else if (log.isDebugEnabled()) + log.debug("Received remote node ID: " + rmtNodeId0); - if (isSslEnabled()) { - assert sslHnd != null; + if (isSslEnabled()) { + assert sslHnd != null; - ch.write(sslHnd.encrypt(ByteBuffer.wrap(U.IGNITE_HEADER))); - } - else - ch.write(ByteBuffer.wrap(U.IGNITE_HEADER)); + ch.write(sslHnd.encrypt(ByteBuffer.wrap(U.IGNITE_HEADER))); + } + else + ch.write(ByteBuffer.wrap(U.IGNITE_HEADER)); - ClusterNode locNode = getLocalNode(); + ClusterNode locNode = getLocalNode(); - if (locNode == null) - throw new IgniteCheckedException("Local node has not been started or " + - "fully initialized [isStopping=" + getSpiContext().isStopping() + ']'); + if (locNode == null) + throw new IgniteCheckedException("Local node has not been started or " + + "fully 
initialized [isStopping=" + getSpiContext().isStopping() + ']'); - if (recovery != null) { - HandshakeMessage msg; + if (recovery != null) { + HandshakeMessage msg; - int msgSize = HandshakeMessage.MESSAGE_FULL_SIZE; + int msgSize = HandshakeMessage.MESSAGE_FULL_SIZE; - if (handshakeConnIdx != null) { - msg = new HandshakeMessage2(locNode.id(), - recovery.incrementConnectCount(), - recovery.received(), - handshakeConnIdx); + if (handshakeConnIdx != null) { + msg = new HandshakeMessage2(locNode.id(), + recovery.incrementConnectCount(), + recovery.received(), + handshakeConnIdx); - msgSize += 4; - } - else { - msg = new HandshakeMessage(locNode.id(), - recovery.incrementConnectCount(), - recovery.received()); - } + msgSize += 4; + } + else { + msg = new HandshakeMessage(locNode.id(), + recovery.incrementConnectCount(), + recovery.received()); + } - if (log.isDebugEnabled()) - log.debug("Writing handshake message [locNodeId=" + locNode.id() + - ", rmtNode=" + rmtNodeId + ", msg=" + msg + ']'); + if (log.isDebugEnabled()) + log.debug("Writing handshake message [locNodeId=" + locNode.id() + + ", rmtNode=" + rmtNodeId + ", msg=" + msg + ']'); - buf = ByteBuffer.allocate(msgSize); + buf = ByteBuffer.allocate(msgSize); - buf.order(ByteOrder.nativeOrder()); + buf.order(ByteOrder.nativeOrder()); - boolean written = msg.writeTo(buf, null); + boolean written = msg.writeTo(buf, null); - assert written; + assert written; - buf.flip(); + buf.flip(); - if (isSslEnabled()) { - assert sslHnd != null; + if (isSslEnabled()) { + assert sslHnd != null; - ch.write(sslHnd.encrypt(buf)); - } - else - ch.write(buf); - } - else { - if (isSslEnabled()) { - assert sslHnd != null; + ch.write(sslHnd.encrypt(buf)); + } + else + ch.write(buf); + } + else { + if (isSslEnabled()) { + assert sslHnd != null; - ch.write(sslHnd.encrypt(ByteBuffer.wrap(nodeIdMessage().nodeIdBytesWithType))); - } - else - ch.write(ByteBuffer.wrap(nodeIdMessage().nodeIdBytesWithType)); - } + 
ch.write(sslHnd.encrypt(ByteBuffer.wrap(nodeIdMessage().nodeIdBytesWithType))); + } + else + ch.write(ByteBuffer.wrap(nodeIdMessage().nodeIdBytesWithType)); + } - if (recovery != null) { - if (log.isDebugEnabled()) - log.debug("Waiting for handshake [rmtNode=" + rmtNodeId + ']'); + if (recovery != null) { + if (log.isDebugEnabled()) + log.debug("Waiting for handshake [rmtNode=" + rmtNodeId + ']'); - if (isSslEnabled()) { - assert sslHnd != null; + if (isSslEnabled()) { + assert sslHnd != null; - buf = ByteBuffer.allocate(1000); - buf.order(ByteOrder.nativeOrder()); + buf = ByteBuffer.allocate(1000); + buf.order(ByteOrder.nativeOrder()); - ByteBuffer decode = ByteBuffer.allocate(2 * buf.capacity()); - decode.order(ByteOrder.nativeOrder()); + ByteBuffer decode = ByteBuffer.allocate(2 * buf.capacity()); + decode.order(ByteOrder.nativeOrder()); - for (int i = 0; i < RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE; ) { - int read = ch.read(buf); + for (int i = 0; i < RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE; ) { + int read = ch.read(buf); - if (read == -1) - throw new HandshakeException("Failed to read remote node recovery handshake " + - "(connection closed)."); + if (read == -1) + throw new HandshakeException("Failed to read remote node recovery handshake " + + "(connection closed)."); - buf.flip(); + buf.flip(); - ByteBuffer decode0 = sslHnd.decode(buf); + ByteBuffer decode0 = sslHnd.decode(buf); - i += decode0.remaining(); + i += decode0.remaining(); - decode = appendAndResizeIfNeeded(decode, decode0); + decode = appendAndResizeIfNeeded(decode, decode0); - buf.clear(); - } + buf.clear(); + } - decode.flip(); + decode.flip(); - rcvCnt = decode.getLong(Message.DIRECT_TYPE_SIZE); + rcvCnt = decode.getLong(Message.DIRECT_TYPE_SIZE); - if (decode.limit() > RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE) { - decode.position(RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE); + if (decode.limit() > RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE) { + 
decode.position(RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE); - sslMeta.decodedBuffer(decode); - } + sslMeta.decodedBuffer(decode); + } - ByteBuffer inBuf = sslHnd.inputBuffer(); + ByteBuffer inBuf = sslHnd.inputBuffer(); - if (inBuf.position() > 0) - sslMeta.encodedBuffer(inBuf); - } - else { - buf = ByteBuffer.allocate(RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE); + if (inBuf.position() > 0) + sslMeta.encodedBuffer(inBuf); + } + else { + buf = ByteBuffer.allocate(RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE); - buf.order(ByteOrder.nativeOrder()); + buf.order(ByteOrder.nativeOrder()); - for (int i = 0; i < RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE; ) { - int read = ch.read(buf); + for (int i = 0; i < RecoveryLastReceivedMessage.MESSAGE_FULL_SIZE; ) { + int read = ch.read(buf); - if (read == -1) - throw new HandshakeException("Failed to read remote node recovery handshake " + - "(connection closed)."); + if (read == -1) + throw new HandshakeException("Failed to read remote node recovery handshake " + + "(connection closed)."); - i += read; - } + i += read; + } - rcvCnt = buf.getLong(Message.DIRECT_TYPE_SIZE); - } + rcvCnt = buf.getLong(Message.DIRECT_TYPE_SIZE); + } - if (log.isDebugEnabled()) - log.debug("Received handshake message [rmtNode=" + rmtNodeId + ", rcvCnt=" + rcvCnt + ']'); + if (log.isDebugEnabled()) + log.debug("Received handshake message [rmtNode=" + rmtNodeId + ", rcvCnt=" + rcvCnt + ']'); - if (rcvCnt == -1) { - if (log.isDebugEnabled()) - log.debug("Connection rejected, will retry client creation [rmtNode=" + rmtNodeId + ']'); - } - else - success = true; - } - else - success = true; - } - catch (IOException e) { + if (rcvCnt == -1) { if (log.isDebugEnabled()) - log.debug("Failed to read from channel: " + e); - - throw new IgniteCheckedException("Failed to read from channel.", e); - } - finally { - if (!success) - U.closeQuiet(ch); + log.debug("Connection rejected, will retry client creation [rmtNode=" + rmtNodeId + ']'); } } } + catch 
(IOException e) { + if (log.isDebugEnabled()) + log.debug("Failed to read from channel: " + e); + + throw new IgniteCheckedException("Failed to read from channel.", e); + } finally { boolean cancelled = obj.cancel(); From ff79d6e70f524ed7858368565f4d28387f1bcac8 Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Fri, 15 Dec 2017 16:37:50 +0300 Subject: [PATCH 223/243] IGNITE-7212 Fixed infinite loop in TCP communication SPI skipping local address (cherry picked from commit 86dc36d) --- .../ignite/spi/communication/tcp/TcpCommunicationSpi.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java index 6bc0794cdb828..b82c4000a5aeb 100755 --- a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java @@ -3096,7 +3096,7 @@ protected GridCommunicationClient createTcpClient(ClusterNode node, int connIdx) log.debug("Skipping local address [addr=" + addr + ", locAddrs=" + node.attribute(createSpiAttributeName(ATTR_ADDRS)) + ", node=" + node + ']'); - continue; + break; } try { From 2f54fc3aef61c257ba3080b33aa964cc0fa0e71b Mon Sep 17 00:00:00 2001 From: sboikov Date: Mon, 18 Dec 2017 14:41:46 +0300 Subject: [PATCH 224/243] GridNioServer: ses can be null on close if async connect failed (cherry picked from commit edc66af) --- .../internal/util/nio/GridNioServer.java | 59 +++++++++++-------- 1 file changed, 34 insertions(+), 25 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java index 1d595d2485f0f..97f6020e6d97b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java @@ -2293,7 +2293,11 @@ private void processSelectedKeysOptimized(SelectionKey[] keys) throws ClosedByIn else if (log.isDebugEnabled()) log.debug("Failed to process selector key [ses=" + ses + ", err=" + e + ']'); - close(ses, new GridNioException(e)); + // Can be null if async connect failed. + if (ses != null) + close(ses, new GridNioException(e)); + else + closeKey(key); } } } @@ -2516,6 +2520,34 @@ private void register(NioOperationFuture fut) { } } + /** + * @param key Key. + */ + private void closeKey(SelectionKey key) { + // Shutdown input and output so that remote client will see correct socket close. + Socket sock = ((SocketChannel)key.channel()).socket(); + + try { + try { + sock.shutdownInput(); + } + catch (IOException ignored) { + // No-op. + } + + try { + sock.shutdownOutput(); + } + catch (IOException ignored) { + // No-op. + } + } + finally { + U.close(key, log); + U.close(sock, log); + } + } + /** * Closes the session and all associated resources, then notifies the listener. * @@ -2536,8 +2568,6 @@ protected boolean close(final GridSelectorNioSessionImpl ses, @Nullable final Ig sessions.remove(ses); workerSessions.remove(ses); - SelectionKey key = ses.key(); - if (ses.setClosed()) { ses.onClosed(); @@ -2549,28 +2579,7 @@ protected boolean close(final GridSelectorNioSessionImpl ses, @Nullable final Ig ((DirectBuffer)ses.readBuffer()).cleaner().clean(); } - // Shutdown input and output so that remote client will see correct socket close. - Socket sock = ((SocketChannel)key.channel()).socket(); - - try { - try { - sock.shutdownInput(); - } - catch (IOException ignored) { - // No-op. - } - - try { - sock.shutdownOutput(); - } - catch (IOException ignored) { - // No-op. 
- } - } - finally { - U.close(key, log); - U.close(sock, log); - } + closeKey(ses.key()); if (e != null) filterChain.onExceptionCaught(ses, e); From af1144880887c87c7c21fce25c0cbf1dc39b008e Mon Sep 17 00:00:00 2001 From: sboikov Date: Tue, 10 Oct 2017 14:35:00 +0300 Subject: [PATCH 225/243] Removed unused GridDhtLocalPartition.evictHist. (cherry picked from commit 74c4849) --- .../dht/GridDhtLocalPartition.java | 59 ------------------- .../preloader/GridDhtPartitionDemander.java | 9 --- 2 files changed, 68 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java index 536d5209cb9b6..c813a5758dbf2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java @@ -17,10 +17,8 @@ package org.apache.ignite.internal.processors.cache.distributed.dht; -import java.util.HashMap; import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CopyOnWriteArrayList; @@ -127,10 +125,6 @@ public class GridDhtLocalPartition extends GridCacheConcurrentMapImpl implements @GridToStringExclude private final long createTime = U.currentTimeMillis(); - /** Eviction history. */ - @GridToStringExclude - private final Map evictHist = new HashMap<>(); - /** Lock. */ @GridToStringExclude private final ReentrantLock lock = new ReentrantLock(); @@ -428,53 +422,6 @@ public void unlock() { lock.unlock(); } - /** - * @param key Key. - * @param ver Version. 
- */ - public void onEntryEvicted(KeyCacheObject key, GridCacheVersion ver) { - assert key != null; - assert ver != null; - assert lock.isHeldByCurrentThread(); // Only one thread can enter this method at a time. - - if (state() != MOVING) - return; - - Map evictHist0 = evictHist; - - if (evictHist0 != null) { - GridCacheVersion ver0 = evictHist0.get(key); - - if (ver0 == null || ver0.isLess(ver)) { - GridCacheVersion ver1 = evictHist0.put(key, ver); - - assert ver1 == ver0; - } - } - } - - /** - * Cache preloader should call this method within partition lock. - * - * @param key Key. - * @param ver Version. - * @return {@code True} if preloading is permitted. - */ - public boolean preloadingPermitted(KeyCacheObject key, GridCacheVersion ver) { - assert key != null; - assert ver != null; - assert lock.isHeldByCurrentThread(); // Only one thread can enter this method at a time. - - if (state() != MOVING) - return false; - - GridCacheVersion ver0 = evictHist.get(key); - - // Permit preloading if version in history - // is missing or less than passed in. - return ver0 == null || ver0.isLess(ver); - } - /** * Reserves a partition so it won't be cleared. * @@ -593,9 +540,6 @@ boolean own() { if (log.isDebugEnabled()) log.debug("Owned partition: " + this); - // No need to keep history any more. - evictHist.clear(); - return true; } } @@ -637,9 +581,6 @@ boolean markLost() { if (log.isDebugEnabled()) log.debug("Marked partition as LOST: " + this); - // No need to keep history any more. 
- evictHist.clear(); - return true; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java index 5407c7df8f931..ed296cdc1d2aa 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java @@ -674,15 +674,6 @@ public void handleSupplyMessage( try { // Loop through all received entries and try to preload them. for (GridCacheEntryInfo entry : e.getValue().infos()) { - if (!part.preloadingPermitted(entry.key(), entry.version())) { - if (log.isDebugEnabled()) - log.debug("Preloading is not permitted for entry due to " + - "evictions [key=" + entry.key() + - ", ver=" + entry.version() + ']'); - - continue; - } - if (!preloadEntry(node, p, entry, topVer)) { if (log.isDebugEnabled()) log.debug("Got entries for invalid partition during " + From 1e3cd0d232faa87533dd78995a1a81196e8c5e10 Mon Sep 17 00:00:00 2001 From: sboikov Date: Tue, 24 Oct 2017 11:24:06 +0300 Subject: [PATCH 226/243] ignite-6700 Tcp discovery: ignore message's failedNodes list received from failed nodes. 
(cherry picked from commit 5f69d26) --- .../ignite/spi/discovery/tcp/ServerImpl.java | 45 +++++-- .../spi/discovery/tcp/TcpDiscoverySpi.java | 18 +++ .../discovery/tcp/TcpDiscoverySelfTest.java | 110 ++++++++++++++++++ 3 files changed, 163 insertions(+), 10 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java index e87c00ef2c06d..b9f381b5250ff 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java @@ -1992,8 +1992,36 @@ private void cleanIpFinder() { * @param msg Message. */ private void processMessageFailedNodes(TcpDiscoveryAbstractMessage msg) { - if (msg.failedNodes() != null) { - for (UUID nodeId : msg.failedNodes()) { + Collection msgFailedNodes = msg.failedNodes(); + + if (msgFailedNodes != null) { + UUID sndId = msg.senderNodeId(); + + if (sndId != null) { + if (ring.node(sndId) == null) { + if (log.isDebugEnabled()) { + log.debug("Ignore message failed nodes, sender node is not alive [nodeId=" + sndId + + ", failedNodes=" + msgFailedNodes + ']'); + } + + return; + } + + synchronized (mux) { + for (TcpDiscoveryNode failedNode : failedNodes.keySet()) { + if (failedNode.id().equals(sndId)) { + if (log.isDebugEnabled()) { + log.debug("Ignore message failed nodes, sender node is in fail list [nodeId=" + sndId + + ", failedNodes=" + msgFailedNodes + ']'); + } + + return; + } + } + } + } + + for (UUID nodeId : msgFailedNodes) { TcpDiscoveryNode failedNode = ring.node(nodeId); if (failedNode != null) { @@ -2859,9 +2887,6 @@ else if (log.isTraceEnabled()) log.trace("Next node remains the same [nextId=" + next.id() + ", nextOrder=" + next.internalOrder() + ']'); - // Flag that shows whether next node exists and accepts incoming connections. 
- boolean nextNodeExists = sock != null; - final boolean sameHost = U.sameMacs(locNode, next); List locNodeAddrs = U.arrayList(locNode.socketAddresses()); @@ -2886,8 +2911,6 @@ else if (log.isTraceEnabled()) if (timeoutHelper == null) timeoutHelper = new IgniteSpiOperationTimeoutHelper(spi, true); - nextNodeExists = false; - boolean success = false; boolean openSock = false; @@ -3016,8 +3039,6 @@ else if (!spi.failureDetectionTimeoutEnabled() && (e instanceof sock = null; } else { - // Next node exists and accepts incoming messages. - nextNodeExists = true; // Resetting timeout control object to let the code below to use a new one // for the next bunch of operations. timeoutHelper = null; @@ -3109,7 +3130,11 @@ else if (!spi.failureDetectionTimeoutEnabled() && (e instanceof if (latencyCheck && log.isInfoEnabled()) log.info("Latency check message has been written to socket: " + msg.id()); - spi.writeToSocket(sock, out, msg, timeoutHelper.nextTimeoutChunk(spi.getSocketTimeout())); + spi.writeToSocket(newNextNode ? newNext : next, + sock, + out, + msg, + timeoutHelper.nextTimeoutChunk(spi.getSocketTimeout())); long tstamp0 = U.currentTimeMillis(); diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java index acc7233b83f3b..3b83b2e19d54a 100644 --- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java +++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java @@ -1483,6 +1483,24 @@ protected void startMessageProcess(TcpDiscoveryAbstractMessage msg) { // No-op, intended for usage in tests. } + /** + * @param node Target node. + * @param sock Socket. + * @param out Stream to write to. + * @param msg Message. + * @param timeout Timeout. + * @throws IOException If IO failed or write timed out. + * @throws IgniteCheckedException If marshalling failed. 
+ */ + protected void writeToSocket( + ClusterNode node, + Socket sock, + OutputStream out, + TcpDiscoveryAbstractMessage msg, + long timeout) throws IOException, IgniteCheckedException { + writeToSocket(sock, out, msg, timeout); + } + /** * Writes message to the socket. * diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySelfTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySelfTest.java index bf48fcca165c2..d6d484ca5956c 100644 --- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySelfTest.java @@ -44,6 +44,7 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteIllegalStateException; import org.apache.ignite.Ignition; +import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.events.DiscoveryEvent; @@ -52,6 +53,7 @@ import org.apache.ignite.internal.GridComponent; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.IgniteInterruptedCheckedException; import org.apache.ignite.internal.IgniteKernal; import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; import org.apache.ignite.internal.processors.continuous.StartRoutineAckDiscoveryMessage; @@ -77,6 +79,7 @@ import org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; import org.apache.ignite.spi.discovery.tcp.messages.TcpDiscoveryAbstractMessage; +import org.apache.ignite.spi.discovery.tcp.messages.TcpDiscoveryConnectionCheckMessage; import org.apache.ignite.spi.discovery.tcp.messages.TcpDiscoveryCustomEventMessage; import 
org.apache.ignite.spi.discovery.tcp.messages.TcpDiscoveryMetricsUpdateMessage; import org.apache.ignite.spi.discovery.tcp.messages.TcpDiscoveryNodeAddFinishedMessage; @@ -2068,6 +2071,42 @@ public void testDuplicatedDiscoveryDataRemoved() throws Exception { } } + /** + * @throws Exception If failed. + */ + public void testFailedNodeRestoreConnection() throws Exception { + try { + TestRestoreConnectedSpi.startTest = false; + + for (int i = 1; i < 5; i++) { + nodeSpi.set(new TestRestoreConnectedSpi(3)); + + startGrid(i); + } + + awaitPartitionMapExchange(); + + info("Start fail test"); + + TestRestoreConnectedSpi.startTest = true; + + waitNodeStop(getTestIgniteInstanceName(3)); + + U.sleep(5000); + + for (int i = 1; i < 5; i++) { + if (i != 3) { + Ignite node = ignite(i); + + assertEquals(3, node.cluster().nodes().size()); + } + } + } + finally { + stopAllGrids(); + } + } + /** * @param nodeName Node name. * @throws Exception If failed. @@ -2168,6 +2207,77 @@ private static class Organization { } */ private static class Employee { } + /** + * + */ + private static class TestRestoreConnectedSpi extends TcpDiscoverySpi { + /** */ + static volatile boolean startTest; + + /** */ + private long sleepEndTime; + + /** */ + private long errNodeOrder; + + /** */ + private ClusterNode errNext; + + /** + * @param errNodeOrder + */ + TestRestoreConnectedSpi(long errNodeOrder) { + this.errNodeOrder = errNodeOrder; + } + + /** {@inheritDoc} */ + @Override protected void writeToSocket(ClusterNode node, + Socket sock, + OutputStream out, + TcpDiscoveryAbstractMessage msg, + long timeout) throws IOException, IgniteCheckedException { + if (startTest && !(msg instanceof TcpDiscoveryConnectionCheckMessage)) { + if (node.order() == errNodeOrder) { + log.info("Fail write on message send [node=" + node.id() + ", msg=" + msg + ']'); + + throw new SocketTimeoutException(); + } + else if (locNode.order() == errNodeOrder) { + if (sleepEndTime == 0) { + errNext = node; + + sleepEndTime = 
System.currentTimeMillis() + 3000; + } + + long sleepTime = sleepEndTime - System.currentTimeMillis(); + + if (sleepTime > 0) { + log.info("Start sleep on message send: " + msg); + + try { + U.sleep(sleepTime); + } + catch (IgniteInterruptedCheckedException e) { + log.error("Interrupted on socket write: " + e, e); + + throw new IOException(e); + } + + log.info("Stop sleep on message send: " + msg); + + if (node.equals(errNext)) { + log.info("Fail write after sleep [node=" + node.id() + ", msg=" + msg + ']'); + + throw new SocketTimeoutException(); + } + } + } + } + + super.writeToSocket(node, sock, out, msg, timeout); + } + } + /** * */ From 48b0adfe6a662b60130a03cb4c84d027ca65917a Mon Sep 17 00:00:00 2001 From: sboikov Date: Wed, 29 Nov 2017 11:36:49 +0300 Subject: [PATCH 227/243] BinaryMetadataTransport: do not cache local node id, it can change after client reconnect. (cherry picked from commit 89c82f5) --- .../cache/binary/BinaryMetadataTransport.java | 20 ++++++++----------- 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataTransport.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataTransport.java index 3a77190b60247..def7caa0123e9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataTransport.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataTransport.java @@ -68,9 +68,6 @@ final class BinaryMetadataTransport { /** */ private final IgniteLogger log; - /** */ - private final UUID locNodeId; - /** */ private final boolean clientNode; @@ -117,8 +114,6 @@ final class BinaryMetadataTransport { discoMgr = ctx.discovery(); - locNodeId = ctx.localNodeId(); - clientNode = ctx.clientNode(); discoMgr.setCustomEventListener(MetadataUpdateProposedMessage.class, new MetadataUpdateProposedListener()); @@ -170,7 +165,7 @@ 
GridFutureAdapter requestMetadataUpdate(BinaryMetadata met unlabeledFutures.add(resFut); if (!stopping) - discoMgr.sendCustomEvent(new MetadataUpdateProposedMessage(metadata, locNodeId)); + discoMgr.sendCustomEvent(new MetadataUpdateProposedMessage(metadata, ctx.localNodeId())); else resFut.onDone(MetadataUpdateResult.createUpdateDisabledResult()); } @@ -299,7 +294,7 @@ private final class MetadataUpdateProposedListener implements CustomEventListene acceptedVer = msg.acceptedVersion(); } - if (locNodeId.equals(msg.origNodeId())) { + if (ctx.localNodeId().equals(msg.origNodeId())) { MetadataUpdateResultFuture fut = unlabeledFutures.poll(); if (msg.rejected()) @@ -540,12 +535,16 @@ private SyncKey(int typeId, int ver) { this.ver = ver; } - /** */ + /** + * @return Type ID. + */ int typeId() { return typeId; } - /** */ + /** + * @return Version. + */ int version() { return ver; } @@ -627,7 +626,6 @@ private final class MetadataRequestListener implements GridMessageListener { * Listener is registered on each client node and listens for metadata responses from cluster. 
*/ private final class MetadataResponseListener implements GridMessageListener { - /** {@inheritDoc} */ @Override public void onMessage(UUID nodeId, Object msg, byte plc) { assert msg instanceof MetadataResponseMessage : msg; @@ -674,8 +672,6 @@ private final class MetadataResponseListener implements GridMessageListener { fut.onDone(MetadataUpdateResult.createFailureResult(new BinaryObjectException(e))); } } - - } /** From 614b877a7e2b0b83df1b6e0fcee1faa29a509243 Mon Sep 17 00:00:00 2001 From: Alexey Goncharuk Date: Mon, 30 Oct 2017 18:18:53 +0300 Subject: [PATCH 228/243] IGNITE-6793 More debug output (cherry picked from commit 0134f2b) --- .../dht/preloader/InitNewCoordinatorFuture.java | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/InitNewCoordinatorFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/InitNewCoordinatorFuture.java index b5acd4b0e8fcf..596fa8c4f8976 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/InitNewCoordinatorFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/InitNewCoordinatorFuture.java @@ -38,6 +38,7 @@ import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.events.EventType.EVT_NODE_JOINED; @@ -113,6 +114,10 @@ public void init(GridDhtPartitionsExchangeFuture exchFut) throws IgniteCheckedEx nodes.add(node); } + else if (!node.isLocal()) { + if (log.isInfoEnabled()) + log.info("Init new coordinator future will skip remote node: " + node); + } } if 
(exchFut.context().mergeExchanges() && !curDiscoCache.version().equals(discoCache.version())) { @@ -148,8 +153,10 @@ public void init(GridDhtPartitionsExchangeFuture exchFut) throws IgniteCheckedEx } if (log.isInfoEnabled()) { - log.info("Try restore exchange result [allNodes=" + awaited + - ", joined=" + joinedNodes.keySet() + ']'); + log.info("Try restore exchange result [awaited=" + awaited + + ", joined=" + joinedNodes.keySet() + + ", nodes=" + U.nodeIds(nodes) + + ", discoAllNodes=" + U.nodeIds(discoCache.allNodes()) + ']'); } if (!nodes.isEmpty()) { From 1962fa6bc0beda23421b9da39dd8fa47c3691dc4 Mon Sep 17 00:00:00 2001 From: devozerov Date: Thu, 9 Nov 2017 11:01:05 +0300 Subject: [PATCH 229/243] IGNITE-6480: SQL: implemented base parser/lexer and CREATE INDEX command support. This closes #3001. --- .../ignite/internal/sql/SqlKeyword.java | 237 ++++++++++++ .../apache/ignite/internal/sql/SqlLexer.java | 213 ++++++++++ .../internal/sql/SqlLexerLookAheadToken.java | 75 ++++ .../ignite/internal/sql/SqlLexerToken.java | 48 +++ .../internal/sql/SqlLexerTokenType.java | 112 ++++++ .../internal/sql/SqlParseException.java | 99 +++++ .../apache/ignite/internal/sql/SqlParser.java | 174 +++++++++ .../ignite/internal/sql/SqlParserUtils.java | 363 ++++++++++++++++++ .../internal/sql/command/SqlCommand.java | 43 +++ .../sql/command/SqlCreateIndexCommand.java | 200 ++++++++++ .../internal/sql/command/SqlIndexColumn.java | 61 +++ .../sql/command/SqlQualifiedName.java | 70 ++++ .../internal/sql/SqlParserSelfTest.java | 198 ++++++++++ .../processors/query/h2/IgniteH2Indexing.java | 66 +++- .../query/h2/ddl/DdlStatementsProcessor.java | 80 +++- .../IgniteCacheQuerySelfTestSuite.java | 3 + 16 files changed, 2037 insertions(+), 5 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/sql/SqlKeyword.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexer.java create mode 100644 
modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerLookAheadToken.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerToken.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerTokenType.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParseException.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParserUtils.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlCommand.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlCreateIndexCommand.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlIndexColumn.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlQualifiedName.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserSelfTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlKeyword.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlKeyword.java new file mode 100644 index 0000000000000..ac826cc50262f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlKeyword.java @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql; + +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.util.typedef.F; + +import java.lang.reflect.Field; +import java.util.HashSet; + +/** + * SQL keyword constants. + */ +public class SqlKeyword { + /** Keyword: ASC. */ + public static final String ASC = "ASC"; + + /** Keyword: BIGINT */ + public static final String BIGINT = "BIGINT"; + + /** Keyword: BIT. */ + public static final String BIT = "BIT"; + + /** Keyword: BOOL. */ + public static final String BOOL = "BOOL"; + + /** Keyword: BOOLEAN. */ + public static final String BOOLEAN = "BOOLEAN"; + + /** Keyword: CASCADE. */ + public static final String CASCADE = "CASCADE"; + + /** Keyword: CHAR. */ + public static final String CHAR = "CHAR"; + + /** Keyword: CHARACTER. */ + public static final String CHARACTER = "CHARACTER"; + + /** Keyword: CREATE. */ + public static final String CREATE = "CREATE"; + + /** Keyword: DATE. */ + public static final String DATE = "DATE"; + + /** Keyword: DATETIME. */ + public static final String DATETIME = "DATETIME"; + + /** Keyword: DEC. */ + public static final String DEC = "DEC"; + + /** Keyword: DECIMAL. */ + public static final String DECIMAL = "DECIMAL"; + + /** Keyword: DESC. */ + public static final String DESC = "DESC"; + + /** Keyword: DOUBLE. */ + public static final String DOUBLE = "DOUBLE"; + + /** Keyword: DROP. */ + public static final String DROP = "DROP"; + + /** Keyword: EXISTS. */ + public static final String EXISTS = "EXISTS"; + + /** Keyword: FLOAT. 
*/ + public static final String FLOAT = "FLOAT"; + + /** Keyword: FLOAT4. */ + public static final String FLOAT4 = "FLOAT4"; + + /** Keyword: FLOAT8. */ + public static final String FLOAT8 = "FLOAT8"; + + /** Keyword: FULLTEXT. */ + public static final String FULLTEXT = "FULLTEXT"; + + /** Keyword: UNIQUE. */ + public static final String HASH = "HASH"; + + /** Keyword: IF. */ + public static final String IF = "IF"; + + /** Keyword: INDEX. */ + public static final String INDEX = "INDEX"; + + /** Keyword: INT. */ + public static final String INT = "INT"; + + /** Keyword: INT2. */ + public static final String INT2 = "INT2"; + + /** Keyword: INT4. */ + public static final String INT4 = "INT4"; + + /** Keyword: INT8. */ + public static final String INT8 = "INT8"; + + /** Keyword: INTEGER. */ + public static final String INTEGER = "INTEGER"; + + /** Keyword: KEY. */ + public static final String KEY = "KEY"; + + /** Keyword: LONGVARCHAR. */ + public static final String LONGVARCHAR = "LONGVARCHAR"; + + /** Keyword: MEDIUMINT. */ + public static final String MEDIUMINT = "MEDIUMINT"; + + /** Keyword: NCHAR. */ + public static final String NCHAR = "NCHAR"; + + /** Keyword: NOT. */ + public static final String NOT = "NOT"; + + /** Keyword: NUMBER. */ + public static final String NUMBER = "NUMBER"; + + /** Keyword: NUMERIC. */ + public static final String NUMERIC = "NUMERIC"; + + /** Keyword: NVARCHAR. */ + public static final String NVARCHAR = "NVARCHAR"; + + /** Keyword: NVARCHAR2. */ + public static final String NVARCHAR2 = "NVARCHAR2"; + + /** Keyword: ON. */ + public static final String ON = "ON"; + + /** Keyword: PRECISION. */ + public static final String PRECISION = "PRECISION"; + + /** Keyword: PRIMARY. */ + public static final String PRIMARY = "PRIMARY"; + + /** Keyword: REAL. */ + public static final String REAL = "REAL"; + + /** Keyword: RESTRICT. */ + public static final String RESTRICT = "RESTRICT"; + + /** Keyword: SIGNED. 
*/ + public static final String SIGNED = "SIGNED"; + + /** Keyword: SMALLDATETIME. */ + public static final String SMALLDATETIME = "SMALLDATETIME"; + + /** Keyword: SMALLINT. */ + public static final String SMALLINT = "SMALLINT"; + + /** Keyword: SPATIAL. */ + public static final String SPATIAL = "SPATIAL"; + + /** Keyword: TABLE. */ + public static final String TABLE = "TABLE"; + + /** Keyword: TIME. */ + public static final String TIME = "TIME"; + + /** Keyword: TIMESTAMP. */ + public static final String TIMESTAMP = "TIMESTAMP"; + + /** Keyword: TINYINT. */ + public static final String TINYINT = "TINYINT"; + + /** Keyword: UNIQUE. */ + public static final String UNIQUE = "UNIQUE"; + + /** Keyword: UUID. */ + public static final String UUID = "UUID"; + + /** Keyword: VARCHAR. */ + public static final String VARCHAR = "VARCHAR"; + + /** Keyword: VARCHAR2. */ + public static final String VARCHAR2 = "VARCHAR2"; + + /** Keyword: VARCHAR_CASESENSITIVE. */ + public static final String VARCHAR_CASESENSITIVE = "VARCHAR_CASESENSITIVE"; + + /** Keyword: YEAR. */ + public static final String YEAR = "YEAR"; + + /** All keywords. */ + private static final HashSet KEYWORDS; + + static { + KEYWORDS = new HashSet<>(); + + try { + for (Field field : SqlKeyword.class.getDeclaredFields()) { + if (F.eq(String.class, field.getType())) { + String val = (String) field.get(null); + + KEYWORDS.add(val); + } + } + } + catch (ReflectiveOperationException e) { + throw new IgniteException("Failed to initialize keywords collection.", e); + } + } + + /** + * Check if string is a keyword. + * + * @param str String. + * @return {@code True} if it is a keyword. + */ + public static boolean isKeyword(String str) { + return KEYWORDS.contains(str); + } + + /** + * Private constructor. + */ + private SqlKeyword() { + // No-op. 
+ } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexer.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexer.java new file mode 100644 index 0000000000000..a8009b7296f87 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexer.java @@ -0,0 +1,213 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql; + +import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; + +/** + * SQL lexer. + */ +public class SqlLexer implements SqlLexerToken { + /** Original input. */ + private final String sql; + + /** Input characters. */ + private final char[] inputChars; + + /** Current position. */ + private int pos; + + /** Current token start. */ + private int tokenPos; + + /** Current token. */ + private String token; + + /** Token type. */ + private SqlLexerTokenType tokenTyp; + + /** + * Constructor. + * + * @param sql Input. + */ + public SqlLexer(String sql) { + assert sql != null; + + this.sql = sql; + + // Additional slot for look-ahead convenience. 
+ inputChars = new char[sql.length() + 1]; + + for (int i = 0; i < sql.length(); i++) + inputChars[i] = sql.charAt(i); + } + + /** + * Get next token without lexer state change. + * + * @return Next token. + */ + public SqlLexerToken lookAhead() { + int pos0 = pos; + String token0 = token; + int tokenPos0 = tokenPos; + SqlLexerTokenType tokenTyp0 = tokenTyp; + + try { + if (shift()) + return new SqlLexerLookAheadToken(sql, token, tokenPos, tokenTyp); + else + return new SqlLexerLookAheadToken(sql, null, tokenPos, SqlLexerTokenType.EOF); + } + finally { + pos = pos0; + token = token0; + tokenPos = tokenPos0; + tokenTyp = tokenTyp0; + } + } + + /** + * Shift lexer to the next position. + * + * @return {@code True} if next token was found, {@code false} in case of end-of-file. + */ + public boolean shift() { + while (!eod()) { + int tokenStartPos0 = pos; + + String token0 = null; + SqlLexerTokenType tokenTyp0 = null; + + char c = inputChars[pos++]; + + switch (c) { + case '-': + if (inputChars[pos] == '-') { + // Full-line comment. + pos++; + + while (!eod()) { + char c1 = inputChars[pos]; + + if (c1 == '\n' || c1 == '\r') + break; + + pos++; + } + } + else { + // Minus. 
+ token0 = "-"; + tokenTyp0 = SqlLexerTokenType.MINUS; + } + + break; + + case '\"': + while (true) { + if (eod()) { + throw new SqlParseException(sql, tokenStartPos0, IgniteQueryErrorCode.PARSING, + "Unclosed quoted identifier."); + } + + char c1 = inputChars[pos]; + + pos++; + + if (c1 == '\"') + break; + } + + token0 = sql.substring(tokenStartPos0 + 1, pos - 1); + tokenTyp0 = SqlLexerTokenType.QUOTED; + + break; + + case '.': + case ',': + case ';': + case '(': + case ')': + token0 = Character.toString(c); + tokenTyp0 = SqlLexerTokenType.forChar(c); + + break; + + default: + if (c <= ' ' || Character.isSpaceChar(c)) + continue; + + while (!eod()) { + char c1 = inputChars[pos]; + + if (!Character.isJavaIdentifierPart(c1)) + break; + + pos++; + } + + token0 = sql.substring(tokenStartPos0, pos).toUpperCase(); + tokenTyp0 = SqlLexerTokenType.DEFAULT; + } + + if (tokenTyp0 != null) { + token = token0; + tokenPos = tokenStartPos0; + tokenTyp = tokenTyp0; + + return true; + } + } + + return false; + } + + /** {@inheritDoc} */ + public String sql() { + return sql; + } + + /** {@inheritDoc} */ + public String token() { + return token; + } + + /** {@inheritDoc} */ + public char tokenFirstChar() { + return token.charAt(0); + } + + /** {@inheritDoc} */ + public int tokenPosition() { + return tokenPos; + } + + /** {@inheritDoc} */ + public SqlLexerTokenType tokenType() { + return tokenTyp; + } + + /** + * @return {@code True} if end of data is reached. 
+ */ + private boolean eod() { + return pos == inputChars.length - 1; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerLookAheadToken.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerLookAheadToken.java new file mode 100644 index 0000000000000..e697473a2e42b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerLookAheadToken.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql; + +/** + * Plain immutable look-ahead parser token. + */ +public class SqlLexerLookAheadToken implements SqlLexerToken { + /** SQL. */ + private final String sql; + + /** Token. */ + private final String token; + + /** Token position. */ + private final int tokenPos; + + /** Token type. */ + private final SqlLexerTokenType tokenTyp; + + /** + * Constructor. + * + * @param sql Original SQL. + * @param token Token. + * @param tokenPos Token position. + * @param tokenTyp Token type. 
+ */ + public SqlLexerLookAheadToken(String sql, String token, int tokenPos, SqlLexerTokenType tokenTyp) { + this.sql = sql; + this.token = token; + this.tokenPos = tokenPos; + this.tokenTyp = tokenTyp; + } + + /** {@inheritDoc} */ + public String sql() { + return sql; + } + + /** {@inheritDoc} */ + @Override public String token() { + return token; + } + + /** {@inheritDoc} */ + @Override public char tokenFirstChar() { + return token.charAt(0); + } + + /** {@inheritDoc} */ + @Override public int tokenPosition() { + return tokenPos; + } + + /** {@inheritDoc} */ + @Override public SqlLexerTokenType tokenType() { + return tokenTyp; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerToken.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerToken.java new file mode 100644 index 0000000000000..a17263588d92a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerToken.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql; + +/** + * SQL parser token interface. + */ +public interface SqlLexerToken { + /** + * @return Original SQL. 
+ */ + public String sql(); + + /** + * @return Current token. + */ + public String token(); + + /** + * @return First character of the current token. + */ + public char tokenFirstChar(); + + /** + * @return Current token start position. + */ + public int tokenPosition(); + + /** + * @return Token type. + */ + public SqlLexerTokenType tokenType(); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerTokenType.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerTokenType.java new file mode 100644 index 0000000000000..693832bc35bba --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerTokenType.java @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the 'License'); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an 'AS IS' BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql; + +import java.util.HashMap; + +/** + * Lexer token type. + */ +public enum SqlLexerTokenType { + /** Standard word. */ + DEFAULT, + + /** Quoted phrase. */ + QUOTED, + + /** Minus sign. */ + MINUS('-'), + + /** Dot. */ + DOT('.'), + + /** Comma. */ + COMMA(','), + + /** Parenthesis: left. */ + PARENTHESIS_LEFT('('), + + /** Parenthesis: right. */ + PARENTHESIS_RIGHT(')'), + + /** Semicolon. */ + SEMICOLON(';'), + + /** End of string. 
*/ + EOF; + + /** Mapping from character to type.. */ + private static final HashMap CHAR_TO_TYP = new HashMap<>(); + + /** Character. */ + private final Character c; + + /** Character as string. */ + private final String str; + + static { + for (SqlLexerTokenType typ : SqlLexerTokenType.values()) { + Character c = typ.asChar(); + + if (c != null) + CHAR_TO_TYP.put(c, typ); + } + } + + /** + * Get token type for character. + * + * @param c Character. + * @return Type. + */ + public static SqlLexerTokenType forChar(char c) { + return CHAR_TO_TYP.get(c); + } + + /** + * Constructor. + */ + SqlLexerTokenType() { + this(null); + } + + /** + * Constructor. + * + * @param c Corresponding character. + */ + SqlLexerTokenType(Character c) { + this.c = c; + + str = c != null ? c.toString() : null; + } + + /** + * @return Character. + */ + public Character asChar() { + return c; + } + + /** + * @return Character as string. + */ + public String asString() { + return str; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParseException.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParseException.java new file mode 100644 index 0000000000000..96d385da51fff --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParseException.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql; + +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * Parse exception. + */ +public class SqlParseException extends IgniteException { + /** */ + private static final long serialVersionUID = 0L; + + /** SQL command. */ + private final String sql; + + /** Position. */ + private final int pos; + + /** Error code. */ + private final int code; + + /** + * Constructor. + * + * @param sql SQL command. + * @param pos Position. + * @param code Error code (parsing, unsupported operation, etc.). + * @param msg Message. + */ + public SqlParseException(String sql, int pos, int code, String msg) { + super(prepareMessage(sql, pos, msg)); + + this.sql = sql; + this.pos = pos; + this.code = code; + } + + /** + * Prepare message. + * + * @param sql Original SQL. + * @param pos Position. + * @param msg Message. + * @return Prepared message. + */ + private static String prepareMessage(String sql, int pos, String msg) { + String sql0; + + if (pos == sql.length()) + sql0 = sql + "[*]"; + else + sql0 = sql.substring(0, pos) + "[*]" + sql.substring(pos); + + return "Failed to parse SQL statement \"" + sql0 + "\": " + msg; + } + + /** + * @return SQL command. + */ + public String sql() { + return sql; + } + + /** + * @return Position. + */ + public int position() { + return pos; + } + + /** + * @return Error code. 
+ */ + public int code() { + return code; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(SqlParseException.class, this, "msg", getMessage()); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java new file mode 100644 index 0000000000000..9e0eee0767636 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.sql; + +import org.apache.ignite.internal.sql.command.SqlCommand; +import org.apache.ignite.internal.sql.command.SqlCreateIndexCommand; +import org.jetbrains.annotations.Nullable; + +import static org.apache.ignite.internal.sql.SqlKeyword.CREATE; +import static org.apache.ignite.internal.sql.SqlKeyword.DROP; +import static org.apache.ignite.internal.sql.SqlKeyword.HASH; +import static org.apache.ignite.internal.sql.SqlKeyword.INDEX; +import static org.apache.ignite.internal.sql.SqlKeyword.PRIMARY; +import static org.apache.ignite.internal.sql.SqlKeyword.SPATIAL; +import static org.apache.ignite.internal.sql.SqlKeyword.TABLE; +import static org.apache.ignite.internal.sql.SqlKeyword.UNIQUE; +import static org.apache.ignite.internal.sql.SqlParserUtils.errorUnexpectedToken; +import static org.apache.ignite.internal.sql.SqlParserUtils.errorUnsupported; +import static org.apache.ignite.internal.sql.SqlParserUtils.errorUnsupportedIfMatchesKeyword; +import static org.apache.ignite.internal.sql.SqlParserUtils.matchesKeyword; + +/** + * SQL parser. + */ +public class SqlParser { + /** Scheme name. */ + private final String schemaName; + + /** Lexer. */ + private final SqlLexer lex; + + /** + * Constructor. + * + * @param schemaName Schema name. + * @param sql Original SQL. + */ + public SqlParser(@Nullable String schemaName, String sql) { + this.schemaName = schemaName; + + lex = new SqlLexer(sql); + } + + /** + * Get next command. + * + * @return Command or {@code null} if end of script is reached. + */ + public SqlCommand nextCommand() { + SqlCommand cmd = nextCommand0(); + + if (cmd != null) { + if (cmd.schemaName() == null) + cmd.schemaName(schemaName); + } + + return cmd; + } + + /** + * Get next command. + * + * @return Command or {@code null} if end of script is reached. 
+ */ + private SqlCommand nextCommand0() { + while (true) { + if (!lex.shift()) + return null; + + switch (lex.tokenType()) { + case SEMICOLON: + // Empty command, skip. + continue; + + case DEFAULT: + SqlCommand cmd = null; + + switch (lex.token()) { + case CREATE: + cmd = processCreate(); + + break; + + case DROP: + cmd = processDrop(); + + break; + } + + if (cmd != null) { + // If there is something behind the command, this is a syntax error. + if (lex.shift() && lex.tokenType() != SqlLexerTokenType.SEMICOLON) + throw errorUnexpectedToken(lex); + + return cmd; + } + else + throw errorUnexpectedToken(lex, CREATE, DROP); + + case QUOTED: + case MINUS: + case DOT: + case COMMA: + case PARENTHESIS_LEFT: + case PARENTHESIS_RIGHT: + default: + throw errorUnexpectedToken(lex); + } + } + } + + /** + * Process CREATE keyword. + * + * @return Command. + */ + private SqlCommand processCreate() { + if (lex.shift() && lex.tokenType() == SqlLexerTokenType.DEFAULT) { + SqlCommand cmd = null; + + switch (lex.token()) { + case INDEX: + cmd = new SqlCreateIndexCommand(); + + break; + + case TABLE: + throw errorUnsupported(lex); + + case SPATIAL: + if (lex.shift() && matchesKeyword(lex, INDEX)) + cmd = new SqlCreateIndexCommand().spatial(true); + else + throw errorUnexpectedToken(lex, INDEX); + + break; + } + + if (cmd != null) + return cmd.parse(lex); + + errorUnsupportedIfMatchesKeyword(lex, HASH, PRIMARY, UNIQUE); + } + + throw errorUnexpectedToken(lex, INDEX, TABLE, SPATIAL); + } + + /** + * Process DROP keyword. + * + * @return Command. 
+ */ + private SqlCommand processDrop() { + if (lex.shift() && lex.tokenType() == SqlLexerTokenType.DEFAULT) + throw errorUnsupported(lex); + + throw errorUnexpectedToken(lex, INDEX, TABLE); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParserUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParserUtils.java new file mode 100644 index 0000000000000..cfe4b6f12b933 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParserUtils.java @@ -0,0 +1,363 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql; + +import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; +import org.apache.ignite.internal.sql.command.SqlQualifiedName; +import org.apache.ignite.internal.util.typedef.F; + +import static org.apache.ignite.internal.sql.SqlKeyword.EXISTS; +import static org.apache.ignite.internal.sql.SqlKeyword.IF; +import static org.apache.ignite.internal.sql.SqlKeyword.NOT; + +/** + * Parser utility methods. + */ +public class SqlParserUtils { + /** + * Parse IF EXISTS statement. + * + * @param lex Lexer. + * @return {@code True} if statement is found. 
+ */ + public static boolean parseIfExists(SqlLexer lex) { + SqlLexerToken token = lex.lookAhead(); + + if (matchesKeyword(token, IF)) { + lex.shift(); + + skipIfMatchesKeyword(lex, EXISTS); + + return true; + } + + return false; + } + + /** + * Parse IF NOT EXISTS statement. + * + * @param lex Lexer. + * @return {@code True} if statement is found. + */ + public static boolean parseIfNotExists(SqlLexer lex) { + SqlLexerToken token = lex.lookAhead(); + + if (matchesKeyword(token, IF)) { + lex.shift(); + + skipIfMatchesKeyword(lex, NOT); + skipIfMatchesKeyword(lex, EXISTS); + + return true; + } + + return false; + } + + /** + * Skip commr or right parenthesis. + * + * @param lex Lexer. + * @return {@code True} if right parenthesis is found. + */ + public static boolean skipCommaOrRightParenthesis(SqlLexer lex) { + if (lex.shift()) { + switch (lex.tokenType()) { + case COMMA: + return false; + + case PARENTHESIS_RIGHT: + return true; + } + } + + throw errorUnexpectedToken(lex, ",", ")"); + } + + /** + * Parse integer value. + * + * @param lex Lexer. + * @return Integer value. + */ + public static int parseInt(SqlLexer lex) { + if (lex.shift() && lex.tokenType() == SqlLexerTokenType.DEFAULT) { + try { + return Integer.parseInt(lex.token()); + } + catch (NumberFormatException e) { + // No-op. + } + } + + throw errorUnexpectedToken(lex, "[number]"); + } + + /** + * Process name. + * + * @param lex Lexer. + * @param additionalExpTokens Additional expected tokens in case of error. + * @return Name. + */ + public static String parseIdentifier(SqlLexer lex, String... additionalExpTokens) { + if (lex.shift() && isVaildIdentifier(lex)) + return lex.token(); + + throw errorUnexpectedToken(lex, "[identifier]", additionalExpTokens); + } + + /** + * Process qualified name. + * + * @param lex Lexer. + * @param additionalExpTokens Additional expected tokens in case of error. + * @return Qualified name. 
+ */ + public static SqlQualifiedName parseQualifiedIdentifier(SqlLexer lex, String... additionalExpTokens) { + if (lex.shift() && isVaildIdentifier(lex)) { + SqlQualifiedName res = new SqlQualifiedName(); + + String first = lex.token(); + + SqlLexerToken nextToken = lex.lookAhead(); + + if (nextToken.tokenType() == SqlLexerTokenType.DOT) { + lex.shift(); + + String second = parseIdentifier(lex); + + return res.schemaName(first).name(second); + } + else + return res.name(first); + } + + throw errorUnexpectedToken(lex, "[qualified identifier]", additionalExpTokens); + } + + /** + * Check if token is identifier. + * + * @param token Token. + * @return {@code True} if we are standing on possible identifier. + */ + public static boolean isVaildIdentifier(SqlLexerToken token) { + switch (token.tokenType()) { + case DEFAULT: + char c = token.tokenFirstChar(); + + if ((c >= 'A' && c <= 'Z') || c == '_') { + if (SqlKeyword.isKeyword(token.token())) + throw errorUnexpectedToken(token, "[identifier]"); + + return true; + } + + throw error(token, "Illegal identifier name: " + token.token()); + + case QUOTED: + return true; + + default: + return false; + } + } + + /** + * Check if current lexer token matches expected. + * + * @param token Token.. + * @param expKeyword Expected keyword. + * @return {@code True} if matches. + */ + public static boolean matchesKeyword(SqlLexerToken token, String expKeyword) { + return token.tokenType() == SqlLexerTokenType.DEFAULT && expKeyword.equals(token.token()); + } + + /** + * Skip token if it matches expected keyword. + * + * @param lex Lexer. + * @param expKeyword Expected keyword. + */ + public static void skipIfMatchesKeyword(SqlLexer lex, String expKeyword) { + if (lex.shift() && matchesKeyword(lex, expKeyword)) + return; + + throw errorUnexpectedToken(lex, expKeyword); + } + + /** + * Skip next token if it matches expected type. + * + * @param lex Lexer. + * @param tokenTyp Expected token type. 
+ */ + public static void skipIfMatches(SqlLexer lex, SqlLexerTokenType tokenTyp) { + if (lex.shift() && F.eq(lex.tokenType(), tokenTyp)) + return; + + throw errorUnexpectedToken(lex, tokenTyp.asString()); + } + + /** + * Create parse exception referring to current lexer position. + * + * @param token Token. + * @param msg Message. + * @return Exception. + */ + public static SqlParseException error(SqlLexerToken token, String msg) { + return error0(token, IgniteQueryErrorCode.PARSING, msg); + } + + /** + * Create parse exception referring to current lexer position. + * + * @param token Token. + * @param code Error code. + * @param msg Message. + * @return Exception. + */ + private static SqlParseException error0(SqlLexerToken token, int code, String msg) { + return new SqlParseException(token.sql(), token.tokenPosition(), code, msg); + } + + /** + * Create generic parse exception due to unexpected token. + * + * @param token Token. + * @return Exception. + */ + public static SqlParseException errorUnexpectedToken(SqlLexerToken token) { + return errorUnexpectedToken0(token); + } + + /** + * Throw unsupported token exception if passed keyword is found. + * + * @param token Token. + * @param keyword Keyword. + */ + public static void errorUnsupportedIfMatchesKeyword(SqlLexerToken token, String keyword) { + if (matchesKeyword(token, keyword)) + throw errorUnsupported(token); + } + + /** + * Throw unsupported token exception if one of passed keywords is found. + * + * @param token Token. + * @param keywords Keywords. + */ + public static void errorUnsupportedIfMatchesKeyword(SqlLexerToken token, String... keywords) { + if (F.isEmpty(keywords)) + return; + + for (String keyword : keywords) + errorUnsupportedIfMatchesKeyword(token, keyword); + } + + /** + * Error on unsupported keyword. + * + * @param token Token. + * @return Error. 
+ */ + public static SqlParseException errorUnsupported(SqlLexerToken token) { + throw error0(token, IgniteQueryErrorCode.UNSUPPORTED_OPERATION, + "Unsupported keyword: \"" + token.token() + "\""); + } + + /** + * Create generic parse exception due to unexpected token. + * + * @param lex Lexer. + * @param expToken Expected token. + * @return Exception. + */ + public static SqlParseException errorUnexpectedToken(SqlLexer lex, String expToken) { + return errorUnexpectedToken0(lex, expToken); + } + + /** + * Create generic parse exception due to unexpected token. + * + * @param token Token. + * @param firstExpToken First expected token. + * @param expTokens Additional expected tokens (if any). + * @return Exception. + */ + public static SqlParseException errorUnexpectedToken(SqlLexerToken token, String firstExpToken, + String... expTokens) { + if (F.isEmpty(expTokens)) + return errorUnexpectedToken0(token, firstExpToken); + else { + String[] expTokens0 = new String[expTokens.length + 1]; + + expTokens0[0] = firstExpToken; + + System.arraycopy(expTokens, 0, expTokens0, 1, expTokens.length); + + throw errorUnexpectedToken0(token, expTokens0); + } + } + + /** + * Create generic parse exception due to unexpected token. + * + * @param token Token. + * @param expTokens Expected tokens (if any). + * @return Exception. + */ + @SuppressWarnings("StringConcatenationInsideStringBufferAppend") + private static SqlParseException errorUnexpectedToken0(SqlLexerToken token, String... expTokens) { + String token0 = token.token(); + + StringBuilder msg = new StringBuilder( + token0 == null ? 
"Unexpected end of command" : "Unexpected token: \"" + token0 + "\""); + + if (!F.isEmpty(expTokens)) { + msg.append(" (expected: "); + + boolean first = true; + + for (String expToken : expTokens) { + if (first) + first = false; + else + msg.append(", "); + + msg.append("\"" + expToken + "\""); + } + + msg.append(")"); + } + + throw error(token, msg.toString()); + } + + /** + * Private constructor. + */ + private SqlParserUtils() { + // No-op. + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlCommand.java new file mode 100644 index 0000000000000..61ff31fd12b08 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlCommand.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql.command; + +import org.apache.ignite.internal.sql.SqlLexer; + +/** + * Generic SQL command. + */ +public interface SqlCommand { + /** + * Parse command. + * + * @param lex Lexer. + * @return This instance. + */ + public SqlCommand parse(SqlLexer lex); + + /** + * @return Schema name. 
+ */ + public String schemaName(); + + /** + * @param schemaName Schema name. + */ + public void schemaName(String schemaName); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlCreateIndexCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlCreateIndexCommand.java new file mode 100644 index 0000000000000..897aea5a31c51 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlCreateIndexCommand.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.sql.command; + +import org.apache.ignite.internal.sql.SqlLexer; +import org.apache.ignite.internal.sql.SqlLexerTokenType; +import org.apache.ignite.internal.sql.SqlLexerToken; +import org.apache.ignite.internal.util.tostring.GridToStringExclude; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.S; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.Set; + +import static org.apache.ignite.internal.sql.SqlKeyword.ASC; +import static org.apache.ignite.internal.sql.SqlKeyword.DESC; +import static org.apache.ignite.internal.sql.SqlKeyword.IF; +import static org.apache.ignite.internal.sql.SqlKeyword.ON; +import static org.apache.ignite.internal.sql.SqlParserUtils.error; +import static org.apache.ignite.internal.sql.SqlParserUtils.errorUnexpectedToken; +import static org.apache.ignite.internal.sql.SqlParserUtils.matchesKeyword; +import static org.apache.ignite.internal.sql.SqlParserUtils.parseIdentifier; +import static org.apache.ignite.internal.sql.SqlParserUtils.parseIfNotExists; +import static org.apache.ignite.internal.sql.SqlParserUtils.parseQualifiedIdentifier; +import static org.apache.ignite.internal.sql.SqlParserUtils.skipCommaOrRightParenthesis; +import static org.apache.ignite.internal.sql.SqlParserUtils.skipIfMatchesKeyword; + +/** + * CREATE INDEX command. + */ +public class SqlCreateIndexCommand implements SqlCommand { + /** Schema name. */ + private String schemaName; + + /** Table name. */ + private String tblName; + + /** Index name. */ + private String idxName; + + /** IF NOT EXISTS flag. */ + private boolean ifNotExists; + + /** Spatial index flag. */ + private boolean spatial; + + /** Columns. */ + @GridToStringInclude + private Collection cols; + + /** Column names. 
*/ + @GridToStringExclude + private Set colNames; + + /** {@inheritDoc} */ + @Override public String schemaName() { + return schemaName; + } + + /** {@inheritDoc} */ + @Override public void schemaName(String schemaName) { + this.schemaName = schemaName; + } + + /** + * @return Table name. + */ + public String tableName() { + return tblName; + } + + /** + * @return Index name. + */ + public String indexName() { + return idxName; + } + + /** + * @return IF NOT EXISTS flag. + */ + public boolean ifNotExists() { + return ifNotExists; + } + + /** + * @return Spatial index flag. + */ + public boolean spatial() { + return spatial; + } + + /** + * @param spatial Spatial index flag. + * @return This instance. + */ + public SqlCreateIndexCommand spatial(boolean spatial) { + this.spatial = spatial; + + return this; + } + + /** + * @return Columns. + */ + public Collection columns() { + return cols != null ? cols : Collections.emptySet(); + } + + /** {@inheritDoc} */ + @Override public SqlCommand parse(SqlLexer lex) { + ifNotExists = parseIfNotExists(lex); + + idxName = parseIdentifier(lex, IF); + + skipIfMatchesKeyword(lex, ON); + + SqlQualifiedName tblQName = parseQualifiedIdentifier(lex); + + schemaName = tblQName.schemaName(); + tblName = tblQName.name(); + + parseColumnList(lex); + + return this; + } + + /* + * @param lex Lexer. + */ + private void parseColumnList(SqlLexer lex) { + if (!lex.shift() || lex.tokenType() != SqlLexerTokenType.PARENTHESIS_LEFT) + throw errorUnexpectedToken(lex, "("); + + while (true) { + perseIndexColumn(lex); + + if (skipCommaOrRightParenthesis(lex)) + break; + } + } + + /** + * @param lex Lexer. 
+ */ + private void perseIndexColumn(SqlLexer lex) { + String name = parseIdentifier(lex); + boolean desc = false; + + SqlLexerToken nextToken = lex.lookAhead(); + + if (matchesKeyword(nextToken, ASC) || matchesKeyword(nextToken, DESC)) { + lex.shift(); + + if (matchesKeyword(lex, DESC)) + desc = true; + } + + addColumn(lex, new SqlIndexColumn(name, desc)); + } + + /** + * @param lex Lexer. + * @param col Column. + */ + private void addColumn(SqlLexer lex, SqlIndexColumn col) { + if (cols == null) { + cols = new LinkedList<>(); + colNames = new HashSet<>(); + } + + if (!colNames.add(col.name())) + throw error(lex, "Column already defined: " + col.name()); + + cols.add(col); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(SqlCreateIndexCommand.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlIndexColumn.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlIndexColumn.java new file mode 100644 index 0000000000000..227c02a8a17d4 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlIndexColumn.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.sql.command; + +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * Index column definition. + */ +public class SqlIndexColumn { + /** Column name. */ + private final String name; + + /** Descending flag. */ + private final boolean desc; + + /** + * Constructor. + * + * @param name Column name. + * @param desc Descending flag. + */ + public SqlIndexColumn(String name, boolean desc) { + this.name = name; + this.desc = desc; + } + + /** + * @return Column name. + */ + public String name() { + return name; + } + + /** + * @return Descending flag. + */ + public boolean descending() { + return desc; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(SqlIndexColumn.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlQualifiedName.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlQualifiedName.java new file mode 100644 index 0000000000000..965e0eff52cf7 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlQualifiedName.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.sql.command; + +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * SQL qualified name. + */ +public class SqlQualifiedName { + /** Schema name. */ + private String schemaName; + + /** Object name. */ + private String name; + + /** + * @return Schema name. + */ + public String schemaName() { + return schemaName; + } + + /** + * @param schemaName Schema name. + * @return This instance. + */ + public SqlQualifiedName schemaName(String schemaName) { + this.schemaName = schemaName; + + return this; + } + + /** + * @return Object name. + */ + public String name() { + return name; + } + + /** + * @param name Object name. + * @return This instance. + */ + public SqlQualifiedName name(String name) { + this.name = name; + + return this; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(SqlQualifiedName.class, this); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserSelfTest.java new file mode 100644 index 0000000000000..98a6aae9b12f1 --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserSelfTest.java @@ -0,0 +1,198 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql; + +import org.apache.ignite.internal.sql.command.SqlCreateIndexCommand; +import org.apache.ignite.internal.sql.command.SqlIndexColumn; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +import java.util.Collection; +import java.util.Iterator; +import java.util.concurrent.Callable; + +/** + * Test for parser. + */ +@SuppressWarnings({"UnusedReturnValue", "ThrowableNotThrown"}) +public class SqlParserSelfTest extends GridCommonAbstractTest { + /** + * Tests for CREATE INDEX command. + * + * @throws Exception If failed. + */ + public void testCreateIndex() throws Exception { + // Base. + parseValidate(null, "CREATE INDEX idx ON tbl(a)", null, "TBL", "IDX", "A", false); + parseValidate(null, "CREATE INDEX idx ON tbl(a ASC)", null, "TBL", "IDX", "A", false); + parseValidate(null, "CREATE INDEX idx ON tbl(a DESC)", null, "TBL", "IDX", "A", true); + + // Case (in)sensitivity. 
+ parseValidate(null, "CREATE INDEX IDX ON TBL(COL)", null, "TBL", "IDX", "COL", false); + parseValidate(null, "CREATE INDEX iDx ON tBl(cOl)", null, "TBL", "IDX", "COL", false); + + parseValidate(null, "CREATE INDEX \"idx\" ON tbl(col)", null, "TBL", "idx", "COL", false); + parseValidate(null, "CREATE INDEX \"iDx\" ON tbl(col)", null, "TBL", "iDx", "COL", false); + + parseValidate(null, "CREATE INDEX idx ON \"tbl\"(col)", null, "tbl", "IDX", "COL", false); + parseValidate(null, "CREATE INDEX idx ON \"tBl\"(col)", null, "tBl", "IDX", "COL", false); + + parseValidate(null, "CREATE INDEX idx ON tbl(\"col\")", null, "TBL", "IDX", "col", false); + parseValidate(null, "CREATE INDEX idx ON tbl(\"cOl\")", null, "TBL", "IDX", "cOl", false); + + parseValidate(null, "CREATE INDEX idx ON tbl(\"cOl\" ASC)", null, "TBL", "IDX", "cOl", false); + parseValidate(null, "CREATE INDEX idx ON tbl(\"cOl\" DESC)", null, "TBL", "IDX", "cOl", true); + + // Columns. + parseValidate(null, "CREATE INDEX idx ON tbl(a, b)", null, "TBL", "IDX", "A", false, "B", false); + + parseValidate(null, "CREATE INDEX idx ON tbl(a ASC, b)", null, "TBL", "IDX", "A", false, "B", false); + parseValidate(null, "CREATE INDEX idx ON tbl(a, b ASC)", null, "TBL", "IDX", "A", false, "B", false); + parseValidate(null, "CREATE INDEX idx ON tbl(a ASC, b ASC)", null, "TBL", "IDX", "A", false, "B", false); + + parseValidate(null, "CREATE INDEX idx ON tbl(a DESC, b)", null, "TBL", "IDX", "A", true, "B", false); + parseValidate(null, "CREATE INDEX idx ON tbl(a, b DESC)", null, "TBL", "IDX", "A", false, "B", true); + parseValidate(null, "CREATE INDEX idx ON tbl(a DESC, b DESC)", null, "TBL", "IDX", "A", true, "B", true); + + parseValidate(null, "CREATE INDEX idx ON tbl(a ASC, b DESC)", null, "TBL", "IDX", "A", false, "B", true); + parseValidate(null, "CREATE INDEX idx ON tbl(a DESC, b ASC)", null, "TBL", "IDX", "A", true, "B", false); + + parseValidate(null, "CREATE INDEX idx ON tbl(a, b, c)", null, "TBL", "IDX", "A", false, 
"B", false, "C", false); + parseValidate(null, "CREATE INDEX idx ON tbl(a DESC, b, c)", null, "TBL", "IDX", "A", true, "B", false, "C", false); + parseValidate(null, "CREATE INDEX idx ON tbl(a, b DESC, c)", null, "TBL", "IDX", "A", false, "B", true, "C", false); + parseValidate(null, "CREATE INDEX idx ON tbl(a, b, c DESC)", null, "TBL", "IDX", "A", false, "B", false, "C", true); + + // Negative cases. + parseError(null, "CREATE INDEX idx ON tbl()", "Unexpected token"); + parseError(null, "CREATE INDEX idx ON tbl(a, a)", "Column already defined: A"); + parseError(null, "CREATE INDEX idx ON tbl(a, b, a)", "Column already defined: A"); + parseError(null, "CREATE INDEX idx ON tbl(b, a, a)", "Column already defined: A"); + + // Tests with schema. + parseValidate(null, "CREATE INDEX idx ON schema.tbl(a)", "SCHEMA", "TBL", "IDX", "A", false); + parseValidate(null, "CREATE INDEX idx ON \"schema\".tbl(a)", "schema", "TBL", "IDX", "A", false); + parseValidate(null, "CREATE INDEX idx ON \"sChema\".tbl(a)", "sChema", "TBL", "IDX", "A", false); + + parseValidate("SCHEMA", "CREATE INDEX idx ON tbl(a)", "SCHEMA", "TBL", "IDX", "A", false); + parseValidate("schema", "CREATE INDEX idx ON tbl(a)", "schema", "TBL", "IDX", "A", false); + parseValidate("sChema", "CREATE INDEX idx ON tbl(a)", "sChema", "TBL", "IDX", "A", false); + + // NOT EXISTS + SqlCreateIndexCommand cmd; + + cmd = parseValidate(null, "CREATE INDEX idx ON schema.tbl(a)", "SCHEMA", "TBL", "IDX", "A", false); + assertFalse(cmd.ifNotExists()); + + cmd = parseValidate(null, "CREATE INDEX IF NOT EXISTS idx ON schema.tbl(a)", "SCHEMA", "TBL", "IDX", "A", false); + assertTrue(cmd.ifNotExists()); + + parseError(null, "CREATE INDEX IF idx ON tbl(a)", "Unexpected token: \"IDX\""); + parseError(null, "CREATE INDEX IF NOT idx ON tbl(a)", "Unexpected token: \"IDX\""); + parseError(null, "CREATE INDEX IF EXISTS idx ON tbl(a)", "Unexpected token: \"EXISTS\""); + parseError(null, "CREATE INDEX NOT EXISTS idx ON tbl(a)", "Unexpected 
token: \"NOT\""); + + // SPATIAL + cmd = parseValidate(null, "CREATE INDEX idx ON schema.tbl(a)", "SCHEMA", "TBL", "IDX", "A", false); + assertFalse(cmd.spatial()); + + cmd = parseValidate(null, "CREATE SPATIAL INDEX idx ON schema.tbl(a)", "SCHEMA", "TBL", "IDX", "A", false); + assertTrue(cmd.spatial()); + + // UNIQUE + parseError(null, "CREATE UNIQUE INDEX idx ON tbl(a)", "Unsupported keyword: \"UNIQUE\""); + + // HASH + parseError(null, "CREATE HASH INDEX idx ON tbl(a)", "Unsupported keyword: \"HASH\""); + + // PRIMARY KEY + parseError(null, "CREATE PRIMARY KEY INDEX idx ON tbl(a)", "Unsupported keyword: \"PRIMARY\""); + } + + /** + * Make sure that parse error occurs. + * + * @param schema Schema. + * @param sql SQL. + * @param msg Expected error message. + */ + private static void parseError(final String schema, final String sql, String msg) { + GridTestUtils.assertThrows(null, new Callable() { + @Override public Void call() throws Exception { + new SqlParser(schema, sql).nextCommand(); + + return null; + } + }, SqlParseException.class, msg); + } + + /** + * Parse and validate SQL script. + * + * @param schema Schema. + * @param sql SQL. + * @param expSchemaName Expected schema name. + * @param expTblName Expected table name. + * @param expIdxName Expected index name. + * @param expColDefs Expected column definitions. + * @return Command. + */ + private static SqlCreateIndexCommand parseValidate(String schema, String sql, String expSchemaName, + String expTblName, String expIdxName, Object... expColDefs) { + SqlCreateIndexCommand cmd = (SqlCreateIndexCommand)new SqlParser(schema, sql).nextCommand(); + + validate(cmd, expSchemaName, expTblName, expIdxName, expColDefs); + + return cmd; + } + + /** + * Validate create index command. + * + * @param cmd Command. + * @param expSchemaName Expected schema name. + * @param expTblName Expected table name. + * @param expIdxName Expected index name. + * @param expColDefs Expected column definitions. 
+ */ + private static void validate(SqlCreateIndexCommand cmd, String expSchemaName, String expTblName, String expIdxName, + Object... expColDefs) { + assertEquals(expSchemaName, cmd.schemaName()); + assertEquals(expTblName, cmd.tableName()); + assertEquals(expIdxName, cmd.indexName()); + + if (F.isEmpty(expColDefs) || expColDefs.length % 2 == 1) + throw new IllegalArgumentException("Column definitions must be even."); + + Collection cols = cmd.columns(); + + assertEquals(expColDefs.length / 2, cols.size()); + + Iterator colIter = cols.iterator(); + + for (int i = 0; i < expColDefs.length;) { + SqlIndexColumn col = colIter.next(); + + String expColName = (String)expColDefs[i++]; + Boolean expDesc = (Boolean) expColDefs[i++]; + + assertEquals(expColName, col.name()); + assertEquals(expDesc, (Boolean)col.descending()); + } + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 8390ae800fba3..60b22055492c0 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -118,6 +118,9 @@ import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitor; import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitorClosure; import org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor; +import org.apache.ignite.internal.sql.SqlParser; +import org.apache.ignite.internal.sql.command.SqlCommand; +import org.apache.ignite.internal.sql.command.SqlCreateIndexCommand; import org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashMap; import org.apache.ignite.internal.util.GridEmptyCloseableIterator; import org.apache.ignite.internal.util.GridSpinBusyLock; @@ -1327,9 +1330,65 @@ UpdateResult 
runDistributedUpdate( }; } + /** + * Try executing query using native facilities. + * + * @param schemaName Schema name. + * @param qry Query. + * @return Result or {@code null} if cannot parse/process this query. + */ + private List>> tryQueryDistributedSqlFieldsNative(String schemaName, SqlFieldsQuery qry) { + // Heuristic check for fast return. + if (!qry.getSql().toUpperCase().contains("INDEX")) + return null; + + // Parse. + SqlCommand cmd; + + try { + SqlParser parser = new SqlParser(schemaName, qry.getSql()); + + cmd = parser.nextCommand(); + + // No support for multiple commands for now. + if (parser.nextCommand() != null) + return null; + + // Only CREATE INDEX is supported for now. + if (!(cmd instanceof SqlCreateIndexCommand)) + return null; + } + catch (Exception e) { + // Cannot parse, return. + if (log.isDebugEnabled()) + log.debug("Failed to parse SQL with native parser [qry=" + qry.getSql() + ", err=" + e + ']'); + + return null; + } + + // Execute. + try { + List>> ress = new ArrayList<>(1); + + FieldsQueryCursor> res = ddlProc.runDdlStatement(qry.getSql(), cmd); + + ress.add(res); + + return ress; + } + catch (IgniteCheckedException e) { + throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + qry.getSql() + ']', e); + } + } + /** {@inheritDoc} */ @Override public List>> queryDistributedSqlFields(String schemaName, SqlFieldsQuery qry, boolean keepBinary, GridQueryCancel cancel, @Nullable Integer mainCacheId, boolean failOnMultipleStmts) { + List>> res = tryQueryDistributedSqlFieldsNative(schemaName, qry); + + if (res != null) + return res; + Connection c = connectionForSchema(schemaName); final boolean enforceJoinOrder = qry.isEnforceJoinOrder(); @@ -1342,6 +1401,7 @@ UpdateResult runDistributedUpdate( H2TwoStepCachedQueryKey cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, sqlQry, grpByCollocated, distributedJoins, enforceJoinOrder, qry.isLocal()); + H2TwoStepCachedQuery cachedQry = twoStepCache.get(cachedQryKey); if 
(cachedQry != null) { @@ -1351,14 +1411,12 @@ UpdateResult runDistributedUpdate( List meta = cachedQry.meta(); - List>> res = Collections.singletonList(executeTwoStepsQuery(schemaName, qry.getPageSize(), qry.getPartitions(), + return Collections.singletonList(executeTwoStepsQuery(schemaName, qry.getPageSize(), qry.getPartitions(), qry.getArgs(), keepBinary, qry.isLazy(), qry.getTimeout(), cancel, sqlQry, enforceJoinOrder, twoStepQry, meta)); - - return res; } - List>> res = new ArrayList<>(1); + res = new ArrayList<>(1); Object[] argsOrig = qry.getArgs(); int firstArg = 0; diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java index d29a06399a7a9..fd425c2a03892 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java @@ -27,6 +27,7 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.cache.QueryIndex; +import org.apache.ignite.cache.QueryIndexType; import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.GridKernalContext; @@ -50,6 +51,9 @@ import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlStatement; import org.apache.ignite.internal.processors.query.schema.SchemaOperationException; +import org.apache.ignite.internal.sql.command.SqlCommand; +import org.apache.ignite.internal.sql.command.SqlCreateIndexCommand; +import org.apache.ignite.internal.sql.command.SqlIndexColumn; import org.apache.ignite.internal.util.future.GridFinishedFuture; import 
org.apache.ignite.internal.util.typedef.F; import org.h2.command.Prepared; @@ -86,6 +90,79 @@ public void start(final GridKernalContext ctx, IgniteH2Indexing idx) { this.idx = idx; } + /** + * Run DDL statement. + * + * @param sql Original SQL. + * @param cmd Command. + * @return Result. + * @throws IgniteCheckedException On error. + */ + @SuppressWarnings("unchecked") + public FieldsQueryCursor> runDdlStatement(String sql, SqlCommand cmd) throws IgniteCheckedException{ + IgniteInternalFuture fut; + + try { + if (cmd instanceof SqlCreateIndexCommand) { + SqlCreateIndexCommand cmd0 = (SqlCreateIndexCommand)cmd; + + GridH2Table tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName()); + + if (tbl == null) + throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd0.tableName()); + + assert tbl.rowDescriptor() != null; + + QueryIndex newIdx = new QueryIndex(); + + newIdx.setName(cmd0.indexName()); + + newIdx.setIndexType(cmd0.spatial() ? QueryIndexType.GEOSPATIAL : QueryIndexType.SORTED); + + LinkedHashMap flds = new LinkedHashMap<>(); + + // Let's replace H2's table and property names by those operated by GridQueryProcessor. 
+ GridQueryTypeDescriptor typeDesc = tbl.rowDescriptor().type(); + + for (SqlIndexColumn col : cmd0.columns()) { + GridQueryProperty prop = typeDesc.property(col.name()); + + if (prop == null) + throw new SchemaOperationException(SchemaOperationException.CODE_COLUMN_NOT_FOUND, col.name()); + + flds.put(prop.name(), !col.descending()); + } + + newIdx.setFields(flds); + + fut = ctx.query().dynamicIndexCreate(tbl.cacheName(), cmd.schemaName(), typeDesc.tableName(), + newIdx, cmd0.ifNotExists()); + } + else + throw new IgniteSQLException("Unsupported DDL operation: " + sql, + IgniteQueryErrorCode.UNSUPPORTED_OPERATION); + + if (fut != null) + fut.get(); + + QueryCursorImpl> resCur = (QueryCursorImpl>)new QueryCursorImpl(Collections.singletonList + (Collections.singletonList(0L)), null, false); + + resCur.fieldsMeta(UPDATE_RESULT_META); + + return resCur; + } + catch (SchemaOperationException e) { + throw convert(e); + } + catch (IgniteSQLException e) { + throw e; + } + catch (Exception e) { + throw new IgniteSQLException("Unexpected DDL operation failure: " + e.getMessage(), e); + } + } + /** * Execute DDL statement. 
* @@ -97,7 +174,6 @@ public void start(final GridKernalContext ctx, IgniteH2Indexing idx) { @SuppressWarnings({"unchecked", "ThrowableResultOfMethodCallIgnored"}) public FieldsQueryCursor> runDdlStatement(String sql, Prepared prepared) throws IgniteCheckedException { - IgniteInternalFuture fut = null; try { @@ -402,6 +478,8 @@ private static QueryEntity toQueryEntity(GridSqlCreateTable createTbl) { } } + assert valCol != null; + valTypeName = DataType.getTypeClassName(valCol.column().getType()); res.setValueFieldName(valCol.columnName()); diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java index 8d3025c0d0593..30a276486a601 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java @@ -155,6 +155,7 @@ import org.apache.ignite.internal.processors.query.h2.sql.H2CompareBigQueryDistributedJoinsTest; import org.apache.ignite.internal.processors.query.h2.sql.H2CompareBigQueryTest; import org.apache.ignite.internal.processors.sql.SqlConnectorConfigurationValidationSelfTest; +import org.apache.ignite.internal.sql.SqlParserSelfTest; import org.apache.ignite.spi.communication.tcp.GridOrderedMessageCancelSelfTest; import org.apache.ignite.testframework.IgniteTestSuite; @@ -169,6 +170,8 @@ public class IgniteCacheQuerySelfTestSuite extends TestSuite { public static TestSuite suite() throws Exception { IgniteTestSuite suite = new IgniteTestSuite("Ignite Cache Queries Test Suite"); + suite.addTestSuite(SqlParserSelfTest.class); + suite.addTestSuite(SqlConnectorConfigurationValidationSelfTest.class); suite.addTestSuite(ClientConnectorConfigurationValidationSelfTest.class); From e001887f0dcd9c08c1d2771625c181c9b48687a2 Mon Sep 17 00:00:00 2001 From: devozerov Date: Thu, 9 Nov 2017 
14:37:54 +0300 Subject: [PATCH 230/243] IGNITE-6848: SQL parser: support DROP INDEX command. This closes #3006. (cherry picked from commit a1b6a33) --- .../apache/ignite/internal/sql/SqlLexer.java | 6 ++ .../apache/ignite/internal/sql/SqlParser.java | 25 +++-- .../ignite/internal/sql/SqlParserUtils.java | 9 +- .../sql/command/SqlDropIndexCommand.java | 80 +++++++++++++++ .../sql/SqlParserAbstractSelfTest.java | 46 +++++++++ ...java => SqlParserCreateIndexSelfTest.java} | 46 +++------ .../sql/SqlParserDropIndexSelfTest.java | 99 +++++++++++++++++++ .../processors/query/h2/IgniteH2Indexing.java | 5 +- .../query/h2/ddl/DdlStatementsProcessor.java | 20 +++- .../IgniteCacheQuerySelfTestSuite.java | 6 +- 10 files changed, 287 insertions(+), 55 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlDropIndexCommand.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserAbstractSelfTest.java rename modules/core/src/test/java/org/apache/ignite/internal/sql/{SqlParserSelfTest.java => SqlParserCreateIndexSelfTest.java} (80%) create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserDropIndexSelfTest.java diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexer.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexer.java index a8009b7296f87..3fd6fa9da4720 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexer.java @@ -176,6 +176,10 @@ public boolean shift() { } } + token = null; + tokenPos = pos; + tokenTyp = SqlLexerTokenType.EOF; + return false; } @@ -191,6 +195,8 @@ public String token() { /** {@inheritDoc} */ public char tokenFirstChar() { + assert tokenTyp != SqlLexerTokenType.EOF; + return token.charAt(0); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java 
b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java index 9e0eee0767636..19f526d146ea6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java @@ -19,6 +19,7 @@ import org.apache.ignite.internal.sql.command.SqlCommand; import org.apache.ignite.internal.sql.command.SqlCreateIndexCommand; +import org.apache.ignite.internal.sql.command.SqlDropIndexCommand; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.sql.SqlKeyword.CREATE; @@ -27,10 +28,8 @@ import static org.apache.ignite.internal.sql.SqlKeyword.INDEX; import static org.apache.ignite.internal.sql.SqlKeyword.PRIMARY; import static org.apache.ignite.internal.sql.SqlKeyword.SPATIAL; -import static org.apache.ignite.internal.sql.SqlKeyword.TABLE; import static org.apache.ignite.internal.sql.SqlKeyword.UNIQUE; import static org.apache.ignite.internal.sql.SqlParserUtils.errorUnexpectedToken; -import static org.apache.ignite.internal.sql.SqlParserUtils.errorUnsupported; import static org.apache.ignite.internal.sql.SqlParserUtils.errorUnsupportedIfMatchesKeyword; import static org.apache.ignite.internal.sql.SqlParserUtils.matchesKeyword; @@ -139,9 +138,6 @@ private SqlCommand processCreate() { break; - case TABLE: - throw errorUnsupported(lex); - case SPATIAL: if (lex.shift() && matchesKeyword(lex, INDEX)) cmd = new SqlCreateIndexCommand().spatial(true); @@ -157,7 +153,7 @@ private SqlCommand processCreate() { errorUnsupportedIfMatchesKeyword(lex, HASH, PRIMARY, UNIQUE); } - throw errorUnexpectedToken(lex, INDEX, TABLE, SPATIAL); + throw errorUnexpectedToken(lex, INDEX, SPATIAL); } /** @@ -166,9 +162,20 @@ private SqlCommand processCreate() { * @return Command. 
*/ private SqlCommand processDrop() { - if (lex.shift() && lex.tokenType() == SqlLexerTokenType.DEFAULT) - throw errorUnsupported(lex); + if (lex.shift() && lex.tokenType() == SqlLexerTokenType.DEFAULT) { + SqlCommand cmd = null; + + switch (lex.token()) { + case INDEX: + cmd = new SqlDropIndexCommand(); + + break; + } + + if (cmd != null) + return cmd.parse(lex); + } - throw errorUnexpectedToken(lex, INDEX, TABLE); + throw errorUnexpectedToken(lex, INDEX); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParserUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParserUtils.java index cfe4b6f12b933..2f3b3dac5bfd0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParserUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParserUtils.java @@ -163,14 +163,7 @@ public static boolean isVaildIdentifier(SqlLexerToken token) { case DEFAULT: char c = token.tokenFirstChar(); - if ((c >= 'A' && c <= 'Z') || c == '_') { - if (SqlKeyword.isKeyword(token.token())) - throw errorUnexpectedToken(token, "[identifier]"); - - return true; - } - - throw error(token, "Illegal identifier name: " + token.token()); + return ((c >= 'A' && c <= 'Z') || c == '_') && !SqlKeyword.isKeyword(token.token()); case QUOTED: return true; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlDropIndexCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlDropIndexCommand.java new file mode 100644 index 0000000000000..1a1ea8757e9ee --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlDropIndexCommand.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql.command; + +import org.apache.ignite.internal.sql.SqlLexer; +import org.apache.ignite.internal.util.typedef.internal.S; + +import static org.apache.ignite.internal.sql.SqlKeyword.IF; +import static org.apache.ignite.internal.sql.SqlParserUtils.parseIfExists; +import static org.apache.ignite.internal.sql.SqlParserUtils.parseQualifiedIdentifier; + +/** + * DROP INDEX command. + */ +public class SqlDropIndexCommand implements SqlCommand { + /** Schema name. */ + private String schemaName; + + /** Index name. */ + private String idxName; + + /** IF EXISTS flag. */ + private boolean ifExists; + + /** {@inheritDoc} */ + @Override public String schemaName() { + return schemaName; + } + + /** {@inheritDoc} */ + @Override public void schemaName(String schemaName) { + this.schemaName = schemaName; + } + + /** + * @return Index name. + */ + public String indexName() { + return idxName; + } + + /** + * @return IF EXISTS flag. 
+ */ + public boolean ifExists() { + return ifExists; + } + + /** {@inheritDoc} */ + @Override public SqlCommand parse(SqlLexer lex) { + ifExists = parseIfExists(lex); + + SqlQualifiedName idxQName = parseQualifiedIdentifier(lex, IF); + + schemaName = idxQName.schemaName(); + idxName = idxQName.name(); + + return this; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(SqlDropIndexCommand.class, this); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserAbstractSelfTest.java new file mode 100644 index 0000000000000..c0952012e092f --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserAbstractSelfTest.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql; + +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +import java.util.concurrent.Callable; + +/** + * Common class for SQL parser tests. 
+ */ +@SuppressWarnings("ThrowableNotThrown") +public abstract class SqlParserAbstractSelfTest extends GridCommonAbstractTest { + /** + * Make sure that parse error occurs. + * + * @param schema Schema. + * @param sql SQL. + * @param msg Expected error message. + */ + protected static void assertParseError(final String schema, final String sql, String msg) { + GridTestUtils.assertThrows(null, new Callable() { + @Override public Void call() throws Exception { + new SqlParser(schema, sql).nextCommand(); + + return null; + } + }, SqlParseException.class, msg); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserCreateIndexSelfTest.java similarity index 80% rename from modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserSelfTest.java rename to modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserCreateIndexSelfTest.java index 98a6aae9b12f1..5de0a3ab4ec05 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserSelfTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserCreateIndexSelfTest.java @@ -20,18 +20,15 @@ import org.apache.ignite.internal.sql.command.SqlCreateIndexCommand; import org.apache.ignite.internal.sql.command.SqlIndexColumn; import org.apache.ignite.internal.util.typedef.F; -import org.apache.ignite.testframework.GridTestUtils; -import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import java.util.Collection; import java.util.Iterator; -import java.util.concurrent.Callable; /** - * Test for parser. + * Tests for SQL parser: CREATE INDEX. */ @SuppressWarnings({"UnusedReturnValue", "ThrowableNotThrown"}) -public class SqlParserSelfTest extends GridCommonAbstractTest { +public class SqlParserCreateIndexSelfTest extends SqlParserAbstractSelfTest { /** * Tests for CREATE INDEX command. 
* @@ -79,10 +76,10 @@ public void testCreateIndex() throws Exception { parseValidate(null, "CREATE INDEX idx ON tbl(a, b, c DESC)", null, "TBL", "IDX", "A", false, "B", false, "C", true); // Negative cases. - parseError(null, "CREATE INDEX idx ON tbl()", "Unexpected token"); - parseError(null, "CREATE INDEX idx ON tbl(a, a)", "Column already defined: A"); - parseError(null, "CREATE INDEX idx ON tbl(a, b, a)", "Column already defined: A"); - parseError(null, "CREATE INDEX idx ON tbl(b, a, a)", "Column already defined: A"); + assertParseError(null, "CREATE INDEX idx ON tbl()", "Unexpected token"); + assertParseError(null, "CREATE INDEX idx ON tbl(a, a)", "Column already defined: A"); + assertParseError(null, "CREATE INDEX idx ON tbl(a, b, a)", "Column already defined: A"); + assertParseError(null, "CREATE INDEX idx ON tbl(b, a, a)", "Column already defined: A"); // Tests with schema. parseValidate(null, "CREATE INDEX idx ON schema.tbl(a)", "SCHEMA", "TBL", "IDX", "A", false); @@ -102,10 +99,10 @@ public void testCreateIndex() throws Exception { cmd = parseValidate(null, "CREATE INDEX IF NOT EXISTS idx ON schema.tbl(a)", "SCHEMA", "TBL", "IDX", "A", false); assertTrue(cmd.ifNotExists()); - parseError(null, "CREATE INDEX IF idx ON tbl(a)", "Unexpected token: \"IDX\""); - parseError(null, "CREATE INDEX IF NOT idx ON tbl(a)", "Unexpected token: \"IDX\""); - parseError(null, "CREATE INDEX IF EXISTS idx ON tbl(a)", "Unexpected token: \"EXISTS\""); - parseError(null, "CREATE INDEX NOT EXISTS idx ON tbl(a)", "Unexpected token: \"NOT\""); + assertParseError(null, "CREATE INDEX IF idx ON tbl(a)", "Unexpected token: \"IDX\""); + assertParseError(null, "CREATE INDEX IF NOT idx ON tbl(a)", "Unexpected token: \"IDX\""); + assertParseError(null, "CREATE INDEX IF EXISTS idx ON tbl(a)", "Unexpected token: \"EXISTS\""); + assertParseError(null, "CREATE INDEX NOT EXISTS idx ON tbl(a)", "Unexpected token: \"NOT\""); // SPATIAL cmd = parseValidate(null, "CREATE INDEX idx ON 
schema.tbl(a)", "SCHEMA", "TBL", "IDX", "A", false); @@ -115,30 +112,13 @@ public void testCreateIndex() throws Exception { assertTrue(cmd.spatial()); // UNIQUE - parseError(null, "CREATE UNIQUE INDEX idx ON tbl(a)", "Unsupported keyword: \"UNIQUE\""); + assertParseError(null, "CREATE UNIQUE INDEX idx ON tbl(a)", "Unsupported keyword: \"UNIQUE\""); // HASH - parseError(null, "CREATE HASH INDEX idx ON tbl(a)", "Unsupported keyword: \"HASH\""); + assertParseError(null, "CREATE HASH INDEX idx ON tbl(a)", "Unsupported keyword: \"HASH\""); // PRIMARY KEY - parseError(null, "CREATE PRIMARY KEY INDEX idx ON tbl(a)", "Unsupported keyword: \"PRIMARY\""); - } - - /** - * Make sure that parse error occurs. - * - * @param schema Schema. - * @param sql SQL. - * @param msg Expected error message. - */ - private static void parseError(final String schema, final String sql, String msg) { - GridTestUtils.assertThrows(null, new Callable() { - @Override public Void call() throws Exception { - new SqlParser(schema, sql).nextCommand(); - - return null; - } - }, SqlParseException.class, msg); + assertParseError(null, "CREATE PRIMARY KEY INDEX idx ON tbl(a)", "Unsupported keyword: \"PRIMARY\""); } /** diff --git a/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserDropIndexSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserDropIndexSelfTest.java new file mode 100644 index 0000000000000..a0af3a62d9d7b --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserDropIndexSelfTest.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql; + +import org.apache.ignite.internal.sql.command.SqlDropIndexCommand; + +/** + * Tests for SQL parser: CREATE INDEX. + */ +public class SqlParserDropIndexSelfTest extends SqlParserAbstractSelfTest { + /** + * Tests for DROP INDEX command. + * + * @throws Exception If failed. + */ + public void testDropIndex() throws Exception { + // Base. + parseValidate(null, "DROP INDEX idx", null, "IDX"); + parseValidate(null, "DROP INDEX IDX", null, "IDX"); + parseValidate(null, "DROP INDEX iDx", null, "IDX"); + + parseValidate(null, "DROP INDEX \"idx\"", null, "idx"); + parseValidate(null, "DROP INDEX \"IDX\"", null, "IDX"); + parseValidate(null, "DROP INDEX \"iDx\"", null, "iDx"); + + assertParseError(null, "DROP INDEX", "Unexpected"); + + // Schema. 
+ parseValidate("SCHEMA", "DROP INDEX idx", "SCHEMA", "IDX"); + parseValidate("schema", "DROP INDEX idx", "schema", "IDX"); + parseValidate("sChema", "DROP INDEX idx", "sChema", "IDX"); + + parseValidate(null, "DROP INDEX \"SCHEMA\".idx", "SCHEMA", "IDX"); + parseValidate(null, "DROP INDEX \"schema\".idx", "schema", "IDX"); + parseValidate(null, "DROP INDEX \"sChema\".idx", "sChema", "IDX"); + + parseValidate(null, "DROP INDEX \"schema\".\"idx\"", "schema", "idx"); + + assertParseError(null, "DROP INDEX .idx", "Unexpected"); + + // IF EXISTS + SqlDropIndexCommand cmd; + + cmd = parseValidate(null, "DROP INDEX schema.idx", "SCHEMA", "IDX"); + assertFalse(cmd.ifExists()); + + cmd = parseValidate(null, "DROP INDEX IF EXISTS schema.idx", "SCHEMA", "IDX"); + assertTrue(cmd.ifExists()); + + assertParseError(null, "DROP INDEX IF idx", "Unexpected token: \"IDX\""); + + assertParseError(null, "DROP INDEX EXISTS idx", "Unexpected token: \"EXISTS\""); + } + + /** + * Parse and validate SQL script. + * + * @param schema Schema. + * @param sql SQL. + * @param expSchemaName Expected schema name. + * @param expIdxName Expected index name. + * @return Command. + */ + private static SqlDropIndexCommand parseValidate(String schema, String sql, String expSchemaName, + String expIdxName) { + SqlDropIndexCommand cmd = (SqlDropIndexCommand)new SqlParser(schema, sql).nextCommand(); + + validate(cmd, expSchemaName, expIdxName); + + return cmd; + } + + /** + * Validate command. + * + * @param cmd Command. + * @param expSchemaName Expected schema name. + * @param expIdxName Expected index name. 
+ */ + private static void validate(SqlDropIndexCommand cmd, String expSchemaName, String expIdxName) { + assertEquals(expSchemaName, cmd.schemaName()); + assertEquals(expIdxName, cmd.indexName()); + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 60b22055492c0..381a8dcde7378 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -121,6 +121,7 @@ import org.apache.ignite.internal.sql.SqlParser; import org.apache.ignite.internal.sql.command.SqlCommand; import org.apache.ignite.internal.sql.command.SqlCreateIndexCommand; +import org.apache.ignite.internal.sql.command.SqlDropIndexCommand; import org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashMap; import org.apache.ignite.internal.util.GridEmptyCloseableIterator; import org.apache.ignite.internal.util.GridSpinBusyLock; @@ -1354,8 +1355,8 @@ private List>> tryQueryDistributedSqlFieldsNative(Stri if (parser.nextCommand() != null) return null; - // Only CREATE INDEX is supported for now. - if (!(cmd instanceof SqlCreateIndexCommand)) + // Only CREATE/DROP INDEX is supported for now. 
+ if (!(cmd instanceof SqlCreateIndexCommand || cmd instanceof SqlDropIndexCommand)) return null; } catch (Exception e) { diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java index fd425c2a03892..3c8d9feae7450 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java @@ -53,6 +53,7 @@ import org.apache.ignite.internal.processors.query.schema.SchemaOperationException; import org.apache.ignite.internal.sql.command.SqlCommand; import org.apache.ignite.internal.sql.command.SqlCreateIndexCommand; +import org.apache.ignite.internal.sql.command.SqlDropIndexCommand; import org.apache.ignite.internal.sql.command.SqlIndexColumn; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.typedef.F; @@ -135,9 +136,26 @@ public FieldsQueryCursor> runDdlStatement(String sql, SqlCommand cmd) th newIdx.setFields(flds); - fut = ctx.query().dynamicIndexCreate(tbl.cacheName(), cmd.schemaName(), typeDesc.tableName(), + fut = ctx.query().dynamicIndexCreate(tbl.cacheName(), cmd0.schemaName(), typeDesc.tableName(), newIdx, cmd0.ifNotExists()); } + else if (cmd instanceof SqlDropIndexCommand) { + SqlDropIndexCommand cmd0 = (SqlDropIndexCommand)cmd; + + GridH2Table tbl = idx.dataTableForIndex(cmd0.schemaName(), cmd0.indexName()); + + if (tbl != null) { + fut = ctx.query().dynamicIndexDrop(tbl.cacheName(), cmd0.schemaName(), cmd0.indexName(), + cmd0.ifExists()); + } + else { + if (cmd0.ifExists()) + fut = new GridFinishedFuture(); + else + throw new SchemaOperationException(SchemaOperationException.CODE_INDEX_NOT_FOUND, + cmd0.indexName()); + } + } else throw new 
IgniteSQLException("Unsupported DDL operation: " + sql, IgniteQueryErrorCode.UNSUPPORTED_OPERATION); diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java index 30a276486a601..7b3b271a33c52 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java @@ -155,7 +155,8 @@ import org.apache.ignite.internal.processors.query.h2.sql.H2CompareBigQueryDistributedJoinsTest; import org.apache.ignite.internal.processors.query.h2.sql.H2CompareBigQueryTest; import org.apache.ignite.internal.processors.sql.SqlConnectorConfigurationValidationSelfTest; -import org.apache.ignite.internal.sql.SqlParserSelfTest; +import org.apache.ignite.internal.sql.SqlParserCreateIndexSelfTest; +import org.apache.ignite.internal.sql.SqlParserDropIndexSelfTest; import org.apache.ignite.spi.communication.tcp.GridOrderedMessageCancelSelfTest; import org.apache.ignite.testframework.IgniteTestSuite; @@ -170,7 +171,8 @@ public class IgniteCacheQuerySelfTestSuite extends TestSuite { public static TestSuite suite() throws Exception { IgniteTestSuite suite = new IgniteTestSuite("Ignite Cache Queries Test Suite"); - suite.addTestSuite(SqlParserSelfTest.class); + suite.addTestSuite(SqlParserCreateIndexSelfTest.class); + suite.addTestSuite(SqlParserDropIndexSelfTest.class); suite.addTestSuite(SqlConnectorConfigurationValidationSelfTest.class); suite.addTestSuite(ClientConnectorConfigurationValidationSelfTest.class); From e1ee5ca25ca3c609aa72f8035c0c2750eec07e6e Mon Sep 17 00:00:00 2001 From: devozerov Date: Thu, 14 Dec 2017 15:59:37 +0300 Subject: [PATCH 231/243] IGNITE-7200: SQL: simplified DML module structure and restored encapsulation. This closes #3225. 
(cherry picked from commit 03bb551) --- .../query/h2/DmlStatementsProcessor.java | 637 ++---------------- .../processors/query/h2/UpdateResult.java | 4 +- .../query/h2/{sql => dml}/DmlAstUtils.java | 99 +-- .../query/h2/dml/DmlBatchSender.java | 232 +++++++ .../query/h2/dml/DmlDistributedPlanInfo.java | 56 ++ .../h2/dml/DmlPageProcessingErrorResult.java | 76 +++ .../query/h2/dml/DmlPageProcessingResult.java | 68 ++ .../processors/query/h2/dml/DmlUtils.java | 118 ++++ .../processors/query/h2/dml/FastUpdate.java | 175 +++++ .../query/h2/dml/FastUpdateArguments.java | 53 -- .../processors/query/h2/dml/UpdatePlan.java | 389 ++++++++--- .../query/h2/dml/UpdatePlanBuilder.java | 82 ++- .../query/h2/sql/GridSqlQueryParser.java | 1 + 13 files changed, 1175 insertions(+), 815 deletions(-) rename modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/{sql => dml}/DmlAstUtils.java (88%) create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlBatchSender.java create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlDistributedPlanInfo.java create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlPageProcessingErrorResult.java create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlPageProcessingResult.java create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlUtils.java create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdate.java delete mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdateArguments.java diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java 
b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java index 9e55442a422d1..fc4b94498bd9d 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java @@ -17,42 +17,29 @@ package org.apache.ignite.internal.processors.query.h2; -import java.lang.reflect.Array; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.SQLException; -import java.sql.Time; -import java.sql.Timestamp; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; -import java.util.Date; -import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; -import java.util.LinkedHashSet; import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.UUID; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import javax.cache.processor.EntryProcessor; import javax.cache.processor.EntryProcessorException; -import javax.cache.processor.EntryProcessorResult; import javax.cache.processor.MutableEntry; -import org.apache.ignite.IgniteCache; + import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteDataStreamer; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; -import org.apache.ignite.binary.BinaryObject; -import org.apache.ignite.binary.BinaryObjectBuilder; import org.apache.ignite.cache.query.SqlFieldsQuery; -import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.internal.GridKernalContext; -import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheOperationContext; -import org.apache.ignite.internal.processors.cache.GridCacheAdapter; import org.apache.ignite.internal.processors.cache.GridCacheContext; import 
org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; @@ -61,21 +48,19 @@ import org.apache.ignite.internal.processors.query.GridQueryCancel; import org.apache.ignite.internal.processors.query.GridQueryFieldsResult; import org.apache.ignite.internal.processors.query.GridQueryFieldsResultAdapter; -import org.apache.ignite.internal.processors.query.GridQueryProperty; -import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; import org.apache.ignite.internal.processors.query.IgniteSQLException; -import org.apache.ignite.internal.processors.query.QueryUtils; -import org.apache.ignite.internal.processors.query.h2.dml.FastUpdateArguments; +import org.apache.ignite.internal.processors.query.h2.dml.DmlBatchSender; +import org.apache.ignite.internal.processors.query.h2.dml.DmlDistributedPlanInfo; +import org.apache.ignite.internal.processors.query.h2.dml.FastUpdate; import org.apache.ignite.internal.processors.query.h2.dml.UpdateMode; import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan; import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlanBuilder; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser; import org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashMap; import org.apache.ignite.internal.util.lang.IgniteSingletonIterator; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.T3; import org.apache.ignite.internal.util.typedef.X; -import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteInClosure; @@ -85,19 +70,10 @@ import org.h2.command.dml.Insert; import org.h2.command.dml.Merge; import org.h2.command.dml.Update; -import 
org.h2.table.Column; -import org.h2.util.DateTimeUtils; -import org.h2.util.LocalDateTimeUtils; -import org.h2.value.Value; -import org.h2.value.ValueDate; -import org.h2.value.ValueTime; -import org.h2.value.ValueTimestamp; -import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.createJdbcSqlException; import static org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing.UPDATE_RESULT_META; -import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.DEFAULT_COLUMNS_COUNT; /** * @@ -142,7 +118,7 @@ public void onCacheStop(String cacheName) { while (iter.hasNext()) { UpdatePlan plan = iter.next().getValue(); - if (F.eq(cacheName, plan.tbl.cacheName())) + if (F.eq(cacheName, plan.cacheContext().name())) iter.remove(); } } @@ -169,7 +145,7 @@ private UpdateResult updateSqlFields(String schemaName, Connection conn, Prepare UpdatePlan plan = getPlanForStatement(schemaName, conn, prepared, fieldsQry, loc, null); - GridCacheContext cctx = plan.tbl.rowDescriptor().context(); + GridCacheContext cctx = plan.cacheContext(); for (int i = 0; i < DFLT_DML_RERUN_ATTEMPTS; i++) { CacheOperationContext opCtx = cctx.operationContextPerCall(); @@ -281,20 +257,20 @@ long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Obje UpdatePlan plan = UpdatePlanBuilder.planForStatement(p, true, idx, null, null, null); - if (!F.eq(streamer.cacheName(), plan.tbl.rowDescriptor().context().name())) + if (!F.eq(streamer.cacheName(), plan.cacheContext().name())) throw new IgniteSQLException("Cross cache streaming is not supported, please specify cache explicitly" + " in connection options", IgniteQueryErrorCode.UNSUPPORTED_OPERATION); - if (plan.mode == UpdateMode.INSERT && plan.rowsNum > 0) { - assert plan.isLocSubqry; + if (plan.mode() == UpdateMode.INSERT && plan.rowCount() > 0) { + assert plan.isLocalSubquery(); - final 
GridCacheContext cctx = plan.tbl.rowDescriptor().context(); + final GridCacheContext cctx = plan.cacheContext(); QueryCursorImpl> cur; - final ArrayList> data = new ArrayList<>(plan.rowsNum); + final ArrayList> data = new ArrayList<>(plan.rowCount()); - final GridQueryFieldsResult res = idx.queryLocalSqlFields(idx.schema(cctx.name()), plan.selectQry, + final GridQueryFieldsResult res = idx.queryLocalSqlFields(idx.schema(cctx.name()), plan.selectQuery(), F.asList(args), null, false, 0, null); QueryCursorImpl> stepCur = new QueryCursorImpl<>(new Iterable>() { @@ -317,18 +293,18 @@ long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Obje } }, null); - if (plan.rowsNum == 1) { - IgniteBiTuple t = rowToKeyValue(cctx, cur.iterator().next(), plan); + if (plan.rowCount() == 1) { + IgniteBiTuple t = plan.processRow(cur.iterator().next()); streamer.addData(t.getKey(), t.getValue()); return 1; } - Map rows = new LinkedHashMap<>(plan.rowsNum); + Map rows = new LinkedHashMap<>(plan.rowCount()); for (List row : cur) { - final IgniteBiTuple t = rowToKeyValue(cctx, row, plan); + final IgniteBiTuple t = plan.processRow(row); rows.put(t.getKey(), t.getValue()); } @@ -367,13 +343,15 @@ private UpdateResult executeUpdateStatement(String schemaName, final GridCacheCo UpdatePlan plan = getPlanForStatement(schemaName, c, prepared, fieldsQry, loc, errKeysPos); - if (plan.fastUpdateArgs != null) { + FastUpdate fastUpdate = plan.fastUpdate(); + + if (fastUpdate != null) { assert F.isEmpty(failedKeys) && errKeysPos == null; - return doFastUpdate(plan, fieldsQry.getArgs()); + return fastUpdate.execute(plan.cacheContext().cache(), fieldsQry.getArgs()); } - if (plan.distributed != null) { + if (plan.distributedPlan() != null) { UpdateResult result = doDistributedUpdate(schemaName, fieldsQry, plan, cancel); // null is returned in case not all nodes support distributed DML. 
@@ -381,14 +359,14 @@ private UpdateResult executeUpdateStatement(String schemaName, final GridCacheCo return result; } - assert !F.isEmpty(plan.selectQry); + assert !F.isEmpty(plan.selectQuery()); QueryCursorImpl> cur; // Do a two-step query only if locality flag is not set AND if plan's SELECT corresponds to an actual // sub-query and not some dummy stuff like "select 1, 2, 3;" - if (!loc && !plan.isLocSubqry) { - SqlFieldsQuery newFieldsQry = new SqlFieldsQuery(plan.selectQry, fieldsQry.isCollocated()) + if (!loc && !plan.isLocalSubquery()) { + SqlFieldsQuery newFieldsQry = new SqlFieldsQuery(plan.selectQuery(), fieldsQry.isCollocated()) .setArgs(fieldsQry.getArgs()) .setDistributedJoins(fieldsQry.isDistributedJoins()) .setEnforceJoinOrder(fieldsQry.isEnforceJoinOrder()) @@ -400,7 +378,7 @@ private UpdateResult executeUpdateStatement(String schemaName, final GridCacheCo cancel, mainCacheId, true).get(0); } else { - final GridQueryFieldsResult res = idx.queryLocalSqlFields(schemaName, plan.selectQry, + final GridQueryFieldsResult res = idx.queryLocalSqlFields(schemaName, plan.selectQuery(), F.asList(fieldsQry.getArgs()), filters, fieldsQry.isEnforceJoinOrder(), fieldsQry.getTimeout(), cancel); cur = new QueryCursorImpl<>(new Iterable>() { @@ -430,7 +408,7 @@ private UpdateResult executeUpdateStatement(String schemaName, final GridCacheCo */ private UpdateResult processDmlSelectResult(GridCacheContext cctx, UpdatePlan plan, Iterable> cursor, int pageSize) throws IgniteCheckedException { - switch (plan.mode) { + switch (plan.mode()) { case MERGE: return new UpdateResult(doMerge(plan, cursor, pageSize), X.EMPTY_OBJECT_ARRAY); @@ -444,7 +422,7 @@ private UpdateResult processDmlSelectResult(GridCacheContext cctx, UpdatePlan pl return doDelete(cctx, cursor, pageSize); default: - throw new IgniteSQLException("Unexpected DML operation [mode=" + plan.mode + ']', + throw new IgniteSQLException("Unexpected DML operation [mode=" + plan.mode() + ']', 
IgniteQueryErrorCode.UNEXPECTED_OPERATION); } } @@ -479,46 +457,6 @@ private UpdatePlan getPlanForStatement(String schema, Connection conn, Prepared return res; } - /** - * Perform single cache operation based on given args. - * @param plan Update plan. - * @param args Query parameters. - * @return 1 if an item was affected, 0 otherwise. - * @throws IgniteCheckedException if failed. - */ - @SuppressWarnings({"unchecked", "ConstantConditions"}) - private static UpdateResult doFastUpdate(UpdatePlan plan, Object[] args) throws IgniteCheckedException { - GridCacheContext cctx = plan.tbl.rowDescriptor().context(); - - FastUpdateArguments singleUpdate = plan.fastUpdateArgs; - - assert singleUpdate != null; - - boolean valBounded = (singleUpdate.val != FastUpdateArguments.NULL_ARGUMENT); - - if (singleUpdate.newVal != FastUpdateArguments.NULL_ARGUMENT) { // Single item UPDATE - Object key = singleUpdate.key.apply(args); - Object newVal = singleUpdate.newVal.apply(args); - - if (valBounded) { - Object val = singleUpdate.val.apply(args); - - return (cctx.cache().replace(key, val, newVal) ? UpdateResult.ONE : UpdateResult.ZERO); - } - else - return (cctx.cache().replace(key, newVal) ? UpdateResult.ONE : UpdateResult.ZERO); - } - else { // Single item DELETE - Object key = singleUpdate.key.apply(args); - Object val = singleUpdate.val.apply(args); - - if (singleUpdate.val == FastUpdateArguments.NULL_ARGUMENT) // No _val bound in source query - return cctx.cache().remove(key) ? UpdateResult.ONE : UpdateResult.ZERO; - else - return cctx.cache().remove(key, val) ? UpdateResult.ONE : UpdateResult.ZERO; - } - } - /** * @param schemaName Schema name. * @param fieldsQry Initial query. 
@@ -529,13 +467,15 @@ private static UpdateResult doFastUpdate(UpdatePlan plan, Object[] args) throws */ private UpdateResult doDistributedUpdate(String schemaName, SqlFieldsQuery fieldsQry, UpdatePlan plan, GridQueryCancel cancel) throws IgniteCheckedException { - assert plan.distributed != null; + DmlDistributedPlanInfo distributedPlan = plan.distributedPlan(); + + assert distributedPlan != null; if (cancel == null) cancel = new GridQueryCancel(); - return idx.runDistributedUpdate(schemaName, fieldsQry, plan.distributed.getCacheIds(), - plan.distributed.isReplicatedOnly(), cancel); + return idx.runDistributedUpdate(schemaName, fieldsQry, distributedPlan.getCacheIds(), + distributedPlan.isReplicatedOnly(), cancel); } /** @@ -548,7 +488,7 @@ private UpdateResult doDistributedUpdate(String schemaName, SqlFieldsQuery field @SuppressWarnings({"unchecked", "ConstantConditions", "ThrowableResultOfMethodCallIgnored"}) private UpdateResult doDelete(GridCacheContext cctx, Iterable> cursor, int pageSize) throws IgniteCheckedException { - BatchSender sender = new BatchSender(cctx, pageSize); + DmlBatchSender sender = new DmlBatchSender(cctx, pageSize); for (List row : cursor) { if (row.size() != 2) { @@ -594,84 +534,18 @@ private UpdateResult doDelete(GridCacheContext cctx, Iterable> cursor, i @SuppressWarnings({"unchecked", "ThrowableResultOfMethodCallIgnored"}) private UpdateResult doUpdate(UpdatePlan plan, Iterable> cursor, int pageSize) throws IgniteCheckedException { - GridH2RowDescriptor desc = plan.tbl.rowDescriptor(); - - GridCacheContext cctx = desc.context(); - - boolean bin = cctx.binaryMarshaller(); - - String[] updatedColNames = plan.colNames; - - int valColIdx = plan.valColIdx; + GridCacheContext cctx = plan.cacheContext(); - boolean hasNewVal = (valColIdx != -1); - - // Statement updates distinct properties if it does not have _val in updated columns list - // or if its list of updated columns includes only _val, i.e. is single element. 
- boolean hasProps = !hasNewVal || updatedColNames.length > 1; - - BatchSender sender = new BatchSender(cctx, pageSize); + DmlBatchSender sender = new DmlBatchSender(cctx, pageSize); for (List row : cursor) { - Object key = row.get(0); - - Object newVal; - - Map newColVals = new HashMap<>(); - - for (int i = 0; i < plan.colNames.length; i++) { - if (hasNewVal && i == valColIdx - 2) - continue; - - GridQueryProperty prop = plan.tbl.rowDescriptor().type().property(plan.colNames[i]); - - assert prop != null : "Unknown property: " + plan.colNames[i]; - - newColVals.put(plan.colNames[i], convert(row.get(i + 2), desc, prop.type(), plan.colTypes[i])); - } - - newVal = plan.valSupplier.apply(row); - - if (newVal == null) - throw new IgniteSQLException("New value for UPDATE must not be null", IgniteQueryErrorCode.NULL_VALUE); - - // Skip key and value - that's why we start off with 3rd column - for (int i = 0; i < plan.tbl.getColumns().length - DEFAULT_COLUMNS_COUNT; i++) { - Column c = plan.tbl.getColumn(i + DEFAULT_COLUMNS_COUNT); - - if (desc.isKeyValueOrVersionColumn(c.getColumnId())) - continue; - - GridQueryProperty prop = desc.type().property(c.getName()); - - if (prop.key()) - continue; // Don't get values of key's columns - we won't use them anyway - - boolean hasNewColVal = newColVals.containsKey(c.getName()); - - if (!hasNewColVal) - continue; - - Object colVal = newColVals.get(c.getName()); - - // UPDATE currently does not allow to modify key or its fields, so we must be safe to pass null as key. 
- desc.setColumnValue(null, newVal, colVal, i); - } - - if (bin && hasProps) { - assert newVal instanceof BinaryObjectBuilder; - - newVal = ((BinaryObjectBuilder) newVal).build(); - } - - desc.type().validateKeyAndValue(key, newVal); - - Object srcVal = row.get(1); + T3 row0 = plan.processRowForUpdate(row); - if (bin && !(srcVal instanceof BinaryObject)) - srcVal = cctx.grid().binary().toBinary(srcVal); + Object key = row0.get1(); + Object oldVal = row0.get2(); + Object newVal = row0.get3(); - sender.add(key, new ModifyingEntryProcessor(srcVal, new EntryValueUpdater(newVal))); + sender.add(key, new ModifyingEntryProcessor(oldVal, new EntryValueUpdater(newVal))); } sender.flush(); @@ -698,120 +572,6 @@ private UpdateResult doUpdate(UpdatePlan plan, Iterable> cursor, int pag return new UpdateResult(sender.updateCount(), sender.failedKeys().toArray()); } - /** - * Convert value to column's expected type by means of H2. - * - * @param val Source value. - * @param desc Row descriptor. - * @param expCls Expected value class. - * @param type Expected column type to convert to. - * @return Converted object. - * @throws IgniteCheckedException if failed. - */ - @SuppressWarnings({"ConstantConditions", "SuspiciousSystemArraycopy"}) - private static Object convert(Object val, GridH2RowDescriptor desc, Class expCls, int type) - throws IgniteCheckedException { - if (val == null) - return null; - - Class currCls = val.getClass(); - - try { - if (val instanceof Date && currCls != Date.class && expCls == Date.class) { - // H2 thinks that java.util.Date is always a Timestamp, while binary marshaller expects - // precise Date instance. Let's satisfy it. 
- return new Date(((Date) val).getTime()); - } - - // User-given UUID is always serialized by H2 to byte array, so we have to deserialize manually - if (type == Value.UUID && currCls == byte[].class) - return U.unmarshal(desc.context().marshaller(), (byte[]) val, - U.resolveClassLoader(desc.context().gridConfig())); - - if (LocalDateTimeUtils.isJava8DateApiPresent()) { - if (val instanceof Timestamp && LocalDateTimeUtils.isLocalDateTime(expCls)) - return LocalDateTimeUtils.valueToLocalDateTime(ValueTimestamp.get((Timestamp) val)); - - if (val instanceof Date && LocalDateTimeUtils.isLocalDate(expCls)) - return LocalDateTimeUtils.valueToLocalDate(ValueDate.fromDateValue( - DateTimeUtils.dateValueFromDate(((Date) val).getTime()))); - - if (val instanceof Time && LocalDateTimeUtils.isLocalTime(expCls)) - return LocalDateTimeUtils.valueToLocalTime(ValueTime.get((Time) val)); - } - - // We have to convert arrays of reference types manually - - // see https://issues.apache.org/jira/browse/IGNITE-4327 - // Still, we only can convert from Object[] to something more precise. - if (type == Value.ARRAY && currCls != expCls) { - if (currCls != Object[].class) - throw new IgniteCheckedException("Unexpected array type - only conversion from Object[] " + - "is assumed"); - - // Why would otherwise type be Value.ARRAY? - assert expCls.isArray(); - - Object[] curr = (Object[]) val; - - Object newArr = Array.newInstance(expCls.getComponentType(), curr.length); - - System.arraycopy(curr, 0, newArr, 0, curr.length); - - return newArr; - } - - return H2Utils.convert(val, desc, type); - } - catch (Exception e) { - throw new IgniteSQLException("Value conversion failed [from=" + currCls.getName() + ", to=" + - expCls.getName() +']', IgniteQueryErrorCode.CONVERSION_FAILED, e); - } - } - - /** - * Process errors of entry processor - split the keys into duplicated/concurrently modified and those whose - * processing yielded an exception. 
- * - * @param res Result of {@link GridCacheAdapter#invokeAll)} - * @return pair [array of duplicated/concurrently modified keys, SQL exception for erroneous keys] (exception is - * null if all keys are duplicates/concurrently modified ones). - */ - private static PageProcessingErrorResult splitErrors(Map> res) { - Set errKeys = new LinkedHashSet<>(res.keySet()); - - SQLException currSqlEx = null; - - SQLException firstSqlEx = null; - - int errors = 0; - - // Let's form a chain of SQL exceptions - for (Map.Entry> e : res.entrySet()) { - try { - e.getValue().get(); - } - catch (EntryProcessorException ex) { - SQLException next = createJdbcSqlException("Failed to process key '" + e.getKey() + '\'', - IgniteQueryErrorCode.ENTRY_PROCESSING); - - next.initCause(ex); - - if (currSqlEx != null) - currSqlEx.setNextException(next); - else - firstSqlEx = next; - - currSqlEx = next; - - errKeys.remove(e.getKey()); - - errors++; - } - } - - return new PageProcessingErrorResult(errKeys.toArray(), firstSqlEx, errors); - } - /** * Execute MERGE statement plan. * @param cursor Cursor to take inserted data from. 
@@ -821,13 +581,11 @@ private static PageProcessingErrorResult splitErrors(Map> cursor, int pageSize) throws IgniteCheckedException { - GridH2RowDescriptor desc = plan.tbl.rowDescriptor(); - - GridCacheContext cctx = desc.context(); + GridCacheContext cctx = plan.cacheContext(); // If we have just one item to put, just do so - if (plan.rowsNum == 1) { - IgniteBiTuple t = rowToKeyValue(cctx, cursor.iterator().next(), plan); + if (plan.rowCount() == 1) { + IgniteBiTuple t = plan.processRow(cursor.iterator().next()); cctx.cache().put(t.getKey(), t.getValue()); @@ -841,7 +599,7 @@ private long doMerge(UpdatePlan plan, Iterable> cursor, int pageSize) th for (Iterator> it = cursor.iterator(); it.hasNext();) { List row = it.next(); - IgniteBiTuple t = rowToKeyValue(cctx, row, plan); + IgniteBiTuple t = plan.processRow(row); rows.put(t.getKey(), t.getValue()); @@ -868,13 +626,11 @@ private long doMerge(UpdatePlan plan, Iterable> cursor, int pageSize) th */ @SuppressWarnings({"unchecked", "ConstantConditions"}) private long doInsert(UpdatePlan plan, Iterable> cursor, int pageSize) throws IgniteCheckedException { - GridH2RowDescriptor desc = plan.tbl.rowDescriptor(); - - GridCacheContext cctx = desc.context(); + GridCacheContext cctx = plan.cacheContext(); // If we have just one item to put, just do so - if (plan.rowsNum == 1) { - IgniteBiTuple t = rowToKeyValue(cctx, cursor.iterator().next(), plan); + if (plan.rowCount() == 1) { + IgniteBiTuple t = plan.processRow(cursor.iterator().next()); if (cctx.cache().putIfAbsent(t.getKey(), t.getValue())) return 1; @@ -884,10 +640,10 @@ private long doInsert(UpdatePlan plan, Iterable> cursor, int pageSize) t } else { // Keys that failed to INSERT due to duplication. 
- BatchSender sender = new BatchSender(cctx, pageSize); + DmlBatchSender sender = new DmlBatchSender(cctx, pageSize); for (List row : cursor) { - final IgniteBiTuple keyValPair = rowToKeyValue(cctx, row, plan); + final IgniteBiTuple keyValPair = plan.processRow(row); sender.add(keyValPair.getKey(), new InsertEntryProcessor(keyValPair.getValue())); } @@ -915,124 +671,6 @@ private long doInsert(UpdatePlan plan, Iterable> cursor, int pageSize) t } } - /** - * Execute given entry processors and collect errors, if any. - * @param cctx Cache context. - * @param rows Rows to process. - * @return Triple [number of rows actually changed; keys that failed to update (duplicates or concurrently - * updated ones); chain of exceptions for all keys whose processing resulted in error, or null for no errors]. - * @throws IgniteCheckedException If failed. - */ - @SuppressWarnings({"unchecked", "ConstantConditions"}) - private static PageProcessingResult processPage(GridCacheContext cctx, - Map> rows) throws IgniteCheckedException { - Map> res = cctx.cache().invokeAll(rows); - - if (F.isEmpty(res)) - return new PageProcessingResult(rows.size(), null, null); - - PageProcessingErrorResult splitRes = splitErrors(res); - - int keysCnt = splitRes.errKeys.length; - - return new PageProcessingResult(rows.size() - keysCnt - splitRes.cnt, splitRes.errKeys, splitRes.ex); - } - - /** - * Convert row presented as an array of Objects into key-value pair to be inserted to cache. - * @param cctx Cache context. - * @param row Row to process. - * @param plan Update plan. - * @throws IgniteCheckedException if failed. 
- */ - @SuppressWarnings({"unchecked", "ConstantConditions", "ResultOfMethodCallIgnored"}) - private IgniteBiTuple rowToKeyValue(GridCacheContext cctx, List row, UpdatePlan plan) - throws IgniteCheckedException { - GridH2RowDescriptor rowDesc = plan.tbl.rowDescriptor(); - GridQueryTypeDescriptor desc = rowDesc.type(); - - Object key = plan.keySupplier.apply(row); - - if (QueryUtils.isSqlType(desc.keyClass())) { - assert plan.keyColIdx != -1; - - key = convert(key, rowDesc, desc.keyClass(), plan.colTypes[plan.keyColIdx]); - } - - Object val = plan.valSupplier.apply(row); - - if (QueryUtils.isSqlType(desc.valueClass())) { - assert plan.valColIdx != -1; - - val = convert(val, rowDesc, desc.valueClass(), plan.colTypes[plan.valColIdx]); - } - - if (key == null) { - if (F.isEmpty(desc.keyFieldName())) - throw new IgniteSQLException("Key for INSERT or MERGE must not be null", IgniteQueryErrorCode.NULL_KEY); - else - throw new IgniteSQLException("Null value is not allowed for column '" + desc.keyFieldName() + "'", - IgniteQueryErrorCode.NULL_KEY); - } - - if (val == null) { - if (F.isEmpty(desc.valueFieldName())) - throw new IgniteSQLException("Value for INSERT, MERGE, or UPDATE must not be null", - IgniteQueryErrorCode.NULL_VALUE); - else - throw new IgniteSQLException("Null value is not allowed for column '" + desc.valueFieldName() + "'", - IgniteQueryErrorCode.NULL_VALUE); - } - - Map newColVals = new HashMap<>(); - - for (int i = 0; i < plan.colNames.length; i++) { - if (i == plan.keyColIdx || i == plan.valColIdx) - continue; - - String colName = plan.colNames[i]; - - GridQueryProperty prop = desc.property(colName); - - assert prop != null; - - Class expCls = prop.type(); - - newColVals.put(colName, convert(row.get(i), rowDesc, expCls, plan.colTypes[i])); - } - - // We update columns in the order specified by the table for a reason - table's - // column order preserves their precedence for correct update of nested properties. 
- Column[] cols = plan.tbl.getColumns(); - - // First 3 columns are _key, _val and _ver. Skip 'em. - for (int i = DEFAULT_COLUMNS_COUNT; i < cols.length; i++) { - if (plan.tbl.rowDescriptor().isKeyValueOrVersionColumn(i)) - continue; - - String colName = cols[i].getName(); - - if (!newColVals.containsKey(colName)) - continue; - - Object colVal = newColVals.get(colName); - - desc.setValue(colName, key, val, colVal); - } - - if (cctx.binaryMarshaller()) { - if (key instanceof BinaryObjectBuilder) - key = ((BinaryObjectBuilder) key).build(); - - if (val instanceof BinaryObjectBuilder) - val = ((BinaryObjectBuilder) val).build(); - } - - desc.validateKeyAndValue(key, val); - - return new IgniteBiTuple<>(key, val); - } - /** * * @param schemaName Schema name. @@ -1164,7 +802,7 @@ static boolean isDmlStatement(Prepared stmt) { static void checkUpdateResult(UpdateResult r) { if (!F.isEmpty(r.errorKeys())) { String msg = "Failed to update some keys because they had been modified concurrently " + - "[keys=" + r.errorKeys() + ']'; + "[keys=" + Arrays.toString(r.errorKeys()) + ']'; SQLException conEx = createJdbcSqlException(msg, IgniteQueryErrorCode.CONCURRENT_UPDATE); @@ -1172,175 +810,4 @@ static void checkUpdateResult(UpdateResult r) { } } - /** Result of processing an individual page with {@link IgniteCache#invokeAll} including error details, if any. */ - private final static class PageProcessingResult { - /** Number of successfully processed items. */ - final long cnt; - - /** Keys that failed to be updated or deleted due to concurrent modification of values. */ - @NotNull - final Object[] errKeys; - - /** Chain of exceptions corresponding to failed keys. Null if no keys yielded an exception. 
*/ - final SQLException ex; - - /** */ - @SuppressWarnings("ConstantConditions") - private PageProcessingResult(long cnt, Object[] errKeys, SQLException ex) { - this.cnt = cnt; - this.errKeys = U.firstNotNull(errKeys, X.EMPTY_OBJECT_ARRAY); - this.ex = ex; - } - } - - /** Result of splitting keys whose processing resulted into an exception from those skipped by - * logic of {@link EntryProcessor}s (most likely INSERT duplicates, or UPDATE/DELETE keys whose values - * had been modified concurrently), counting and collecting entry processor exceptions. - */ - private final static class PageProcessingErrorResult { - /** Keys that failed to be processed by {@link EntryProcessor} (not due to an exception). */ - @NotNull - final Object[] errKeys; - - /** Number of entries whose processing resulted into an exception. */ - final int cnt; - - /** Chain of exceptions corresponding to failed keys. Null if no keys yielded an exception. */ - final SQLException ex; - - /** */ - @SuppressWarnings("ConstantConditions") - private PageProcessingErrorResult(@NotNull Object[] errKeys, SQLException ex, int exCnt) { - errKeys = U.firstNotNull(errKeys, X.EMPTY_OBJECT_ARRAY); - // When exceptions count must be zero, exceptions chain must be not null, and vice versa. - assert exCnt == 0 ^ ex != null; - - this.errKeys = errKeys; - this.cnt = exCnt; - this.ex = ex; - } - } - - /** - * Batch sender class. - */ - private static class BatchSender { - /** Cache context. */ - private final GridCacheContext cctx; - - /** Batch size. */ - private final int size; - - /** Batches. */ - private final Map>> batches = new HashMap<>(); - - /** Result count. */ - private long updateCnt; - - /** Failed keys. */ - private List failedKeys; - - /** Exception. */ - private SQLException err; - - /** - * Constructor. - * - * @param cctx Cache context. - * @param size Batch. - */ - public BatchSender(GridCacheContext cctx, int size) { - this.cctx = cctx; - this.size = size; - } - - /** - * Add entry to batch. 
- * - * @param key Key. - * @param proc Processor. - */ - public void add(Object key, EntryProcessor proc) throws IgniteCheckedException { - ClusterNode node = cctx.affinity().primaryByKey(key, AffinityTopologyVersion.NONE); - - if (node == null) - throw new IgniteCheckedException("Failed to map key to node."); - - UUID nodeId = node.id(); - - Map> batch = batches.get(nodeId); - - if (batch == null) { - batch = new HashMap<>(); - - batches.put(nodeId, batch); - } - - batch.put(key, proc); - - if (batch.size() >= size) { - sendBatch(batch); - - batch.clear(); - } - } - - /** - * Flush any remaining entries. - * - * @throws IgniteCheckedException If failed. - */ - public void flush() throws IgniteCheckedException { - for (Map> batch : batches.values()) { - if (!batch.isEmpty()) - sendBatch(batch); - } - } - - /** - * @return Update count. - */ - public long updateCount() { - return updateCnt; - } - - /** - * @return Failed keys. - */ - public List failedKeys() { - return failedKeys != null ? failedKeys : Collections.emptyList(); - } - - /** - * @return Error. - */ - public SQLException error() { - return err; - } - - /** - * Send the batch. - * - * @param batch Batch. - * @throws IgniteCheckedException If failed. 
- */ - private void sendBatch(Map> batch) - throws IgniteCheckedException { - PageProcessingResult pageRes = processPage(cctx, batch); - - updateCnt += pageRes.cnt; - - if (failedKeys == null) - failedKeys = new ArrayList<>(); - - failedKeys.addAll(F.asList(pageRes.errKeys)); - - if (pageRes.ex != null) { - if (err == null) - err = pageRes.ex; - else - err.setNextException(pageRes.ex); - } - } - } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/UpdateResult.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/UpdateResult.java index de0e63fde5aec..32381ba12baf7 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/UpdateResult.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/UpdateResult.java @@ -25,10 +25,10 @@ */ public final class UpdateResult { /** Result to return for operations that affected 1 item - mostly to be used for fast updates and deletes. */ - final static UpdateResult ONE = new UpdateResult(1, X.EMPTY_OBJECT_ARRAY); + public static final UpdateResult ONE = new UpdateResult(1, X.EMPTY_OBJECT_ARRAY); /** Result to return for operations that affected 0 items - mostly to be used for fast updates and deletes. */ - final static UpdateResult ZERO = new UpdateResult(0, X.EMPTY_OBJECT_ARRAY); + public static final UpdateResult ZERO = new UpdateResult(0, X.EMPTY_OBJECT_ARRAY); /** Number of processed items. 
*/ private final long cnt; diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/DmlAstUtils.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java similarity index 88% rename from modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/DmlAstUtils.java rename to modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java index 4537ffcb624cf..054e70822bfca 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/DmlAstUtils.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java @@ -15,21 +15,39 @@ * limitations under the License. */ -package org.apache.ignite.internal.processors.query.h2.sql; +package org.apache.ignite.internal.processors.query.h2.dml; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; -import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.query.IgniteSQLException; -import org.apache.ignite.internal.processors.query.h2.dml.FastUpdateArgument; -import org.apache.ignite.internal.processors.query.h2.dml.FastUpdateArguments; import org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap; import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlAlias; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlArray; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlAst; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlColumn; +import 
org.apache.ignite.internal.processors.query.h2.sql.GridSqlConst; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlDelete; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlElement; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlFunction; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlFunctionType; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlJoin; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlKeyword; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperation; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlParameter; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuery; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlSelect; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlSubquery; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlTable; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlType; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlUnion; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlUpdate; import org.apache.ignite.internal.util.lang.IgnitePair; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; @@ -168,7 +186,7 @@ public static GridSqlSelect selectForDelete(GridSqlDelete del, @Nullable Integer * @return {@code null} if given statement directly updates {@code _val} column with a literal or param value * and filters by single non expression key (and, optionally, by single non expression value). 
*/ - public static FastUpdateArguments getFastUpdateArgs(GridSqlUpdate update) { + public static FastUpdate getFastUpdateArgs(GridSqlUpdate update) { IgnitePair filter = findKeyValueEqualityCondition(update.where()); if (filter == null) @@ -190,40 +208,20 @@ public static FastUpdateArguments getFastUpdateArgs(GridSqlUpdate update) { if (!(set instanceof GridSqlConst || set instanceof GridSqlParameter)) return null; - return new FastUpdateArguments(operandForElement(filter.getKey()), operandForElement(filter.getValue()), - operandForElement(set)); - } - - /** - * Create operand based on exact type of SQL element. - * - * @param el element. - * @return Operand. - */ - private static FastUpdateArgument operandForElement(GridSqlElement el) { - assert el == null ^ (el instanceof GridSqlConst || el instanceof GridSqlParameter); - - if (el == null) - return FastUpdateArguments.NULL_ARGUMENT; - - if (el instanceof GridSqlConst) - return new ValueArgument(((GridSqlConst)el).value().getObject()); - else - return new ParamArgument(((GridSqlParameter)el).index()); + return FastUpdate.create(filter.getKey(), filter.getValue(), set); } /** * @param del DELETE statement. * @return {@code true} if given statement filters by single non expression key. */ - public static FastUpdateArguments getFastDeleteArgs(GridSqlDelete del) { + public static FastUpdate getFastDeleteArgs(GridSqlDelete del) { IgnitePair filter = findKeyValueEqualityCondition(del.where()); if (filter == null) return null; - return new FastUpdateArguments(operandForElement(filter.getKey()), operandForElement(filter.getValue()), - FastUpdateArguments.NULL_ARGUMENT); + return FastUpdate.create(filter.getKey(), filter.getValue(), null); } /** @@ -231,6 +229,7 @@ public static FastUpdateArguments getFastDeleteArgs(GridSqlDelete del) { * @return Whether given element corresponds to {@code WHERE _key = ?}, and key is a literal expressed * in query or a query param. 
*/ + @SuppressWarnings("RedundantCast") private static IgnitePair findKeyValueEqualityCondition(GridSqlElement where) { if (where == null || !(where instanceof GridSqlOperation)) return null; @@ -462,8 +461,9 @@ private static GridSqlElement injectKeysFilterParam(GridSqlElement where, GridSq * @param paramIdxs Parameter indexes. * @return Extracted parameters list. */ + @SuppressWarnings("unused") private static List findParams(GridSqlQuery qry, Object[] params, ArrayList target, - IntArray paramIdxs) { + IntArray paramIdxs) { if (qry instanceof GridSqlSelect) return findParams((GridSqlSelect)qry, params, target, paramIdxs); @@ -486,7 +486,7 @@ private static List findParams(GridSqlQuery qry, Object[] params, ArrayL * @return Extracted parameters list. */ private static List findParams(GridSqlSelect qry, Object[] params, ArrayList target, - IntArray paramIdxs) { + IntArray paramIdxs) { if (params.length == 0) return target; @@ -511,7 +511,7 @@ private static List findParams(GridSqlSelect qry, Object[] params, Array * @param paramIdxs Parameter indexes. */ private static void findParams(@Nullable GridSqlElement el, Object[] params, ArrayList target, - IntArray paramIdxs) { + IntArray paramIdxs) { if (el == null) return; @@ -550,6 +550,7 @@ else if (el instanceof GridSqlSubquery) * @param c Closure each found table and subquery will be passed to. If returns {@code true} the we need to stop. * @return {@code true} If we have found. */ + @SuppressWarnings("RedundantCast") private static boolean findTablesInFrom(GridSqlElement from, IgnitePredicate c) { if (from == null) return false; @@ -605,40 +606,4 @@ public static GridSqlTable gridTableForElement(GridSqlElement target) { return tbls.iterator().next(); } - - /** Simple constant value based operand. */ - private final static class ValueArgument implements FastUpdateArgument { - /** Value to return. 
*/ - private final Object val; - - /** */ - private ValueArgument(Object val) { - this.val = val; - } - - /** {@inheritDoc} */ - @Override public Object apply(Object[] arg) throws IgniteCheckedException { - return val; - } - } - - /** Simple constant value based operand. */ - private final static class ParamArgument implements FastUpdateArgument { - /** Value to return. */ - private final int paramIdx; - - /** */ - private ParamArgument(int paramIdx) { - assert paramIdx >= 0; - - this.paramIdx = paramIdx; - } - - /** {@inheritDoc} */ - @Override public Object apply(Object[] arg) throws IgniteCheckedException { - assert arg.length > paramIdx; - - return arg[paramIdx]; - } - } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlBatchSender.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlBatchSender.java new file mode 100644 index 0000000000000..a4a60c3b20ea5 --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlBatchSender.java @@ -0,0 +1,232 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.query.h2.dml; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.GridCacheAdapter; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; +import org.apache.ignite.internal.util.typedef.F; + +import javax.cache.processor.EntryProcessor; +import javax.cache.processor.EntryProcessorException; +import javax.cache.processor.EntryProcessorResult; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.createJdbcSqlException; + +/** + * Batch sender class. + */ +public class DmlBatchSender { + /** Cache context. */ + private final GridCacheContext cctx; + + /** Batch size. */ + private final int size; + + /** Batches. */ + private final Map>> batches = new HashMap<>(); + + /** Result count. */ + private long updateCnt; + + /** Failed keys. */ + private List failedKeys; + + /** Exception. */ + private SQLException err; + + /** + * Constructor. + * + * @param cctx Cache context. + * @param size Batch. + */ + public DmlBatchSender(GridCacheContext cctx, int size) { + this.cctx = cctx; + this.size = size; + } + + /** + * Add entry to batch. + * + * @param key Key. + * @param proc Processor. 
+ */ + public void add(Object key, EntryProcessor proc) throws IgniteCheckedException { + ClusterNode node = cctx.affinity().primaryByKey(key, AffinityTopologyVersion.NONE); + + if (node == null) + throw new IgniteCheckedException("Failed to map key to node."); + + UUID nodeId = node.id(); + + Map> batch = batches.get(nodeId); + + if (batch == null) { + batch = new HashMap<>(); + + batches.put(nodeId, batch); + } + + batch.put(key, proc); + + if (batch.size() >= size) { + sendBatch(batch); + + batch.clear(); + } + } + + /** + * Flush any remaining entries. + * + * @throws IgniteCheckedException If failed. + */ + public void flush() throws IgniteCheckedException { + for (Map> batch : batches.values()) { + if (!batch.isEmpty()) + sendBatch(batch); + } + } + + /** + * @return Update count. + */ + public long updateCount() { + return updateCnt; + } + + /** + * @return Failed keys. + */ + public List failedKeys() { + return failedKeys != null ? failedKeys : Collections.emptyList(); + } + + /** + * @return Error. + */ + public SQLException error() { + return err; + } + + /** + * Send the batch. + * + * @param batch Batch. + * @throws IgniteCheckedException If failed. + */ + private void sendBatch(Map> batch) + throws IgniteCheckedException { + DmlPageProcessingResult pageRes = processPage(cctx, batch); + + updateCnt += pageRes.count(); + + if (failedKeys == null) + failedKeys = new ArrayList<>(); + + failedKeys.addAll(F.asList(pageRes.errorKeys())); + + if (pageRes.error() != null) { + if (err == null) + err = pageRes.error(); + else + err.setNextException(pageRes.error()); + } + } + + /** + * Execute given entry processors and collect errors, if any. + * @param cctx Cache context. + * @param rows Rows to process. + * @return Triple [number of rows actually changed; keys that failed to update (duplicates or concurrently + * updated ones); chain of exceptions for all keys whose processing resulted in error, or null for no errors]. + * @throws IgniteCheckedException If failed. 
+ */ + @SuppressWarnings({"unchecked", "ConstantConditions"}) + private static DmlPageProcessingResult processPage(GridCacheContext cctx, + Map> rows) throws IgniteCheckedException { + Map> res = cctx.cache().invokeAll(rows); + + if (F.isEmpty(res)) + return new DmlPageProcessingResult(rows.size(), null, null); + + DmlPageProcessingErrorResult splitRes = splitErrors(res); + + int keysCnt = splitRes.errorKeys().length; + + return new DmlPageProcessingResult(rows.size() - keysCnt - splitRes.errorCount(), splitRes.errorKeys(), + splitRes.error()); + } + + /** + * Process errors of entry processor - split the keys into duplicated/concurrently modified and those whose + * processing yielded an exception. + * + * @param res Result of {@link GridCacheAdapter#invokeAll)} + * @return pair [array of duplicated/concurrently modified keys, SQL exception for erroneous keys] (exception is + * null if all keys are duplicates/concurrently modified ones). + */ + private static DmlPageProcessingErrorResult splitErrors(Map> res) { + Set errKeys = new LinkedHashSet<>(res.keySet()); + + SQLException currSqlEx = null; + + SQLException firstSqlEx = null; + + int errors = 0; + + // Let's form a chain of SQL exceptions + for (Map.Entry> e : res.entrySet()) { + try { + e.getValue().get(); + } + catch (EntryProcessorException ex) { + SQLException next = createJdbcSqlException("Failed to process key '" + e.getKey() + '\'', + IgniteQueryErrorCode.ENTRY_PROCESSING); + + next.initCause(ex); + + if (currSqlEx != null) + currSqlEx.setNextException(next); + else + firstSqlEx = next; + + currSqlEx = next; + + errKeys.remove(e.getKey()); + + errors++; + } + } + + return new DmlPageProcessingErrorResult(errKeys.toArray(), firstSqlEx, errors); + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlDistributedPlanInfo.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlDistributedPlanInfo.java new file mode 100644 
index 0000000000000..44c6e22b61de4 --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlDistributedPlanInfo.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query.h2.dml; + +import java.util.List; + +/** + * Additional information about distributed update plan. + */ +public final class DmlDistributedPlanInfo { + /** Whether update involves only replicated caches. */ + private final boolean replicatedOnly; + + /** Identifiers of caches involved in update (used for cluster nodes mapping). */ + private final List cacheIds; + + /** + * Constructor. + * + * @param replicatedOnly Whether all caches are replicated. + * @param cacheIds List of cache identifiers. + */ + public DmlDistributedPlanInfo(boolean replicatedOnly, List cacheIds) { + this.replicatedOnly = replicatedOnly; + this.cacheIds = cacheIds; + } + + /** + * @return {@code true} in case all involved caches are replicated. + */ + public boolean isReplicatedOnly() { + return replicatedOnly; + } + + /** + * @return cache identifiers. 
+ */ + public List getCacheIds() { + return cacheIds; + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlPageProcessingErrorResult.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlPageProcessingErrorResult.java new file mode 100644 index 0000000000000..02e7359d1defe --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlPageProcessingErrorResult.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query.h2.dml; + +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +import javax.cache.processor.EntryProcessor; +import java.sql.SQLException; + +/** + * Result of splitting keys whose processing resulted into an exception from those skipped by + * logic of {@link EntryProcessor}s (most likely INSERT duplicates, or UPDATE/DELETE keys whose values + * had been modified concurrently), counting and collecting entry processor exceptions. 
+ */ +public final class DmlPageProcessingErrorResult { + /** Keys that failed to be processed by {@link EntryProcessor} (not due to an exception). */ + private final Object[] errKeys; + + /** Number of entries whose processing resulted into an exception. */ + private final int cnt; + + /** Chain of exceptions corresponding to failed keys. Null if no keys yielded an exception. */ + private final SQLException ex; + + /** */ + @SuppressWarnings("ConstantConditions") + public DmlPageProcessingErrorResult(@NotNull Object[] errKeys, SQLException ex, int exCnt) { + errKeys = U.firstNotNull(errKeys, X.EMPTY_OBJECT_ARRAY); + // When exceptions count must be zero, exceptions chain must be not null, and vice versa. + assert exCnt == 0 ^ ex != null; + + this.errKeys = errKeys; + this.cnt = exCnt; + this.ex = ex; + } + + /** + * @return Number of entries whose processing resulted into an exception. + */ + public int errorCount() { + return cnt; + } + + /** + * @return Error keys. + */ + public Object[] errorKeys() { + return errKeys; + } + + /** + * @return Error. + */ + @Nullable + public SQLException error() { + return ex; + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlPageProcessingResult.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlPageProcessingResult.java new file mode 100644 index 0000000000000..f2db3a77229ff --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlPageProcessingResult.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query.h2.dml; + +import org.apache.ignite.IgniteCache; +import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.Nullable; + +import java.sql.SQLException; + +/** + * Result of processing an individual page with {@link IgniteCache#invokeAll} including error details, if any. + */ +public final class DmlPageProcessingResult { + /** Number of successfully processed items. */ + private final long cnt; + + /** Keys that failed to be updated or deleted due to concurrent modification of values. */ + private final Object[] errKeys; + + /** Chain of exceptions corresponding to failed keys. Null if no keys yielded an exception. */ + private final SQLException ex; + + /** */ + @SuppressWarnings("ConstantConditions") + public DmlPageProcessingResult(long cnt, Object[] errKeys, @Nullable SQLException ex) { + this.cnt = cnt; + this.errKeys = U.firstNotNull(errKeys, X.EMPTY_OBJECT_ARRAY); + this.ex = ex; + } + + /** + * @return Number of successfully processed items. + */ + public long count() { + return cnt; + } + + /** + * @return Error keys. + */ + public Object[] errorKeys() { + return errKeys; + } + + /** + * @return Error. 
+ */ + @Nullable public SQLException error() { + return ex; + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlUtils.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlUtils.java new file mode 100644 index 0000000000000..6621fc22425d6 --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlUtils.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.query.h2.dml; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; +import org.apache.ignite.internal.processors.query.IgniteSQLException; +import org.apache.ignite.internal.processors.query.h2.H2Utils; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.h2.util.DateTimeUtils; +import org.h2.util.LocalDateTimeUtils; +import org.h2.value.Value; +import org.h2.value.ValueDate; +import org.h2.value.ValueTime; +import org.h2.value.ValueTimestamp; + +import java.lang.reflect.Array; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Date; + +/** + * DML utility methods. + */ +public class DmlUtils { + /** + * Convert value to column's expected type by means of H2. + * + * @param val Source value. + * @param desc Row descriptor. + * @param expCls Expected value class. + * @param type Expected column type to convert to. + * @return Converted object. + * @throws IgniteCheckedException if failed. + */ + @SuppressWarnings({"ConstantConditions", "SuspiciousSystemArraycopy"}) + public static Object convert(Object val, GridH2RowDescriptor desc, Class expCls, int type) + throws IgniteCheckedException { + if (val == null) + return null; + + Class currCls = val.getClass(); + + try { + if (val instanceof Date && currCls != Date.class && expCls == Date.class) { + // H2 thinks that java.util.Date is always a Timestamp, while binary marshaller expects + // precise Date instance. Let's satisfy it. 
+ return new Date(((Date) val).getTime()); + } + + // User-given UUID is always serialized by H2 to byte array, so we have to deserialize manually + if (type == Value.UUID && currCls == byte[].class) + return U.unmarshal(desc.context().marshaller(), (byte[]) val, + U.resolveClassLoader(desc.context().gridConfig())); + + if (LocalDateTimeUtils.isJava8DateApiPresent()) { + if (val instanceof Timestamp && LocalDateTimeUtils.isLocalDateTime(expCls)) + return LocalDateTimeUtils.valueToLocalDateTime(ValueTimestamp.get((Timestamp) val)); + + if (val instanceof Date && LocalDateTimeUtils.isLocalDate(expCls)) + return LocalDateTimeUtils.valueToLocalDate(ValueDate.fromDateValue( + DateTimeUtils.dateValueFromDate(((Date) val).getTime()))); + + if (val instanceof Time && LocalDateTimeUtils.isLocalTime(expCls)) + return LocalDateTimeUtils.valueToLocalTime(ValueTime.get((Time) val)); + } + + // We have to convert arrays of reference types manually - + // see https://issues.apache.org/jira/browse/IGNITE-4327 + // Still, we only can convert from Object[] to something more precise. + if (type == Value.ARRAY && currCls != expCls) { + if (currCls != Object[].class) + throw new IgniteCheckedException("Unexpected array type - only conversion from Object[] " + + "is assumed"); + + // Why would otherwise type be Value.ARRAY? + assert expCls.isArray(); + + Object[] curr = (Object[]) val; + + Object newArr = Array.newInstance(expCls.getComponentType(), curr.length); + + System.arraycopy(curr, 0, newArr, 0, curr.length); + + return newArr; + } + + return H2Utils.convert(val, desc, type); + } + catch (Exception e) { + throw new IgniteSQLException("Value conversion failed [from=" + currCls.getName() + ", to=" + + expCls.getName() +']', IgniteQueryErrorCode.CONVERSION_FAILED, e); + } + } + + /** + * Private constructor. + */ + private DmlUtils() { + // No-op. 
+ } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdate.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdate.java new file mode 100644 index 0000000000000..e662245a40166 --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdate.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query.h2.dml; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.cache.GridCacheAdapter; +import org.apache.ignite.internal.processors.query.h2.UpdateResult; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlConst; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlElement; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlParameter; +import org.jetbrains.annotations.Nullable; + +/** + * Arguments for fast, query-less UPDATE or DELETE - key and, optionally, value and new value. + */ +public final class FastUpdate { + /** Operand that always evaluates as {@code null}. 
*/ + private final static FastUpdateArgument NULL_ARG = new ConstantArgument(null); + + /** Operand to compute key. */ + private final FastUpdateArgument keyArg; + + /** Operand to compute value. */ + private final FastUpdateArgument valArg; + + /** Operand to compute new value. */ + private final FastUpdateArgument newValArg; + + /** + * Create fast update instance. + * + * @param key Key element. + * @param val Value element. + * @param newVal New value element (if any) + * @return Fast update. + */ + public static FastUpdate create(GridSqlElement key, GridSqlElement val, @Nullable GridSqlElement newVal) { + FastUpdateArgument keyArg = argument(key); + FastUpdateArgument valArg = argument(val); + FastUpdateArgument newValArg = argument(newVal); + + return new FastUpdate(keyArg, valArg, newValArg); + } + + /** + * Constructor. + * + * @param keyArg Key argument. + * @param valArg Value argument. + * @param newValArg New value argument. + */ + private FastUpdate(FastUpdateArgument keyArg, FastUpdateArgument valArg, FastUpdateArgument newValArg) { + this.keyArg = keyArg; + this.valArg = valArg; + this.newValArg = newValArg; + } + + /** + * Perform single cache operation based on given args. + * + * @param cache Cache. + * @param args Query parameters. + * @return 1 if an item was affected, 0 otherwise. + * @throws IgniteCheckedException if failed. + */ + @SuppressWarnings({"unchecked", "ConstantConditions"}) + public UpdateResult execute(GridCacheAdapter cache, Object[] args) throws IgniteCheckedException { + Object key = keyArg.apply(args); + + assert key != null; + + Object val = valArg.apply(args); + Object newVal = newValArg.apply(args); + + boolean res; + + if (newVal != null) { + // Update. + if (val != null) + res = cache.replace(key, val, newVal); + else + res = cache.replace(key, newVal); + } + else { + // Delete. + if (val != null) + res = cache.remove(key, val); + else + res = cache.remove(key); + } + + return res ? 
UpdateResult.ONE : UpdateResult.ZERO; + } + + /** + * Create argument for AST element. + * + * @param el Element. + * @return Argument. + */ + private static FastUpdateArgument argument(@Nullable GridSqlElement el) { + assert el == null ^ (el instanceof GridSqlConst || el instanceof GridSqlParameter); + + if (el == null) + return NULL_ARG; + + if (el instanceof GridSqlConst) + return new ConstantArgument(((GridSqlConst)el).value().getObject()); + else + return new ParamArgument(((GridSqlParameter)el).index()); + } + + /** + * Value argument. + */ + private static class ConstantArgument implements FastUpdateArgument { + /** Value to return. */ + private final Object val; + + /** + * Constructor. + * + * @param val Value. + */ + private ConstantArgument(Object val) { + this.val = val; + } + + /** {@inheritDoc} */ + @Override public Object apply(Object[] arg) throws IgniteCheckedException { + return val; + } + } + + /** + * Parameter argument. + */ + private static class ParamArgument implements FastUpdateArgument { + /** Value to return. */ + private final int paramIdx; + + /** + * Constructor. + * + * @param paramIdx Parameter index. + */ + private ParamArgument(int paramIdx) { + assert paramIdx >= 0; + + this.paramIdx = paramIdx; + } + + /** {@inheritDoc} */ + @Override public Object apply(Object[] arg) throws IgniteCheckedException { + assert arg.length > paramIdx; + + return arg[paramIdx]; + } + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdateArguments.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdateArguments.java deleted file mode 100644 index cb47704c83749..0000000000000 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdateArguments.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.processors.query.h2.dml; - -import org.apache.ignite.IgniteCheckedException; - -/** - * Arguments for fast, query-less UPDATE or DELETE - key and, optionally, value and new value. - */ -public final class FastUpdateArguments { - /** Operand to compute key. */ - public final FastUpdateArgument key; - - /** Operand to compute value. */ - public final FastUpdateArgument val; - - /** Operand to compute new value. */ - public final FastUpdateArgument newVal; - - /** */ - public FastUpdateArguments(FastUpdateArgument key, FastUpdateArgument val, FastUpdateArgument newVal) { - assert key != null && key != NULL_ARGUMENT; - assert val != null; - assert newVal != null; - - this.key = key; - this.val = val; - this.newVal = newVal; - } - - /** Operand that always evaluates as {@code null}. 
*/ - public final static FastUpdateArgument NULL_ARGUMENT = new FastUpdateArgument() { - /** {@inheritDoc} */ - @Override public Object apply(Object[] arg) throws IgniteCheckedException { - return null; - } - }; -} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java index a99d811cacbe5..31dc52de30ae0 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java @@ -17,64 +17,108 @@ package org.apache.ignite.internal.processors.query.h2.dml; -import java.util.List; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.binary.BinaryObjectBuilder; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; +import org.apache.ignite.internal.processors.query.GridQueryProperty; +import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; +import org.apache.ignite.internal.processors.query.IgniteSQLException; +import org.apache.ignite.internal.processors.query.QueryUtils; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.T3; +import org.apache.ignite.lang.IgniteBiTuple; +import org.h2.table.Column; +import org.jetbrains.annotations.Nullable; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.DEFAULT_COLUMNS_COUNT; /** * Update plan - where to take data to update cache from and how to 
construct new keys and values, if needed. */ public final class UpdatePlan { /** Initial statement to drive the rest of the logic. */ - public final UpdateMode mode; + private final UpdateMode mode; /** Target table to be affected by initial DML statement. */ - public final GridH2Table tbl; + private final GridH2Table tbl; /** Column names to set or update. */ - public final String[] colNames; + private final String[] colNames; - /** - * Expected column types to set or insert/merge. - * @see org.h2.value.Value - */ - public final int[] colTypes; + /** Column types to set for insert/merge. */ + private final int[] colTypes; /** Method to create key for INSERT or MERGE, ignored for UPDATE and DELETE. */ - public final KeyValueSupplier keySupplier; + private final KeyValueSupplier keySupplier; /** Method to create value to put to cache, ignored for DELETE. */ - public final KeyValueSupplier valSupplier; + private final KeyValueSupplier valSupplier; - /** Index of key column, if it's explicitly mentioned in column list of MERGE or INSERT, - * ignored for UPDATE and DELETE. */ - public final int keyColIdx; + /** Key column index. */ + private final int keyColIdx; - /** Index of value column, if it's explicitly mentioned in column list. Ignored for UPDATE and DELETE. */ - public final int valColIdx; + /** Value column index. */ + private final int valColIdx; /** SELECT statement built upon initial DML statement. */ - public final String selectQry; + private final String selectQry; /** Subquery flag - {@code true} if {@link #selectQry} is an actual subquery that retrieves data from some cache. */ - public final boolean isLocSubqry; + private final boolean isLocSubqry; /** Number of rows in rows based MERGE or INSERT. */ - public final int rowsNum; + private final int rowsNum; /** Arguments for fast UPDATE or DELETE. */ - public final FastUpdateArguments fastUpdateArgs; + private final FastUpdate fastUpdate; /** Additional info for distributed update. 
*/ - public final DistributedPlanInfo distributed; + private final DmlDistributedPlanInfo distributed; - /** */ - private UpdatePlan(UpdateMode mode, GridH2Table tbl, String[] colNames, int[] colTypes, KeyValueSupplier keySupplier, - KeyValueSupplier valSupplier, int keyColIdx, int valColIdx, String selectQry, boolean isLocSubqry, - int rowsNum, FastUpdateArguments fastUpdateArgs, DistributedPlanInfo distributed) { + /** + * Constructor. + * + * @param mode Mode. + * @param tbl Table. + * @param colNames Column names. + * @param colTypes Column types. + * @param keySupplier Key supplier. + * @param valSupplier Value supplier. + * @param keyColIdx Key column index. + * @param valColIdx value column index. + * @param selectQry Select query. + * @param isLocSubqry Local subquery flag. + * @param rowsNum Rows number. + * @param fastUpdate Fast update (if any). + * @param distributed Distributed plan (if any) + */ + public UpdatePlan( + UpdateMode mode, + GridH2Table tbl, + String[] colNames, + int[] colTypes, + KeyValueSupplier keySupplier, + KeyValueSupplier valSupplier, + int keyColIdx, + int valColIdx, + String selectQry, + boolean isLocSubqry, + int rowsNum, + @Nullable FastUpdate fastUpdate, + @Nullable DmlDistributedPlanInfo distributed + ) { this.colNames = colNames; this.colTypes = colTypes; this.rowsNum = rowsNum; + assert mode != null; assert tbl != null; @@ -86,85 +130,260 @@ private UpdatePlan(UpdateMode mode, GridH2Table tbl, String[] colNames, int[] co this.valColIdx = valColIdx; this.selectQry = selectQry; this.isLocSubqry = isLocSubqry; - this.fastUpdateArgs = fastUpdateArgs; + this.fastUpdate = fastUpdate; this.distributed = distributed; } - /** */ - public static UpdatePlan forMerge(GridH2Table tbl, String[] colNames, int[] colTypes, KeyValueSupplier keySupplier, - KeyValueSupplier valSupplier, int keyColIdx, int valColIdx, String selectQry, boolean isLocSubqry, - int rowsNum, DistributedPlanInfo distributed) { - assert !F.isEmpty(colNames); - - 
return new UpdatePlan(UpdateMode.MERGE, tbl, colNames, colTypes, keySupplier, valSupplier, keyColIdx, valColIdx, - selectQry, isLocSubqry, rowsNum, null, distributed); + /** + * Constructor for delete operation or fast update. + * + * @param mode Mode. + * @param tbl Table. + * @param selectQry Select query. + * @param fastUpdate Fast update arguments (if any). + * @param distributed Distributed plan (if any) + */ + public UpdatePlan( + UpdateMode mode, + GridH2Table tbl, + String selectQry, + @Nullable FastUpdate fastUpdate, + @Nullable DmlDistributedPlanInfo distributed + ) { + this( + mode, + tbl, + null, + null, + null, + null, + -1, + -1, + selectQry, + false, + 0, + fastUpdate, + distributed + ); } - /** */ - public static UpdatePlan forInsert(GridH2Table tbl, String[] colNames, int[] colTypes, KeyValueSupplier keySupplier, - KeyValueSupplier valSupplier, int keyColIdx, int valColIdx, String selectQry, boolean isLocSubqry, - int rowsNum, DistributedPlanInfo distributed) { - assert !F.isEmpty(colNames); + /** + * Convert a row into key-value pair. + * + * @param row Row to process. + * @throws IgniteCheckedException if failed. 
+ */ + public IgniteBiTuple processRow(List row) throws IgniteCheckedException { + GridH2RowDescriptor rowDesc = tbl.rowDescriptor(); + GridQueryTypeDescriptor desc = rowDesc.type(); - return new UpdatePlan(UpdateMode.INSERT, tbl, colNames, colTypes, keySupplier, valSupplier, keyColIdx, - valColIdx, selectQry, isLocSubqry, rowsNum, null, distributed); - } + GridCacheContext cctx = rowDesc.context(); - /** */ - public static UpdatePlan forUpdate(GridH2Table tbl, String[] colNames, int[] colTypes, KeyValueSupplier valSupplier, - int valColIdx, String selectQry, DistributedPlanInfo distributed) { - assert !F.isEmpty(colNames); + Object key = keySupplier.apply(row); - return new UpdatePlan(UpdateMode.UPDATE, tbl, colNames, colTypes, null, valSupplier, -1, valColIdx, selectQry, - false, 0, null, distributed); - } + if (QueryUtils.isSqlType(desc.keyClass())) { + assert keyColIdx != -1; - /** */ - public static UpdatePlan forDelete(GridH2Table tbl, String selectQry, DistributedPlanInfo distributed) { - return new UpdatePlan(UpdateMode.DELETE, tbl, null, null, null, null, -1, -1, selectQry, false, 0, null, - distributed); - } + key = DmlUtils.convert(key, rowDesc, desc.keyClass(), colTypes[keyColIdx]); + } + + Object val = valSupplier.apply(row); + + if (QueryUtils.isSqlType(desc.valueClass())) { + assert valColIdx != -1; + + val = DmlUtils.convert(val, rowDesc, desc.valueClass(), colTypes[valColIdx]); + } + + if (key == null) { + if (F.isEmpty(desc.keyFieldName())) + throw new IgniteSQLException("Key for INSERT or MERGE must not be null", IgniteQueryErrorCode.NULL_KEY); + else + throw new IgniteSQLException("Null value is not allowed for column '" + desc.keyFieldName() + "'", + IgniteQueryErrorCode.NULL_KEY); + } - /** */ - public static UpdatePlan forFastUpdate(UpdateMode mode, GridH2Table tbl, FastUpdateArguments fastUpdateArgs) { - assert mode == UpdateMode.UPDATE || mode == UpdateMode.DELETE; + if (val == null) { + if (F.isEmpty(desc.valueFieldName())) + throw new 
IgniteSQLException("Value for INSERT, MERGE, or UPDATE must not be null", + IgniteQueryErrorCode.NULL_VALUE); + else + throw new IgniteSQLException("Null value is not allowed for column '" + desc.valueFieldName() + "'", + IgniteQueryErrorCode.NULL_VALUE); + } + + Map newColVals = new HashMap<>(); + + for (int i = 0; i < colNames.length; i++) { + if (i == keyColIdx || i == valColIdx) + continue; + + String colName = colNames[i]; + + GridQueryProperty prop = desc.property(colName); + + assert prop != null; + + Class expCls = prop.type(); + + newColVals.put(colName, DmlUtils.convert(row.get(i), rowDesc, expCls, colTypes[i])); + } + + // We update columns in the order specified by the table for a reason - table's + // column order preserves their precedence for correct update of nested properties. + Column[] cols = tbl.getColumns(); + + // First 3 columns are _key, _val and _ver. Skip 'em. + for (int i = DEFAULT_COLUMNS_COUNT; i < cols.length; i++) { + if (tbl.rowDescriptor().isKeyValueOrVersionColumn(i)) + continue; + + String colName = cols[i].getName(); - return new UpdatePlan(mode, tbl, null, null, null, null, -1, -1, null, false, 0, fastUpdateArgs, null); + if (!newColVals.containsKey(colName)) + continue; + + Object colVal = newColVals.get(colName); + + desc.setValue(colName, key, val, colVal); + } + + if (cctx.binaryMarshaller()) { + if (key instanceof BinaryObjectBuilder) + key = ((BinaryObjectBuilder) key).build(); + + if (val instanceof BinaryObjectBuilder) + val = ((BinaryObjectBuilder) val).build(); + } + + desc.validateKeyAndValue(key, val); + + return new IgniteBiTuple<>(key, val); } /** - * Additional information about distributed update plan. + * Convert a row into value. + * + * @param row Row to process. + * @throws IgniteCheckedException if failed. */ - public final static class DistributedPlanInfo { - /** Whether update involves only replicated caches. 
*/ - private final boolean replicatedOnly; - - /** Identifiers of caches involved in update (used for cluster nodes mapping). */ - private final List cacheIds; - - /** - * Constructor. - * - * @param replicatedOnly Whether all caches are replicated. - * @param cacheIds List of cache identifiers. - */ - DistributedPlanInfo(boolean replicatedOnly, List cacheIds) { - this.replicatedOnly = replicatedOnly; - this.cacheIds = cacheIds; + public T3 processRowForUpdate(List row) throws IgniteCheckedException { + GridH2RowDescriptor rowDesc = tbl.rowDescriptor(); + GridQueryTypeDescriptor desc = rowDesc.type(); + + GridCacheContext cctx = rowDesc.context(); + + boolean hasNewVal = (valColIdx != -1); + + boolean hasProps = !hasNewVal || colNames.length > 1; + + Object key = row.get(0); + + Object oldVal = row.get(1); + + if (cctx.binaryMarshaller() && !(oldVal instanceof BinaryObject)) + oldVal = cctx.grid().binary().toBinary(oldVal); + + Object newVal; + + Map newColVals = new HashMap<>(); + + for (int i = 0; i < colNames.length; i++) { + if (hasNewVal && i == valColIdx - 2) + continue; + + GridQueryProperty prop = tbl.rowDescriptor().type().property(colNames[i]); + + assert prop != null : "Unknown property: " + colNames[i]; + + newColVals.put(colNames[i], DmlUtils.convert(row.get(i + 2), rowDesc, prop.type(), colTypes[i])); } - /** - * @return {@code true} in case all involved caches are replicated. 
- */ - public boolean isReplicatedOnly() { - return replicatedOnly; + newVal = valSupplier.apply(row); + + if (newVal == null) + throw new IgniteSQLException("New value for UPDATE must not be null", IgniteQueryErrorCode.NULL_VALUE); + + // Skip key and value - that's why we start off with 3rd column + for (int i = 0; i < tbl.getColumns().length - DEFAULT_COLUMNS_COUNT; i++) { + Column c = tbl.getColumn(i + DEFAULT_COLUMNS_COUNT); + + if (rowDesc.isKeyValueOrVersionColumn(c.getColumnId())) + continue; + + GridQueryProperty prop = desc.property(c.getName()); + + if (prop.key()) + continue; // Don't get values of key's columns - we won't use them anyway + + boolean hasNewColVal = newColVals.containsKey(c.getName()); + + if (!hasNewColVal) + continue; + + Object colVal = newColVals.get(c.getName()); + + // UPDATE currently does not allow to modify key or its fields, so we must be safe to pass null as key. + rowDesc.setColumnValue(null, newVal, colVal, i); } - /** - * @return cache identifiers. - */ - public List getCacheIds() { - return cacheIds; + if (cctx.binaryMarshaller() && hasProps) { + assert newVal instanceof BinaryObjectBuilder; + + newVal = ((BinaryObjectBuilder) newVal).build(); } + + desc.validateKeyAndValue(key, newVal); + + return new T3<>(key, oldVal, newVal); + } + + /** + * @return Update mode. + */ + public UpdateMode mode() { + return mode; + } + + /** + * @return Cache context. + */ + public GridCacheContext cacheContext() { + return tbl.cache(); + } + + /** + * @return Distributed plan info (for skip-reducer mode). + */ + @Nullable public DmlDistributedPlanInfo distributedPlan() { + return distributed; + } + + /** + * @return Row count. + */ + public int rowCount() { + return rowsNum; + } + + /** + * @return Select query. + */ + public String selectQuery() { + return selectQry; + } + + /** + * @return Local subquery flag. + */ + @Nullable public boolean isLocalSubquery() { + return isLocSubqry; + } + + /** + * @return Fast update. 
+ */ + @Nullable public FastUpdate fastUpdate() { + return fastUpdate; } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java index c84526692250e..a5516392dca31 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java @@ -40,7 +40,6 @@ import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; -import org.apache.ignite.internal.processors.query.h2.sql.DmlAstUtils; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlColumn; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlDelete; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlElement; @@ -213,15 +212,26 @@ else if (stmt instanceof GridSqlMerge) { String selectSql = sel.getSQL(); - UpdatePlan.DistributedPlanInfo distributed = (rowsNum == 0 && !F.isEmpty(selectSql)) ? + DmlDistributedPlanInfo distributed = (rowsNum == 0 && !F.isEmpty(selectSql)) ? checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName()) : null; - if (stmt instanceof GridSqlMerge) - return UpdatePlan.forMerge(tbl.dataTable(), colNames, colTypes, keySupplier, valSupplier, keyColIdx, - valColIdx, selectSql, !isTwoStepSubqry, rowsNum, distributed); - else - return UpdatePlan.forInsert(tbl.dataTable(), colNames, colTypes, keySupplier, valSupplier, keyColIdx, - valColIdx, selectSql, !isTwoStepSubqry, rowsNum, distributed); + UpdateMode mode = stmt instanceof GridSqlMerge ? 
UpdateMode.MERGE : UpdateMode.INSERT; + + return new UpdatePlan( + mode, + tbl.dataTable(), + colNames, + colTypes, + keySupplier, + valSupplier, + keyColIdx, + valColIdx, + selectSql, + !isTwoStepSubqry, + rowsNum, + null, + distributed + ); } /** @@ -241,7 +251,7 @@ private static UpdatePlan planForUpdate(GridSqlStatement stmt, boolean loc, Igni throws IgniteCheckedException { GridSqlElement target; - FastUpdateArguments fastUpdate; + FastUpdate fastUpdate; UpdateMode mode; @@ -249,7 +259,7 @@ private static UpdatePlan planForUpdate(GridSqlStatement stmt, boolean loc, Igni // Let's verify that user is not trying to mess with key's columns directly verifyUpdateColumns(stmt); - GridSqlUpdate update = (GridSqlUpdate) stmt; + GridSqlUpdate update = (GridSqlUpdate)stmt; target = update.target(); fastUpdate = DmlAstUtils.getFastUpdateArgs(update); mode = UpdateMode.UPDATE; @@ -266,16 +276,23 @@ else if (stmt instanceof GridSqlDelete) { GridSqlTable tbl = DmlAstUtils.gridTableForElement(target); - GridH2Table gridTbl = tbl.dataTable(); + GridH2Table h2Tbl = tbl.dataTable(); - GridH2RowDescriptor desc = gridTbl.rowDescriptor(); + GridH2RowDescriptor desc = h2Tbl.rowDescriptor(); if (desc == null) - throw new IgniteSQLException("Row descriptor undefined for table '" + gridTbl.getName() + "'", + throw new IgniteSQLException("Row descriptor undefined for table '" + h2Tbl.getName() + "'", IgniteQueryErrorCode.NULL_TABLE_DESCRIPTOR); - if (fastUpdate != null) - return UpdatePlan.forFastUpdate(mode, gridTbl, fastUpdate); + if (fastUpdate != null) { + return new UpdatePlan( + mode, + h2Tbl, + null, + fastUpdate, + null + ); + } else { GridSqlSelect sel; @@ -311,28 +328,47 @@ else if (stmt instanceof GridSqlDelete) { int newValColIdx = (hasNewVal ? 
valColIdx : 1); - KeyValueSupplier newValSupplier = createSupplier(desc.context(), desc.type(), newValColIdx, hasProps, + KeyValueSupplier valSupplier = createSupplier(desc.context(), desc.type(), newValColIdx, hasProps, false, true); sel = DmlAstUtils.selectForUpdate((GridSqlUpdate) stmt, errKeysPos); String selectSql = sel.getSQL(); - UpdatePlan.DistributedPlanInfo distributed = F.isEmpty(selectSql) ? null : + DmlDistributedPlanInfo distributed = F.isEmpty(selectSql) ? null : checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName()); - return UpdatePlan.forUpdate(gridTbl, colNames, colTypes, newValSupplier, valColIdx, selectSql, - distributed); + return new UpdatePlan( + UpdateMode.UPDATE, + h2Tbl, + colNames, + colTypes, + null, + valSupplier, + -1, + valColIdx, + selectSql, + false, + 0, + null, + distributed + ); } else { sel = DmlAstUtils.selectForDelete((GridSqlDelete) stmt, errKeysPos); String selectSql = sel.getSQL(); - UpdatePlan.DistributedPlanInfo distributed = F.isEmpty(selectSql) ? null : + DmlDistributedPlanInfo distributed = F.isEmpty(selectSql) ? null : checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName()); - return UpdatePlan.forDelete(gridTbl, selectSql, distributed); + return new UpdatePlan( + UpdateMode.DELETE, + h2Tbl, + selectSql, + null, + distributed + ); } } } @@ -546,7 +582,7 @@ private static boolean updateAffectsKeyColumns(GridH2Table gridTbl, Set * @return distributed update plan info, or {@code null} if cannot be distributed. * @throws IgniteCheckedException if failed. 
*/ - private static UpdatePlan.DistributedPlanInfo checkPlanCanBeDistributed(IgniteH2Indexing idx, + private static DmlDistributedPlanInfo checkPlanCanBeDistributed(IgniteH2Indexing idx, Connection conn, SqlFieldsQuery fieldsQry, boolean loc, String selectQry, String cacheName) throws IgniteCheckedException { @@ -570,7 +606,7 @@ private static UpdatePlan.DistributedPlanInfo checkPlanCanBeDistributed(IgniteH2 boolean distributed = qry.skipMergeTable() && qry.mapQueries().size() == 1 && !qry.mapQueries().get(0).hasSubQueries(); - return distributed ? new UpdatePlan.DistributedPlanInfo(qry.isReplicatedOnly(), + return distributed ? new DmlDistributedPlanInfo(qry.isReplicatedOnly(), idx.collectCacheIds(CU.cacheId(cacheName), qry)): null; } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java index 46b2aeed9d27e..d16468cd7f804 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.QueryUtils; +import org.apache.ignite.internal.processors.query.h2.dml.DmlAstUtils; import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.util.typedef.F; From 55a68e099674cef4f8cac374299f9766b70921c0 Mon Sep 17 00:00:00 2001 From: gg-shq Date: Wed, 7 Feb 2018 14:28:04 +0300 Subject: [PATCH 232/243] IGNITE-6917: Implemented SQL COPY command This closes #3419 (cherry picked from commit 25d38cc) --- 
.../internal/jdbc2/JdbcBulkLoadSelfTest.java | 185 ++++++ .../jdbc/suite/IgniteJdbcDriverTestSuite.java | 14 + .../jdbc/thin/JdbcThinAbstractSelfTest.java | 77 +++ .../JdbcThinBulkLoadAbstractSelfTest.java | 600 ++++++++++++++++++ ...BulkLoadAtomicPartitionedNearSelfTest.java | 39 ++ ...ThinBulkLoadAtomicPartitionedSelfTest.java | 39 ++ ...cThinBulkLoadAtomicReplicatedSelfTest.java | 39 ++ ...dTransactionalPartitionedNearSelfTest.java | 39 ++ ...kLoadTransactionalPartitionedSelfTest.java | 39 ++ ...lkLoadTransactionalReplicatedSelfTest.java | 39 ++ .../JdbcThinDynamicIndexAbstractSelfTest.java | 1 - .../clients/src/test/resources/bulkload0.csv | 0 .../clients/src/test/resources/bulkload1.csv | 1 + .../clients/src/test/resources/bulkload2.csv | 2 + .../src/test/resources/bulkload2_utf.csv | 2 + .../apache/ignite/IgniteSystemProperties.java | 3 + .../cache/query/BulkLoadContextCursor.java | 97 +++ .../internal/jdbc/thin/JdbcThinStatement.java | 68 +- .../ignite/internal/jdbc2/JdbcQueryTask.java | 12 +- .../bulkload/BulkLoadAckClientParameters.java | 92 +++ .../bulkload/BulkLoadCacheWriter.java | 31 + .../bulkload/BulkLoadCsvFormat.java | 159 +++++ .../bulkload/BulkLoadCsvParser.java | 65 ++ .../processors/bulkload/BulkLoadFormat.java | 33 + .../processors/bulkload/BulkLoadParser.java | 61 ++ .../bulkload/BulkLoadProcessor.java | 104 +++ .../bulkload/BulkLoadStreamerWriter.java | 65 ++ .../pipeline/CharsetDecoderBlock.java | 132 ++++ .../pipeline/CsvLineProcessorBlock.java | 70 ++ .../bulkload/pipeline/LineSplitterBlock.java | 72 +++ .../bulkload/pipeline/PipelineBlock.java | 66 ++ .../pipeline/StrListAppenderBlock.java | 52 ++ .../odbc/jdbc/JdbcBulkLoadAckResult.java | 111 ++++ .../odbc/jdbc/JdbcBulkLoadBatchRequest.java | 183 ++++++ .../odbc/jdbc/JdbcBulkLoadProcessor.java | 144 +++++ .../processors/odbc/jdbc/JdbcRequest.java | 7 + .../odbc/jdbc/JdbcRequestHandler.java | 93 ++- .../processors/odbc/jdbc/JdbcResult.java | 8 + .../ignite/internal/sql/SqlKeyword.java | 
15 + .../apache/ignite/internal/sql/SqlParser.java | 18 +- .../sql/command/SqlBulkLoadCommand.java | 273 ++++++++ .../sql/SqlParserBulkLoadSelfTest.java | 70 ++ .../query/h2/DmlStatementsProcessor.java | 100 +++ .../processors/query/h2/IgniteH2Indexing.java | 42 +- .../query/h2/ddl/DdlStatementsProcessor.java | 3 + .../processors/query/h2/dml/UpdateMode.java | 11 +- .../processors/query/h2/dml/UpdatePlan.java | 20 +- .../query/h2/dml/UpdatePlanBuilder.java | 85 +++ .../IgniteCacheQuerySelfTestSuite.java | 2 + parent/pom.xml | 4 +- 50 files changed, 3452 insertions(+), 35 deletions(-) create mode 100644 modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcBulkLoadSelfTest.java create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAbstractSelfTest.java create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAtomicPartitionedNearSelfTest.java create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAtomicPartitionedSelfTest.java create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAtomicReplicatedSelfTest.java create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadTransactionalPartitionedNearSelfTest.java create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadTransactionalPartitionedSelfTest.java create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadTransactionalReplicatedSelfTest.java create mode 100644 modules/clients/src/test/resources/bulkload0.csv create mode 100644 modules/clients/src/test/resources/bulkload1.csv create mode 100644 modules/clients/src/test/resources/bulkload2.csv create mode 100644 modules/clients/src/test/resources/bulkload2_utf.csv create mode 100644 modules/core/src/main/java/org/apache/ignite/cache/query/BulkLoadContextCursor.java create mode 100644 
modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadAckClientParameters.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCacheWriter.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvFormat.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvParser.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadFormat.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadParser.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadProcessor.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadStreamerWriter.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/CharsetDecoderBlock.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/CsvLineProcessorBlock.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/LineSplitterBlock.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/PipelineBlock.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/StrListAppenderBlock.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadAckResult.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadBatchRequest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadProcessor.java create mode 100644 
modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlBulkLoadCommand.java create mode 100644 modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserBulkLoadSelfTest.java diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcBulkLoadSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcBulkLoadSelfTest.java new file mode 100644 index 0000000000000..d9506cf718ffc --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcBulkLoadSelfTest.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.jdbc2; + +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.cache.query.annotations.QuerySqlField; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.ConnectorConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +import java.io.Serializable; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Collections; +import java.util.Properties; +import java.util.concurrent.Callable; + +import static org.apache.ignite.IgniteJdbcDriver.CFG_URL_PREFIX; +import static org.apache.ignite.cache.CacheMode.PARTITIONED; +import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; + +/** COPY command test for the regular JDBC driver. */ +public class JdbcBulkLoadSelfTest extends GridCommonAbstractTest { + /** JDBC URL. */ + private static final String BASE_URL = CFG_URL_PREFIX + + "cache=default@modules/clients/src/test/config/jdbc-config.xml"; + + /** Connection. */ + protected Connection conn; + + /** The logger. */ + protected transient IgniteLogger log; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + return getConfiguration0(gridName); + } + + /** + * @param gridName Grid name. + * @return Grid configuration used for starting the grid. + * @throws Exception If failed. 
+ */ + private IgniteConfiguration getConfiguration0(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + CacheConfiguration cache = defaultCacheConfiguration(); + + cache.setCacheMode(PARTITIONED); + cache.setBackups(1); + cache.setWriteSynchronizationMode(FULL_SYNC); + cache.setIndexedTypes( + Integer.class, Person.class + ); + + cfg.setCacheConfiguration(cache); + cfg.setLocalHost("127.0.0.1"); + + TcpDiscoverySpi disco = new TcpDiscoverySpi(); + + TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true); + ipFinder.setAddresses(Collections.singleton("127.0.0.1:47500..47501")); + + disco.setIpFinder(ipFinder); + + cfg.setDiscoverySpi(disco); + + cfg.setConnectorConfiguration(new ConnectorConfiguration()); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + startGrids(2); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + } + + /** + * Establishes the JDBC connection. + * + * @return Connection to use for the test. + * @throws Exception if failed. + */ + private Connection createConnection() throws Exception { + Properties props = new Properties(); + + return DriverManager.getConnection(BASE_URL, props); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + U.closeQuiet(conn); + + ignite(0).cache(DEFAULT_CACHE_NAME).clear(); + + super.afterTest(); + } + + /** + * This is more a placeholder for implementation of IGNITE-7553. + * + * @throws Exception if failed. 
+ */ + public void testBulkLoadThrows() throws Exception { + GridTestUtils.assertThrows(null, new Callable() { + @Override public Object call() throws Exception { + conn = createConnection(); + + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("copy from \"dummy.csv\" into Person" + + " (_key, id, firstName, lastName) format csv"); + + return null; + } + } + }, SQLException.class, "COPY command is currently supported only in thin JDBC driver."); + } + + /** + * A test class for creating a query entity. + */ + @SuppressWarnings("UnusedDeclaration") + private static class Person implements Serializable { + /** ID. */ + @QuerySqlField + private final int id; + + /** First name. */ + @QuerySqlField(index = false) + private final String firstName; + + /** Last name. */ + @QuerySqlField(index = false) + private final String lastName; + + /** Age. */ + @QuerySqlField + private final int age; + + /** + * @param id ID. + * @param firstName First name + * @param lastName Last name + * @param age Age. 
+ */ + private Person(int id, String firstName, String lastName, int age) { + assert !F.isEmpty(firstName); + assert !F.isEmpty(lastName); + assert age > 0; + + this.id = id; + this.firstName = firstName; + this.lastName = lastName; + this.age = age; + } + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java index bec388a0a9bb4..3278e0f6c7e8f 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java @@ -35,6 +35,12 @@ import org.apache.ignite.jdbc.JdbcStatementSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinAutoCloseServerCursorTest; import org.apache.ignite.jdbc.thin.JdbcThinBatchSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadAtomicPartitionedNearSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadAtomicPartitionedSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadAtomicReplicatedSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadTransactionalPartitionedNearSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadTransactionalPartitionedSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadTransactionalReplicatedSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinComplexDmlDdlSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinComplexQuerySelfTest; import org.apache.ignite.jdbc.thin.JdbcThinConnectionSelfTest; @@ -151,6 +157,14 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(JdbcThinDynamicIndexTransactionalPartitionedSelfTest.class)); suite.addTest(new TestSuite(JdbcThinDynamicIndexTransactionalReplicatedSelfTest.class)); + // New thin JDBC driver, DML tests + suite.addTest(new TestSuite(JdbcThinBulkLoadAtomicPartitionedNearSelfTest.class)); + suite.addTest(new 
TestSuite(JdbcThinBulkLoadAtomicPartitionedSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinBulkLoadAtomicReplicatedSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinBulkLoadTransactionalPartitionedNearSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinBulkLoadTransactionalPartitionedSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinBulkLoadTransactionalReplicatedSelfTest.class)); + // New thin JDBC driver, full SQL tests suite.addTest(new TestSuite(JdbcThinComplexDmlDdlSelfTest.class)); diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java index 1c38df21fdb03..2ba36c369c227 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java @@ -17,9 +17,22 @@ package org.apache.ignite.jdbc.thin; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.concurrent.Callable; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor; +import org.apache.ignite.internal.processors.port.GridPortRecord; +import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; @@ -93,4 +106,68 @@ interface RunnableX { */ void run() throws Exception; } + + /** + * @param node Node to connect to. + * @param params Connection parameters. + * @return Thin JDBC connection to specified node. 
+ */ + static Connection connect(IgniteEx node, String params) throws SQLException { + Collection recs = node.context().ports().records(); + + GridPortRecord cliLsnrRec = null; + + for (GridPortRecord rec : recs) { + if (rec.clazz() == ClientListenerProcessor.class) { + cliLsnrRec = rec; + + break; + } + } + + assertNotNull(cliLsnrRec); + + String connStr = "jdbc:ignite:thin://127.0.0.1:" + cliLsnrRec.port(); + + if (!F.isEmpty(params)) + connStr += "/?" + params; + + return DriverManager.getConnection(connStr); + } + + /** + * @param sql Statement. + * @param args Arguments. + * @return Result set. + * @throws RuntimeException if failed. + */ + static List> execute(Connection conn, String sql, Object... args) throws SQLException { + try (PreparedStatement s = conn.prepareStatement(sql)) { + for (int i = 0; i < args.length; i++) + s.setObject(i + 1, args[i]); + + if (s.execute()) { + List> res = new ArrayList<>(); + + try (ResultSet rs = s.getResultSet()) { + ResultSetMetaData meta = rs.getMetaData(); + + int cnt = meta.getColumnCount(); + + while (rs.next()) { + List row = new ArrayList<>(cnt); + + for (int i = 1; i <= cnt; i++) + row.add(rs.getObject(i)); + + res.add(row); + } + } + + return res; + } + else + return Collections.emptyList(); + } + } } \ No newline at end of file diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAbstractSelfTest.java new file mode 100644 index 0000000000000..970f82592f6aa --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAbstractSelfTest.java @@ -0,0 +1,600 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.jdbc.thin; + +import java.sql.BatchUpdateException; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Collection; +import java.util.Collections; +import java.util.Objects; +import java.util.concurrent.Callable; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.NearCacheConfiguration; +import org.apache.ignite.internal.processors.query.QueryUtils; +import org.apache.ignite.testframework.GridTestUtils; + +import static org.apache.ignite.IgniteSystemProperties.IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK; +import static org.apache.ignite.cache.CacheMode.PARTITIONED; +import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; +import static org.apache.ignite.internal.util.IgniteUtils.resolveIgnitePath; + +/** + * COPY statement tests. + */ +public abstract class JdbcThinBulkLoadAbstractSelfTest extends JdbcThinAbstractDmlStatementSelfTest { + /** Default table name. */ + private static final String TBL_NAME = "Person"; + + /** JDBC statement. 
*/ + private Statement stmt; + + /** A CSV file with zero records */ + private static final String BULKLOAD_EMPTY_CSV_FILE = + Objects.requireNonNull(resolveIgnitePath("/modules/clients/src/test/resources/bulkload0.csv")) + .getAbsolutePath(); + + /** A CSV file with one record. */ + private static final String BULKLOAD_ONE_LINE_CSV_FILE = + Objects.requireNonNull(resolveIgnitePath("/modules/clients/src/test/resources/bulkload1.csv")) + .getAbsolutePath(); + + /** A CSV file with two records. */ + private static final String BULKLOAD_TWO_LINES_CSV_FILE = + Objects.requireNonNull(resolveIgnitePath("/modules/clients/src/test/resources/bulkload2.csv")) + .getAbsolutePath(); + + /** A file with UTF records. */ + private static final String BULKLOAD_UTF_CSV_FILE = + Objects.requireNonNull(resolveIgnitePath("/modules/clients/src/test/resources/bulkload2_utf.csv")) + .getAbsolutePath(); + + /** Basic COPY statement used in majority of the tests. */ + public static final String BASIC_SQL_COPY_STMT = + "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\"" + + " into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv"; + + /** {@inheritDoc} */ + @Override protected CacheConfiguration cacheConfig() { + return cacheConfigWithIndexedTypes(); + } + + /** + * Creates cache configuration with {@link QueryEntity} created + * using {@link CacheConfiguration#setIndexedTypes(Class[])} call. + * + * @return The cache configuration. 
+ */ + @SuppressWarnings("unchecked") + private CacheConfiguration cacheConfigWithIndexedTypes() { + CacheConfiguration cache = defaultCacheConfiguration(); + + cache.setCacheMode(cacheMode()); + cache.setAtomicityMode(atomicityMode()); + cache.setWriteSynchronizationMode(FULL_SYNC); + + if (cacheMode() == PARTITIONED) + cache.setBackups(1); + + if (nearCache()) + cache.setNearConfiguration(new NearCacheConfiguration()); + + cache.setIndexedTypes( + String.class, Person.class + ); + + return cache; + } + + /** + * Returns true if we are testing near cache. + * + * @return true if we are testing near cache. + */ + protected abstract boolean nearCache(); + + /** + * Returns cache atomicity mode we are testing. + * + * @return The cache atomicity mode we are testing. + */ + protected abstract CacheAtomicityMode atomicityMode(); + + /** + * Returns cache mode we are testing. + * + * @return The cache mode we are testing. + */ + protected abstract CacheMode cacheMode(); + + /** + * Creates cache configuration with {@link QueryEntity} created + * using {@link CacheConfiguration#setQueryEntities(Collection)} call. + * + * @return The cache configuration. 
+ */ + private CacheConfiguration cacheConfigWithQueryEntity() { + CacheConfiguration cache = defaultCacheConfiguration(); + + cache.setCacheMode(PARTITIONED); + cache.setBackups(1); + cache.setWriteSynchronizationMode(FULL_SYNC); + + QueryEntity e = new QueryEntity(); + + e.setKeyType(String.class.getName()); + e.setValueType("Person"); + + e.addQueryField("id", Integer.class.getName(), null); + e.addQueryField("age", Integer.class.getName(), null); + e.addQueryField("firstName", String.class.getName(), null); + e.addQueryField("lastName", String.class.getName(), null); + + cache.setQueryEntities(Collections.singletonList(e)); + + return cache; + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + System.setProperty(IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK, "TRUE"); + + stmt = conn.createStatement(); + + assertNotNull(stmt); + assertFalse(stmt.isClosed()); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + if (stmt != null && !stmt.isClosed()) + stmt.close(); + + assertTrue(stmt.isClosed()); + + System.clearProperty(IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK); + + super.afterTest(); + } + + /** + * Dead-on-arrival test. Imports two-entry CSV file into a table and checks + * the created entries using SELECT statement. + * + * @throws SQLException If failed. + */ + public void testBasicStatement() throws SQLException { + int updatesCnt = stmt.executeUpdate(BASIC_SQL_COPY_STMT); + + assertEquals(2, updatesCnt); + + checkCacheContents(TBL_NAME, true, 2); + } + + /** + * Imports two-entry CSV file with UTF-8 characters into a table and checks + * the created entries using SELECT statement. + * + * @throws SQLException If failed. 
+ */ + public void testUtf() throws SQLException { + int updatesCnt = stmt.executeUpdate( + "copy from \"" + BULKLOAD_UTF_CSV_FILE + "\" into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv"); + + assertEquals(2, updatesCnt); + + checkUtfCacheContents(TBL_NAME, true, 2); + } + + /** + * Imports two-entry CSV file with UTF-8 characters into a table using batch size of one byte + * (thus splitting each two-byte UTF-8 character into two batches) + * and checks the created entries using SELECT statement. + * + * @throws SQLException If failed. + */ + public void testUtfBatchSize_1() throws SQLException { + int updatesCnt = stmt.executeUpdate( + "copy from \"" + BULKLOAD_UTF_CSV_FILE + "\" into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv batch_size 1"); + + assertEquals(2, updatesCnt); + + checkUtfCacheContents(TBL_NAME, true, 2); + } + + /** + * Imports one-entry CSV file into a table and checks the entry created using SELECT statement. + * + * @throws SQLException If failed. + */ + public void testOneLineFile() throws SQLException { + int updatesCnt = stmt.executeUpdate( + "copy from \"" + BULKLOAD_ONE_LINE_CSV_FILE + "\" into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv"); + + assertEquals(1, updatesCnt); + + checkCacheContents(TBL_NAME, true, 1); + } + + /** + * Imports zero-entry CSV file into a table and checks that no entries are created + * using SELECT statement. + * + * @throws SQLException If failed. + */ + public void testEmptyFile() throws SQLException { + int updatesCnt = stmt.executeUpdate( + "copy from \"" + BULKLOAD_EMPTY_CSV_FILE + "\" into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv"); + + assertEquals(0, updatesCnt); + + checkCacheContents(TBL_NAME, true, 0); + } + + /** + * Checks that error is reported for a non-existent file. 
+ */ + public void testWrongFileName() { + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + stmt.executeUpdate( + "copy from \"nonexistent\" into Person" + + " (_key, age, firstName, lastName)" + + " format csv"); + + return null; + } + }, SQLException.class, "Failed to read file: 'nonexistent'"); + } + + /** + * Checks that error is reported if the destination table is missing. + */ + public void testMissingTable() { + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + stmt.executeUpdate( + "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\" into Peterson" + + " (_key, age, firstName, lastName)" + + " format csv"); + + return null; + } + }, SQLException.class, "Table does not exist: PETERSON"); + } + + /** + * Checks that error is reported when a non-existing column is specified in the SQL command. + */ + public void testWrongColumnName() { + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + stmt.executeUpdate( + "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\" into Person" + + " (_key, age, firstName, lostName)" + + " format csv"); + + return null; + } + }, SQLException.class, "Column \"LOSTNAME\" not found"); + } + + /** + * Checks that error is reported if field read from CSV file cannot be converted to the type of the column. + */ + public void testWrongColumnType() { + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + stmt.executeUpdate( + "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\" into Person" + + " (_key, firstName, age, lastName)" + + " format csv"); + + return null; + } + }, SQLException.class, "Value conversion failed [from=java.lang.String, to=java.lang.Integer]"); + } + + /** + * Checks that if even a subset of fields is imported, the imported fields are set correctly. + * + * @throws SQLException If failed. 
+ */ + public void testFieldsSubset() throws SQLException { + int updatesCnt = stmt.executeUpdate( + "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\" into " + TBL_NAME + + " (_key, age, firstName)" + + " format csv"); + + assertEquals(2, updatesCnt); + + checkCacheContents(TBL_NAME, false, 2); + } + + /** + * Checks that bulk load works when we create table using 'CREATE TABLE' command. + * + * The majority of the tests in this class use {@link CacheConfiguration#setIndexedTypes(Class[])} + * to create the table. + * + * @throws SQLException If failed. + */ + public void testCreateAndBulkLoadTable() throws SQLException { + String tblName = QueryUtils.DFLT_SCHEMA + ".\"PersonTbl\""; + + execute(conn, "create table " + tblName + + " (id int primary key, age int, firstName varchar(30), lastName varchar(30))"); + + int updatesCnt = stmt.executeUpdate( + "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\" into " + tblName + + "(_key, age, firstName, lastName)" + + " format csv"); + + assertEquals(2, updatesCnt); + + checkCacheContents(tblName, true, 2); + } + + /** + * Checks that bulk load works when we create table with {@link CacheConfiguration#setQueryEntities(Collection)}. + * + * The majority of the tests in this class use {@link CacheConfiguration#setIndexedTypes(Class[])} + * to create a table. + * + * @throws SQLException If failed. + */ + @SuppressWarnings("unchecked") + public void testConfigureQueryEntityAndBulkLoad() throws SQLException { + ignite(0).getOrCreateCache(cacheConfigWithQueryEntity()); + + int updatesCnt = stmt.executeUpdate(BASIC_SQL_COPY_STMT); + + assertEquals(2, updatesCnt); + + checkCacheContents(TBL_NAME, true, 2); + } + + /** + * Checks that bulk load works when we use batch size of 1 byte and thus + * create multiple batches per COPY. + * + * @throws SQLException If failed. 
+ */ + public void testBatchSize_1() throws SQLException { + int updatesCnt = stmt.executeUpdate(BASIC_SQL_COPY_STMT + " batch_size 1"); + + assertEquals(2, updatesCnt); + + checkCacheContents(TBL_NAME, true, 2); + } + + /** + * Verifies exception thrown if COPY is added into a batch. + * + * @throws SQLException If failed. + */ + public void testMultipleStatement() throws SQLException { + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + stmt.addBatch(BASIC_SQL_COPY_STMT); + + stmt.addBatch("copy from \"" + BULKLOAD_ONE_LINE_CSV_FILE + "\" into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv"); + + stmt.addBatch("copy from \"" + BULKLOAD_UTF_CSV_FILE + "\" into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv"); + + stmt.executeBatch(); + + return null; + } + }, BatchUpdateException.class, "COPY command cannot be executed in batch mode."); + } + + /** + * Verifies that COPY command is rejected by Statement.executeQuery(). + * + * @throws SQLException If failed. + */ + public void testExecuteQuery() throws SQLException { + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + stmt.executeQuery(BASIC_SQL_COPY_STMT); + + return null; + } + }, SQLException.class, "The query isn't SELECT query"); + } + + /** + * Verifies that COPY command works in Statement.execute(). + * + * @throws SQLException If failed. + */ + public void testExecute() throws SQLException { + boolean isRowSet = stmt.execute(BASIC_SQL_COPY_STMT); + + assertFalse(isRowSet); + + checkCacheContents(TBL_NAME, true, 2); + } + + /** + * Verifies that COPY command can be called with PreparedStatement.executeUpdate(). + * + * @throws SQLException If failed. 
+ */ + public void testPreparedStatementWithExecuteUpdate() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement(BASIC_SQL_COPY_STMT); + + int updatesCnt = pstmt.executeUpdate(); + + assertEquals(2, updatesCnt); + + checkCacheContents(TBL_NAME, true, 2); + } + + /** + * Verifies that COPY command reports an error when used with PreparedStatement parameter. + * + * @throws SQLException If failed. + */ + public void testPreparedStatementWithParameter() throws SQLException { + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + PreparedStatement pstmt = conn.prepareStatement( + "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\" into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format ?"); + + pstmt.setString(1, "csv"); + + pstmt.executeUpdate(); + + return null; + } + }, SQLException.class, "Unexpected token: \"?\" (expected: \"[identifier]\""); + } + + /** + * Verifies that COPY command can be called with PreparedStatement.execute(). + * + * @throws SQLException If failed. + */ + public void testPreparedStatementWithExecute() throws SQLException { + PreparedStatement pstmt = conn.prepareStatement(BASIC_SQL_COPY_STMT); + + boolean isRowSet = pstmt.execute(); + + assertFalse(isRowSet); + + checkCacheContents(TBL_NAME, true, 2); + } + + /** + * Verifies that COPY command is rejected by PreparedStatement.executeQuery(). + */ + public void testPreparedStatementWithExecuteQuery() { + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + PreparedStatement pstmt = conn.prepareStatement(BASIC_SQL_COPY_STMT); + + pstmt.executeQuery(); + + return null; + } + }, SQLException.class, "The query isn't SELECT query"); + } + + /** + * Checks cache contents for a typical test using SQL SELECT command. + * + * @param tblName Table name to query. + * @param checkLastName Check 'lastName' column (not imported in some tests). 
+ * @param recCnt Number of records to expect. + * @throws SQLException When one of checks has failed. + */ + private void checkCacheContents(String tblName, boolean checkLastName, int recCnt) throws SQLException { + ResultSet rs = stmt.executeQuery("select _key, age, firstName, lastName from " + tblName); + + assert rs != null; + + int cnt = 0; + + while (rs.next()) { + int id = rs.getInt("_key"); + + if (id == 123) { + assertEquals(12, rs.getInt("age")); + assertEquals("FirstName123 MiddleName123", rs.getString("firstName")); + if (checkLastName) + assertEquals("LastName123", rs.getString("lastName")); + } + else if (id == 456) { + assertEquals(45, rs.getInt("age")); + assertEquals("FirstName456", rs.getString("firstName")); + if (checkLastName) + assertEquals("LastName456", rs.getString("lastName")); + } + else + fail("Wrong ID: " + id); + + cnt++; + } + + assertEquals(recCnt, cnt); + } + + /** + * Checks cache contents for a UTF-8 bulk load tests using SQL SELECT command. + * + * @param tblName Table name to query. + * @param checkLastName Check 'lastName' column (not imported in some tests). + * @param recCnt Number of records to expect. + * @throws SQLException When one of checks has failed. 
+ */ + private void checkUtfCacheContents(String tblName, boolean checkLastName, int recCnt) throws SQLException { + ResultSet rs = stmt.executeQuery("select _key, age, firstName, lastName from " + tblName); + + assert rs != null; + + int cnt = 0; + + while (rs.next()) { + int id = rs.getInt("_key"); + + if (id == 123) { + assertEquals(12, rs.getInt("age")); + assertEquals("Имя123 Отчество123", rs.getString("firstName")); + if (checkLastName) + assertEquals("Фамилия123", rs.getString("lastName")); + } + else if (id == 456) { + assertEquals(45, rs.getInt("age")); + assertEquals("Имя456", rs.getString("firstName")); + if (checkLastName) + assertEquals("Фамилия456", rs.getString("lastName")); + } + else + fail("Wrong ID: " + id); + + cnt++; + } + + assertEquals(recCnt, cnt); + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAtomicPartitionedNearSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAtomicPartitionedNearSelfTest.java new file mode 100644 index 0000000000000..887b1d9d7eae0 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAtomicPartitionedNearSelfTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.jdbc.thin; + +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; + +/** A {@link JdbcThinBulkLoadAbstractSelfTest} for partitioned atomic near-cache mode. */ +public class JdbcThinBulkLoadAtomicPartitionedNearSelfTest extends JdbcThinBulkLoadAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected CacheMode cacheMode() { + return CacheMode.PARTITIONED; + } + + /** {@inheritDoc} */ + @Override protected CacheAtomicityMode atomicityMode() { + return CacheAtomicityMode.ATOMIC; + } + + /** {@inheritDoc} */ + @Override protected boolean nearCache() { + return true; + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAtomicPartitionedSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAtomicPartitionedSelfTest.java new file mode 100644 index 0000000000000..55813330c3422 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAtomicPartitionedSelfTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; + +/** A {@link JdbcThinBulkLoadAbstractSelfTest} for partitioned atomic mode. */ +public class JdbcThinBulkLoadAtomicPartitionedSelfTest extends JdbcThinBulkLoadAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected CacheMode cacheMode() { + return CacheMode.PARTITIONED; + } + + /** {@inheritDoc} */ + @Override protected CacheAtomicityMode atomicityMode() { + return CacheAtomicityMode.ATOMIC; + } + + /** {@inheritDoc} */ + @Override protected boolean nearCache() { + return false; + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAtomicReplicatedSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAtomicReplicatedSelfTest.java new file mode 100644 index 0000000000000..c3d69afbd89fa --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAtomicReplicatedSelfTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; + +/** A {@link JdbcThinBulkLoadAbstractSelfTest} for replicated atomic near-cache mode. */ +public class JdbcThinBulkLoadAtomicReplicatedSelfTest extends JdbcThinBulkLoadAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected CacheMode cacheMode() { + return CacheMode.REPLICATED; + } + + /** {@inheritDoc} */ + @Override protected CacheAtomicityMode atomicityMode() { + return CacheAtomicityMode.ATOMIC; + } + + /** {@inheritDoc} */ + @Override protected boolean nearCache() { + return false; + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadTransactionalPartitionedNearSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadTransactionalPartitionedNearSelfTest.java new file mode 100644 index 0000000000000..9336dd194788a --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadTransactionalPartitionedNearSelfTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; + +/** A {@link JdbcThinBulkLoadAbstractSelfTest} for partitioned transactional near-cache mode. */ +public class JdbcThinBulkLoadTransactionalPartitionedNearSelfTest extends JdbcThinBulkLoadAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected CacheMode cacheMode() { + return CacheMode.PARTITIONED; + } + + /** {@inheritDoc} */ + @Override protected CacheAtomicityMode atomicityMode() { + return CacheAtomicityMode.TRANSACTIONAL; + } + + /** {@inheritDoc} */ + @Override protected boolean nearCache() { + return true; + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadTransactionalPartitionedSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadTransactionalPartitionedSelfTest.java new file mode 100644 index 0000000000000..d1dea2a7e302c --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadTransactionalPartitionedSelfTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; + +/** A {@link JdbcThinBulkLoadAbstractSelfTest} for partitioned transactional mode. */ +public class JdbcThinBulkLoadTransactionalPartitionedSelfTest extends JdbcThinBulkLoadAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected CacheMode cacheMode() { + return CacheMode.PARTITIONED; + } + + /** {@inheritDoc} */ + @Override protected CacheAtomicityMode atomicityMode() { + return CacheAtomicityMode.TRANSACTIONAL; + } + + /** {@inheritDoc} */ + @Override protected boolean nearCache() { + return false; + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadTransactionalReplicatedSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadTransactionalReplicatedSelfTest.java new file mode 100644 index 0000000000000..1c377fac96b29 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadTransactionalReplicatedSelfTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; + +/** A {@link JdbcThinBulkLoadAbstractSelfTest} for replicated transactional mode. */ +public class JdbcThinBulkLoadTransactionalReplicatedSelfTest extends JdbcThinBulkLoadAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected CacheMode cacheMode() { + return CacheMode.REPLICATED; + } + + /** {@inheritDoc} */ + @Override protected CacheAtomicityMode atomicityMode() { + return CacheAtomicityMode.TRANSACTIONAL; + } + + /** {@inheritDoc} */ + @Override protected boolean nearCache() { + return false; + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinDynamicIndexAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinDynamicIndexAbstractSelfTest.java index dbe93a49f465d..539713aeb4cdb 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinDynamicIndexAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinDynamicIndexAbstractSelfTest.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.concurrent.Callable; import org.apache.ignite.IgniteCache; -import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.CacheWriteSynchronizationMode; diff --git a/modules/clients/src/test/resources/bulkload0.csv b/modules/clients/src/test/resources/bulkload0.csv new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/modules/clients/src/test/resources/bulkload1.csv b/modules/clients/src/test/resources/bulkload1.csv new file mode 100644 index 0000000000000..596ac32aa66b0 --- /dev/null +++ b/modules/clients/src/test/resources/bulkload1.csv @@ -0,0 +1 @@ +123,12,"FirstName123 MiddleName123",LastName123 \ No newline at end of file diff --git 
a/modules/clients/src/test/resources/bulkload2.csv b/modules/clients/src/test/resources/bulkload2.csv new file mode 100644 index 0000000000000..d398c19c1cd9d --- /dev/null +++ b/modules/clients/src/test/resources/bulkload2.csv @@ -0,0 +1,2 @@ +123,12,"FirstName123 MiddleName123",LastName123 +456,45,"FirstName456","LastName456" \ No newline at end of file diff --git a/modules/clients/src/test/resources/bulkload2_utf.csv b/modules/clients/src/test/resources/bulkload2_utf.csv new file mode 100644 index 0000000000000..bdb64896be58f --- /dev/null +++ b/modules/clients/src/test/resources/bulkload2_utf.csv @@ -0,0 +1,2 @@ +123,12,"Имя123 Отчество123",Фамилия123 +456,45,"Имя456","Фамилия456" \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java index d7d4443867587..56f3ada88b782 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java @@ -452,6 +452,9 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_SQL_MERGE_TABLE_PREFETCH_SIZE = "IGNITE_SQL_MERGE_TABLE_PREFETCH_SIZE"; + /** Disable fallback to H2 SQL parser if the internal SQL parser fails to parse the statement. */ + public static final String IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK = "IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK"; + /** Maximum size for affinity assignment history. 
*/ public static final String IGNITE_AFFINITY_HISTORY_SIZE = "IGNITE_AFFINITY_HISTORY_SIZE"; diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/BulkLoadContextCursor.java b/modules/core/src/main/java/org/apache/ignite/cache/query/BulkLoadContextCursor.java new file mode 100644 index 0000000000000..b7fdec3e8d07f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/cache/query/BulkLoadContextCursor.java @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.cache.query; + +import org.apache.ignite.internal.processors.bulkload.BulkLoadAckClientParameters; +import org.apache.ignite.internal.processors.bulkload.BulkLoadProcessor; +import org.jetbrains.annotations.NotNull; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + +/** + * A special FieldsQueryCursor subclass that is used as a sentinel to transfer data from bulk load + * (COPY) command to the JDBC or other client-facing driver: the bulk load batch processor + * and parameters to send to the client. + * */ +public class BulkLoadContextCursor implements FieldsQueryCursor> { + /** Bulk load context from SQL command. 
*/ + private final BulkLoadProcessor processor; + + /** Bulk load parameters to send to the client. */ + private final BulkLoadAckClientParameters clientParams; + + /** + * Creates a cursor. + * + * @param processor Bulk load context object to store. + * @param clientParams Parameters to send to client. + */ + public BulkLoadContextCursor(BulkLoadProcessor processor, BulkLoadAckClientParameters clientParams) { + this.processor = processor; + this.clientParams = clientParams; + } + + /** + * Returns a bulk load context. + * + * @return a bulk load context. + */ + public BulkLoadProcessor bulkLoadProcessor() { + return processor; + } + + /** + * Returns the bulk load parameters to send to the client. + * + * @return The bulk load parameters to send to the client. + */ + public BulkLoadAckClientParameters clientParams() { + return clientParams; + } + + /** {@inheritDoc} */ + @Override public List> getAll() { + return Collections.singletonList(Arrays.asList(processor, clientParams)); + } + + /** {@inheritDoc} */ + @NotNull @Override public Iterator> iterator() { + return getAll().iterator(); + } + + /** {@inheritDoc} */ + @Override public void close() { + // no-op + } + + /** {@inheritDoc} */ + @Override public String getFieldName(int idx) { + if (idx < 0 || idx > 1) + throw new IndexOutOfBoundsException(); + + return idx == 0 ? 
"processor" : "clientParams"; + } + + /** {@inheritDoc} */ + @Override public int getColumnsCount() { + return 2; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java index d29df932c2ba5..202001149d932 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java @@ -17,6 +17,9 @@ package org.apache.ignite.internal.jdbc.thin; +import java.io.BufferedInputStream; +import java.io.FileInputStream; +import java.io.InputStream; import java.sql.BatchUpdateException; import java.sql.Connection; import java.sql.ResultSet; @@ -25,21 +28,24 @@ import java.sql.SQLWarning; import java.sql.Statement; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import org.apache.ignite.cache.query.SqlQuery; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; -import org.apache.ignite.internal.processors.odbc.SqlStateCode; import org.apache.ignite.internal.processors.odbc.ClientListenerResponse; +import org.apache.ignite.internal.processors.odbc.SqlStateCode; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBatchExecuteRequest; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBatchExecuteResult; +import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadAckResult; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQuery; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQueryExecuteMultipleStatementsResult; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQueryExecuteRequest; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQueryExecuteResult; -import org.apache.ignite.internal.processors.odbc.jdbc.JdbcStatementType; import 
org.apache.ignite.internal.processors.odbc.jdbc.JdbcResult; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcResultInfo; +import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest; +import org.apache.ignite.internal.processors.odbc.jdbc.JdbcStatementType; import static java.sql.ResultSet.CONCUR_READ_ONLY; import static java.sql.ResultSet.FETCH_FORWARD; @@ -132,6 +138,9 @@ protected void execute0(JdbcStatementType stmtType, String sql, List arg assert res0 != null; + if (res0 instanceof JdbcBulkLoadAckResult) + res0 = sendFile((JdbcBulkLoadAckResult)res0); + if (res0 instanceof JdbcQueryExecuteResult) { JdbcQueryExecuteResult res = (JdbcQueryExecuteResult)res0; @@ -176,6 +185,61 @@ else if (res0 instanceof JdbcQueryExecuteMultipleStatementsResult) { assert resultSets.size() > 0 : "At least one results set is expected"; } + /** + * Sends a file to server in batches via multiple {@link JdbcBulkLoadBatchRequest}s. + * + * @param cmdRes Result of invoking COPY command: contains server-parsed + * bulk load parameters, such as file name and batch size. + */ + private JdbcResult sendFile(JdbcBulkLoadAckResult cmdRes) throws SQLException { + String fileName = cmdRes.params().localFileName(); + int batchSize = cmdRes.params().batchSize(); + + int batchNum = 0; + + try { + try (InputStream input = new BufferedInputStream(new FileInputStream(fileName))) { + byte[] buf = new byte[batchSize]; + + int readBytes; + while ((readBytes = input.read(buf)) != -1) { + if (readBytes == 0) + continue; + + JdbcResult res = conn.sendRequest(new JdbcBulkLoadBatchRequest( + cmdRes.queryId(), + batchNum++, + JdbcBulkLoadBatchRequest.CMD_CONTINUE, + readBytes == buf.length ? 
buf : Arrays.copyOf(buf, readBytes))); + + if (!(res instanceof JdbcQueryExecuteResult)) + throw new SQLException("Unknown response sent by the server: " + res); + } + + return conn.sendRequest(new JdbcBulkLoadBatchRequest( + cmdRes.queryId(), + batchNum++, + JdbcBulkLoadBatchRequest.CMD_FINISHED_EOF)); + } + } + catch (Exception e) { + try { + conn.sendRequest(new JdbcBulkLoadBatchRequest( + cmdRes.queryId(), + batchNum, + JdbcBulkLoadBatchRequest.CMD_FINISHED_ERROR)); + } + catch (SQLException e1) { + throw new SQLException("Cannot send finalization request: " + e1.getMessage(), e); + } + + if (e instanceof SQLException) + throw (SQLException) e; + else + throw new SQLException("Failed to read file: '" + fileName + "'", SqlStateCode.INTERNAL_ERROR, e); + } + } + /** {@inheritDoc} */ @Override public int executeUpdate(String sql) throws SQLException { execute0(JdbcStatementType.UPDATE_STMT_TYPE, sql, null); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java index aa9f009962db2..07034f4579e41 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java @@ -32,6 +32,8 @@ import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteJdbcDriver; import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.cache.query.BulkLoadContextCursor; +import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.cache.query.QueryCursor; import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.internal.IgniteKernal; @@ -168,7 +170,15 @@ public JdbcQueryTask(Ignite ignite, String cacheName, String schemaName, String qry.setLazy(lazy()); qry.setSchema(schemaName); - QueryCursorImpl> qryCursor = (QueryCursorImpl>)cache.withKeepBinary().query(qry); + FieldsQueryCursor> fldQryCursor = 
cache.withKeepBinary().query(qry); + + if (fldQryCursor instanceof BulkLoadContextCursor) { + fldQryCursor.close(); + + throw new SQLException("COPY command is currently supported only in thin JDBC driver."); + } + + QueryCursorImpl> qryCursor = (QueryCursorImpl>)fldQryCursor; if (isQry == null) isQry = qryCursor.isQuery(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadAckClientParameters.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadAckClientParameters.java new file mode 100644 index 0000000000000..119d9f94e9dcf --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadAckClientParameters.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.bulkload; + +import org.jetbrains.annotations.NotNull; + +/** + * Bulk load parameters, which are parsed from SQL command and sent from server to client. + */ +public class BulkLoadAckClientParameters { + /** Minimum batch size. */ + public static final int MIN_BATCH_SIZE = 1; + + /** + * Maximum batch size. 
Note that the batch is wrapped to transport objects and the overall packet should fit + * into a Java array. 512 has been chosen arbitrarily. + */ + public static final int MAX_BATCH_SIZE = Integer.MAX_VALUE - 512; + + /** Size of a file batch for COPY command. */ + public static final int DEFAULT_BATCH_SIZE = 4 * 1024 * 1024; + + /** Local name of the file to send to server */ + @NotNull private final String locFileName; + + /** File batch size in bytes. */ + private final int batchSize; + + /** + * Creates a bulk load parameters. + * + * @param locFileName File name to send from client to server. + * @param batchSize Batch size (Number of bytes in a portion of a file to send in one JDBC request/response). + */ + public BulkLoadAckClientParameters(@NotNull String locFileName, int batchSize) { + this.locFileName = locFileName; + this.batchSize = batchSize; + } + + /** + * Returns the local name of file to send. + * + * @return The local name of file to send. + */ + @NotNull public String localFileName() { + return locFileName; + } + + /** + * Returns the batch size. + * + * @return The batch size. + */ + public int batchSize() { + return batchSize; + } + + /** + * Checks if batch size value is valid. + * + * @param sz The batch size to check. + * @throws IllegalArgumentException if batch size is invalid. + */ + public static boolean isValidBatchSize(int sz) { + return sz >= MIN_BATCH_SIZE && sz <= MAX_BATCH_SIZE; + } + + /** + * Creates proper batch size error message if {@link #isValidBatchSize(int)} check has failed. + * + * @param sz The batch size. + * @return The string with the error message. + */ + public static String batchSizeErrorMsg(int sz) { + return "Batch size should be within [" + MIN_BATCH_SIZE + ".." 
+ MAX_BATCH_SIZE + "]: " + sz; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCacheWriter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCacheWriter.java new file mode 100644 index 0000000000000..90714c8308de4 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCacheWriter.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.bulkload; + +import org.apache.ignite.lang.IgniteBiTuple; +import org.apache.ignite.lang.IgniteInClosure; + +/** A proxy, which stores given key+value pair to a cache. */ +public abstract class BulkLoadCacheWriter implements IgniteInClosure>, AutoCloseable { + /** + * Returns number of entry updates made by the writer. + * + * @return The number of cache entry updates. 
+ */ + public abstract long updateCnt(); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvFormat.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvFormat.java new file mode 100644 index 0000000000000..6f5e91e8194fe --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvFormat.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.bulkload; + +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +import java.util.regex.Pattern; + +/** A placeholder for bulk load CSV format parser options. */ +public class BulkLoadCsvFormat extends BulkLoadFormat { + + /** Line separator pattern. */ + @NotNull public static final Pattern DEFAULT_LINE_SEPARATOR = Pattern.compile("[\r\n]+"); + + /** Field separator pattern. */ + @NotNull public static final Pattern DEFAULT_FIELD_SEPARATOR = Pattern.compile(","); + + /** Quote characters */ + @NotNull public static final String DEFAULT_QUOTE_CHARS = "\""; + + /** Default escape sequence start characters. 
*/ + @Nullable public static final String DEFAULT_ESCAPE_CHARS = null; + + /** Line comment start pattern. */ + @Nullable public static final Pattern DEFAULT_COMMENT_CHARS = null; + + /** Format name. */ + public static final String NAME = "CSV"; + + /** Line separator pattern. */ + @Nullable private Pattern lineSeparator; + + /** Field separator pattern. */ + @Nullable private Pattern fieldSeparator; + + /** Set of quote characters. */ + @Nullable private String quoteChars; + + /** Line comment start pattern. */ + @Nullable private Pattern commentChars; + + /** Set of escape start characters. */ + @Nullable private String escapeChars; + + /** + * Returns the name of the format. + * + * @return The name of the format. + */ + @Override public String name() { + return NAME; + } + + /** + * Returns the line separator pattern. + * + * @return The line separator pattern. + */ + @Nullable public Pattern lineSeparator() { + return lineSeparator; + } + + /** + * Sets the line separator pattern. + * + * @param lineSeparator The line separator pattern. + */ + public void lineSeparator(@Nullable Pattern lineSeparator) { + this.lineSeparator = lineSeparator; + } + + /** + * Returns the field separator pattern. + * + * @return The field separator pattern. + */ + @Nullable public Pattern fieldSeparator() { + return fieldSeparator; + } + + /** + * Sets the field separator pattern. + * + * @param fieldSeparator The field separator pattern. + */ + public void fieldSeparator(@Nullable Pattern fieldSeparator) { + this.fieldSeparator = fieldSeparator; + } + + /** + * Returns the quote characters. + * + * @return The quote characters. + */ + @Nullable public String quoteChars() { + return quoteChars; + } + + /** + * Sets the quote characters. + * + * @param quoteChars The quote characters. + */ + public void quoteChars(@Nullable String quoteChars) { + this.quoteChars = quoteChars; + } + + /** + * Returns the line comment start pattern. + * + * @return The line comment start pattern. 
+ */ + @Nullable public Pattern commentChars() { + return commentChars; + } + + /** + * Sets the line comment start pattern. + * + * @param commentChars The line comment start pattern. + */ + public void commentChars(@Nullable Pattern commentChars) { + this.commentChars = commentChars; + } + + /** + * Returns the escape characters. + * + * @return The escape characters. + */ + @Nullable public String escapeChars() { + return escapeChars; + } + + /** + * Sets the escape characters. + * + * @param escapeChars The escape characters. + */ + public void escapeChars(@Nullable String escapeChars) { + this.escapeChars = escapeChars; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvParser.java new file mode 100644 index 0000000000000..0511596012477 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvParser.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.bulkload; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.bulkload.pipeline.CharsetDecoderBlock; +import org.apache.ignite.internal.processors.bulkload.pipeline.CsvLineProcessorBlock; +import org.apache.ignite.internal.processors.bulkload.pipeline.PipelineBlock; +import org.apache.ignite.internal.processors.bulkload.pipeline.StrListAppenderBlock; +import org.apache.ignite.internal.processors.bulkload.pipeline.LineSplitterBlock; + +import java.util.LinkedList; +import java.util.List; + +/** CSV parser for COPY command. */ +public class BulkLoadCsvParser extends BulkLoadParser { + /** Processing pipeline input block: a decoder for the input stream of bytes */ + private final PipelineBlock inputBlock; + + /** A record collecting block that appends its input to {@code List}. */ + private final StrListAppenderBlock collectorBlock; + + /** + * Creates bulk load CSV parser. + * + * @param format Format options (parsed from COPY command). + */ + public BulkLoadCsvParser(BulkLoadCsvFormat format) { + inputBlock = new CharsetDecoderBlock(BulkLoadFormat.DEFAULT_INPUT_CHARSET); + + collectorBlock = new StrListAppenderBlock(); + + // Handling of the other options is to be implemented in IGNITE-7537. 
+ inputBlock.append(new LineSplitterBlock(format.lineSeparator())) + .append(new CsvLineProcessorBlock(format.fieldSeparator(), format.quoteChars())) + .append(collectorBlock); + } + + /** {@inheritDoc} */ + @Override protected Iterable> parseBatch(byte[] batchData, boolean isLastBatch) + throws IgniteCheckedException { + List> res = new LinkedList<>(); + + collectorBlock.output(res); + + inputBlock.accept(batchData, isLastBatch); + + return res; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadFormat.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadFormat.java new file mode 100644 index 0000000000000..cff93c5788552 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadFormat.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.bulkload; + +import java.nio.charset.Charset; + +/** A superclass and a factory for bulk load format options. */ +public abstract class BulkLoadFormat { + /** The default input charset. 
*/ + public static final Charset DEFAULT_INPUT_CHARSET = Charset.forName("UTF-8"); + + /** + * Returns the format name. + * + * @return The format name. + */ + public abstract String name(); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadParser.java new file mode 100644 index 0000000000000..252e87b1a9bc6 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadParser.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.bulkload; + +import org.apache.ignite.IgniteCheckedException; + +import java.util.List; + +/** + * Bulk load file format parser superclass + factory of known formats. + * + *

<p>The parser processes a batch of input data and returns a list of records. + *

<p>The parser uses corresponding options from the {@link BulkLoadFormat} subclass. + */

        Note that conversion between parsed and database table type is done by the other + * object (see {@link BulkLoadProcessor#dataConverter}) by the request processing code. + * This method is not obliged to do this conversion. + * + * @param batchData Data from the current batch. + * @param isLastBatch true if this is the last batch. + * @return The list of records. + * @throws IgniteCheckedException If any processing error occurs. + */ + protected abstract Iterable> parseBatch(byte[] batchData, boolean isLastBatch) + throws IgniteCheckedException; + + /** + * Creates a parser for a given format options. + * + * @param format The input format object. + * @return The parser. + * @throws IllegalArgumentException if the format is not known to the factory. + */ + public static BulkLoadParser createParser(BulkLoadFormat format) { + if (format instanceof BulkLoadCsvFormat) + return new BulkLoadCsvParser((BulkLoadCsvFormat)format); + + throw new IllegalArgumentException("Internal error: format is not defined"); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadProcessor.java new file mode 100644 index 0000000000000..ccf3e251e539e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadProcessor.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.bulkload; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.IgniteIllegalStateException; +import org.apache.ignite.internal.util.lang.IgniteClosureX; +import org.apache.ignite.lang.IgniteBiTuple; + +import java.util.List; + +/** + * Bulk load (COPY) command processor used on server to keep various context data and process portions of input + * received from the client side. + */ +public class BulkLoadProcessor implements AutoCloseable { + /** Parser of the input bytes. */ + private final BulkLoadParser inputParser; + + /** + * Converter, which transforms the list of strings parsed from the input stream to the key+value entry to add to + * the cache. + */ + private final IgniteClosureX, IgniteBiTuple> dataConverter; + + /** Streamer that puts actual key/value into the cache. */ + private final BulkLoadCacheWriter outputStreamer; + + /** Becomes true after {@link #close()} method is called. */ + private boolean isClosed; + + /** + * Creates bulk load processor. + * + * @param inputParser Parser of the input bytes. + * @param dataConverter Converter, which transforms the list of strings parsed from the input stream to the + * key+value entry to add to the cache. + * @param outputStreamer Streamer that puts actual key/value into the cache. 
+ */ + public BulkLoadProcessor(BulkLoadParser inputParser, IgniteClosureX, IgniteBiTuple> dataConverter, + BulkLoadCacheWriter outputStreamer) { + this.inputParser = inputParser; + this.dataConverter = dataConverter; + this.outputStreamer = outputStreamer; + isClosed = false; + } + + /** + * Returns the streamer that puts actual key/value into the cache. + * + * @return Streamer that puts actual key/value into the cache. + */ + public BulkLoadCacheWriter outputStreamer() { + return outputStreamer; + } + + /** + * Processes the incoming batch and writes data to the cache by calling the data converter and output streamer. + * + * @param batchData Data from the current batch. + * @param isLastBatch true if this is the last batch. + * @throws IgniteIllegalStateException when called after {@link #close()}. + */ + public void processBatch(byte[] batchData, boolean isLastBatch) throws IgniteCheckedException { + if (isClosed) + throw new IgniteIllegalStateException("Attempt to process a batch on a closed BulkLoadProcessor"); + + Iterable> inputRecords = inputParser.parseBatch(batchData, isLastBatch); + + for (List record : inputRecords) { + IgniteBiTuple kv = dataConverter.apply(record); + + outputStreamer.apply(kv); + } + } + + /** + * Aborts processing and closes the underlying objects ({@link IgniteDataStreamer}). + */ + @Override public void close() throws Exception { + if (isClosed) + return; + + isClosed = true; + + outputStreamer.close(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadStreamerWriter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadStreamerWriter.java new file mode 100644 index 0000000000000..3e5efd941152e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadStreamerWriter.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.bulkload; + +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.lang.IgniteBiTuple; + +/** + * A bulk load cache writer object that adds entries using {@link IgniteDataStreamer}. + */ +public class BulkLoadStreamerWriter extends BulkLoadCacheWriter { + /** Serialization version UID. */ + private static final long serialVersionUID = 0L; + + /** The streamer. */ + private final IgniteDataStreamer streamer; + + /** + * A number of {@link IgniteDataStreamer#addData(Object, Object)} calls made, + * since we don't have any kind of result data back from the streamer. + */ + private long updateCnt; + + /** + * Creates a cache writer. + * + * @param streamer The streamer to use. 
+ */ + public BulkLoadStreamerWriter(IgniteDataStreamer streamer) { + this.streamer = streamer; + updateCnt = 0; + } + + /** {@inheritDoc} */ + @Override public void apply(IgniteBiTuple entry) { + streamer.addData(entry.getKey(), entry.getValue()); + + updateCnt++; + } + + /** {@inheritDoc} */ + @Override public void close() { + streamer.close(); + } + + /** {@inheritDoc} */ + @Override public long updateCnt() { + return updateCnt; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/CharsetDecoderBlock.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/CharsetDecoderBlock.java new file mode 100644 index 0000000000000..5b18def1a37cc --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/CharsetDecoderBlock.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.bulkload.pipeline; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteIllegalStateException; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.Charset; +import java.nio.charset.CharsetDecoder; +import java.nio.charset.CoderResult; +import java.nio.charset.CodingErrorAction; +import java.util.Arrays; + +/** + * A {@link PipelineBlock}, which converts stream of bytes supplied as byte[] arrays to an array of char[] using + * the specified encoding. Decoding errors (malformed input and unmappable characters) are to handled by dropping + * the erroneous input, appending the coder's replacement value to the output buffer, and resuming the coding operation. + */ +public class CharsetDecoderBlock extends PipelineBlock { + /** Charset decoder */ + private final CharsetDecoder charsetDecoder; + + /** Leftover bytes (partial characters) from the last batch, + * or null if everything was processed. */ + private byte[] leftover; + + /** True once we've reached the end of input. */ + private boolean isEndOfInput; + + /** + * Creates charset decoder block. + * + * @param charset The charset encoding to decode bytes from. 
+ */ + public CharsetDecoderBlock(Charset charset) { + charsetDecoder = charset.newDecoder() + .onMalformedInput(CodingErrorAction.REPLACE) + .onUnmappableCharacter(CodingErrorAction.REPLACE); + + isEndOfInput = false; + leftover = null; + } + + /** {@inheritDoc} */ + @Override public void accept(byte[] data, boolean isLastAppend) throws IgniteCheckedException { + assert nextBlock != null; + + assert !isEndOfInput : "convertBytes() called after end of input"; + + isEndOfInput = isLastAppend; + + if (leftover == null && data.length == 0) { + nextBlock.accept(new char[0], isLastAppend); + return; + } + + ByteBuffer dataBuf; + + if (leftover == null) + dataBuf = ByteBuffer.wrap(data); + else { + dataBuf = ByteBuffer.allocate(leftover.length + data.length); + + dataBuf.put(leftover) + .put(data); + + dataBuf.flip(); + + leftover = null; + } + + int outBufLen = (int)Math.ceil(charsetDecoder.maxCharsPerByte() * (data.length + 1)); + + assert outBufLen > 0; + + CharBuffer outBuf = CharBuffer.allocate(outBufLen); + + for (;;) { + CoderResult res = charsetDecoder.decode(dataBuf, outBuf, isEndOfInput); + + if (res.isUnderflow()) { + // End of input buffer reached. Either skip the partial character at the end or wait for the next batch. + if (!isEndOfInput && dataBuf.remaining() > 0) + leftover = Arrays.copyOfRange(dataBuf.array(), + dataBuf.arrayOffset() + dataBuf.position(), dataBuf.limit()); + + if (isEndOfInput) + charsetDecoder.flush(outBuf); // See {@link CharsetDecoder} class javadoc for the protocol. + + if (outBuf.position() > 0) + nextBlock.accept(Arrays.copyOfRange(outBuf.array(), outBuf.arrayOffset(), outBuf.position()), + isEndOfInput); + + break; + } + + if (res.isOverflow()) { // Not enough space in the output buffer, flush it and retry. + assert outBuf.position() > 0; + + nextBlock.accept(Arrays.copyOfRange(outBuf.array(), outBuf.arrayOffset(), outBuf.position()), + isEndOfInput); + + outBuf.flip(); + + continue; + } + + assert ! res.isMalformed() && ! 
res.isUnmappable(); + + // We're not supposed to reach this point with the current implementation. + // The code below will fire exception if Oracle implementation of CharsetDecoder will be changed in future. + throw new IgniteIllegalStateException("Unknown CharsetDecoder state"); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/CsvLineProcessorBlock.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/CsvLineProcessorBlock.java new file mode 100644 index 0000000000000..5b2ee4b9f3ced --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/CsvLineProcessorBlock.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.bulkload.pipeline; + +import org.apache.ignite.IgniteCheckedException; +import org.jetbrains.annotations.NotNull; + +import java.util.regex.Pattern; + +/** + * A {@link PipelineBlock}, which splits line according to CSV format rules and unquotes fields. + * The next block {@link PipelineBlock#accept(Object, boolean)} is called per-line. 
+ */ +public class CsvLineProcessorBlock extends PipelineBlock { + /** Field delimiter pattern. */ + private final Pattern fldDelim; + + /** Quote character. */ + private final String quoteChars; + + /** + * Creates a CSV line parser. + * + * @param fldDelim The pattern for the field delimiter. + * @param quoteChars Quoting character. + */ + public CsvLineProcessorBlock(Pattern fldDelim, String quoteChars) { + this.fldDelim = fldDelim; + this.quoteChars = quoteChars; + } + + /** {@inheritDoc} */ + @Override public void accept(String input, boolean isLastPortion) throws IgniteCheckedException { + // Currently we don't process quoted field delimiter properly, will be fixed in IGNITE-7537. + String[] fields = fldDelim.split(input); + + for (int i = 0; i < fields.length; i++) + fields[i] = trim(fields[i]); + + nextBlock.accept(fields, isLastPortion); + } + + /** + * Trims quote characters from beginning and end of the line. + * + * @param str String to trim. + * @return The trimmed string. + */ + @NotNull private String trim(String str) { + int startPos = quoteChars.indexOf(str.charAt(0)) != -1 ? 1 : 0; + int endPos = quoteChars.indexOf(str.charAt(str.length() - 1)) != -1 ? str.length() - 1 : str.length(); + + return str.substring(startPos, endPos); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/LineSplitterBlock.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/LineSplitterBlock.java new file mode 100644 index 0000000000000..122d0db4ca7b3 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/LineSplitterBlock.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.bulkload.pipeline; + +import org.apache.ignite.IgniteCheckedException; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * A {@link PipelineBlock}, which splits input stream of char[] into lines using the specified {@link Pattern} + * as line separator. Next block {@link PipelineBlock#accept(Object, boolean)} is invoked for each line. + * Leftover characters are remembered and used during processing the next input batch, + * unless isLastPortion flag is specified. + */ +public class LineSplitterBlock extends PipelineBlock { + /** Line separator pattern */ + private final Pattern delim; + + /** Leftover characters from the previous invocation of {@link #accept(char[], boolean)}. */ + private StringBuilder leftover = new StringBuilder(); + + /** + * Creates line splitter block. + * + * @param delim The line separator pattern. 
+ */ + public LineSplitterBlock(Pattern delim) { + this.delim = delim; + } + + /** {@inheritDoc} */ + @Override public void accept(char[] chars, boolean isLastPortion) throws IgniteCheckedException { + leftover.append(chars); + + String input = leftover.toString(); + Matcher matcher = delim.matcher(input); + + int lastPos = 0; + while (matcher.find()) { + String outStr = input.substring(lastPos, matcher.start()); + + if (!outStr.isEmpty()) + nextBlock.accept(outStr, false); + + lastPos = matcher.end(); + } + + if (lastPos != 0) + leftover.delete(0, lastPos); + + if (isLastPortion && leftover.length() > 0) { + nextBlock.accept(leftover.toString(), true); + leftover.setLength(0); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/PipelineBlock.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/PipelineBlock.java new file mode 100644 index 0000000000000..914b4b4d4aaa4 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/PipelineBlock.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.bulkload.pipeline; + +import org.apache.ignite.IgniteCheckedException; +import org.jetbrains.annotations.Nullable; + +/** + * A file parsing pipeline block. Accepts an portion of an input (isLastPortion flag is provided to signify the last + * block to process) and optionally calls the next block with transformed input or performs any other handling, + * such as storing input to internal structures. + */ +public abstract class PipelineBlock { + /** The next block in pipeline or null if this block is a terminator. */ + @Nullable protected PipelineBlock nextBlock; + + /** + * Creates a pipeline block. + * + *

<p>(There is no nextBlock argument in the constructor: setting the next block using + * the {@link #append(PipelineBlock)} method is more convenient.) + */

<p>Below is an example of using this method to set up a pipeline:
+ * {@code block1.append(block2).append(block3); }. + *

        Block2 here becomes the next for block1, and block3 is the next one for the block2. + * + * @param next The next block for the current block. + * @return The next block ({@code next} argument). + */ + public PipelineBlock append(PipelineBlock next) { + nextBlock = next; + return next; + } + + /** + * Accepts a portion of input. {@code isLastPortion} parameter should be set if this is a last portion + * of the input. The method must not be called after the end of input: the call with {@code isLastPortion == true} + * is the last one. + * + * @param inputPortion Portion of input. + * @param isLastPortion Is this the last portion. + */ + public abstract void accept(I inputPortion, boolean isLastPortion) throws IgniteCheckedException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/StrListAppenderBlock.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/StrListAppenderBlock.java new file mode 100644 index 0000000000000..91cbc1e11117d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/StrListAppenderBlock.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.bulkload.pipeline; + +import java.util.Arrays; +import java.util.List; + +/** + * The PipelineBlock which appends its input to a user-supplied list. + * + *

        The list is set using {@link #output(List)} method. + */ +public class StrListAppenderBlock extends PipelineBlock { + /** The output list. */ + private List> output; + + /** + * Creates the block. List can be configured using {@link #output(List)} method. + */ + public StrListAppenderBlock() { + output = null; + } + + /** + * Sets the output list. + * + * @param output The output list. + */ + public void output(List> output) { + this.output = output; + } + + /** {@inheritDoc} */ + @Override public void accept(String[] elements, boolean isLastPortion) { + output.add(Arrays.asList(elements)); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadAckResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadAckResult.java new file mode 100644 index 0000000000000..8a170ab64b8c7 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadAckResult.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.odbc.jdbc; + +import org.apache.ignite.binary.BinaryObjectException; +import org.apache.ignite.internal.binary.BinaryReaderExImpl; +import org.apache.ignite.internal.binary.BinaryWriterExImpl; +import org.apache.ignite.internal.processors.bulkload.BulkLoadAckClientParameters; +import org.apache.ignite.internal.sql.command.SqlBulkLoadCommand; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * A reply from server to SQL COPY command, which is essentially a request from server to client + * to send files from client to server (see IGNITE-6917 for details). + * + * @see JdbcBulkLoadProcessor for the protocol. + * @see SqlBulkLoadCommand + */ +public class JdbcBulkLoadAckResult extends JdbcResult { + /** Query ID for matching this command on server in further {@link JdbcBulkLoadBatchRequest} commands. */ + private long qryId; + + /** + * Bulk load parameters, which are parsed on the server side and sent to client to specify + * what files to send, batch size, etc. + */ + private BulkLoadAckClientParameters params; + + /**Creates uninitialized bulk load batch request result. */ + public JdbcBulkLoadAckResult() { + super(BULK_LOAD_ACK); + + qryId = 0; + params = null; + } + + /** + * Constructs a request from server (in form of reply) to send files from client to server. + * + * @param qryId Query ID to send in further {@link JdbcBulkLoadBatchRequest}s. + * @param params Various parameters for sending batches from client side. + */ + public JdbcBulkLoadAckResult(long qryId, BulkLoadAckClientParameters params) { + super(BULK_LOAD_ACK); + + this.qryId = qryId; + this.params = params; + } + + /** + * Returns the query ID. + * + * @return Query ID. + */ + public long queryId() { + return qryId; + } + + /** + * Returns the parameters for the client. + * + * @return The parameters for the client. 
+ */ + public BulkLoadAckClientParameters params() { + return params; + } + + /** {@inheritDoc} */ + @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException { + super.writeBinary(writer); + + writer.writeLong(qryId); + writer.writeString(params.localFileName()); + writer.writeInt(params.batchSize()); + } + + /** {@inheritDoc} */ + @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException { + super.readBinary(reader); + + qryId = reader.readLong(); + + String locFileName = reader.readString(); + int batchSize = reader.readInt(); + + if (!BulkLoadAckClientParameters.isValidBatchSize(batchSize)) + throw new BinaryObjectException(BulkLoadAckClientParameters.batchSizeErrorMsg(batchSize)); + + params = new BulkLoadAckClientParameters(locFileName, batchSize); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(JdbcBulkLoadAckResult.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadBatchRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadBatchRequest.java new file mode 100644 index 0000000000000..b75de5a251c7c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadBatchRequest.java @@ -0,0 +1,183 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.jdbc; + +import org.apache.ignite.binary.BinaryObjectException; +import org.apache.ignite.internal.binary.BinaryReaderExImpl; +import org.apache.ignite.internal.binary.BinaryWriterExImpl; +import org.apache.ignite.internal.sql.command.SqlBulkLoadCommand; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.jetbrains.annotations.NotNull; + +/** + * A JDBC request that sends a batch of a file to the server. Used when handling + * {@link SqlBulkLoadCommand} command. + */ +public class JdbcBulkLoadBatchRequest extends JdbcRequest { + /** A sentinel to indicate that {@link #cmd} field was not initialized. */ + public static final int CMD_UNKNOWN = -1; + + /** Next batch comes in this request and there are more batches. */ + public static final int CMD_CONTINUE = 0; + + /** + * This is the final batch from the client and there was an error on the client side, + * so terminate with error on the server side as well. + */ + public static final int CMD_FINISHED_ERROR = 1; + + /** + * This is the final batch of the file and everything went well on the client side. + * Server may complete the request. + */ + public static final int CMD_FINISHED_EOF = 2; + + /** QueryID of the original COPY command request. */ + private long qryId; + + /** Batch index starting from 0. */ + private int batchIdx; + + /** Command (see CMD_xxx constants above). */ + private int cmd; + + /** Data in this batch. */ + @NotNull private byte[] data; + + /** + * Creates the request with uninitialized parameters. 
+ */ + public JdbcBulkLoadBatchRequest() { + super(BULK_LOAD_BATCH); + + qryId = -1; + batchIdx = -1; + cmd = CMD_UNKNOWN; + data = null; + } + + /** + * Creates the request with specified parameters and zero-length data. + * Typically used with {@link #CMD_FINISHED_ERROR} and {@link #CMD_FINISHED_EOF}. + * + * @param qryId The query ID from the {@link JdbcBulkLoadAckResult}. + * @param batchIdx Index of the current batch starting with 0. + * @param cmd The command ({@link #CMD_CONTINUE}, {@link #CMD_FINISHED_EOF}, or {@link #CMD_FINISHED_ERROR}). + */ + public JdbcBulkLoadBatchRequest(long qryId, int batchIdx, int cmd) { + this(qryId, batchIdx, cmd, new byte[0]); + } + + /** + * Creates the request with the specified parameters. + * + * @param qryId The query ID from the {@link JdbcBulkLoadAckResult}. + * @param batchIdx Index of the current batch starting with 0. + * @param cmd The command ({@link #CMD_CONTINUE}, {@link #CMD_FINISHED_EOF}, or {@link #CMD_FINISHED_ERROR}). + * @param data The data block (zero length is acceptable). + */ + public JdbcBulkLoadBatchRequest(long qryId, int batchIdx, int cmd, @NotNull byte[] data) { + super(BULK_LOAD_BATCH); + + this.qryId = qryId; + this.batchIdx = batchIdx; + + assert isCmdValid(cmd) : "Invalid command value: " + cmd; + this.cmd = cmd; + + this.data = data; + } + + /** + * Returns the original query ID. + * + * @return The original query ID. + */ + public long queryId() { + return qryId; + } + + /** + * Returns the batch index. + * + * @return The batch index. + */ + public long batchIdx() { + return batchIdx; + } + + /** + * Returns the command (see CMD_xxx constants for details). + * + * @return The command. + */ + public int cmd() { + return cmd; + } + + /** + * Returns the data. 
+ * + * @return data if data was not supplied + */ + @NotNull public byte[] data() { + return data; + } + + /** {@inheritDoc} */ + @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException { + super.writeBinary(writer); + + writer.writeLong(qryId); + writer.writeInt(batchIdx); + writer.writeInt(cmd); + writer.writeByteArray(data); + } + + /** {@inheritDoc} */ + @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException { + super.readBinary(reader); + + qryId = reader.readLong(); + batchIdx = reader.readInt(); + + int c = reader.readInt(); + if (!isCmdValid(c)) + throw new BinaryObjectException("Invalid command: " + cmd); + + cmd = c; + + data = reader.readByteArray(); + assert data != null; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(JdbcBulkLoadBatchRequest.class, this); + } + + /** + * Checks if the command value is valid. + * + * @param c The command value to check. + * @return True if valid, false otherwise. + */ + private static boolean isCmdValid(int c) { + return c >= CMD_CONTINUE && c <= CMD_FINISHED_EOF; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadProcessor.java new file mode 100644 index 0000000000000..97577917ff6c1 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadProcessor.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.jdbc; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteIllegalStateException; +import org.apache.ignite.internal.processors.bulkload.BulkLoadProcessor; +import org.apache.ignite.internal.processors.query.IgniteSQLException; + +import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest.CMD_CONTINUE; +import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest.CMD_FINISHED_EOF; +import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest.CMD_FINISHED_ERROR; + +/** + * JDBC wrapper around {@link BulkLoadProcessor} that provides extra logic. + * + * Unlike other "single shot" request-reply commands, the + * COPY command the client-server interaction looks like this: + * + *
        + * <pre>
        + * Thin JDBC client                            Server
        + *        |                                       |
        + *        |------- JdbcQueryExecuteRequest ------>|
        + *        |         with SQL copy command         |
        + *        |                                       |
        + *        |<---- JdbcBulkLoadAckResult -----------|
        + *        | with BulkLoadAckClientParameters      |
        + *        | containing file name and batch size.  |
        + *        |                                       |
        + * (open the file,                                |
        + *  read portions and send them)                  |
        + *        |                                       |
        + *        |------- JdbcBulkLoadBatchRequest #1 -->|
        + *        | with a portion of input file.         |
        + *        |                                       |
        + *        |<--- JdbcQueryExecuteResult -----------|
        + *        | with current update counter.          |
        + *        |                                       |
        + *        |------- JdbcBulkLoadBatchRequest #2 -->|
        + *        | with a portion of input file.         |
        + *        |                                       |
        + *        |<--- JdbcQueryExecuteResult -----------|
        + *        | with current update counter.          |
        + *        |                                       |
        + *        |------- JdbcBulkLoadBatchRequest #3 -->|
        + *        | with the LAST portion of input file.  |
        + *        |                                       |
        + *        |<--- JdbcQueryExecuteResult -----------|
        + *        | with the final update counter.        |
        + *        |                                       |
        + * (close the file)                               |
        + *        |                                       |
        + * </pre>
        + * + * In case of input file reading error, a flag is carried to the server: + * {@link JdbcBulkLoadBatchRequest#CMD_FINISHED_ERROR} and the processing + * is aborted on the both sides. + */ +public class JdbcBulkLoadProcessor { + /** A core processor that handles incoming data packets. */ + private final BulkLoadProcessor processor; + + /** Next batch index (for a very simple check that all batches were delivered to us). */ + protected long nextBatchIdx; + + /** + * Creates a JDBC-specific adapter for bulk load processor. + * + * @param processor Bulk load processor from the core to delegate calls to. + */ + public JdbcBulkLoadProcessor(BulkLoadProcessor processor) { + this.processor = processor; + nextBatchIdx = 0; + } + + /** + * Completely processes a bulk load batch request. + * + * Calls {@link BulkLoadProcessor} wrapping around some JDBC-specific logic + * (commands, bulk load batch index checking). + * + * @param req The current request. + */ + public void processBatch(JdbcBulkLoadBatchRequest req) + throws IgniteCheckedException { + if (nextBatchIdx != req.batchIdx()) + throw new IgniteSQLException("Batch #" + (nextBatchIdx + 1) + + " is missing. Received #" + req.batchIdx() + " instead."); + + nextBatchIdx++; + + switch (req.cmd()) { + case CMD_FINISHED_EOF: + processor.processBatch(req.data(), true); + + break; + + case CMD_CONTINUE: + processor.processBatch(req.data(), false); + + break; + + case CMD_FINISHED_ERROR: + break; + + default: + throw new IgniteIllegalStateException("Command was not recognized: " + req.cmd()); + } + } + + /** + * Closes the underlying objects. + * Currently we don't handle normal termination vs. abort. + */ + public void close() throws Exception { + processor.close(); + + nextBatchIdx = -1; + } + + /** + * Provides update counter for sending in the {@link JdbcBatchExecuteResult}. + * + * @return The update counter for sending in {@link JdbcBatchExecuteResult}. 
+ */ + public long updateCnt() { + return processor.outputStreamer().updateCnt(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequest.java index 385924cde5ac9..22522ad8b6ec2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequest.java @@ -60,6 +60,8 @@ public class JdbcRequest extends ClientListenerRequestNoId implements JdbcRawBin /** Get schemas metadata request. */ static final byte META_SCHEMAS = 12; + /** Send a batch of a data from client to server. */ + static final byte BULK_LOAD_BATCH = 13; /** Request type. */ private byte type; @@ -154,6 +156,11 @@ public static JdbcRequest readRequest(BinaryReaderExImpl reader) throws BinaryOb break; + case BULK_LOAD_BATCH: + req = new JdbcBulkLoadBatchRequest(); + + break; + default: throw new IgniteException("Unknown SQL listener request ID: [request ID=" + reqType + ']'); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java index e3b6f5b5ef80e..941d4b4ba036e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java @@ -30,14 +30,17 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.cache.query.BulkLoadContextCursor; import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.internal.GridKernalContext; import 
org.apache.ignite.internal.IgniteVersionUtils; import org.apache.ignite.internal.binary.BinaryWriterExImpl; -import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; +import org.apache.ignite.internal.processors.bulkload.BulkLoadAckClientParameters; +import org.apache.ignite.internal.processors.bulkload.BulkLoadProcessor; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion; import org.apache.ignite.internal.processors.odbc.ClientListenerRequest; import org.apache.ignite.internal.processors.odbc.ClientListenerRequestHandler; @@ -53,8 +56,12 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; +import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest.CMD_CONTINUE; +import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest.CMD_FINISHED_EOF; +import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest.CMD_FINISHED_ERROR; import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcConnectionContext.VER_2_3_0; import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.BATCH_EXEC; +import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.BULK_LOAD_BATCH; import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.META_COLUMNS; import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.META_INDEXES; import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.META_PARAMS; @@ -88,6 +95,9 @@ public class JdbcRequestHandler implements ClientListenerRequestHandler { /** Current queries cursors. 
*/ private final ConcurrentHashMap qryCursors = new ConcurrentHashMap<>(); + /** Current bulk load processors. */ + private final ConcurrentHashMap bulkLoadRequests = new ConcurrentHashMap<>(); + /** Distributed joins flag. */ private final boolean distributedJoins; @@ -192,6 +202,9 @@ public JdbcRequestHandler(GridKernalContext ctx, GridSpinBusyLock busyLock, int case META_SCHEMAS: return getSchemas((JdbcMetaSchemasRequest)req); + + case BULK_LOAD_BATCH: + return processBulkLoadFileBatch((JdbcBulkLoadBatchRequest)req); } return new JdbcResponse(IgniteQueryErrorCode.UNSUPPORTED_OPERATION, @@ -202,6 +215,46 @@ public JdbcRequestHandler(GridKernalContext ctx, GridSpinBusyLock busyLock, int } } + /** + * Processes a file batch sent from client as part of bulk load COPY command. + * + * @param req Request object with a batch of a file received from client. + * @return Response to send to the client. + */ + private ClientListenerResponse processBulkLoadFileBatch(JdbcBulkLoadBatchRequest req) { + JdbcBulkLoadProcessor processor = bulkLoadRequests.get(req.queryId()); + + if (ctx == null) + return new JdbcResponse(IgniteQueryErrorCode.UNEXPECTED_OPERATION, "Unknown query ID: " + + req.queryId() + ". 
Bulk load session may have been reclaimed due to timeout."); + + try { + processor.processBatch(req); + + switch (req.cmd()) { + case CMD_FINISHED_ERROR: + case CMD_FINISHED_EOF: + bulkLoadRequests.remove(req.queryId()); + + processor.close(); + + break; + + case CMD_CONTINUE: + break; + + default: + throw new IllegalArgumentException(); + } + + return new JdbcResponse(new JdbcQueryExecuteResult(req.queryId(), processor.updateCnt())); + } + catch (Exception e) { + U.error(null, "Error processing file batch", e); + return new JdbcResponse(IgniteQueryErrorCode.UNKNOWN, "Server error: " + e); + } + } + /** {@inheritDoc} */ @Override public ClientListenerResponse handleException(Exception e, ClientListenerRequest req) { return exceptionToResult(e); @@ -232,6 +285,17 @@ public void onDisconnect() { { for (JdbcQueryCursor cursor : qryCursors.values()) cursor.close(); + + for (JdbcBulkLoadProcessor processor : bulkLoadRequests.values()) { + try { + processor.close(); + } + catch (Exception e) { + U.error(null, "Error closing JDBC bulk load processor.", e); + } + } + + bulkLoadRequests.clear(); } finally { busyLock.leaveBusy(); @@ -305,10 +369,22 @@ private JdbcResponse executeQuery(JdbcQueryExecuteRequest req) { List>> results = ctx.query().querySqlFieldsNoCache(qry, true, protocolVer.compareTo(VER_2_3_0) < 0); - if (results.size() == 1) { - FieldsQueryCursor> qryCur = results.get(0); + FieldsQueryCursor> fieldsCur = results.get(0); + + if (fieldsCur instanceof BulkLoadContextCursor) { + BulkLoadContextCursor blCur = (BulkLoadContextCursor) fieldsCur; - JdbcQueryCursor cur = new JdbcQueryCursor(qryId, req.pageSize(), req.maxRows(), (QueryCursorImpl)qryCur); + BulkLoadProcessor blProcessor = blCur.bulkLoadProcessor(); + BulkLoadAckClientParameters clientParams = blCur.clientParams(); + + bulkLoadRequests.put(qryId, new JdbcBulkLoadProcessor(blProcessor)); + + return new JdbcResponse(new JdbcBulkLoadAckResult(qryId, clientParams)); + } + + if (results.size() == 1) { + 
JdbcQueryCursor cur = new JdbcQueryCursor(qryId, req.pageSize(), req.maxRows(), + (QueryCursorImpl)fieldsCur); JdbcQueryExecuteResult res; @@ -497,10 +573,13 @@ private ClientListenerResponse executeBatch(JdbcBatchExecuteRequest req) { qry.setSchema(schemaName); - QueryCursorImpl> qryCur = (QueryCursorImpl>)ctx.query() - .querySqlFieldsNoCache(qry, true, true).get(0); + FieldsQueryCursor> qryCur = F.first(ctx.query() + .querySqlFieldsNoCache(qry, true, true)); + + if (qryCur instanceof BulkLoadContextCursor) + throw new IgniteSQLException("COPY command cannot be executed in batch mode."); - assert !qryCur.isQuery(); + assert !((QueryCursorImpl)qryCur).isQuery(); List> items = qryCur.getAll(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java index 6d460e658a900..952aa88feba0f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java @@ -62,6 +62,9 @@ public class JdbcResult implements JdbcRawBinarylizable { /** Columns metadata result V2. */ static final byte META_COLUMNS_V2 = 14; + /** A request to send file from client to server. */ + static final byte BULK_LOAD_ACK = 15; + /** Success status. 
*/ private byte type; @@ -155,6 +158,11 @@ public static JdbcResult readResult(BinaryReaderExImpl reader) throws BinaryObje break; + case BULK_LOAD_ACK: + res = new JdbcBulkLoadAckResult(); + + break; + default: throw new IgniteException("Unknown SQL listener request ID: [request ID=" + resId + ']'); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlKeyword.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlKeyword.java index ac826cc50262f..64530a23517ae 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlKeyword.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlKeyword.java @@ -39,6 +39,9 @@ public class SqlKeyword { /** Keyword: BOOL. */ public static final String BOOL = "BOOL"; + /** Keyword: BATCH_SIZE. */ + public static final String BATCH_SIZE = "BATCH_SIZE"; + /** Keyword: BOOLEAN. */ public static final String BOOLEAN = "BOOLEAN"; @@ -51,6 +54,9 @@ public class SqlKeyword { /** Keyword: CHARACTER. */ public static final String CHARACTER = "CHARACTER"; + /** Keyword: COPY. */ + public static final String COPY = "COPY"; + /** Keyword: CREATE. */ public static final String CREATE = "CREATE"; @@ -87,6 +93,12 @@ public class SqlKeyword { /** Keyword: FLOAT8. */ public static final String FLOAT8 = "FLOAT8"; + /** Keyword: FORMAT. */ + public static final String FORMAT = "FORMAT"; + + /** Keyword: FROM. */ + public static final String FROM = "FROM"; + /** Keyword: FULLTEXT. */ public static final String FULLTEXT = "FULLTEXT"; @@ -114,6 +126,9 @@ public class SqlKeyword { /** Keyword: INTEGER. */ public static final String INTEGER = "INTEGER"; + /** Keyword: INTO. */ + public static final String INTO = "INTO"; + /** Keyword: KEY. 
*/ public static final String KEY = "KEY"; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java index 19f526d146ea6..8af5326a828e1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java @@ -17,11 +17,13 @@ package org.apache.ignite.internal.sql; +import org.apache.ignite.internal.sql.command.SqlBulkLoadCommand; import org.apache.ignite.internal.sql.command.SqlCommand; import org.apache.ignite.internal.sql.command.SqlCreateIndexCommand; import org.apache.ignite.internal.sql.command.SqlDropIndexCommand; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.internal.sql.SqlKeyword.COPY; import static org.apache.ignite.internal.sql.SqlKeyword.CREATE; import static org.apache.ignite.internal.sql.SqlKeyword.DROP; import static org.apache.ignite.internal.sql.SqlKeyword.HASH; @@ -98,6 +100,11 @@ private SqlCommand nextCommand0() { case DROP: cmd = processDrop(); + break; + + case COPY: + cmd = processCopy(); + break; } @@ -109,7 +116,7 @@ private SqlCommand nextCommand0() { return cmd; } else - throw errorUnexpectedToken(lex, CREATE, DROP); + throw errorUnexpectedToken(lex, CREATE, DROP, COPY); case QUOTED: case MINUS: @@ -123,6 +130,15 @@ private SqlCommand nextCommand0() { } } + /** + * Processes COPY command. + * + * @return The {@link SqlBulkLoadCommand} command. + */ + private SqlCommand processCopy() { + return new SqlBulkLoadCommand().parse(lex); + } + /** * Process CREATE keyword. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlBulkLoadCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlBulkLoadCommand.java new file mode 100644 index 0000000000000..e5246d5bd57b5 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlBulkLoadCommand.java @@ -0,0 +1,273 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.sql.command; + +import org.apache.ignite.internal.processors.bulkload.BulkLoadCsvFormat; +import org.apache.ignite.internal.processors.bulkload.BulkLoadFormat; +import org.apache.ignite.internal.processors.bulkload.BulkLoadAckClientParameters; +import org.apache.ignite.internal.sql.SqlKeyword; +import org.apache.ignite.internal.sql.SqlLexer; +import org.apache.ignite.internal.sql.SqlLexerTokenType; +import org.apache.ignite.internal.util.typedef.internal.S; + +import java.util.ArrayList; +import java.util.List; + +import static org.apache.ignite.internal.sql.SqlParserUtils.error; +import static org.apache.ignite.internal.sql.SqlParserUtils.parseIdentifier; +import static org.apache.ignite.internal.sql.SqlParserUtils.parseInt; +import static org.apache.ignite.internal.sql.SqlParserUtils.parseQualifiedIdentifier; +import static org.apache.ignite.internal.sql.SqlParserUtils.skipCommaOrRightParenthesis; +import static org.apache.ignite.internal.sql.SqlParserUtils.skipIfMatches; +import static org.apache.ignite.internal.sql.SqlParserUtils.skipIfMatchesKeyword; + +/** + * A parser for a COPY command (called 'bulk load' in the code, since word 'copy' is too generic). + */ +public class SqlBulkLoadCommand implements SqlCommand { + /** Local file name to send from client to server. */ + private String locFileName; + + /** Schema name + table name. */ + private SqlQualifiedName tblQName; + + /** User-specified list of columns. */ + private List cols; + + /** File format. */ + private BulkLoadFormat inputFormat; + + /** Batch size (size of portion of a file sent in each sub-request). */ + private Integer batchSize; + + /** + * Parses the command. + * + * @param lex The lexer. + * @return The parsed command object. 
+ */ + @Override public SqlCommand parse(SqlLexer lex) { + skipIfMatchesKeyword(lex, SqlKeyword.FROM); // COPY keyword is already parsed + + parseFileName(lex); + + parseTableName(lex); + + parseColumns(lex); + + parseFormat(lex); + + parseParameters(lex); + + return this; + } + + /** + * Parses the file name. + * + * @param lex The lexer. + */ + private void parseFileName(SqlLexer lex) { + locFileName = parseIdentifier(lex); + } + + /** + * Parses the schema and table names. + * + * @param lex The lexer. + */ + private void parseTableName(SqlLexer lex) { + skipIfMatchesKeyword(lex, SqlKeyword.INTO); + + tblQName = parseQualifiedIdentifier(lex); + } + + /** + * Parses the list of columns. + * + * @param lex The lexer. + */ + private void parseColumns(SqlLexer lex) { + skipIfMatches(lex, SqlLexerTokenType.PARENTHESIS_LEFT); + + cols = new ArrayList<>(); + + do { + cols.add(parseColumn(lex)); + } + while (!skipCommaOrRightParenthesis(lex)); + } + + /** + * Parses column clause. + * + * @param lex The lexer. + * @return The column name. + */ + private String parseColumn(SqlLexer lex) { + return parseIdentifier(lex); + } + + /** + * Parses the format clause. + * + * @param lex The lexer. + */ + private void parseFormat(SqlLexer lex) { + skipIfMatchesKeyword(lex, SqlKeyword.FORMAT); + + String name = parseIdentifier(lex); + + switch (name.toUpperCase()) { + case BulkLoadCsvFormat.NAME: + BulkLoadCsvFormat fmt = new BulkLoadCsvFormat(); + + // IGNITE-7537 will introduce user-defined values + fmt.lineSeparator(BulkLoadCsvFormat.DEFAULT_LINE_SEPARATOR); + fmt.fieldSeparator(BulkLoadCsvFormat.DEFAULT_FIELD_SEPARATOR); + fmt.quoteChars(BulkLoadCsvFormat.DEFAULT_QUOTE_CHARS); + fmt.commentChars(BulkLoadCsvFormat.DEFAULT_COMMENT_CHARS); + fmt.escapeChars(BulkLoadCsvFormat.DEFAULT_ESCAPE_CHARS); + + inputFormat = fmt; + + break; + + default: + throw error(lex, "Unknown format name: " + name + + ". 
Currently supported format is " + BulkLoadCsvFormat.NAME); + } + } + + /** + * Parses the optional parameters. + * + * @param lex The lexer. + */ + private void parseParameters(SqlLexer lex) { + while (lex.lookAhead().tokenType() == SqlLexerTokenType.DEFAULT) { + switch (lex.lookAhead().token()) { + case SqlKeyword.BATCH_SIZE: + lex.shift(); + + int sz = parseInt(lex); + + if (!BulkLoadAckClientParameters.isValidBatchSize(sz)) + throw error(lex, BulkLoadAckClientParameters.batchSizeErrorMsg(sz)); + + batchSize = sz; + + break; + + default: + return; + } + } + } + + /** + * Returns the schemaName. + * + * @return schemaName. + */ + @Override public String schemaName() { + return tblQName.schemaName(); + } + + /** {@inheritDoc} */ + @Override public void schemaName(String schemaName) { + tblQName.schemaName(schemaName); + } + + /** + * Returns the table name. + * + * @return The table name + */ + public String tableName() { + return tblQName.name(); + } + + /** + * Sets the table name + * + * @param tblName The table name. + */ + public void tableName(String tblName) { + tblQName.name(tblName); + } + + /** + * Returns the local file name. + * + * @return The local file name. + */ + public String localFileName() { + return locFileName; + } + + /** + * Sets the local file name. + * + * @param locFileName The local file name. + */ + public void localFileName(String locFileName) { + this.locFileName = locFileName; + } + + /** + * Returns the list of columns. + * + * @return The list of columns. + */ + public List columns() { + return cols; + } + + /** + * Returns the input file format. + * + * @return The input file format. + */ + public BulkLoadFormat inputFormat() { + return inputFormat; + } + + /** + * Returns the batch size. + * + * @return The batch size. + */ + public Integer batchSize() { + return batchSize; + } + + /** + * Sets the batch size. + * + * @param batchSize The batch size. 
+ */ + public void batchSize(int batchSize) { + this.batchSize = batchSize; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(SqlBulkLoadCommand.class, this); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserBulkLoadSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserBulkLoadSelfTest.java new file mode 100644 index 0000000000000..b5cd55be2169f --- /dev/null +++ b/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserBulkLoadSelfTest.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.sql; + +/** + * Tests for SQL parser: COPY command. + */ +public class SqlParserBulkLoadSelfTest extends SqlParserAbstractSelfTest { + /** + * Tests for COPY command. + * + * @throws Exception If any of sub-tests was failed. 
+ */ + public void testCopy() { + assertParseError(null, + "copy grom \"any.file\" into Person (_key, age, firstName, lastName) format csv", + "Unexpected token: \"GROM\" (expected: \"FROM\")"); + + assertParseError(null, + "copy from into Person (_key, age, firstName, lastName) format csv", + "Unexpected token: \"INTO\" (expected: \"[identifier]\""); + + assertParseError(null, + "copy from any.file into Person (_key, age, firstName, lastName) format csv", + "Unexpected token: \".\" (expected: \"INTO\""); + + assertParseError(null, + "copy from \"any.file\" to Person (_key, age, firstName, lastName) format csv", + "Unexpected token: \"TO\" (expected: \"INTO\")"); + + // Column list + + assertParseError(null, + "copy from \"any.file\" into Person () format csv", + "Unexpected token: \")\" (expected: \"[identifier]\")"); + + assertParseError(null, + "copy from \"any.file\" into Person (,) format csv", + "Unexpected token: \",\" (expected: \"[identifier]\")"); + + assertParseError(null, + "copy from \"any.file\" into Person format csv", + "Unexpected token: \"FORMAT\" (expected: \"(\")"); + + // FORMAT + + assertParseError(null, + "copy from \"any.file\" into Person (_key, age, firstName, lastName)", + "Unexpected end of command (expected: \"FORMAT\")"); + + assertParseError(null, + "copy from \"any.file\" into Person (_key, age, firstName, lastName) format lsd", + "Unknown format name: LSD"); + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java index fc4b94498bd9d..deb262c8fe857 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java @@ -37,8 +37,15 @@ import org.apache.ignite.IgniteDataStreamer; import 
org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.cache.query.BulkLoadContextCursor; +import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.processors.bulkload.BulkLoadAckClientParameters; +import org.apache.ignite.internal.processors.bulkload.BulkLoadCacheWriter; +import org.apache.ignite.internal.processors.bulkload.BulkLoadParser; +import org.apache.ignite.internal.processors.bulkload.BulkLoadProcessor; +import org.apache.ignite.internal.processors.bulkload.BulkLoadStreamerWriter; import org.apache.ignite.internal.processors.cache.CacheOperationContext; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; @@ -55,12 +62,17 @@ import org.apache.ignite.internal.processors.query.h2.dml.UpdateMode; import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan; import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlanBuilder; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser; +import org.apache.ignite.internal.sql.command.SqlBulkLoadCommand; +import org.apache.ignite.internal.sql.command.SqlCommand; import org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashMap; +import org.apache.ignite.internal.util.lang.IgniteClosureX; import org.apache.ignite.internal.util.lang.IgniteSingletonIterator; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T3; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteInClosure; @@ -696,6 +708,67 @@ 
UpdateResult mapDistributedUpdate(String schemaName, PreparedStatement stmt, Sql return updateSqlFields(schemaName, c, GridSqlQueryParser.prepared(stmt), fldsQry, local, filter, cancel); } + /** + * Runs a DML statement for which we have internal command executor. + * + * @param sql The SQL command text to execute. + * @param cmd The command to execute. + * @return The cursor returned by the statement. + * @throws IgniteSQLException If failed. + */ + public FieldsQueryCursor> runNativeDmlStatement(String sql, SqlCommand cmd) { + try { + if (cmd instanceof SqlBulkLoadCommand) + return processBulkLoadCommand((SqlBulkLoadCommand)cmd); + else + throw new IgniteSQLException("Unsupported DML operation: " + sql, + IgniteQueryErrorCode.UNSUPPORTED_OPERATION); + + } + catch (IgniteSQLException e) { + throw e; + } + catch (Exception e) { + throw new IgniteSQLException("Unexpected DML operation failure: " + e.getMessage(), e); + } + } + + /** + * Process bulk load COPY command. + * + * @param cmd The command. + * @return The context (which is the result of the first request/response). + * @throws IgniteCheckedException If something failed. 
+ */ + public FieldsQueryCursor> processBulkLoadCommand(SqlBulkLoadCommand cmd) throws IgniteCheckedException { + if (cmd.batchSize() == null) + cmd.batchSize(BulkLoadAckClientParameters.DEFAULT_BATCH_SIZE); + + GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName()); + + if (tbl == null) + throw new IgniteSQLException("Table does not exist: " + cmd.tableName(), + IgniteQueryErrorCode.TABLE_NOT_FOUND); + + UpdatePlan plan = UpdatePlanBuilder.planForBulkLoad(cmd, tbl); + + IgniteClosureX, IgniteBiTuple> dataConverter = new BulkLoadDataConverter(plan); + + GridCacheContext cache = tbl.cache(); + + IgniteDataStreamer streamer = cache.grid().dataStreamer(cache.name()); + + BulkLoadCacheWriter outputWriter = new BulkLoadStreamerWriter(streamer); + + BulkLoadParser inputParser = BulkLoadParser.createParser(cmd.inputFormat()); + + BulkLoadProcessor processor = new BulkLoadProcessor(inputParser, dataConverter, outputWriter); + + BulkLoadAckClientParameters params = new BulkLoadAckClientParameters(cmd.localFileName(), cmd.batchSize()); + + return new BulkLoadContextCursor(processor, params); + } + /** */ private final static class InsertEntryProcessor implements EntryProcessor { /** Value to set. */ @@ -810,4 +883,31 @@ static void checkUpdateResult(UpdateResult r) { } } + /** + * Converts a row of values to actual key+value using {@link UpdatePlan#processRow(List)}. + */ + private static class BulkLoadDataConverter extends IgniteClosureX, IgniteBiTuple> { + /** Update plan to convert incoming rows. */ + private final UpdatePlan plan; + + /** + * Creates the converter with the given update plan. + * + * @param plan The update plan to use. + */ + private BulkLoadDataConverter(UpdatePlan plan) { + this.plan = plan; + } + + /** + * Converts the record to a key+value. + * + * @param record The record to convert. + * @return The key+value. + * @throws IgniteCheckedException If conversion failed for some reason. 
+ */ + @Override public IgniteBiTuple applyx(List record) throws IgniteCheckedException { + return plan.processRow(record); + } + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 381a8dcde7378..82978aa79a842 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -43,12 +43,14 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import java.util.regex.Pattern; import javax.cache.Cache; import javax.cache.CacheException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteDataStreamer; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cache.query.FieldsQueryCursor; import org.apache.ignite.cache.query.QueryCancelledException; import org.apache.ignite.cache.query.QueryCursor; @@ -73,7 +75,6 @@ import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture; -import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.query.CacheQueryPartitionInfo; import org.apache.ignite.internal.processors.cache.query.GridCacheQueryMarshallable; @@ -105,10 +106,10 @@ import org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode; import 
org.apache.ignite.internal.processors.query.h2.opt.GridH2DefaultTableEngine; import org.apache.ignite.internal.processors.query.h2.opt.GridH2IndexBase; +import org.apache.ignite.internal.processors.query.h2.opt.GridH2PlainRowFactory; import org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row; import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; -import org.apache.ignite.internal.processors.query.h2.opt.GridH2PlainRowFactory; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuerySplitter; @@ -118,7 +119,9 @@ import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitor; import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitorClosure; import org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor; +import org.apache.ignite.internal.sql.SqlParseException; import org.apache.ignite.internal.sql.SqlParser; +import org.apache.ignite.internal.sql.command.SqlBulkLoadCommand; import org.apache.ignite.internal.sql.command.SqlCommand; import org.apache.ignite.internal.sql.command.SqlCreateIndexCommand; import org.apache.ignite.internal.sql.command.SqlDropIndexCommand; @@ -186,6 +189,9 @@ */ @SuppressWarnings({"UnnecessaryFullyQualifiedName", "NonFinalStaticVariableUsedInClassInitialization"}) public class IgniteH2Indexing implements GridQueryIndexing { + public static final Pattern INTERNAL_CMD_RE = Pattern.compile( + "^(create|drop)\\s+index|^copy", Pattern.CASE_INSENSITIVE); + /* * Register IO for indexes. */ @@ -1340,7 +1346,7 @@ UpdateResult runDistributedUpdate( */ private List>> tryQueryDistributedSqlFieldsNative(String schemaName, SqlFieldsQuery qry) { // Heuristic check for fast return. 
- if (!qry.getSql().toUpperCase().contains("INDEX")) + if (!INTERNAL_CMD_RE.matcher(qry.getSql().trim()).find()) return null; // Parse. @@ -1355,8 +1361,10 @@ private List>> tryQueryDistributedSqlFieldsNative(Stri if (parser.nextCommand() != null) return null; - // Only CREATE/DROP INDEX is supported for now. - if (!(cmd instanceof SqlCreateIndexCommand || cmd instanceof SqlDropIndexCommand)) + // Currently supported commands are: CREATE/DROP INDEX/COPY + if (!(cmd instanceof SqlCreateIndexCommand + || cmd instanceof SqlDropIndexCommand + || cmd instanceof SqlBulkLoadCommand)) return null; } catch (Exception e) { @@ -1364,21 +1372,33 @@ private List>> tryQueryDistributedSqlFieldsNative(Stri if (log.isDebugEnabled()) log.debug("Failed to parse SQL with native parser [qry=" + qry.getSql() + ", err=" + e + ']'); - return null; + if (!IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK)) + return null; + + int code = e instanceof SqlParseException ? ((SqlParseException)e).code() : IgniteQueryErrorCode.PARSING; + + throw new IgniteSQLException("Failed to parse DDL statement: " + qry.getSql() + ": " + e.getMessage(), + code, e); } // Execute. 
- try { - List>> ress = new ArrayList<>(1); + if (cmd instanceof SqlBulkLoadCommand) { + FieldsQueryCursor> cursor = dmlProc.runNativeDmlStatement(qry.getSql(), cmd); - FieldsQueryCursor> res = ddlProc.runDdlStatement(qry.getSql(), cmd); + return Collections.singletonList(cursor); + } + else {try { + List>> ress = new ArrayList<>(1); + FieldsQueryCursor> cursor = ddlProc.runDdlStatement(qry.getSql(), cmd); - ress.add(res); + ress.add(cursor); return ress; } catch (IgniteCheckedException e) { - throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + qry.getSql() + ']', e); + throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + qry.getSql() + "]: " + + e.getMessage(), e); + } } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java index 3c8d9feae7450..a6cc93e2e7832 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java @@ -57,6 +57,7 @@ import org.apache.ignite.internal.sql.command.SqlIndexColumn; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; import org.h2.command.Prepared; import org.h2.command.ddl.AlterTableAlterColumn; import org.h2.command.ddl.CreateIndex; @@ -378,6 +379,8 @@ else if (stmt0 instanceof GridSqlAlterTableAddColumn) { return resCur; } catch (SchemaOperationException e) { + U.error(null, "DDL operation failure", e); + throw convert(e); } catch (IgniteSQLException e) { diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdateMode.java 
b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdateMode.java index 0440648c193ec..d9c627a9dc9c0 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdateMode.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdateMode.java @@ -22,15 +22,18 @@ * or UPDATE/DELETE from subquery or literals/params based. */ public enum UpdateMode { - /** */ + /** MERGE command. */ MERGE, - /** */ + /** INSERT command. */ INSERT, - /** */ + /** UPDATE command. */ UPDATE, - /** */ + /** DELETE command. */ DELETE, + + /** COPY command. */ + BULK_LOAD } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java index 31dc52de30ae0..1dd6c45000706 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java @@ -38,6 +38,7 @@ import java.util.List; import java.util.Map; +import static org.apache.ignite.internal.processors.query.h2.dml.UpdateMode.BULK_LOAD; import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.DEFAULT_COLUMNS_COUNT; /** @@ -174,6 +175,10 @@ public UpdatePlan( * @throws IgniteCheckedException if failed. 
*/ public IgniteBiTuple processRow(List row) throws IgniteCheckedException { + if (mode != BULK_LOAD && row.size() != colNames.length) + throw new IgniteSQLException("Not enough values in a row: " + row.size() + " instead of " + colNames.length, + IgniteQueryErrorCode.ENTRY_PROCESSING); + GridH2RowDescriptor rowDesc = tbl.rowDescriptor(); GridQueryTypeDescriptor desc = rowDesc.type(); @@ -197,7 +202,8 @@ public UpdatePlan( if (key == null) { if (F.isEmpty(desc.keyFieldName())) - throw new IgniteSQLException("Key for INSERT or MERGE must not be null", IgniteQueryErrorCode.NULL_KEY); + throw new IgniteSQLException("Key for INSERT, COPY, or MERGE must not be null", + IgniteQueryErrorCode.NULL_KEY); else throw new IgniteSQLException("Null value is not allowed for column '" + desc.keyFieldName() + "'", IgniteQueryErrorCode.NULL_KEY); @@ -205,16 +211,18 @@ public UpdatePlan( if (val == null) { if (F.isEmpty(desc.valueFieldName())) - throw new IgniteSQLException("Value for INSERT, MERGE, or UPDATE must not be null", + throw new IgniteSQLException("Value for INSERT, COPY, MERGE, or UPDATE must not be null", IgniteQueryErrorCode.NULL_VALUE); else throw new IgniteSQLException("Null value is not allowed for column '" + desc.valueFieldName() + "'", IgniteQueryErrorCode.NULL_VALUE); } + int actualColCnt = Math.min(colNames.length, row.size()); + Map newColVals = new HashMap<>(); - for (int i = 0; i < colNames.length; i++) { + for (int i = 0; i < actualColCnt; i++) { if (i == keyColIdx || i == valColIdx) continue; @@ -231,14 +239,14 @@ public UpdatePlan( // We update columns in the order specified by the table for a reason - table's // column order preserves their precedence for correct update of nested properties. - Column[] cols = tbl.getColumns(); + Column[] tblCols = tbl.getColumns(); // First 3 columns are _key, _val and _ver. Skip 'em. 
- for (int i = DEFAULT_COLUMNS_COUNT; i < cols.length; i++) { + for (int i = DEFAULT_COLUMNS_COUNT; i < tblCols.length; i++) { if (tbl.rowDescriptor().isKeyValueOrVersionColumn(i)) continue; - String colName = cols[i].getName(); + String colName = tblCols[i].getName(); if (!newColVals.containsKey(colName)) continue; diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java index a5516392dca31..9cd8c59a46fd6 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java @@ -53,6 +53,7 @@ import org.apache.ignite.internal.processors.query.h2.sql.GridSqlTable; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlUnion; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlUpdate; +import org.apache.ignite.internal.sql.command.SqlBulkLoadCommand; import org.apache.ignite.internal.util.GridUnsafe; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.CU; @@ -373,6 +374,90 @@ else if (stmt instanceof GridSqlDelete) { } } + /** + * Prepare update plan for COPY command (AKA bulk load). + * + * @param cmd Bulk load command + * @return The update plan for this command. + * @throws IgniteCheckedException if failed. 
+ */ + @SuppressWarnings("ConstantConditions") + public static UpdatePlan planForBulkLoad(SqlBulkLoadCommand cmd, GridH2Table tbl) throws IgniteCheckedException { + GridH2RowDescriptor desc = tbl.rowDescriptor(); + + if (desc == null) + throw new IgniteSQLException("Row descriptor undefined for table '" + tbl.getName() + "'", + IgniteQueryErrorCode.NULL_TABLE_DESCRIPTOR); + + GridCacheContext cctx = desc.context(); + + List cols = cmd.columns(); + + if (cols == null) + throw new IgniteSQLException("Columns are not defined", IgniteQueryErrorCode.NULL_TABLE_DESCRIPTOR); + + String[] colNames = new String[cols.size()]; + + int[] colTypes = new int[cols.size()]; + + int keyColIdx = -1; + int valColIdx = -1; + + boolean hasKeyProps = false; + boolean hasValProps = false; + + for (int i = 0; i < cols.size(); i++) { + String colName = cols.get(i); + + colNames[i] = colName; + + Column h2Col = tbl.getColumn(colName); + + colTypes[i] = h2Col.getType(); + int colId = h2Col.getColumnId(); + + if (desc.isKeyColumn(colId)) { + keyColIdx = i; + continue; + } + + if (desc.isValueColumn(colId)) { + valColIdx = i; + continue; + } + + GridQueryProperty prop = desc.type().property(colName); + + assert prop != null : "Property '" + colName + "' not found."; + + if (prop.key()) + hasKeyProps = true; + else + hasValProps = true; + } + + KeyValueSupplier keySupplier = createSupplier(cctx, desc.type(), keyColIdx, hasKeyProps, + true, false); + KeyValueSupplier valSupplier = createSupplier(cctx, desc.type(), valColIdx, hasValProps, + false, false); + + return new UpdatePlan( + UpdateMode.BULK_LOAD, + tbl, + colNames, + colTypes, + keySupplier, + valSupplier, + keyColIdx, + valColIdx, + null, + true, + 0, + null, + null + ); + } + /** * Detect appropriate method of instantiating key or value (take from param, create binary builder, * invoke default ctor, or allocate). 
diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java index 7b3b271a33c52..bbbe3b8823e5b 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java @@ -155,6 +155,7 @@ import org.apache.ignite.internal.processors.query.h2.sql.H2CompareBigQueryDistributedJoinsTest; import org.apache.ignite.internal.processors.query.h2.sql.H2CompareBigQueryTest; import org.apache.ignite.internal.processors.sql.SqlConnectorConfigurationValidationSelfTest; +import org.apache.ignite.internal.sql.SqlParserBulkLoadSelfTest; import org.apache.ignite.internal.sql.SqlParserCreateIndexSelfTest; import org.apache.ignite.internal.sql.SqlParserDropIndexSelfTest; import org.apache.ignite.spi.communication.tcp.GridOrderedMessageCancelSelfTest; @@ -173,6 +174,7 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(SqlParserCreateIndexSelfTest.class); suite.addTestSuite(SqlParserDropIndexSelfTest.class); + suite.addTestSuite(SqlParserBulkLoadSelfTest.class); suite.addTestSuite(SqlConnectorConfigurationValidationSelfTest.class); suite.addTestSuite(ClientConnectorConfigurationValidationSelfTest.class); diff --git a/parent/pom.xml b/parent/pom.xml index cb335d1232214..121d77b4b714b 100644 --- a/parent/pom.xml +++ b/parent/pom.xml @@ -810,6 +810,7 @@ **/*index*.md **/*.timestamp **/*.iml + **/*.csv **/pom-installed.xml **/keystore/*.jks **/keystore/*.pem @@ -837,8 +838,7 @@ **/books/*.txt src/main/java/org/apache/ignite/examples/streaming/wordcount/*.txt examples/src/main/java/org/apache/ignite/examples/streaming/wordcount/*.txt - src/main/resources/person.csv - examples/src/main/resources/person.csv + **/resources/datasets/**/* src/main/java/org/jetbrains/annotations/*.java 
dev-tools/IGNITE-*.patch dev-tools/.gradle/**/* From ab6e2be6c1007b473294e71418897a145db57fd2 Mon Sep 17 00:00:00 2001 From: Igor Seliverstov Date: Wed, 7 Feb 2018 14:28:04 +0300 Subject: [PATCH 233/243] Java 1.7 compatible fix --- .../org/apache/ignite/cache/query/BulkLoadContextCursor.java | 2 +- .../processors/bulkload/pipeline/StrListAppenderBlock.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/BulkLoadContextCursor.java b/modules/core/src/main/java/org/apache/ignite/cache/query/BulkLoadContextCursor.java index b7fdec3e8d07f..9d348db29c376 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/query/BulkLoadContextCursor.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/query/BulkLoadContextCursor.java @@ -69,7 +69,7 @@ public BulkLoadAckClientParameters clientParams() { /** {@inheritDoc} */ @Override public List> getAll() { - return Collections.singletonList(Arrays.asList(processor, clientParams)); + return (List)Collections.singletonList(Arrays.asList(processor, clientParams)); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/StrListAppenderBlock.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/StrListAppenderBlock.java index 91cbc1e11117d..68668398955ef 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/StrListAppenderBlock.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/StrListAppenderBlock.java @@ -47,6 +47,6 @@ public void output(List> output) { /** {@inheritDoc} */ @Override public void accept(String[] elements, boolean isLastPortion) { - output.add(Arrays.asList(elements)); + output.add(Arrays.asList(elements)); } } From bda061b628555ca233beb1b2f92f4aceda987516 Mon Sep 17 00:00:00 2001 From: tledkov-gridgain Date: Mon, 13 Nov 2017 15:37:02 +0300 
Subject: [PATCH 234/243] IGNITE-6752: JDBC thin: connection properties refactoring. This closes #2927. --- .../JdbcThinAutoCloseServerCursorTest.java | 4 +- .../jdbc/thin/JdbcThinConnectionSelfTest.java | 155 ++--- .../org/apache/ignite/IgniteJdbcDriver.java | 29 +- .../apache/ignite/IgniteJdbcThinDriver.java | 35 +- .../jdbc/thin/ConnectionProperties.java | 148 ++++ .../jdbc/thin/ConnectionPropertiesImpl.java | 637 ++++++++++++++++++ .../jdbc/thin/JdbcThinConnection.java | 163 +---- .../internal/jdbc/thin/JdbcThinTcpIo.java | 195 ++---- .../internal/jdbc/thin/JdbcThinUtils.java | 79 +-- 9 files changed, 945 insertions(+), 500 deletions(-) create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionProperties.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAutoCloseServerCursorTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAutoCloseServerCursorTest.java index db4ed3f1ab3e8..943ca0e6bc548 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAutoCloseServerCursorTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAutoCloseServerCursorTest.java @@ -29,7 +29,6 @@ import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.internal.jdbc.thin.JdbcThinUtils; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; @@ -51,8 +50,7 @@ public class JdbcThinAutoCloseServerCursorTest extends JdbcThinAbstractSelfTest private static final String CACHE_NAME = "cache"; /** URL. 
*/ - private static final String URL = "jdbc:ignite:thin://127.0.0.1/?" + - JdbcThinUtils.PARAM_AUTO_CLOSE_SERVER_CURSOR + "=true"; + private static final String URL = "jdbc:ignite:thin://127.0.0.1/?autoCloseServerCursor=true"; /** {@inheritDoc} */ @SuppressWarnings("unchecked") diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java index 7f67136c202b3..0cf6ab6ff8e7e 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java @@ -135,9 +135,10 @@ public void testInvalidEndpoint() throws Exception { assertInvalid("jdbc:ignite:thin://:10000", "Host name is empty"); assertInvalid("jdbc:ignite:thin:// :10000", "Host name is empty"); - assertInvalid("jdbc:ignite:thin://127.0.0.1:-1", "Invalid port"); - assertInvalid("jdbc:ignite:thin://127.0.0.1:0", "Invalid port"); - assertInvalid("jdbc:ignite:thin://127.0.0.1:100000", "Invalid port"); + assertInvalid("jdbc:ignite:thin://127.0.0.1:-1", "Property cannot be lower than 1 [name=port, value=-1]"); + assertInvalid("jdbc:ignite:thin://127.0.0.1:0", "Property cannot be lower than 1 [name=port, value=0]"); + assertInvalid("jdbc:ignite:thin://127.0.0.1:100000", + "Property cannot be upper than 65535 [name=port, value=100000]"); } /** @@ -147,31 +148,31 @@ public void testInvalidEndpoint() throws Exception { */ public void testSocketBuffers() throws Exception { assertInvalid("jdbc:ignite:thin://127.0.0.1?socketSendBuffer=-1", - "Property cannot be negative [name=" + JdbcThinUtils.PARAM_SOCK_SND_BUF); + "Property cannot be lower than 0 [name=socketSendBuffer, value=-1]"); assertInvalid("jdbc:ignite:thin://127.0.0.1?socketReceiveBuffer=-1", - "Property cannot be negative [name=" + JdbcThinUtils.PARAM_SOCK_RCV_BUF); + "Property cannot be lower than 0 
[name=socketReceiveBuffer, value=-1]"); try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1")) { - assertEquals(0, io(conn).socketSendBuffer()); - assertEquals(0, io(conn).socketReceiveBuffer()); + assertEquals(0, io(conn).connectionProperties().getSocketSendBuffer()); + assertEquals(0, io(conn).connectionProperties().getSocketReceiveBuffer()); } // Note that SO_* options are hints, so we check that value is equals to either what we set or to default. try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?socketSendBuffer=1024")) { - assertEquals(1024, io(conn).socketSendBuffer()); - assertEquals(0, io(conn).socketReceiveBuffer()); + assertEquals(1024, io(conn).connectionProperties().getSocketSendBuffer()); + assertEquals(0, io(conn).connectionProperties().getSocketReceiveBuffer()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?socketReceiveBuffer=1024")) { - assertEquals(0, io(conn).socketSendBuffer()); - assertEquals(1024, io(conn).socketReceiveBuffer()); + assertEquals(0, io(conn).connectionProperties().getSocketSendBuffer()); + assertEquals(1024, io(conn).connectionProperties().getSocketReceiveBuffer()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?" 
+ "socketSendBuffer=1024&socketReceiveBuffer=2048")) { - assertEquals(1024, io(conn).socketSendBuffer()); - assertEquals(2048, io(conn).socketReceiveBuffer()); + assertEquals(1024, io(conn).connectionProperties().getSocketSendBuffer()); + assertEquals(2048, io(conn).connectionProperties().getSocketReceiveBuffer()); } } @@ -182,76 +183,76 @@ public void testSocketBuffers() throws Exception { */ public void testSqlHints() throws Exception { try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1")) { - assertFalse(io(conn).distributedJoins()); - assertFalse(io(conn).enforceJoinOrder()); - assertFalse(io(conn).collocated()); - assertFalse(io(conn).replicatedOnly()); - assertFalse(io(conn).lazy()); - assertFalse(io(conn).skipReducerOnUpdate()); + assertFalse(io(conn).connectionProperties().isDistributedJoins()); + assertFalse(io(conn).connectionProperties().isEnforceJoinOrder()); + assertFalse(io(conn).connectionProperties().isCollocated()); + assertFalse(io(conn).connectionProperties().isReplicatedOnly()); + assertFalse(io(conn).connectionProperties().isLazy()); + assertFalse(io(conn).connectionProperties().isSkipReducerOnUpdate()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?distributedJoins=true")) { - assertTrue(io(conn).distributedJoins()); - assertFalse(io(conn).enforceJoinOrder()); - assertFalse(io(conn).collocated()); - assertFalse(io(conn).replicatedOnly()); - assertFalse(io(conn).lazy()); - assertFalse(io(conn).skipReducerOnUpdate()); + assertTrue(io(conn).connectionProperties().isDistributedJoins()); + assertFalse(io(conn).connectionProperties().isEnforceJoinOrder()); + assertFalse(io(conn).connectionProperties().isCollocated()); + assertFalse(io(conn).connectionProperties().isReplicatedOnly()); + assertFalse(io(conn).connectionProperties().isLazy()); + assertFalse(io(conn).connectionProperties().isSkipReducerOnUpdate()); } try (Connection conn = 
DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?enforceJoinOrder=true")) { - assertFalse(io(conn).distributedJoins()); - assertTrue(io(conn).enforceJoinOrder()); - assertFalse(io(conn).collocated()); - assertFalse(io(conn).replicatedOnly()); - assertFalse(io(conn).lazy()); - assertFalse(io(conn).skipReducerOnUpdate()); + assertFalse(io(conn).connectionProperties().isDistributedJoins()); + assertTrue(io(conn).connectionProperties().isEnforceJoinOrder()); + assertFalse(io(conn).connectionProperties().isCollocated()); + assertFalse(io(conn).connectionProperties().isReplicatedOnly()); + assertFalse(io(conn).connectionProperties().isLazy()); + assertFalse(io(conn).connectionProperties().isSkipReducerOnUpdate()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?collocated=true")) { - assertFalse(io(conn).distributedJoins()); - assertFalse(io(conn).enforceJoinOrder()); - assertTrue(io(conn).collocated()); - assertFalse(io(conn).replicatedOnly()); - assertFalse(io(conn).lazy()); - assertFalse(io(conn).skipReducerOnUpdate()); + assertFalse(io(conn).connectionProperties().isDistributedJoins()); + assertFalse(io(conn).connectionProperties().isEnforceJoinOrder()); + assertTrue(io(conn).connectionProperties().isCollocated()); + assertFalse(io(conn).connectionProperties().isReplicatedOnly()); + assertFalse(io(conn).connectionProperties().isLazy()); + assertFalse(io(conn).connectionProperties().isSkipReducerOnUpdate()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?replicatedOnly=true")) { - assertFalse(io(conn).distributedJoins()); - assertFalse(io(conn).enforceJoinOrder()); - assertFalse(io(conn).collocated()); - assertTrue(io(conn).replicatedOnly()); - assertFalse(io(conn).lazy()); - assertFalse(io(conn).skipReducerOnUpdate()); + assertFalse(io(conn).connectionProperties().isDistributedJoins()); + assertFalse(io(conn).connectionProperties().isEnforceJoinOrder()); + 
assertFalse(io(conn).connectionProperties().isCollocated()); + assertTrue(io(conn).connectionProperties().isReplicatedOnly()); + assertFalse(io(conn).connectionProperties().isLazy()); + assertFalse(io(conn).connectionProperties().isSkipReducerOnUpdate()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?lazy=true")) { - assertFalse(io(conn).distributedJoins()); - assertFalse(io(conn).enforceJoinOrder()); - assertFalse(io(conn).collocated()); - assertFalse(io(conn).replicatedOnly()); - assertTrue(io(conn).lazy()); - assertFalse(io(conn).skipReducerOnUpdate()); + assertFalse(io(conn).connectionProperties().isDistributedJoins()); + assertFalse(io(conn).connectionProperties().isEnforceJoinOrder()); + assertFalse(io(conn).connectionProperties().isCollocated()); + assertFalse(io(conn).connectionProperties().isReplicatedOnly()); + assertTrue(io(conn).connectionProperties().isLazy()); + assertFalse(io(conn).connectionProperties().isSkipReducerOnUpdate()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?skipReducerOnUpdate=true")) { - assertFalse(io(conn).distributedJoins()); - assertFalse(io(conn).enforceJoinOrder()); - assertFalse(io(conn).collocated()); - assertFalse(io(conn).replicatedOnly()); - assertFalse(io(conn).lazy()); - assertTrue(io(conn).skipReducerOnUpdate()); + assertFalse(io(conn).connectionProperties().isDistributedJoins()); + assertFalse(io(conn).connectionProperties().isEnforceJoinOrder()); + assertFalse(io(conn).connectionProperties().isCollocated()); + assertFalse(io(conn).connectionProperties().isReplicatedOnly()); + assertFalse(io(conn).connectionProperties().isLazy()); + assertTrue(io(conn).connectionProperties().isSkipReducerOnUpdate()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?distributedJoins=true&" + "enforceJoinOrder=true&collocated=true&replicatedOnly=true&lazy=true&skipReducerOnUpdate=true")) { - 
assertTrue(io(conn).distributedJoins()); - assertTrue(io(conn).enforceJoinOrder()); - assertTrue(io(conn).collocated()); - assertTrue(io(conn).replicatedOnly()); - assertTrue(io(conn).lazy()); - assertTrue(io(conn).skipReducerOnUpdate()); + assertTrue(io(conn).connectionProperties().isDistributedJoins()); + assertTrue(io(conn).connectionProperties().isEnforceJoinOrder()); + assertTrue(io(conn).connectionProperties().isCollocated()); + assertTrue(io(conn).connectionProperties().isReplicatedOnly()); + assertTrue(io(conn).connectionProperties().isLazy()); + assertTrue(io(conn).connectionProperties().isSkipReducerOnUpdate()); } } @@ -262,35 +263,35 @@ public void testSqlHints() throws Exception { */ public void testTcpNoDelay() throws Exception { assertInvalid("jdbc:ignite:thin://127.0.0.1?tcpNoDelay=0", - "Failed to parse boolean property [name=" + JdbcThinUtils.PARAM_TCP_NO_DELAY); + "Failed to parse boolean property [name=tcpNoDelay, value=0]"); assertInvalid("jdbc:ignite:thin://127.0.0.1?tcpNoDelay=1", - "Failed to parse boolean property [name=" + JdbcThinUtils.PARAM_TCP_NO_DELAY); + "Failed to parse boolean property [name=tcpNoDelay, value=1]"); assertInvalid("jdbc:ignite:thin://127.0.0.1?tcpNoDelay=false1", - "Failed to parse boolean property [name=" + JdbcThinUtils.PARAM_TCP_NO_DELAY); + "Failed to parse boolean property [name=tcpNoDelay, value=false1]"); assertInvalid("jdbc:ignite:thin://127.0.0.1?tcpNoDelay=true1", - "Failed to parse boolean property [name=" + JdbcThinUtils.PARAM_TCP_NO_DELAY); + "Failed to parse boolean property [name=tcpNoDelay, value=true1]"); try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1")) { - assertTrue(io(conn).tcpNoDelay()); + assertTrue(io(conn).connectionProperties().isTcpNoDelay()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?tcpNoDelay=true")) { - assertTrue(io(conn).tcpNoDelay()); + assertTrue(io(conn).connectionProperties().isTcpNoDelay()); } try 
(Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?tcpNoDelay=True")) { - assertTrue(io(conn).tcpNoDelay()); + assertTrue(io(conn).connectionProperties().isTcpNoDelay()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?tcpNoDelay=false")) { - assertFalse(io(conn).tcpNoDelay()); + assertFalse(io(conn).connectionProperties().isTcpNoDelay()); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?tcpNoDelay=False")) { - assertFalse(io(conn).tcpNoDelay()); + assertFalse(io(conn).connectionProperties().isTcpNoDelay()); } } @@ -300,9 +301,9 @@ public void testTcpNoDelay() throws Exception { * @throws Exception If failed. */ public void testAutoCloseServerCursorProperty() throws Exception { - String url = "jdbc:ignite:thin://127.0.0.1?" + JdbcThinUtils.PARAM_AUTO_CLOSE_SERVER_CURSOR; + String url = "jdbc:ignite:thin://127.0.0.1?autoCloseServerCursor"; - String err = "Failed to parse boolean property [name=" + JdbcThinUtils.PARAM_AUTO_CLOSE_SERVER_CURSOR; + String err = "Failed to parse boolean property [name=autoCloseServerCursor"; assertInvalid(url + "=0", err); assertInvalid(url + "=1", err); @@ -310,23 +311,23 @@ public void testAutoCloseServerCursorProperty() throws Exception { assertInvalid(url + "=true1", err); try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1")) { - assertFalse(io(conn).autoCloseServerCursor()); + assertFalse(io(conn).connectionProperties().isAutoCloseServerCursor()); } try (Connection conn = DriverManager.getConnection(url + "=true")) { - assertTrue(io(conn).autoCloseServerCursor()); + assertTrue(io(conn).connectionProperties().isAutoCloseServerCursor()); } try (Connection conn = DriverManager.getConnection(url + "=True")) { - assertTrue(io(conn).autoCloseServerCursor()); + assertTrue(io(conn).connectionProperties().isAutoCloseServerCursor()); } try (Connection conn = DriverManager.getConnection(url + "=false")) { - 
assertFalse(io(conn).autoCloseServerCursor()); + assertFalse(io(conn).connectionProperties().isAutoCloseServerCursor()); } try (Connection conn = DriverManager.getConnection(url + "=False")) { - assertFalse(io(conn).autoCloseServerCursor()); + assertFalse(io(conn).connectionProperties().isAutoCloseServerCursor()); } } diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java index ea9b7f8fa7fa3..8195bd4302735 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java @@ -30,7 +30,6 @@ import org.apache.ignite.cache.affinity.AffinityKey; import org.apache.ignite.internal.jdbc.JdbcConnection; import org.apache.ignite.internal.jdbc.JdbcDriverPropertyInfo; -import org.apache.ignite.internal.jdbc.thin.JdbcThinUtils; import org.apache.ignite.logger.java.JavaLogger; /** @@ -304,12 +303,6 @@ public class IgniteJdbcDriver implements Driver { /** Collocated parameter name. */ private static final String PARAM_COLLOCATED = "collocated"; - /** Parameter: enforce join order flag. */ - public static final String PARAM_ENFORCE_JOIN_ORDER = "enforceJoinOrder"; - - /** Parameter: lazy query execution flag. */ - public static final String PARAM_LAZY = "lazy"; - /** Distributed joins parameter name. */ private static final String PARAM_DISTRIBUTED_JOINS = "distributedJoins"; @@ -337,6 +330,12 @@ public class IgniteJdbcDriver implements Driver { /** Skip reducer on update property name. */ private static final String PARAM_SKIP_REDUCER_ON_UPDATE = "skipReducerOnUpdate"; + /** Parameter: enforce join order flag (SQL hint). */ + public static final String PARAM_ENFORCE_JOIN_ORDER = "enforceJoinOrder"; + + /** Parameter: lazy query execution flag (SQL hint). */ + public static final String PARAM_LAZY = "lazy"; + /** Hostname property name. 
*/ public static final String PROP_HOST = PROP_PREFIX + "host"; @@ -358,12 +357,6 @@ public class IgniteJdbcDriver implements Driver { /** Distributed joins property name. */ public static final String PROP_DISTRIBUTED_JOINS = PROP_PREFIX + PARAM_DISTRIBUTED_JOINS; - /** Enforce join order property name. */ - public static final String PROP_ENFORCE_JOIN_ORDER = PROP_PREFIX + PARAM_ENFORCE_JOIN_ORDER; - - /** Lazy query execution property name. */ - public static final String PROP_LAZY = PROP_PREFIX + PARAM_LAZY; - /** Transactions allowed property name. */ public static final String PROP_TX_ALLOWED = PROP_PREFIX + PARAM_TX_ALLOWED; @@ -388,6 +381,12 @@ public class IgniteJdbcDriver implements Driver { /** Skip reducer on update update property name. */ public static final String PROP_SKIP_REDUCER_ON_UPDATE = PROP_PREFIX + PARAM_SKIP_REDUCER_ON_UPDATE; + /** Enforce join order property name. */ + public static final String PROP_ENFORCE_JOIN_ORDER = PROP_PREFIX + PARAM_ENFORCE_JOIN_ORDER; + + /** Lazy property name. */ + public static final String PROP_LAZY = PROP_PREFIX + PARAM_LAZY; + /** Cache name property name. 
*/ public static final String PROP_CFG = PROP_PREFIX + "cfg"; @@ -457,8 +456,8 @@ public class IgniteJdbcDriver implements Driver { new JdbcDriverPropertyInfo("Local", info.getProperty(PROP_LOCAL), ""), new JdbcDriverPropertyInfo("Collocated", info.getProperty(PROP_COLLOCATED), ""), new JdbcDriverPropertyInfo("Distributed Joins", info.getProperty(PROP_DISTRIBUTED_JOINS), ""), - new JdbcDriverPropertyInfo("Enforce Join Order", info.getProperty(JdbcThinUtils.PROP_ENFORCE_JOIN_ORDER), ""), - new JdbcDriverPropertyInfo("Lazy query execution", info.getProperty(JdbcThinUtils.PROP_LAZY), ""), + new JdbcDriverPropertyInfo("Enforce Join Order", info.getProperty(PROP_ENFORCE_JOIN_ORDER), ""), + new JdbcDriverPropertyInfo("Lazy query execution", info.getProperty(PROP_LAZY), ""), new JdbcDriverPropertyInfo("Transactions Allowed", info.getProperty(PROP_TX_ALLOWED), ""), new JdbcDriverPropertyInfo("Queries with multiple statements allowed", info.getProperty(PROP_MULTIPLE_STMTS), ""), new JdbcDriverPropertyInfo("Skip reducer on update", info.getProperty(PROP_SKIP_REDUCER_ON_UPDATE), "") diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcThinDriver.java b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcThinDriver.java index a313f92dc01f6..03f126faba086 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcThinDriver.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcThinDriver.java @@ -23,17 +23,17 @@ import java.sql.DriverPropertyInfo; import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; -import java.util.Arrays; -import java.util.List; import java.util.Properties; import java.util.logging.Logger; import org.apache.ignite.cache.affinity.AffinityKey; import org.apache.ignite.internal.IgniteVersionUtils; -import org.apache.ignite.internal.jdbc.JdbcDriverPropertyInfo; +import org.apache.ignite.internal.jdbc.thin.ConnectionPropertiesImpl; import org.apache.ignite.internal.jdbc.thin.JdbcThinConnection; 
import org.apache.ignite.internal.jdbc.thin.JdbcThinUtils; import org.apache.ignite.internal.util.typedef.F; +import static org.apache.ignite.internal.jdbc.thin.ConnectionPropertiesImpl.PROP_PREFIX; + /** * JDBC driver thin implementation for In-Memory Data Grid. *

        @@ -130,18 +130,6 @@ */ @SuppressWarnings("JavadocReference") public class IgniteJdbcThinDriver implements Driver { - /* - * Static initializer. - */ - static { - try { - DriverManager.registerDriver(new IgniteJdbcThinDriver()); - } - catch (SQLException e) { - throw new RuntimeException("Failed to register " + IgniteJdbcThinDriver.class.getName(), e); - } - } - /** Major version. */ private static final int MAJOR_VER = IgniteVersionUtils.VER.major(); @@ -167,7 +155,7 @@ public class IgniteJdbcThinDriver implements Driver { String schema = parseUrl(url, props); - return new JdbcThinConnection(url, props, schema); + return new JdbcThinConnection(url, schema, props); } /** {@inheritDoc} */ @@ -179,18 +167,7 @@ public class IgniteJdbcThinDriver implements Driver { @Override public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { parseUrl(url, info); - List props = Arrays.asList( - new JdbcDriverPropertyInfo("Hostname", info.getProperty(JdbcThinUtils.PROP_HOST), ""), - new JdbcDriverPropertyInfo("Port number", info.getProperty(JdbcThinUtils.PROP_PORT), ""), - new JdbcDriverPropertyInfo("Distributed Joins", info.getProperty(JdbcThinUtils.PROP_DISTRIBUTED_JOINS), ""), - new JdbcDriverPropertyInfo("Enforce Join Order", info.getProperty(JdbcThinUtils.PROP_ENFORCE_JOIN_ORDER), ""), - new JdbcDriverPropertyInfo("Collocated", info.getProperty(JdbcThinUtils.PROP_COLLOCATED), ""), - new JdbcDriverPropertyInfo("Replicated only", info.getProperty(JdbcThinUtils.PROP_REPLICATED_ONLY), ""), - new JdbcDriverPropertyInfo("Lazy query execution flag", info.getProperty(JdbcThinUtils.PROP_LAZY),""), - new JdbcDriverPropertyInfo("Skip reducer on update", info.getProperty(JdbcThinUtils.PROP_SKIP_REDUCER_ON_UPDATE),"") - ); - - return props.toArray(new DriverPropertyInfo[0]); + return ConnectionPropertiesImpl.getDriverPropertyInfo(info); } /** {@inheritDoc} */ @@ -285,7 +262,7 @@ private void parseParameters(String str, Properties props) throws 
SQLException { if (key.isEmpty() || val.isEmpty()) throw new SQLException("Invalid parameter format (key and value cannot be empty): " + param); - props.setProperty(JdbcThinUtils.PROP_PREFIX + key, val); + props.setProperty(PROP_PREFIX + key, val); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionProperties.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionProperties.java new file mode 100644 index 0000000000000..d79348415f4cc --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionProperties.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.jdbc.thin; + +import java.sql.SQLException; + +/** + * Provide access and manipulations with connection JDBC properties. + */ +public interface ConnectionProperties { + /** + * @return Host name or host's IP to connect. + */ + public String getHost(); + + /** + * @param host Host name or host's IP to connect. + */ + public void setHost(String host); + + /** + * @return Port to connect. + */ + public int getPort(); + + /** + * @param port Port to connect. + * @throws SQLException On error. 
+ */ + public void setPort(int port) throws SQLException; + + /** + * @return Distributed joins flag. + */ + public boolean isDistributedJoins(); + + /** + * @param distributedJoins Distributed joins flag. + */ + public void setDistributedJoins(boolean distributedJoins); + + /** + * @return Enforce join order flag. + */ + public boolean isEnforceJoinOrder(); + + /** + * @param enforceJoinOrder Enforce join order flag. + */ + public void setEnforceJoinOrder(boolean enforceJoinOrder); + + /** + * @return Collocated flag. + */ + public boolean isCollocated(); + + /** + * @param collocated Collocated flag. + */ + public void setCollocated(boolean collocated); + + /** + * @return Replicated only flag. + */ + public boolean isReplicatedOnly(); + + /** + * @param replicatedOnly Replicated only flag. + */ + public void setReplicatedOnly(boolean replicatedOnly); + + /** + * @return Auto close server cursors flag. + */ + public boolean isAutoCloseServerCursor(); + + /** + * @param autoCloseServerCursor Auto close server cursors flag. + */ + public void setAutoCloseServerCursor(boolean autoCloseServerCursor); + + /** + * @return Socket send buffer size. + */ + public int getSocketSendBuffer(); + + /** + * @param size Socket send buffer size. + * @throws SQLException On error. + */ + public void setSocketSendBuffer(int size) throws SQLException; + + /** + * @return Socket receive buffer size. + */ + public int getSocketReceiveBuffer(); + + /** + * @param size Socket receive buffer size. + * @throws SQLException On error. + */ + public void setSocketReceiveBuffer(int size) throws SQLException; + + /** + * @return TCP no delay flag. + */ + public boolean isTcpNoDelay(); + + /** + * @param tcpNoDelay TCP no delay flag. + */ + public void setTcpNoDelay(boolean tcpNoDelay); + + /** + * @return Lazy query execution flag. + */ + public boolean isLazy(); + + /** + * @param lazy Lazy query execution flag. 
+ */ + public void setLazy(boolean lazy); + + /** + * @return Skip reducer on update flag. + */ + public boolean isSkipReducerOnUpdate(); + + /** + * @param skipReducerOnUpdate Skip reducer on update flag. + */ + public void setSkipReducerOnUpdate(boolean skipReducerOnUpdate); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java new file mode 100644 index 0000000000000..86ba2fa832b1d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java @@ -0,0 +1,637 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.jdbc.thin; + +import java.io.Serializable; +import java.sql.DriverPropertyInfo; +import java.sql.SQLException; +import java.util.Properties; +import javax.naming.RefAddr; +import javax.naming.Reference; +import org.apache.ignite.configuration.ClientConnectorConfiguration; +import org.apache.ignite.internal.processors.odbc.SqlStateCode; +import org.apache.ignite.internal.util.typedef.F; + +/** + * Holds JDBC connection properties. 
+ */ +public class ConnectionPropertiesImpl implements ConnectionProperties, Serializable { + /** */ + private static final long serialVersionUID = 0L; + + /** Prefix for property names. */ + public static final String PROP_PREFIX = "ignite.jdbc."; + + /** Host name property. */ + private StringProperty host = new StringProperty( + "host", "Ignite node IP to connect", null, null, true, new PropertyValidator() { + private static final long serialVersionUID = 0L; + + @Override public void validate(String host) throws SQLException { + if (F.isEmpty(host)) + throw new SQLException("Host name is empty", SqlStateCode.CLIENT_CONNECTION_FAILED); + } + }); + + /** Connection port property. */ + private IntegerProperty port = new IntegerProperty( + "port", "Ignite node IP to connect", ClientConnectorConfiguration.DFLT_PORT, false, 1, 0xFFFF); + + /** Distributed joins property. */ + private BooleanProperty distributedJoins = new BooleanProperty( + "distributedJoins", "Enable distributed joins", false, false); + + /** Enforce join order property. */ + private BooleanProperty enforceJoinOrder = new BooleanProperty( + "enforceJoinOrder", "Enable enforce join order", false, false); + + /** Collocated property. */ + private BooleanProperty collocated = new BooleanProperty( + "collocated", "Enable collocated query", false, false); + + /** Replicated only property. */ + private BooleanProperty replicatedOnly = new BooleanProperty( + "replicatedOnly", "Specify if the all queries contain only replicated tables", false, false); + + /** Auto close server cursor property. */ + private BooleanProperty autoCloseServerCursor = new BooleanProperty( + "autoCloseServerCursor", "Enable auto close server cursors when last piece of result set is retrieved. " + + "If the server-side cursor is already closed, you may get an exception when trying to call " + + "`ResultSet.getMetadata()` method.", false, false); + + /** TCP no delay property. 
*/ + private BooleanProperty tcpNoDelay = new BooleanProperty( + "tcpNoDelay", "TCP no delay flag", true, false); + + /** Lazy query execution property. */ + private BooleanProperty lazy = new BooleanProperty( + "lazy", "Enable lazy query execution", false, false); + + /** Socket send buffer size property. */ + private IntegerProperty socketSendBuffer = new IntegerProperty( + "socketSendBuffer", "Socket send buffer size", + 0, false, 0, Integer.MAX_VALUE); + + /** Socket receive buffer size property. */ + private IntegerProperty socketReceiveBuffer = new IntegerProperty( + "socketReceiveBuffer", "Socket send buffer size", + 0, false, 0, Integer.MAX_VALUE); + + /** Executes update queries on ignite server nodes flag. */ + private BooleanProperty skipReducerOnUpdate = new BooleanProperty( + "skipReducerOnUpdate", "Enable execution update queries on ignite server nodes", false, false); + + /** Properties array. */ + private final ConnectionProperty [] propsArray = { + host, port, + distributedJoins, enforceJoinOrder, collocated, replicatedOnly, autoCloseServerCursor, + tcpNoDelay, lazy, socketSendBuffer, socketReceiveBuffer, skipReducerOnUpdate + }; + + /** {@inheritDoc} */ + @Override public String getHost() { + return host.value(); + } + + /** {@inheritDoc} */ + @Override public void setHost(String host) { + this.host.setValue(host); + } + + /** {@inheritDoc} */ + @Override public int getPort() { + return port.value(); + } + + /** {@inheritDoc} */ + @Override public void setPort(int port) throws SQLException { + this.port.setValue(port); + } + + /** {@inheritDoc} */ + @Override public boolean isDistributedJoins() { + return distributedJoins.value(); + } + + /** {@inheritDoc} */ + @Override public void setDistributedJoins(boolean val) { + distributedJoins.setValue(val); + } + + /** {@inheritDoc} */ + @Override public boolean isEnforceJoinOrder() { + return enforceJoinOrder.value(); + } + + /** {@inheritDoc} */ + @Override public void setEnforceJoinOrder(boolean val) 
{ + enforceJoinOrder.setValue(val); + } + + /** {@inheritDoc} */ + @Override public boolean isCollocated() { + return collocated.value(); + } + + /** {@inheritDoc} */ + @Override public void setCollocated(boolean val) { + collocated.setValue(val); + } + + /** {@inheritDoc} */ + @Override public boolean isReplicatedOnly() { + return replicatedOnly.value(); + } + + /** {@inheritDoc} */ + @Override public void setReplicatedOnly(boolean val) { + replicatedOnly.setValue(val); + } + + /** {@inheritDoc} */ + @Override public boolean isAutoCloseServerCursor() { + return autoCloseServerCursor.value(); + } + + /** {@inheritDoc} */ + @Override public void setAutoCloseServerCursor(boolean val) { + autoCloseServerCursor.setValue(val); + } + + /** {@inheritDoc} */ + @Override public int getSocketSendBuffer() { + return socketSendBuffer.value(); + } + + /** {@inheritDoc} */ + @Override public void setSocketSendBuffer(int size) throws SQLException { + socketSendBuffer.setValue(size); + } + + /** {@inheritDoc} */ + @Override public int getSocketReceiveBuffer() { + return socketReceiveBuffer.value(); + } + + /** {@inheritDoc} */ + @Override public void setSocketReceiveBuffer(int size) throws SQLException { + socketReceiveBuffer.setValue(size); + } + + /** {@inheritDoc} */ + @Override public boolean isTcpNoDelay() { + return tcpNoDelay.value(); + } + + /** {@inheritDoc} */ + @Override public void setTcpNoDelay(boolean val) { + tcpNoDelay.setValue(val); + } + + /** {@inheritDoc} */ + @Override public boolean isLazy() { + return lazy.value(); + } + + /** {@inheritDoc} */ + @Override public void setLazy(boolean val) { + lazy.setValue(val); + } + + /** {@inheritDoc} */ + @Override public boolean isSkipReducerOnUpdate() { + return skipReducerOnUpdate.value(); + } + + /** {@inheritDoc} */ + @Override public void setSkipReducerOnUpdate(boolean val) { + skipReducerOnUpdate.setValue(val); + } + + /** + * @param props Environment properties. + * @throws SQLException On error. 
+ */ + void init(Properties props) throws SQLException { + Properties props0 = (Properties)props.clone(); + + for (ConnectionProperty aPropsArray : propsArray) + aPropsArray.init(props0); + } + + /** + * @return Driver's properties info array. + */ + private DriverPropertyInfo[] getDriverPropertyInfo() { + DriverPropertyInfo[] dpis = new DriverPropertyInfo[propsArray.length]; + + for (int i = 0; i < propsArray.length; ++i) + dpis[i] = propsArray[i].getDriverPropertyInfo(); + + return dpis; + } + + /** + * @param props Environment properties. + * @return Driver's properties info array. + * @throws SQLException On error. + */ + public static DriverPropertyInfo[] getDriverPropertyInfo(Properties props) throws SQLException { + ConnectionPropertiesImpl cpi = new ConnectionPropertiesImpl(); + + cpi.init(props); + + return cpi.getDriverPropertyInfo(); + } + + /** + * + */ + private interface PropertyValidator extends Serializable { + /** + * @param val String representation of the property value to validate. + * @throws SQLException On validation fails. + */ + void validate(String val) throws SQLException; + } + + /** + * + */ + private abstract static class ConnectionProperty implements Serializable { + /** */ + private static final long serialVersionUID = 0L; + + /** Name. */ + protected String name; + + /** Property description. */ + protected String desc; + + /** Default value. */ + protected Object dfltVal; + + /** + * An array of possible values if the value may be selected + * from a particular set of values; otherwise null. + */ + protected String [] choices; + + /** Required flag. */ + protected boolean required; + + /** Property validator. */ + protected PropertyValidator validator; + + /** + * @param name Name. + * @param desc Description. + * @param dfltVal Default value. + * @param choices Possible values. + * @param required {@code true} if the property is required. 
+ */ + ConnectionProperty(String name, String desc, Object dfltVal, String[] choices, boolean required) { + this.name = name; + this.desc= desc; + this.dfltVal = dfltVal; + this.choices = choices; + this.required = required; + } + + /** + * @param name Name. + * @param desc Description. + * @param dfltVal Default value. + * @param choices Possible values. + * @param required {@code true} if the property is required. + * @param validator Property validator. + */ + ConnectionProperty(String name, String desc, Object dfltVal, String[] choices, boolean required, + PropertyValidator validator) { + this.name = name; + this.desc= desc; + this.dfltVal = dfltVal; + this.choices = choices; + this.required = required; + this.validator = validator; + } + + /** + * @return Default value. + */ + Object getDfltVal() { + return dfltVal; + } + + /** + * @return Property name. + */ + String getName() { + return name; + } + + /** + * @return Array of possible values if the value may be selected + * from a particular set of values; otherwise null + */ + String[] choices() { + return choices; + } + + /** + * @param props Properties. + * @throws SQLException On error. + */ + void init(Properties props) throws SQLException { + String strVal = props.getProperty(PROP_PREFIX + name); + + if (required && strVal == null) { + throw new SQLException("Property '" + name + "' is required but not defined", + SqlStateCode.CLIENT_CONNECTION_FAILED); + } + + if (validator != null) + validator.validate(strVal); + + props.remove(name); + + init(strVal); + } + + /** + * @param ref Reference object. + * @throws SQLException On error. + */ + void init(Reference ref) throws SQLException { + RefAddr refAddr = ref.get(name); + + if (refAddr != null) { + String str = (String) refAddr.getContent(); + + if (validator != null) + validator.validate(str); + + init(str); + } + } + + /** + * @param str String representation of the + * @throws SQLException on error. 
+ */ + abstract void init(String str) throws SQLException; + + /** + * @return String representation of the property value. + */ + abstract String valueObject(); + + /** + * @return JDBC property info object. + */ + DriverPropertyInfo getDriverPropertyInfo() { + DriverPropertyInfo dpi = new DriverPropertyInfo(name, valueObject()); + + dpi.choices = choices(); + dpi.required = required; + dpi.description = desc; + + return dpi; + } + } + + /** + * + */ + private static class BooleanProperty extends ConnectionProperty { + /** */ + private static final long serialVersionUID = 0L; + + /** Bool choices. */ + private static final String [] boolChoices = new String[] {Boolean.TRUE.toString(), Boolean.FALSE.toString()}; + + /** Value. */ + private boolean val; + + /** + * @param name Name. + * @param desc Description. + * @param dfltVal Default value. + * @param required {@code true} if the property is required. + */ + BooleanProperty(String name, String desc, boolean dfltVal, boolean required) { + super(name, desc, dfltVal, boolChoices, required); + + val = dfltVal; + } + + /** + * @return Property value. + */ + boolean value() { + return val; + } + + /** {@inheritDoc} */ + @Override void init(String str) throws SQLException { + if (str == null) + val = (Boolean)dfltVal; + else { + if (Boolean.TRUE.toString().equalsIgnoreCase(str)) + val = true; + else if (Boolean.FALSE.toString().equalsIgnoreCase(str)) + val = false; + else + throw new SQLException("Failed to parse boolean property [name=" + name + + ", value=" + str + ']', SqlStateCode.CLIENT_CONNECTION_FAILED); + } + } + + /** {@inheritDoc} */ + @Override String valueObject() { + return Boolean.toString(val); + } + + /** + * @param val Property value to set. + */ + void setValue(boolean val) { + this.val = val; + } + } + + /** + * + */ + private abstract static class NumberProperty extends ConnectionProperty { + /** */ + private static final long serialVersionUID = 0L; + + /** Value. 
*/ + protected Number val; + + /** Allowed value range. */ + private Number [] range; + + /** + * @param name Name. + * @param desc Description. + * @param dfltVal Default value. + * @param required {@code true} if the property is required. + * @param min Lower bound of allowed range. + * @param max Upper bound of allowed range. + */ + NumberProperty(String name, String desc, Number dfltVal, boolean required, Number min, Number max) { + super(name, desc, dfltVal, null, required); + + assert dfltVal != null; + + val = dfltVal; + + range = new Number[] {min, max}; + } + + /** {@inheritDoc} */ + @Override void init(String str) throws SQLException { + if (str == null) + val = (int)dfltVal; + else { + try { + setValue(parse(str)); + } catch (NumberFormatException e) { + throw new SQLException("Failed to parse int property [name=" + name + + ", value=" + str + ']', SqlStateCode.CLIENT_CONNECTION_FAILED); + } + } + } + + /** + * @param str String value. + * @return Number value. + * @throws NumberFormatException On parse error. + */ + protected abstract Number parse(String str) throws NumberFormatException; + + /** {@inheritDoc} */ + @Override String valueObject() { + return String.valueOf(val); + } + + /** + * @param val Property value. + * @throws SQLException On error. 
+ */ + void setValue(Number val) throws SQLException { + if (range != null) { + if (val.doubleValue() < range[0].doubleValue()) { + throw new SQLException("Property cannot be lower than " + range[0].toString() + " [name=" + name + + ", value=" + val.toString() + ']', SqlStateCode.CLIENT_CONNECTION_FAILED); + } + + if (val.doubleValue() > range[1].doubleValue()) { + throw new SQLException("Property cannot be upper than " + range[1].toString() + " [name=" + name + + ", value=" + val.toString() + ']', SqlStateCode.CLIENT_CONNECTION_FAILED); + } + } + + this.val = val; + } + } + + /** + * + */ + private static class IntegerProperty extends NumberProperty { + /** */ + private static final long serialVersionUID = 0L; + + /** + * @param name Name. + * @param desc Description. + * @param dfltVal Default value. + * @param required {@code true} if the property is required. + * @param min Lower bound of allowed range. + * @param max Upper bound of allowed range. + */ + IntegerProperty(String name, String desc, Number dfltVal, boolean required, int min, int max) { + super(name, desc, dfltVal, required, min, max); + } + + /** {@inheritDoc} */ + @Override protected Number parse(String str) throws NumberFormatException { + return Integer.parseInt(str); + } + + /** + * @return Property value. + */ + int value() { + return val.intValue(); + } + } + + /** + * + */ + private static class StringProperty extends ConnectionProperty { + /** */ + private static final long serialVersionUID = 0L; + + /** Value */ + private String val; + + /** + * @param name Name. + * @param desc Description. + * @param dfltVal Default value. + * @param choices Possible values. + * @param required {@code true} if the property is required. + * @param validator Property value validator. 
+ */ + StringProperty(String name, String desc, String dfltVal, String [] choices, boolean required, + PropertyValidator validator) { + super(name, desc, dfltVal, choices, required, validator); + + val = dfltVal; + } + + /** + * @param val Property value. + */ + void setValue(String val) { + this.val = val; + } + + /** + * @return Property value. + */ + String value() { + return val; + } + + /** {@inheritDoc} */ + @Override void init(String str) throws SQLException { + val = str; + } + + /** {@inheritDoc} */ + @Override String valueObject() { + return val; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java index 57b25e18360d3..999c793d55ccf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java @@ -51,18 +51,6 @@ import static java.sql.ResultSet.CONCUR_READ_ONLY; import static java.sql.ResultSet.HOLD_CURSORS_OVER_COMMIT; import static java.sql.ResultSet.TYPE_FORWARD_ONLY; -import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_AUTO_CLOSE_SERVER_CURSORS; -import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_COLLOCATED; -import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_DISTRIBUTED_JOINS; -import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_ENFORCE_JOIN_ORDER; -import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_HOST; -import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_LAZY; -import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_PORT; -import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_REPLICATED_ONLY; -import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_SOCK_RCV_BUF; -import static 
org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_SOCK_SND_BUF; -import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_TCP_NO_DELAY; -import static org.apache.ignite.internal.jdbc.thin.JdbcThinUtils.PROP_SKIP_REDUCER_ON_UPDATE; /** * JDBC connection implementation. @@ -103,53 +91,42 @@ public class JdbcThinConnection implements Connection { /** Jdbc metadata. Cache the JDBC object on the first access */ private JdbcThinDatabaseMetadata metadata; + /** Connection properties. */ + private ConnectionProperties connProps; + /** * Creates new connection. * * @param url Connection URL. - * @param props Additional properties. * @param schema Schema name. + * @param props Connection properties. * @throws SQLException In case Ignite client failed to start. */ - public JdbcThinConnection(String url, Properties props, String schema) throws SQLException { + public JdbcThinConnection(String url, String schema, Properties props) throws SQLException { assert url != null; - assert props != null; this.url = url; + connProps = new ConnectionPropertiesImpl(); + + ((ConnectionPropertiesImpl)connProps).init(props); + holdability = HOLD_CURSORS_OVER_COMMIT; autoCommit = true; txIsolation = Connection.TRANSACTION_NONE; this.schema = normalizeSchema(schema); - String host = extractHost(props); - int port = extractPort(props); - - boolean distributedJoins = extractBoolean(props, PROP_DISTRIBUTED_JOINS, false); - boolean enforceJoinOrder = extractBoolean(props, PROP_ENFORCE_JOIN_ORDER, false); - boolean collocated = extractBoolean(props, PROP_COLLOCATED, false); - boolean replicatedOnly = extractBoolean(props, PROP_REPLICATED_ONLY, false); - boolean autoCloseServerCursor = extractBoolean(props, PROP_AUTO_CLOSE_SERVER_CURSORS, false); - boolean lazyExec = extractBoolean(props, PROP_LAZY, false); - - int sockSndBuf = extractIntNonNegative(props, PROP_SOCK_SND_BUF, 0); - int sockRcvBuf = extractIntNonNegative(props, PROP_SOCK_RCV_BUF, 0); - - boolean tcpNoDelay = 
extractBoolean(props, PROP_TCP_NO_DELAY, true); - boolean skipReducerOnUpdate = extractBoolean(props, PROP_SKIP_REDUCER_ON_UPDATE, false); - try { - cliIo = new JdbcThinTcpIo(host, port, distributedJoins, enforceJoinOrder, collocated, replicatedOnly, - autoCloseServerCursor, lazyExec, sockSndBuf, sockRcvBuf, tcpNoDelay, skipReducerOnUpdate); + cliIo = new JdbcThinTcpIo(connProps); cliIo.start(); } catch (Exception e) { cliIo.close(); - throw new SQLException("Failed to connect to Ignite cluster [host=" + host + ", port=" + port + ']', - SqlStateCode.CLIENT_CONNECTION_FAILED, e); + throw new SQLException("Failed to connect to Ignite cluster [host=" + connProps.getHost() + + ", port=" + connProps.getPort() + ']', SqlStateCode.CLIENT_CONNECTION_FAILED, e); } } @@ -653,7 +630,7 @@ IgniteProductVersion igniteVersion() { * @return Auto close server cursors flag. */ boolean autoCloseServerCursor() { - return cliIo.autoCloseServerCursor(); + return connProps.isAutoCloseServerCursor(); } /** @@ -682,120 +659,6 @@ R sendRequest(JdbcRequest req) throws SQLException { } } - /** - * Extract host. - * - * @param props Properties. - * @return Host. - * @throws SQLException If failed. - */ - private static String extractHost(Properties props) throws SQLException { - String host = props.getProperty(PROP_HOST); - - if (host != null) - host = host.trim(); - - if (F.isEmpty(host)) - throw new SQLException("Host name is empty.", SqlStateCode.CLIENT_CONNECTION_FAILED); - - return host; - } - - /** - * Extract port. - * - * @param props Properties. - * @return Port. - * @throws SQLException If failed. 
- */ - private static int extractPort(Properties props) throws SQLException { - String portStr = props.getProperty(PROP_PORT); - - if (portStr == null) - return JdbcThinUtils.DFLT_PORT; - - int port; - - try { - port = Integer.parseInt(portStr); - - if (port <= 0 || port > 0xFFFF) - throw new SQLException("Invalid port: " + portStr, SqlStateCode.CLIENT_CONNECTION_FAILED); - } - catch (NumberFormatException e) { - throw new SQLException("Invalid port: " + portStr, SqlStateCode.CLIENT_CONNECTION_FAILED); - } - - return port; - } - - /** - * Extract boolean property. - * - * @param props Properties. - * @param propName Property name. - * @param dfltVal Default value. - * @return Value. - * @throws SQLException If failed. - */ - private static boolean extractBoolean(Properties props, String propName, boolean dfltVal) throws SQLException { - String strVal = props.getProperty(propName); - - if (strVal == null) - return dfltVal; - - if (Boolean.TRUE.toString().equalsIgnoreCase(strVal)) - return true; - else if (Boolean.FALSE.toString().equalsIgnoreCase(strVal)) - return false; - else - throw new SQLException("Failed to parse boolean property [name=" + JdbcThinUtils.trimPrefix(propName) + - ", value=" + strVal + ']', SqlStateCode.CLIENT_CONNECTION_FAILED); - } - - /** - * Extract non-negative int property. - * - * @param props Properties. - * @param propName Property name. - * @param dfltVal Default value. - * @return Value. - * @throws SQLException If failed. - */ - private static int extractIntNonNegative(Properties props, String propName, int dfltVal) throws SQLException { - int res = extractInt(props, propName, dfltVal); - - if (res < 0) - throw new SQLException("Property cannot be negative [name=" + JdbcThinUtils.trimPrefix(propName) + - ", value=" + res + ']', SqlStateCode.CLIENT_CONNECTION_FAILED); - - return res; - } - - /** - * Extract int property. - * - * @param props Properties. - * @param propName Property name. - * @param dfltVal Default value. 
- * @return Value. - * @throws SQLException If failed. - */ - private static int extractInt(Properties props, String propName, int dfltVal) throws SQLException { - String strVal = props.getProperty(propName); - - if (strVal == null) - return dfltVal; - - try { - return Integer.parseInt(strVal); - } - catch (NumberFormatException e) { - throw new SQLException("Failed to parse int property [name=" + JdbcThinUtils.trimPrefix(propName) + - ", value=" + strVal + ']', SqlStateCode.CLIENT_CONNECTION_FAILED); - } - } - /** * @return Connection URL. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java index 0670fb10bfc16..4d239348b699e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java @@ -20,9 +20,27 @@ import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.IOException; +import java.io.InputStream; import java.net.InetSocketAddress; +import java.net.MalformedURLException; import java.net.Socket; +import java.net.URL; +import java.security.KeyManagementException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocketFactory; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509TrustManager; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.binary.BinaryReaderExImpl; import 
org.apache.ignite.internal.binary.BinaryWriterExImpl; @@ -39,6 +57,7 @@ import org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcResponse; import org.apache.ignite.internal.util.ipc.loopback.IpcClientTcpEndpoint; +import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteProductVersion; @@ -76,41 +95,8 @@ public class JdbcThinTcpIo { /** Initial output for query close message. */ private static final int QUERY_CLOSE_MSG_SIZE = 9; - /** Host. */ - private final String host; - - /** Port. */ - private final int port; - - /** Distributed joins. */ - private final boolean distributedJoins; - - /** Enforce join order. */ - private final boolean enforceJoinOrder; - - /** Collocated flag. */ - private final boolean collocated; - - /** Replicated only flag. */ - private final boolean replicatedOnly; - - /** Lazy execution query flag. */ - private final boolean lazy; - - /** Flag to automatically close server cursor. */ - private final boolean autoCloseServerCursor; - - /** Executes update queries on server nodes. */ - private final boolean skipReducerOnUpdate; - - /** Socket send buffer. */ - private final int sockSndBuf; - - /** Socket receive buffer. */ - private final int sockRcvBuf; - - /** TCP no delay flag. */ - private final boolean tcpNoDelay; + /** Connection properties. */ + private final ConnectionProperties connProps; /** Endpoint. */ private IpcClientTcpEndpoint endpoint; @@ -130,34 +116,10 @@ public class JdbcThinTcpIo { /** * Constructor. * - * @param host Host. - * @param port Port. - * @param distributedJoins Distributed joins flag. - * @param enforceJoinOrder Enforce join order flag. - * @param collocated Collocated flag. - * @param replicatedOnly Replicated only flag. - * @param autoCloseServerCursor Flag to automatically close server cursors. - * @param lazy Lazy execution query flag. 
- * @param sockSndBuf Socket send buffer. - * @param sockRcvBuf Socket receive buffer. - * @param tcpNoDelay TCP no delay flag. - * @param skipReducerOnUpdate Executes update queries on ignite server nodes. + * @param connProps Connection properties. */ - JdbcThinTcpIo(String host, int port, boolean distributedJoins, boolean enforceJoinOrder, boolean collocated, - boolean replicatedOnly, boolean autoCloseServerCursor, boolean lazy, int sockSndBuf, int sockRcvBuf, - boolean tcpNoDelay, boolean skipReducerOnUpdate) { - this.host = host; - this.port = port; - this.distributedJoins = distributedJoins; - this.enforceJoinOrder = enforceJoinOrder; - this.collocated = collocated; - this.replicatedOnly = replicatedOnly; - this.autoCloseServerCursor = autoCloseServerCursor; - this.lazy = lazy; - this.sockSndBuf = sockSndBuf; - this.sockRcvBuf = sockRcvBuf; - this.tcpNoDelay = tcpNoDelay; - this.skipReducerOnUpdate = skipReducerOnUpdate; + JdbcThinTcpIo(ConnectionProperties connProps) { + this.connProps = connProps; } /** @@ -167,16 +129,16 @@ public class JdbcThinTcpIo { public void start() throws SQLException, IOException { Socket sock = new Socket(); - if (sockSndBuf != 0) - sock.setSendBufferSize(sockSndBuf); + if (connProps.getSocketSendBuffer() != 0) + sock.setSendBufferSize(connProps.getSocketSendBuffer()); - if (sockRcvBuf != 0) - sock.setReceiveBufferSize(sockRcvBuf); + if (connProps.getSocketReceiveBuffer() != 0) + sock.setReceiveBufferSize(connProps.getSocketReceiveBuffer()); - sock.setTcpNoDelay(tcpNoDelay); + sock.setTcpNoDelay(connProps.isTcpNoDelay()); try { - sock.connect(new InetSocketAddress(host, port)); + sock.connect(new InetSocketAddress(connProps.getHost(), connProps.getPort())); endpoint = new IpcClientTcpEndpoint(sock); @@ -184,8 +146,8 @@ public void start() throws SQLException, IOException { in = new BufferedInputStream(endpoint.inputStream()); } catch (IOException | IgniteCheckedException e) { - throw new SQLException("Failed to connect to server 
[host=" + host + ", port=" + port + ']', - SqlStateCode.CLIENT_CONNECTION_FAILED, e); + throw new SQLException("Failed to connect to server [host=" + connProps.getHost() + + ", port=" + connProps.getPort() + ']', SqlStateCode.CLIENT_CONNECTION_FAILED, e); } handshake(CURRENT_VER); @@ -210,13 +172,13 @@ public void handshake(ClientListenerProtocolVersion ver) throws IOException, SQL writer.writeByte(ClientListenerNioListener.JDBC_CLIENT); - writer.writeBoolean(distributedJoins); - writer.writeBoolean(enforceJoinOrder); - writer.writeBoolean(collocated); - writer.writeBoolean(replicatedOnly); - writer.writeBoolean(autoCloseServerCursor); - writer.writeBoolean(lazy); - writer.writeBoolean(skipReducerOnUpdate); + writer.writeBoolean(connProps.isDistributedJoins()); + writer.writeBoolean(connProps.isEnforceJoinOrder()); + writer.writeBoolean(connProps.isCollocated()); + writer.writeBoolean(connProps.isReplicatedOnly()); + writer.writeBoolean(connProps.isAutoCloseServerCursor()); + writer.writeBoolean(connProps.isLazy()); + writer.writeBoolean(connProps.isSkipReducerOnUpdate()); send(writer.array()); @@ -280,11 +242,11 @@ private void handshake_2_1_0() throws IOException, SQLException { writer.writeByte(ClientListenerNioListener.JDBC_CLIENT); - writer.writeBoolean(distributedJoins); - writer.writeBoolean(enforceJoinOrder); - writer.writeBoolean(collocated); - writer.writeBoolean(replicatedOnly); - writer.writeBoolean(autoCloseServerCursor); + writer.writeBoolean(connProps.isDistributedJoins()); + writer.writeBoolean(connProps.isEnforceJoinOrder()); + writer.writeBoolean(connProps.isCollocated()); + writer.writeBoolean(connProps.isReplicatedOnly()); + writer.writeBoolean(connProps.isAutoCloseServerCursor()); send(writer.array()); @@ -429,59 +391,10 @@ public void close() { } /** - * @return Distributed joins flag. - */ - public boolean distributedJoins() { - return distributedJoins; - } - - /** - * @return Enforce join order flag. 
- */ - public boolean enforceJoinOrder() { - return enforceJoinOrder; - } - - /** - * @return Collocated flag. - */ - public boolean collocated() { - return collocated; - } - - /** - * @return Replicated only flag. + * @return Connection properties. */ - public boolean replicatedOnly() { - return replicatedOnly; - } - - /** - * @return Auto close server cursors flag. - */ - public boolean autoCloseServerCursor() { - return autoCloseServerCursor; - } - - /** - * @return Socket send buffer size. - */ - public int socketSendBuffer() { - return sockSndBuf; - } - - /** - * @return Socket receive buffer size. - */ - public int socketReceiveBuffer() { - return sockRcvBuf; - } - - /** - * @return TCP no delay flag. - */ - public boolean tcpNoDelay() { - return tcpNoDelay; + public ConnectionProperties connectionProperties() { + return connProps; } /** @@ -490,18 +403,4 @@ public boolean tcpNoDelay() { IgniteProductVersion igniteVersion() { return igniteVer; } - - /** - * @return Lazy query execution flag. - */ - public boolean lazy() { - return lazy; - } - - /** - * @return Server side update flag. 
- */ - public boolean skipReducerOnUpdate() { - return skipReducerOnUpdate; - } } \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinUtils.java index c9bf61cab5583..5f895dd55958c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinUtils.java @@ -17,8 +17,6 @@ package org.apache.ignite.internal.jdbc.thin; -import org.apache.ignite.configuration.ClientConnectorConfiguration; - import java.sql.Time; import java.sql.Timestamp; import java.sql.Types; @@ -37,6 +35,7 @@ import static java.sql.Types.TIMESTAMP; import static java.sql.Types.TINYINT; import static java.sql.Types.VARCHAR; +import static org.apache.ignite.internal.jdbc.thin.ConnectionPropertiesImpl.PROP_PREFIX; /** * Utility methods for thin JDBC driver. @@ -45,88 +44,12 @@ public class JdbcThinUtils { /** URL prefix. */ public static final String URL_PREFIX = "jdbc:ignite:thin://"; - /** Prefix for property names. */ - public static final String PROP_PREFIX = "ignite.jdbc."; - /** Port number property name. */ public static final String PROP_PORT = PROP_PREFIX + "port"; /** Hostname property name. */ public static final String PROP_HOST = PROP_PREFIX + "host"; - /** Parameter: distributed joins flag (SQL hint). */ - public static final String PARAM_DISTRIBUTED_JOINS = "distributedJoins"; - - /** Parameter: enforce join order flag (SQL hint). */ - public static final String PARAM_ENFORCE_JOIN_ORDER = "enforceJoinOrder"; - - /** Parameter: collocated flag (SQL hint). */ - public static final String PARAM_COLLOCATED = "collocated"; - - /** Parameter: lazy query execution flag (SQL hint). */ - public static final String PARAM_REPLICATED_ONLY = "replicatedOnly"; - - /** Parameter: replicated only flag (SQL hint). 
*/ - public static final String PARAM_LAZY = "lazy"; - - /** Parameter: socket send buffer. */ - public static final String PARAM_SOCK_SND_BUF = "socketSendBuffer"; - - /** Parameter: socket receive buffer. */ - public static final String PARAM_SOCK_RCV_BUF = "socketReceiveBuffer"; - - /** Parameter: TCP no-delay flag. */ - public static final String PARAM_TCP_NO_DELAY = "tcpNoDelay"; - - /** Parameter: Automatically close server cursor. */ - public static final String PARAM_AUTO_CLOSE_SERVER_CURSOR = "autoCloseServerCursor"; - - /** Parameter: execute update query in distributed mode on ignite server nodes. */ - public static final String PARAM_SKIP_REDUCER_ON_UPDATE = "skipReducerOnUpdate"; - - /** Distributed joins property name. */ - public static final String PROP_DISTRIBUTED_JOINS = PROP_PREFIX + PARAM_DISTRIBUTED_JOINS; - - /** Transactions allowed property name. */ - public static final String PROP_ENFORCE_JOIN_ORDER = PROP_PREFIX + PARAM_ENFORCE_JOIN_ORDER; - - /** Collocated property name. */ - public static final String PROP_COLLOCATED = PROP_PREFIX + PARAM_COLLOCATED; - - /** Lazy property name. */ - public static final String PROP_LAZY = PROP_PREFIX + PARAM_LAZY; - - /** Replicated only property name. */ - public static final String PROP_REPLICATED_ONLY = PROP_PREFIX + PARAM_REPLICATED_ONLY; - - /** Socket send buffer property name. */ - public static final String PROP_SOCK_SND_BUF = PROP_PREFIX + PARAM_SOCK_SND_BUF; - - /** Socket receive buffer property name. */ - public static final String PROP_SOCK_RCV_BUF = PROP_PREFIX + PARAM_SOCK_RCV_BUF; - - /** TCP no delay property name. */ - public static final String PROP_TCP_NO_DELAY = PROP_PREFIX + PARAM_TCP_NO_DELAY; - - /** Automatically close server cursor. */ - public static final String PROP_AUTO_CLOSE_SERVER_CURSORS = PROP_PREFIX + PARAM_AUTO_CLOSE_SERVER_CURSOR; - - /** Executes update queries on ignite server nodes in distributed mode. 
*/ - public static final String PROP_SKIP_REDUCER_ON_UPDATE = PROP_PREFIX + PARAM_SKIP_REDUCER_ON_UPDATE; - - /** Default port. */ - public static final int DFLT_PORT = ClientConnectorConfiguration.DFLT_PORT; - - /** - * Trim prefix from property. - * - * @param prop Property. - * @return Parameter name. - */ - public static String trimPrefix(String prop) { - return prop.substring(PROP_PREFIX.length()); - } - /** * Converts Java class name to type from {@link Types}. * From 7b1822ee5e3810e68f58a2510c680686569f33f0 Mon Sep 17 00:00:00 2001 From: Alexander Paschenko Date: Tue, 19 Dec 2017 11:11:11 +0300 Subject: [PATCH 235/243] IGNITE-4490: SQL: avoid querying H2 for INSERT and MERGE when it is not needed. This closes #1387. --- .../jdbc/JdbcErrorsAbstractSelfTest.java | 2 +- .../jdbc/thin/JdbcThinBatchSelfTest.java | 5 +- .../query/h2/DmlStatementsProcessor.java | 55 ++++----- ...stUpdateArgument.java => DmlArgument.java} | 14 ++- .../processors/query/h2/dml/DmlArguments.java | 104 ++++++++++++++++++ .../processors/query/h2/dml/DmlAstUtils.java | 14 ++- .../processors/query/h2/dml/DmlUtils.java | 19 +++- .../processors/query/h2/dml/FastUpdate.java | 91 ++------------- .../processors/query/h2/dml/UpdatePlan.java | 80 ++++++++++++-- .../query/h2/dml/UpdatePlanBuilder.java | 38 ++++++- 10 files changed, 284 insertions(+), 138 deletions(-) rename modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/{FastUpdateArgument.java => DmlArgument.java} (77%) create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlArguments.java diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java index 952baa5c4e85c..fb96f31382609 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java +++ 
b/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java @@ -107,7 +107,7 @@ public void testIndexErrors() throws SQLException { public void testDmlErrors() throws SQLException { checkErrorState("INSERT INTO \"test\".INTEGER(_key, _val) values(1, null)", "22004"); - checkErrorState("INSERT INTO \"test\".INTEGER(_key, _val) values(1, 'zzz')", "50000"); + checkErrorState("INSERT INTO \"test\".INTEGER(_key, _val) values(1, 'zzz')", "0700B"); } /** diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBatchSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBatchSelfTest.java index 5e2e39e03dd06..8609615131f42 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBatchSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBatchSelfTest.java @@ -260,7 +260,8 @@ public void testBatchExceptionPrepared() throws SQLException { pstmt.executeBatch(); fail("BatchUpdateException must be thrown"); - } catch(BatchUpdateException e) { + } + catch(BatchUpdateException e) { int [] updCnts = e.getUpdateCounts(); assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length); @@ -268,7 +269,7 @@ public void testBatchExceptionPrepared() throws SQLException { for (int i = 0; i < BATCH_SIZE; ++i) assertEquals("Invalid update count",1, updCnts[i]); - if (!e.getMessage().contains("Failed to execute SQL query.")) { + if (!e.getMessage().contains("Value conversion failed")) { log.error("Invalid exception: ", e); fail(); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java index deb262c8fe857..6732e7b8b37b9 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java +++ 
b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java @@ -32,7 +32,6 @@ import javax.cache.processor.EntryProcessor; import javax.cache.processor.EntryProcessorException; import javax.cache.processor.MutableEntry; - import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteDataStreamer; import org.apache.ignite.IgniteException; @@ -58,7 +57,6 @@ import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.h2.dml.DmlBatchSender; import org.apache.ignite.internal.processors.query.h2.dml.DmlDistributedPlanInfo; -import org.apache.ignite.internal.processors.query.h2.dml.FastUpdate; import org.apache.ignite.internal.processors.query.h2.dml.UpdateMode; import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan; import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlanBuilder; @@ -179,7 +177,7 @@ else if (!opCtx.isKeepBinary()) UpdateResult r; try { - r = executeUpdateStatement(schemaName, cctx, conn, prepared, fieldsQry, loc, filters, cancel, errKeys); + r = executeUpdateStatement(schemaName, cctx, conn, prepared, fieldsQry, loc, filters, cancel); } finally { cctx.operationContextPerCall(opCtx); @@ -259,15 +257,13 @@ GridQueryFieldsResult updateSqlFieldsLocal(String schemaName, Connection conn, P * @throws IgniteCheckedException if failed. 
*/ @SuppressWarnings({"unchecked", "ConstantConditions"}) - long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Object[] args) + long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, final Object[] args) throws IgniteCheckedException { - args = U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY); - Prepared p = GridSqlQueryParser.prepared(stmt); assert p != null; - UpdatePlan plan = UpdatePlanBuilder.planForStatement(p, true, idx, null, null, null); + final UpdatePlan plan = UpdatePlanBuilder.planForStatement(p, true, idx, null, null, null); if (!F.eq(streamer.cacheName(), plan.cacheContext().name())) throw new IgniteSQLException("Cross cache streaming is not supported, please specify cache explicitly" + @@ -282,14 +278,22 @@ long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Obje final ArrayList> data = new ArrayList<>(plan.rowCount()); - final GridQueryFieldsResult res = idx.queryLocalSqlFields(idx.schema(cctx.name()), plan.selectQuery(), - F.asList(args), null, false, 0, null); - QueryCursorImpl> stepCur = new QueryCursorImpl<>(new Iterable>() { @Override public Iterator> iterator() { try { - return new GridQueryCacheObjectsIterator(res.iterator(), idx.objectContext(), - cctx.keepBinary()); + Iterator> it; + + if (!F.isEmpty(plan.selectQuery())) { + GridQueryFieldsResult res = idx.queryLocalSqlFields(idx.schema(cctx.name()), + plan.selectQuery(), F.asList(U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY)), + null, false, 0, null); + + it = res.iterator(); + } + else + it = plan.createRows(U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY)).iterator(); + + return new GridQueryCacheObjectsIterator(it, idx.objectContext(), cctx.keepBinary()); } catch (IgniteCheckedException e) { throw new IgniteException(e); @@ -341,27 +345,23 @@ long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, Obje * @param loc Local query flag. * @param filters Cache name and key filter. 
* @param cancel Query cancel state holder. - * @param failedKeys Keys to restrict UPDATE and DELETE operations with. Null or empty array means no restriction. * @return Pair [number of successfully processed items; keys that have failed to be processed] * @throws IgniteCheckedException if failed. */ @SuppressWarnings({"ConstantConditions", "unchecked"}) private UpdateResult executeUpdateStatement(String schemaName, final GridCacheContext cctx, Connection c, - Prepared prepared, SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters, - GridQueryCancel cancel, Object[] failedKeys) throws IgniteCheckedException { - int mainCacheId = CU.cacheId(cctx.name()); + Prepared prepared, SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel) + throws IgniteCheckedException { + int mainCacheId = cctx.cacheId(); Integer errKeysPos = null; UpdatePlan plan = getPlanForStatement(schemaName, c, prepared, fieldsQry, loc, errKeysPos); - FastUpdate fastUpdate = plan.fastUpdate(); - - if (fastUpdate != null) { - assert F.isEmpty(failedKeys) && errKeysPos == null; + UpdateResult fastUpdateRes = plan.processFast(fieldsQry.getArgs()); - return fastUpdate.execute(plan.cacheContext().cache(), fieldsQry.getArgs()); - } + if (fastUpdateRes != null) + return fastUpdateRes; if (plan.distributedPlan() != null) { UpdateResult result = doDistributedUpdate(schemaName, fieldsQry, plan, cancel); @@ -371,13 +371,13 @@ private UpdateResult executeUpdateStatement(String schemaName, final GridCacheCo return result; } - assert !F.isEmpty(plan.selectQuery()); - - QueryCursorImpl> cur; + Iterable> cur; // Do a two-step query only if locality flag is not set AND if plan's SELECT corresponds to an actual // sub-query and not some dummy stuff like "select 1, 2, 3;" if (!loc && !plan.isLocalSubquery()) { + assert !F.isEmpty(plan.selectQuery()); + SqlFieldsQuery newFieldsQry = new SqlFieldsQuery(plan.selectQuery(), fieldsQry.isCollocated()) 
.setArgs(fieldsQry.getArgs()) .setDistributedJoins(fieldsQry.isDistributedJoins()) @@ -386,9 +386,10 @@ private UpdateResult executeUpdateStatement(String schemaName, final GridCacheCo .setPageSize(fieldsQry.getPageSize()) .setTimeout(fieldsQry.getTimeout(), TimeUnit.MILLISECONDS); - cur = (QueryCursorImpl>)idx.queryDistributedSqlFields(schemaName, newFieldsQry, true, - cancel, mainCacheId, true).get(0); + cur = idx.queryDistributedSqlFields(schemaName, newFieldsQry, true, cancel, mainCacheId, true).get(0); } + else if (plan.hasRows()) + cur = plan.createRows(fieldsQry.getArgs()); else { final GridQueryFieldsResult res = idx.queryLocalSqlFields(schemaName, plan.selectQuery(), F.asList(fieldsQry.getArgs()), filters, fieldsQry.isEnforceJoinOrder(), fieldsQry.getTimeout(), cancel); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdateArgument.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlArgument.java similarity index 77% rename from modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdateArgument.java rename to modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlArgument.java index dc90fe99d7f0c..b3c3dcecafcd6 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdateArgument.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlArgument.java @@ -17,11 +17,15 @@ package org.apache.ignite.internal.processors.query.h2.dml; -import org.apache.ignite.internal.util.lang.GridPlainClosure; - /** - * Operand for fast UPDATE or DELETE (single item operation that does not involve any SELECT). + * DML argument */ -public interface FastUpdateArgument extends GridPlainClosure { - // No-op. +public interface DmlArgument { + /** + * Get argument from parameter list. + * + * @param params Query input parameters. + * @return value. 
+ */ + Object get(Object[] params); } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlArguments.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlArguments.java new file mode 100644 index 0000000000000..f5aa3326c8fb5 --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlArguments.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query.h2.dml; + +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlConst; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlElement; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlParameter; +import org.jetbrains.annotations.Nullable; + +/** + * DML arguments factory. + */ +public class DmlArguments { + /** Operand that always evaluates as {@code null}. */ + private final static DmlArgument NULL_ARG = new ConstantArgument(null); + + /** + * Create argument from AST element. + * + * @param el Element. + * @return DML argument. 
+ */ + public static DmlArgument create(@Nullable GridSqlElement el) { + assert el == null ^ (el instanceof GridSqlConst || el instanceof GridSqlParameter); + + if (el == null) + return NULL_ARG; + + if (el instanceof GridSqlConst) + return new ConstantArgument(((GridSqlConst)el).value().getObject()); + else + return new ParamArgument(((GridSqlParameter)el).index()); + } + + /** + * Private constructor. + */ + private DmlArguments() { + // No-op. + } + + /** + * Value argument. + */ + private static class ConstantArgument implements DmlArgument { + /** Value to return. */ + private final Object val; + + /** + * Constructor. + * + * @param val Value. + */ + private ConstantArgument(Object val) { + this.val = val; + } + + /** {@inheritDoc} */ + public Object get(Object[] params) { + return val; + } + } + + /** + * Parameter argument. + */ + private static class ParamArgument implements DmlArgument { + /** Value to return. */ + private final int paramIdx; + + /** + * Constructor. + * + * @param paramIdx Parameter index. + */ + private ParamArgument(int paramIdx) { + assert paramIdx >= 0; + + this.paramIdx = paramIdx; + } + + /** {@inheritDoc} */ + @Override public Object get(Object[] params) { + assert params.length > paramIdx; + + return params[paramIdx]; + } + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java index 054e70822bfca..161ff4a471b22 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java @@ -83,7 +83,7 @@ private DmlAstUtils() { * @param cols Columns to insert values into. * @param rows Rows to create pseudo-SELECT upon. * @param subQry Subquery to use rather than rows. 
- * @return Subquery or pseudo-SELECT to evaluate inserted expressions. + * @return Subquery or pseudo-SELECT to evaluate inserted expressions, or {@code null} no query needs to be run. */ public static GridSqlQuery selectForInsertOrMerge(GridSqlColumn[] cols, List rows, GridSqlQuery subQry) { @@ -98,6 +98,8 @@ public static GridSqlQuery selectForInsertOrMerge(GridSqlColumn[] cols, List expC return newArr; } - return H2Utils.convert(val, desc, type); + Object res = H2Utils.convert(val, desc, type); + + if (res instanceof Date && res.getClass() != Date.class && expCls == Date.class) { + // We can get a Timestamp instead of Date when converting a String to Date + // without query - let's handle this + return new Date(((Date) res).getTime()); + } + + return res; } catch (Exception e) { throw new IgniteSQLException("Value conversion failed [from=" + currCls.getName() + ", to=" + diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdate.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdate.java index e662245a40166..dcceff3c2e774 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdate.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdate.java @@ -20,26 +20,21 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.processors.cache.GridCacheAdapter; import org.apache.ignite.internal.processors.query.h2.UpdateResult; -import org.apache.ignite.internal.processors.query.h2.sql.GridSqlConst; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlElement; -import org.apache.ignite.internal.processors.query.h2.sql.GridSqlParameter; import org.jetbrains.annotations.Nullable; /** * Arguments for fast, query-less UPDATE or DELETE - key and, optionally, value and new value. 
*/ public final class FastUpdate { - /** Operand that always evaluates as {@code null}. */ - private final static FastUpdateArgument NULL_ARG = new ConstantArgument(null); - /** Operand to compute key. */ - private final FastUpdateArgument keyArg; + private final DmlArgument keyArg; /** Operand to compute value. */ - private final FastUpdateArgument valArg; + private final DmlArgument valArg; /** Operand to compute new value. */ - private final FastUpdateArgument newValArg; + private final DmlArgument newValArg; /** * Create fast update instance. @@ -50,9 +45,9 @@ public final class FastUpdate { * @return Fast update. */ public static FastUpdate create(GridSqlElement key, GridSqlElement val, @Nullable GridSqlElement newVal) { - FastUpdateArgument keyArg = argument(key); - FastUpdateArgument valArg = argument(val); - FastUpdateArgument newValArg = argument(newVal); + DmlArgument keyArg = DmlArguments.create(key); + DmlArgument valArg = DmlArguments.create(val); + DmlArgument newValArg = DmlArguments.create(newVal); return new FastUpdate(keyArg, valArg, newValArg); } @@ -64,7 +59,7 @@ public static FastUpdate create(GridSqlElement key, GridSqlElement val, @Nullabl * @param valArg Value argument. * @param newValArg New value argument. 
*/ - private FastUpdate(FastUpdateArgument keyArg, FastUpdateArgument valArg, FastUpdateArgument newValArg) { + private FastUpdate(DmlArgument keyArg, DmlArgument valArg, DmlArgument newValArg) { this.keyArg = keyArg; this.valArg = valArg; this.newValArg = newValArg; @@ -80,12 +75,12 @@ private FastUpdate(FastUpdateArgument keyArg, FastUpdateArgument valArg, FastUpd */ @SuppressWarnings({"unchecked", "ConstantConditions"}) public UpdateResult execute(GridCacheAdapter cache, Object[] args) throws IgniteCheckedException { - Object key = keyArg.apply(args); + Object key = keyArg.get(args); assert key != null; - Object val = valArg.apply(args); - Object newVal = newValArg.apply(args); + Object val = valArg.get(args); + Object newVal = newValArg.get(args); boolean res; @@ -106,70 +101,4 @@ public UpdateResult execute(GridCacheAdapter cache, Object[] args) throws Ignite return res ? UpdateResult.ONE : UpdateResult.ZERO; } - - /** - * Create argument for AST element. - * - * @param el Element. - * @return Argument. - */ - private static FastUpdateArgument argument(@Nullable GridSqlElement el) { - assert el == null ^ (el instanceof GridSqlConst || el instanceof GridSqlParameter); - - if (el == null) - return NULL_ARG; - - if (el instanceof GridSqlConst) - return new ConstantArgument(((GridSqlConst)el).value().getObject()); - else - return new ParamArgument(((GridSqlParameter)el).index()); - } - - /** - * Value argument. - */ - private static class ConstantArgument implements FastUpdateArgument { - /** Value to return. */ - private final Object val; - - /** - * Constructor. - * - * @param val Value. - */ - private ConstantArgument(Object val) { - this.val = val; - } - - /** {@inheritDoc} */ - @Override public Object apply(Object[] arg) throws IgniteCheckedException { - return val; - } - } - - /** - * Parameter argument. - */ - private static class ParamArgument implements FastUpdateArgument { - /** Value to return. */ - private final int paramIdx; - - /** - * Constructor. 
- * - * @param paramIdx Parameter index. - */ - private ParamArgument(int paramIdx) { - assert paramIdx >= 0; - - this.paramIdx = paramIdx; - } - - /** {@inheritDoc} */ - @Override public Object apply(Object[] arg) throws IgniteCheckedException { - assert arg.length > paramIdx; - - return arg[paramIdx]; - } - } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java index 1dd6c45000706..6c986704da599 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java @@ -17,6 +17,10 @@ package org.apache.ignite.internal.processors.query.h2.dml; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.binary.BinaryObjectBuilder; @@ -26,6 +30,7 @@ import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.QueryUtils; +import org.apache.ignite.internal.processors.query.h2.UpdateResult; import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; import org.apache.ignite.internal.util.typedef.F; @@ -34,10 +39,6 @@ import org.h2.table.Column; import org.jetbrains.annotations.Nullable; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import static org.apache.ignite.internal.processors.query.h2.dml.UpdateMode.BULK_LOAD; import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.DEFAULT_COLUMNS_COUNT; @@ -48,7 +49,7 @@ public final 
class UpdatePlan { /** Initial statement to drive the rest of the logic. */ private final UpdateMode mode; - /** Target table to be affected by initial DML statement. */ + /** to be affected by initial DML statement. */ private final GridH2Table tbl; /** Column names to set or update. */ @@ -75,6 +76,9 @@ public final class UpdatePlan { /** Subquery flag - {@code true} if {@link #selectQry} is an actual subquery that retrieves data from some cache. */ private final boolean isLocSubqry; + /** Rows for query-less MERGE or INSERT. */ + private final List> rows; + /** Number of rows in rows based MERGE or INSERT. */ private final int rowsNum; @@ -97,6 +101,7 @@ public final class UpdatePlan { * @param valColIdx value column index. * @param selectQry Select query. * @param isLocSubqry Local subquery flag. + * @param rows Rows for query-less INSERT or MERGE. * @param rowsNum Rows number. * @param fastUpdate Fast update (if any). * @param distributed Distributed plan (if any) @@ -112,12 +117,14 @@ public UpdatePlan( int valColIdx, String selectQry, boolean isLocSubqry, + List> rows, int rowsNum, @Nullable FastUpdate fastUpdate, @Nullable DmlDistributedPlanInfo distributed ) { this.colNames = colNames; this.colTypes = colTypes; + this.rows = rows; this.rowsNum = rowsNum; assert mode != null; @@ -162,6 +169,7 @@ public UpdatePlan( -1, selectQry, false, + null, 0, fastUpdate, distributed @@ -346,6 +354,61 @@ public T3 processRowForUpdate(List row) throws Ignite return new T3<>(key, oldVal, newVal); } + /** + * Process fast DML operation if possible. + * + * @param args QUery arguments. + * @return Update result or {@code null} if fast update is not applicable for plan. + * @throws IgniteCheckedException If failed. + */ + public UpdateResult processFast(Object[] args) throws IgniteCheckedException { + if (fastUpdate != null) + return fastUpdate.execute(cacheContext().cache(), args); + + return null; + } + + /** + * @return {@code True} if predefined rows exist. 
+ */ + public boolean hasRows() { + return !F.isEmpty(rows); + } + + /** + * Extract rows from plan without performing any query. + * @param args Original query arguments. + * @return Rows from plan. + * @throws IgniteCheckedException if failed. + */ + public List> createRows(Object[] args) throws IgniteCheckedException { + assert rowsNum > 0 && !F.isEmpty(colNames); + + List> res = new ArrayList<>(rowsNum); + + GridH2RowDescriptor desc = tbl.rowDescriptor(); + + for (List row : rows) { + List resRow = new ArrayList<>(); + + for (int j = 0; j < colNames.length; j++) { + Object colVal = row.get(j).get(args); + + if (j == keyColIdx || j == valColIdx) { + Class colCls = j == keyColIdx ? desc.type().keyClass() : desc.type().valueClass(); + + colVal = DmlUtils.convert(colVal, desc, colCls, colTypes[j]); + } + + resRow.add(colVal); + } + + res.add(resRow); + } + + return res; + } + /** * @return Update mode. */ @@ -387,11 +450,4 @@ public String selectQuery() { @Nullable public boolean isLocalSubquery() { return isLocSubqry; } - - /** - * @return Fast update. 
- */ - @Nullable public FastUpdate fastUpdate() { - return fastUpdate; - } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java index 9cd8c59a46fd6..7c5232f280223 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java @@ -21,6 +21,7 @@ import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.SQLException; +import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -128,6 +129,8 @@ private static UpdatePlan planForInsert(GridSqlStatement stmt, boolean loc, Igni GridH2RowDescriptor desc; + List elRows = null; + if (stmt instanceof GridSqlInsert) { GridSqlInsert ins = (GridSqlInsert) stmt; target = ins.into(); @@ -137,6 +140,10 @@ private static UpdatePlan planForInsert(GridSqlStatement stmt, boolean loc, Igni cols = ins.columns(); sel = DmlAstUtils.selectForInsertOrMerge(cols, ins.rows(), ins.query()); + + if (sel == null) + elRows = ins.rows(); + isTwoStepSubqry = (ins.query() != null); rowsNum = isTwoStepSubqry ? 0 : ins.rows().size(); } @@ -150,6 +157,10 @@ else if (stmt instanceof GridSqlMerge) { cols = merge.columns(); sel = DmlAstUtils.selectForInsertOrMerge(cols, merge.rows(), merge.query()); + + if (sel == null) + elRows = merge.rows(); + isTwoStepSubqry = (merge.query() != null); rowsNum = isTwoStepSubqry ? 0 : merge.rows().size(); } @@ -159,7 +170,7 @@ else if (stmt instanceof GridSqlMerge) { } // Let's set the flag only for subqueries that have their FROM specified. 
- isTwoStepSubqry = (isTwoStepSubqry && (sel instanceof GridSqlUnion || + isTwoStepSubqry &= (sel != null && (sel instanceof GridSqlUnion || (sel instanceof GridSqlSelect && ((GridSqlSelect) sel).from() != null))); int keyColIdx = -1; @@ -211,13 +222,33 @@ else if (stmt instanceof GridSqlMerge) { KeyValueSupplier keySupplier = createSupplier(cctx, desc.type(), keyColIdx, hasKeyProps, true, false); KeyValueSupplier valSupplier = createSupplier(cctx, desc.type(), valColIdx, hasValProps, false, false); - String selectSql = sel.getSQL(); + String selectSql = sel != null ? sel.getSQL() : null; DmlDistributedPlanInfo distributed = (rowsNum == 0 && !F.isEmpty(selectSql)) ? checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName()) : null; UpdateMode mode = stmt instanceof GridSqlMerge ? UpdateMode.MERGE : UpdateMode.INSERT; + List> rows = null; + + if (elRows != null) { + assert sel == null; + + rows = new ArrayList<>(elRows.size()); + + for (GridSqlElement[] elRow : elRows) { + List row = new ArrayList<>(cols.length); + + for (GridSqlElement el : elRow) { + DmlArgument arg = DmlArguments.create(el); + + row.add(arg); + } + + rows.add(row); + } + } + return new UpdatePlan( mode, tbl.dataTable(), @@ -229,6 +260,7 @@ else if (stmt instanceof GridSqlMerge) { valColIdx, selectSql, !isTwoStepSubqry, + rows, rowsNum, null, distributed @@ -350,6 +382,7 @@ else if (stmt instanceof GridSqlDelete) { valColIdx, selectSql, false, + null, 0, null, distributed @@ -452,6 +485,7 @@ public static UpdatePlan planForBulkLoad(SqlBulkLoadCommand cmd, GridH2Table tbl valColIdx, null, true, + null, 0, null, null From c63b72bb9a5580d18c0f8de8e5506ea28c8da92b Mon Sep 17 00:00:00 2001 From: tledkov-gridgain Date: Fri, 5 Jan 2018 12:08:54 +0300 Subject: [PATCH 236/243] IGNITE-5623: SQL: default column values support for CREATE TABLE. This closes #3256. 
(cherry picked from commit 78e79e0) --- .../jdbc/thin/JdbcThinMetadataSelfTest.java | 27 +- .../org/apache/ignite/cache/QueryEntity.java | 37 ++- .../internal/binary/BinaryFieldImpl.java | 5 + .../jdbc/thin/JdbcThinDatabaseMetadata.java | 2 +- .../internal/jdbc/thin/JdbcThinTcpIo.java | 9 +- .../processors/odbc/jdbc/JdbcColumnMeta.java | 7 + .../odbc/jdbc/JdbcColumnMetaV3.java | 83 +++++++ .../odbc/jdbc/JdbcConnectionContext.java | 6 +- .../odbc/jdbc/JdbcMetaColumnsResultV3.java | 50 ++++ .../odbc/jdbc/JdbcRequestHandler.java | 13 +- .../processors/odbc/jdbc/JdbcResult.java | 10 +- .../utils/PlatformConfigurationUtils.java | 36 ++- .../processors/query/GridQueryProcessor.java | 2 +- .../processors/query/GridQueryProperty.java | 7 + .../query/GridQueryTypeDescriptor.java | 13 +- .../internal/processors/query/QueryField.java | 21 ++ .../query/QueryTypeDescriptorImpl.java | 23 ++ .../internal/processors/query/QueryUtils.java | 17 +- .../query/property/QueryBinaryProperty.java | 13 +- .../query/property/QueryClassProperty.java | 5 + .../query/h2/ddl/DdlStatementsProcessor.java | 13 +- .../processors/query/h2/dml/UpdatePlan.java | 3 + .../query/h2/sql/GridSqlColumn.java | 10 + .../query/h2/sql/GridSqlQueryParser.java | 34 ++- .../query/IgniteSqlDefaultValueTest.java | 234 ++++++++++++++++++ .../h2/GridIndexingSpiAbstractSelfTest.java | 10 + .../query/h2/sql/GridQueryParsingTest.java | 20 +- .../IgniteCacheQuerySelfTestSuite.java | 3 + .../QueryEntityConfigurationParityTest.cs | 50 ++++ .../Cache/CacheConfigurationTest.cs | 5 +- .../Cache/Query/CacheDmlQueriesTest.cs | 42 +++- .../Config/full-config.xml | 4 +- .../IgniteConfigurationSerializerTest.cs | 4 +- .../Cache/Configuration/QueryEntity.cs | 3 +- .../Cache/Configuration/QueryField.cs | 7 + .../Configuration/QuerySqlFieldAttribute.cs | 5 + .../IgniteConfigurationSection.xsd | 7 + 37 files changed, 783 insertions(+), 57 deletions(-) create mode 100644 
modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV3.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResultV3.java create mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlDefaultValueTest.java create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/QueryEntityConfigurationParityTest.cs diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java index 2fd40d1805d2a..16b0ad526e229 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java @@ -138,7 +138,8 @@ protected CacheConfiguration cacheConfiguration(QueryEntity qryEntity) { try (Connection conn = DriverManager.getConnection(URL)) { Statement stmt = conn.createStatement(); - stmt.execute("CREATE TABLE TEST (ID INT, NAME VARCHAR(50), VAL VARCHAR(50), PRIMARY KEY (ID, NAME))"); + stmt.execute("CREATE TABLE TEST (ID INT, NAME VARCHAR(50) default 'default name', " + + "age int default 21, VAL VARCHAR(50), PRIMARY KEY (ID, NAME))"); stmt.execute("CREATE TABLE \"Quoted\" (\"Id\" INT primary key, \"Name\" VARCHAR(50)) WITH WRAP_KEY"); stmt.execute("CREATE INDEX \"MyTestIndex quoted\" on \"Quoted\" (\"Id\" DESC)"); stmt.execute("CREATE INDEX IDX ON TEST (ID ASC)"); @@ -371,23 +372,25 @@ public void testGetAllColumns() throws Exception { ResultSet rs = meta.getColumns(null, null, null, null); Set expectedCols = new HashSet<>(Arrays.asList( - "org.ORGANIZATION.ID", - "org.ORGANIZATION.NAME", - "pers.PERSON.ORGID", - "pers.PERSON.AGE", - "pers.PERSON.NAME", - "PUBLIC.TEST.ID", - "PUBLIC.TEST.NAME", - "PUBLIC.TEST.VAL", - "PUBLIC.Quoted.Id", - "PUBLIC.Quoted.Name")); + 
"org.ORGANIZATION.ID.null", + "org.ORGANIZATION.NAME.null", + "pers.PERSON.ORGID.null", + "pers.PERSON.AGE.null", + "pers.PERSON.NAME.null", + "PUBLIC.TEST.ID.null", + "PUBLIC.TEST.NAME.'default name'", + "PUBLIC.TEST.VAL.null", + "PUBLIC.TEST.AGE.21", + "PUBLIC.Quoted.Id.null", + "PUBLIC.Quoted.Name.null")); Set actualCols = new HashSet<>(expectedCols.size()); while(rs.next()) { actualCols.add(rs.getString("TABLE_SCHEM") + '.' + rs.getString("TABLE_NAME") + "." - + rs.getString("COLUMN_NAME")); + + rs.getString("COLUMN_NAME") + "." + + rs.getString("COLUMN_DEF")); } assert expectedCols.equals(actualCols) : "expectedCols=" + expectedCols + diff --git a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java index 2002b4fa5c7c3..0065bae959d73 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java @@ -86,6 +86,9 @@ public class QueryEntity implements Serializable { /** Fields that must have non-null value. NB: DO NOT remove underscore to avoid clashes with QueryEntityEx. */ private Set _notNullFields; + /** Fields default values. */ + private Map defaultFieldValues = new HashMap<>(); + /** * Creates an empty query entity. */ @@ -114,6 +117,9 @@ public QueryEntity(QueryEntity other) { tableName = other.tableName; _notNullFields = other._notNullFields != null ? new HashSet<>(other._notNullFields) : null; + + defaultFieldValues = other.defaultFieldValues != null ? new HashMap<>(other.defaultFieldValues) + : new HashMap(); } /** @@ -355,9 +361,12 @@ public String getTableName() { * Sets table name for this query entity. * * @param tableName table name + * @return {@code this} for chaining. 
*/ - public void setTableName(String tableName) { + public QueryEntity setTableName(String tableName) { this.tableName = tableName; + + return this; } /** @@ -381,6 +390,27 @@ public QueryEntity setNotNullFields(@Nullable Set notNullFields) { return this; } + /** + * Gets fields default values. + * + * @return Field's name to default value map. + */ + public Map getDefaultFieldValues() { + return defaultFieldValues; + } + + /** + * Sets fields default values. + * + * @param defaultFieldValues Field's name to default value map. + * @return {@code this} for chaining. + */ + public QueryEntity setDefaultFieldValues(Map defaultFieldValues) { + this.defaultFieldValues = defaultFieldValues; + + return this; + } + /** * Utility method for building query entities programmatically. * @@ -639,13 +669,14 @@ private static void processAnnotation(boolean key, QuerySqlField sqlAnn, QueryTe F.eq(aliases, entity.aliases) && F.eqNotOrdered(idxs, entity.idxs) && F.eq(tableName, entity.tableName) && - F.eq(_notNullFields, entity._notNullFields); + F.eq(_notNullFields, entity._notNullFields) && + F.eq(defaultFieldValues, entity.defaultFieldValues); } /** {@inheritDoc} */ @Override public int hashCode() { return Objects.hash(keyType, valType, keyFieldName, valueFieldName, fields, keyFields, aliases, idxs, - tableName, _notNullFields); + tableName, _notNullFields, defaultFieldValues); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldImpl.java index 59bd03dd39f0a..883576c9faad3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldImpl.java @@ -56,6 +56,8 @@ public class BinaryFieldImpl implements BinaryFieldEx { /** * Constructor. * + * @param ctx Binary context. + * @param typeId Type ID. * @param schemas Schemas. 
* @param fieldName Field name. * @param fieldId Field ID. @@ -278,6 +280,9 @@ public int fieldOrder(BinaryObjectExImpl obj) { int schemaId = obj.schemaId(); + if (schemaId == 0) + return BinarySchema.ORDER_NOT_FOUND; + BinarySchema schema = schemas.schema(schemaId); if (schema == null) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java index cfc3b689b5cd7..dd8b73392197e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java @@ -852,7 +852,7 @@ private List columnRow(JdbcColumnMeta colMeta, int pos) { row.add(10); // 10. NUM_PREC_RADIX row.add(colMeta.isNullable() ? columnNullable : columnNoNulls); // 11. NULLABLE row.add((String)null); // 12. REMARKS - row.add((String)null); // 13. COLUMN_DEF + row.add(colMeta.defaultValue()); // 13. COLUMN_DEF row.add(colMeta.dataType()); // 14. SQL_DATA_TYPE row.add((Integer)null); // 15. SQL_DATETIME_SUB row.add(Integer.MAX_VALUE); // 16. CHAR_OCTET_LENGTH diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java index 4d239348b699e..fec218e368494 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java @@ -74,8 +74,11 @@ public class JdbcThinTcpIo { /** Version 2.3.1. */ private static final ClientListenerProtocolVersion VER_2_3_0 = ClientListenerProtocolVersion.create(2, 3, 0); + /** Version 2.4.0. */ + private static final ClientListenerProtocolVersion VER_2_4_0 = ClientListenerProtocolVersion.create(2, 4, 0); + /** Current version. 
*/ - private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_3_0; + private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_4_0; /** Initial output stream capacity for handshake. */ private static final int HANDSHAKE_MSG_SIZE = 13; @@ -212,8 +215,8 @@ public void handshake(ClientListenerProtocolVersion ver) throws IOException, SQL ClientListenerProtocolVersion srvProtocolVer = ClientListenerProtocolVersion.create(maj, min, maintenance); - if (VER_2_1_5.equals(srvProtocolVer)) - handshake(VER_2_1_5); + if (VER_2_3_0.equals(srvProtocolVer) || VER_2_1_5.equals(srvProtocolVer)) + handshake(srvProtocolVer); else if (VER_2_1_0.equals(srvProtocolVer)) handshake_2_1_0(); else { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMeta.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMeta.java index d927c26ecc793..c0ac322c3aa8e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMeta.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMeta.java @@ -127,6 +127,13 @@ public String dataTypeClass() { return dataTypeClass; } + /** + * @return Column's default value. + */ + public String defaultValue() { + return null; + } + /** * Return 'nullable' flag in compatibility mode (according with column name and column type). * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV3.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV3.java new file mode 100644 index 0000000000000..9911be0a3e1c8 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV3.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.jdbc; + +import org.apache.ignite.internal.binary.BinaryReaderExImpl; +import org.apache.ignite.internal.binary.BinaryWriterExImpl; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * JDBC column metadata V3. + */ +public class JdbcColumnMetaV3 extends JdbcColumnMetaV2 { + /** Default value. */ + private String dfltValue; + + /** + * Default constructor is used for serialization. + */ + JdbcColumnMetaV3() { + // No-op. + } + + /** + * @param schemaName Schema. + * @param tblName Table. + * @param colName Column. + * @param cls Type. + * @param nullable Allow nulls. + * @param dfltVal Default value. 
+ */ + public JdbcColumnMetaV3(String schemaName, String tblName, String colName, Class cls, boolean nullable, + Object dfltVal) { + super(schemaName, tblName, colName, cls, nullable); + + if (dfltVal == null) + dfltValue = null; + else { + if (dfltVal instanceof String) + dfltValue = "'" + String.valueOf(dfltVal) + "'"; + else + dfltValue = String.valueOf(dfltVal); + } + } + + /** {@inheritDoc} */ + @Override public String defaultValue() { + return dfltValue; + } + + /** {@inheritDoc} */ + @Override public void writeBinary(BinaryWriterExImpl writer) { + super.writeBinary(writer); + + writer.writeString(dfltValue); + } + + /** {@inheritDoc} */ + @Override public void readBinary(BinaryReaderExImpl reader) { + super.readBinary(reader); + + dfltValue = reader.readString(); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(JdbcColumnMetaV3.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java index 7b404664e33e2..5841a4d450e9f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java @@ -40,8 +40,11 @@ public class JdbcConnectionContext implements ClientListenerConnectionContext { /** Version 2.3.1: added "multiple statements query" feature. */ public static final ClientListenerProtocolVersion VER_2_3_0 = ClientListenerProtocolVersion.create(2, 3, 0); + /** Version 2.4.0: adds default values for columns feature. */ + public static final ClientListenerProtocolVersion VER_2_4_0 = ClientListenerProtocolVersion.create(2, 4, 0); + /** Current version. 
*/ - private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_3_0; + private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_4_0; /** Supported versions. */ private static final Set SUPPORTED_VERS = new HashSet<>(); @@ -63,6 +66,7 @@ public class JdbcConnectionContext implements ClientListenerConnectionContext { static { SUPPORTED_VERS.add(CURRENT_VER); + SUPPORTED_VERS.add(VER_2_3_0); SUPPORTED_VERS.add(VER_2_1_5); SUPPORTED_VERS.add(VER_2_1_0); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResultV3.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResultV3.java new file mode 100644 index 0000000000000..0cee9b725f68b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResultV3.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.odbc.jdbc; + +import java.util.Collection; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * JDBC columns metadata result. 
+ */ +public class JdbcMetaColumnsResultV3 extends JdbcMetaColumnsResult { + /** + * Default constructor is used for deserialization. + */ + JdbcMetaColumnsResultV3() { + super(META_COLUMNS_V3); + } + + /** + * @param meta Columns metadata. + */ + JdbcMetaColumnsResultV3(Collection meta) { + super(META_COLUMNS_V3, meta); + } + + /** {@inheritDoc} */ + @Override protected JdbcColumnMeta createMetaColumn() { + return new JdbcColumnMetaV3(); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(JdbcMetaColumnsResultV3.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java index 941d4b4ba036e..b6e21a2db558c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java @@ -60,6 +60,7 @@ import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest.CMD_FINISHED_EOF; import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest.CMD_FINISHED_ERROR; import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcConnectionContext.VER_2_3_0; +import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcConnectionContext.VER_2_4_0; import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.BATCH_EXEC; import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.BULK_LOAD_BATCH; import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.META_COLUMNS; @@ -671,7 +672,13 @@ private JdbcResponse getColumnsMeta(JdbcMetaColumnsRequest req) { JdbcColumnMeta columnMeta; - if (protocolVer.compareTo(VER_2_3_0) >= 0) { + if (protocolVer.compareTo(VER_2_4_0) >= 0) { + GridQueryProperty prop = table.property(colName); + + 
columnMeta = new JdbcColumnMetaV3(table.schemaName(), table.tableName(), + field.getKey(), field.getValue(), !prop.notNull(), prop.defaultValue()); + } + else if (protocolVer.compareTo(VER_2_3_0) >= 0) { GridQueryProperty prop = table.property(colName); columnMeta = new JdbcColumnMetaV2(table.schemaName(), table.tableName(), @@ -689,7 +696,9 @@ private JdbcResponse getColumnsMeta(JdbcMetaColumnsRequest req) { JdbcMetaColumnsResult res; - if (protocolVer.compareTo(VER_2_3_0) >= 0) + if (protocolVer.compareTo(VER_2_4_0) >= 0) + res = new JdbcMetaColumnsResultV3(meta); + else if (protocolVer.compareTo(VER_2_3_0) >= 0) res = new JdbcMetaColumnsResultV2(meta); else res = new JdbcMetaColumnsResult(meta); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java index 952aa88feba0f..43631e96a6cbc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java @@ -62,8 +62,11 @@ public class JdbcResult implements JdbcRawBinarylizable { /** Columns metadata result V2. */ static final byte META_COLUMNS_V2 = 14; + /** Columns metadata result V3. */ + static final byte META_COLUMNS_V3 = 15; + /** A request to send file from client to server. */ - static final byte BULK_LOAD_ACK = 15; + static final byte BULK_LOAD_ACK = 16; /** Success status. 
*/ private byte type; @@ -158,6 +161,11 @@ public static JdbcResult readResult(BinaryReaderExImpl reader) throws BinaryObje break; + case META_COLUMNS_V3: + res = new JdbcMetaColumnsResultV3(); + + break; + case BULK_LOAD_ACK: res = new JdbcBulkLoadAckResult(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java index c124d5a8fe838..d5ed7d3f75352 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java @@ -463,6 +463,7 @@ private static QueryEntity readQueryEntity(BinaryRawReader in) { int cnt = in.readInt(); Set keyFields = new HashSet<>(cnt); Set notNullFields = new HashSet<>(cnt); + Map defVals = new HashMap<>(cnt); if (cnt > 0) { LinkedHashMap fields = new LinkedHashMap<>(cnt); @@ -478,6 +479,10 @@ private static QueryEntity readQueryEntity(BinaryRawReader in) { if (in.readBoolean()) notNullFields.add(fieldName); + + Object defVal = in.readObject(); + if (defVal != null) + defVals.put(fieldName, defVal); } res.setFields(fields); @@ -487,6 +492,9 @@ private static QueryEntity readQueryEntity(BinaryRawReader in) { if (!notNullFields.isEmpty()) res.setNotNullFields(notNullFields); + + if (!defVals.isEmpty()) + res.setDefaultFieldValues(defVals); } // Aliases @@ -937,21 +945,22 @@ public static void writeCacheConfiguration(BinaryRawWriter writer, CacheConfigur * Write query entity. * * @param writer Writer. - * @param queryEntity Query entity. + * @param qryEntity Query entity. 
*/ - private static void writeQueryEntity(BinaryRawWriter writer, QueryEntity queryEntity) { - assert queryEntity != null; + private static void writeQueryEntity(BinaryRawWriter writer, QueryEntity qryEntity) { + assert qryEntity != null; - writer.writeString(queryEntity.getKeyType()); - writer.writeString(queryEntity.getValueType()); - writer.writeString(queryEntity.getTableName()); + writer.writeString(qryEntity.getKeyType()); + writer.writeString(qryEntity.getValueType()); + writer.writeString(qryEntity.getTableName()); // Fields - LinkedHashMap fields = queryEntity.getFields(); + LinkedHashMap fields = qryEntity.getFields(); if (fields != null) { - Set keyFields = queryEntity.getKeyFields(); - Set notNullFields = queryEntity.getNotNullFields(); + Set keyFields = qryEntity.getKeyFields(); + Set notNullFields = qryEntity.getNotNullFields(); + Map defVals = qryEntity.getDefaultFieldValues(); writer.writeInt(fields.size()); @@ -960,13 +969,14 @@ private static void writeQueryEntity(BinaryRawWriter writer, QueryEntity queryEn writer.writeString(field.getValue()); writer.writeBoolean(keyFields != null && keyFields.contains(field.getKey())); writer.writeBoolean(notNullFields != null && notNullFields.contains(field.getKey())); + writer.writeObject(defVals != null ? 
defVals.get(field.getKey()) : null); } } else writer.writeInt(0); // Aliases - Map aliases = queryEntity.getAliases(); + Map aliases = qryEntity.getAliases(); if (aliases != null) { writer.writeInt(aliases.size()); @@ -980,7 +990,7 @@ private static void writeQueryEntity(BinaryRawWriter writer, QueryEntity queryEn writer.writeInt(0); // Indexes - Collection indexes = queryEntity.getIndexes(); + Collection indexes = qryEntity.getIndexes(); if (indexes != null) { writer.writeInt(indexes.size()); @@ -991,8 +1001,8 @@ private static void writeQueryEntity(BinaryRawWriter writer, QueryEntity queryEn else writer.writeInt(0); - writer.writeString(queryEntity.getKeyFieldName()); - writer.writeString(queryEntity.getValueFieldName()); + writer.writeString(qryEntity.getKeyFieldName()); + writer.writeString(qryEntity.getValueFieldName()); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java index df4652bb3e59f..7444ec7f13ea4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java @@ -2288,7 +2288,7 @@ private void processDynamicAddColumn(QueryTypeDescriptorImpl d, List for (QueryField col : cols) { try { props.add(new QueryBinaryProperty(ctx, col.name(), null, Class.forName(col.typeName()), - false, null, !col.isNullable())); + false, null, !col.isNullable(), null)); } catch (ClassNotFoundException e) { throw new SchemaOperationException("Class not found for new property: " + col.typeName()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProperty.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProperty.java index c8ae21274dba0..b258b7c4702ec 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProperty.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProperty.java @@ -70,4 +70,11 @@ public interface GridQueryProperty { * @return {@code true} if property does not allow {@code null} value. */ public boolean notNull(); + + /** + * Gets the default value for this property. + * + * @return {@code null} if a default value is not set for the property. + */ + public Object defaultValue(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryTypeDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryTypeDescriptor.java index dcf850c6b9c6d..8a23e50f40d84 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryTypeDescriptor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryTypeDescriptor.java @@ -19,6 +19,8 @@ import java.util.Map; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.processors.cache.CacheObject; +import org.apache.ignite.internal.util.lang.GridMapEntry; import org.jetbrains.annotations.Nullable; /** @@ -177,4 +179,13 @@ public interface GridQueryTypeDescriptor { * @throws IgniteCheckedException, If failure happens. */ public void validateKeyAndValue(Object key, Object val) throws IgniteCheckedException; -} \ No newline at end of file + + /** + * Sets defaults value for given key and value. + * + * @param key Key. + * @param val Value. + * @throws IgniteCheckedException If failed. 
+ */ + public void setDefaults(Object key, Object val) throws IgniteCheckedException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryField.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryField.java index 8c7d3678ea6d4..1a75ef16d4270 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryField.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryField.java @@ -37,15 +37,29 @@ public class QueryField implements Serializable { /** Nullable flag. */ private final boolean nullable; + /** Default value. */ + private final Object dfltValue; + /** * @param name Field name. * @param typeName Class name for this field's values. * @param nullable Nullable flag. */ public QueryField(String name, String typeName, boolean nullable) { + this(name, typeName, nullable, null); + } + + /** + * @param name Field name. + * @param typeName Class name for this field's values. + * @param nullable Nullable flag. + * @param dfltValue Default value. + */ + public QueryField(String name, String typeName, boolean nullable, Object dfltValue) { this.name = name; this.typeName = typeName; this.nullable = nullable; + this.dfltValue = dfltValue; } /** @@ -69,6 +83,13 @@ public boolean isNullable() { return nullable; } + /** + * @return Default value. 
+ */ + public Object defaultValue() { + return dfltValue; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(QueryField.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryTypeDescriptorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryTypeDescriptorImpl.java index 72adefd43f37f..d15588ed1244e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryTypeDescriptorImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryTypeDescriptorImpl.java @@ -107,6 +107,9 @@ public class QueryTypeDescriptorImpl implements GridQueryTypeDescriptor { /** */ private List validateProps; + /** */ + private List propsWithDefaultValue; + /** * Constructor. * @@ -380,6 +383,13 @@ public void addProperty(GridQueryProperty prop, boolean failOnDuplicate) throws validateProps.add(prop); } + if (prop.defaultValue() != null) { + if (propsWithDefaultValue == null) + propsWithDefaultValue = new ArrayList<>(); + + propsWithDefaultValue.add(prop); + } + fields.put(name, prop.type()); } @@ -518,4 +528,17 @@ else if (F.eq(prop.name(), valFieldName)) { throw new IgniteSQLException("Null value is not allowed for column '" + prop.name() + "'", errCode); } } + + /** {@inheritDoc} */ + @SuppressWarnings("ForLoopReplaceableByForEach") + @Override public void setDefaults(Object key, Object val) throws IgniteCheckedException { + if (F.isEmpty(propsWithDefaultValue)) + return; + + for (int i = 0; i < propsWithDefaultValue.size(); ++i) { + GridQueryProperty prop = propsWithDefaultValue.get(i); + + prop.setValue(key, val, prop.defaultValue()); + } + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java index 9584e05f27fd0..a5dc595abf2d4 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java @@ -241,6 +241,7 @@ public static QueryEntity normalizeQueryEntity(QueryEntity entity, boolean escap normalEntity.setKeyFieldName(entity.getKeyFieldName()); normalEntity.setValueFieldName(entity.getValueFieldName()); normalEntity.setNotNullFields(entity.getNotNullFields()); + normalEntity.setDefaultFieldValues(entity.getDefaultFieldValues()); // Normalize table name. String normalTblName = entity.getTableName(); @@ -515,6 +516,7 @@ public static void processBinaryMeta(GridKernalContext ctx, QueryEntity qryEntit throws IgniteCheckedException { Set keyFields = qryEntity.getKeyFields(); Set notNulls = qryEntity.getNotNullFields(); + Map dlftVals = qryEntity.getDefaultFieldValues(); // We have to distinguish between empty and null keyFields when the key is not of SQL type - // when a key is not of SQL type, absence of a field in nonnull keyFields tell us that this field @@ -543,9 +545,11 @@ public static void processBinaryMeta(GridKernalContext ctx, QueryEntity qryEntit boolean notNull = notNulls != null && notNulls.contains(entry.getKey()); + Object dfltVal = dlftVals != null ? dlftVals.get(entry.getKey()) : null; + QueryBinaryProperty prop = buildBinaryProperty(ctx, entry.getKey(), U.classForName(entry.getValue(), Object.class, true), - d.aliases(), isKeyField, notNull); + d.aliases(), isKeyField, notNull, dfltVal); d.addProperty(prop, false); } @@ -688,10 +692,12 @@ else if (idxTyp != null) * @param isKeyField Key ownership flag, as defined in {@link QueryEntity#keyFields}: {@code true} if field belongs * to key, {@code false} if it belongs to value, {@code null} if QueryEntity#keyFields is null. * @param notNull {@code true} if {@code null} value is not allowed. + * @param dlftVal Default value. * @return Binary property. + * @throws IgniteCheckedException On error. 
*/ public static QueryBinaryProperty buildBinaryProperty(GridKernalContext ctx, String pathStr, Class resType, - Map aliases, @Nullable Boolean isKeyField, boolean notNull) throws IgniteCheckedException { + Map aliases, @Nullable Boolean isKeyField, boolean notNull, Object dlftVal) throws IgniteCheckedException { String[] path = pathStr.split("\\."); QueryBinaryProperty res = null; @@ -707,7 +713,7 @@ public static QueryBinaryProperty buildBinaryProperty(GridKernalContext ctx, Str String alias = aliases.get(fullName.toString()); // The key flag that we've found out is valid for the whole path. - res = new QueryBinaryProperty(ctx, prop, res, resType, isKeyField, alias, notNull); + res = new QueryBinaryProperty(ctx, prop, res, resType, isKeyField, alias, notNull, dlftVal); } return res; @@ -1296,5 +1302,10 @@ public KeyOrValProperty(boolean key, String name, Class cls) { @Override public boolean notNull() { return true; } + + /** {@inheritDoc} */ + @Override public Object defaultValue() { + return null; + } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/property/QueryBinaryProperty.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/property/QueryBinaryProperty.java index 18508a8b8edad..f440d124c55e6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/property/QueryBinaryProperty.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/property/QueryBinaryProperty.java @@ -68,6 +68,9 @@ public class QueryBinaryProperty implements GridQueryProperty { /** */ private final boolean notNull; + /** */ + private final Object defaultValue; + /** * Constructor. * @@ -78,9 +81,10 @@ public class QueryBinaryProperty implements GridQueryProperty { * @param key {@code true} if key property, {@code false} otherwise, {@code null} if unknown. * @param alias Field alias. * @param notNull {@code true} if null value is not allowed. 
+ * @param defaultValue Default value. */ public QueryBinaryProperty(GridKernalContext ctx, String propName, QueryBinaryProperty parent, - Class type, @Nullable Boolean key, String alias, boolean notNull) { + Class type, @Nullable Boolean key, String alias, boolean notNull, Object defaultValue) { this.ctx = ctx; log = ctx.log(QueryBinaryProperty.class); @@ -93,6 +97,8 @@ public QueryBinaryProperty(GridKernalContext ctx, String propName, QueryBinaryPr if (key != null) this.isKeyProp = key ? 1 : -1; + + this.defaultValue = defaultValue; } /** {@inheritDoc} */ @@ -275,4 +281,9 @@ private Object fieldValue(BinaryObject obj) { @Override public boolean notNull() { return notNull; } + + /** {@inheritDoc} */ + @Override public Object defaultValue() { + return defaultValue; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/property/QueryClassProperty.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/property/QueryClassProperty.java index 076a769aa0b4b..575fe17f78c30 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/property/QueryClassProperty.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/property/QueryClassProperty.java @@ -141,4 +141,9 @@ public void parent(QueryClassProperty parent) { @Override public boolean notNull() { return notNull; } + + /** {@inheritDoc} */ + @Override public Object defaultValue() { + return null; + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java index a6cc93e2e7832..6b054a231ad60 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java @@ -19,6 
+19,7 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; @@ -346,7 +347,7 @@ else if (stmt0 instanceof GridSqlAlterTableAddColumn) { QueryField field = new QueryField(col.columnName(), DataType.getTypeClassName(col.column().getType()), - col.column().isNullable()); + col.column().isNullable(), col.defaultValue()); cols.add(field); @@ -451,6 +452,8 @@ private static QueryEntity toQueryEntity(GridSqlCreateTable createTbl) { Set notNullFields = null; + HashMap dfltValues = new HashMap<>(); + for (Map.Entry e : createTbl.columns().entrySet()) { GridSqlColumn gridCol = e.getValue(); @@ -464,8 +467,16 @@ private static QueryEntity toQueryEntity(GridSqlCreateTable createTbl) { notNullFields.add(e.getKey()); } + + Object dfltVal = gridCol.defaultValue(); + + if (dfltVal != null) + dfltValues.put(e.getKey(), dfltVal); } + if (!F.isEmpty(dfltValues)) + res.setDefaultFieldValues(dfltValues); + String valTypeName = QueryUtils.createTableValueTypeName(createTbl.schemaName(), createTbl.tableName()); String keyTypeName = QueryUtils.createTableKeyTypeName(valTypeName); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java index 6c986704da599..f8c7a654f25d5 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java @@ -245,6 +245,8 @@ public UpdatePlan( newColVals.put(colName, DmlUtils.convert(row.get(i), rowDesc, expCls, colTypes[i])); } + desc.setDefaults(key, val); + // We update columns in the order specified by the table for a reason - table's // column order preserves their precedence for correct update of nested properties. 
Column[] tblCols = tbl.getColumns(); @@ -282,6 +284,7 @@ public UpdatePlan( * * @param row Row to process. * @throws IgniteCheckedException if failed. + * @return Tuple contains: [key, old value, new value] */ public T3 processRowForUpdate(List row) throws IgniteCheckedException { GridH2RowDescriptor rowDesc = tbl.rowDescriptor(); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlColumn.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlColumn.java index ef460e3e2f74c..bc14ae2c1ede0 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlColumn.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlColumn.java @@ -20,6 +20,7 @@ import java.util.Collections; import org.apache.ignite.internal.util.typedef.F; import org.h2.command.Parser; +import org.h2.expression.Expression; import org.h2.table.Column; /** @@ -118,6 +119,15 @@ public void expressionInFrom(GridSqlAlias from) { this.from = from; } + /** + * @return Default value. + */ + public Object defaultValue() { + Expression dfltExpr = col.getDefaultExpression(); + + return dfltExpr != null ? col.convert(dfltExpr.getValue(null)).getObject() : null; + } + /** * @return H2 Column. 
*/ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java index d16468cd7f804..c9644137ac272 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java @@ -95,6 +95,7 @@ import org.h2.table.TableBase; import org.h2.table.TableFilter; import org.h2.table.TableView; +import org.h2.value.DataType; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.AND; @@ -1191,9 +1192,23 @@ private static GridSqlColumn parseColumn(Column col) { throw new IgniteSQLException("Computed columns are not supported [colName=" + col.getName() + ']', IgniteQueryErrorCode.UNSUPPORTED_OPERATION); - if (col.getDefaultExpression() != null) - throw new IgniteSQLException("DEFAULT expressions are not supported [colName=" + col.getName() + ']', - IgniteQueryErrorCode.UNSUPPORTED_OPERATION); + if (col.getDefaultExpression() != null) { + if (!col.getDefaultExpression().isConstant()) { + throw new IgniteSQLException("Non-constant DEFAULT expressions are not supported [colName=" + col.getName() + ']', + IgniteQueryErrorCode.UNSUPPORTED_OPERATION); + } + + DataType colType = DataType.getDataType(col.getType()); + DataType dfltType = DataType.getDataType(col.getDefaultExpression().getType()); + + if ((DataType.isStringType(colType.type) && !DataType.isStringType(dfltType.type)) + || (DataType.supportsAdd(colType.type) && !DataType.supportsAdd(dfltType.type))) { + throw new IgniteSQLException("Invalid default value for column. 
[colName=" + col.getName() + + ", colType=" + colType.name + + ", dfltValueType=" + dfltType.name + ']', + IgniteQueryErrorCode.UNEXPECTED_ELEMENT_TYPE); + } + } if (col.getSequence() != null) throw new IgniteSQLException("SEQUENCE columns are not supported [colName=" + col.getName() + ']', @@ -1217,13 +1232,15 @@ private static GridSqlColumn parseColumn(Column col) { /** * Parse {@code ALTER TABLE ... ADD COLUMN} statement. * @param addCol H2 statement. + * @return Grid SQL statement. + * * @see */ private GridSqlStatement parseAddColumn(AlterTableAlterColumn addCol) { assert addCol.getType() == CommandInterface.ALTER_TABLE_ADD_COLUMN; if (ALTER_COLUMN_BEFORE_COL.get(addCol) != null || ALTER_COLUMN_AFTER_COL.get(addCol) != null) - throw new IgniteSQLException("ALTER TABLE ADD COLUMN BEFORE/AFTER is not supported", + throw new IgniteSQLException("ALTER TABLE ADD COLUMN BEFORE/AFTER is not supported" , IgniteQueryErrorCode.UNSUPPORTED_OPERATION); GridSqlAlterTableAddColumn res = new GridSqlAlterTableAddColumn(); @@ -1232,8 +1249,15 @@ private GridSqlStatement parseAddColumn(AlterTableAlterColumn addCol) { GridSqlColumn[] gridNewCols = new GridSqlColumn[h2NewCols.size()]; - for (int i = 0; i < h2NewCols.size(); i++) + for (int i = 0; i < h2NewCols.size(); i++) { + Column col = h2NewCols.get(i); + + if (col.getDefaultExpression() != null) + throw new IgniteSQLException("ALTER TABLE ADD COLUMN with DEFAULT value is not supported " + + "[col=" + col.getName() + ']', IgniteQueryErrorCode.UNSUPPORTED_OPERATION); + gridNewCols[i] = parseColumn(h2NewCols.get(i)); + } res.columns(gridNewCols); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlDefaultValueTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlDefaultValueTest.java new file mode 100644 index 0000000000000..6747e2810dd4a --- /dev/null +++ 
b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlDefaultValueTest.java @@ -0,0 +1,234 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query; + +import java.math.BigDecimal; +import java.sql.Date; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.Callable; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +/** */ +@SuppressWarnings("ThrowableNotThrown") +public class IgniteSqlDefaultValueTest extends GridCommonAbstractTest { + /** IP finder. */ + private static final TcpDiscoveryVmIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); + + /** Name of client node. */ + private static final String NODE_CLIENT = "client"; + + /** Number of server nodes. 
*/ + private static final int NODE_COUNT = 2; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration c = super.getConfiguration(gridName); + + TcpDiscoverySpi disco = new TcpDiscoverySpi(); + + disco.setIpFinder(IP_FINDER); + disco.setForceServerMode(true); + + c.setDiscoverySpi(disco); + + if (gridName.equals(NODE_CLIENT)) + c.setClientMode(true); + + return c; + } + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + super.beforeTestsStarted(); + + startGrids(NODE_COUNT); + + startGrid(NODE_CLIENT); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + + super.afterTestsStopped(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + Collection tblNames = new ArrayList<>(); + + for (String cacheName : grid(0).context().cache().publicCacheNames()) { + for (GridQueryTypeDescriptor table : grid(0).context().query().types(cacheName)) + tblNames.add(table.tableName()); + } + + for (String tbl : tblNames) + sql("DROP TABLE " + tbl); + + super.afterTest(); + } + + /** + */ + public void testDefaultValueColumn() { + sql("CREATE TABLE TEST (id int, val0 varchar DEFAULT 'default-val', primary key (id))"); + sql("INSERT INTO TEST (id) VALUES (?)", 1); + sql("INSERT INTO TEST (id, val0) VALUES (?, ?)", 2, null); + sql("INSERT INTO TEST (id, val0) VALUES (?, ?)", 3, "test-val"); + + List> exp = Arrays.asList( + Arrays.asList(1, "default-val"), + Arrays.asList(2, null), + Arrays.asList(3, "test-val") + ); + + List> res = sql("select id, val0 from TEST"); + + checkResults(exp, res); + } + + /** + */ + public void testDefaultValueColumnAfterUpdate() { + sql("CREATE TABLE TEST (id int, val0 varchar DEFAULT 'default-val', val1 varchar, primary key (id))"); + sql("INSERT INTO TEST (id, val1) VALUES (?, ?)", 1, "val-10"); + sql("INSERT INTO TEST (id, val1) VALUES 
(?, ?)", 2, "val-20"); + sql("INSERT INTO TEST (id, val1) VALUES (?, ?)", 3, "val-30"); + + List> exp = Arrays.asList( + Arrays.asList(1, "default-val", "val-10"), + Arrays.asList(2, "default-val", "val-20"), + Arrays.asList(3, "default-val", "val-30") + ); + + List> res = sql("select id, val0, val1 from TEST"); + + checkResults(exp, res); + + sql("UPDATE TEST SET val1=? where id=?", "val-21", 2); + + List> expAfterUpdate = Arrays.asList( + Arrays.asList(1, "default-val", "val-10"), + Arrays.asList(2, "default-val", "val-21"), + Arrays.asList(3, "default-val", "val-30") + ); + + List> resAfterUpdate = sql("select id, val0, val1 from TEST"); + + checkResults(expAfterUpdate, resAfterUpdate); + } + + /** + */ + public void testEmptyValueNullDefaults() { + sql("CREATE TABLE TEST (id int, val0 varchar, primary key (id))"); + sql("INSERT INTO TEST (id) VALUES (?)", 1); + sql("INSERT INTO TEST (id, val0) VALUES (?, ?)", 2, "test-val"); + + List> expected = Arrays.asList( + Arrays.asList(1, null), + Arrays.asList(2, "test-val") + ); + + List> res = sql("select id, val0 from TEST"); + + checkResults(expected, res); + } + + /** + */ + public void testAddColumnWithDefaults() { + sql("CREATE TABLE TEST (id int, val0 varchar, primary key (id))"); + + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() { + sql("ALTER TABLE TEST ADD COLUMN val1 varchar DEFAULT 'default-val'"); + + return null; + } + }, IgniteSQLException.class, "ALTER TABLE ADD COLUMN with DEFAULT value is not supported"); + } + + /** + */ + public void testDefaultTypes() { + assertEquals("Check tinyint", (byte)28, getDefaultObject("TINYINT", "28")); + assertEquals("Check smallint", (short)28, getDefaultObject("SMALLINT", "28")); + assertEquals("Check int", 28, getDefaultObject("INT", "28")); + assertEquals("Check double", 28.25, getDefaultObject("DOUBLE", "28.25")); + assertEquals("Check float", 28.25, getDefaultObject("FLOAT", "28.25")); + assertEquals("Check decimal", 
BigDecimal.valueOf(28.25), getDefaultObject("DECIMAL", "28.25")); + assertEquals("Check varchar", "test value", getDefaultObject("VARCHAR", "'test value'")); + assertEquals("Check time", Time.valueOf("14:01:01"), getDefaultObject("TIME", "'14:01:01'")); + assertEquals("Check date", Date.valueOf("2017-12-29"), getDefaultObject("DATE", "'2017-12-29'")); + assertEquals("Check timestamp", Timestamp.valueOf("2017-12-29 14:01:01"), + getDefaultObject("TIMESTAMP", "'2017-12-29 14:01:01'")); + } + + /** + * @param sqlType SQL type. + * @param dfltVal Value string representation. + * @return Object is returned by SELECT query. + */ + private Object getDefaultObject(String sqlType, String dfltVal) { + sql(String.format("CREATE TABLE TEST (id int, val %s default %s, primary key (id))", + sqlType, dfltVal)); + + sql("INSERT INTO TEST (id) VALUES (1)"); + + List> res = sql("SELECT val FROM TEST WHERE id=1"); + + sql("DROP TABLE TEST"); + + return res.get(0).get(0); + } + + /** + * @param exp Expected results. + * @param actual Actual results. + */ + @SuppressWarnings("SuspiciousMethodCalls") + private void checkResults(Collection> exp, Collection> actual) { + assertEquals(exp.size(), actual.size()); + + for (List row : actual) { + if (!exp.contains(row)) + fail("Unexpected results: [row=" + row + ']'); + } + } + + /** + * @param sql SQL query + * @param args Query parameters. + * @return Results set. + */ + private List> sql(String sql, Object ... 
args) { + return grid(NODE_CLIENT).context().query().querySqlFieldsNoCache( + new SqlFieldsQuery(sql).setArgs(args), false).getAll(); + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java index 62860c0d16f82..1da695be4d1c1 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java @@ -560,6 +560,11 @@ String cacheName() { @Override public boolean notNull() { return false; } + + /** */ + @Override public Object defaultValue() { + return null; + } }; } @@ -654,6 +659,11 @@ String cacheName() { @Override public void validateKeyAndValue(Object key, Object value) throws IgniteCheckedException { // No-op. } + + /** {@inheritDoc} */ + @Override public void setDefaults(Object key, Object val) throws IgniteCheckedException { + // No-op. 
+ } } /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/sql/GridQueryParsingTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/sql/GridQueryParsingTest.java index a1fe04b8a8516..1f0e7d783f14a 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/sql/GridQueryParsingTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/sql/GridQueryParsingTest.java @@ -628,13 +628,27 @@ false, c("id", Value.INT), c("city", Value.STRING), c("name", Value.STRING), assertParseThrows("create table Person (id int as age * 2 primary key, age int) WITH \"template=cache\"", IgniteSQLException.class, "Computed columns are not supported [colName=ID]"); - assertParseThrows("create table Person (id int primary key, age int default 5) WITH \"template=cache\"", - IgniteSQLException.class, "DEFAULT expressions are not supported [colName=AGE]"); - assertParseThrows("create table Int (_key int primary key, _val int) WITH \"template=cache\"", IgniteSQLException.class, "Direct specification of _KEY and _VAL columns is forbidden"); } + /** */ + public void testParseCreateTableWithDefaults() { + assertParseThrows("create table Person (id int primary key, age int, " + + "ts TIMESTAMP default CURRENT_TIMESTAMP()) WITH \"template=cache\"", + IgniteSQLException.class, "Non-constant DEFAULT expressions are not supported [colName=TS]"); + + assertParseThrows("create table Person (id int primary key, age int default 'test') " + + "WITH \"template=cache\"", + IgniteSQLException.class, "Invalid default value for column. " + + "[colName=AGE, colType=INTEGER, dfltValueType=VARCHAR]"); + + assertParseThrows("create table Person (id int primary key, name varchar default 1) " + + "WITH \"template=cache\"", + IgniteSQLException.class, "Invalid default value for column. 
" + + "[colName=NAME, colType=VARCHAR, dfltValueType=INTEGER]"); + } + /** */ public void testParseAlterTableAddColumn() throws Exception { assertAlterTableAddColumnEquals(buildAlterTableAddColumn("SCH2", "Person", false, false, diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java index bbbe3b8823e5b..31ac807ce1e2a 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java @@ -130,6 +130,7 @@ import org.apache.ignite.internal.processors.cache.query.IndexingSpiQuerySelfTest; import org.apache.ignite.internal.processors.cache.query.IndexingSpiQueryTxSelfTest; import org.apache.ignite.internal.processors.client.ClientConnectorConfigurationValidationSelfTest; +import org.apache.ignite.internal.processors.query.IgniteSqlDefaultValueTest; import org.apache.ignite.internal.processors.query.IgniteSqlDistributedJoinSelfTest; import org.apache.ignite.internal.processors.query.IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest; import org.apache.ignite.internal.processors.query.IgniteSqlParameterizedQueryTest; @@ -371,6 +372,8 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(IgniteCheckClusterStateBeforeExecuteQueryTest.class); + suite.addTestSuite(IgniteSqlDefaultValueTest.class); + return suite; } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/QueryEntityConfigurationParityTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/QueryEntityConfigurationParityTest.cs new file mode 100644 index 0000000000000..98ab084096afa --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/QueryEntityConfigurationParityTest.cs @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one 
or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +namespace Apache.Ignite.Core.Tests.ApiParity +{ + using Apache.Ignite.Core.Cache.Configuration; + using NUnit.Framework; + + /// + /// Tests that .NET has all properties from Java configuration APIs. + /// + public class QueryEntityConfigurationParityTest + { + /** Properties that are not needed on .NET side. */ + private static readonly string[] UnneededProperties = + { + "findKeyType", + "findValueType", + "KeyFields", + "NotNullFields", + "FieldsDefaultValues" + }; + + /// + /// Tests the ignite configuration parity. 
+ /// + [Test] + public void TestQueryEntityConfiguration() + { + ParityTest.CheckConfigurationParity( + @"modules\core\src\main\java\org\apache\ignite\cache\QueryEntity.java", + typeof(QueryEntity), + UnneededProperties); + } + } +} \ No newline at end of file diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheConfigurationTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheConfigurationTest.cs index 4f13172914f53..1c45e5e7a0c01 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheConfigurationTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheConfigurationTest.cs @@ -516,6 +516,7 @@ private static void AssertConfigsAreEqual(QueryField x, QueryField y) Assert.AreEqual(x.FieldTypeName, y.FieldTypeName); Assert.AreEqual(x.IsKeyField, y.IsKeyField); Assert.AreEqual(x.NotNull, y.NotNull); + Assert.AreEqual(x.DefaultValue, y.DefaultValue); } /// @@ -591,7 +592,7 @@ private static CacheConfiguration GetCustomCacheConfiguration(string name = null Fields = new[] { new QueryField("length", typeof(int)), - new QueryField("name", typeof(string)) {IsKeyField = true}, + new QueryField("name", typeof(string)) {IsKeyField = true, DefaultValue = "defName"}, new QueryField("location", typeof(string)) {NotNull = true}, }, Aliases = new [] {new QueryAlias("length", "len") }, @@ -684,7 +685,7 @@ private static CacheConfiguration GetCustomCacheConfiguration2(string name = nul TableName = "MyTable", Fields = new[] { - new QueryField("length", typeof(int)), + new QueryField("length", typeof(int)) {DefaultValue = -1}, new QueryField("name", typeof(string)), new QueryField("location", typeof(string)) {IsKeyField = true} }, diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheDmlQueriesTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheDmlQueriesTest.cs index 172cb90d36015..a2584cccbcaae 100644 --- 
a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheDmlQueriesTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheDmlQueriesTest.cs @@ -388,6 +388,46 @@ public void TestCompositeKeyAllDataTypes() Assert.AreEqual("VALUE", cache[key]); } + /// + /// Tests the QueryField.DefaultValue functionality. + /// + [Test] + public void TestDefaultValue() + { + // Attribute-based config. + var cfg = new CacheConfiguration("def_value_attr", new QueryEntity(typeof(int), typeof(Foo))); + Assert.AreEqual(-1, cfg.QueryEntities.Single().Fields.Single(x => x.Name == "Id").DefaultValue); + + var cache = Ignition.GetIgnite().CreateCache(cfg); + Assert.AreEqual(-1, + cache.GetConfiguration().QueryEntities.Single().Fields.Single(x => x.Name == "Id").DefaultValue); + + cache.Query(new SqlFieldsQuery("insert into foo(_key, id, name) values (?, ?, ?)", 1, 2, "John")).GetAll(); + cache.Query(new SqlFieldsQuery("insert into foo(_key, name) values (?, ?)", 3, "Mary")).GetAll(); + + Assert.AreEqual(2, cache[1].Id); + Assert.AreEqual(-1, cache[3].Id); + + // QueryEntity-based config. + cfg = new CacheConfiguration("def_value_binary", new QueryEntity + { + KeyType = typeof(int), + ValueTypeName = "DefValTest", + Fields = new[] + { + new QueryField("Name", typeof(string)) {DefaultValue = "foo"} + } + }); + + var cache2 = Ignition.GetIgnite().CreateCache(cfg).WithKeepBinary(); + + cache2.Query(new SqlFieldsQuery("insert into DefValTest(_key, name) values (?, ?)", 1, "John")).GetAll(); + cache2.Query(new SqlFieldsQuery("insert into DefValTest(_key) values (?)", 2)).GetAll(); + + Assert.AreEqual("John", cache2[1].GetField("Name")); + Assert.AreEqual("foo", cache2[2].GetField("Name")); + } + /// /// Key. 
/// @@ -425,7 +465,7 @@ public Key2(int lo, int hi, string str) : this() /// private class Foo { - [QuerySqlField] public int Id { get; set; } + [QuerySqlField(DefaultValue = -1)] public int Id { get; set; } [QuerySqlField(NotNull = true)] public string Name { get; set; } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml index 215a04d7361fc..a7f4aff32055a 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml @@ -55,7 +55,9 @@ - + + 3.456 + diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs index 87df49252f384..e28673a9d5eb4 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs @@ -120,6 +120,7 @@ public void TestPredefinedXml() Assert.AreEqual(typeof(int), queryEntity.Fields.Single().FieldType); Assert.IsTrue(queryEntity.Fields.Single().IsKeyField); Assert.IsTrue(queryEntity.Fields.Single().NotNull); + Assert.AreEqual(3.456d, (double)queryEntity.Fields.Single().DefaultValue); Assert.AreEqual("somefield.field", queryEntity.Aliases.Single().FullName); Assert.AreEqual("shortField", queryEntity.Aliases.Single().Alias); @@ -656,7 +657,8 @@ private static IgniteConfiguration GetTestConfig() new QueryField("field", typeof(int)) { IsKeyField = true, - NotNull = true + NotNull = true, + DefaultValue = "foo" } }, Indexes = new[] diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryEntity.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryEntity.cs index e8d0c91af9e46..1a745b2e80027 100644 --- 
a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryEntity.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryEntity.cs @@ -460,7 +460,8 @@ private static IEnumerable GetGroupIndexes(List indexe fields.Add(new QueryField(columnName, memberInfo.Value) { IsKeyField = isKey, - NotNull = attr.NotNull + NotNull = attr.NotNull, + DefaultValue = attr.DefaultValue }); ScanAttributes(memberInfo.Value, fields, indexes, columnName, visitedTypes, isKey); diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryField.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryField.cs index b8142fd2639bf..38aeed35f0ec9 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryField.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryField.cs @@ -84,6 +84,7 @@ internal QueryField(IBinaryRawReader reader) FieldTypeName = reader.ReadString(); IsKeyField = reader.ReadBoolean(); NotNull = reader.ReadBoolean(); + DefaultValue = reader.ReadObject(); } /// @@ -97,6 +98,7 @@ internal void Write(IBinaryRawWriter writer) writer.WriteString(FieldTypeName); writer.WriteBoolean(IsKeyField); writer.WriteBoolean(NotNull); + writer.WriteObject(DefaultValue); } /// @@ -146,6 +148,11 @@ public string FieldTypeName /// public bool NotNull { get; set; } + /// + /// Gets or sets the default value for the field. + /// + public object DefaultValue { get; set; } + /// /// Validates this instance and outputs information to the log, if necessary. 
/// diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QuerySqlFieldAttribute.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QuerySqlFieldAttribute.cs index d15cc1af3d1d1..96912dde908a3 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QuerySqlFieldAttribute.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QuerySqlFieldAttribute.cs @@ -76,5 +76,10 @@ public QuerySqlFieldAttribute() /// Gets or sets a value indicating whether null values are allowed for this field. /// public bool NotNull { get; set; } + + /// + /// Gets or sets the default value for the field (has effect when inserting with DML). + /// + public object DefaultValue { get; set; } } } diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd index 7e1e39a789951..71a26655a7d22 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd +++ b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd @@ -308,6 +308,13 @@ + + + + Default field value. + + + Cache name. From d283f4244fe3cd1f0f54c1f38b09a84213160d79 Mon Sep 17 00:00:00 2001 From: rkondakov Date: Fri, 19 Jan 2018 12:00:55 +0300 Subject: [PATCH 237/243] IGNITE-6772: SQL exception messages became more informative. This closes #3342. 
(cherry picked from commit 4606d83) --- .../internal/jdbc2/JdbcErrorsSelfTest.java | 12 +- .../jdbc/JdbcErrorsAbstractSelfTest.java | 257 +++++++++++++++--- .../thin/JdbcThinComplexDmlDdlSelfTest.java | 2 +- .../jdbc/thin/JdbcThinErrorsSelfTest.java | 9 +- .../thin/JdbcThinNoDefaultSchemaTest.java | 2 +- .../ignite/internal/jdbc/JdbcResultSet.java | 2 +- .../ignite/internal/jdbc2/JdbcConnection.java | 2 +- .../ignite/internal/jdbc2/JdbcResultSet.java | 2 +- .../cache/IgniteCacheProxyImpl.java | 2 +- .../processors/query/h2/IgniteH2Indexing.java | 2 +- .../IgniteCacheSqlQueryErrorSelfTest.java | 214 +++++++++++++++ .../index/H2DynamicIndexingComplexTest.java | 2 +- .../query/IgniteSqlSchemaIndexingTest.java | 11 +- .../IgniteCacheQuerySelfTestSuite.java | 2 + 14 files changed, 465 insertions(+), 56 deletions(-) create mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheSqlQueryErrorSelfTest.java diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcErrorsSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcErrorsSelfTest.java index d33e3a5a4764d..63f0c84a67f76 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcErrorsSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcErrorsSelfTest.java @@ -41,13 +41,15 @@ public class JdbcErrorsSelfTest extends JdbcErrorsAbstractSelfTest { * @throws SQLException if failed. 
*/ public void testConnectionError() throws SQLException { + final String path = "jdbc:ignite:сfg://cache=test@/unknown/path"; + checkErrorState(new IgniteCallable() { @Override public Void call() throws Exception { - DriverManager.getConnection("jdbc:ignite:сfg://cache=test@/unknown/path"); + DriverManager.getConnection(path); return null; } - }, "08001"); + }, "08001", "No suitable driver found for " + path); } /** @@ -55,13 +57,15 @@ public void testConnectionError() throws SQLException { * @throws SQLException if failed. */ public void testInvalidConnectionStringFormat() throws SQLException { + final String cfgPath = "cache="; + checkErrorState(new IgniteCallable() { @Override public Void call() throws Exception { // Empty config path yields an error. - DriverManager.getConnection("jdbc:ignite:cfg://cache="); + DriverManager.getConnection("jdbc:ignite:cfg://" + cfgPath); return null; } - }, "08001"); + }, "08001", "Failed to start Ignite node. Spring XML configuration path is invalid: " + cfgPath); } } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java index fb96f31382609..49746b690eed1 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java @@ -81,7 +81,8 @@ public abstract class JdbcErrorsAbstractSelfTest extends GridCommonAbstractTest * @throws SQLException if failed. */ public void testParsingErrors() throws SQLException { - checkErrorState("gibberish", "42000"); + checkErrorState("gibberish", "42000", + "Failed to parse query. Syntax error in SQL statement \"GIBBERISH[*] \""); } /** @@ -89,7 +90,7 @@ public void testParsingErrors() throws SQLException { * @throws SQLException if failed. 
*/ public void testTableErrors() throws SQLException { - checkErrorState("DROP TABLE \"PUBLIC\".missing", "42000"); + checkErrorState("DROP TABLE \"PUBLIC\".missing", "42000", "Table doesn't exist: MISSING"); } /** @@ -97,7 +98,7 @@ public void testTableErrors() throws SQLException { * @throws SQLException if failed. */ public void testIndexErrors() throws SQLException { - checkErrorState("DROP INDEX \"PUBLIC\".missing", "42000"); + checkErrorState("DROP INDEX \"PUBLIC\".missing", "42000", "Index doesn't exist: MISSING"); } /** @@ -105,9 +106,11 @@ public void testIndexErrors() throws SQLException { * @throws SQLException if failed. */ public void testDmlErrors() throws SQLException { - checkErrorState("INSERT INTO \"test\".INTEGER(_key, _val) values(1, null)", "22004"); + checkErrorState("INSERT INTO \"test\".INTEGER(_key, _val) values(1, null)", "22004", + "Value for INSERT, MERGE, or UPDATE must not be null"); - checkErrorState("INSERT INTO \"test\".INTEGER(_key, _val) values(1, 'zzz')", "0700B"); + checkErrorState("INSERT INTO \"test\".INTEGER(_key, _val) values(1, 'zzz')", "0700B", + "Value conversion failed [from=java.lang.String, to=java.lang.Integer]"); } /** @@ -115,7 +118,8 @@ public void testDmlErrors() throws SQLException { * @throws SQLException if failed. 
*/ public void testUnsupportedSql() throws SQLException { - checkErrorState("ALTER TABLE \"test\".Integer DROP COLUMN _key", "0A000"); + checkErrorState("ALTER TABLE \"test\".Integer MODIFY COLUMN _key CHAR", "0A000", + "ALTER COLUMN is not supported"); } /** @@ -133,7 +137,7 @@ public void testConnectionClosed() throws SQLException { return null; } - }, "08003"); + }, "08003", "Connection is closed."); checkErrorState(new IgniteCallable() { @Override public Void call() throws Exception { @@ -145,7 +149,7 @@ public void testConnectionClosed() throws SQLException { return null; } - }, "08003"); + }, "08003", "Connection is closed."); checkErrorState(new IgniteCallable() { @Override public Void call() throws Exception { @@ -157,7 +161,7 @@ public void testConnectionClosed() throws SQLException { return null; } - }, "08003"); + }, "08003", "Connection is closed."); checkErrorState(new IgniteCallable() { @Override public Void call() throws Exception { @@ -171,7 +175,7 @@ public void testConnectionClosed() throws SQLException { return null; } - }, "08003"); + }, "08003", "Connection is closed."); checkErrorState(new IgniteCallable() { @Override public Void call() throws Exception { @@ -185,7 +189,7 @@ public void testConnectionClosed() throws SQLException { return null; } - }, "08003"); + }, "08003", "Connection is closed."); checkErrorState(new IgniteCallable() { @Override public Void call() throws Exception { @@ -199,7 +203,7 @@ public void testConnectionClosed() throws SQLException { return null; } - }, "08003"); + }, "08003", "Connection is closed."); checkErrorState(new IgniteCallable() { @Override public Void call() throws Exception { @@ -213,7 +217,7 @@ public void testConnectionClosed() throws SQLException { return null; } - }, "08003"); + }, "08003", "Connection is closed."); checkErrorState(new IgniteCallable() { @Override public Void call() throws Exception { @@ -227,7 +231,7 @@ public void testConnectionClosed() throws SQLException { return null; } - }, 
"08003"); + }, "08003", "Connection is closed."); } /** @@ -247,7 +251,7 @@ public void testResultSetClosed() throws SQLException { rs.getInt(1); } } - }, "24000"); + }, "24000", "Result set is closed"); } /** @@ -266,7 +270,7 @@ public void testInvalidIntFormat() throws SQLException { rs.getLong(1); } } - }, "0700B"); + }, "0700B", "Cannot convert to long"); } /** @@ -285,7 +289,7 @@ public void testInvalidLongFormat() throws SQLException { rs.getLong(1); } } - }, "0700B"); + }, "0700B", "Cannot convert to long"); } /** @@ -304,7 +308,7 @@ public void testInvalidFloatFormat() throws SQLException { rs.getFloat(1); } } - }, "0700B"); + }, "0700B", "Cannot convert to float"); } /** @@ -323,7 +327,7 @@ public void testInvalidDoubleFormat() throws SQLException { rs.getDouble(1); } } - }, "0700B"); + }, "0700B", "Cannot convert to double"); } /** @@ -342,7 +346,7 @@ public void testInvalidByteFormat() throws SQLException { rs.getByte(1); } } - }, "0700B"); + }, "0700B", "Cannot convert to byte"); } /** @@ -361,7 +365,7 @@ public void testInvalidShortFormat() throws SQLException { rs.getShort(1); } } - }, "0700B"); + }, "0700B", "Cannot convert to short"); } /** @@ -380,7 +384,7 @@ public void testInvalidBigDecimalFormat() throws SQLException { rs.getBigDecimal(1); } } - }, "0700B"); + }, "0700B", "Cannot convert to"); } /** @@ -399,7 +403,7 @@ public void testInvalidBooleanFormat() throws SQLException { rs.getBoolean(1); } } - }, "0700B"); + }, "0700B", "Cannot convert to boolean"); } /** @@ -418,7 +422,7 @@ public void testInvalidObjectFormat() throws SQLException { rs.getObject(1, List.class); } } - }, "0700B"); + }, "0700B", "Cannot convert to"); } /** @@ -437,7 +441,7 @@ public void testInvalidDateFormat() throws SQLException { rs.getDate(1); } } - }, "0700B"); + }, "0700B", "Cannot convert to date"); } /** @@ -456,7 +460,7 @@ public void testInvalidTimeFormat() throws SQLException { rs.getTime(1); } } - }, "0700B"); + }, "0700B", "Cannot convert to time"); } /** @@ 
-475,7 +479,7 @@ public void testInvalidTimestampFormat() throws SQLException { rs.getTimestamp(1); } } - }, "0700B"); + }, "0700B", "Cannot convert to timestamp"); } /** @@ -494,7 +498,7 @@ public void testInvalidUrlFormat() throws SQLException { rs.getURL(1); } } - }, "0700B"); + }, "0700B", "Cannot convert to"); } /** @@ -516,7 +520,7 @@ public void testNotNullViolation() throws SQLException { return null; } - }, "22004"); + }, "22004", "Null value is not allowed for column 'NAME'"); } finally { stmt.execute("DROP TABLE nulltest"); @@ -541,7 +545,8 @@ public void testNotNullRestrictionReadThroughCacheStore() throws SQLException { "WITH \"template=" + CACHE_STORE_TEMPLATE + "\""); } } - }, "0A000"); + }, "0A000", + "NOT NULL constraint is not supported when CacheConfiguration.readThrough is enabled."); } /** @@ -560,7 +565,151 @@ public void testNotNullRestrictionCacheInterceptor() throws SQLException { "WITH \"template=" + CACHE_INTERCEPTOR_TEMPLATE + "\""); } } - }, "0A000"); + }, "0A000", "NOT NULL constraint is not supported when CacheConfiguration.interceptor is set."); + } + + /** + * Checks wrong table name select error message. + * + * @throws SQLException If failed. + */ + public void testSelectWrongTable() throws SQLException { + checkSqlErrorMessage("select from wrong", "42000", + "Failed to parse query. Table \"WRONG\" not found"); + } + + /** + * Checks wrong column name select error message. + * + * @throws SQLException If failed. + */ + public void testSelectWrongColumnName() throws SQLException { + checkSqlErrorMessage("select wrong from test", "42000", + "Failed to parse query. Column \"WRONG\" not found"); + } + + /** + * Checks wrong syntax select error message. + * + * @throws SQLException If failed. + */ + public void testSelectWrongSyntax() throws SQLException { + checkSqlErrorMessage("select from test where", "42000", + "Failed to parse query. 
Syntax error in SQL statement \"SELECT FROM TEST WHERE[*]"); + } + + /** + * Checks wrong table name DML error message. + * + * @throws SQLException If failed. + */ + public void testDmlWrongTable() throws SQLException { + checkSqlErrorMessage("insert into wrong (id, val) values (3, 'val3')", "42000", + "Failed to parse query. Table \"WRONG\" not found"); + + checkSqlErrorMessage("merge into wrong (id, val) values (3, 'val3')", "42000", + "Failed to parse query. Table \"WRONG\" not found"); + + checkSqlErrorMessage("update wrong set val = 'val3' where id = 2", "42000", + "Failed to parse query. Table \"WRONG\" not found"); + + checkSqlErrorMessage("delete from wrong where id = 2", "42000", + "Failed to parse query. Table \"WRONG\" not found"); + } + + /** + * Checks wrong column name DML error message. + * + * @throws SQLException If failed. + */ + public void testDmlWrongColumnName() throws SQLException { + checkSqlErrorMessage("insert into test (id, wrong) values (3, 'val3')", "42000", + "Failed to parse query. Column \"WRONG\" not found"); + + checkSqlErrorMessage("merge into test (id, wrong) values (3, 'val3')", "42000", + "Failed to parse query. Column \"WRONG\" not found"); + + checkSqlErrorMessage("update test set wrong = 'val3' where id = 2", "42000", + "Failed to parse query. Column \"WRONG\" not found"); + + checkSqlErrorMessage("delete from test where wrong = 2", "42000", + "Failed to parse query. Column \"WRONG\" not found"); + } + + /** + * Checks wrong syntax DML error message. + * + * @throws SQLException If failed. + */ + public void testDmlWrongSyntax() throws SQLException { + checkSqlErrorMessage("insert test (id, val) values (3, 'val3')", "42000", + "Failed to parse query. Syntax error in SQL statement \"INSERT TEST[*] (ID, VAL)"); + + checkSqlErrorMessage("merge test (id, val) values (3, 'val3')", "42000", + "Failed to parse query. 
Syntax error in SQL statement \"MERGE TEST[*] (ID, VAL)"); + + checkSqlErrorMessage("update test val = 'val3' where id = 2", "42000", + "Failed to parse query. Syntax error in SQL statement \"UPDATE TEST VAL =[*] 'val3' WHERE ID = 2"); + + checkSqlErrorMessage("delete from test 1where id = 2", "42000", + "Failed to parse query. Syntax error in SQL statement \"DELETE FROM TEST 1[*]WHERE ID = 2 "); + } + + /** + * Checks wrong table name DDL error message. + * + * @throws SQLException If failed. + */ + public void testDdlWrongTable() throws SQLException { + checkSqlErrorMessage("create table test (id int primary key, val varchar)", "42000", + "Table already exists: TEST"); + + checkSqlErrorMessage("drop table wrong", "42000", + "Table doesn't exist: WRONG"); + + checkSqlErrorMessage("create index idx1 on wrong (val)", "42000", + "Table doesn't exist: WRONG"); + + checkSqlErrorMessage("drop index wrong", "42000", + "Index doesn't exist: WRONG"); + + checkSqlErrorMessage("alter table wrong drop column val", "42000", + "Failed to parse query. Table \"WRONG\" not found"); + } + + /** + * Checks wrong column name DDL error message. + * + * @throws SQLException If failed. + */ + public void testDdlWrongColumnName() throws SQLException { + checkSqlErrorMessage("create index idx1 on test (wrong)", "42000", + "Column doesn't exist: WRONG"); + + checkSqlErrorMessage("alter table test drop column wrong", "42000", + "Failed to parse query. Column \"WRONG\" not found"); + } + + /** + * Checks wrong syntax DDL error message. + * + * @throws SQLException If failed. + */ + public void testDdlWrongSyntax() throws SQLException { + checkSqlErrorMessage("create table test2 (id int wrong key, val varchar)", "42000", + "Failed to parse query. Syntax error in SQL statement \"CREATE TABLE TEST2 (ID INT WRONG[*]"); + + checkSqlErrorMessage("drop table test on", "42000", + "Failed to parse query. 
Syntax error in SQL statement \"DROP TABLE TEST ON[*]"); + + checkSqlErrorMessage("create index idx1 test (val)", "42000", + "Failed to parse query. Syntax error in SQL statement \"CREATE INDEX IDX1 TEST[*]"); + + checkSqlErrorMessage("drop index", "42000", + "Failed to parse query. Syntax error in SQL statement \"DROP INDEX [*]"); + + checkSqlErrorMessage("alter table test drop column", "42000", + "Failed to parse query. Syntax error in SQL statement \"ALTER TABLE TEST DROP COLUMN [*]"); } /** @@ -576,14 +725,14 @@ public void testNotNullRestrictionCacheInterceptor() throws SQLException { * @throws SQLException if failed. */ @SuppressWarnings("ThrowableResultOfMethodCallIgnored") - private void checkErrorState(final String sql, String expState) throws SQLException { + private void checkErrorState(final String sql, String expState, String expMsg) throws SQLException { checkErrorState(new ConnClosure() { @Override public void run(Connection conn) throws Exception { try (final PreparedStatement stmt = conn.prepareStatement(sql)) { stmt.execute(); } } - }, expState); + }, expState, expMsg); } /** @@ -593,7 +742,7 @@ private void checkErrorState(final String sql, String expState) throws SQLExcept * @throws SQLException if failed. */ @SuppressWarnings("ThrowableResultOfMethodCallIgnored") - protected void checkErrorState(final ConnClosure clo, String expState) throws SQLException { + protected void checkErrorState(final ConnClosure clo, String expState, String expMsg) throws SQLException { checkErrorState(new IgniteCallable() { @Override public Void call() throws Exception { try (final Connection conn = getConnection()) { @@ -604,7 +753,7 @@ protected void checkErrorState(final ConnClosure clo, String expState) throws SQ return null; } } - }, expState); + }, expState, expMsg); } /** @@ -614,12 +763,46 @@ protected void checkErrorState(final ConnClosure clo, String expState) throws SQ * @throws SQLException if failed. 
*/ @SuppressWarnings("ThrowableResultOfMethodCallIgnored") - protected void checkErrorState(final IgniteCallable clo, String expState) throws SQLException { - SQLException ex = (SQLException)GridTestUtils.assertThrows(null, clo, SQLException.class, null); + protected void checkErrorState(final IgniteCallable clo, String expState, String expMsg) throws SQLException { + SQLException ex = (SQLException)GridTestUtils.assertThrows(null, clo, SQLException.class, expMsg); assertEquals(expState, ex.getSQLState()); } + /** + * Check SQL exception message and error code. + * + * @param sql Query string. + * @param expState Error code. + * @param expMsg Error message. + * @throws SQLException if failed. + */ + private void checkSqlErrorMessage(final String sql, String expState, String expMsg) throws SQLException { + checkErrorState(new IgniteCallable() { + @Override public Void call() throws Exception { + try (final Connection conn = getConnection()) { + conn.setSchema("PUBLIC"); + + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("DROP TABLE IF EXISTS wrong"); + stmt.executeUpdate("DROP TABLE IF EXISTS test"); + + stmt.executeUpdate("CREATE TABLE test (id INT PRIMARY KEY, val VARCHAR)"); + + stmt.executeUpdate("INSERT INTO test (id, val) VALUES (1, 'val1')"); + stmt.executeUpdate("INSERT INTO test (id, val) VALUES (2, 'val2')"); + + stmt.execute(sql); + + fail("Exception is expected"); + } + + return null; + } + } + }, expState, expMsg); + } + /** * Runnable that accepts a {@link Connection} and can throw an exception. 
*/ diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java index d4e03bc417908..94777f7537ea7 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java @@ -146,7 +146,7 @@ public void testCreateSelect() throws Exception { return null; } - }, SQLException.class, "Failed to parse query: SELECT * from Person"); + }, SQLException.class, "Table \"PERSON\" not found"); sql(new UpdateChecker(0), "CREATE TABLE person (id int, name varchar, age int, company varchar, city varchar, " + diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinErrorsSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinErrorsSelfTest.java index db70f3be44204..462c98d64fd01 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinErrorsSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinErrorsSelfTest.java @@ -46,7 +46,7 @@ public void testConnectionError() throws SQLException { return null; } - }, "08001"); + }, "08001", "Failed to connect to Ignite cluster [host=unknown.host"); } /** @@ -61,7 +61,7 @@ public void testInvalidConnectionStringFormat() throws SQLException { return null; } - }, "08001"); + }, "08001", "Property cannot be upper than 65535"); } /** @@ -74,7 +74,7 @@ public void testInvalidIsolationLevel() throws SQLException { @Override public void run(Connection conn) throws Exception { conn.setTransactionIsolation(1000); } - }, "0700E"); + }, "0700E", "Invalid transaction isolation level."); } /** @@ -102,6 +102,9 @@ public void testBatchUpdateException() throws SQLException { assertEquals(1, updCnt); assertEquals("42000", e.getSQLState()); + + assertTrue("Unexpected error message: " + 
e.getMessage(), e.getMessage() != null && + e.getMessage().contains("Failed to parse query. Column \"ID1\" not found")); } } } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinNoDefaultSchemaTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinNoDefaultSchemaTest.java index a1be582003e49..11aef9fa87faf 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinNoDefaultSchemaTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinNoDefaultSchemaTest.java @@ -217,7 +217,7 @@ public void testSetSchema() throws Exception { return null; } - }, SQLException.class, "Failed to parse query"); + }, SQLException.class, "Table \"INTEGER\" not found"); conn.setSchema("\"cache1\""); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcResultSet.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcResultSet.java index ee1547129bd3d..544207e52b460 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcResultSet.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcResultSet.java @@ -1523,7 +1523,7 @@ else if (cls == String.class) throw new SQLException("Invalid column index: " + colIdx); } catch (ClassCastException ignored) { - throw new SQLException("Value is an not instance of " + cls.getName()); + throw new SQLException("Cannot convert to " + cls.getSimpleName().toLowerCase()); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java index 29cb6a1669dea..b51e0b95084ef 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java @@ -248,7 +248,7 @@ public JdbcConnection(String url, Properties props) throws SQLException { catch (Exception e) { close(); - throw 
convertToSqlException(e, "Failed to start Ignite node.", SqlStateCode.CLIENT_CONNECTION_FAILED); + throw convertToSqlException(e, "Failed to start Ignite node. " + e.getMessage(), SqlStateCode.CLIENT_CONNECTION_FAILED); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java index e2ff5d866dffb..e6e84880ea2eb 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java @@ -1556,7 +1556,7 @@ else if (cls == String.class) throw new SQLException("Invalid column index: " + colIdx); } catch (ClassCastException ignored) { - throw new SQLException("Value is an not instance of " + cls.getName(), SqlStateCode.CONVERSION_FAILED); + throw new SQLException("Cannot convert to " + cls.getSimpleName().toLowerCase(), SqlStateCode.CONVERSION_FAILED); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java index ae1c6f52f8679..c59785847da9c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java @@ -594,7 +594,7 @@ private QueryCursor> queryContinuous(ContinuousQuery qry, bool if (e instanceof CacheException) throw (CacheException)e; - throw new CacheException(e); + throw new CacheException(e.getMessage(), e); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 82978aa79a842..c546d2b037bcc 100644 --- 
a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -1487,7 +1487,7 @@ private List>> tryQueryDistributedSqlFieldsNative(Stri cachesCreated = true; } else - throw new IgniteSQLException("Failed to parse query: " + sqlQry, + throw new IgniteSQLException("Failed to parse query. " + e.getMessage(), IgniteQueryErrorCode.PARSING, e); } } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheSqlQueryErrorSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheSqlQueryErrorSelfTest.java new file mode 100644 index 0000000000000..09790854b2d03 --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheSqlQueryErrorSelfTest.java @@ -0,0 +1,214 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.concurrent.Callable; +import javax.cache.CacheException; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.testframework.GridTestUtils; + +/** + * Java API query error messages test. + */ +public class IgniteCacheSqlQueryErrorSelfTest extends GridCacheAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected int gridCount() { + return 1; + } + + /** + * Checks wrong table name select error message. + * + * @throws Exception If failed. + */ + public void testSelectWrongTable() throws Exception { + checkSqlErrorMessage("select from wrong", + "Failed to parse query. Table \"WRONG\" not found"); + } + + /** + * Checks wrong column name select error message. + * + * @throws Exception If failed. + */ + public void testSelectWrongColumnName() throws Exception { + checkSqlErrorMessage("select wrong from test", + "Failed to parse query. Column \"WRONG\" not found"); + } + + /** + * Checks wrong syntax select error message. + * + * @throws Exception If failed. + */ + public void testSelectWrongSyntax() throws Exception { + checkSqlErrorMessage("select from test where", + "Failed to parse query. Syntax error in SQL statement \"SELECT FROM TEST WHERE[*]"); + } + + /** + * Checks wrong table name DML error message. + * + * @throws Exception If failed. + */ + public void testDmlWrongTable() throws Exception { + checkSqlErrorMessage("insert into wrong (id, val) values (3, 'val3')", + "Failed to parse query. Table \"WRONG\" not found"); + + checkSqlErrorMessage("merge into wrong (id, val) values (3, 'val3')", + "Failed to parse query. Table \"WRONG\" not found"); + + checkSqlErrorMessage("update wrong set val = 'val3' where id = 2", + "Failed to parse query. Table \"WRONG\" not found"); + + checkSqlErrorMessage("delete from wrong where id = 2", + "Failed to parse query. Table \"WRONG\" not found"); + } + + /** + * Checks wrong column name DML error message. 
+ * + * @throws Exception If failed. + */ + public void testDmlWrongColumnName() throws Exception { + checkSqlErrorMessage("insert into test (id, wrong) values (3, 'val3')", + "Failed to parse query. Column \"WRONG\" not found"); + + checkSqlErrorMessage("merge into test (id, wrong) values (3, 'val3')", + "Failed to parse query. Column \"WRONG\" not found"); + + checkSqlErrorMessage("update test set wrong = 'val3' where id = 2", + "Failed to parse query. Column \"WRONG\" not found"); + + checkSqlErrorMessage("delete from test where wrong = 2", + "Failed to parse query. Column \"WRONG\" not found"); + } + + /** + * Checks wrong syntax DML error message. + * + * @throws Exception If failed. + */ + public void testDmlWrongSyntax() throws Exception { + checkSqlErrorMessage("insert test (id, val) values (3, 'val3')", + "Failed to parse query. Syntax error in SQL statement \"INSERT TEST[*] (ID, VAL)"); + + checkSqlErrorMessage("merge test (id, val) values (3, 'val3')", + "Failed to parse query. Syntax error in SQL statement \"MERGE TEST[*] (ID, VAL)"); + + checkSqlErrorMessage("update test val = 'val3' where id = 2", + "Failed to parse query. Syntax error in SQL statement \"UPDATE TEST VAL =[*] 'val3' WHERE ID = 2"); + + checkSqlErrorMessage("delete from test 1where id = 2", + "Failed to parse query. Syntax error in SQL statement \"DELETE FROM TEST 1[*]WHERE ID = 2 "); + } + + /** + * Checks wrong table name DDL error message. + * + * @throws Exception If failed. + */ + public void testDdlWrongTable() throws Exception { + checkSqlErrorMessage("create table test (id int primary key, val varchar)", + "Table already exists: TEST"); + + checkSqlErrorMessage("drop table wrong", + "Table doesn't exist: WRONG"); + + checkSqlErrorMessage("create index idx1 on wrong (val)", + "Table doesn't exist: WRONG"); + + checkSqlErrorMessage("drop index wrong", + "Index doesn't exist: WRONG"); + + checkSqlErrorMessage("alter table wrong drop column val", + "Failed to parse query. 
Table \"WRONG\" not found"); + } + + /** + * Checks wrong column name DDL error message. + * + * @throws Exception If failed. + */ + public void testDdlWrongColumnName() throws Exception { + checkSqlErrorMessage("create index idx1 on test (wrong)", + "Column doesn't exist: WRONG"); + + checkSqlErrorMessage("alter table test drop column wrong", + "Failed to parse query. Column \"WRONG\" not found"); + } + + /** + * Checks wrong syntax DDL error message. + * + * @throws Exception If failed. + */ + public void testDdlWrongSyntax() throws Exception { + checkSqlErrorMessage("create table wrong (id int wrong key, val varchar)", + "Failed to parse query. Syntax error in SQL statement \"CREATE TABLE WRONG (ID INT WRONG[*]"); + + checkSqlErrorMessage("drop table test on", + "Failed to parse query. Syntax error in SQL statement \"DROP TABLE TEST ON[*]"); + + checkSqlErrorMessage("create index idx1 test (val)", + "Failed to parse query. Syntax error in SQL statement \"CREATE INDEX IDX1 TEST[*]"); + + checkSqlErrorMessage("drop index", + "Failed to parse query. Syntax error in SQL statement \"DROP INDEX [*]"); + + checkSqlErrorMessage("alter table test drop column", + "Failed to parse query. Syntax error in SQL statement \"ALTER TABLE TEST DROP COLUMN [*]"); + } + + /** + * Checks SQL error message. + * + * @param sql SQL command. + * @param expMsg Expected error message. + */ + private void checkSqlErrorMessage(final String sql, String expMsg) { + execute("DROP TABLE IF EXISTS wrong"); + execute("DROP TABLE IF EXISTS test"); + + execute("CREATE TABLE test (id INT PRIMARY KEY, val VARCHAR)"); + + execute("INSERT INTO test (id, val) VALUES (1, 'val1')"); + execute("INSERT INTO test (id, val) VALUES (2, 'val2')"); + + GridTestUtils.assertThrows(null, new Callable() { + + @Override public Object call() throws Exception { + execute(sql); + + fail("Exception is expected"); + + return null; + } + }, CacheException.class, expMsg); + } + + /** + * Executes SQL command. 
+ * + * @param sql SQL command. + */ + private void execute(String sql) { + jcache().query(new SqlFieldsQuery(sql).setSchema("PUBLIC")).getAll(); + } + +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicIndexingComplexTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicIndexingComplexTest.java index 0be691e072f1f..845e3474682d1 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicIndexingComplexTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicIndexingComplexTest.java @@ -187,7 +187,7 @@ public void testOperations() { @Override public Object call() throws Exception { return executeSql("SELECT * from Person"); } - }, IgniteSQLException.class, "Failed to parse query: SELECT * from Person"); + }, IgniteSQLException.class, "Table \"PERSON\" not found"); } /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSchemaIndexingTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSchemaIndexingTest.java index 570d2db605823..e375df2cf6e7e 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSchemaIndexingTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlSchemaIndexingTest.java @@ -190,9 +190,11 @@ public void testSchemaEscapeAll() throws Exception { .setSqlSchema("\"SchemaName2\"") .setSqlEscapeAll(true); - escapeCheckSchemaName(ignite(0).createCache(cfg), log, cfg.getSqlSchema(), false); + escapeCheckSchemaName(ignite(0).createCache(cfg), log, cfg.getSqlSchema(), false, + "Table \"FACT\" not found"); - escapeCheckSchemaName(ignite(0).createCache(cfgEsc), log, "SchemaName2", true); + escapeCheckSchemaName(ignite(0).createCache(cfgEsc), log, "SchemaName2", true, + "Schema \"SCHEMANAME2\" 
not found"); ignite(0).destroyCache(cfg.getName()); ignite(0).destroyCache(cfgEsc.getName()); @@ -204,9 +206,10 @@ public void testSchemaEscapeAll() throws Exception { * @param log logger for assertThrows * @param schemaName Schema name without quotes for testing * @param caseSensitive Whether schema name is case sensitive. + * @param msg Expected error message. */ private static void escapeCheckSchemaName(final IgniteCache cache, IgniteLogger log, - String schemaName, boolean caseSensitive) { + String schemaName, boolean caseSensitive, String msg) { final SqlFieldsQuery qryWrong = new SqlFieldsQuery("select f.id, f.name " + "from " + schemaName.toUpperCase() + ".Fact f"); @@ -218,7 +221,7 @@ private static void escapeCheckSchemaName(final IgniteCache cache return null; } - }, CacheException.class, "Failed to parse query"); + }, CacheException.class, msg); if (caseSensitive) schemaName = "\"" + schemaName + "\""; diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java index 31ac807ce1e2a..a8a76a305dd2d 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java @@ -69,6 +69,7 @@ import org.apache.ignite.internal.processors.cache.IgniteCacheQueryH2IndexingLeakTest; import org.apache.ignite.internal.processors.cache.IgniteCacheQueryIndexSelfTest; import org.apache.ignite.internal.processors.cache.IgniteCacheQueryLoadSelfTest; +import org.apache.ignite.internal.processors.cache.IgniteCacheSqlQueryErrorSelfTest; import org.apache.ignite.internal.processors.cache.IgniteCacheUpdateSqlQuerySelfTest; import org.apache.ignite.internal.processors.cache.IgniteCheckClusterStateBeforeExecuteQueryTest; import org.apache.ignite.internal.processors.cache.IgniteCrossCachesJoinsQueryTest; 
@@ -207,6 +208,7 @@ public static TestSuite suite() throws Exception { // Parsing suite.addTestSuite(GridQueryParsingTest.class); + suite.addTestSuite(IgniteCacheSqlQueryErrorSelfTest.class); // Config. suite.addTestSuite(IgniteCacheDuplicateEntityConfigurationSelfTest.class); From c2a8cc476d9f979b49962ab92ccd62a2f9b63e6e Mon Sep 17 00:00:00 2001 From: Alexey Kuznetsov Date: Thu, 18 Jan 2018 05:25:19 +0300 Subject: [PATCH 238/243] IGNITE-7274 Web Console: Support multiple statements on Queries screen. (cherry picked from commit 1926783) --- .../cache/GatewayProtectedCacheProxy.java | 14 +++ .../processors/cache/IgniteCacheProxy.java | 41 ++++++--- .../cache/IgniteCacheProxyImpl.java | 86 ++++++++++++------- .../processors/query/GridQueryProcessor.java | 40 ++++++--- .../internal/visor/query/VisorQueryTask.java | 14 ++- .../processors/query/h2/IgniteH2Indexing.java | 3 + .../query/h2/sql/GridSqlQueryParser.java | 21 ++++- .../GridCacheCrossCacheQuerySelfTest.java | 78 ++++++++++++++--- .../cache/index/H2DynamicTableSelfTest.java | 6 +- 9 files changed, 226 insertions(+), 77 deletions(-) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GatewayProtectedCacheProxy.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GatewayProtectedCacheProxy.java index 37bf9bb788f46..61658668236ca 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GatewayProtectedCacheProxy.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GatewayProtectedCacheProxy.java @@ -386,6 +386,20 @@ public void setCacheManager(org.apache.ignite.cache.CacheManager cacheMgr) { } } + /** {@inheritDoc} */ + @Override public List>> queryMultipleStatements(SqlFieldsQuery qry) { + GridCacheGateway gate = gate(); + + CacheOperationContext prev = onEnter(gate, opCtx); + + try { + return delegate.queryMultipleStatements(qry); + } + finally { + onLeave(gate, prev); + } + } + /** {@inheritDoc} */ 
@Override public QueryCursor query(Query qry, IgniteClosure transformer) { GridCacheGateway gate = gate(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxy.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxy.java index 361764e2e8a2e..c33312539d4e5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxy.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxy.java @@ -19,11 +19,16 @@ import java.io.Externalizable; import java.util.Date; +import java.util.List; import java.util.UUID; import org.apache.ignite.IgniteCache; -import org.apache.ignite.lang.IgniteAsyncSupport; +import org.apache.ignite.cache.query.FieldsQueryCursor; +import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.lang.IgniteFuture; +/** + * Cache proxy. + */ public interface IgniteCacheProxy extends IgniteCache, Externalizable { /** * @return Context. @@ -39,15 +44,22 @@ public interface IgniteCacheProxy extends IgniteCache, Externalizabl public IgniteCacheProxy cacheNoGate(); /** - * Creates projection that will operate with binary objects.

        Projection returned by this method will force - * cache not to deserialize binary objects, so keys and values will be returned from cache API methods without - * changes. Therefore, signature of the projection can contain only following types:

        • {@code BinaryObject} - * for binary classes
        • All primitives (byte, int, ...) and there boxed versions (Byte, Integer, ...)
        • - *
        • Arrays of primitives (byte[], int[], ...)
        • {@link String} and array of {@link String}s
        • - *
        • {@link UUID} and array of {@link UUID}s
        • {@link Date} and array of {@link Date}s
        • {@link - * java.sql.Timestamp} and array of {@link java.sql.Timestamp}s
        • Enums and array of enums
        • Maps, - * collections and array of objects (but objects inside them will still be converted if they are binary)
        • - *

        For example, if you use {@link Integer} as a key and {@code Value} class as a value (which will be + * Creates projection that will operate with binary objects. + *

        Projection returned by this method will force cache not to deserialize binary objects, + * so keys and values will be returned from cache API methods without changes. + * Therefore, signature of the projection can contain only following types: + *

          + *
        • {@code BinaryObject} for binary classes
        • + *
        • All primitives (byte, int, ...) and there boxed versions (Byte, Integer, ...)
        • + *
        • Arrays of primitives (byte[], int[], ...)
        • + *
        • {@link String} and array of {@link String}s
        • + *
        • {@link UUID} and array of {@link UUID}s
        • + *
        • {@link Date} and array of {@link Date}s
        • + *
        • {@link java.sql.Timestamp} and array of {@link java.sql.Timestamp}s
        • + *
        • Enums and array of enums
        • + *
        • Maps, collections and array of objects (but objects inside them will still be converted if they are binary)
        • + *
        + *

        For example, if you use {@link Integer} as a key and {@code Value} class as a value (which will be * stored in binary format), you should acquire following projection to avoid deserialization: *

              * IgniteInternalCache prj = cache.keepBinary();
        @@ -100,4 +112,13 @@ public interface IgniteCacheProxy extends IgniteCache, Externalizabl
              * @return Future that contains cache close operation.
              */
             public IgniteFuture closeAsync();
        +
        +    /**
        +     * Queries cache with multiple statements. Accepts {@link SqlFieldsQuery} class.
        +     *
        +     * @param qry SqlFieldsQuery.
        +     * @return List of cursors.
        +     * @see SqlFieldsQuery
        +     */
        +    public List>> queryMultipleStatements(SqlFieldsQuery qry);
         }
        diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java
        index c59785847da9c..4c2d0e696b082 100644
        --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java
        +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java
        @@ -171,8 +171,7 @@ private IgniteCacheProxyImpl(
             /**
              * @return Context.
              */
        -    @Override
        -    public GridCacheContext context() {
        +    @Override public GridCacheContext context() {
                 return ctx;
             }
         
        @@ -279,8 +278,8 @@ public IgniteCacheProxy gatewayWrapper() {
                 try {
                     if (ctx.cache().isLocal())
                         return (IgniteFuture)createFuture(ctx.cache().localLoadCacheAsync(p, args));
        -            else
        -                return (IgniteFuture)createFuture(ctx.cache().globalLoadCacheAsync(p, args));
        +
        +            return (IgniteFuture)createFuture(ctx.cache().globalLoadCacheAsync(p, args));
                 }
                 catch (IgniteCheckedException | IgniteException e) {
                     throw cacheException(e);
        @@ -356,15 +355,13 @@ private  QueryCursor query(
                 @Nullable ClusterGroup grp)
                 throws IgniteCheckedException {
         
        -        final CacheQuery qry;
        -
                 CacheOperationContext opCtxCall = ctx.operationContextPerCall();
         
                 boolean isKeepBinary = opCtxCall != null && opCtxCall.isKeepBinary();
         
                 IgniteBiPredicate p = scanQry.getFilter();
         
        -        qry = ctx.queries().createScanQuery(p, transformer, scanQry.getPartition(), isKeepBinary);
        +        final CacheQuery qry = ctx.queries().createScanQuery(p, transformer, scanQry.getPartition(), isKeepBinary);
         
                 if (scanQry.getPageSize() > 0)
                     qry.pageSize(scanQry.getPageSize());
        @@ -560,6 +557,30 @@ private QueryCursor> queryContinuous(ContinuousQuery qry, bool
                 return (FieldsQueryCursor>)query((Query)qry);
             }
         
        +    /** {@inheritDoc} */
        +    @Override public List>> queryMultipleStatements(SqlFieldsQuery qry) {
        +        A.notNull(qry, "qry");
        +        try {
        +            ctx.checkSecurity(SecurityPermission.CACHE_READ);
        +
        +            validate(qry);
        +
        +            convertToBinary(qry);
        +
        +            CacheOperationContext opCtxCall = ctx.operationContextPerCall();
        +
        +            boolean keepBinary = opCtxCall != null && opCtxCall.isKeepBinary();
        +
        +            return ctx.kernalContext().query().querySqlFields(ctx, qry, keepBinary, false);
        +        }
        +        catch (Exception e) {
        +            if (e instanceof CacheException)
        +                throw (CacheException)e;
        +
        +            throw new CacheException(e);
        +        }
        +    }
        +
             /** {@inheritDoc} */
             @SuppressWarnings("unchecked")
             @Override public  QueryCursor query(Query qry) {
        @@ -1582,15 +1603,22 @@ else if (clazz.isAssignableFrom(IgniteEx.class))
             }
         
             /**
        -     * Creates projection that will operate with binary objects. 

        Projection returned by this method will force - * cache not to deserialize binary objects, so keys and values will be returned from cache API methods without - * changes. Therefore, signature of the projection can contain only following types:

        • {@code BinaryObject} - * for binary classes
        • All primitives (byte, int, ...) and there boxed versions (Byte, Integer, ...)
        • - *
        • Arrays of primitives (byte[], int[], ...)
        • {@link String} and array of {@link String}s
        • - *
        • {@link UUID} and array of {@link UUID}s
        • {@link Date} and array of {@link Date}s
        • {@link - * java.sql.Timestamp} and array of {@link java.sql.Timestamp}s
        • Enums and array of enums
        • Maps, - * collections and array of objects (but objects inside them will still be converted if they are binary)
        • - *

        For example, if you use {@link Integer} as a key and {@code Value} class as a value (which will be + * Creates projection that will operate with binary objects. + *

        Projection returned by this method will force cache not to deserialize binary objects, + * so keys and values will be returned from cache API methods without changes. + * Therefore, signature of the projection can contain only following types: + *

          + *
        • {@code BinaryObject} for binary classes
        • + *
        • All primitives (byte, int, ...) and there boxed versions (Byte, Integer, ...)
        • + *
        • Arrays of primitives (byte[], int[], ...)
        • + *
        • {@link String} and array of {@link String}s
        • + *
        • {@link UUID} and array of {@link UUID}s
        • + *
        • {@link Date} and array of {@link Date}s
        • + *
        • {@link java.sql.Timestamp} and array of {@link java.sql.Timestamp}s
        • + *
        • Enums and array of enums
        • + *
        • Maps, collections and array of objects (but objects inside them will still be converted if they are binary)
        • + *
        + *

        For example, if you use {@link Integer} as a key and {@code Value} class as a value (which will be * stored in binary format), you should acquire following projection to avoid deserialization: *

              * IgniteInternalCache prj = cache.keepBinary();
        @@ -1604,9 +1632,8 @@ else if (clazz.isAssignableFrom(IgniteEx.class))
              *
              * @return Projection for binary objects.
              */
        -    @Override
             @SuppressWarnings("unchecked")
        -    public  IgniteCache keepBinary() {
        +    @Override public  IgniteCache keepBinary() {
                 throw new UnsupportedOperationException();
             }
         
        @@ -1614,17 +1641,15 @@ public  IgniteCache keepBinary() {
              * @param dataCenterId Data center ID.
              * @return Projection for data center id.
              */
        -    @Override
             @SuppressWarnings("unchecked")
        -    public IgniteCache withDataCenterId(byte dataCenterId) {
        +    @Override public IgniteCache withDataCenterId(byte dataCenterId) {
                 throw new UnsupportedOperationException();
             }
         
             /**
              * @return Cache with skip store enabled.
              */
        -    @Override
        -    public IgniteCache skipStore() {
        +    @Override public IgniteCache skipStore() {
                 throw new UnsupportedOperationException();
             }
         
        @@ -1671,8 +1696,7 @@ private  void setFuture(IgniteInternalFuture fut) {
             /**
              * @return Internal proxy.
              */
        -    @Override
        -    public GridCacheProxyImpl internalProxy() {
        +    @Override public GridCacheProxyImpl internalProxy() {
                 return new GridCacheProxyImpl<>(ctx, delegate, ctx.operationContextPerCall());
             }
         
        @@ -1747,17 +1771,17 @@ public boolean isRestarting() {
             public void restart() {
                 GridFutureAdapter restartFut = new GridFutureAdapter<>();
         
        -        final GridFutureAdapter currentFut = this.restartFut.get();
        +        final GridFutureAdapter curFut = this.restartFut.get();
         
        -        boolean changed = this.restartFut.compareAndSet(currentFut, restartFut);
        +        boolean changed = this.restartFut.compareAndSet(curFut, restartFut);
         
        -        if (changed && currentFut != null)
        +        if (changed && curFut != null)
                     restartFut.listen(new IgniteInClosure>() {
        -                @Override public void apply(IgniteInternalFuture future) {
        -                    if (future.error() != null)
        -                        currentFut.onDone(future.error());
        +                @Override public void apply(IgniteInternalFuture fut) {
        +                    if (fut.error() != null)
        +                        curFut.onDone(fut.error());
                             else
        -                        currentFut.onDone();
        +                        curFut.onDone();
                         }
                     });
             }
        diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java
        index 7444ec7f13ea4..de6cf91d82941 100644
        --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java
        +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java
        @@ -1859,6 +1859,22 @@ private void checkxEnabled() throws IgniteException {
             @SuppressWarnings("unchecked")
             public FieldsQueryCursor> querySqlFields(final GridCacheContext cctx, final SqlFieldsQuery qry,
                 final boolean keepBinary) {
        +        return querySqlFields(cctx, qry, keepBinary, true).get(0);
        +    }
        +
        +    /**
        +     * Query SQL fields.
        +     *
        +     * @param cctx Cache context.
        +     * @param qry Query.
        +     * @param keepBinary Keep binary flag.
        +     * @param failOnMultipleStmts If {@code true} the method must throws exception when query contains
        +     *      more then one SQL statement.
        +     * @return Cursor.
        +     */
        +    @SuppressWarnings("unchecked")
        +    public List>> querySqlFields(final GridCacheContext cctx, final SqlFieldsQuery qry,
        +        final boolean keepBinary, final boolean failOnMultipleStmts) {
                 checkxEnabled();
         
                 validateSqlFieldsQuery(qry);
        @@ -1876,37 +1892,39 @@ public FieldsQueryCursor> querySqlFields(final GridCacheContext cct
                     final String schemaName = qry.getSchema() != null ? qry.getSchema() : idx.schema(cctx.name());
                     final int mainCacheId = CU.cacheId(cctx.name());
         
        -            IgniteOutClosureX>> clo;
        +            IgniteOutClosureX>>> clo;
         
                     if (loc) {
        -                clo = new IgniteOutClosureX>>() {
        -                    @Override public FieldsQueryCursor> applyx() throws IgniteCheckedException {
        +                clo = new IgniteOutClosureX>>>() {
        +                    @Override public List>> applyx() throws IgniteCheckedException {
                                 GridQueryCancel cancel = new GridQueryCancel();
         
        -                        FieldsQueryCursor> cur;
        +                        List>> cursors;
         
                                 if (cctx.config().getQueryParallelism() > 1) {
                                     qry.setDistributedJoins(true);
         
        -                            cur = idx.queryDistributedSqlFields(schemaName, qry,
        -                                keepBinary, cancel, mainCacheId, true).get(0);
        +                            cursors = idx.queryDistributedSqlFields(schemaName, qry,
        +                                keepBinary, cancel, mainCacheId, true);
                                 }
                                 else {
                                     IndexingQueryFilter filter = idx.backupFilter(requestTopVer.get(), qry.getPartitions());
         
        -                            cur = idx.queryLocalSqlFields(schemaName, qry, keepBinary, filter, cancel);
        +                            cursors = new ArrayList<>(1);
        +
        +                            cursors.add(idx.queryLocalSqlFields(schemaName, qry, keepBinary, filter, cancel));
                                 }
         
                                 sendQueryExecutedEvent(qry.getSql(), qry.getArgs(), cctx.name());
         
        -                        return cur;
        +                        return cursors;
                             }
                         };
                     }
                     else {
        -                clo = new IgniteOutClosureX>>() {
        -                    @Override public FieldsQueryCursor> applyx() throws IgniteCheckedException {
        -                        return idx.queryDistributedSqlFields(schemaName, qry, keepBinary, null, mainCacheId, true).get(0);
        +                clo = new IgniteOutClosureX>>>() {
        +                    @Override public List>> applyx() throws IgniteCheckedException {
        +                        return idx.queryDistributedSqlFields(schemaName, qry, keepBinary, null, mainCacheId, failOnMultipleStmts);
                             }
                         };
                     }
        diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java
        index 51bf7d6f10009..1daa1f2bd9ca7 100644
        --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java
        +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java
        @@ -25,6 +25,7 @@
         import org.apache.ignite.IgniteCache;
         import org.apache.ignite.cache.query.FieldsQueryCursor;
         import org.apache.ignite.cache.query.SqlFieldsQuery;
        +import org.apache.ignite.internal.processors.cache.IgniteCacheProxy;
         import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata;
         import org.apache.ignite.internal.processors.task.GridInternal;
         import org.apache.ignite.internal.util.typedef.F;
        @@ -84,22 +85,27 @@ private VisorQueryJob(VisorQueryTaskArg arg, boolean debug) {
         
                         long start = U.currentTimeMillis();
         
        -                FieldsQueryCursor> qryCursor;
        +                List>> qryCursors;
         
                         String cacheName = arg.getCacheName();
         
                         if (F.isEmpty(cacheName))
        -                    qryCursor = ignite.context().query().querySqlFieldsNoCache(qry, true);
        +                    qryCursors = ignite.context().query().querySqlFieldsNoCache(qry, true, false);
                         else {
                             IgniteCache c = ignite.cache(cacheName);
         
                             if (c == null)
                                 throw new SQLException("Fail to execute query. Cache not found: " + cacheName);
         
        -                    qryCursor = c.withKeepBinary().query(qry);
        +                    qryCursors = ((IgniteCacheProxy)c.withKeepBinary()).queryMultipleStatements(qry);
                         }
         
        -                VisorQueryCursor> cur = new VisorQueryCursor<>(qryCursor);
        +                // In case of multiple statements leave opened only last cursor.
        +                for (int i = 0; i < qryCursors.size() - 1; i++)
        +                    U.closeQuiet(qryCursors.get(i));
        +
        +                // In case of multiple statements return last cursor as result.
        +                VisorQueryCursor> cur = new VisorQueryCursor<>(F.last(qryCursors));
         
                         Collection meta = cur.fieldsMeta();
         
        diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java
        index c546d2b037bcc..710103f91405a 100644
        --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java
        +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java
        @@ -859,6 +859,9 @@ public GridQueryFieldsResult queryLocalSqlFields(final String schemaName, final
         
                 final PreparedStatement stmt = preparedStatementWithParams(conn, qry, params, true);
         
        +        if (GridSqlQueryParser.checkMultipleStatements(stmt))
        +            throw new IgniteSQLException("Multiple statements queries are not supported for local queries");
        +
                 Prepared p = GridSqlQueryParser.prepared(stmt);
         
                 if (DmlStatementsProcessor.isDmlStatement(p)) {
        diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java
        index c9644137ac272..f918d59348458 100644
        --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java
        +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java
        @@ -408,10 +408,13 @@ public class GridSqlQueryParser {
             /** */
             private static final Getter REMAINING;
         
        +    /** */
        +    public static final String ORG_H2_COMMAND_COMMAND_LIST = "org.h2.command.CommandList";
        +
             static {
                 try {
                     CLS_COMMAND_LIST = (Class)CommandContainer.class.getClassLoader()
        -                .loadClass("org.h2.command.CommandList");
        +                .loadClass(ORG_H2_COMMAND_COMMAND_LIST);
         
                     LIST_COMMAND = getter(CLS_COMMAND_LIST, "command");
         
        @@ -506,6 +509,16 @@ public GridSqlQueryParser(boolean useOptimizedSubqry) {
                 optimizedTableFilterOrder = useOptimizedSubqry ? new HashMap() : null;
             }
         
        +    /**
        +     * @param stmt Prepared statement to check.
        +     * @return {@code true} in case of multiple statements.
        +     */
        +    public static boolean checkMultipleStatements(PreparedStatement stmt) {
        +        Command cmd = COMMAND.get((JdbcPreparedStatement)stmt);
        +
        +        return ORG_H2_COMMAND_COMMAND_LIST.equals(cmd.getClass().getName());
        +    }
        +
             /**
              * @param stmt Prepared statement.
              * @return Parsed select.
        @@ -530,7 +543,7 @@ public static PreparedWithRemaining preparedWithRemaining(PreparedStatement stmt
                 else {
                     Class cmdCls = cmd.getClass();
         
        -            if (cmdCls.getName().equals("org.h2.command.CommandList")) {
        +            if (cmdCls.getName().equals(ORG_H2_COMMAND_COMMAND_LIST)) {
                         return new PreparedWithRemaining(PREPARED.get(LIST_COMMAND.get(cmd)), REMAINING.get(cmd));
                     }
                     else
        @@ -1452,7 +1465,7 @@ private static int parseIntParam(String name, String val) {
                 try {
                     return Integer.parseInt(val);
                 }
        -        catch (NumberFormatException e) {
        +        catch (NumberFormatException ignored) {
                     throw new IgniteSQLException("Parameter value must be an integer [name=" + name + ", value=" + val + ']',
                         IgniteQueryErrorCode.PARSING);
                 }
        @@ -1970,4 +1983,4 @@ public String remainingSql() {
                     return remainingSql;
                 }
             }
        -}
        \ No newline at end of file
        +}
        diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheCrossCacheQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheCrossCacheQuerySelfTest.java
        index 6344cba5baf50..bbd3d0d9e85eb 100644
        --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheCrossCacheQuerySelfTest.java
        +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheCrossCacheQuerySelfTest.java
        @@ -24,21 +24,26 @@
         import java.util.List;
         import java.util.Map;
         import java.util.Set;
        +import java.util.concurrent.Callable;
         import org.apache.ignite.Ignite;
         import org.apache.ignite.IgniteCache;
         import org.apache.ignite.IgniteCheckedException;
         import org.apache.ignite.cache.CacheMode;
         import org.apache.ignite.cache.CacheWriteSynchronizationMode;
        +import org.apache.ignite.cache.query.FieldsQueryCursor;
         import org.apache.ignite.cache.query.SqlFieldsQuery;
         import org.apache.ignite.cache.query.annotations.QuerySqlField;
         import org.apache.ignite.configuration.CacheConfiguration;
         import org.apache.ignite.configuration.IgniteConfiguration;
         import org.apache.ignite.internal.IgniteKernal;
         import org.apache.ignite.internal.processors.query.GridQueryProcessor;
        +import org.apache.ignite.internal.processors.query.IgniteSQLException;
         import org.apache.ignite.internal.util.typedef.X;
        +import org.apache.ignite.internal.util.typedef.internal.U;
         import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
         import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
         import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
        +import org.apache.ignite.testframework.GridTestUtils;
         import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
         
         import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
        @@ -48,6 +53,15 @@
          * Tests cross cache queries.
          */
         public class GridCacheCrossCacheQuerySelfTest extends GridCommonAbstractTest {
        +    /** */
        +    private static final String PART_CACHE_NAME = "partitioned";
        +
        +    /** */
        +    private static final String REPL_PROD_CACHE_NAME = "replicated-prod";
        +
        +    /** */
        +    private static final String REPL_STORE_CACHE_NAME = "replicated-store";
        +
             /** */
             private static final TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true);
         
        @@ -65,9 +79,9 @@ public class GridCacheCrossCacheQuerySelfTest extends GridCommonAbstractTest {
                 c.setDiscoverySpi(disco);
         
                 c.setCacheConfiguration(
        -            createCache("partitioned", CacheMode.PARTITIONED, Integer.class, FactPurchase.class),
        -            createCache("replicated-prod", CacheMode.REPLICATED, Integer.class, DimProduct.class),
        -            createCache("replicated-store", CacheMode.REPLICATED, Integer.class, DimStore.class));
        +            createCache(PART_CACHE_NAME, CacheMode.PARTITIONED, Integer.class, FactPurchase.class),
        +            createCache(REPL_PROD_CACHE_NAME, CacheMode.REPLICATED, Integer.class, DimProduct.class),
        +            createCache(REPL_STORE_CACHE_NAME, CacheMode.REPLICATED, Integer.class, DimStore.class));
         
                 return c;
             }
        @@ -115,8 +129,7 @@ private static CacheConfiguration createCache(String name, CacheMode mode, Class
              * @throws Exception If failed.
              */
             public void testTwoStepGroupAndAggregates() throws Exception {
        -        IgniteInternalCache cache =
        -            ((IgniteKernal)ignite).getCache("partitioned");
        +        IgniteInternalCache cache = ((IgniteKernal)ignite).getCache(PART_CACHE_NAME);
         
                 GridQueryProcessor qryProc = ((IgniteKernal) ignite).context().query();
         
        @@ -216,7 +229,7 @@ public void testTwoStepGroupAndAggregates() throws Exception {
              * @throws Exception If failed.
              */
             public void testApiQueries() throws Exception {
        -        IgniteCache c = ignite.cache("partitioned");
        +        IgniteCache c = ignite.cache(PART_CACHE_NAME);
         
                 c.query(new SqlFieldsQuery("select cast(? as varchar) from FactPurchase").setArgs("aaa")).getAll();
         
        @@ -227,9 +240,46 @@ public void testApiQueries() throws Exception {
                 assertEquals("aaa", res.get(0).get(0));
             }
         
        -//    @Override protected long getTestTimeout() {
        -//        return 10 * 60 * 1000;
        -//    }
        +    /**
        +     * @throws Exception If failed.
        +     */
        +    public void testMultiStatement() throws Exception {
        +        final IgniteInternalCache cache = ((IgniteKernal)ignite).getCache(PART_CACHE_NAME);
        +
        +        final GridQueryProcessor qryProc = ((IgniteKernal) ignite).context().query();
        +
        +        final SqlFieldsQuery qry = new SqlFieldsQuery(
        +            "insert into FactPurchase(_key, id, productId, storeId, price) values (555, 555, 555, 555, 555);" +
        +            "select count(*) from FactPurchase"
        +        );
        +
        +        GridTestUtils.assertThrows(log,
        +            new Callable() {
        +                @Override public Object call() throws Exception {
        +                    qryProc.querySqlFields(cache.context(), qry, false, true);
        +
        +                    return null;
        +                }
        +            }, IgniteSQLException.class, "Multiple statements queries are not supported");
        +
        +        List<FieldsQueryCursor<List<?>>> cursors = qryProc.querySqlFields(cache.context(), qry, false, false);
        +
        +        assertEquals(2, cursors.size());
        +
        +        for (FieldsQueryCursor<List<?>> cur : cursors)
        +            U.closeQuiet(cur);
        +
        +        qry.setLocal(true);
        +
        +        GridTestUtils.assertThrows(log,
        +            new Callable() {
        +                @Override public Object call() throws Exception {
        +                    qryProc.querySqlFields(cache.context(), qry, false, false);
        +
        +                    return null;
        +                }
        +            }, IgniteSQLException.class, "Multiple statements queries are not supported for local queries");
        +    }
         
             /**
              * @param l List.
        @@ -246,8 +296,8 @@ private static int i(List l, int idx){
             private void fillCaches() throws IgniteCheckedException {
                 int idGen = 0;
         
        -        GridCacheAdapter dimCacheProd = ((IgniteKernal)ignite).internalCache("replicated-prod");
        -        GridCacheAdapter dimCacheStore = ((IgniteKernal)ignite).internalCache("replicated-store");
        +        GridCacheAdapter dimCacheProd = ((IgniteKernal)ignite).internalCache(REPL_PROD_CACHE_NAME);
        +        GridCacheAdapter dimCacheStore = ((IgniteKernal)ignite).internalCache(REPL_STORE_CACHE_NAME);
         
                 List<DimStore> dimStores = new ArrayList<>();
         
        @@ -273,17 +323,17 @@ private void fillCaches() throws IgniteCheckedException {
                     dimProds.add(v);
                 }
         
        -        GridCacheAdapter factCache = ((IgniteKernal)ignite).internalCache("partitioned");
        +        GridCacheAdapter factCache = ((IgniteKernal)ignite).internalCache(PART_CACHE_NAME);
         
                 Collections.sort(dimStores, new Comparator<DimStore>() {
                     @Override public int compare(DimStore o1, DimStore o2) {
        -                return o1.getId() > o2.getId() ? 1 : o1.getId() < o2.getId() ? -1 : 0;
        +                return Integer.compare(o1.getId(), o2.getId());
                     }
                 });
         
                 Collections.sort(dimProds, new Comparator<DimProduct>() {
                     @Override public int compare(DimProduct o1, DimProduct o2) {
        -                return o1.getId() > o2.getId() ? 1 : o1.getId() < o2.getId() ? -1 : 0;
        +                return Integer.compare(o1.getId(), o2.getId());
                     }
                 });
         
        diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java
        index ef59a62bcad1f..82602f04dbce3 100644
        --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java
        +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java
        @@ -74,13 +74,13 @@
          */
         public class H2DynamicTableSelfTest extends AbstractSchemaSelfTest {
             /** Client node index. */
        -    private final static int CLIENT = 2;
        +    private static final int CLIENT = 2;
         
             /** */
        -    private final static String INDEXED_CACHE_NAME = CACHE_NAME + "_idx";
        +    private static final String INDEXED_CACHE_NAME = CACHE_NAME + "_idx";
         
             /** */
        -    private final static String INDEXED_CACHE_NAME_2 = INDEXED_CACHE_NAME + "_2";
        +    private static final String INDEXED_CACHE_NAME_2 = INDEXED_CACHE_NAME + "_2";
         
             /** Data region name. */
             public static final String DATA_REGION_NAME = "my_data_region";
        
        From 85d1e8fc5ceefe0d8f41afb2e8be4fa92d5a1df7 Mon Sep 17 00:00:00 2001
        From: rkondakov 
        Date: Wed, 17 Jan 2018 10:44:55 +0300
        Subject: [PATCH 239/243] IGNITE-6022: JDBC: optimized batching handling. This
         closes #3298.
        
        (cherry picked from commit 0a4f22e)
        ---
         .../jdbc/thin/JdbcThinBatchSelfTest.java      | 509 +++++++++++++++++-
         .../jdbc/thin/JdbcThinErrorsSelfTest.java     |   7 +-
         .../cache/query/SqlFieldsQueryEx.java         |  45 ++
         .../odbc/jdbc/JdbcRequestHandler.java         | 104 +++-
         .../processors/query/IgniteSQLException.java  |   9 +
         .../query/h2/DmlStatementsProcessor.java      | 330 ++++++++++--
         .../processors/query/h2/IgniteH2Indexing.java |   7 +-
         .../query/h2/dml/DmlBatchSender.java          | 247 +++++++--
         .../processors/query/h2/dml/DmlUtils.java     |  12 +
         .../processors/query/h2/dml/UpdatePlan.java   |  61 ++-
         .../query/h2/dml/UpdatePlanBuilder.java       |   2 +-
         .../h2/twostep/GridReduceQueryExecutor.java   |   2 +-
         12 files changed, 1220 insertions(+), 115 deletions(-)
        
        diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBatchSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBatchSelfTest.java
        index 8609615131f42..fe7c170729595 100644
        --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBatchSelfTest.java
        +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBatchSelfTest.java
        @@ -21,9 +21,14 @@
         import java.sql.PreparedStatement;
         import java.sql.SQLException;
         import java.sql.Statement;
        +import java.util.Arrays;
         import java.util.concurrent.Callable;
        +import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
        +import org.apache.ignite.internal.processors.odbc.SqlStateCode;
         import org.apache.ignite.testframework.GridTestUtils;
         
        +import static org.junit.Assert.assertArrayEquals;
        +
         /**
          * Statement test.
          */
        @@ -151,7 +156,9 @@ public void testBatchOnClosedStatement() throws SQLException {
             public void testBatchException() throws SQLException {
                 final int BATCH_SIZE = 7;
         
        -        for (int idx = 0, i = 0; i < BATCH_SIZE; ++i, idx += i) {
        +        final int FAILED_IDX = 5;
        +
        +        for (int idx = 0, i = 0; i < FAILED_IDX; ++i, idx += i) {
                     stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values "
                         + generateValues(idx, i + 1));
                 }
        @@ -159,7 +166,7 @@ public void testBatchException() throws SQLException {
                 stmt.addBatch("select * from Person");
         
                 stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values "
        -            + generateValues(100, 1));
        +            + generateValues(100, 7));
         
                 try {
                     stmt.executeBatch();
        @@ -171,13 +178,212 @@ public void testBatchException() throws SQLException {
                     assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length);
         
                     for (int i = 0; i < BATCH_SIZE; ++i)
        -                assertEquals("Invalid update count",i + 1, updCnts[i]);
        +                assertEquals("Invalid update count", i != FAILED_IDX ? i + 1 : Statement.EXECUTE_FAILED,
        +                    updCnts[i]);
         
                     if (!e.getMessage().contains("Given statement type does not match that declared by JDBC driver")) {
                         log.error("Invalid exception: ", e);
         
                         fail();
                     }
        +
        +            assertEquals("Invalid SQL state.", SqlStateCode.PARSING_EXCEPTION, e.getSQLState());
        +            assertEquals("Invalid error code.", IgniteQueryErrorCode.STMT_TYPE_MISMATCH, e.getErrorCode());
        +        }
        +    }
        +
        +    /**
        +     * @throws SQLException If failed.
        +     */
        +    public void testBatchParseException() throws SQLException {
        +        final int BATCH_SIZE = 7;
        +
        +        final int FAILED_IDX = 5;
        +
        +        for (int idx = 0, i = 0; i < FAILED_IDX; ++i, idx += i) {
        +            stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values "
        +                + generateValues(idx, i + 1));
        +        }
        +
        +        stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values (4444, 'fail', 1, 1, 1)");
        +
        +        stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values "
        +            + generateValues(100, 7));
        +
        +        try {
        +            stmt.executeBatch();
        +
        +            fail("BatchUpdateException must be thrown");
        +        } catch(BatchUpdateException e) {
        +            int [] updCnts = e.getUpdateCounts();
        +
        +            assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length);
        +
        +            for (int i = 0; i < BATCH_SIZE; ++i)
        +                assertEquals("Invalid update count: " + i, i != FAILED_IDX ? i + 1 : Statement.EXECUTE_FAILED,
        +                    updCnts[i]);
        +
        +            if (!e.getMessage().contains("Value conversion failed")) {
        +                log.error("Invalid exception: ", e);
        +
        +                fail();
        +            }
        +
        +            assertEquals("Invalid SQL state.", SqlStateCode.CONVERSION_FAILED, e.getSQLState());
        +            assertEquals("Invalid error code.", IgniteQueryErrorCode.CONVERSION_FAILED, e.getErrorCode());
        +        }
        +    }
        +
        +    /**
        +     * @throws SQLException If failed.
        +     */
        +    public void testBatchMerge() throws SQLException {
        +        final int BATCH_SIZE = 7;
        +
        +        for (int idx = 0, i = 0; i < BATCH_SIZE; ++i, idx += i) {
        +            stmt.addBatch("merge into Person (_key, id, firstName, lastName, age) values "
        +                + generateValues(idx, i + 1));
        +        }
        +
        +        int [] updCnts = stmt.executeBatch();
        +
        +        assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length);
        +
        +        for (int i = 0; i < BATCH_SIZE; ++i)
        +            assertEquals("Invalid update count",i + 1, updCnts[i]);
        +    }
        +
        +    /**
        +     * @throws SQLException If failed.
        +     */
        +    public void testBatchMergeParseException() throws SQLException {
        +        final int BATCH_SIZE = 7;
        +
        +        final int FAILED_IDX = 5;
        +
        +        for (int idx = 0, i = 0; i < FAILED_IDX; ++i, idx += i) {
        +            stmt.addBatch("merge into Person (_key, id, firstName, lastName, age) values "
        +                + generateValues(idx, i + 1));
        +        }
        +
        +        stmt.addBatch("merge into Person (_key, id, firstName, lastName, age) values (4444, 'FAIL', 1, 1, 1)");
        +
        +        stmt.addBatch("merge into Person (_key, id, firstName, lastName, age) values "
        +            + generateValues(100, 7));
        +
        +        try {
        +            stmt.executeBatch();
        +
        +            fail("BatchUpdateException must be thrown");
        +        } catch(BatchUpdateException e) {
        +            int [] updCnts = e.getUpdateCounts();
        +
        +            assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length);
        +
        +            for (int i = 0; i < BATCH_SIZE; ++i)
        +                assertEquals("Invalid update count: " + i, i != FAILED_IDX ? i + 1 : Statement.EXECUTE_FAILED,
        +                    updCnts[i]);
        +
        +            if (!e.getMessage().contains("Value conversion failed")) {
        +                log.error("Invalid exception: ", e);
        +
        +                fail();
        +            }
        +
        +            assertEquals("Invalid SQL state.", SqlStateCode.CONVERSION_FAILED, e.getSQLState());
        +            assertEquals("Invalid error code.", IgniteQueryErrorCode.CONVERSION_FAILED, e.getErrorCode());
        +        }
        +    }
        +
        +
        +    /**
        +     * @throws SQLException If failed.
        +     */
        +    public void testBatchKeyDuplicatesException() throws SQLException {
        +        final int BATCH_SIZE = 7;
        +
        +        final int FAILED_IDX = 5;
        +
        +        int idx = 0;
        +
        +        for (int i = 0; i < FAILED_IDX; ++i, idx += i) {
        +            stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values "
        +                + generateValues(idx, i + 1));
        +        }
        +
        +        stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values ('p0', 0, 'Name0', 'Lastname0', 20)");
        +
        +        stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values "
        +            + generateValues(++idx, 7));
        +
        +        try {
        +            stmt.executeBatch();
        +
        +            fail("BatchUpdateException must be thrown");
        +        } catch(BatchUpdateException e) {
        +            int [] updCnts = e.getUpdateCounts();
        +
        +            assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length);
        +
        +            for (int i = 0; i < BATCH_SIZE; ++i)
        +                assertEquals("Invalid update count: " + i, i != FAILED_IDX ? i + 1 : Statement.EXECUTE_FAILED,
        +                    updCnts[i]);
        +
        +            if (!e.getMessage().contains("Failed to INSERT some keys because they are already in cache [keys=[p0]")) {
        +                log.error("Invalid exception: ", e);
        +
        +                fail();
        +            }
        +
        +            assertEquals("Invalid SQL state.", SqlStateCode.CONSTRAINT_VIOLATION, e.getSQLState());
        +            assertEquals("Invalid error code.", IgniteQueryErrorCode.DUPLICATE_KEY, e.getErrorCode());
        +        }
        +    }
        +
        +    /**
        +     * @throws SQLException If failed.
        +     */
        +    public void testHeterogeneousBatch() throws SQLException {
        +        stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values ('p0', 0, 'Name0', 'Lastname0', 10)");
        +        stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values ('p1', 1, 'Name1', 'Lastname1', 20), ('p2', 2, 'Name2', 'Lastname2', 30)");
        +        stmt.addBatch("merge into Person (_key, id, firstName, lastName, age) values ('p3', 3, 'Name3', 'Lastname3', 40)");
        +        stmt.addBatch("update Person set id = 5 where age >= 30");
        +        stmt.addBatch("merge into Person (_key, id, firstName, lastName, age) values ('p0', 2, 'Name2', 'Lastname2', 50)");
        +        stmt.addBatch("delete from Person where age <= 40");
        +
        +        int [] updCnts = stmt.executeBatch();
        +
        +        assertEquals("Invalid update counts size", 6, updCnts.length);
        +        assertArrayEquals("Invalid update count", new int[] {1, 2, 1, 2, 1, 3}, updCnts);
        +    }
        +
        +    /**
        +     * @throws SQLException If failed.
        +     */
        +    public void testHeterogeneousBatchException() throws SQLException {
        +        stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values ('p0', 0, 'Name0', 'Lastname0', 10)");
        +        stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values ('p1', 1, 'Name1', 'Lastname1', 20), ('p2', 2, 'Name2', 'Lastname2', 30)");
        +        stmt.addBatch("merge into Person (_key, id, firstName, lastName, age) values ('p3', 3, 'Name3', 'Lastname3', 40)");
        +        stmt.addBatch("update Person set id = 'FAIL' where age >= 30"); // Fail.
        +        stmt.addBatch("merge into Person (_key, id, firstName, lastName, age) values ('p0', 2, 'Name2', 'Lastname2', 50)");
        +        stmt.addBatch("delete from Person where FAIL <= 40"); // Fail.
        +
        +        try {
        +            stmt.executeBatch();
        +
        +            fail("BatchUpdateException must be thrown");
        +        } catch(BatchUpdateException e) {
        +            int[] updCnts = e.getUpdateCounts();
        +
        +            if (!e.getMessage().contains("Value conversion failed")) {
        +                log.error("Invalid exception: ", e);
        +
        +                fail();
        +            }
        +
        +            assertEquals("Invalid update counts size", 6, updCnts.length);
        +            assertArrayEquals("Invalid update count",
        +                new int[] {1, 2, 1, Statement.EXECUTE_FAILED, 1, Statement.EXECUTE_FAILED}, updCnts);
                 }
             }
         
        @@ -235,7 +441,11 @@ public void testBatchPrepared() throws SQLException {
             public void testBatchExceptionPrepared() throws SQLException {
                 final int BATCH_SIZE = 7;
         
        -        for (int i = 0; i < BATCH_SIZE; ++i) {
        +        final int FAILED_IDX = 5;
        +
        +        assert FAILED_IDX + 2 == BATCH_SIZE;
        +
        +        for (int i = 0; i < FAILED_IDX; ++i) {
                     int paramCnt = 1;
         
                     pstmt.setString(paramCnt++, "p" + i);
        @@ -248,11 +458,20 @@ public void testBatchExceptionPrepared() throws SQLException {
                 }
         
                 int paramCnt = 1;
        -        pstmt.setString(paramCnt++, "p" + 100);
        -        pstmt.setString(paramCnt++, "x");
        -        pstmt.setString(paramCnt++, "Name" + 100);
        -        pstmt.setString(paramCnt++, "Lastname" + 100);
        -        pstmt.setInt(paramCnt++, 20 + 100);
        +        pstmt.setString(paramCnt++, "p" + FAILED_IDX);
        +        pstmt.setString(paramCnt++, "FAIL");
        +        pstmt.setString(paramCnt++, "Name" + FAILED_IDX);
        +        pstmt.setString(paramCnt++, "Lastname" + FAILED_IDX);
        +        pstmt.setInt(paramCnt++, 20 + FAILED_IDX);
        +
        +        pstmt.addBatch();
        +
        +        paramCnt = 1;
        +        pstmt.setString(paramCnt++, "p" + FAILED_IDX + 1);
        +        pstmt.setInt(paramCnt++, FAILED_IDX + 1);
        +        pstmt.setString(paramCnt++, "Name" + FAILED_IDX + 1);
        +        pstmt.setString(paramCnt++, "Lastname" + FAILED_IDX + 1);
        +        pstmt.setInt(paramCnt++, 20 + FAILED_IDX + 1);
         
                 pstmt.addBatch();
         
        @@ -267,13 +486,283 @@ public void testBatchExceptionPrepared() throws SQLException {
                     assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length);
         
                     for (int i = 0; i < BATCH_SIZE; ++i)
        -                assertEquals("Invalid update count",1, updCnts[i]);
        +                assertEquals("Invalid update count", i != FAILED_IDX ? 1 : Statement.EXECUTE_FAILED, updCnts[i]);
         
                     if (!e.getMessage().contains("Value conversion failed")) {
                         log.error("Invalid exception: ", e);
         
                         fail();
                     }
        +
        +            assertEquals("Invalid SQL state.", SqlStateCode.CONVERSION_FAILED, e.getSQLState());
        +            assertEquals("Invalid error code.", IgniteQueryErrorCode.CONVERSION_FAILED, e.getErrorCode());
        +        }
        +    }
        +
        +    /**
        +     * @throws SQLException If failed.
        +     */
        +    public void testBatchMergePrepared() throws SQLException {
        +        final int BATCH_SIZE = 10;
        +
        +        pstmt = conn.prepareStatement("merge into Person(_key, id, firstName, lastName, age) values " +
        +            "(?, ?, ?, ?, ?)");
        +
        +        for (int i = 0; i < BATCH_SIZE; ++i) {
        +            int paramCnt = 1;
        +
        +            pstmt.setString(paramCnt++, "p" + i);
        +            pstmt.setInt(paramCnt++, i);
        +            pstmt.setString(paramCnt++, "Name" + i);
        +            pstmt.setString(paramCnt++, "Lastname" + i);
        +            pstmt.setInt(paramCnt++, 20 + i);
        +
        +            pstmt.addBatch();
        +        }
        +
        +        int [] updCnts = pstmt.executeBatch();
        +
        +        assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length);
        +
        +        for (int i = 0; i < BATCH_SIZE; ++i)
        +            assertEquals("Invalid update count",1, updCnts[i]);
        +    }
        +
        +    /**
        +     * @throws SQLException If failed.
        +     */
        +    public void testBatchMergeExceptionPrepared() throws SQLException {
        +        final int BATCH_SIZE = 7;
        +
        +        final int FAILED_IDX = 5;
        +
        +        pstmt = conn.prepareStatement("merge into Person(_key, id, firstName, lastName, age) values " +
        +            "(?, ?, ?, ?, ?)");
        +
        +        assert FAILED_IDX + 2 == BATCH_SIZE;
        +
        +        for (int i = 0; i < FAILED_IDX; ++i) {
        +            int paramCnt = 1;
        +
        +            pstmt.setString(paramCnt++, "p" + i);
        +            pstmt.setInt(paramCnt++, i);
        +            pstmt.setString(paramCnt++, "Name" + i);
        +            pstmt.setString(paramCnt++, "Lastname" + i);
        +            pstmt.setInt(paramCnt++, 20 + i);
        +
        +            pstmt.addBatch();
        +        }
        +
        +        int paramCnt = 1;
        +        pstmt.setString(paramCnt++, "p" + FAILED_IDX);
        +        pstmt.setString(paramCnt++, "FAIL");
        +        pstmt.setString(paramCnt++, "Name" + FAILED_IDX);
        +        pstmt.setString(paramCnt++, "Lastname" + FAILED_IDX);
        +        pstmt.setInt(paramCnt++, 20 + FAILED_IDX);
        +
        +        pstmt.addBatch();
        +
        +        paramCnt = 1;
        +        pstmt.setString(paramCnt++, "p" + FAILED_IDX + 1);
        +        pstmt.setInt(paramCnt++, FAILED_IDX + 1);
        +        pstmt.setString(paramCnt++, "Name" + FAILED_IDX + 1);
        +        pstmt.setString(paramCnt++, "Lastname" + FAILED_IDX + 1);
        +        pstmt.setInt(paramCnt++, 20 + FAILED_IDX + 1);
        +
        +        pstmt.addBatch();
        +
        +        try {
        +            int[] res = pstmt.executeBatch();
        +
        +            fail("BatchUpdateException must be thrown res=" + Arrays.toString(res));
        +        }
        +        catch(BatchUpdateException e) {
        +            int [] updCnts = e.getUpdateCounts();
        +
        +            assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length);
        +
        +            for (int i = 0; i < BATCH_SIZE; ++i)
        +                assertEquals("Invalid update count", i != FAILED_IDX ? 1 : Statement.EXECUTE_FAILED, updCnts[i]);
        +
        +            if (!e.getMessage().contains("Value conversion failed")) {
        +                log.error("Invalid exception: ", e);
        +
        +                fail();
        +            }
        +
        +            assertEquals("Invalid SQL state.", SqlStateCode.CONVERSION_FAILED, e.getSQLState());
        +            assertEquals("Invalid error code.", IgniteQueryErrorCode.CONVERSION_FAILED, e.getErrorCode());
        +        }
        +    }
        +
        +    /**
        +     * Populates table 'Person' with entities.
        +     *
        +     * @param size Number of entities.
        +     * @throws SQLException If failed.
        +     */
        +    private void populateTable(int size) throws SQLException {
        +        stmt.addBatch("insert into Person (_key, id, firstName, lastName, age) values "
        +            + generateValues(0, size));
        +
        +        stmt.executeBatch();
        +    }
        +
        +    /**
        +     * @throws SQLException If failed.
        +     */
        +    public void testBatchUpdatePrepared() throws SQLException {
        +        final int BATCH_SIZE = 10;
        +
        +        populateTable(BATCH_SIZE);
        +
        +        pstmt = conn.prepareStatement("update Person set age = 100 where id = ?;");
        +
        +        for (int i = 0; i < BATCH_SIZE; ++i) {
        +            pstmt.setInt(1, i);
        +
        +            pstmt.addBatch();
        +        }
        +
        +        int [] updCnts = pstmt.executeBatch();
        +
        +        assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length);
        +
        +        for (int i = 0; i < BATCH_SIZE; ++i)
        +            assertEquals("Invalid update count",1, updCnts[i]);
        +    }
        +
        +    /**
        +     * @throws SQLException If failed.
        +     */
        +    public void testBatchUpdateExceptionPrepared() throws SQLException {
        +        final int BATCH_SIZE = 7;
        +
        +        final int FAILED_IDX = 5;
        +
        +        populateTable(BATCH_SIZE);
        +
        +        pstmt = conn.prepareStatement("update Person set age = 100 where id = ?;");
        +
        +        assert FAILED_IDX + 2 == BATCH_SIZE;
        +
        +        for (int i = 0; i < FAILED_IDX; ++i) {
        +            pstmt.setInt(1, i);
        +
        +            pstmt.addBatch();
        +        }
        +
        +        pstmt.setString(1, "FAIL");
        +
        +        pstmt.addBatch();
        +
        +        pstmt.setInt(1, FAILED_IDX + 1);
        +
        +        pstmt.addBatch();
        +
        +        try {
        +            int[] res = pstmt.executeBatch();
        +
        +            fail("BatchUpdateException must be thrown res=" + Arrays.toString(res));
        +        }
        +        catch(BatchUpdateException e) {
        +            int [] updCnts = e.getUpdateCounts();
        +
        +            assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length);
        +
        +            for (int i = 0; i < BATCH_SIZE; ++i)
        +                assertEquals("Invalid update count", i != FAILED_IDX ? 1 : Statement.EXECUTE_FAILED, updCnts[i]);
        +
        +            if (!e.getMessage().contains("Data conversion error converting \"FAIL\"")) {
        +                log.error("Invalid exception: ", e);
        +
        +                fail();
        +            }
        +
        +            assertEquals("Invalid SQL state.", SqlStateCode.INTERNAL_ERROR, e.getSQLState());
        +            assertEquals("Invalid error code.", IgniteQueryErrorCode.UNKNOWN, e.getErrorCode());
        +
        +            //assertEquals("Invalid SQL state.", SqlStateCode.CONVERSION_FAILED, e.getSQLState());
        +            //assertEquals("Invalid error code.", IgniteQueryErrorCode.CONVERSION_FAILED, e.getErrorCode());
        +        }
        +    }
        +
        +    /**
        +     * @throws SQLException If failed.
        +     */
        +    public void testBatchDeletePrepared() throws SQLException {
        +        final int BATCH_SIZE = 10;
        +
        +        populateTable(BATCH_SIZE);
        +
        +        pstmt = conn.prepareStatement("delete from Person where id = ?;");
        +
        +        for (int i = 0; i < BATCH_SIZE; ++i) {
        +            pstmt.setInt(1, i);
        +
        +            pstmt.addBatch();
        +        }
        +
        +        int [] updCnts = pstmt.executeBatch();
        +
        +        assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length);
        +
        +        for (int i = 0; i < BATCH_SIZE; ++i)
        +            assertEquals("Invalid update count",1, updCnts[i]);
        +    }
        +
        +    /**
        +     * @throws SQLException If failed.
        +     */
        +    public void testBatchDeleteExceptionPrepared() throws SQLException {
        +        final int BATCH_SIZE = 7;
        +
        +        final int FAILED_IDX = 5;
        +
        +        populateTable(BATCH_SIZE);
        +
        +        pstmt = conn.prepareStatement("delete from Person where id = ?;");
        +
        +        assert FAILED_IDX + 2 == BATCH_SIZE;
        +
        +        for (int i = 0; i < FAILED_IDX; ++i) {
        +            pstmt.setInt(1, i);
        +
        +            pstmt.addBatch();
        +        }
        +
        +        pstmt.setString(1, "FAIL");
        +
        +        pstmt.addBatch();
        +
        +        pstmt.setInt(1, FAILED_IDX + 1);
        +
        +        pstmt.addBatch();
        +
        +        try {
        +            int[] res = pstmt.executeBatch();
        +
        +            fail("BatchUpdateException must be thrown res=" + Arrays.toString(res));
        +        }
        +        catch(BatchUpdateException e) {
        +            int [] updCnts = e.getUpdateCounts();
        +
        +            assertEquals("Invalid update counts size", BATCH_SIZE, updCnts.length);
        +
        +            for (int i = 0; i < BATCH_SIZE; ++i)
        +                assertEquals("Invalid update count", i != FAILED_IDX ? 1 : Statement.EXECUTE_FAILED, updCnts[i]);
        +
        +            if (!e.getMessage().contains("Data conversion error converting \"FAIL\"")) {
        +                log.error("Invalid exception: ", e);
        +
        +                fail();
        +            }
        +
        +            assertEquals("Invalid SQL state.", SqlStateCode.INTERNAL_ERROR, e.getSQLState());
        +            assertEquals("Invalid error code.", IgniteQueryErrorCode.UNKNOWN, e.getErrorCode());
        +
        +            //assertEquals("Invalid SQL state.", SqlStateCode.CONVERSION_FAILED, e.getSQLState());
        +            //assertEquals("Invalid error code.", IgniteQueryErrorCode.CONVERSION_FAILED, e.getErrorCode());
                 }
             }
         
        diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinErrorsSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinErrorsSelfTest.java
        index 462c98d64fd01..90588c4e6e50b 100644
        --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinErrorsSelfTest.java
        +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinErrorsSelfTest.java
        @@ -25,6 +25,8 @@
         import org.apache.ignite.jdbc.JdbcErrorsAbstractSelfTest;
         import org.apache.ignite.lang.IgniteCallable;
         
        +import static org.junit.Assert.assertArrayEquals;
        +
         /**
          * Test SQLSTATE codes propagation with thin client driver.
          */
        @@ -96,10 +98,9 @@ public void testBatchUpdateException() throws SQLException {
                         fail("BatchUpdateException is expected");
                     }
                     catch (BatchUpdateException e) {
        -                assertEquals(2, e.getUpdateCounts().length);
        +                assertEquals(3, e.getUpdateCounts().length);
         
        -                for (int updCnt : e.getUpdateCounts())
        -                    assertEquals(1, updCnt);
        +                assertArrayEquals("", new int[] {1, 1, Statement.EXECUTE_FAILED}, e.getUpdateCounts());
         
                         assertEquals("42000", e.getSQLState());
         
        diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/SqlFieldsQueryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/SqlFieldsQueryEx.java
        index c5f786ec45a0c..ff10e3d63da9a 100644
        --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/SqlFieldsQueryEx.java
        +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/SqlFieldsQueryEx.java
        @@ -17,8 +17,11 @@
         
         package org.apache.ignite.internal.processors.cache.query;
         
        +import java.util.ArrayList;
        +import java.util.List;
         import java.util.concurrent.TimeUnit;
         import org.apache.ignite.cache.query.SqlFieldsQuery;
        +import org.apache.ignite.internal.util.typedef.F;
         
         /**
          * {@link SqlFieldsQuery} with experimental and internal features.
        @@ -33,12 +36,16 @@ public final class SqlFieldsQueryEx extends SqlFieldsQuery {
             /** Whether server side DML should be enabled. */
             private boolean skipReducerOnUpdate;
         
        +    /** Batched arguments list. */
        +    private List batchedArgs;
        +
             /**
              * @param sql SQL query.
              * @param isQry Flag indicating whether this object denotes a query or an update operation.
              */
             public SqlFieldsQueryEx(String sql, Boolean isQry) {
                 super(sql);
        +
                 this.isQry = isQry;
             }
         
        @@ -50,6 +57,7 @@ private SqlFieldsQueryEx(SqlFieldsQueryEx qry) {
         
                 this.isQry = qry.isQry;
                 this.skipReducerOnUpdate = qry.skipReducerOnUpdate;
        +        this.batchedArgs = qry.batchedArgs;
             }
         
             /**
        @@ -155,4 +163,41 @@ public boolean isSkipReducerOnUpdate() {
             @Override public SqlFieldsQuery copy() {
                 return new SqlFieldsQueryEx(this);
             }
        +
        +    /**
        +     * Adds batched arguments.
        +     *
        +     * @param args Batched arguments.
        +     */
        +    public void addBatchedArgs(Object[] args) {
        +        if (this.batchedArgs == null)
        +            this.batchedArgs = new ArrayList<>();
        +
        +        this.batchedArgs.add(args);
        +    }
        +
        +    /**
        +     * Clears batched arguments.
        +     */
        +    public void clearBatchedArgs() {
        +        this.batchedArgs = null;
        +    }
        +
        +    /**
        +     * Returns batched arguments.
        +     *
        +     * @return Batched arguments.
        +     */
        +    public List batchedArguments() {
        +        return this.batchedArgs;
        +    }
        +
        +    /**
        +     * Checks if query is batched.
        +     *
        +     * @return {@code True} if batched.
        +     */
        +    public boolean isBatched() {
        +        return !F.isEmpty(batchedArgs);
        +    }
         }
        diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java
        index b6e21a2db558c..8fe621a6c306b 100644
        --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java
        +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java
        @@ -17,12 +17,14 @@
         
         package org.apache.ignite.internal.processors.odbc.jdbc;
         
        +import java.sql.BatchUpdateException;
         import java.sql.ParameterMetaData;
        +import java.sql.Statement;
         import java.util.ArrayList;
        -import java.util.Arrays;
         import java.util.Collection;
         import java.util.Collections;
         import java.util.HashSet;
        +import java.util.Iterator;
         import java.util.LinkedHashSet;
         import java.util.List;
         import java.util.Map;
        @@ -53,8 +55,10 @@
         import org.apache.ignite.internal.processors.query.QueryUtils;
         import org.apache.ignite.internal.util.GridSpinBusyLock;
         import org.apache.ignite.internal.util.typedef.F;
        +import org.apache.ignite.internal.util.typedef.X;
         import org.apache.ignite.internal.util.typedef.internal.S;
         import org.apache.ignite.internal.util.typedef.internal.U;
        +import org.apache.ignite.lang.IgniteBiTuple;
         
         import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest.CMD_CONTINUE;
         import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest.CMD_FINISHED_EOF;
        @@ -552,19 +556,21 @@ private ClientListenerResponse executeBatch(JdbcBatchExecuteRequest req) {
                 if (F.isEmpty(schemaName))
                     schemaName = QueryUtils.DFLT_SCHEMA;
         
        -        int successQueries = 0;
        -        int updCnts[] = new int[req.queries().size()];
        +        int qryCnt = req.queries().size();
         
        -        try {
        -            String sql = null;
        +        List updCntsAcc = new ArrayList<>(qryCnt);
        +
        +        // Send back only the first error. Others will be written to the log.
        +        IgniteBiTuple firstErr = new IgniteBiTuple<>();
         
        -            for (JdbcQuery q : req.queries()) {
        -                if (q.sql() != null)
        -                    sql = q.sql();
        +        SqlFieldsQueryEx qry = null;
         
        -                SqlFieldsQuery qry = new SqlFieldsQueryEx(sql, false);
        +        for (JdbcQuery q : req.queries()) {
        +            if (q.sql() != null) { // If we have a new query string in the batch,
        +                if (qry != null) // then execute the previous sub-batch and create a new SqlFieldsQueryEx.
        +                    executeBatchedQuery(qry, updCntsAcc, firstErr);
         
        -                qry.setArgs(q.args());
        +                qry = new SqlFieldsQueryEx(q.sql(), false);
         
                         qry.setDistributedJoins(distributedJoins);
                         qry.setEnforceJoinOrder(enforceJoinOrder);
        @@ -573,41 +579,91 @@ private ClientListenerResponse executeBatch(JdbcBatchExecuteRequest req) {
                         qry.setLazy(lazy);
         
                         qry.setSchema(schemaName);
        +            }
         
        -                FieldsQueryCursor> qryCur = F.first(ctx.query()
        -                    .querySqlFieldsNoCache(qry, true, true));
        +            assert qry != null;
        +
        +            qry.addBatchedArgs(q.args());
        +        }
         
        -                if (qryCur instanceof BulkLoadContextCursor)
        +        if (qry != null)
        +            executeBatchedQuery(qry, updCntsAcc, firstErr);
        +
        +        int updCnts[] = U.toIntArray(updCntsAcc);
        +
        +        if (firstErr.isEmpty())
        +            return new JdbcResponse(new JdbcBatchExecuteResult(updCnts, ClientListenerResponse.STATUS_SUCCESS, null));
        +        else
        +            return new JdbcResponse(new JdbcBatchExecuteResult(updCnts, firstErr.getKey(), firstErr.getValue()));
        +    }
        +
        +    /**
        +     * Executes query and updates result counters.
        +     *
        +     * @param qry Query.
        +     * @param updCntsAcc Per query rows updates counter.
        +     * @param firstErr First error data - code and message.
        +     */
        +    private void executeBatchedQuery(SqlFieldsQueryEx qry, List updCntsAcc,
        +        IgniteBiTuple firstErr) {
        +        try {
        +            List>> qryRes = ctx.query().querySqlFieldsNoCache(qry, true, true);
        +
        +            for (FieldsQueryCursor> cur : qryRes) {
        +                if (cur instanceof BulkLoadContextCursor)
                             throw new IgniteSQLException("COPY command cannot be executed in batch mode.");
         
        -                assert !((QueryCursorImpl)qryCur).isQuery();
        +                assert !((QueryCursorImpl)cur).isQuery();
         
        -                List> items = qryCur.getAll();
        +                Iterator> it = cur.iterator();
         
        -                updCnts[successQueries++] = ((Long)items.get(0).get(0)).intValue();
        -            }
        +                if (it.hasNext()) {
        +                    int val = ((Long)it.next().get(0)).intValue();
         
        -            return new JdbcResponse(new JdbcBatchExecuteResult(updCnts, ClientListenerResponse.STATUS_SUCCESS, null));
        +                    updCntsAcc.add(val);
        +                }
        +            }
                 }
                 catch (Exception e) {
        -            U.error(log, "Failed to execute batch query [reqId=" + req.requestId() + ", req=" + req + ']', e);
        -
                     int code;
         
                     String msg;
         
                     if (e instanceof IgniteSQLException) {
        -                code = ((IgniteSQLException) e).statusCode();
        +                BatchUpdateException batchCause = X.cause(e, BatchUpdateException.class);
         
        -                msg = e.getMessage();
        +                if (batchCause != null) {
        +                    int[] updCntsOnErr = batchCause.getUpdateCounts();
        +
        +                    for (int i = 0; i < updCntsOnErr.length; i++)
        +                        updCntsAcc.add(updCntsOnErr[i]);
        +
        +                    msg = batchCause.getMessage();
        +
        +                    code = batchCause.getErrorCode();
        +                }
        +                else {
        +                    for (int i = 0; i < qry.batchedArguments().size(); i++)
        +                        updCntsAcc.add(Statement.EXECUTE_FAILED);
        +
        +                    msg = e.getMessage();
        +
        +                    code = ((IgniteSQLException)e).statusCode();
        +                }
                     }
                     else {
        -                code = IgniteQueryErrorCode.UNKNOWN;
        +                for (int i = 0; i < qry.batchedArguments().size(); i++)
        +                    updCntsAcc.add(Statement.EXECUTE_FAILED);
         
                         msg = e.getMessage();
        +
        +                code = IgniteQueryErrorCode.UNKNOWN;
                     }
         
        -            return new JdbcResponse(new JdbcBatchExecuteResult(Arrays.copyOf(updCnts, successQueries), code, msg));
        +            if (firstErr.isEmpty())
        +                firstErr.set(code, msg);
        +            else
        +                U.error(log, "Failed to execute batch query [qry=" + qry +']', e);
                 }
             }
         
        diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/IgniteSQLException.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/IgniteSQLException.java
        index 2bacc23be379b..2f74601cf6823 100644
        --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/IgniteSQLException.java
        +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/IgniteSQLException.java
        @@ -127,6 +127,15 @@ public int statusCode() {
                 return statusCode;
             }
         
        +    /**
        +     * {@link SQLException#SQLState} getter.
        +     *
        +     * @return {@link SQLException#SQLState}.
        +     */
        +    public String sqlState() {
        +        return sqlState;
        +    }
        +
             /**
              * @return JDBC exception containing details from this instance.
              */
        diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java
        index 6732e7b8b37b9..ce946e7def09e 100644
        --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java
        +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java
        @@ -17,11 +17,14 @@
         
         package org.apache.ignite.internal.processors.query.h2;
         
        +import java.sql.BatchUpdateException;
         import java.sql.Connection;
         import java.sql.PreparedStatement;
         import java.sql.SQLException;
        +import java.sql.Statement;
         import java.util.ArrayList;
         import java.util.Arrays;
        +import java.util.Collection;
         import java.util.Collections;
         import java.util.Iterator;
         import java.util.LinkedHashMap;
        @@ -49,6 +52,7 @@
         import org.apache.ignite.internal.processors.cache.GridCacheContext;
         import org.apache.ignite.internal.processors.cache.QueryCursorImpl;
         import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
        +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx;
         import org.apache.ignite.internal.processors.odbc.SqlStateCode;
         import org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator;
         import org.apache.ignite.internal.processors.query.GridQueryCancel;
        @@ -57,6 +61,7 @@
         import org.apache.ignite.internal.processors.query.IgniteSQLException;
         import org.apache.ignite.internal.processors.query.h2.dml.DmlBatchSender;
         import org.apache.ignite.internal.processors.query.h2.dml.DmlDistributedPlanInfo;
        +import org.apache.ignite.internal.processors.query.h2.dml.DmlUtils;
         import org.apache.ignite.internal.processors.query.h2.dml.UpdateMode;
         import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan;
         import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlanBuilder;
        @@ -70,7 +75,6 @@
         import org.apache.ignite.internal.util.typedef.F;
         import org.apache.ignite.internal.util.typedef.T3;
         import org.apache.ignite.internal.util.typedef.X;
        -import org.apache.ignite.internal.util.typedef.internal.CU;
         import org.apache.ignite.internal.util.typedef.internal.U;
         import org.apache.ignite.lang.IgniteBiTuple;
         import org.apache.ignite.lang.IgniteInClosure;
        @@ -82,6 +86,7 @@
         import org.h2.command.dml.Update;
         import org.jetbrains.annotations.Nullable;
         
        +import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.DUPLICATE_KEY;
         import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.createJdbcSqlException;
         import static org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing.UPDATE_RESULT_META;
         
        @@ -158,21 +163,7 @@ private UpdateResult updateSqlFields(String schemaName, Connection conn, Prepare
                 GridCacheContext cctx = plan.cacheContext();
         
                 for (int i = 0; i < DFLT_DML_RERUN_ATTEMPTS; i++) {
        -            CacheOperationContext opCtx = cctx.operationContextPerCall();
        -
        -            // Force keepBinary for operation context to avoid binary deserialization inside entry processor
        -            if (cctx.binaryMarshaller()) {
        -                CacheOperationContext newOpCtx = null;
        -
        -                if (opCtx == null)
        -                    // Mimics behavior of GridCacheAdapter#keepBinary and GridCacheProxyImpl#keepBinary
        -                    newOpCtx = new CacheOperationContext(false, null, true, null, false, null, false);
        -                else if (!opCtx.isKeepBinary())
        -                    newOpCtx = opCtx.keepBinary();
        -
        -                if (newOpCtx != null)
        -                    cctx.operationContextPerCall(newOpCtx);
        -            }
        +            CacheOperationContext opCtx = setKeepBinaryContext(cctx);
         
                     UpdateResult r;
         
        @@ -200,6 +191,125 @@ else if (items == 0L)
                 return new UpdateResult(items, errKeys);
             }
         
        +    /**
        +     * Execute DML statement, possibly with few re-attempts in case of concurrent data modifications.
        +     *
        +     * @param schemaName Schema.
        +     * @param conn Connection.
        +     * @param prepared Prepared statement.
        +     * @param fieldsQry Original query.
        +     * @param loc Query locality flag.
        +     * @param filters Cache name and key filter.
        +     * @param cancel Cancel.
        +     * @return Update result (modified items count and failed keys).
        +     * @throws IgniteCheckedException if failed.
        +     */
        +    private Collection updateSqlFieldsBatched(String schemaName, Connection conn, Prepared prepared,
        +        SqlFieldsQueryEx fieldsQry, boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel)
        +        throws IgniteCheckedException {
        +        List argss = fieldsQry.batchedArguments();
        +
        +        UpdatePlan plan = getPlanForStatement(schemaName, conn, prepared, fieldsQry, loc, null);
        +
        +        if (plan.hasRows() && plan.mode() == UpdateMode.INSERT) {
        +            GridCacheContext cctx = plan.cacheContext();
        +
        +            CacheOperationContext opCtx = setKeepBinaryContext(cctx);
        +
        +            try {
        +                List>> cur = plan.createRows(argss);
        +
        +                List res = processDmlSelectResultBatched(plan, cur, fieldsQry.getPageSize());
        +
        +                return res;
        +            }
        +            finally {
        +                cctx.operationContextPerCall(opCtx);
        +            }
        +        }
        +        else {
        +            // Fallback to previous mode.
        +            Collection ress = new ArrayList<>(argss.size());
        +
        +            SQLException batchException = null;
        +
        +            int[] cntPerRow = new int[argss.size()];
        +
        +            int cntr = 0;
        +
        +            for (Object[] args : argss) {
        +                SqlFieldsQueryEx qry0 = (SqlFieldsQueryEx)fieldsQry.copy();
        +
        +                qry0.clearBatchedArgs();
        +                qry0.setArgs(args);
        +
        +                UpdateResult res;
        +
        +                try {
        +                    res = updateSqlFields(schemaName, conn, prepared, qry0, loc, filters, cancel);
        +
        +                    cntPerRow[cntr++] = (int)res.counter();
        +
        +                    ress.add(res);
        +                }
        +                catch (Exception e ) {
        +                    String sqlState;
        +
        +                    int code;
        +
        +                    if (e instanceof IgniteSQLException) {
        +                        sqlState = ((IgniteSQLException)e).sqlState();
        +
        +                        code = ((IgniteSQLException)e).statusCode();
        +                    } else {
        +                        sqlState = SqlStateCode.INTERNAL_ERROR;
        +
        +                        code = IgniteQueryErrorCode.UNKNOWN;
        +                    }
        +
        +                    batchException = chainException(batchException, new SQLException(e.getMessage(), sqlState, code, e));
        +
        +                    cntPerRow[cntr++] = Statement.EXECUTE_FAILED;
        +                }
        +            }
        +
        +            if (batchException != null) {
        +                BatchUpdateException e = new BatchUpdateException(batchException.getMessage(),
        +                    batchException.getSQLState(), batchException.getErrorCode(), cntPerRow, batchException);
        +
        +                throw new IgniteCheckedException(e);
        +            }
        +
        +            return ress;
        +        }
        +    }
        +
        +    /**
        +     * Makes current operation context as keepBinary.
        +     *
        +     * @param cctx Cache context.
        +     * @return Old operation context.
        +     */
        +    private CacheOperationContext setKeepBinaryContext(GridCacheContext cctx) {
        +        CacheOperationContext opCtx = cctx.operationContextPerCall();
        +
        +        // Force keepBinary for operation context to avoid binary deserialization inside entry processor
        +        if (cctx.binaryMarshaller()) {
        +            CacheOperationContext newOpCtx = null;
        +
        +            if (opCtx == null)
        +                // Mimics behavior of GridCacheAdapter#keepBinary and GridCacheProxyImpl#keepBinary
        +                newOpCtx = new CacheOperationContext(false, null, true, null, false, null, false);
        +            else if (!opCtx.isKeepBinary())
        +                newOpCtx = opCtx.keepBinary();
        +
        +            if (newOpCtx != null)
        +                cctx.operationContextPerCall(newOpCtx);
        +        }
        +
        +        return opCtx;
        +    }
        +
             /**
              * @param schemaName Schema.
              * @param c Connection.
        @@ -210,18 +320,43 @@ else if (items == 0L)
              * @throws IgniteCheckedException if failed.
              */
             @SuppressWarnings("unchecked")
        -    QueryCursorImpl> updateSqlFieldsDistributed(String schemaName, Connection c, Prepared p,
        +    List>> updateSqlFieldsDistributed(String schemaName, Connection c, Prepared p,
                 SqlFieldsQuery fieldsQry, GridQueryCancel cancel) throws IgniteCheckedException {
        -        UpdateResult res = updateSqlFields(schemaName, c, p, fieldsQry, false, null, cancel);
        +        if (DmlUtils.isBatched(fieldsQry)) {
        +            Collection ress = updateSqlFieldsBatched(schemaName, c, p, (SqlFieldsQueryEx)fieldsQry,
        +                false, null, cancel);
        +
        +            ArrayList>> resCurs = new ArrayList<>(ress.size());
        +
        +            for (UpdateResult res : ress) {
        +                checkUpdateResult(res);
        +
        +                QueryCursorImpl> resCur = (QueryCursorImpl>)new QueryCursorImpl(Collections.singletonList
        +                    (Collections.singletonList(res.counter())), cancel, false);
        +
        +                resCur.fieldsMeta(UPDATE_RESULT_META);
        +
        +                resCurs.add(resCur);
        +            }
        +
        +            return resCurs;
        +        }
        +        else {
        +            UpdateResult res = updateSqlFields(schemaName, c, p, fieldsQry, false, null, cancel);
        +
        +            ArrayList>> resCurs = new ArrayList<>(1);
         
        -        checkUpdateResult(res);
        +            checkUpdateResult(res);
         
        -        QueryCursorImpl> resCur = (QueryCursorImpl>)new QueryCursorImpl(Collections.singletonList
        -            (Collections.singletonList(res.counter())), cancel, false);
        +            QueryCursorImpl> resCur = (QueryCursorImpl>)new QueryCursorImpl(Collections.singletonList
        +                (Collections.singletonList(res.counter())), cancel, false);
         
        -        resCur.fieldsMeta(UPDATE_RESULT_META);
        +            resCur.fieldsMeta(UPDATE_RESULT_META);
         
        -        return resCur;
        +            resCurs.add(resCur);
        +
        +            return resCurs;
        +        }
             }
         
             /**
        @@ -411,6 +546,30 @@ else if (plan.hasRows())
                 return processDmlSelectResult(cctx, plan, cur, pageSize);
             }
         
        +    /**
        +     * Performs the planned update.
        +     * @param plan Update plan.
        +     * @param rows Rows to update.
        +     * @param pageSize Page size.
        +     * @return {@link List} of update results.
        +     * @throws IgniteCheckedException If failed.
        +     */
        +    private List processDmlSelectResultBatched(UpdatePlan plan, List>> rows, int pageSize)
        +        throws IgniteCheckedException {
        +        switch (plan.mode()) {
        +            case MERGE:
        +                // TODO
        +                throw new IgniteCheckedException("Unsupported, fix");
        +
        +            case INSERT:
        +                return doInsertBatched(plan, rows, pageSize);
        +
        +            default:
        +                throw new IgniteSQLException("Unexpected batched DML operation [mode=" + plan.mode() + ']',
        +                    IgniteQueryErrorCode.UNEXPECTED_OPERATION);
        +        }
        +    }
        +
             /**
              * @param cctx Cache context.
              * @param plan Update plan.
        @@ -501,7 +660,7 @@ private UpdateResult doDistributedUpdate(String schemaName, SqlFieldsQuery field
             @SuppressWarnings({"unchecked", "ConstantConditions", "ThrowableResultOfMethodCallIgnored"})
             private UpdateResult doDelete(GridCacheContext cctx, Iterable> cursor, int pageSize)
                 throws IgniteCheckedException {
        -        DmlBatchSender sender = new DmlBatchSender(cctx, pageSize);
        +        DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);
         
                 for (List row : cursor) {
                     if (row.size() != 2) {
        @@ -510,7 +669,7 @@ private UpdateResult doDelete(GridCacheContext cctx, Iterable> cursor, i
                         continue;
                     }
         
        -            sender.add(row.get(0), new ModifyingEntryProcessor(row.get(1), RMV));
        +            sender.add(row.get(0), new ModifyingEntryProcessor(row.get(1), RMV),  0);
                 }
         
                 sender.flush();
        @@ -549,7 +708,7 @@ private UpdateResult doUpdate(UpdatePlan plan, Iterable> cursor, int pag
                 throws IgniteCheckedException {
                 GridCacheContext cctx = plan.cacheContext();
         
        -        DmlBatchSender sender = new DmlBatchSender(cctx, pageSize);
        +        DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);
         
                 for (List row : cursor) {
                     T3 row0 = plan.processRowForUpdate(row);
        @@ -558,7 +717,7 @@ private UpdateResult doUpdate(UpdatePlan plan, Iterable> cursor, int pag
                     Object oldVal = row0.get2();
                     Object newVal = row0.get3();
         
        -            sender.add(key, new ModifyingEntryProcessor(oldVal, new EntryValueUpdater(newVal)));
        +            sender.add(key, new ModifyingEntryProcessor(oldVal, new EntryValueUpdater(newVal)), 0);
                 }
         
                 sender.flush();
        @@ -649,16 +808,16 @@ private long doInsert(UpdatePlan plan, Iterable> cursor, int pageSize) t
                         return 1;
                     else
                         throw new IgniteSQLException("Duplicate key during INSERT [key=" + t.getKey() + ']',
        -                    IgniteQueryErrorCode.DUPLICATE_KEY);
        +                    DUPLICATE_KEY);
                 }
                 else {
                     // Keys that failed to INSERT due to duplication.
        -            DmlBatchSender sender = new DmlBatchSender(cctx, pageSize);
        +            DmlBatchSender sender = new DmlBatchSender(cctx, pageSize, 1);
         
                     for (List row : cursor) {
                         final IgniteBiTuple keyValPair = plan.processRow(row);
         
        -                sender.add(keyValPair.getKey(), new InsertEntryProcessor(keyValPair.getValue()));
        +                sender.add(keyValPair.getKey(), new InsertEntryProcessor(keyValPair.getValue()),  0);
                     }
         
                     sender.flush();
        @@ -684,6 +843,117 @@ private long doInsert(UpdatePlan plan, Iterable> cursor, int pageSize) t
                 }
             }
         
        +    /**
        +     * Execute INSERT statement plan.
        +     *
        +     * @param plan Plan to execute.
        +     * @param cursor Cursor to take inserted data from. I.e. list of batch arguments for each query.
        +     * @param pageSize Batch size for streaming, anything <= 0 for single page operations.
        +     * @return Number of items affected.
        +     * @throws IgniteCheckedException if failed, particularly in case of duplicate keys.
        +     */
        +    private List doInsertBatched(UpdatePlan plan, List>> cursor, int pageSize)
        +        throws IgniteCheckedException {
        +        GridCacheContext cctx = plan.cacheContext();
        +
        +        DmlBatchSender snd = new DmlBatchSender(cctx, pageSize, cursor.size());
        +
        +        int rowNum = 0;
        +
        +        SQLException resEx = null;
        +
        +        for (List> qryRow : cursor) {
        +            for (List row : qryRow) {
        +                try {
        +                    final IgniteBiTuple keyValPair = plan.processRow(row);
        +
        +                    snd.add(keyValPair.getKey(), new InsertEntryProcessor(keyValPair.getValue()), rowNum);
        +                }
        +                catch (Exception e) {
        +                    String sqlState;
        +
        +                    int code;
        +
        +                    if (e instanceof IgniteSQLException) {
        +                        sqlState = ((IgniteSQLException)e).sqlState();
        +
        +                        code = ((IgniteSQLException)e).statusCode();
        +                    } else {
        +                        sqlState = SqlStateCode.INTERNAL_ERROR;
        +
        +                        code = IgniteQueryErrorCode.UNKNOWN;
        +                    }
        +
        +                    resEx = chainException(resEx, new SQLException(e.getMessage(), sqlState, code, e));
        +
        +                    snd.setFailed(rowNum);
        +                }
        +            }
        +
        +            rowNum++;
        +        }
        +
        +        try {
        +            snd.flush();
        +        }
        +        catch (Exception e) {
        +            resEx = chainException(resEx, new SQLException(e.getMessage(), SqlStateCode.INTERNAL_ERROR,
        +                IgniteQueryErrorCode.UNKNOWN, e));
        +        }
        +
        +        resEx = chainException(resEx, snd.error());
        +
        +        if (!F.isEmpty(snd.failedKeys())) {
        +            SQLException e = new SQLException("Failed to INSERT some keys because they are already in cache [keys=" +
        +                snd.failedKeys() + ']', SqlStateCode.CONSTRAINT_VIOLATION, DUPLICATE_KEY);
        +
        +            resEx = chainException(resEx, e);
        +        }
        +
        +        if (resEx != null) {
        +            BatchUpdateException e = new BatchUpdateException(resEx.getMessage(), resEx.getSQLState(),
        +                resEx.getErrorCode(), snd.perRowCounterAsArray(), resEx);
        +
        +            throw new IgniteCheckedException(e);
        +        }
        +
        +        int[] cntPerRow = snd.perRowCounterAsArray();
        +
        +        List res = new ArrayList<>(cntPerRow.length);
        +
        +        for (int i = 0; i < cntPerRow.length; i++ ) {
        +            int cnt = cntPerRow[i];
        +
        +            res.add(new UpdateResult(cnt , X.EMPTY_OBJECT_ARRAY));
        +        }
        +
        +        return res;
        +    }
        +
        +    /**
        +     * Adds exception to the chain.
        +     *
        +     * @param main Exception to add another exception to.
        +     * @param add Exception which should be added to chain.
        +     * @return Chained exception.
        +     */
        +    private SQLException chainException(SQLException main, SQLException add) {
        +        if (main == null) {
        +            if (add != null) {
        +                main = add;
        +
        +                return main;
        +            }
        +            else
        +                return null;
        +        }
        +        else {
        +            main.setNextException(add);
        +
        +            return main;
        +        }
        +    }
        +
             /**
              *
              * @param schemaName Schema name.
        diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java
        index 710103f91405a..5164862d6408e 100644
        --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java
        +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java
        @@ -103,6 +103,7 @@
         import org.apache.ignite.internal.processors.query.h2.database.io.H2InnerIO;
         import org.apache.ignite.internal.processors.query.h2.database.io.H2LeafIO;
         import org.apache.ignite.internal.processors.query.h2.ddl.DdlStatementsProcessor;
        +import org.apache.ignite.internal.processors.query.h2.dml.DmlUtils;
         import org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode;
         import org.apache.ignite.internal.processors.query.h2.opt.GridH2DefaultTableEngine;
         import org.apache.ignite.internal.processors.query.h2.opt.GridH2IndexBase;
        @@ -1004,7 +1005,7 @@ private ResultSet executeSqlQuery(final Connection conn, final PreparedStatement
                     if (e.getErrorCode() == ErrorCode.STATEMENT_WAS_CANCELED)
                         throw new QueryCancelledException();
         
        -            throw new IgniteCheckedException("Failed to execute SQL query.", e);
        +            throw new IgniteCheckedException("Failed to execute SQL query. " + e.getMessage(), e);
                 }
                 finally {
                     if (timeoutMillis > 0)
        @@ -1509,7 +1510,7 @@ private List>> tryQueryDistributedSqlFieldsNative(Stri
         
                             int paramsCnt = prepared.getParameters().size();
         
        -                    if (paramsCnt > 0) {
        +                    if (!DmlUtils.isBatched(qry) && paramsCnt > 0) {
                                 if (argsOrig == null || argsOrig.length < firstArg + paramsCnt) {
                                     throw new IgniteException("Invalid number of query parameters. " +
                                         "Cannot find " + (argsOrig.length + 1 - firstArg) + " parameter.");
        @@ -1558,7 +1559,7 @@ private List>> tryQueryDistributedSqlFieldsNative(Stri
                         if (twoStepQry == null) {
                             if (DmlStatementsProcessor.isDmlStatement(prepared)) {
                                 try {
        -                            res.add(dmlProc.updateSqlFieldsDistributed(schemaName, c, prepared,
        +                            res.addAll(dmlProc.updateSqlFieldsDistributed(schemaName, c, prepared,
                                         qry.copy().setSql(sqlQry).setArgs(args), cancel));
         
                                     continue;
        diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlBatchSender.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlBatchSender.java
        index a4a60c3b20ea5..34b50b16e48b0 100644
        --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlBatchSender.java
        +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlBatchSender.java
        @@ -17,19 +17,10 @@
         
         package org.apache.ignite.internal.processors.query.h2.dml;
         
        -import org.apache.ignite.IgniteCheckedException;
        -import org.apache.ignite.cluster.ClusterNode;
        -import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
        -import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
        -import org.apache.ignite.internal.processors.cache.GridCacheContext;
        -import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
        -import org.apache.ignite.internal.util.typedef.F;
        -
        -import javax.cache.processor.EntryProcessor;
        -import javax.cache.processor.EntryProcessorException;
        -import javax.cache.processor.EntryProcessorResult;
         import java.sql.SQLException;
        +import java.sql.Statement;
         import java.util.ArrayList;
        +import java.util.Collection;
         import java.util.Collections;
         import java.util.HashMap;
         import java.util.LinkedHashSet;
        @@ -37,6 +28,17 @@
         import java.util.Map;
         import java.util.Set;
         import java.util.UUID;
        +import javax.cache.processor.EntryProcessor;
        +import javax.cache.processor.EntryProcessorException;
        +import javax.cache.processor.EntryProcessorResult;
        +import org.apache.ignite.IgniteCheckedException;
        +import org.apache.ignite.cluster.ClusterNode;
        +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
        +import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
        +import org.apache.ignite.internal.processors.cache.GridCacheContext;
        +import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
        +import org.apache.ignite.internal.processors.odbc.SqlStateCode;
        +import org.apache.ignite.internal.util.typedef.F;
         
         import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.createJdbcSqlException;
         
        @@ -51,7 +53,7 @@ public class DmlBatchSender {
             private final int size;
         
             /** Batches. */
        -    private final Map>> batches = new HashMap<>();
        +    private final Map batches = new HashMap<>();
         
             /** Result count. */
             private long updateCnt;
        @@ -62,15 +64,20 @@ public class DmlBatchSender {
             /** Exception. */
             private SQLException err;
         
        +    /** Per row updates counter */
        +    private int[] cntPerRow;
        +
             /**
              * Constructor.
              *
              * @param cctx Cache context.
              * @param size Batch.
        +     * @param qryNum Number of queries.
              */
        -    public DmlBatchSender(GridCacheContext cctx, int size) {
        +    public DmlBatchSender(GridCacheContext cctx, int size, int qryNum) {
                 this.cctx = cctx;
                 this.size = size;
        +        cntPerRow = new int[qryNum];
             }
         
             /**
        @@ -78,39 +85,46 @@ public DmlBatchSender(GridCacheContext cctx, int size) {
              *
              * @param key Key.
              * @param proc Processor.
        +     * @param rowNum Row number.
        +     * @throws IgniteCheckedException If failed.
              */
        -    public void add(Object key, EntryProcessor proc) throws IgniteCheckedException {
        +    public void add(Object key, EntryProcessor proc, int rowNum)
        +        throws IgniteCheckedException {
        +        assert key != null;
        +        assert proc != null;
        +
                 ClusterNode node = cctx.affinity().primaryByKey(key, AffinityTopologyVersion.NONE);
         
                 if (node == null)
                     throw new IgniteCheckedException("Failed to map key to node.");
         
        +        assert rowNum < cntPerRow.length;
        +
                 UUID nodeId = node.id();
         
        -        Map> batch = batches.get(nodeId);
        +        Batch batch = batches.get(nodeId);
         
                 if (batch == null) {
        -            batch = new HashMap<>();
        +            batch = new Batch();
         
                     batches.put(nodeId, batch);
                 }
         
        -        batch.put(key, proc);
        -
        -        if (batch.size() >= size) {
        +        if (batch.containsKey(key)) { // Force cache update if duplicates found.
                     sendBatch(batch);
        -
        -            batch.clear();
                 }
        +
        +        batch.put(key, rowNum, proc);
        +
        +        if (batch.size() >= size)
        +            sendBatch(batch);
             }
         
             /**
              * Flush any remaining entries.
        -     *
        -     * @throws IgniteCheckedException If failed.
              */
        -    public void flush() throws IgniteCheckedException {
        -        for (Map> batch : batches.values()) {
        +    public void flush() {
        +        for (Batch batch : batches.values()) {
                     if (!batch.isEmpty())
                         sendBatch(batch);
                 }
        @@ -137,16 +151,34 @@ public SQLException error() {
                 return err;
             }
         
        +    /**
        +     * Returns per row updates counter as array.
        +     *
        +     * @return Per row updates counter as array.
        +     */
        +    public int[] perRowCounterAsArray() {
        +        return cntPerRow;
        +    }
        +
        +    /**
        +     * Sets row as failed.
        +     *
        +     * @param rowNum Row number.
        +     */
        +    public void setFailed(int rowNum) {
        +        cntPerRow[rowNum] = Statement.EXECUTE_FAILED;
        +    }
        +
             /**
              * Send the batch.
              *
              * @param batch Batch.
        -     * @throws IgniteCheckedException If failed.
              */
        -    private void sendBatch(Map> batch)
        -        throws IgniteCheckedException {
        +    private void sendBatch(Batch batch) {
                 DmlPageProcessingResult pageRes = processPage(cctx, batch);
         
        +        batch.clear();
        +
                 updateCnt += pageRes.count();
         
                 if (failedKeys == null)
        @@ -156,33 +188,48 @@ private void sendBatch(Map> batc
         
                 if (pageRes.error() != null) {
                     if (err == null)
        -                err = error();
        +                err = pageRes.error();
                     else
        -                err.setNextException(error());
        +                err.setNextException(pageRes.error());
                 }
             }
         
             /**
              * Execute given entry processors and collect errors, if any.
              * @param cctx Cache context.
        -     * @param rows Rows to process.
        +     * @param batch Rows to process.
              * @return Triple [number of rows actually changed; keys that failed to update (duplicates or concurrently
              *     updated ones); chain of exceptions for all keys whose processing resulted in error, or null for no errors].
        -     * @throws IgniteCheckedException If failed.
              */
             @SuppressWarnings({"unchecked", "ConstantConditions"})
        -    private static DmlPageProcessingResult processPage(GridCacheContext cctx,
        -        Map> rows) throws IgniteCheckedException {
        -        Map> res = cctx.cache().invokeAll(rows);
        +    private DmlPageProcessingResult processPage(GridCacheContext cctx, Batch batch) {
        +        Map> res;
         
        -        if (F.isEmpty(res))
        -            return new DmlPageProcessingResult(rows.size(), null, null);
        +        try {
        +            res = cctx.cache().invokeAll(batch.rowProcessors());
        +        }
        +        catch (IgniteCheckedException e) {
        +            for (Integer rowNum : batch.rowNumbers().values()) {
        +                assert rowNum != null;
        +
        +                cntPerRow[rowNum] = Statement.EXECUTE_FAILED;
        +            }
        +
        +            return new DmlPageProcessingResult(0, null,
        +                new SQLException(e.getMessage(), SqlStateCode.INTERNAL_ERROR, IgniteQueryErrorCode.UNKNOWN, e));
        +        }
        +
        +        if (F.isEmpty(res)) {
        +            countAllRows(batch.rowNumbers().values());
         
        -        DmlPageProcessingErrorResult splitRes = splitErrors(res);
        +            return new DmlPageProcessingResult(batch.size(), null, null);
        +        }
        +
        +        DmlPageProcessingErrorResult splitRes = splitErrors(res, batch);
         
                 int keysCnt = splitRes.errorKeys().length;
         
        -        return new DmlPageProcessingResult(rows.size() - keysCnt - splitRes.errorCount(), splitRes.errorKeys(),
        +        return new DmlPageProcessingResult(batch.size() - keysCnt - splitRes.errorCount(), splitRes.errorKeys(),
                     splitRes.error());
             }
         
        @@ -191,12 +238,15 @@ private static DmlPageProcessingResult processPage(GridCacheContext cctx,
              * processing yielded an exception.
              *
              * @param res Result of {@link GridCacheAdapter#invokeAll)}
        +     * @param batch Batch.
              * @return pair [array of duplicated/concurrently modified keys, SQL exception for erroneous keys] (exception is
              * null if all keys are duplicates/concurrently modified ones).
              */
        -    private static DmlPageProcessingErrorResult splitErrors(Map> res) {
        +    private DmlPageProcessingErrorResult splitErrors(Map> res, Batch batch) {
                 Set errKeys = new LinkedHashSet<>(res.keySet());
         
        +        countAllRows(batch.rowNumbers().values());
        +
                 SQLException currSqlEx = null;
         
                 SQLException firstSqlEx = null;
        @@ -225,8 +275,125 @@ private static DmlPageProcessingErrorResult splitErrors(Map rowNums) {
        +        for (Integer rowNum : rowNums) {
        +            assert rowNum != null;
        +
        +            if (cntPerRow[rowNum] > -1)
        +                cntPerRow[rowNum]++;
        +        }
        +    }
        +
        +    /**
        +     * Batch for update.
        +     */
        +    private static class Batch  {
        +        /** Map from keys to row numbers. */
        +        private Map rowNums = new HashMap<>();
        +
        +        /** Map from keys to entry processors. */
        +        private Map> rowProcs = new HashMap<>();
        +
        +        /**
        +         * Checks if batch contains key.
        +         *
        +         * @param key Key.
        +         * @return {@code True} if contains.
        +         */
        +        public boolean containsKey(Object key) {
        +            boolean res = rowNums.containsKey(key);
        +
        +            assert res == rowProcs.containsKey(key);
        +
        +            return res;
        +        }
        +
        +        /**
        +         * Returns batch size.
        +         *
        +         * @return Batch size.
        +         */
        +        public int size() {
        +            int res = rowNums.size();
        +
        +            assert res == rowProcs.size();
        +
        +            return res;
        +        }
        +
        +        /**
        +         * Adds row to batch.
        +         *
        +         * @param key Key.
        +         * @param rowNum Row number.
        +         * @param proc Entry processor.
        +         * @return {@code True} if there was an entry associated with the given key.
        +         */
        +        public boolean put(Object key, Integer rowNum, EntryProcessor proc) {
        +            Integer prevNum = rowNums.put(key, rowNum);
        +            EntryProcessor prevProc = rowProcs.put(key, proc);
        +
        +            assert (prevNum == null) == (prevProc == null);
        +
        +            return prevNum != null;
        +        }
        +
        +        /**
        +         * Clears batch.
        +         */
        +        public void clear() {
        +            assert rowNums.size() == rowProcs.size();
        +
        +            rowNums.clear();
        +            rowProcs.clear();
        +        }
        +
        +        /**
        +         * Checks if batch is empty.
        +         *
        +         * @return {@code True} if empty.
        +         */
        +        public boolean isEmpty() {
        +            assert rowNums.size() == rowProcs.size();
        +
        +            return rowNums.isEmpty();
        +        }
        +
        +        /**
        +         * Row numbers map getter.
        +         *
        +         * @return Row numbers map.
        +         */
        +        public Map rowNumbers() {
        +            return rowNums;
        +        }
        +
        +        /**
        +         * Row processors map getter.
        +         *
        +         * @return Row processors map.
        +         */
        +        public Map> rowProcessors() {
        +            return rowProcs;
        +        }
        +    }
         }
        diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlUtils.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlUtils.java
        index 8d4861ea823ce..03b03d8c6978e 100644
        --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlUtils.java
        +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlUtils.java
        @@ -22,7 +22,9 @@
         import java.sql.Timestamp;
         import java.util.Date;
         import org.apache.ignite.IgniteCheckedException;
        +import org.apache.ignite.cache.query.SqlFieldsQuery;
         import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
        +import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx;
         import org.apache.ignite.internal.processors.query.IgniteSQLException;
         import org.apache.ignite.internal.processors.query.h2.H2Utils;
         import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor;
        @@ -116,6 +118,16 @@ public static Object convert(Object val, GridH2RowDescriptor desc, Class expC
                 }
             }
         
        +    /**
        +     * Check whether query is batched.
        +     *
        +     * @param qry Query.
        +     * @return {@code True} if batched.
        +     */
        +    public static boolean isBatched(SqlFieldsQuery qry) {
        +        return (qry instanceof SqlFieldsQueryEx) && ((SqlFieldsQueryEx)qry).isBatched();
        +    }
        +
             /**
              * Private constructor.
              */
        diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java
        index f8c7a654f25d5..10d485a84c761 100644
        --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java
        +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java
        @@ -380,8 +380,12 @@ public boolean hasRows() {
         
             /**
              * Extract rows from plan without performing any query.
        +     *
              * @param args Original query arguments.
        -     * @return Rows from plan.
        +     * @return {@link List} of rows from the plan for a single query.
        +     * For example, if we have multiple args in a query: 
        + * {@code INSERT INTO person VALUES (k1, v1), (k2, v2), (k3, v3);}
        + * we will get a {@link List} of {@link List} with items {@code {[k1, v1], [k2, v2], [k3, v3]}}. * @throws IgniteCheckedException if failed. */ public List> createRows(Object[] args) throws IgniteCheckedException { @@ -391,6 +395,59 @@ public List> createRows(Object[] args) throws IgniteCheckedException { GridH2RowDescriptor desc = tbl.rowDescriptor(); + extractArgsValues(args, res, desc); + + return res; + } + + /** + * Extract rows from plan without performing any query. + * + * @param argss Batch of arguments. + * @return {@link List} of rows from the plan for each query. + * For example, if we have a batch of queries with multiple args:
        + * + * INSERT INTO person VALUES (k1, v1), (k2, v2), (k3, v3);
        + * INSERT INTO person VALUES (k4, v4), (k5, v5), (k6, v6);
        + *
        + * we will get a {@link List} of {@link List} of {@link List} with items:
        + * + * {[k1, v1], [k2, v2], [k3, v3]},
        + * {[k4, v4], [k5, v5], [k6, v6]}
        + * + * @throws IgniteCheckedException If failed. + */ + public List>> createRows(List argss) throws IgniteCheckedException { + assert rowsNum > 0 && !F.isEmpty(colNames); + assert argss != null; + + List>> resPerQry = new ArrayList<>(argss.size()); + + GridH2RowDescriptor desc = tbl.rowDescriptor(); + + for (Object[] args : argss) { + List> res = new ArrayList<>(); + + resPerQry.add(res); + + extractArgsValues(args, res, desc); + } + + return resPerQry; + } + + /** + * Extracts values from arguments. + * + * @param args Arguments. + * @param res Result list where to put values to. + * @param desc Row descriptor. + * @throws IgniteCheckedException If failed. + */ + private void extractArgsValues(Object[] args, List> res, GridH2RowDescriptor desc) + throws IgniteCheckedException { + assert res != null; + for (List row : rows) { List resRow = new ArrayList<>(); @@ -408,8 +465,6 @@ public List> createRows(Object[] args) throws IgniteCheckedException { res.add(resRow); } - - return res; } /** diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java index 7c5232f280223..bced83667aff4 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java @@ -705,7 +705,7 @@ private static DmlDistributedPlanInfo checkPlanCanBeDistributed(IgniteH2Indexing Connection conn, SqlFieldsQuery fieldsQry, boolean loc, String selectQry, String cacheName) throws IgniteCheckedException { - if (loc || !isSkipReducerOnUpdateQuery(fieldsQry)) + if (loc || !isSkipReducerOnUpdateQuery(fieldsQry) || DmlUtils.isBatched(fieldsQry)) return null; assert conn != null; diff --git 
a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java index 8e994aa88f810..704aae53af14b 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java @@ -752,7 +752,7 @@ public Iterator> query( if (wasCancelled(err)) throw new QueryCancelledException(); // Throw correct exception. - throw new CacheException("Failed to run map query remotely.", err); + throw new CacheException("Failed to run map query remotely." + err.getMessage(), err); } if (state instanceof AffinityTopologyVersion) { From 5c4aae5d5b140a79cf752897c21ccf8948802beb Mon Sep 17 00:00:00 2001 From: Alexander Paschenko Date: Sat, 17 Feb 2018 00:14:48 +0300 Subject: [PATCH 240/243] IGNITE-5571: Decoupled SQL fields query from cache. This closes #2283. 
(cherry picked from commit 53c03c1) --- .../jdbc/suite/IgniteJdbcDriverTestSuite.java | 8 +- .../thin/JdbcThinLocalQueriesSelfTest.java | 73 ++ .../JdbcQueryMultipleStatementsTask.java | 2 +- .../cache/IgniteCacheProxyImpl.java | 2 +- .../odbc/jdbc/JdbcRequestHandler.java | 4 +- .../odbc/odbc/OdbcRequestHandler.java | 9 +- .../processors/query/GridQueryIndexing.java | 18 +- .../processors/query/GridQueryProcessor.java | 136 +--- .../internal/processors/query/QueryUtils.java | 6 +- .../internal/visor/query/VisorQueryTask.java | 2 +- ...niteClientCacheInitializationFailTest.java | 7 +- .../query/h2/DmlStatementsProcessor.java | 21 +- ...PlanKey.java => H2CachedStatementKey.java} | 39 +- .../processors/query/h2/H2StatementCache.java | 28 +- .../processors/query/h2/IgniteH2Indexing.java | 633 ++++++++++++------ .../processors/query/h2/ParsingResult.java | 102 +++ .../query/h2/ddl/DdlStatementsProcessor.java | 24 + .../query/h2/sql/GridSqlQueryParser.java | 60 ++ .../h2/twostep/GridMapQueryExecutor.java | 2 +- .../GridCacheCrossCacheQuerySelfTest.java | 12 +- ...eckClusterStateBeforeExecuteQueryTest.java | 2 +- .../cache/SqlFieldsQuerySelfTest.java | 53 ++ .../IgniteCacheReplicatedQuerySelfTest.java | 29 +- .../cache/index/AbstractSchemaSelfTest.java | 16 +- .../index/DynamicColumnsAbstractTest.java | 4 +- .../DynamicIndexAbstractBasicSelfTest.java | 25 +- .../index/DynamicIndexAbstractSelfTest.java | 33 +- .../index/H2DynamicIndexingComplexTest.java | 2 +- .../cache/index/H2DynamicTableSelfTest.java | 8 +- .../local/IgniteCacheLocalQuerySelfTest.java | 62 +- .../IgnitePersistentStoreSchemaLoadTest.java | 18 +- .../query/IgniteCachelessQueriesSelfTest.java | 420 ++++++++++++ .../query/IgniteQueryDedicatedPoolTest.java | 15 +- .../query/IgniteSqlDefaultValueTest.java | 2 +- .../query/IgniteSqlNotNullConstraintTest.java | 2 +- .../MultipleStatementsSqlQuerySelfTest.java | 8 +- .../processors/query/SqlSchemaSelfTest.java | 12 +- .../IgniteCacheQuerySelfTestSuite.java | 12 
+- .../Client/Cache/SqlQueryTest.cs | 228 +++++++ 39 files changed, 1678 insertions(+), 461 deletions(-) create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinLocalQueriesSelfTest.java rename modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/{H2DmlPlanKey.java => H2CachedStatementKey.java} (65%) create mode 100644 modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ParsingResult.java create mode 100644 modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteCachelessQueriesSelfTest.java create mode 100644 modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/SqlQueryTest.cs diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java index 3278e0f6c7e8f..f1d0d4e74cc23 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java @@ -42,6 +42,7 @@ import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadTransactionalPartitionedSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadTransactionalReplicatedSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinComplexDmlDdlSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinComplexQuerySelfTest; import org.apache.ignite.jdbc.thin.JdbcThinConnectionSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinDeleteStatementSelfTest; @@ -54,7 +55,10 @@ import org.apache.ignite.jdbc.thin.JdbcThinEmptyCacheSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinErrorsSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinInsertStatementSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinInsertStatementSkipReducerOnUpdateSelfTest; +import 
org.apache.ignite.jdbc.thin.JdbcThinLocalQueriesSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinMergeStatementSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinMergeStatementSkipReducerOnUpdateSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinMetadataSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinMissingLongArrayResultsTest; import org.apache.ignite.jdbc.thin.JdbcThinNoDefaultSchemaTest; @@ -64,9 +68,6 @@ import org.apache.ignite.jdbc.thin.JdbcThinSelectAfterAlterTable; import org.apache.ignite.jdbc.thin.JdbcThinStatementSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinUpdateStatementSelfTest; -import org.apache.ignite.jdbc.thin.JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest; -import org.apache.ignite.jdbc.thin.JdbcThinInsertStatementSkipReducerOnUpdateSelfTest; -import org.apache.ignite.jdbc.thin.JdbcThinMergeStatementSkipReducerOnUpdateSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest; /** @@ -176,6 +177,7 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(JdbcThinMergeStatementSkipReducerOnUpdateSelfTest.class)); suite.addTest(new TestSuite(JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinLocalQueriesSelfTest.class)); return suite; } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinLocalQueriesSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinLocalQueriesSelfTest.java new file mode 100644 index 0000000000000..1e28e52d3e3d6 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinLocalQueriesSelfTest.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.jdbc.thin; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.List; +import java.util.Map; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * Test that replicated-only query is executed locally. + */ +public class JdbcThinLocalQueriesSelfTest extends JdbcThinAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + startGrid(0); + + startGrid(1); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + super.afterTest(); + } + + /** + * + */ + public void testLocalThinJdbcQuery() throws SQLException { + try (Connection c = connect(grid(0), "replicatedOnly=true")) { + execute(c, "CREATE TABLE Company(id int primary key, name varchar) WITH " + + "\"template=replicated,cache_name=Company\""); + + execute(c, "CREATE TABLE Person(id int primary key, name varchar, companyid int) WITH " + + "\"template=replicated,cache_name=Person\""); + + execute(c, "insert into Company(id, name) values (1, 'Apple')"); + + execute(c, "insert into Person(id, name, companyid) values (2, 'John', 1)"); + + List> res = execute(c, "SELECT p.id, p.name, c.name from Person p left join Company c on " + + "p.companyid = c.id"); + + assertEqualsCollections(F.asList(2, 
"John", "Apple"), res.get(0)); + + Map twoStepCache = U.field(grid(0).context().query().getIndexing(), "twoStepCache"); + + // No two step queries cached => local select. + assertEquals(0, twoStepCache.size()); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryMultipleStatementsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryMultipleStatementsTask.java index f907525571a5f..3e5d57564c14f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryMultipleStatementsTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryMultipleStatementsTask.java @@ -123,7 +123,7 @@ public JdbcQueryMultipleStatementsTask(Ignite ignite, String schemaName, String GridKernalContext ctx = ((IgniteKernal)ignite).context(); - List>> curs = ctx.query().querySqlFieldsNoCache(qry, true, false); + List>> curs = ctx.query().querySqlFields(qry, true, false); List resultsInfo = new ArrayList<>(curs.size()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java index 4c2d0e696b082..7d58a83028631 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java @@ -604,7 +604,7 @@ private QueryCursor> queryContinuous(ContinuousQuery qry, bool if (qry instanceof SqlFieldsQuery) return (FieldsQueryCursor)ctx.kernalContext().query().querySqlFields(ctx, (SqlFieldsQuery)qry, - keepBinary); + keepBinary, true).get(0); if (qry instanceof ScanQuery) return query((ScanQuery)qry, null, projection(qry.isLocal())); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java index 8fe621a6c306b..fd98a77f9058d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java @@ -371,7 +371,7 @@ private JdbcResponse executeQuery(JdbcQueryExecuteRequest req) { qry.setSchema(schemaName); - List>> results = ctx.query().querySqlFieldsNoCache(qry, true, + List>> results = ctx.query().querySqlFields(qry, true, protocolVer.compareTo(VER_2_3_0) < 0); FieldsQueryCursor> fieldsCur = results.get(0); @@ -607,7 +607,7 @@ private ClientListenerResponse executeBatch(JdbcBatchExecuteRequest req) { private void executeBatchedQuery(SqlFieldsQueryEx qry, List updCntsAcc, IgniteBiTuple firstErr) { try { - List>> qryRes = ctx.query().querySqlFieldsNoCache(qry, true, true); + List>> qryRes = ctx.query().querySqlFields(qry, true, true); for (FieldsQueryCursor> cur : qryRes) { if (cur instanceof BulkLoadContextCursor) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java index 3bc5a23d7946a..0056171e34b3d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java @@ -35,9 +35,9 @@ import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.binary.BinaryWriterExImpl; import org.apache.ignite.internal.binary.GridBinaryMarshaller; -import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; +import 
org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; import org.apache.ignite.internal.processors.odbc.ClientListenerRequest; import org.apache.ignite.internal.processors.odbc.ClientListenerRequestHandler; import org.apache.ignite.internal.processors.odbc.ClientListenerResponse; @@ -248,7 +248,7 @@ private ClientListenerResponse executeQuery(OdbcQueryExecuteRequest req) { SqlFieldsQuery qry = makeQuery(req.schema(), sql, req.arguments(), req.timeout()); - List>> cursors = ctx.query().querySqlFieldsNoCache(qry, true, false); + List>> cursors = ctx.query().querySqlFields(qry, true, false); OdbcQueryResults results = new OdbcQueryResults(cursors); @@ -299,7 +299,8 @@ private ClientListenerResponse executeBatchQuery(OdbcQueryExecuteBatchRequest re // Getting meta and do the checks for the first execution. qry.setArgs(paramSet[0]); - QueryCursorImpl> qryCur = (QueryCursorImpl>)ctx.query().querySqlFieldsNoCache(qry, true); + QueryCursorImpl> qryCur = (QueryCursorImpl>)ctx.query() + .querySqlFields(qry, true, true).get(0); if (qryCur.isQuery()) throw new IgniteException("Batching of parameters only supported for DML statements. 
[query=" + @@ -330,7 +331,7 @@ private ClientListenerResponse executeBatchQuery(OdbcQueryExecuteBatchRequest re private long executeQuery(SqlFieldsQuery qry, Object[] row) { qry.setArgs(row); - QueryCursor> cur = ctx.query().querySqlFieldsNoCache(qry, true); + QueryCursor> cur = ctx.query().querySqlFields(qry, true, true).get(0); return OdbcUtils.rowsAffected(cur); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java index 5fb8e1cec05ea..5ac7b8981596c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java @@ -69,28 +69,22 @@ public interface GridQueryIndexing { * @param cacheName Cache name. * @param qry Query. * @param keepBinary Keep binary flag. - * @param mainCacheId Main cache ID. @return Cursor. * @throws IgniteCheckedException If failed. */ public QueryCursor> queryDistributedSql(String schemaName, String cacheName, SqlQuery qry, - boolean keepBinary, int mainCacheId) throws IgniteCheckedException; + boolean keepBinary) throws IgniteCheckedException; /** - * Parses SQL query into two step query and executes it. - * + * Detect whether SQL query should be executed in distributed or local manner and execute it. * @param schemaName Schema name. * @param qry Query. * @param keepBinary Keep binary flag. - * @param cancel Query cancel. - * @param mainCacheId Main cache ID. - * @param failOnMultipleStmts If {@code true} the method must throws exception when query contains - * more then one SQL statement. + * @param failOnMultipleStmts Whether an exception should be thrown for multiple statements query. + * @param cancel Query cancel state handler. * @return Cursor. - * @throws IgniteCheckedException If failed. 
*/ - public List>> queryDistributedSqlFields(String schemaName, SqlFieldsQuery qry, - boolean keepBinary, GridQueryCancel cancel, @Nullable Integer mainCacheId, boolean failOnMultipleStmts) - throws IgniteCheckedException; + public List>> querySqlFields(String schemaName, SqlFieldsQuery qry, boolean keepBinary, + boolean failOnMultipleStmts, GridQueryCancel cancel); /** * Perform a MERGE statement using data streamer as receiver. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java index de6cf91d82941..af3bd5313feae 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java @@ -695,7 +695,8 @@ public void onCacheStart0(GridCacheContext cctx, QuerySchema schema) } } - // Ensure that candidates has unique index names. Otherwise we will not be able to apply pending operations. + // Ensure that candidates has unique index names. + // Otherwise we will not be able to apply pending operations. Map tblTypMap = new HashMap<>(); Map idxTypMap = new HashMap<>(); @@ -1851,11 +1852,15 @@ private void checkxEnabled() throws IgniteException { /** * Query SQL fields. * - * @param cctx Cache context. * @param qry Query. * @param keepBinary Keep binary flag. * @return Cursor. */ + public List>> querySqlFields(final SqlFieldsQuery qry, final boolean keepBinary, + final boolean failOnMultipleStmts) { + return querySqlFields(null, qry, keepBinary, failOnMultipleStmts); + } + @SuppressWarnings("unchecked") public FieldsQueryCursor> querySqlFields(final GridCacheContext cctx, final SqlFieldsQuery qry, final boolean keepBinary) { @@ -1865,141 +1870,71 @@ public FieldsQueryCursor> querySqlFields(final GridCacheContext cct /** * Query SQL fields. * - * @param cctx Cache context. 
- * @param qry Query. - * @param keepBinary Keep binary flag. - * @param failOnMultipleStmts If {@code true} the method must throws exception when query contains - * more then one SQL statement. - * @return Cursor. - */ - @SuppressWarnings("unchecked") - public List>> querySqlFields(final GridCacheContext cctx, final SqlFieldsQuery qry, - final boolean keepBinary, final boolean failOnMultipleStmts) { - checkxEnabled(); - - validateSqlFieldsQuery(qry); - - boolean loc = (qry.isReplicatedOnly() && cctx.isReplicatedAffinityNode()) || cctx.isLocal() || qry.isLocal(); - - if (!busyLock.enterBusy()) - throw new IllegalStateException("Failed to execute query (grid is stopping)."); - - GridCacheContext oldCctx = curCache.get(); - - curCache.set(cctx); - - try { - final String schemaName = qry.getSchema() != null ? qry.getSchema() : idx.schema(cctx.name()); - final int mainCacheId = CU.cacheId(cctx.name()); - - IgniteOutClosureX>>> clo; - - if (loc) { - clo = new IgniteOutClosureX>>>() { - @Override public List>> applyx() throws IgniteCheckedException { - GridQueryCancel cancel = new GridQueryCancel(); - - List>> cursors; - - if (cctx.config().getQueryParallelism() > 1) { - qry.setDistributedJoins(true); - - cursors = idx.queryDistributedSqlFields(schemaName, qry, - keepBinary, cancel, mainCacheId, true); - } - else { - IndexingQueryFilter filter = idx.backupFilter(requestTopVer.get(), qry.getPartitions()); - - cursors = new ArrayList<>(1); - - cursors.add(idx.queryLocalSqlFields(schemaName, qry, keepBinary, filter, cancel)); - } - - sendQueryExecutedEvent(qry.getSql(), qry.getArgs(), cctx.name()); - - return cursors; - } - }; - } - else { - clo = new IgniteOutClosureX>>>() { - @Override public List>> applyx() throws IgniteCheckedException { - return idx.queryDistributedSqlFields(schemaName, qry, keepBinary, null, mainCacheId, failOnMultipleStmts); - } - }; - } - - return executeQuery(GridCacheQueryType.SQL_FIELDS, qry.getSql(), cctx, clo, true); - } - catch 
(IgniteCheckedException e) { - throw new CacheException(e); - } - finally { - curCache.set(oldCctx); - - busyLock.leaveBusy(); - } - } - - /** - * Query SQL fields without strict dependency on concrete cache. - * * @param qry Query. * @param keepBinary Keep binary flag. * @return Cursor. */ - public FieldsQueryCursor> querySqlFieldsNoCache(final SqlFieldsQuery qry, - final boolean keepBinary) { - return querySqlFieldsNoCache(qry, keepBinary, true).get(0); + public FieldsQueryCursor> querySqlFields(final SqlFieldsQuery qry, final boolean keepBinary) { + return querySqlFields(null, qry, keepBinary, true).get(0); } /** - * Query SQL fields without strict dependency on concrete cache. + * Query SQL fields. * + * @param cctx Cache context. * @param qry Query. * @param keepBinary Keep binary flag. * @param failOnMultipleStmts If {@code true} the method must throws exception when query contains * more then one SQL statement. * @return Cursor. */ - public List>> querySqlFieldsNoCache(final SqlFieldsQuery qry, - final boolean keepBinary, final boolean failOnMultipleStmts) { + @SuppressWarnings("unchecked") + public List>> querySqlFields(@Nullable final GridCacheContext cctx, + final SqlFieldsQuery qry, final boolean keepBinary, final boolean failOnMultipleStmts) { checkxEnabled(); validateSqlFieldsQuery(qry); - if (qry.isLocal()) - throw new IgniteException("Local query is not supported without specific cache."); - if (!ctx.state().publicApiActiveState()) { throw new IgniteException("Can not perform the operation because the cluster is inactive. Note, that " + "the cluster is considered inactive by default if Ignite Persistent Store is used to let all the nodes " + "join the cluster. 
To activate the cluster call Ignite.active(true)."); } - if (qry.getSchema() == null) - qry.setSchema(QueryUtils.DFLT_SCHEMA); - if (!busyLock.enterBusy()) throw new IllegalStateException("Failed to execute query (grid is stopping)."); + GridCacheContext oldCctx = curCache.get(); + + curCache.set(cctx); + + final String schemaName = qry.getSchema() != null ? qry.getSchema() + : (cctx != null ? idx.schema(cctx.name()) : QueryUtils.DFLT_SCHEMA); + try { IgniteOutClosureX>>> clo = new IgniteOutClosureX>>>() { @Override public List>> applyx() throws IgniteCheckedException { GridQueryCancel cancel = new GridQueryCancel(); - return idx.queryDistributedSqlFields(qry.getSchema(), qry, keepBinary, cancel, null, - failOnMultipleStmts); + List>> res = + idx.querySqlFields(schemaName, qry, keepBinary, failOnMultipleStmts, cancel); + + if (cctx != null) + sendQueryExecutedEvent(qry.getSql(), qry.getArgs(), cctx.name()); + + return res; } }; - return executeQuery(GridCacheQueryType.SQL_FIELDS, qry.getSql(), null, clo, true); + return executeQuery(GridCacheQueryType.SQL_FIELDS, qry.getSql(), cctx, clo, true); } catch (IgniteCheckedException e) { throw new CacheException(e); } finally { + curCache.set(oldCctx); + busyLock.leaveBusy(); } } @@ -2086,12 +2021,11 @@ private QueryCursor> queryDistributedSql(final GridCacheC try { final String schemaName = idx.schema(cctx.name()); - final int mainCacheId = CU.cacheId(cctx.name()); return executeQuery(GridCacheQueryType.SQL, qry.getSql(), cctx, new IgniteOutClosureX>>() { @Override public QueryCursor> applyx() throws IgniteCheckedException { - return idx.queryDistributedSql(schemaName, cctx.name(), qry, keepBinary, mainCacheId); + return idx.queryDistributedSql(schemaName, cctx.name(), qry, keepBinary); } }, true); } @@ -2115,7 +2049,6 @@ private QueryCursor> queryLocalSql(final GridCacheConte throw new IllegalStateException("Failed to execute query (grid is stopping)."); final String schemaName = idx.schema(cctx.name()); - final int 
mainCacheId = CU.cacheId(cctx.name()); try { return executeQuery(GridCacheQueryType.SQL, qry.getSql(), cctx, @@ -2135,7 +2068,7 @@ private QueryCursor> queryLocalSql(final GridCacheConte if (cctx.config().getQueryParallelism() > 1) { qry.setDistributedJoins(true); - return idx.queryDistributedSql(schemaName, cctx.name(), qry, keepBinary, mainCacheId); + return idx.queryDistributedSql(schemaName, cctx.name(), qry, keepBinary); } else return idx.queryLocalSql(schemaName, cctx.name(), qry, idx.backupFilter(requestTopVer.get(), @@ -2300,7 +2233,8 @@ private void sendQueryExecutedEvent(String sqlQry, Object[] params, String cache * @param cols Columns to add. * @throws IgniteCheckedException If failed to update type descriptor. */ - private void processDynamicAddColumn(QueryTypeDescriptorImpl d, List cols) throws IgniteCheckedException { + private void processDynamicAddColumn(QueryTypeDescriptorImpl d, List cols) + throws IgniteCheckedException { List props = new ArrayList<>(cols.size()); for (QueryField col : cols) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java index a5dc595abf2d4..135d191522f07 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java @@ -388,8 +388,10 @@ public static QueryTypeCandidate typeForQueryEntity(String cacheName, String sch // Key and value classes still can be available if they are primitive or JDK part. // We need that to set correct types for _key and _val columns. 
- Class keyCls = U.classForName(qryEntity.findKeyType(), null); - Class valCls = U.classForName(qryEntity.findValueType(), null); + // We better box these types - otherwise, if user provides, say, raw 'byte' for + // key or value (which they could), we'll deem key or value as Object which clearly is not right. + Class keyCls = U.box(U.classForName(qryEntity.findKeyType(), null, true)); + Class valCls = U.box(U.classForName(qryEntity.findValueType(), null, true)); // If local node has the classes and they are externalizable, we must use reflection properties. boolean keyMustDeserialize = mustDeserializeBinary(ctx, keyCls); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java index 1daa1f2bd9ca7..2e322763dc703 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/query/VisorQueryTask.java @@ -90,7 +90,7 @@ private VisorQueryJob(VisorQueryTaskArg arg, boolean debug) { String cacheName = arg.getCacheName(); if (F.isEmpty(cacheName)) - qryCursors = ignite.context().query().querySqlFieldsNoCache(qry, true, false); + qryCursors = ignite.context().query().querySqlFields(qry, true, false); else { IgniteCache c = ignite.cache(cacheName); diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java index ab8fbbf26af58..4fb62c2b56a55 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java @@ -237,14 +237,13 @@ private static class FailedIndexing implements 
GridQueryIndexing { /** {@inheritDoc} */ @Override public QueryCursor> queryDistributedSql(String schemaName, String cacheName, - SqlQuery qry, boolean keepBinary, int mainCacheId) throws IgniteCheckedException { + SqlQuery qry, boolean keepBinary) throws IgniteCheckedException { return null; } /** {@inheritDoc} */ - @Override public List>> queryDistributedSqlFields(String schemaName, - SqlFieldsQuery qry, boolean keepBinary, GridQueryCancel cancel, - @Nullable Integer mainCacheId, boolean failOnMultipleStmts) throws IgniteCheckedException { + @Override public List>> querySqlFields(String schemaName, SqlFieldsQuery qry, + boolean keepBinary, boolean failOnMultipleStmts, GridQueryCancel cancel) { return null; } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java index ce946e7def09e..a2435a8f9b26a 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java @@ -107,7 +107,7 @@ public class DmlStatementsProcessor { private static final int PLAN_CACHE_SIZE = 1024; /** Update plans cache. */ - private final ConcurrentMap planCache = + private final ConcurrentMap planCache = new GridBoundedConcurrentLinkedHashMap<>(PLAN_CACHE_SIZE); /** @@ -128,7 +128,7 @@ public void start(GridKernalContext ctx, IgniteH2Indexing idx) { * @param cacheName Cache name. */ public void onCacheStop(String cacheName) { - Iterator> iter = planCache.entrySet().iterator(); + Iterator> iter = planCache.entrySet().iterator(); while (iter.hasNext()) { UpdatePlan plan = iter.next().getValue(); @@ -364,7 +364,7 @@ List>> updateSqlFieldsDistributed(String schemaName, Con * * @param schemaName Schema. * @param conn Connection. - * @param stmt Prepared statement. 
+ * @param prepared H2 prepared command. * @param fieldsQry Fields query. * @param filters Cache name and key filter. * @param cancel Query cancel. @@ -372,10 +372,10 @@ List>> updateSqlFieldsDistributed(String schemaName, Con * @throws IgniteCheckedException if failed. */ @SuppressWarnings("unchecked") - GridQueryFieldsResult updateSqlFieldsLocal(String schemaName, Connection conn, PreparedStatement stmt, + GridQueryFieldsResult updateSqlFieldsLocal(String schemaName, Connection conn, Prepared prepared, SqlFieldsQuery fieldsQry, IndexingQueryFilter filters, GridQueryCancel cancel) throws IgniteCheckedException { - UpdateResult res = updateSqlFields(schemaName, conn, GridSqlQueryParser.prepared(stmt), fieldsQry, true, + UpdateResult res = updateSqlFields(schemaName, conn, prepared, fieldsQry, true, filters, cancel); return new GridQueryFieldsResultAdapter(UPDATE_RESULT_META, @@ -485,10 +485,8 @@ long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, fina */ @SuppressWarnings({"ConstantConditions", "unchecked"}) private UpdateResult executeUpdateStatement(String schemaName, final GridCacheContext cctx, Connection c, - Prepared prepared, SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel) - throws IgniteCheckedException { - int mainCacheId = cctx.cacheId(); - + Prepared prepared, SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters, + GridQueryCancel cancel) throws IgniteCheckedException { Integer errKeysPos = null; UpdatePlan plan = getPlanForStatement(schemaName, c, prepared, fieldsQry, loc, errKeysPos); @@ -521,7 +519,8 @@ private UpdateResult executeUpdateStatement(String schemaName, final GridCacheCo .setPageSize(fieldsQry.getPageSize()) .setTimeout(fieldsQry.getTimeout(), TimeUnit.MILLISECONDS); - cur = idx.queryDistributedSqlFields(schemaName, newFieldsQry, true, cancel, mainCacheId, true).get(0); + cur = (QueryCursorImpl>)idx.querySqlFields(schemaName, newFieldsQry, true, true, + 
cancel).get(0); } else if (plan.hasRows()) cur = plan.createRows(fieldsQry.getArgs()); @@ -613,7 +612,7 @@ private UpdateResult processDmlSelectResult(GridCacheContext cctx, UpdatePlan pl @SuppressWarnings({"unchecked", "ConstantConditions"}) private UpdatePlan getPlanForStatement(String schema, Connection conn, Prepared p, SqlFieldsQuery fieldsQry, boolean loc, @Nullable Integer errKeysPos) throws IgniteCheckedException { - H2DmlPlanKey planKey = new H2DmlPlanKey(schema, p.getSQL(), loc, fieldsQry); + H2CachedStatementKey planKey = H2CachedStatementKey.forDmlStatement(schema, p.getSQL(), fieldsQry, loc); UpdatePlan res = (errKeysPos == null ? planCache.get(planKey) : null); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2DmlPlanKey.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2CachedStatementKey.java similarity index 65% rename from modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2DmlPlanKey.java rename to modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2CachedStatementKey.java index 455b5e5a3b94d..7b43f52a9f6e5 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2DmlPlanKey.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2CachedStatementKey.java @@ -23,9 +23,9 @@ import org.apache.ignite.internal.util.typedef.internal.S; /** - * H2 DML plan key. + * H2 cached statement key. */ -public class H2DmlPlanKey { +class H2CachedStatementKey { /** Schema name. */ private final String schemaName; @@ -41,11 +41,38 @@ public class H2DmlPlanKey { * @param schemaName Schema name. * @param sql SQL. */ - public H2DmlPlanKey(String schemaName, String sql, boolean loc, SqlFieldsQuery fieldsQry) { + H2CachedStatementKey(String schemaName, String sql) { + this(schemaName, sql, null, false); + } + + /** + * Build key with details relevant to DML plans cache. 
+ * + * @param schemaName Schema name. + * @param sql SQL. + * @param fieldsQry Query with flags. + * @param loc DML {@code SELECT} Locality flag. + * @return Statement key. + * @see UpdatePlanBuilder + * @see DmlStatementsProcessor#getPlanForStatement + */ + static H2CachedStatementKey forDmlStatement(String schemaName, String sql, SqlFieldsQuery fieldsQry, boolean loc) { + return new H2CachedStatementKey(schemaName, sql, fieldsQry, loc); + } + + /** + * Full-fledged constructor. + * + * @param schemaName Schema name. + * @param sql SQL. + * @param fieldsQry Query with flags. + * @param loc DML {@code SELECT} Locality flag. + */ + private H2CachedStatementKey(String schemaName, String sql, SqlFieldsQuery fieldsQry, boolean loc) { this.schemaName = schemaName; this.sql = sql; - if (loc || !UpdatePlanBuilder.isSkipReducerOnUpdateQuery(fieldsQry)) + if (fieldsQry == null || loc || !UpdatePlanBuilder.isSkipReducerOnUpdateQuery(fieldsQry)) this.flags = 0; // flags only relevant for server side updates. 
else { this.flags = (byte)(1 + @@ -69,13 +96,13 @@ public H2DmlPlanKey(String schemaName, String sql, boolean loc, SqlFieldsQuery f if (o == null || getClass() != o.getClass()) return false; - H2DmlPlanKey other = (H2DmlPlanKey)o; + H2CachedStatementKey other = (H2CachedStatementKey)o; return F.eq(sql, other.sql) && F.eq(schemaName, other.schemaName) && flags == other.flags; } /** {@inheritDoc} */ @Override public String toString() { - return S.toString(H2DmlPlanKey.class, this); + return S.toString(H2CachedStatementKey.class, this); } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2StatementCache.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2StatementCache.java index d39511202234f..673625f865d53 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2StatementCache.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2StatementCache.java @@ -17,16 +17,16 @@ package org.apache.ignite.internal.processors.query.h2; -import org.apache.ignite.internal.util.typedef.internal.U; - import java.sql.PreparedStatement; import java.util.LinkedHashMap; import java.util.Map; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.Nullable; /** * Statement cache. */ -public class H2StatementCache extends LinkedHashMap { +class H2StatementCache extends LinkedHashMap { /** */ private int size; @@ -43,7 +43,7 @@ public class H2StatementCache extends LinkedHashMap { } /** {@inheritDoc} */ - @Override protected boolean removeEldestEntry(Map.Entry eldest) { + @Override protected boolean removeEldestEntry(Map.Entry eldest) { boolean rmv = size() > size; if (rmv) { @@ -55,6 +55,16 @@ public class H2StatementCache extends LinkedHashMap { return rmv; } + /** + * Get statement for given schema and SQL. + * @param schemaName Schema name. + * @param sql SQL statement. 
+ * @return Cached {@link PreparedStatement}, or {@code null} if none found. + */ + @Nullable public PreparedStatement get(String schemaName, String sql) { + return get(new H2CachedStatementKey(schemaName, sql)); + } + /** * The timestamp of the last usage of the cache. * @@ -70,4 +80,14 @@ public long lastUsage() { public void updateLastUsage() { lastUsage = U.currentTimeMillis(); } + + /** + * Remove statement for given schema and SQL. + * @param schemaName Schema name. + * @param sql SQL statement. + * @return Cached {@link PreparedStatement}, or {@code null} if none found. + */ + @Nullable public PreparedStatement remove(String schemaName, String sql) { + return remove(new H2CachedStatementKey(schemaName, sql)); + } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index 5164862d6408e..b3e140baceb7c 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -104,7 +104,6 @@ import org.apache.ignite.internal.processors.query.h2.database.io.H2LeafIO; import org.apache.ignite.internal.processors.query.h2.ddl.DdlStatementsProcessor; import org.apache.ignite.internal.processors.query.h2.dml.DmlUtils; -import org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode; import org.apache.ignite.internal.processors.query.h2.opt.GridH2DefaultTableEngine; import org.apache.ignite.internal.processors.query.h2.opt.GridH2IndexBase; import org.apache.ignite.internal.processors.query.h2.opt.GridH2PlainRowFactory; @@ -112,8 +111,10 @@ import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row; import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor; import 
org.apache.ignite.internal.processors.query.h2.opt.GridH2Table; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuery; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser; import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuerySplitter; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlStatement; import org.apache.ignite.internal.processors.query.h2.twostep.GridMapQueryExecutor; import org.apache.ignite.internal.processors.query.h2.twostep.GridReduceQueryExecutor; import org.apache.ignite.internal.processors.query.h2.twostep.MapQueryLazyWorker; @@ -151,6 +152,7 @@ import org.h2.api.JavaObjectSerializer; import org.h2.command.Prepared; import org.h2.command.dml.Insert; +import org.h2.command.dml.NoOperation; import org.h2.engine.Session; import org.h2.engine.SysProperties; import org.h2.index.Index; @@ -159,6 +161,7 @@ import org.h2.table.IndexColumn; import org.h2.tools.Server; import org.h2.util.JdbcUtils; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.jsr166.ConcurrentHashMap8; @@ -374,6 +377,22 @@ public Connection connectionForSchema(String schema) { } } + /** + * @param c Connection. + * @param sql SQL. + * @return Cached prepared statement. + */ + @SuppressWarnings("ConstantConditions") + @Nullable private PreparedStatement cachedStatement(Connection c, String sql) { + try { + return prepareStatement(c, sql, true, true); + } + catch (SQLException e) { + // We actually don't expect anything SQL related here as we're supposed to work with cache only. + throw new AssertionError(e); + } + } + /** + * @param c Connection. + * @param sql SQL. @@ -381,34 +400,45 @@ public Connection connectionForSchema(String schema) { * @return Prepared statement. * @throws SQLException If failed.
*/ - private PreparedStatement prepareStatement(Connection c, String sql, boolean useStmtCache) throws SQLException { - if (useStmtCache) { - Thread curThread = Thread.currentThread(); - - H2StatementCache cache = stmtCache.get(curThread); - - if (cache == null) { - H2StatementCache cache0 = new H2StatementCache(PREPARED_STMT_CACHE_SIZE); + @SuppressWarnings("ConstantConditions") + @NotNull private PreparedStatement prepareStatement(Connection c, String sql, boolean useStmtCache) + throws SQLException { + return prepareStatement(c, sql, useStmtCache, false); + } - cache = stmtCache.putIfAbsent(curThread, cache0); + /** + * @param c Connection. + * @param sql SQL. + * @param useStmtCache If {@code true} uses statement cache. + * @param cachedOnly Whether parsing should be avoided if statement has not been found in cache. + * @return Prepared statement. + * @throws SQLException If failed. + */ + @Nullable private PreparedStatement prepareStatement(Connection c, String sql, boolean useStmtCache, + boolean cachedOnly) throws SQLException { + // We can't avoid parsing and avoid using cache at the same time. 
+ assert useStmtCache || !cachedOnly; - if (cache == null) - cache = cache0; - } + if (useStmtCache) { + H2StatementCache cache = getStatementsCacheForCurrentThread(); - cache.updateLastUsage(); + H2CachedStatementKey key = new H2CachedStatementKey(c.getSchema(), sql); - PreparedStatement stmt = cache.get(sql); + PreparedStatement stmt = cache.get(key); - if (stmt != null && !stmt.isClosed() && !((JdbcStatement)stmt).isCancelled()) { + if (stmt != null && !stmt.isClosed() && !((JdbcStatement)stmt).isCancelled() && + !GridSqlQueryParser.prepared(stmt).needRecompile()) { assert stmt.getConnection() == c; return stmt; } + if (cachedOnly) + return null; + stmt = prepare0(c, sql); - cache.put(sql, stmt); + cache.put(key, stmt); return stmt; } @@ -441,6 +471,28 @@ private PreparedStatement prepare0(Connection c, String sql) throws SQLException return c.prepareStatement(sql); } + /** + * @return {@link H2StatementCache} associated with current thread. + */ + @NotNull private H2StatementCache getStatementsCacheForCurrentThread() { + Thread curThread = Thread.currentThread(); + + H2StatementCache cache = stmtCache.get(curThread); + + if (cache == null) { + H2StatementCache cache0 = new H2StatementCache(PREPARED_STMT_CACHE_SIZE); + + cache = stmtCache.putIfAbsent(curThread, cache0); + + if (cache == null) + cache = cache0; + } + + cache.updateLastUsage(); + + return cache; + } + /** {@inheritDoc} */ @Override public PreparedStatement prepareNativeStatement(String schemaName, String sql) throws SQLException { Connection conn = connectionForSchema(schemaName); @@ -797,7 +849,7 @@ private void executeSql(String schemaName, String sql) throws IgniteCheckedExcep * @param cols Columns. * @return Index. 
*/ - public GridH2IndexBase createSortedIndex(String name, GridH2Table tbl, boolean pk, List cols, + GridH2IndexBase createSortedIndex(String name, GridH2Table tbl, boolean pk, List cols, int inlineSize) { try { GridCacheContext cctx = tbl.cache(); @@ -851,7 +903,7 @@ public GridH2IndexBase createSortedIndex(String name, GridH2Table tbl, boolean p * @throws IgniteCheckedException If failed. */ @SuppressWarnings("unchecked") - public GridQueryFieldsResult queryLocalSqlFields(final String schemaName, final String qry, + GridQueryFieldsResult queryLocalSqlFields(final String schemaName, final String qry, @Nullable final Collection params, final IndexingQueryFilter filter, boolean enforceJoinOrder, final int timeout, final GridQueryCancel cancel) throws IgniteCheckedException { final Connection conn = connectionForSchema(schemaName); @@ -874,7 +926,7 @@ public GridQueryFieldsResult queryLocalSqlFields(final String schemaName, final fldsQry.setEnforceJoinOrder(enforceJoinOrder); fldsQry.setTimeout(timeout, TimeUnit.MILLISECONDS); - return dmlProc.updateSqlFieldsLocal(schemaName, conn, stmt, fldsQry, filter, cancel); + return dmlProc.updateSqlFieldsLocal(schemaName, conn, p, fldsQry, filter, cancel); } else if (DdlStatementsProcessor.isDdlStatement(p)) throw new IgniteSQLException("DDL statements are supported for the whole cluster only", @@ -1192,7 +1244,7 @@ public void bindParameters(PreparedStatement stmt, * @throws IgniteCheckedException If failed. 
*/ @SuppressWarnings("unchecked") - public GridCloseableIterator> queryLocalSql(String schemaName, String cacheName, + GridCloseableIterator> queryLocalSql(String schemaName, String cacheName, final String qry, String alias, @Nullable final Collection params, String type, final IndexingQueryFilter filter, GridQueryCancel cancel) throws IgniteCheckedException { final H2TableDescriptor tbl = tableDescriptor(schemaName, cacheName, type); @@ -1279,7 +1331,7 @@ UpdateResult runDistributedUpdate( /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public QueryCursor> queryDistributedSql(String schemaName, String cacheName, - SqlQuery qry, boolean keepBinary, int mainCacheId) { + SqlQuery qry, boolean keepBinary) { String type = qry.getType(); H2TableDescriptor tblDesc = tableDescriptor(schemaName, cacheName, type); @@ -1309,7 +1361,7 @@ UpdateResult runDistributedUpdate( fqry.setTimeout(qry.getTimeout(), TimeUnit.MILLISECONDS); final QueryCursor> res = - queryDistributedSqlFields(schemaName, fqry, keepBinary, null, mainCacheId, true).get(0); + querySqlFields(schemaName, fqry, keepBinary, true, null).get(0); final Iterable> converted = new Iterable>() { @Override public Iterator> iterator() { @@ -1391,213 +1443,342 @@ private List>> tryQueryDistributedSqlFieldsNative(Stri return Collections.singletonList(cursor); } - else {try { - List>> ress = new ArrayList<>(1); - FieldsQueryCursor> cursor = ddlProc.runDdlStatement(qry.getSql(), cmd); - ress.add(cursor); + try { + FieldsQueryCursor> res = ddlProc.runDdlStatement(qry.getSql(), cmd); - return ress; + return Collections.singletonList(res); } catch (IgniteCheckedException e) { throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + qry.getSql() + "]: " - + e.getMessage(), e); - } + + e.getMessage(), e); } } + /** + * Check expected statement type (when it is set by JDBC) and given statement type. + * + * @param qry Query. 
+ * @param isQry {@code true} for select queries, otherwise (DML/DDL queries) {@code false}. + */ + private void checkQueryType(SqlFieldsQuery qry, boolean isQry) { + Boolean qryFlag = qry instanceof SqlFieldsQueryEx ? ((SqlFieldsQueryEx) qry).isQuery() : null; + + if (qryFlag != null && qryFlag != isQry) + throw new IgniteSQLException("Given statement type does not match that declared by JDBC driver", + IgniteQueryErrorCode.STMT_TYPE_MISMATCH); + } + + /** {@inheritDoc} */ - @Override public List>> queryDistributedSqlFields(String schemaName, SqlFieldsQuery qry, - boolean keepBinary, GridQueryCancel cancel, @Nullable Integer mainCacheId, boolean failOnMultipleStmts) { + @SuppressWarnings("StringEquality") + @Override public List>> querySqlFields(String schemaName, SqlFieldsQuery qry, + boolean keepBinary, boolean failOnMultipleStmts, GridQueryCancel cancel) { List>> res = tryQueryDistributedSqlFieldsNative(schemaName, qry); if (res != null) return res; - Connection c = connectionForSchema(schemaName); + { + // First, let's check if we already have a two-step query for this statement... 
+ H2TwoStepCachedQueryKey cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, qry.getSql(), + qry.isCollocated(), qry.isDistributedJoins(), qry.isEnforceJoinOrder(), qry.isLocal()); - final boolean enforceJoinOrder = qry.isEnforceJoinOrder(); - final boolean distributedJoins = qry.isDistributedJoins(); - final boolean grpByCollocated = qry.isCollocated(); + H2TwoStepCachedQuery cachedQry; - final DistributedJoinMode distributedJoinMode = distributedJoinMode(qry.isLocal(), distributedJoins); + if ((cachedQry = twoStepCache.get(cachedQryKey)) != null) { + checkQueryType(qry, true); - String sqlQry = qry.getSql(); + GridCacheTwoStepQuery twoStepQry = cachedQry.query().copy(); - H2TwoStepCachedQueryKey cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, sqlQry, grpByCollocated, - distributedJoins, enforceJoinOrder, qry.isLocal()); + List meta = cachedQry.meta(); - H2TwoStepCachedQuery cachedQry = twoStepCache.get(cachedQryKey); + res = Collections.singletonList(doRunDistributedQuery(schemaName, qry, twoStepQry, meta, keepBinary, + cancel)); - if (cachedQry != null) { - checkQueryType(qry, true); + if (!twoStepQry.explain()) + twoStepCache.putIfAbsent(cachedQryKey, new H2TwoStepCachedQuery(meta, twoStepQry.copy())); - GridCacheTwoStepQuery twoStepQry = cachedQry.query().copy(); + return res; + } + } - List meta = cachedQry.meta(); + { + // Second, let's check if we already have a parsed statement... + PreparedStatement cachedStmt; - return Collections.singletonList(executeTwoStepsQuery(schemaName, qry.getPageSize(), qry.getPartitions(), - qry.getArgs(), keepBinary, qry.isLazy(), qry.getTimeout(), cancel, sqlQry, enforceJoinOrder, - twoStepQry, meta)); + if ((cachedStmt = cachedStatement(connectionForSchema(schemaName), qry.getSql())) != null) { + Prepared prepared = GridSqlQueryParser.prepared(cachedStmt); + + // We may use this cached statement only for local queries and non queries. 
+ if (qry.isLocal() || !prepared.isQuery()) + return (List>>)doRunPrepared(schemaName, prepared, qry, null, null, + keepBinary, cancel); + } } res = new ArrayList<>(1); - Object[] argsOrig = qry.getArgs(); int firstArg = 0; - Object[] args; - String remainingSql = sqlQry; + + String remainingSql = qry.getSql(); while (remainingSql != null) { - args = null; - GridCacheTwoStepQuery twoStepQry = null; - List meta; + ParsingResult parseRes = parseAndSplit(schemaName, + remainingSql != qry.getSql() ? cloneFieldsQuery(qry).setSql(remainingSql) : qry, firstArg); - final UUID locNodeId = ctx.localNodeId(); + // Let's avoid second reflection getter call by returning Prepared object too + Prepared prepared = parseRes.prepared(); - // Here we will just parse the statement, no need to optimize it at all. - H2Utils.setupConnection(c, /*distributedJoins*/false, /*enforceJoinOrder*/true); + GridCacheTwoStepQuery twoStepQry = parseRes.twoStepQuery(); - GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, 0, PREPARE) - .distributedJoinMode(distributedJoinMode)); + List meta = parseRes.meta(); - PreparedStatement stmt = null; - Prepared prepared; + SqlFieldsQuery newQry = parseRes.newQuery(); - boolean cachesCreated = false; + remainingSql = parseRes.remainingSql(); - try { + if (remainingSql != null && failOnMultipleStmts) + throw new IgniteSQLException("Multiple statements queries are not supported"); + + firstArg += prepared.getParameters().size(); + + res.addAll(doRunPrepared(schemaName, prepared, newQry, twoStepQry, meta, keepBinary, cancel)); + + if (parseRes.twoStepQuery() != null && parseRes.twoStepQueryKey() != null && + !parseRes.twoStepQuery().explain()) + twoStepCache.putIfAbsent(parseRes.twoStepQueryKey(), new H2TwoStepCachedQuery(meta, twoStepQry.copy())); + } + + return res; + } + + /** + * Execute an all-ready {@link SqlFieldsQuery}. + * @param schemaName Schema name. + * @param prepared H2 command. + * @param qry Fields query with flags. 
+ * @param twoStepQry Two-step query if this query must be executed in a distributed way. + * @param meta Metadata for {@code twoStepQry}. + * @param keepBinary Whether binary objects must not be deserialized automatically. + * @param cancel Query cancel state holder. + * @return Query result. + */ + private List>> doRunPrepared(String schemaName, Prepared prepared, + SqlFieldsQuery qry, GridCacheTwoStepQuery twoStepQry, List meta, boolean keepBinary, + GridQueryCancel cancel) { + String sqlQry = qry.getSql(); + + boolean loc = qry.isLocal(); + + IndexingQueryFilter filter = (loc ? backupFilter(null, qry.getPartitions()) : null); + + if (!prepared.isQuery()) { + if (DmlStatementsProcessor.isDmlStatement(prepared)) { try { - while (true) { - try { - // Do not cache this statement because the whole query object will be cached later on. - stmt = prepareStatement(c, remainingSql, false); + Connection conn = connectionForSchema(schemaName); - break; - } - catch (SQLException e) { - if (!cachesCreated && ( - e.getErrorCode() == ErrorCode.SCHEMA_NOT_FOUND_1 || - e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 || - e.getErrorCode() == ErrorCode.INDEX_NOT_FOUND_1) - ) { + if (!loc) + return dmlProc.updateSqlFieldsDistributed(schemaName, conn, prepared, qry, cancel); + else { + final GridQueryFieldsResult updRes = + dmlProc.updateSqlFieldsLocal(schemaName, conn, prepared, qry, filter, cancel); + + return Collections.singletonList(new QueryCursorImpl<>(new Iterable>() { + @Override public Iterator> iterator() { try { - ctx.cache().createMissingQueryCaches(); + return new GridQueryCacheObjectsIterator(updRes.iterator(), objectContext(), + true); } - catch (IgniteCheckedException ignored) { - throw new CacheException("Failed to create missing caches.", e); + catch (IgniteCheckedException e) { + throw new IgniteException(e); } - - cachesCreated = true; } - else - throw new IgniteSQLException("Failed to parse query. 
" + e.getMessage(), - IgniteQueryErrorCode.PARSING, e); - } + }, cancel)); } + } + catch (IgniteCheckedException e) { + throw new IgniteSQLException("Failed to execute DML statement [stmt=" + sqlQry + + ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e); + } + } - GridSqlQueryParser.PreparedWithRemaining prep = GridSqlQueryParser.preparedWithRemaining(stmt); + if (DdlStatementsProcessor.isDdlStatement(prepared)) { + if (loc) + throw new IgniteSQLException("DDL statements are not supported for LOCAL caches", + IgniteQueryErrorCode.UNSUPPORTED_OPERATION); - // remaining == null if the query string contains single SQL statement. - remainingSql = prep.remainingSql(); + try { + return Collections.singletonList(ddlProc.runDdlStatement(sqlQry, prepared)); + } + catch (IgniteCheckedException e) { + throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + sqlQry + ']', e); + } + } - if (remainingSql != null && failOnMultipleStmts) - throw new IgniteSQLException("Multiple statements queries are not supported"); + if (prepared instanceof NoOperation) { + QueryCursorImpl> resCur = (QueryCursorImpl>)new QueryCursorImpl( + Collections.singletonList(Collections.singletonList(0L)), null, false); - sqlQry = prep.prepared().getSQL(); + resCur.fieldsMeta(UPDATE_RESULT_META); - prepared = prep.prepared(); + return Collections.singletonList(resCur); + } - int paramsCnt = prepared.getParameters().size(); + throw new IgniteSQLException("Unsupported DDL/DML operation: " + prepared.getClass().getName()); + } - if (!DmlUtils.isBatched(qry) && paramsCnt > 0) { - if (argsOrig == null || argsOrig.length < firstArg + paramsCnt) { - throw new IgniteException("Invalid number of query parameters. 
" + - "Cannot find " + (argsOrig.length + 1 - firstArg) + " parameter."); - } + if (twoStepQry != null) { + if (log.isDebugEnabled()) + log.debug("Parsed query: `" + sqlQry + "` into two step query: " + twoStepQry); - args = Arrays.copyOfRange(argsOrig, firstArg, firstArg + paramsCnt); + checkQueryType(qry, true); - firstArg += paramsCnt; - } + return Collections.singletonList(doRunDistributedQuery(schemaName, qry, twoStepQry, meta, keepBinary, + cancel)); + } - cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, sqlQry, grpByCollocated, - distributedJoins, enforceJoinOrder, qry.isLocal()); + // We've encountered a local query, let's just run it. + try { + return Collections.singletonList(queryLocalSqlFields(schemaName, qry, keepBinary, filter, cancel)); + } + catch (IgniteCheckedException e) { + throw new IgniteSQLException("Failed to execute local statement [stmt=" + sqlQry + + ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e); + } + } - cachedQry = twoStepCache.get(cachedQryKey); + /** + * Parse and split query if needed, cache either two-step query or statement. + * @param schemaName Schema name. + * @param qry Query. + * @param firstArg Position of the first argument of the following {@code Prepared}. + * @return Result: prepared statement, H2 command, two-step query (if needed), + * metadata for two-step query (if needed), evaluated query local execution flag. + */ + private ParsingResult parseAndSplit(String schemaName, SqlFieldsQuery qry, int firstArg) { + Connection c = connectionForSchema(schemaName); - if (cachedQry != null) { - checkQueryType(qry, true); + // For queries that are explicitly local, we rely on the flag specified in the query + // because this parsing result will be cached and used for queries directly. + // For other queries, we enforce join order at this stage to avoid premature optimizations + // (and therefore longer parsing) as long as there'll be more parsing at split stage. 
+ boolean enforceJoinOrderOnParsing = (!qry.isLocal() || qry.isEnforceJoinOrder()); - twoStepQry = cachedQry.query().copy(); - meta = cachedQry.meta(); + H2Utils.setupConnection(c, /*distributedJoins*/false, /*enforceJoinOrder*/enforceJoinOrderOnParsing); - res.add(executeTwoStepsQuery(schemaName, qry.getPageSize(), qry.getPartitions(), args, keepBinary, - qry.isLazy(), qry.getTimeout(), cancel, sqlQry, enforceJoinOrder, - twoStepQry, meta)); + boolean loc = qry.isLocal(); - continue; - } - else { - checkQueryType(qry, prepared.isQuery()); + PreparedStatement stmt = prepareStatementAndCaches(c, qry.getSql()); - if (prepared.isQuery()) { - bindParameters(stmt, F.asList(args)); + if (loc && GridSqlQueryParser.checkMultipleStatements(stmt)) + throw new IgniteSQLException("Multiple statements queries are not supported for local queries"); - twoStepQry = GridSqlQuerySplitter.split(c, prepared, args, - grpByCollocated, distributedJoins, enforceJoinOrder, this); + GridSqlQueryParser.PreparedWithRemaining prep = GridSqlQueryParser.preparedWithRemaining(stmt); - assert twoStepQry != null; - } - } - } - finally { - GridH2QueryContext.clearThreadLocal(); - } + Prepared prepared = prep.prepared(); - // It is a DML statement if we did not create a twoStepQuery. 
- if (twoStepQry == null) { - if (DmlStatementsProcessor.isDmlStatement(prepared)) { - try { - res.addAll(dmlProc.updateSqlFieldsDistributed(schemaName, c, prepared, - qry.copy().setSql(sqlQry).setArgs(args), cancel)); + checkQueryType(qry, prepared.isQuery()); - continue; - } - catch (IgniteCheckedException e) { - throw new IgniteSQLException("Failed to execute DML statement [stmt=" + sqlQry + - ", params=" + Arrays.deepToString(args) + "]", e); - } - } + String remainingSql = prep.remainingSql(); - if (DdlStatementsProcessor.isDdlStatement(prepared)) { - try { - res.add(ddlProc.runDdlStatement(sqlQry, prepared)); + int paramsCnt = prepared.getParameters().size(); - continue; - } - catch (IgniteCheckedException e) { - throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + sqlQry + ']', e); - } - } + Object[] argsOrig = qry.getArgs(); + + Object[] args = null; + + if (!DmlUtils.isBatched(qry) && paramsCnt > 0) { + if (argsOrig == null || argsOrig.length < firstArg + paramsCnt) { + throw new IgniteException("Invalid number of query parameters. " + + "Cannot find " + (argsOrig != null ? argsOrig.length + 1 - firstArg : 1) + " parameter."); + } + + args = Arrays.copyOfRange(argsOrig, firstArg, firstArg + paramsCnt); + } + + if (prepared.isQuery()) { + try { + bindParameters(stmt, F.asList(args)); + } + catch (IgniteCheckedException e) { + U.closeQuiet(stmt); + + throw new IgniteSQLException("Failed to bind parameters: [qry=" + prepared.getSQL() + ", params=" + + Arrays.deepToString(args) + "]", IgniteQueryErrorCode.PARSING, e); + } + + GridSqlQueryParser parser = null; + + if (!loc) { + parser = new GridSqlQueryParser(false); + + GridSqlStatement parsedStmt = parser.parse(prepared); + + // Legit assertion - we have H2 query flag above. 
+ assert parsedStmt instanceof GridSqlQuery; + + loc = parser.isLocalQuery(qry.isReplicatedOnly()); + } + + if (loc) { + if (parser == null) { + parser = new GridSqlQueryParser(false); + + parser.parse(prepared); } - assert twoStepQry != null; + GridCacheContext cctx = parser.getFirstPartitionedCache(); - List cacheIds = collectCacheIds(mainCacheId, twoStepQry); + if (cctx != null && cctx.config().getQueryParallelism() > 1) { + loc = false; - if (F.isEmpty(cacheIds)) - twoStepQry.local(true); - else { - twoStepQry.cacheIds(cacheIds); - twoStepQry.local(qry.isLocal()); + qry.setDistributedJoins(true); } + } + } - meta = H2Utils.meta(stmt.getMetaData()); + SqlFieldsQuery newQry = cloneFieldsQuery(qry).setSql(prepared.getSQL()).setArgs(args); + + boolean hasTwoStep = !loc && prepared.isQuery(); + + // Let's not cache multiple statements and distributed queries as whole two step query will be cached later on. + if (remainingSql != null || hasTwoStep) + getStatementsCacheForCurrentThread().remove(schemaName, qry.getSql()); + + if (!hasTwoStep) + return new ParsingResult(prepared, newQry, remainingSql, null, null, null); + + final UUID locNodeId = ctx.localNodeId(); + + // Now we're sure to have a distributed query. Let's try to get a two-step plan from the cache, or perform the + // split if needed. 
+ H2TwoStepCachedQueryKey cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, qry.getSql(), + qry.isCollocated(), qry.isDistributedJoins(), qry.isEnforceJoinOrder(), qry.isLocal()); + + H2TwoStepCachedQuery cachedQry; + + if ((cachedQry = twoStepCache.get(cachedQryKey)) != null) { + checkQueryType(qry, true); + + GridCacheTwoStepQuery twoStepQry = cachedQry.query().copy(); + + List meta = cachedQry.meta(); + + return new ParsingResult(prepared, newQry, remainingSql, twoStepQry, cachedQryKey, meta); + } + + try { + GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, 0, PREPARE) + .distributedJoinMode(distributedJoinMode(qry.isLocal(), qry.isDistributedJoins()))); + + try { + return new ParsingResult(prepared, newQry, remainingSql, split(prepared, newQry), + cachedQryKey, H2Utils.meta(stmt.getMetaData())); } catch (IgniteCheckedException e) { - throw new CacheException("Failed to bind parameters: [qry=" + sqlQry + ", params=" + - Arrays.deepToString(qry.getArgs()) + "]", e); + throw new IgniteSQLException("Failed to bind parameters: [qry=" + newQry.getSql() + ", params=" + + Arrays.deepToString(newQry.getArgs()) + "]", IgniteQueryErrorCode.PARSING, e); } catch (SQLException e) { throw new IgniteSQLException(e); @@ -1605,79 +1786,125 @@ private List>> tryQueryDistributedSqlFieldsNative(Stri finally { U.close(stmt, log); } - - res.add(executeTwoStepsQuery(schemaName, qry.getPageSize(), qry.getPartitions(), args, keepBinary, - qry.isLazy(), qry.getTimeout(), cancel, sqlQry, enforceJoinOrder, - twoStepQry, meta)); - - if (cachedQry == null && !twoStepQry.explain()) { - cachedQry = new H2TwoStepCachedQuery(meta, twoStepQry.copy()); - - twoStepCache.putIfAbsent(cachedQryKey, cachedQry); - } } + finally { + GridH2QueryContext.clearThreadLocal(); + } + } - return res; + /** + * Make a copy of {@link SqlFieldsQuery} with all flags and preserving type. + * @param oldQry Query to copy. + * @return Query copy. 
+ */ + private SqlFieldsQuery cloneFieldsQuery(SqlFieldsQuery oldQry) { + return oldQry.copy().setLocal(oldQry.isLocal()).setPageSize(oldQry.getPageSize()); } /** - * Check expected statement type (when it is set by JDBC) and given statement type. - * - * @param qry Query. - * @param isQry {@code true} for select queries, otherwise (DML/DDL queries) {@code false}. + * Split query into two-step query. + * @param prepared JDBC prepared statement. + * @param qry Original fields query. + * @return Two-step query. + * @throws IgniteCheckedException in case of error inside {@link GridSqlQuerySplitter}. + * @throws SQLException in case of error inside {@link GridSqlQuerySplitter}. */ - private void checkQueryType(SqlFieldsQuery qry, boolean isQry) { - if (qry instanceof SqlFieldsQueryEx && ((SqlFieldsQueryEx)qry).isQuery() != null && - ((SqlFieldsQueryEx)qry).isQuery() != isQry) - throw new IgniteSQLException("Given statement type does not match that declared by JDBC driver", - IgniteQueryErrorCode.STMT_TYPE_MISMATCH); + private GridCacheTwoStepQuery split(Prepared prepared, SqlFieldsQuery qry) throws IgniteCheckedException, + SQLException { + GridCacheTwoStepQuery res = GridSqlQuerySplitter.split(connectionForThread(qry.getSchema()), prepared, + qry.getArgs(), qry.isCollocated(), qry.isDistributedJoins(), qry.isEnforceJoinOrder(), this); + + List cacheIds = collectCacheIds(null, res); + + if (F.isEmpty(cacheIds)) + res.local(true); + else { + res.cacheIds(cacheIds); + res.local(qry.isLocal()); + } + + res.pageSize(qry.getPageSize()); + + return res; } /** + * Run distributed query on detected set of partitions. * @param schemaName Schema name. - * @param pageSize Page size. - * @param partitions Partitions. - * @param args Arguments. + * @param qry Original query. + * @param twoStepQry Two-step query. + * @param meta Metadata to set to cursor. * @param keepBinary Keep binary flag. - * @param lazy Lazy flag. - * @param timeout Timeout. - * @param cancel Cancel. 
- * @param sqlQry SQL query string. - * @param enforceJoinOrder Enforce join orded flag. - * @param twoStepQry Two-steps query. - * @param meta Metadata. - * @return Cursor. - */ - private FieldsQueryCursor> executeTwoStepsQuery(String schemaName, int pageSize, int partitions[], - Object[] args, boolean keepBinary, boolean lazy, int timeout, - GridQueryCancel cancel, String sqlQry, boolean enforceJoinOrder, GridCacheTwoStepQuery twoStepQry, - List meta) { + * @param cancel Cancel handler. + * @return Cursor representing distributed query result. + */ + private FieldsQueryCursor> doRunDistributedQuery(String schemaName, SqlFieldsQuery qry, + GridCacheTwoStepQuery twoStepQry, List meta, boolean keepBinary, + GridQueryCancel cancel) { if (log.isDebugEnabled()) - log.debug("Parsed query: `" + sqlQry + "` into two step query: " + twoStepQry); + log.debug("Parsed query: `" + qry.getSql() + "` into two step query: " + twoStepQry); - twoStepQry.pageSize(pageSize); + twoStepQry.pageSize(qry.getPageSize()); if (cancel == null) cancel = new GridQueryCancel(); + int partitions[] = qry.getPartitions(); + if (partitions == null && twoStepQry.derivedPartitions() != null) { try { - partitions = calculateQueryPartitions(twoStepQry.derivedPartitions(), args); - } catch (IgniteCheckedException e) { - throw new CacheException("Failed to calculate derived partitions: [qry=" + sqlQry + ", params=" + - Arrays.deepToString(args) + "]", e); + partitions = calculateQueryPartitions(twoStepQry.derivedPartitions(), qry.getArgs()); + } + catch (IgniteCheckedException e) { + throw new CacheException("Failed to calculate derived partitions: [qry=" + qry.getSql() + ", params=" + + Arrays.deepToString(qry.getArgs()) + "]", e); } } QueryCursorImpl> cursor = new QueryCursorImpl<>( - runQueryTwoStep(schemaName, twoStepQry, keepBinary, enforceJoinOrder, timeout, cancel, - args, partitions, lazy), cancel); + runQueryTwoStep(schemaName, twoStepQry, keepBinary, qry.isEnforceJoinOrder(), qry.getTimeout(), 
cancel, + qry.getArgs(), partitions, qry.isLazy()), cancel); cursor.fieldsMeta(meta); return cursor; } + /** + * Do initial parsing of the statement and create query caches, if needed. + * @param c Connection. + * @param sqlQry Query. + * @return H2 prepared statement. + */ + private PreparedStatement prepareStatementAndCaches(Connection c, String sqlQry) { + boolean cachesCreated = false; + + while (true) { + try { + return prepareStatement(c, sqlQry, true); + } + catch (SQLException e) { + if (!cachesCreated && ( + e.getErrorCode() == ErrorCode.SCHEMA_NOT_FOUND_1 || + e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 || + e.getErrorCode() == ErrorCode.INDEX_NOT_FOUND_1) + ) { + try { + ctx.cache().createMissingQueryCaches(); + } + catch (IgniteCheckedException ignored) { + throw new CacheException("Failed to create missing caches.", e); + } + + cachesCreated = true; + } + else + throw new IgniteSQLException("Failed to parse query. " + e.getMessage(), + IgniteQueryErrorCode.PARSING, e); + } + } + } + /** * Run DML request from other node. * @@ -1704,7 +1931,7 @@ public UpdateResult mapDistributedUpdate(String schemaName, SqlFieldsQuery fldsQ /** * @throws IllegalStateException if segmented indices used with non-segmented indices. */ - private void checkCacheIndexSegmentation(List cacheIds) { + private void checkCacheIndexSegmentation(Collection cacheIds) { if (cacheIds.isEmpty()) return; // Nothing to check @@ -2636,7 +2863,7 @@ private int bindPartitionInfoParameter(CacheQueryPartitionInfo partInfo, Object[ * @param twoStepQry Two-step query. * @return Result. 
*/ - public List collectCacheIds(@Nullable Integer mainCacheId, GridCacheTwoStepQuery twoStepQry) { + @Nullable public List collectCacheIds(@Nullable Integer mainCacheId, GridCacheTwoStepQuery twoStepQry) { LinkedHashSet caches0 = new LinkedHashSet<>(); int tblCnt = twoStepQry.tablesCount(); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ParsingResult.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ParsingResult.java new file mode 100644 index 0000000000000..98d28481dac4d --- /dev/null +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ParsingResult.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query.h2; + +import java.util.List; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery; +import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata; +import org.h2.command.Prepared; + +/** + * Result of parsing and splitting SQL from {@link SqlFieldsQuery}. + */ +final class ParsingResult { + /** H2 command. 
*/ + private final Prepared prepared; + + /** New fields query that may be executed right away. */ + private final SqlFieldsQuery newQry; + + /** Remaining SQL statements. */ + private final String remainingSql; + + /** Two-step query, or {@code} null if this result is for local query. */ + private final GridCacheTwoStepQuery twoStepQry; + + /** Two-step query key to cache {@link #twoStepQry}, or {@code null} if there's no need to worry + * about two-step caching. */ + private final H2TwoStepCachedQueryKey twoStepQryKey; + + /** Metadata for two-step query, or {@code} null if this result is for local query. */ + private final List meta; + + /** Simple constructor. */ + ParsingResult(Prepared prepared, SqlFieldsQuery newQry, String remainingSql, GridCacheTwoStepQuery twoStepQry, + H2TwoStepCachedQueryKey twoStepQryKey, List meta) { + this.prepared = prepared; + this.newQry = newQry; + this.remainingSql = remainingSql; + this.twoStepQry = twoStepQry; + this.twoStepQryKey = twoStepQryKey; + this.meta = meta; + } + + /** + * @return Metadata for two-step query, or {@code} null if this result is for local query. + */ + List meta() { + return meta; + } + + /** + * @return New fields query that may be executed right away. + */ + SqlFieldsQuery newQuery() { + return newQry; + } + + /** + * @return H2 command. + */ + Prepared prepared() { + return prepared; + } + + /** + * @return Remaining SQL statements. + */ + String remainingSql() { + return remainingSql; + } + + /** + * @return Two-step query, or {@code} null if this result is for local query. + */ + GridCacheTwoStepQuery twoStepQuery() { + return twoStepQry; + } + + /** + * @return Two-step query key to cache {@link #twoStepQry}, or {@code null} if there's no need to worry + * about two-step caching. 
+ */ + H2TwoStepCachedQueryKey twoStepQueryKey() { + return twoStepQryKey; + } +} diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java index 6b054a231ad60..f2119ea9f7dbb 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java @@ -33,6 +33,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.query.GridQueryProperty; @@ -116,6 +117,8 @@ public FieldsQueryCursor> runDdlStatement(String sql, SqlCommand cmd) th assert tbl.rowDescriptor() != null; + isDdlSupported(tbl); + QueryIndex newIdx = new QueryIndex(); newIdx.setName(cmd0.indexName()); @@ -147,6 +150,8 @@ else if (cmd instanceof SqlDropIndexCommand) { GridH2Table tbl = idx.dataTableForIndex(cmd0.schemaName(), cmd0.indexName()); if (tbl != null) { + isDdlSupported(tbl); + fut = ctx.query().dynamicIndexDrop(tbl.cacheName(), cmd0.schemaName(), cmd0.indexName(), cmd0.ifExists()); } @@ -209,6 +214,8 @@ public FieldsQueryCursor> runDdlStatement(String sql, Prepared prepared) assert tbl.rowDescriptor() != null; + isDdlSupported(tbl); + QueryIndex newIdx = new QueryIndex(); newIdx.setName(cmd.index().getName()); @@ -240,6 +247,8 @@ else if (stmt0 instanceof GridSqlDropIndex) { GridH2Table tbl = idx.dataTableForIndex(cmd.schemaName(), cmd.indexName()); if (tbl != null) { + 
isDdlSupported(tbl); + fut = ctx.query().dynamicIndexDrop(tbl.cacheName(), cmd.schemaName(), cmd.indexName(), cmd.ifExists()); } @@ -392,6 +401,21 @@ else if (stmt0 instanceof GridSqlAlterTableAddColumn) { } } + /** + * Check if table supports DDL statement. + * + * @param tbl Table. + */ + private static void isDdlSupported(GridH2Table tbl) { + GridCacheContext cctx = tbl.cache(); + + assert cctx != null; + + if (cctx.isLocal()) + throw new IgniteSQLException("DDL statements are not supported on LOCAL caches", + IgniteQueryErrorCode.UNSUPPORTED_OPERATION); + } + /** * @return {@link IgniteSQLException} with the message same as of {@code this}'s and */ diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java index f918d59348458..50b090970f33d 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java @@ -33,6 +33,8 @@ import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cache.QueryIndex; import org.apache.ignite.cache.QueryIndexType; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.QueryUtils; @@ -90,6 +92,7 @@ import org.h2.table.Column; import org.h2.table.FunctionTable; import org.h2.table.IndexColumn; +import org.h2.table.MetaTable; import org.h2.table.RangeTable; import org.h2.table.Table; import org.h2.table.TableBase; @@ -616,6 +619,8 @@ else if (tbl instanceof RangeTable) { 
res.addChild(parseExpression(RANGE_MIN.get((RangeTable)tbl), false)); res.addChild(parseExpression(RANGE_MAX.get((RangeTable)tbl), false)); } + else if (tbl instanceof MetaTable) + res = new GridSqlTable(tbl); else assert0(false, "Unexpected Table implementation [cls=" + tbl.getClass().getSimpleName() + ']'); @@ -1507,6 +1512,61 @@ public static Query query(Prepared qry) { throw new CacheException("Unsupported query: " + qry); } + /** + * Check if query may be run locally on all caches mentioned in the query. + * @param replicatedOnlyQry replicated-only query flag from original {@link SqlFieldsQuery}. + * @return {@code true} if query may be run locally on all caches mentioned in the query, i.e. there's no need + * to run distributed query. + * @see SqlFieldsQuery#isReplicatedOnly() + */ + public boolean isLocalQuery(boolean replicatedOnlyQry) { + boolean hasCaches = false; + + for (Object o : h2ObjToGridObj.values()) { + if (o instanceof GridSqlAlias) + o = GridSqlAlias.unwrap((GridSqlAst)o); + + if (o instanceof GridSqlTable) { + GridH2Table tbl = ((GridSqlTable)o).dataTable(); + + if (tbl != null) { + hasCaches = true; + + GridCacheContext cctx = tbl.cache(); + + if (!cctx.isLocal() && !(replicatedOnlyQry && cctx.isReplicatedAffinityNode())) + return false; + } + } + } + + // For consistency with old logic, let's not force locality in absence of caches - + // if there are no caches, original SqlFieldsQuery's isLocal flag will be used. + return hasCaches; + } + + /** + * Get first (i.e. random, as we need any one) partitioned cache from parsed query + * to determine expected query parallelism. + * @return Context for the first of partitioned caches mentioned in the query, + * or {@code null} if it does not involve partitioned caches. 
+ */ + public GridCacheContext getFirstPartitionedCache() { + for (Object o : h2ObjToGridObj.values()) { + if (o instanceof GridSqlAlias) + o = GridSqlAlias.unwrap((GridSqlAst)o); + + if (o instanceof GridSqlTable) { + GridH2Table tbl = ((GridSqlTable)o).dataTable(); + + if (tbl != null && tbl.cache().isPartitioned()) + return tbl.cache(); + } + } + + return null; + } + /** * @param stmt Prepared statement. * @return Parsed AST. diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java index 77b928f062e27..8efa7e91499d2 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java @@ -77,8 +77,8 @@ import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.plugin.extensions.communication.Message; -import org.apache.ignite.thread.IgniteThread; import org.apache.ignite.spi.indexing.IndexingQueryFilter; +import org.apache.ignite.thread.IgniteThread; import org.h2.jdbc.JdbcResultSet; import org.h2.value.Value; import org.jetbrains.annotations.Nullable; diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheCrossCacheQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheCrossCacheQuerySelfTest.java index bbd3d0d9e85eb..069bdd7092d16 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheCrossCacheQuerySelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheCrossCacheQuerySelfTest.java @@ -140,7 +140,7 @@ public void testTwoStepGroupAndAggregates() throws 
Exception { SqlFieldsQuery qry = new SqlFieldsQuery("select f.productId, p.name, f.price " + "from FactPurchase f, \"replicated-prod\".DimProduct p where p.id = f.productId "); - for (List o : qryProc.querySqlFields(cache.context(), qry, false).getAll()) { + for (List o : qryProc.querySqlFields(cache.context(), qry, false, true).get(0).getAll()) { X.println("___ -> " + o); set1.add((Integer)o.get(0)); @@ -154,7 +154,7 @@ public void testTwoStepGroupAndAggregates() throws Exception { qry = new SqlFieldsQuery("select productId from FactPurchase group by productId"); - for (List o : qryProc.querySqlFields(cache.context(), qry, false).getAll()) { + for (List o : qryProc.querySqlFields(cache.context(), qry, false, true).get(0).getAll()) { X.println("___ -> " + o); assertTrue(set0.add((Integer) o.get(0))); @@ -173,7 +173,7 @@ public void testTwoStepGroupAndAggregates() throws Exception { "where p.id = f.productId " + "group by f.productId, p.name"); - for (List o : qryProc.querySqlFields(cache.context(), qry, false).getAll()) { + for (List o : qryProc.querySqlFields(cache.context(), qry, false, true).get(0).getAll()) { X.println("___ -> " + o); assertTrue(names.add((String)o.get(0))); @@ -190,7 +190,7 @@ public void testTwoStepGroupAndAggregates() throws Exception { "group by f.productId, p.name " + "having s >= 15"); - for (List o : qryProc.querySqlFields(cache.context(), qry, false).getAll()) { + for (List o : qryProc.querySqlFields(cache.context(), qry, false, true).get(0).getAll()) { X.println("___ -> " + o); assertTrue(i(o, 1) >= 15); @@ -203,7 +203,7 @@ public void testTwoStepGroupAndAggregates() throws Exception { qry = new SqlFieldsQuery("select top 3 distinct productId " + "from FactPurchase f order by productId desc "); - for (List o : qryProc.querySqlFields(cache.context(), qry, false).getAll()) { + for (List o : qryProc.querySqlFields(cache.context(), qry, false, true).get(0).getAll()) { X.println("___ -> " + o); assertEquals(top--, o.get(0)); @@ -216,7 
+216,7 @@ public void testTwoStepGroupAndAggregates() throws Exception { qry = new SqlFieldsQuery("select distinct productId " + "from FactPurchase f order by productId desc limit 2 offset 1"); - for (List o : qryProc.querySqlFields(cache.context(), qry, false).getAll()) { + for (List o : qryProc.querySqlFields(cache.context(), qry, false, true).get(0).getAll()) { X.println("___ -> " + o); assertEquals(top--, o.get(0)); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCheckClusterStateBeforeExecuteQueryTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCheckClusterStateBeforeExecuteQueryTest.java index aae6a0ca8a2f0..f57a8c6ce534b 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCheckClusterStateBeforeExecuteQueryTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCheckClusterStateBeforeExecuteQueryTest.java @@ -80,7 +80,7 @@ public void testDynamicSchemaChangesPersistence() throws Exception { assertThrows(log, new Callable() { @Override public Void call() throws Exception { - ig.context().query().querySqlFieldsNoCache(new SqlFieldsQuery("SELECT 1"), false); + ig.context().query().querySqlFields(new SqlFieldsQuery("SELECT 1"), false); return null; } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/SqlFieldsQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/SqlFieldsQuerySelfTest.java index 8860b2ba495e9..f68484171f7bb 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/SqlFieldsQuerySelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/SqlFieldsQuerySelfTest.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.cache; import java.io.Serializable; +import java.sql.PreparedStatement; import java.util.List; import 
org.apache.ignite.IgniteCache; import org.apache.ignite.cache.CacheMode; @@ -25,12 +26,16 @@ import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; /** * */ public class SqlFieldsQuerySelfTest extends GridCommonAbstractTest { + /** INSERT statement. */ + private final static String INSERT = "insert into Person(_key, name) values (5, 'x')"; + /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { stopAllGrids(); @@ -60,6 +65,50 @@ public void testSqlFieldsQueryWithTopologyChanges() throws Exception { executeQuery(); } + /** + * @throws Exception If error. + */ + public void testQueryCaching() throws Exception { + startGrid(0); + + PreparedStatement stmt = null; + + for (int i = 0; i < 2; i++) { + createAndFillCache(); + + PreparedStatement stmt0 = grid(0).context().query().prepareNativeStatement("person", INSERT); + + // Statement should either be parsed initially or in response to schema change... + assertTrue(stmt != stmt0); + + stmt = stmt0; + + // ...and be properly compiled considering schema changes to be properly parsed + new GridSqlQueryParser(false).parse(GridSqlQueryParser.prepared(stmt)); + + destroyCache(); + } + + stmt = null; + + createAndFillCache(); + + // Now let's do the same without restarting the cache. + for (int i = 0; i < 2; i++) { + PreparedStatement stmt0 = grid(0).context().query().prepareNativeStatement("person", INSERT); + + // Statement should either be parsed or taken from cache as no schema changes occurred... 
+ assertTrue(stmt == null || stmt == stmt0); + + stmt = stmt0; + + // ...and be properly compiled considering schema changes to be properly parsed + new GridSqlQueryParser(false).parse(GridSqlQueryParser.prepared(stmt)); + } + + destroyCache(); + } + /** * */ @@ -101,6 +150,10 @@ private IgniteCache createAndFillCache() { return cache; } + private void destroyCache() { + grid(0).destroyCache("person"); + } + /** * */ diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/replicated/IgniteCacheReplicatedQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/replicated/IgniteCacheReplicatedQuerySelfTest.java index a679b159771e2..ec1a16d670028 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/replicated/IgniteCacheReplicatedQuerySelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/replicated/IgniteCacheReplicatedQuerySelfTest.java @@ -196,7 +196,22 @@ public void testIterator() throws Exception { /** * @throws Exception If test failed. */ - public void testLocalQuery() throws Exception { + public void testLocalQueryWithExplicitFlag() throws Exception { + doTestLocalQuery(true); + } + + /** + * @throws Exception If test failed. + */ + public void testLocalQueryWithoutExplicitFlag() throws Exception { + doTestLocalQuery(false); + } + + /** + * @param loc Explicit query locality flag. + * @throws Exception if failed. 
+ */ + private void doTestLocalQuery(boolean loc) throws Exception { cache1.clear(); Transaction tx = ignite1.transactions().txStart(); @@ -217,9 +232,9 @@ public void testLocalQuery() throws Exception { throw e; } - checkQueryResults(cache1); - checkQueryResults(cache2); - checkQueryResults(cache3); + checkLocalQueryResults(cache1, loc); + checkLocalQueryResults(cache2, loc); + checkLocalQueryResults(cache3, loc); } /** @@ -403,11 +418,13 @@ public void testNodeLeft() throws Exception { /** * @param cache Cache. + * @param loc Explicit query locality flag. * @throws Exception If check failed. */ - private void checkQueryResults(IgniteCache cache) throws Exception { + private void checkLocalQueryResults(IgniteCache cache, boolean loc) throws Exception { QueryCursor> qry = - cache.query(new SqlQuery(CacheValue.class, "val > 1 and val < 4").setLocal(true)); + cache.query(new SqlQuery(CacheValue.class, "val > 1 and val < 4") + .setReplicatedOnly(true).setLocal(loc)); Iterator> iter = qry.iterator(); diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/AbstractSchemaSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/AbstractSchemaSelfTest.java index eb3b8e15a9978..94b0a313f84df 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/AbstractSchemaSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/AbstractSchemaSelfTest.java @@ -17,6 +17,12 @@ package org.apache.ignite.internal.processors.cache.index; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteCheckedException; @@ -46,13 +52,6 @@ import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import 
org.jetbrains.annotations.Nullable; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - /** * Tests for dynamic schema changes. */ @@ -79,6 +78,9 @@ public class AbstractSchemaSelfTest extends GridCommonAbstractTest { /** Index name 2 escaped. */ protected static final String IDX_NAME_2_ESCAPED = "idx_2"; + /** Index name 2. */ + protected static final String IDX_NAME_LOCAL = "IDX_LOC"; + /** Key ID field. */ protected static final String FIELD_KEY = "id"; diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractTest.java index 2beea8b452567..022110a90072a 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractTest.java @@ -31,9 +31,9 @@ import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; @@ -286,7 +286,7 @@ IgniteConfiguration serverConfiguration(int idx) throws Exception { */ protected List> run(Ignite node, String sql) { return ((IgniteEx)node).context().query() - .querySqlFieldsNoCache(new SqlFieldsQuery(sql).setSchema(QueryUtils.DFLT_SCHEMA), 
true).getAll(); + .querySqlFields(new SqlFieldsQuery(sql).setSchema(QueryUtils.DFLT_SCHEMA), true).getAll(); } /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractBasicSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractBasicSelfTest.java index 56f28d439c9e4..9c44352254924 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractBasicSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractBasicSelfTest.java @@ -41,7 +41,6 @@ import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; -import static org.apache.ignite.cache.CacheMode.LOCAL; import static org.apache.ignite.cache.CacheMode.PARTITIONED; import static org.apache.ignite.cache.CacheMode.REPLICATED; @@ -199,7 +198,7 @@ private void checkCreate(CacheMode mode, CacheAtomicityMode atomicityMode, boole dynamicIndexCreate(CACHE_NAME, TBL_NAME, idx, false); assertIndex(CACHE_NAME, TBL_NAME, IDX_NAME_1, field(FIELD_NAME_1_ESCAPED)); - assertSchemaException(new RunnableX() { + assertIgniteSqlException(new RunnableX() { @Override public void run() throws Exception { dynamicIndexCreate(CACHE_NAME, TBL_NAME, idx, false); } @@ -441,7 +440,7 @@ private void checkCreateNoTable(CacheMode mode, CacheAtomicityMode atomicityMode final QueryIndex idx = index(IDX_NAME_1, field(FIELD_NAME_1_ESCAPED)); - assertSchemaException(new RunnableX() { + assertIgniteSqlException(new RunnableX() { @Override public void run() throws Exception { dynamicIndexCreate(CACHE_NAME, randomString(), idx, false); } @@ -517,7 +516,7 @@ private void checkCreateNoColumn(CacheMode mode, CacheAtomicityMode atomicityMod final QueryIndex idx = index(IDX_NAME_1, field(randomString())); - assertSchemaException(new RunnableX() { + 
assertIgniteSqlException(new RunnableX() { @Override public void run() throws Exception { dynamicIndexCreate(CACHE_NAME, TBL_NAME, idx, false); } @@ -592,7 +591,7 @@ private void checkCreateColumnWithAlias(CacheMode mode, CacheAtomicityMode atomi throws Exception { initialize(mode, atomicityMode, near); - assertSchemaException(new RunnableX() { + assertIgniteSqlException(new RunnableX() { @Override public void run() throws Exception { QueryIndex idx = index(IDX_NAME_1, field(FIELD_NAME_2_ESCAPED)); @@ -773,7 +772,7 @@ public void testDropNoIndexReplicatedTransactional() throws Exception { private void checkDropNoIndex(CacheMode mode, CacheAtomicityMode atomicityMode, boolean near) throws Exception { initialize(mode, atomicityMode, near); - assertSchemaException(new RunnableX() { + assertIgniteSqlException(new RunnableX() { @Override public void run() throws Exception { dynamicIndexDrop(CACHE_NAME, IDX_NAME_1, false); } @@ -877,12 +876,12 @@ private void checkDropNoCache(CacheMode mode, CacheAtomicityMode atomicityMode, public void testFailOnLocalCache() throws Exception { for (Ignite node : Ignition.allGrids()) { if (!node.configuration().isClientMode()) - createSqlCache(node, cacheConfiguration().setCacheMode(LOCAL)); + createSqlCache(node, localCacheConfiguration()); } final QueryIndex idx = index(IDX_NAME_1, field(FIELD_NAME_1_ESCAPED)); - assertSchemaException(new RunnableX() { + assertIgniteSqlException(new RunnableX() { @Override public void run() throws Exception { dynamicIndexCreate(CACHE_NAME, TBL_NAME, idx, true); } @@ -890,9 +889,9 @@ public void testFailOnLocalCache() throws Exception { assertNoIndex(CACHE_NAME, TBL_NAME, IDX_NAME_1); - assertSchemaException(new RunnableX() { + assertIgniteSqlException(new RunnableX() { @Override public void run() throws Exception { - dynamicIndexDrop(CACHE_NAME, IDX_NAME_1, true); + dynamicIndexDrop(CACHE_NAME, IDX_NAME_LOCAL, true); } }, IgniteQueryErrorCode.UNSUPPORTED_OPERATION); } @@ -1107,8 +1106,8 @@ private void 
assertCompositeIndexOperations(String sql) { * @param r Runnable. * @param expCode Error code. */ - protected static void assertSchemaException(RunnableX r, int expCode) { - assertSchemaException(r, null, expCode); + protected static void assertIgniteSqlException(RunnableX r, int expCode) { + assertIgniteSqlException(r, null, expCode); } /** @@ -1118,7 +1117,7 @@ protected static void assertSchemaException(RunnableX r, int expCode) { * @param msg Exception message to expect, or {@code null} if it can be waived. * @param expCode Error code. */ - protected static void assertSchemaException(RunnableX r, String msg, int expCode) { + private static void assertIgniteSqlException(RunnableX r, String msg, int expCode) { try { r.run(); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractSelfTest.java index a39283b0ca332..452ac96ba9e35 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractSelfTest.java @@ -17,20 +17,29 @@ package org.apache.ignite.internal.processors.cache.index; +import java.io.Serializable; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import javax.cache.Cache; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteDataStreamer; import org.apache.ignite.Ignition; import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cache.QueryEntity; +import org.apache.ignite.cache.QueryIndex; import org.apache.ignite.cache.query.SqlFieldsQuery; import 
org.apache.ignite.cache.query.SqlQuery; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.DataRegionConfiguration; -import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.binary.BinaryMarshaller; import org.apache.ignite.internal.util.typedef.T2; @@ -39,14 +48,6 @@ import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; -import javax.cache.Cache; -import java.io.Serializable; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.UUID; - /** * Tests for dynamic index creation. */ @@ -181,6 +182,20 @@ protected CacheConfiguration cacheConfiguration() { return ccfg; } + /** + * @return Local cache configuration with a pre-configured index. + */ + CacheConfiguration localCacheConfiguration() { + CacheConfiguration res = cacheConfiguration(); + + res.getQueryEntities().iterator().next().setIndexes(Collections.singletonList( + new QueryIndex(FIELD_NAME_2_ESCAPED, true, IDX_NAME_LOCAL))); + + res.setCacheMode(CacheMode.LOCAL); + + return res; + } + /** * Ensure index is used in plan. 
* diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicIndexingComplexTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicIndexingComplexTest.java index 845e3474682d1..a3087452d97a0 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicIndexingComplexTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicIndexingComplexTest.java @@ -318,7 +318,7 @@ private void assertPerson(int id, String name, int age, String company, String c * @return Run result. */ private List> executeSql(IgniteEx node, String stmt, Object... args) { - return node.context().query().querySqlFieldsNoCache(new SqlFieldsQuery(stmt).setArgs(args), true).getAll(); + return node.context().query().querySqlFields(new SqlFieldsQuery(stmt).setArgs(args), true).getAll(); } /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java index 82602f04dbce3..5c9ce8648a5e8 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java @@ -847,11 +847,11 @@ public void testAffinityKey() throws Exception { personId2cityCode.put(i, cityCode); - queryProcessor(client()).querySqlFieldsNoCache(new SqlFieldsQuery("insert into \"Person2\"(\"id\", " + + queryProcessor(client()).querySqlFields(new SqlFieldsQuery("insert into \"Person2\"(\"id\", " + "\"city\") values (?, ?)").setArgs(i, cityName), true).getAll(); } - List> res = queryProcessor(client()).querySqlFieldsNoCache(new SqlFieldsQuery("select \"id\", " + + List> res = queryProcessor(client()).querySqlFields(new 
SqlFieldsQuery("select \"id\", " + "c.\"code\" from \"Person2\" p left join \"City\" c on p.\"city\" = c.\"name\" where c.\"name\" " + "is not null"), true).getAll(); @@ -1511,7 +1511,7 @@ private IgniteConfiguration commonConfiguration(int idx) throws Exception { * @param sql Statement. */ private List> execute(Ignite node, String sql) { - return queryProcessor(node).querySqlFieldsNoCache(new SqlFieldsQuery(sql).setSchema("PUBLIC"), true).getAll(); + return queryProcessor(node).querySqlFields(new SqlFieldsQuery(sql).setSchema("PUBLIC"), true).getAll(); } /** @@ -1520,7 +1520,7 @@ private List> execute(Ignite node, String sql) { * @param sql Statement. */ private List> executeLocal(GridCacheContext cctx, String sql) { - return queryProcessor(cctx.grid()).querySqlFields(cctx, new SqlFieldsQuery(sql).setLocal(true), true).getAll(); + return queryProcessor(cctx.grid()).querySqlFields(new SqlFieldsQuery(sql).setLocal(true), true).getAll(); } /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/local/IgniteCacheLocalQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/local/IgniteCacheLocalQuerySelfTest.java index e0c63966d4328..2570bc896c664 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/local/IgniteCacheLocalQuerySelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/local/IgniteCacheLocalQuerySelfTest.java @@ -47,44 +47,50 @@ public class IgniteCacheLocalQuerySelfTest extends IgniteCacheAbstractQuerySelfT * @throws Exception If test failed. */ public void testQueryLocal() throws Exception { - IgniteCache cache = jcache(Integer.class, String.class); + // Let's do it twice to see how prepared statement caching behaves - without recompilation + // check for cached prepared statements this would fail. 
+ for (int i = 0; i < 2; i ++) { + IgniteCache cache = jcache(Integer.class, String.class); - cache.put(1, "value1"); - cache.put(2, "value2"); - cache.put(3, "value3"); - cache.put(4, "value4"); - cache.put(5, "value5"); + cache.put(1, "value1"); + cache.put(2, "value2"); + cache.put(3, "value3"); + cache.put(4, "value4"); + cache.put(5, "value5"); - // Tests equals query. - QueryCursor> qry = - cache.query(new SqlQuery(String.class, "_val='value1'").setLocal(true)); + // Tests equals query. + QueryCursor> qry = + cache.query(new SqlQuery(String.class, "_val='value1'").setLocal(true)); - Iterator> iter = qry.iterator(); + Iterator> iter = qry.iterator(); - Cache.Entry entry = iter.next(); + Cache.Entry entry = iter.next(); - assert !iter.hasNext(); + assert !iter.hasNext(); - assert entry != null; - assert entry.getKey() == 1; - assert "value1".equals(entry.getValue()); + assert entry != null; + assert entry.getKey() == 1; + assert "value1".equals(entry.getValue()); - // Tests like query. - qry = cache.query(new SqlQuery(String.class, "_val like 'value%'").setLocal(true)); + // Tests like query. + qry = cache.query(new SqlQuery(String.class, "_val like 'value%'").setLocal(true)); - iter = qry.iterator(); + iter = qry.iterator(); - assert iter.next() != null; - assert iter.next() != null; - assert iter.next() != null; - assert iter.next() != null; - assert iter.next() != null; - assert !iter.hasNext(); + assert iter.next() != null; + assert iter.next() != null; + assert iter.next() != null; + assert iter.next() != null; + assert iter.next() != null; + assert !iter.hasNext(); - // Test explain for primitive index. - List> res = cache.query(new SqlFieldsQuery( - "explain select _key from String where _val > 'value1'").setLocal(true)).getAll(); + // Test explain for primitive index. 
+ List> res = cache.query(new SqlFieldsQuery( + "explain select _key from String where _val > 'value1'").setLocal(true)).getAll(); - assertTrue("__ explain: \n" + res, ((String)res.get(0).get(0)).toLowerCase().contains("_val_idx")); + assertTrue("__ explain: \n" + res, ((String) res.get(0).get(0)).toLowerCase().contains("_val_idx")); + + cache.destroy(); + } } } \ No newline at end of file diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreSchemaLoadTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreSchemaLoadTest.java index 14749546d9766..2b7c5c2748a89 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreSchemaLoadTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgnitePersistentStoreSchemaLoadTest.java @@ -193,8 +193,8 @@ private void checkSchemaStateAfterNodeRestart(boolean aliveCluster) throws Excep CountDownLatch cnt = checkpointLatch(node); - node.context().query().querySqlFieldsNoCache( - new SqlFieldsQuery("create table \"Person\" (\"id\" int primary key, \"name\" varchar)"), false).getAll(); + node.context().query().querySqlFields( + new SqlFieldsQuery("create table \"Person\" (\"id\" int primary key, \"name\" varchar)"), false); assertEquals(0, indexCnt(node, SQL_CACHE_NAME)); @@ -212,7 +212,7 @@ private void checkSchemaStateAfterNodeRestart(boolean aliveCluster) throws Excep checkDynamicSchemaChanges(node, SQL_CACHE_NAME); - node.context().query().querySqlFieldsNoCache(new SqlFieldsQuery("drop table \"Person\""), false).getAll(); + node.context().query().querySqlFields(new SqlFieldsQuery("drop table \"Person\""), false).getAll(); } /** */ @@ -280,13 +280,17 @@ private CountDownLatch checkpointLatch(IgniteEx node) { * @param schema Schema name. 
*/ private void makeDynamicSchemaChanges(IgniteEx node, String schema) { - node.context().query().querySqlFieldsNoCache( + node.context().query().querySqlFields( new SqlFieldsQuery("create index \"my_idx\" on \"Person\" (\"id\", \"name\")").setSchema(schema), false) .getAll(); - node.context().query().querySqlFieldsNoCache( - new SqlFieldsQuery("alter table \"Person\" add column \"age\" int").setSchema(schema), false) - .getAll(); + node.context().query().querySqlFields( + new SqlFieldsQuery("alter table \"Person\" add column (\"age\" int, \"city\" char)") + .setSchema(schema), false).getAll(); + + node.context().query().querySqlFields( + new SqlFieldsQuery("alter table \"Person\" drop column \"city\"").setSchema(schema), false) + .getAll(); } /** diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteCachelessQueriesSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteCachelessQueriesSelfTest.java new file mode 100644 index 0000000000000..a7dae9e849421 --- /dev/null +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteCachelessQueriesSelfTest.java @@ -0,0 +1,420 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query; + +import java.io.Serializable; +import java.util.Map; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheKeyConfiguration; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.cache.query.annotations.QuerySqlField; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery; +import org.apache.ignite.internal.processors.query.h2.H2TwoStepCachedQuery; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +/** + * Tests for behavior in various cases of local and distributed queries. 
+ */ +public class IgniteCachelessQueriesSelfTest extends GridCommonAbstractTest { + /** */ + private static final TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true); + + /** */ + private final static String SELECT = + "select count(*) from \"pers\".Person p, \"org\".Organization o where p.orgId = o._key"; + + /** */ + private static final String ORG_CACHE_NAME = "org"; + + /** */ + private static final String PERSON_CAHE_NAME = "pers"; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + CacheKeyConfiguration keyCfg = new CacheKeyConfiguration("MyCache", "affKey"); + + cfg.setCacheKeyConfiguration(keyCfg); + + cfg.setPeerClassLoadingEnabled(false); + + TcpDiscoverySpi disco = new TcpDiscoverySpi(); + + disco.setIpFinder(ipFinder); + + cfg.setDiscoverySpi(disco); + + return cfg; + } + + /** @return number of nodes to be prestarted. */ + private int nodesCount() { + return 1; + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + startGrids(nodesCount()); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + super.afterTest(); + } + + /** + * @param name Cache name. + * @param mode Cache mode. + * @param idxTypes Indexed types. + * @return Cache configuration. + */ + protected CacheConfiguration cacheConfig(String name, TestCacheMode mode, Class... idxTypes) { + return new CacheConfiguration() + .setName(name) + .setCacheMode(mode == TestCacheMode.REPLICATED ? CacheMode.REPLICATED : CacheMode.PARTITIONED) + .setQueryParallelism(mode == TestCacheMode.SEGMENTED ? 
5 : 1) + .setAtomicityMode(CacheAtomicityMode.ATOMIC) + .setIndexedTypes(idxTypes); + } + + /** + * + */ + public void testDistributedQueryOnPartitionedCaches() { + createCachesAndExecuteQuery(TestCacheMode.PARTITIONED, TestCacheMode.PARTITIONED, false, false); + + assertDistributedQuery(); + } + + /** + * + */ + public void testDistributedQueryOnPartitionedAndReplicatedCache() { + createCachesAndExecuteQuery(TestCacheMode.PARTITIONED, TestCacheMode.REPLICATED, false, false); + + assertDistributedQuery(); + } + + /** + * + */ + public void testDistributedQueryOnReplicatedCaches() { + createCachesAndExecuteQuery(TestCacheMode.REPLICATED, TestCacheMode.REPLICATED, false, false); + + assertDistributedQuery(); + } + + /** + * + */ + public void testDistributedQueryOnSegmentedCaches() { + createCachesAndExecuteQuery(TestCacheMode.SEGMENTED, TestCacheMode.SEGMENTED, false, false); + + assertDistributedQuery(); + } + + /** + * + */ + public void testDistributedQueryOnReplicatedAndSegmentedCache() { + createCachesAndExecuteQuery(TestCacheMode.REPLICATED, TestCacheMode.SEGMENTED, false, false); + + assertDistributedQuery(); + } + + /** + * + */ + public void testDistributedQueryOnPartitionedCachesWithReplicatedFlag() { + createCachesAndExecuteQuery(TestCacheMode.PARTITIONED, TestCacheMode.PARTITIONED, true, false); + + assertDistributedQuery(); + } + + /** + * + */ + public void testDistributedQueryOnPartitionedAndReplicatedCacheWithReplicatedFlag() { + createCachesAndExecuteQuery(TestCacheMode.PARTITIONED, TestCacheMode.REPLICATED, true, false); + + assertDistributedQuery(); + } + + /** + * + */ + public void testLocalQueryOnReplicatedCachesWithReplicatedFlag() { + createCachesAndExecuteQuery(TestCacheMode.REPLICATED, TestCacheMode.REPLICATED, true, false); + + assertLocalQuery(); + } + + /** + * + */ + public void testDistributedQueryOnSegmentedCachesWithReplicatedFlag() { + createCachesAndExecuteQuery(TestCacheMode.SEGMENTED, TestCacheMode.SEGMENTED, true, false); + + 
assertDistributedQuery(); + } + + /** + * + */ + public void testDistributedQueryOnReplicatedAndSegmentedCacheWithReplicatedFlag() { + createCachesAndExecuteQuery(TestCacheMode.REPLICATED, TestCacheMode.SEGMENTED, true, false); + + assertDistributedQuery(); + } + + /** + * + */ + public void testLocalQueryOnPartitionedCachesWithLocalFlag() { + createCachesAndExecuteQuery(TestCacheMode.PARTITIONED, TestCacheMode.PARTITIONED, false, true); + + assertLocalQuery(); + } + + /** + * + */ + public void testLocalQueryOnPartitionedAndReplicatedCacheWithLocalFlag() { + createCachesAndExecuteQuery(TestCacheMode.PARTITIONED, TestCacheMode.REPLICATED, false, true); + + assertLocalQuery(); + } + + /** + * + */ + public void testLocalQueryOnReplicatedCachesWithLocalFlag() { + createCachesAndExecuteQuery(TestCacheMode.REPLICATED, TestCacheMode.REPLICATED, false, true); + + assertLocalQuery(); + } + + /** + * + */ + public void testLocalTwoStepQueryOnSegmentedCachesWithLocalFlag() { + createCachesAndExecuteQuery(TestCacheMode.SEGMENTED, TestCacheMode.SEGMENTED, false, true); + + assertLocalTwoStepQuery(); + } + + /** + * + */ + public void testLocalTwoStepQueryOnReplicatedAndSegmentedCacheWithLocalFlag() { + createCachesAndExecuteQuery(TestCacheMode.REPLICATED, TestCacheMode.SEGMENTED, false, true); + + assertLocalTwoStepQuery(); + } + + /** + * + */ + public void testLocalQueryOnPartitionedCachesWithReplicatedAndLocalFlag() { + createCachesAndExecuteQuery(TestCacheMode.PARTITIONED, TestCacheMode.PARTITIONED, false, true); + + assertLocalQuery(); + } + + /** + * + */ + public void testLocalQueryOnPartitionedAndReplicatedCacheWithReplicatedAndLocalFlag() { + createCachesAndExecuteQuery(TestCacheMode.PARTITIONED, TestCacheMode.REPLICATED, true, true); + + assertLocalQuery(); + } + + /** + * + */ + public void testLocalQueryOnReplicatedCachesWithReplicatedAndLocalFlag() { + createCachesAndExecuteQuery(TestCacheMode.REPLICATED, TestCacheMode.REPLICATED, true, true); + + 
assertLocalQuery(); + } + + /** + * + */ + public void testLocalTwoStepQueryOnSegmentedCachesWithReplicatedAndLocalFlag() { + createCachesAndExecuteQuery(TestCacheMode.SEGMENTED, TestCacheMode.SEGMENTED, true, true); + + assertLocalTwoStepQuery(); + } + + /** + * + */ + public void testLocalTwoStepQueryOnReplicatedAndSegmentedCacheWithReplicatedAndLocalFlag() { + createCachesAndExecuteQuery(TestCacheMode.REPLICATED, TestCacheMode.SEGMENTED, true, true); + + assertLocalTwoStepQuery(); + } + + /** + * @param firstCacheMode First cache mode. + * @param secondCacheMode Second cache mode. + * @param replicatedOnly Replicated only query flag. + * @param loc Local query flag. + */ + private void createCachesAndExecuteQuery(TestCacheMode firstCacheMode, TestCacheMode secondCacheMode, + boolean replicatedOnly, boolean loc) { + Ignite node = ignite(0); + + node.createCache(cacheConfig(PERSON_CAHE_NAME, firstCacheMode, Integer.class, Person.class)); + node.createCache(cacheConfig(ORG_CACHE_NAME, secondCacheMode, Integer.class, Organization.class)); + + IgniteCache c = node.cache(PERSON_CAHE_NAME); + + c.query(new SqlFieldsQuery(SELECT).setReplicatedOnly(replicatedOnly).setLocal(loc)).getAll(); + } + + /** + * @return Cached two-step query, or {@code null} if none occurred. + */ + private GridCacheTwoStepQuery cachedTwoStepQuery() { + GridQueryIndexing idx = grid(0).context().query().getIndexing(); + + Map m = U.field(idx, "twoStepCache"); + + if (m.isEmpty()) + return null; + + H2TwoStepCachedQuery q = m.values().iterator().next(); + + return q.query(); + } + + /** + * Check that truly distributed query has happened. + */ + private void assertDistributedQuery() { + GridCacheTwoStepQuery q = cachedTwoStepQuery(); + + assertNotNull(q); + + assertFalse(q.isLocal()); + } + + /** + * Check that local two-step query has happened. 
+ */ + private void assertLocalTwoStepQuery() { + GridCacheTwoStepQuery q = cachedTwoStepQuery(); + + assertNotNull(q); + + assertTrue(q.isLocal()); + } + + + /** + * Check that no distributed query has happened. + */ + private void assertLocalQuery() { + GridCacheTwoStepQuery q = cachedTwoStepQuery(); + + assertNull(q); + } + + /** + * + */ + private static class Person implements Serializable { + /** */ + @QuerySqlField(index = true) + Integer orgId; + + /** */ + @QuerySqlField + String name; + + /** + * + */ + public Person() { + // No-op. + } + + /** + * @param orgId Organization ID. + * @param name Name. + */ + public Person(int orgId, String name) { + this.orgId = orgId; + this.name = name; + } + } + + /** + * + */ + private static class Organization implements Serializable { + /** */ + @QuerySqlField + String name; + + /** + * + */ + public Organization() { + // No-op. + } + + /** + * @param name Organization name. + */ + public Organization(String name) { + this.name = name; + } + } + + /** + * Mode for test cache. 
+ */ + private enum TestCacheMode { + /** */ + SEGMENTED, + + /** */ + PARTITIONED, + + /** */ + REPLICATED + } +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteQueryDedicatedPoolTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteQueryDedicatedPoolTest.java index 13c0cb2c5e73d..b2f4e47ec2048 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteQueryDedicatedPoolTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteQueryDedicatedPoolTest.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.managers.communication.GridIoManager; import org.apache.ignite.internal.managers.communication.GridIoPolicy; import org.apache.ignite.internal.processors.cache.CacheEntryImpl; +import org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.lang.IgniteBiPredicate; import org.apache.ignite.spi.IgniteSpiAdapter; @@ -96,14 +97,20 @@ public class IgniteQueryDedicatedPoolTest extends GridCommonAbstractTest { } /** - * Tests that SQL queries are executed in dedicated pool + * Tests that SQL queries involving actual network IO are executed in dedicated pool. * @throws Exception If failed. + * @see GridCacheTwoStepQuery#isLocal() */ public void testSqlQueryUsesDedicatedThreadPool() throws Exception { try (Ignite client = startGrid("client")) { IgniteCache cache = client.cache(CACHE_NAME); - QueryCursor> cursor = cache.query(new SqlFieldsQuery("select currentPolicy()")); + // We do this in order to have 1 row in results of select - function is called once per each row of result. + cache.put(1, 1); + + // We have to refer to a cache explicitly in the query in order for it to be executed + // in non local distributed manner (yes, there's a "local distributed" manner too - see link above...) 
+ QueryCursor> cursor = cache.query(new SqlFieldsQuery("select currentPolicy() from Integer")); List> result = cursor.getAll(); @@ -113,8 +120,8 @@ public void testSqlQueryUsesDedicatedThreadPool() throws Exception { Byte plc = (Byte)result.get(0).get(0); - assert plc != null; - assert plc == GridIoPolicy.QUERY_POOL; + assertNotNull(plc); + assertEquals(GridIoPolicy.QUERY_POOL, (byte)plc); } } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlDefaultValueTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlDefaultValueTest.java index 6747e2810dd4a..2edc93b5ec5d2 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlDefaultValueTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlDefaultValueTest.java @@ -228,7 +228,7 @@ private void checkResults(Collection> exp, Collection> actu * @return Results set. */ private List> sql(String sql, Object ... 
args) { - return grid(NODE_CLIENT).context().query().querySqlFieldsNoCache( + return grid(NODE_CLIENT).context().query().querySqlFields( new SqlFieldsQuery(sql).setArgs(args), false).getAll(); } } diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java index 0c3b42c483962..1f4e018889a3b 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/IgniteSqlNotNullConstraintTest.java @@ -1020,7 +1020,7 @@ private void executeForNodeAndCache(CacheConfiguration ccfg, Ignite ignite, Test private List> executeSql(String sqlText) throws Exception { GridQueryProcessor qryProc = grid(NODE_CLIENT).context().query(); - return qryProc.querySqlFieldsNoCache(new SqlFieldsQuery(sqlText), true).getAll(); + return qryProc.querySqlFields(new SqlFieldsQuery(sqlText), true).getAll(); } /** */ diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/MultipleStatementsSqlQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/MultipleStatementsSqlQuerySelfTest.java index 8b9bf40eab6f2..becd5865c642c 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/MultipleStatementsSqlQuerySelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/MultipleStatementsSqlQuerySelfTest.java @@ -60,7 +60,7 @@ public void testQuery() throws Exception { "select * from test;") .setSchema("PUBLIC"); - List>> res = qryProc.querySqlFieldsNoCache(qry, true, false); + List>> res = qryProc.querySqlFields(qry, true, false); assert res.size() == 4 : "Unexpected cursors count: " + res.size(); @@ -106,7 +106,7 @@ public void testQueryWithParameters() throws 
Exception { .setSchema("PUBLIC") .setArgs(1, "name_1", 2, "name2", 3, "name_3"); - List>> res = qryProc.querySqlFieldsNoCache(qry, true, false); + List>> res = qryProc.querySqlFields(qry, true, false); assert res.size() == 4 : "Unexpected cursors count: " + res.size(); @@ -145,10 +145,10 @@ public void testQueryMultipleStatementsFailed() throws Exception { GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { - node.context().query().querySqlFieldsNoCache(qry, true); + node.context().query().querySqlFields(qry, true, true); return null; } }, IgniteSQLException.class, "Multiple statements queries are not supported"); } -} \ No newline at end of file +} diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSchemaSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSchemaSelfTest.java index a4ee2e3a14d68..b271d806d70c0 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSchemaSelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSchemaSelfTest.java @@ -66,13 +66,13 @@ public void testQueryWithoutCacheOnPublicSchema() throws Exception { SqlFieldsQuery qry = new SqlFieldsQuery("SELECT 1").setSchema("PUBLIC"); - List> res = qryProc.querySqlFieldsNoCache(qry, true).getAll(); + List> res = qryProc.querySqlFields(qry, true).getAll(); assertEquals(1, res.size()); assertEquals(1, res.get(0).size()); assertEquals(1, res.get(0).get(0)); - Iterator> iter = qryProc.querySqlFieldsNoCache(qry, true).iterator(); + Iterator> iter = qryProc.querySqlFields(qry, true).iterator(); assertTrue(iter.hasNext()); @@ -98,13 +98,13 @@ public void testQueryWithoutCacheOnCacheSchema() throws Exception { SqlFieldsQuery qry = new SqlFieldsQuery("SELECT 1").setSchema(CACHE_PERSON); - List> res = qryProc.querySqlFieldsNoCache(qry, true).getAll(); + List> res = qryProc.querySqlFields(qry, 
true).getAll(); assertEquals(1, res.size()); assertEquals(1, res.get(0).size()); assertEquals(1, res.get(0).get(0)); - Iterator> iter = qryProc.querySqlFieldsNoCache(qry, true).iterator(); + Iterator> iter = qryProc.querySqlFields(qry, true).iterator(); assertTrue(iter.hasNext()); @@ -236,7 +236,7 @@ public void testCustomSchemaConcurrentUse() throws Exception { @Override public void run() { for (int i = 0; i < 100; i++) { int idx = maxIdx.incrementAndGet(); - + String tbl = "Person" + idx; IgniteCache cache = registerQueryEntity(tbl, "PersonCache" + idx); @@ -282,7 +282,7 @@ private IgniteCache registerQueryEntity(String tbl, String cacheNa private void testQueryEntity(IgniteCache cache, String tbl) { cache.put(1L, new Person("Vasya", 2)); - assertEquals(1, node.context().query().querySqlFieldsNoCache( + assertEquals(1, node.context().query().querySqlFields( new SqlFieldsQuery(String.format("SELECT id, name, orgId FROM TEST.%s where (id = %d)", tbl, 1)), false ).getAll().size()); } diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java index a8a76a305dd2d..71dd84c7b2126 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java +++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java @@ -131,26 +131,27 @@ import org.apache.ignite.internal.processors.cache.query.IndexingSpiQuerySelfTest; import org.apache.ignite.internal.processors.cache.query.IndexingSpiQueryTxSelfTest; import org.apache.ignite.internal.processors.client.ClientConnectorConfigurationValidationSelfTest; +import org.apache.ignite.internal.processors.query.IgniteCachelessQueriesSelfTest; +import org.apache.ignite.internal.processors.query.IgniteQueryDedicatedPoolTest; import org.apache.ignite.internal.processors.query.IgniteSqlDefaultValueTest; import 
org.apache.ignite.internal.processors.query.IgniteSqlDistributedJoinSelfTest; -import org.apache.ignite.internal.processors.query.IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest; -import org.apache.ignite.internal.processors.query.IgniteSqlParameterizedQueryTest; -import org.apache.ignite.internal.processors.query.h2.IgniteSqlBigIntegerKeyTest; -import org.apache.ignite.internal.processors.query.IgniteQueryDedicatedPoolTest; -import org.apache.ignite.internal.processors.query.IgniteSqlSkipReducerOnUpdateDmlSelfTest; import org.apache.ignite.internal.processors.query.IgniteSqlEntryCacheModeAgnosticTest; import org.apache.ignite.internal.processors.query.IgniteSqlKeyValueFieldsTest; import org.apache.ignite.internal.processors.query.IgniteSqlNotNullConstraintTest; +import org.apache.ignite.internal.processors.query.IgniteSqlParameterizedQueryTest; import org.apache.ignite.internal.processors.query.IgniteSqlRoutingTest; import org.apache.ignite.internal.processors.query.IgniteSqlSchemaIndexingTest; import org.apache.ignite.internal.processors.query.IgniteSqlSegmentedIndexMultiNodeSelfTest; import org.apache.ignite.internal.processors.query.IgniteSqlSegmentedIndexSelfTest; +import org.apache.ignite.internal.processors.query.IgniteSqlSkipReducerOnUpdateDmlFlagSelfTest; +import org.apache.ignite.internal.processors.query.IgniteSqlSkipReducerOnUpdateDmlSelfTest; import org.apache.ignite.internal.processors.query.IgniteSqlSplitterSelfTest; import org.apache.ignite.internal.processors.query.LazyQuerySelfTest; import org.apache.ignite.internal.processors.query.MultipleStatementsSqlQuerySelfTest; import org.apache.ignite.internal.processors.query.SqlSchemaSelfTest; import org.apache.ignite.internal.processors.query.h2.GridH2IndexingInMemSelfTest; import org.apache.ignite.internal.processors.query.h2.GridH2IndexingOffheapSelfTest; +import org.apache.ignite.internal.processors.query.h2.IgniteSqlBigIntegerKeyTest; import 
org.apache.ignite.internal.processors.query.h2.IgniteSqlQueryMinMaxTest; import org.apache.ignite.internal.processors.query.h2.sql.BaseH2CompareQueryTest; import org.apache.ignite.internal.processors.query.h2.sql.GridQueryParsingTest; @@ -218,6 +219,7 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(LazyQuerySelfTest.class); suite.addTestSuite(IgniteSqlSplitterSelfTest.class); suite.addTestSuite(IgniteSqlSegmentedIndexSelfTest.class); + suite.addTestSuite(IgniteCachelessQueriesSelfTest.class); suite.addTestSuite(IgniteSqlSegmentedIndexMultiNodeSelfTest.class); suite.addTestSuite(IgniteSqlSchemaIndexingTest.class); suite.addTestSuite(GridCacheQueryIndexDisabledSelfTest.class); diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/SqlQueryTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/SqlQueryTest.cs new file mode 100644 index 0000000000000..2fcb066b3f42a --- /dev/null +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/SqlQueryTest.cs @@ -0,0 +1,228 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +namespace Apache.Ignite.Core.Tests.Client.Cache +{ + using System; + using System.Linq; + using Apache.Ignite.Core.Cache.Query; + using Apache.Ignite.Core.Client; + using NUnit.Framework; + + /// + /// Tests SQL queries via thin client. + /// + public class SqlQueryTest : SqlQueryTestBase + { + /// + /// Tests the SQL query. + /// + [Test] + public void TestSqlQuery() + { + var cache = GetClientCache(); + + // All items. + var qry = new SqlQuery(typeof(Person), "where 1 = 1"); + Assert.AreEqual(Count, cache.Query(qry).Count()); + + // All items local. + qry.Local = true; + Assert.Greater(Count, cache.Query(qry).Count()); + + // Filter. + qry = new SqlQuery(typeof(Person), "where Name like '%7'"); + Assert.AreEqual(7, cache.Query(qry).Single().Key); + + // Args. + qry = new SqlQuery(typeof(Person), "where Id = ?", 3); + Assert.AreEqual(3, cache.Query(qry).Single().Value.Id); + + // DateTime. + qry = new SqlQuery(typeof(Person), "where DateTime > ?", DateTime.UtcNow.AddDays(Count - 1)); + Assert.AreEqual(Count, cache.Query(qry).Single().Key); + + // Invalid args. + qry.Sql = null; + Assert.Throws(() => cache.Query(qry)); + + qry.Sql = "abc"; + qry.QueryType = null; + Assert.Throws(() => cache.Query(qry)); + } + + /// + /// Tests the SQL query with distributed joins. + /// + [Test] + public void TestSqlQueryDistributedJoins() + { + var cache = GetClientCache(); + + // Non-distributed join returns incomplete results. + var qry = new SqlQuery(typeof(Person), + string.Format("from \"{0}\".Person, \"{1}\".Person as p2 where Person.Id = 11 - p2.Id", + CacheName, CacheName2)); + + Assert.Greater(Count, cache.Query(qry).Count()); + + // Distributed join fixes the problem. + qry.EnableDistributedJoins = true; + Assert.AreEqual(Count, cache.Query(qry).Count()); + } + + /// + /// Tests the fields query. + /// + [Test] + public void TestFieldsQuery() + { + var cache = GetClientCache(); + + // All items. 
+ var qry = new SqlFieldsQuery("select Id from Person"); + var cursor = cache.Query(qry); + CollectionAssert.AreEquivalent(Enumerable.Range(1, Count), cursor.Select(x => (int) x[0])); + Assert.AreEqual("ID", cursor.FieldNames.Single()); + + // All items local. + qry.Local = true; + Assert.Greater(Count, cache.Query(qry).Count()); + + // Filter. + qry = new SqlFieldsQuery("select Name from Person where Id = ?", 1) + { + Lazy = true, + PageSize = 5, + }; + Assert.AreEqual("Person 1", cache.Query(qry).Single().Single()); + + // DateTime. + qry = new SqlFieldsQuery("select Id, DateTime from Person where DateTime > ?", DateTime.UtcNow.AddDays(9)); + cursor = cache.Query(qry); + Assert.AreEqual(new[] {"ID", "DATETIME" }, cursor.FieldNames); + Assert.AreEqual(cache[Count].DateTime, cursor.Single().Last()); + + // Invalid args. + qry.Sql = null; + Assert.Throws(() => cache.Query(qry)); + } + + /// + /// Tests the SQL fields query with distributed joins. + /// + [Test] + public void TestFieldsQueryDistributedJoins() + { + var cache = GetClientCache(); + + // Non-distributed join returns incomplete results. + var qry = new SqlFieldsQuery(string.Format( + "select p2.Name from \"{0}\".Person, \"{1}\".Person as p2 where Person.Id = 11 - p2.Id", + CacheName, CacheName2)); + + Assert.Greater(Count, cache.Query(qry).Count()); + + // Distributed join fixes the problem. + qry.EnableDistributedJoins = true; + Assert.AreEqual(Count, cache.Query(qry).Count()); + } + + /// + /// Tests the fields query timeout. + /// + [Test] + public void TestFieldsQueryTimeout() + { + var cache = GetClientCache(); + + cache.PutAll(Enumerable.Range(1, 30000).ToDictionary(x => x, x => new Person(x))); + + var qry = new SqlFieldsQuery("select * from Person where Name like '%ers%'") + { + Timeout = TimeSpan.FromMilliseconds(1) + }; + + Assert.Throws(() => cache.Query(qry).GetAll()); + } + + /// + /// Tests the fields query on a missing cache. 
+ /// + [Test] + public void TestFieldsQueryMissingCache() + { + var cache = Client.GetCache("I do not exist"); + var qry = new SqlFieldsQuery("select name from person") + { + Schema = CacheName + }; + + // Schema is set => we still check for cache existence. + var ex = Assert.Throws(() => cache.Query(qry).GetAll()); + Assert.AreEqual("Cache doesn't exist: I do not exist", ex.Message); + + // Schema not set => also exception. + qry.Schema = null; + ex = Assert.Throws(() => cache.Query(qry).GetAll()); + Assert.AreEqual("Cache doesn't exist: I do not exist", ex.Message); + } + + /// + /// Tests fields query with custom schema. + /// + [Test] + public void TestFieldsQueryCustomSchema() + { + var cache1 = Client.GetCache(CacheName); + var cache2 = Client.GetCache(CacheName2); + + cache1.RemoveAll(); + + var qry = new SqlFieldsQuery("select name from person"); + + // Schema not set: cache name is used. + Assert.AreEqual(0, cache1.Query(qry).Count()); + Assert.AreEqual(Count, cache2.Query(qry).Count()); + + // Schema set to first cache: no results both cases. + qry.Schema = cache1.Name; + Assert.AreEqual(0, cache1.Query(qry).Count()); + Assert.AreEqual(0, cache2.Query(qry).Count()); + + // Schema set to second cache: full results both cases. + qry.Schema = cache2.Name; + Assert.AreEqual(Count, cache1.Query(qry).Count()); + Assert.AreEqual(Count, cache2.Query(qry).Count()); + } + + /// + /// Tests the DML. 
+ /// + [Test] + public void TestDml() + { + var cache = GetClientCache(); + + var qry = new SqlFieldsQuery("insert into Person (_key, id, name) values (?, ?, ?)", -10, 1, "baz"); + var res = cache.Query(qry).GetAll(); + + Assert.AreEqual(1, res[0][0]); + Assert.AreEqual("baz", cache[-10].Name); + } + } +} From 789b3cdb7c69219c0e4126ba29425f77b6ac5436 Mon Sep 17 00:00:00 2001 From: Alexander Paschenko Date: Fri, 16 Feb 2018 23:22:03 +0300 Subject: [PATCH 241/243] IGNITE-7253 Streaming in thin JDBC driver (cherry picked from commit 487418b) --- .../jdbc2/JdbcNoCacheStreamingSelfTest.java | 182 +++++++++++ .../internal/jdbc2/JdbcStreamingSelfTest.java | 218 +++++++++++--- .../jdbc/suite/IgniteJdbcDriverTestSuite.java | 3 + .../jdbc/thin/JdbcThinStreamingSelfTest.java | 285 ++++++++++++++++++ .../jdbc/thin/ConnectionProperties.java | 66 ++++ .../jdbc/thin/ConnectionPropertiesImpl.java | 141 ++++++++- .../jdbc/thin/JdbcThinConnection.java | 69 +++++ .../jdbc/thin/JdbcThinPreparedStatement.java | 18 +- .../internal/jdbc/thin/JdbcThinStatement.java | 55 +++- .../internal/jdbc/thin/JdbcThinTcpIo.java | 32 +- .../ignite/internal/jdbc2/JdbcConnection.java | 12 +- .../jdbc2/JdbcStreamedPreparedStatement.java | 2 +- .../cache/IgniteCacheProxyImpl.java | 4 +- .../odbc/jdbc/JdbcConnectionContext.java | 29 +- .../odbc/jdbc/JdbcRequestHandler.java | 82 +++-- .../processors/query/GridQueryIndexing.java | 33 +- .../processors/query/GridQueryProcessor.java | 91 ++++-- .../processors/query/SqlClientContext.java | 195 ++++++++++++ ...niteClientCacheInitializationFailTest.java | 15 +- .../ignite/testframework/GridTestUtils.java | 2 +- .../query/h2/DmlStatementsProcessor.java | 100 +++--- .../processors/query/h2/IgniteH2Indexing.java | 121 ++++++-- .../query/h2/ddl/DdlStatementsProcessor.java | 22 +- .../processors/query/h2/dml/UpdatePlan.java | 2 +- .../query/h2/dml/UpdatePlanBuilder.java | 13 +- .../query/h2/sql/GridSqlQueryParser.java | 12 + .../GridCacheCrossCacheQuerySelfTest.java 
| 18 +- 27 files changed, 1559 insertions(+), 263 deletions(-) create mode 100644 modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcNoCacheStreamingSelfTest.java create mode 100644 modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingSelfTest.java create mode 100644 modules/core/src/main/java/org/apache/ignite/internal/processors/query/SqlClientContext.java diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcNoCacheStreamingSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcNoCacheStreamingSelfTest.java new file mode 100644 index 0000000000000..74c2820f06f5a --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcNoCacheStreamingSelfTest.java @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.jdbc2; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.util.Collections; +import java.util.Properties; +import org.apache.ignite.IgniteJdbcDriver; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.ConnectorConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +import static org.apache.ignite.IgniteJdbcDriver.CFG_URL_PREFIX; +import static org.apache.ignite.cache.CacheMode.PARTITIONED; +import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; + +/** + * Data streaming test for thick driver and no explicit caches. + */ +public class JdbcNoCacheStreamingSelfTest extends GridCommonAbstractTest { + /** JDBC URL. */ + private static final String BASE_URL = CFG_URL_PREFIX + + "cache=default@modules/clients/src/test/config/jdbc-config.xml"; + + /** Connection. */ + protected Connection conn; + + /** */ + protected transient IgniteLogger log; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + return getConfiguration0(gridName); + } + + /** + * @param gridName Grid name. + * @return Grid configuration used for starting the grid. + * @throws Exception If failed. 
+ */ + private IgniteConfiguration getConfiguration0(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + CacheConfiguration cache = defaultCacheConfiguration(); + + cache.setCacheMode(PARTITIONED); + cache.setBackups(1); + cache.setWriteSynchronizationMode(FULL_SYNC); + cache.setIndexedTypes( + Integer.class, Integer.class + ); + + cfg.setCacheConfiguration(cache); + cfg.setLocalHost("127.0.0.1"); + + TcpDiscoverySpi disco = new TcpDiscoverySpi(); + + TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true); + ipFinder.setAddresses(Collections.singleton("127.0.0.1:47500..47501")); + + disco.setIpFinder(ipFinder); + + cfg.setDiscoverySpi(disco); + + cfg.setConnectorConfiguration(new ConnectorConfiguration()); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + startGrids(2); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + } + + /** + * @param allowOverwrite Allow overwriting of existing keys. + * @return Connection to use for the test. + * @throws Exception if failed. + */ + protected Connection createConnection(boolean allowOverwrite) throws Exception { + Properties props = new Properties(); + + props.setProperty(IgniteJdbcDriver.PROP_STREAMING, "true"); + props.setProperty(IgniteJdbcDriver.PROP_STREAMING_FLUSH_FREQ, "500"); + + if (allowOverwrite) + props.setProperty(IgniteJdbcDriver.PROP_STREAMING_ALLOW_OVERWRITE, "true"); + + return DriverManager.getConnection(BASE_URL, props); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + U.closeQuiet(conn); + + ignite(0).cache(DEFAULT_CACHE_NAME).clear(); + + super.afterTest(); + } + + /** + * @throws Exception if failed. 
+ */ + public void testStreamedInsert() throws Exception { + for (int i = 10; i <= 100; i += 10) + ignite(0).cache(DEFAULT_CACHE_NAME).put(i, i * 100); + + try (Connection conn = createConnection(false)) { + try (PreparedStatement stmt = conn.prepareStatement("insert into Integer(_key, _val) values (?, ?)")) { + for (int i = 1; i <= 100; i++) { + stmt.setInt(1, i); + stmt.setInt(2, i); + + stmt.executeUpdate(); + } + } + } + + U.sleep(500); + + // Now let's check it's all there. + for (int i = 1; i <= 100; i++) { + if (i % 10 != 0) + assertEquals(i, grid(0).cache(DEFAULT_CACHE_NAME).get(i)); + else // All that divides by 10 evenly should point to numbers 100 times greater - see above + assertEquals(i * 100, grid(0).cache(DEFAULT_CACHE_NAME).get(i)); + } + } + + /** + * @throws Exception if failed. + */ + public void testStreamedInsertWithOverwritesAllowed() throws Exception { + for (int i = 10; i <= 100; i += 10) + ignite(0).cache(DEFAULT_CACHE_NAME).put(i, i * 100); + + try (Connection conn = createConnection(true)) { + try (PreparedStatement stmt = conn.prepareStatement("insert into Integer(_key, _val) values (?, ?)")) { + for (int i = 1; i <= 100; i++) { + stmt.setInt(1, i); + stmt.setInt(2, i); + + stmt.executeUpdate(); + } + } + } + + U.sleep(500); + + // Now let's check it's all there. + // i should point to i at all times as we've turned overwrites on above. 
+ for (int i = 1; i <= 100; i++) + assertEquals(i, grid(0).cache(DEFAULT_CACHE_NAME).get(i)); + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java index 5418ca0dd0551..ebb6bc9b340c8 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java @@ -20,16 +20,24 @@ import java.sql.Connection; import java.sql.DriverManager; import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; import java.util.Collections; import java.util.Properties; +import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteJdbcDriver; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.binary.BinaryObject; +import org.apache.ignite.binary.BinaryObjectBuilder; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.ConnectorConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.processors.query.QueryUtils; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteCallable; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import static org.apache.ignite.IgniteJdbcDriver.CFG_URL_PREFIX; @@ -41,10 +49,12 @@ */ public class JdbcStreamingSelfTest extends GridCommonAbstractTest { /** JDBC URL. 
*/ - private static final String BASE_URL = CFG_URL_PREFIX + "cache=default@modules/clients/src/test/config/jdbc-config.xml"; + private static final String BASE_URL = CFG_URL_PREFIX + + "cache=default@modules/clients/src/test/config/jdbc-config.xml"; - /** Connection. */ - protected Connection conn; + /** Streaming URL. */ + private static final String STREAMING_URL = CFG_URL_PREFIX + + "cache=person@modules/clients/src/test/config/jdbc-config.xml"; /** */ protected transient IgniteLogger log; @@ -90,7 +100,18 @@ private IgniteConfiguration getConfiguration0(String gridName) throws Exception /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { + super.beforeTestsStarted(); + startGrids(2); + + try (Connection c = createOrdinaryConnection()) { + try (Statement s = c.createStatement()) { + s.execute("CREATE TABLE PUBLIC.Person(\"id\" int primary key, \"name\" varchar) WITH " + + "\"cache_name=person,value_type=Person\""); + } + } + + U.sleep(1000); } /** {@inheritDoc} */ @@ -98,28 +119,52 @@ private IgniteConfiguration getConfiguration0(String gridName) throws Exception stopAllGrids(); } + /** + * @return Connection without streaming initially turned on. + * @throws SQLException if failed. + */ + protected Connection createOrdinaryConnection() throws SQLException { + Connection res = DriverManager.getConnection(BASE_URL, new Properties()); + + res.setSchema(QueryUtils.DFLT_SCHEMA); + + return res; + } + + /** + * @param allowOverwrite Allow overwriting of existing keys. + * @return Connection to use for the test. + * @throws Exception if failed. + */ + protected Connection createStreamedConnection(boolean allowOverwrite) throws Exception { + return createStreamedConnection(allowOverwrite, 500); + } + /** * @param allowOverwrite Allow overwriting of existing keys. + * @param flushTimeout Stream flush timeout. * @return Connection to use for the test. * @throws Exception if failed. 
*/ - private Connection createConnection(boolean allowOverwrite) throws Exception { + protected Connection createStreamedConnection(boolean allowOverwrite, long flushTimeout) throws Exception { Properties props = new Properties(); props.setProperty(IgniteJdbcDriver.PROP_STREAMING, "true"); - props.setProperty(IgniteJdbcDriver.PROP_STREAMING_FLUSH_FREQ, "500"); + props.setProperty(IgniteJdbcDriver.PROP_STREAMING_FLUSH_FREQ, String.valueOf(flushTimeout)); if (allowOverwrite) props.setProperty(IgniteJdbcDriver.PROP_STREAMING_ALLOW_OVERWRITE, "true"); - return DriverManager.getConnection(BASE_URL, props); + Connection res = DriverManager.getConnection(STREAMING_URL, props); + + res.setSchema(QueryUtils.DFLT_SCHEMA); + + return res; } /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { - U.closeQuiet(conn); - - ignite(0).cache(DEFAULT_CACHE_NAME).clear(); + cache().clear(); super.afterTest(); } @@ -128,30 +173,59 @@ private Connection createConnection(boolean allowOverwrite) throws Exception { * @throws Exception if failed. */ public void testStreamedInsert() throws Exception { - conn = createConnection(false); - for (int i = 10; i <= 100; i += 10) - ignite(0).cache(DEFAULT_CACHE_NAME).put(i, i * 100); + put(i, nameForId(i * 100)); + + try (Connection conn = createStreamedConnection(false)) { + try (PreparedStatement stmt = conn.prepareStatement("insert into PUBLIC.Person(\"id\", \"name\") " + + "values (?, ?)")) { + for (int i = 1; i <= 100; i++) { + stmt.setInt(1, i); + stmt.setString(2, nameForId(i)); + + stmt.executeUpdate(); + } + } + } - PreparedStatement stmt = conn.prepareStatement("insert into Integer(_key, _val) values (?, ?)"); + U.sleep(500); + // Now let's check it's all there. 
for (int i = 1; i <= 100; i++) { - stmt.setInt(1, i); - stmt.setInt(2, i); + if (i % 10 != 0) + assertEquals(nameForId(i), nameForIdInCache(i)); + else // All that divides by 10 evenly should point to numbers 100 times greater - see above + assertEquals(nameForId(i * 100), nameForIdInCache(i)); + } + } - stmt.executeUpdate(); + /** + * @throws Exception if failed. + */ + public void testStreamedInsertWithoutColumnsList() throws Exception { + for (int i = 10; i <= 100; i += 10) + put(i, nameForId(i * 100)); + + try (Connection conn = createStreamedConnection(false)) { + try (PreparedStatement stmt = conn.prepareStatement("insert into PUBLIC.Person(\"id\", \"name\") " + + "values (?, ?)")) { + for (int i = 1; i <= 100; i++) { + stmt.setInt(1, i); + stmt.setString(2, nameForId(i)); + + stmt.executeUpdate(); + } + } } - // Closing connection makes it wait for streamer close - // and thus for data load completion as well - conn.close(); + U.sleep(500); // Now let's check it's all there. for (int i = 1; i <= 100; i++) { if (i % 10 != 0) - assertEquals(i, grid(0).cache(DEFAULT_CACHE_NAME).get(i)); + assertEquals(nameForId(i), nameForIdInCache(i)); else // All that divides by 10 evenly should point to numbers 100 times greater - see above - assertEquals(i * 100, grid(0).cache(DEFAULT_CACHE_NAME).get(i)); + assertEquals(nameForId(i * 100), nameForIdInCache(i)); } } @@ -159,27 +233,99 @@ public void testStreamedInsert() throws Exception { * @throws Exception if failed. 
*/ public void testStreamedInsertWithOverwritesAllowed() throws Exception { - conn = createConnection(true); - for (int i = 10; i <= 100; i += 10) - ignite(0).cache(DEFAULT_CACHE_NAME).put(i, i * 100); - - PreparedStatement stmt = conn.prepareStatement("insert into Integer(_key, _val) values (?, ?)"); - - for (int i = 1; i <= 100; i++) { - stmt.setInt(1, i); - stmt.setInt(2, i); - - stmt.executeUpdate(); + put(i, nameForId(i * 100)); + + try (Connection conn = createStreamedConnection(true)) { + try (PreparedStatement stmt = conn.prepareStatement("insert into PUBLIC.Person(\"id\", \"name\") " + + "values (?, ?)")) { + for (int i = 1; i <= 100; i++) { + stmt.setInt(1, i); + stmt.setString(2, nameForId(i)); + + stmt.executeUpdate(); + } + } } - // Closing connection makes it wait for streamer close - // and thus for data load completion as well - conn.close(); + U.sleep(500); // Now let's check it's all there. // i should point to i at all times as we've turned overwrites on above. for (int i = 1; i <= 100; i++) - assertEquals(i, grid(0).cache(DEFAULT_CACHE_NAME).get(i)); + assertEquals(nameForId(i), nameForIdInCache(i)); + } + + /** */ + public void testOnlyInsertsAllowed() { + assertStatementForbidden("CREATE TABLE PUBLIC.X (x int primary key, y int)"); + + assertStatementForbidden("SELECT * from Person"); + + assertStatementForbidden("insert into PUBLIC.Person(\"id\", \"name\") " + + "(select \"id\" + 1, CONCAT(\"name\", '1') from Person)"); + + assertStatementForbidden("DELETE from Person"); + + assertStatementForbidden("UPDATE Person SET \"name\" = 'name0'"); + + assertStatementForbidden("alter table Person add column y int"); + } + + /** + * @param sql Statement to check. 
+ */ + @SuppressWarnings("ThrowableNotThrown") + protected void assertStatementForbidden(String sql) { + GridTestUtils.assertThrows(null, new IgniteCallable() { + @Override public Object call() throws Exception { + try (Connection c = createStreamedConnection(false)) { + try (PreparedStatement s = c.prepareStatement(sql)) { + s.execute(); + } + } + + return null; + } + }, SQLException.class,"Only tuple based INSERT statements are supported in streaming mode"); + } + + /** + * @return Person cache. + */ + protected IgniteCache cache() { + return grid(0).cache("person"); + } + + /** + * @param id id of person to put. + * @param name name of person to put. + */ + protected void put(int id, String name) { + BinaryObjectBuilder bldr = grid(0).binary().builder("Person"); + + bldr.setField("name", name); + + cache().put(id, bldr.build()); + } + + /** + * @param id Person id. + * @return Default name for person w/given id. + */ + protected String nameForId(int id) { + return "Person" + id; + } + + /** + * @param id person id. + * @return Name for person with given id currently stored in cache. 
+ */ + protected String nameForIdInCache(int id) { + Object o = cache().withKeepBinary().get(id); + + assertTrue(String.valueOf(o), o instanceof BinaryObject); + + return ((BinaryObject)o).field("name"); } } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java index f1d0d4e74cc23..e75483e80715d 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java @@ -67,6 +67,7 @@ import org.apache.ignite.jdbc.thin.JdbcThinSchemaCaseTest; import org.apache.ignite.jdbc.thin.JdbcThinSelectAfterAlterTable; import org.apache.ignite.jdbc.thin.JdbcThinStatementSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinStreamingSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinUpdateStatementSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest; @@ -117,9 +118,11 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(org.apache.ignite.internal.jdbc2.JdbcDeleteStatementSelfTest.class)); suite.addTest(new TestSuite(org.apache.ignite.internal.jdbc2.JdbcStatementBatchingSelfTest.class)); suite.addTest(new TestSuite(org.apache.ignite.internal.jdbc2.JdbcErrorsSelfTest.class)); + suite.addTest(new TestSuite(org.apache.ignite.internal.jdbc2.JdbcNoCacheStreamingSelfTest.class)); suite.addTest(new TestSuite(JdbcBlobTest.class)); suite.addTest(new TestSuite(org.apache.ignite.internal.jdbc2.JdbcStreamingSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinStreamingSelfTest.class)); // DDL tests. 
suite.addTest(new TestSuite(org.apache.ignite.internal.jdbc2.JdbcDynamicIndexAtomicPartitionedNearSelfTest.class)); diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingSelfTest.java new file mode 100644 index 0000000000000..9eba4da2a0a8a --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingSelfTest.java @@ -0,0 +1,285 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.jdbc.thin; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Set; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.cache.CachePeekMode; +import org.apache.ignite.internal.jdbc2.JdbcStreamingSelfTest; +import org.apache.ignite.internal.processors.query.SqlClientContext; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * Tests for streaming via thin driver. 
+ */ +public class JdbcThinStreamingSelfTest extends JdbcStreamingSelfTest { + /** */ + private int batchSize = 17; + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + super.beforeTestsStarted(); + + batchSize = 17; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + try (Connection c = createOrdinaryConnection()) { + execute(c, "DROP TABLE PUBLIC.T IF EXISTS"); + } + + super.afterTest(); + } + + /** {@inheritDoc} */ + @Override protected Connection createStreamedConnection(boolean allowOverwrite, long flushFreq) throws Exception { + return JdbcThinAbstractSelfTest.connect(grid(0), "streaming=true&streamingFlushFrequency=" + + flushFreq + "&" + "streamingAllowOverwrite=" + allowOverwrite + "&streamingPerNodeBufferSize=1000&" + + "streamingBatchSize=" + batchSize); + } + + /** {@inheritDoc} */ + @Override protected Connection createOrdinaryConnection() throws SQLException { + return JdbcThinAbstractSelfTest.connect(grid(0), null); + } + + /** + * @throws Exception if failed. + */ + public void testStreamedBatchedInsert() throws Exception { + for (int i = 10; i <= 100; i += 10) + put(i, nameForId(i * 100)); + + try (Connection conn = createStreamedConnection(false)) { + assertStreamingOn(); + + try (PreparedStatement stmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?), " + + "(?, ?)")) { + for (int i = 1; i <= 100; i+=2) { + stmt.setInt(1, i); + stmt.setString(2, nameForId(i)); + stmt.setInt(3, i + 1); + stmt.setString(4, nameForId(i + 1)); + + stmt.addBatch(); + } + + stmt.executeBatch(); + } + } + + U.sleep(500); + + // Now let's check it's all there. + for (int i = 1; i <= 100; i++) { + if (i % 10 != 0) + assertEquals(nameForId(i), nameForIdInCache(i)); + else // All that divides by 10 evenly should point to numbers 100 times greater - see above + assertEquals(nameForId(i * 100), nameForIdInCache(i)); + } + } + + /** + * @throws SQLException if failed. 
+ */ + public void testSimultaneousStreaming() throws Exception { + try (Connection anotherConn = createOrdinaryConnection()) { + execute(anotherConn, "CREATE TABLE PUBLIC.T(x int primary key, y int) WITH " + + "\"cache_name=T,wrap_value=false\""); + } + + // Timeout to let connection close be handled on server side. + U.sleep(500); + + try (Connection conn = createStreamedConnection(false, 10000)) { + assertStreamingOn(); + + PreparedStatement firstStmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?)"); + + PreparedStatement secondStmt = conn.prepareStatement("insert into PUBLIC.T(x, y) values (?, ?)"); + + try { + for (int i = 1; i <= 10; i++) { + firstStmt.setInt(1, i); + firstStmt.setString(2, nameForId(i)); + + firstStmt.executeUpdate(); + } + + for (int i = 51; i <= 67; i++) { + secondStmt.setInt(1, i); + secondStmt.setInt(2, i); + + secondStmt.executeUpdate(); + } + + for (int i = 11; i <= 50; i++) { + firstStmt.setInt(1, i); + firstStmt.setString(2, nameForId(i)); + + firstStmt.executeUpdate(); + } + + for (int i = 68; i <= 100; i++) { + secondStmt.setInt(1, i); + secondStmt.setInt(2, i); + + secondStmt.executeUpdate(); + } + + assertCacheEmpty(); + + SqlClientContext cliCtx = sqlClientContext(); + + HashMap> streamers = U.field(cliCtx, "streamers"); + + assertEquals(2, streamers.size()); + + assertEqualsCollections(new HashSet<>(Arrays.asList("person", "T")), streamers.keySet()); + } + finally { + U.closeQuiet(firstStmt); + + U.closeQuiet(secondStmt); + } + } + + // Let's wait a little so that all data arrives to destination - we can't intercept streamers' flush + // on connection close in any way. + U.sleep(1000); + + // Now let's check it's all there. 
+ for (int i = 1; i <= 50; i++) + assertEquals(nameForId(i), nameForIdInCache(i)); + + for (int i = 51; i <= 100; i++) + assertEquals(i, grid(0).cache("T").get(i)); + } + + /** + * + */ + public void testStreamingWithMixedStatementTypes() throws Exception { + String prepStmtStr = "insert into Person(\"id\", \"name\") values (?, ?)"; + + String stmtStr = "insert into Person(\"id\", \"name\") values (%d, '%s')"; + + try (Connection conn = createStreamedConnection(false, 10000)) { + assertStreamingOn(); + + PreparedStatement firstStmt = conn.prepareStatement(prepStmtStr); + + Statement secondStmt = conn.createStatement(); + + try { + for (int i = 1; i <= 100; i++) { + boolean usePrep = Math.random() > 0.5; + + boolean useBatch = Math.random() > 0.5; + + if (usePrep) { + firstStmt.setInt(1, i); + firstStmt.setString(2, nameForId(i)); + + if (useBatch) + firstStmt.addBatch(); + else + firstStmt.execute(); + } + else { + String sql = String.format(stmtStr, i, nameForId(i)); + + if (useBatch) + secondStmt.addBatch(sql); + else + secondStmt.execute(sql); + } + } + } + finally { + U.closeQuiet(firstStmt); + + U.closeQuiet(secondStmt); + } + } + + // Let's wait a little so that all data arrives to destination - we can't intercept streamers' flush + // on connection close in any way. + U.sleep(1000); + + // Now let's check it's all there. + for (int i = 1; i <= 100; i++) + assertEquals(nameForId(i), nameForIdInCache(i)); + } + + /** + * Check that there's nothing in cache. + */ + private void assertCacheEmpty() { + assertEquals(0, grid(0).cache(DEFAULT_CACHE_NAME).size(CachePeekMode.ALL)); + } + + /** + * @param conn Connection. + * @param sql Statement. + * @throws SQLException if failed. + */ + private static void execute(Connection conn, String sql) throws SQLException { + try (Statement s = conn.createStatement()) { + s.execute(sql); + } + } + + /** + * @return Active SQL client context. 
+ */ + private SqlClientContext sqlClientContext() { + Set ctxs = U.field(grid(0).context().query(), "cliCtxs"); + + assertFalse(F.isEmpty(ctxs)); + + assertEquals(1, ctxs.size()); + + return ctxs.iterator().next(); + } + + /** + * Check that streaming state on target node is as expected. + */ + private void assertStreamingOn() { + SqlClientContext cliCtx = sqlClientContext(); + + assertTrue(cliCtx.isStream()); + } + + /** {@inheritDoc} */ + @Override protected void assertStatementForbidden(String sql) { + batchSize = 1; + + super.assertStatementForbidden(sql); + } +} \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionProperties.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionProperties.java index d79348415f4cc..458facad23086 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionProperties.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionProperties.java @@ -145,4 +145,70 @@ public interface ConnectionProperties { * @param skipReducerOnUpdate Skip reducer on update flag. */ public void setSkipReducerOnUpdate(boolean skipReducerOnUpdate); + + /** + * @return Streamed connection flag. + */ + public boolean isStream(); + + /** + * @param stream Streamed connection flag. + */ + public void setStream(boolean stream); + + /** + * @return Allow overwrites during streaming connection flag. + */ + public boolean isStreamAllowOverwrite(); + + /** + * @param streamAllowOverwrite Allow overwrites during streaming connection flag. + */ + public void setStreamAllowOverwrite(boolean streamAllowOverwrite); + + /** + * @return Number of parallel operations per node during streaming connection param. + */ + public int getStreamParallelOperations(); + + /** + * @param streamParallelOperations Number of parallel operations per node during streaming connection param. + * @throws SQLException if value check failed. 
+ */ + public void setStreamParallelOperations(int streamParallelOperations) throws SQLException; + + /** + * @return Buffer size during streaming connection param. + */ + public int getStreamBufferSize(); + + /** + * @param streamBufSize Buffer size during streaming connection param. + * @throws SQLException if value check failed. + */ + public void setStreamBufferSize(int streamBufSize) throws SQLException; + + /** + * @return Flush timeout during streaming connection param. + */ + public long getStreamFlushFrequency(); + + /** + * @param streamFlushFreq Flush timeout during streaming connection param. + * @throws SQLException if value check failed. + */ + public void setStreamFlushFrequency(long streamFlushFreq) throws SQLException; + + /** + * @return Batch size for streaming (number of commands to accumulate internally before actually + * sending over the wire). + */ + public int getStreamBatchSize(); + + /** + * @param streamBatchSize Batch size for streaming (number of commands to accumulate internally before actually + * sending over the wire). + * @throws SQLException if value check failed. + */ + public void setStreamBatchSize(int streamBatchSize) throws SQLException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java index 86ba2fa832b1d..de98cf786552d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java @@ -96,11 +96,40 @@ public class ConnectionPropertiesImpl implements ConnectionProperties, Serializa private BooleanProperty skipReducerOnUpdate = new BooleanProperty( "skipReducerOnUpdate", "Enable execution update queries on ignite server nodes", false, false); + /** Turn on streaming mode on this connection. 
*/ + private BooleanProperty stream = new BooleanProperty( + "streaming", "Turn on streaming mode on this connection", false, false); + + /** Turn on overwrite during streaming on this connection. */ + private BooleanProperty streamAllowOverwrite = new BooleanProperty( + "streamingAllowOverwrite", "Turn on overwrite during streaming on this connection", false, false); + + /** Number of parallel operations per cluster node during streaming. */ + private IntegerProperty streamParOps = new IntegerProperty( + "streamingPerNodeParallelOperations", "Number of parallel operations per cluster node during streaming", + 0, false, 0, Integer.MAX_VALUE); + + /** Buffer size per cluster node during streaming. */ + private IntegerProperty streamBufSize = new IntegerProperty( + "streamingPerNodeBufferSize", "Buffer size per cluster node during streaming", + 0, false, 0, Integer.MAX_VALUE); + + /** Buffer size per cluster node during streaming. */ + private LongProperty streamFlushFreq = new LongProperty( + "streamingFlushFrequency", "Buffer size per cluster node during streaming", + 0, false, 0, Long.MAX_VALUE); + + /** Buffer size per cluster node during streaming. */ + private IntegerProperty streamBatchSize = new IntegerProperty( + "streamingBatchSize", "Batch size for streaming (number of commands to accumulate internally " + + "before actually sending over the wire)", 10, false, 1, Integer.MAX_VALUE); + /** Properties array. 
*/ - private final ConnectionProperty [] propsArray = { + private final ConnectionProperty [] props = { host, port, distributedJoins, enforceJoinOrder, collocated, replicatedOnly, autoCloseServerCursor, - tcpNoDelay, lazy, socketSendBuffer, socketReceiveBuffer, skipReducerOnUpdate + tcpNoDelay, lazy, socketSendBuffer, socketReceiveBuffer, skipReducerOnUpdate, + stream, streamAllowOverwrite, streamParOps, streamBufSize, streamFlushFreq, streamBatchSize }; /** {@inheritDoc} */ @@ -223,6 +252,66 @@ public class ConnectionPropertiesImpl implements ConnectionProperties, Serializa skipReducerOnUpdate.setValue(val); } + /** {@inheritDoc} */ + @Override public boolean isStream() { + return stream.value(); + } + + /** {@inheritDoc} */ + @Override public void setStream(boolean val) { + stream.setValue(val); + } + + /** {@inheritDoc} */ + @Override public boolean isStreamAllowOverwrite() { + return streamAllowOverwrite.value(); + } + + /** {@inheritDoc} */ + @Override public void setStreamAllowOverwrite(boolean val) { + streamAllowOverwrite.setValue(val); + } + + /** {@inheritDoc} */ + @Override public int getStreamParallelOperations() { + return streamParOps.value(); + } + + /** {@inheritDoc} */ + @Override public void setStreamParallelOperations(int val) throws SQLException { + streamParOps.setValue(val); + } + + /** {@inheritDoc} */ + @Override public int getStreamBufferSize() { + return streamBufSize.value(); + } + + /** {@inheritDoc} */ + @Override public void setStreamBufferSize(int val) throws SQLException { + streamBufSize.setValue(val); + } + + /** {@inheritDoc} */ + @Override public long getStreamFlushFrequency() { + return streamFlushFreq.value(); + } + + /** {@inheritDoc} */ + @Override public void setStreamFlushFrequency(long val) throws SQLException { + streamFlushFreq.setValue(val); + } + + /** {@inheritDoc} */ + @Override public int getStreamBatchSize() { + return streamBatchSize.value(); + } + + /** {@inheritDoc} */ + @Override public void 
setStreamBatchSize(int val) throws SQLException { + streamBatchSize.setValue(val); + } + /** * @param props Environment properties. * @throws SQLException On error. @@ -230,7 +319,7 @@ public class ConnectionPropertiesImpl implements ConnectionProperties, Serializa void init(Properties props) throws SQLException { Properties props0 = (Properties)props.clone(); - for (ConnectionProperty aPropsArray : propsArray) + for (ConnectionProperty aPropsArray : this.props) aPropsArray.init(props0); } @@ -238,10 +327,10 @@ void init(Properties props) throws SQLException { * @return Driver's properties info array. */ private DriverPropertyInfo[] getDriverPropertyInfo() { - DriverPropertyInfo[] dpis = new DriverPropertyInfo[propsArray.length]; + DriverPropertyInfo[] dpis = new DriverPropertyInfo[props.length]; - for (int i = 0; i < propsArray.length; ++i) - dpis[i] = propsArray[i].getDriverPropertyInfo(); + for (int i = 0; i < props.length; ++i) + dpis[i] = props[i].getDriverPropertyInfo(); return dpis; } @@ -513,7 +602,8 @@ private abstract static class NumberProperty extends ConnectionProperty { else { try { setValue(parse(str)); - } catch (NumberFormatException e) { + } + catch (NumberFormatException e) { throw new SQLException("Failed to parse int property [name=" + name + ", value=" + str + ']', SqlStateCode.CLIENT_CONNECTION_FAILED); } @@ -585,6 +675,38 @@ int value() { } } + /** + * + */ + private static class LongProperty extends NumberProperty { + /** */ + private static final long serialVersionUID = 0L; + + /** + * @param name Name. + * @param desc Description. + * @param dfltVal Default value. + * @param required {@code true} if the property is required. + * @param min Lower bound of allowed range. + * @param max Upper bound of allowed range. 
+ */ + LongProperty(String name, String desc, Number dfltVal, boolean required, long min, long max) { + super(name, desc, dfltVal, required, min, max); + } + + /** {@inheritDoc} */ + @Override protected Number parse(String str) throws NumberFormatException { + return Long.parseLong(str); + } + + /** + * @return Property value. + */ + long value() { + return val.longValue(); + } + } + /** * */ @@ -626,7 +748,10 @@ String value() { /** {@inheritDoc} */ @Override void init(String str) throws SQLException { - val = str; + if (str == null) + val = (String)dfltVal; + else + val = str; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java index 999c793d55ccf..d267070718f28 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.jdbc.thin; import java.sql.Array; +import java.sql.BatchUpdateException; import java.sql.Blob; import java.sql.CallableStatement; import java.sql.Clob; @@ -33,13 +34,19 @@ import java.sql.Savepoint; import java.sql.Statement; import java.sql.Struct; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Properties; import java.util.concurrent.Executor; +import java.util.logging.Level; import java.util.logging.Logger; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.odbc.ClientListenerResponse; import org.apache.ignite.internal.processors.odbc.SqlStateCode; +import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBatchExecuteRequest; +import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBatchExecuteResult; +import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQuery; import 
org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcResponse; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcResult; @@ -94,6 +101,12 @@ public class JdbcThinConnection implements Connection { /** Connection properties. */ private ConnectionProperties connProps; + /** Batch for streaming. */ + private List streamBatch; + + /** Last added query to recognize batches. */ + private String lastStreamQry; + /** * Creates new connection. * @@ -130,6 +143,53 @@ public JdbcThinConnection(String url, String schema, Properties props) throws SQ } } + /** + * @return Whether this connection is streamed or not. + */ + public boolean isStream() { + return connProps.isStream(); + } + + /** + * Add another query for batched execution. + * @param sql Query. + * @param args Arguments. + */ + synchronized void addBatch(String sql, List args) throws SQLException { + boolean newQry = (args == null || !F.eq(lastStreamQry, sql)); + + // Providing null as SQL here allows for recognizing subbatches on server and handling them more efficiently. + JdbcQuery q = new JdbcQuery(newQry ? sql : null, args != null ? args.toArray() : null); + + if (streamBatch == null) + streamBatch = new ArrayList<>(connProps.getStreamBatchSize()); + + streamBatch.add(q); + + // Null args means "addBatch(String)" was called on non-prepared Statement, + // we don't want to remember its query string. + lastStreamQry = (args != null ? sql : null); + + if (streamBatch.size() == connProps.getStreamBatchSize()) + executeBatch(); + } + + /** + * @throws SQLException if failed. 
+ */ + private void executeBatch() throws SQLException { + JdbcBatchExecuteResult res = sendRequest(new JdbcBatchExecuteRequest(schema, streamBatch)); + + streamBatch = null; + + lastStreamQry = null; + + if (res.errorCode() != ClientListenerResponse.STATUS_SUCCESS) { + throw new BatchUpdateException(res.errorMessage(), IgniteQueryErrorCode.codeToSqlState(res.errorCode()), + res.errorCode(), res.updateCounts()); + } + } + /** {@inheritDoc} */ @Override public Statement createStatement() throws SQLException { return createStatement(TYPE_FORWARD_ONLY, CONCUR_READ_ONLY, HOLD_CURSORS_OVER_COMMIT); @@ -272,6 +332,15 @@ private void checkCursorOptions(int resSetType, int resSetConcurrency, if (isClosed()) return; + if (!F.isEmpty(streamBatch)) { + try { + executeBatch(); + } + catch (SQLException e) { + LOG.log(Level.WARNING, "Exception during batch send on streamed connection close", e); + } + } + closed = true; cliIo.close(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinPreparedStatement.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinPreparedStatement.java index 23d3bbe37102b..b575167ea8965 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinPreparedStatement.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinPreparedStatement.java @@ -39,8 +39,8 @@ import java.sql.Timestamp; import java.util.ArrayList; import java.util.Calendar; -import org.apache.ignite.internal.processors.odbc.SqlStateCode; import org.apache.ignite.internal.processors.odbc.SqlListenerUtils; +import org.apache.ignite.internal.processors.odbc.SqlStateCode; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcMetaParamsRequest; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcMetaParamsResult; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQuery; @@ -262,13 +262,19 @@ private void executeWithArguments(JdbcStatementType stmtType) throws 
SQLExceptio @Override public void addBatch() throws SQLException { ensureNotClosed(); - if (batch == null) { - batch = new ArrayList<>(); + batchSize++; + + if (conn.isStream()) + conn.addBatch(sql, args); + else { + if (batch == null) { + batch = new ArrayList<>(); - batch.add(new JdbcQuery(sql, args.toArray(new Object[args.size()]))); + batch.add(new JdbcQuery(sql, args.toArray(new Object[args.size()]))); + } + else + batch.add(new JdbcQuery(null, args.toArray(new Object[args.size()]))); } - else - batch.add(new JdbcQuery(null, args.toArray(new Object[args.size()]))); args = null; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java index 202001149d932..3e1d0171d00e5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java @@ -38,13 +38,13 @@ import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBatchExecuteRequest; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBatchExecuteResult; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadAckResult; +import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQuery; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQueryExecuteMultipleStatementsResult; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQueryExecuteRequest; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQueryExecuteResult; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcResult; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcResultInfo; -import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcStatementType; import static 
java.sql.ResultSet.CONCUR_READ_ONLY; @@ -79,6 +79,9 @@ public class JdbcThinStatement implements Statement { /** Result set holdability*/ private final int resHoldability; + /** Batch size to keep track of number of items to return as fake update counters for executeBatch. */ + protected int batchSize; + /** Batch. */ protected List batch; @@ -133,6 +136,19 @@ protected void execute0(JdbcStatementType stmtType, String sql, List arg if (sql == null || sql.isEmpty()) throw new SQLException("SQL query is empty."); + if (conn.isStream()) { + if (stmtType == JdbcStatementType.SELECT_STATEMENT_TYPE) + throw new SQLException("Only tuple based INSERT statements are supported in streaming mode.", + SqlStateCode.INTERNAL_ERROR, + IgniteQueryErrorCode.UNSUPPORTED_OPERATION); + + conn.addBatch(sql, args); + + resultSets = Collections.singletonList(resultSetForUpdate(0)); + + return; + } + JdbcResult res0 = conn.sendRequest(new JdbcQueryExecuteRequest(stmtType, schema, pageSize, maxRows, sql, args == null ? null : args.toArray(new Object[args.size()]))); @@ -158,11 +174,8 @@ else if (res0 instanceof JdbcQueryExecuteMultipleStatementsResult) { boolean firstRes = true; for(JdbcResultInfo rsInfo : resInfos) { - if (!rsInfo.isQuery()) { - resultSets.add(new JdbcThinResultSet(this, -1, pageSize, - true, Collections.>emptyList(), false, - conn.autoCloseServerCursor(), rsInfo.updateCount(), closeOnCompletion)); - } + if (!rsInfo.isQuery()) + resultSets.add(resultSetForUpdate(rsInfo.updateCount())); else { if (firstRes) { firstRes = false; @@ -185,6 +198,16 @@ else if (res0 instanceof JdbcQueryExecuteMultipleStatementsResult) { assert resultSets.size() > 0 : "At least one results set is expected"; } + /** + * @param cnt Update counter. + * @return Result set for given update counter. 
+ */ + private JdbcThinResultSet resultSetForUpdate(long cnt) { + return new JdbcThinResultSet(this, -1, pageSize, + true, Collections.>emptyList(), false, + conn.autoCloseServerCursor(), cnt, closeOnCompletion); + } + /** * Sends a file to server in batches via multiple {@link JdbcBulkLoadBatchRequest}s. * @@ -469,6 +492,14 @@ private JdbcThinResultSet nextResultSet() throws SQLException { @Override public void addBatch(String sql) throws SQLException { ensureNotClosed(); + batchSize++; + + if (conn.isStream()) { + conn.addBatch(sql, null); + + return; + } + if (batch == null) batch = new ArrayList<>(); @@ -479,6 +510,8 @@ private JdbcThinResultSet nextResultSet() throws SQLException { @Override public void clearBatch() throws SQLException { ensureNotClosed(); + batchSize = 0; + batch = null; } @@ -488,6 +521,14 @@ private JdbcThinResultSet nextResultSet() throws SQLException { closeResults(); + if (conn.isStream()) { + int[] res = new int[batchSize]; + + batchSize = 0; + + return res; + } + if (batch == null || batch.isEmpty()) throw new SQLException("Batch is empty."); @@ -502,6 +543,8 @@ private JdbcThinResultSet nextResultSet() throws SQLException { return res.updateCounts(); } finally { + batchSize = 0; + batch = null; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java index fec218e368494..79bdc5e9189c7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java @@ -20,27 +20,9 @@ import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.IOException; -import java.io.InputStream; import java.net.InetSocketAddress; -import java.net.MalformedURLException; import java.net.Socket; -import java.net.URL; -import java.security.KeyManagementException; -import java.security.KeyStore; 
-import java.security.KeyStoreException; -import java.security.NoSuchAlgorithmException; -import java.security.UnrecoverableKeyException; -import java.security.cert.CertificateException; import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; -import javax.net.ssl.KeyManager; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLSocketFactory; -import javax.net.ssl.TrustManager; -import javax.net.ssl.TrustManagerFactory; -import javax.net.ssl.X509TrustManager; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.binary.BinaryReaderExImpl; import org.apache.ignite.internal.binary.BinaryWriterExImpl; @@ -57,7 +39,6 @@ import org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcResponse; import org.apache.ignite.internal.util.ipc.loopback.IpcClientTcpEndpoint; -import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteProductVersion; @@ -77,8 +58,11 @@ public class JdbcThinTcpIo { /** Version 2.4.0. */ private static final ClientListenerProtocolVersion VER_2_4_0 = ClientListenerProtocolVersion.create(2, 4, 0); + /** Version 2.5.0. */ + private static final ClientListenerProtocolVersion VER_2_5_0 = ClientListenerProtocolVersion.create(2, 5, 0); + /** Current version. */ - private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_4_0; + private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_5_0; /** Initial output stream capacity for handshake. 
*/ private static final int HANDSHAKE_MSG_SIZE = 13; @@ -182,6 +166,11 @@ public void handshake(ClientListenerProtocolVersion ver) throws IOException, SQL writer.writeBoolean(connProps.isAutoCloseServerCursor()); writer.writeBoolean(connProps.isLazy()); writer.writeBoolean(connProps.isSkipReducerOnUpdate()); + writer.writeBoolean(connProps.isStream()); + writer.writeBoolean(connProps.isStreamAllowOverwrite()); + writer.writeInt(connProps.getStreamParallelOperations()); + writer.writeInt(connProps.getStreamBufferSize()); + writer.writeLong(connProps.getStreamFlushFrequency()); send(writer.array()); @@ -215,7 +204,8 @@ public void handshake(ClientListenerProtocolVersion ver) throws IOException, SQL ClientListenerProtocolVersion srvProtocolVer = ClientListenerProtocolVersion.create(maj, min, maintenance); - if (VER_2_3_0.equals(srvProtocolVer) || VER_2_1_5.equals(srvProtocolVer)) + if (VER_2_4_0.equals(srvProtocolVer) || VER_2_3_0.equals(srvProtocolVer) || + VER_2_1_5.equals(srvProtocolVer)) handshake(srvProtocolVer); else if (VER_2_1_0.equals(srvProtocolVer)) handshake_2_1_0(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java index b51e0b95084ef..aaa51a0db6ca7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java @@ -60,6 +60,7 @@ import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.odbc.SqlStateCode; import org.apache.ignite.internal.processors.query.GridQueryIndexing; +import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.QueryUtils; import org.apache.ignite.internal.processors.resource.GridSpringResourceContext; import org.apache.ignite.internal.util.future.GridFutureAdapter; @@ 
-82,13 +83,13 @@ import static org.apache.ignite.IgniteJdbcDriver.PROP_LOCAL; import static org.apache.ignite.IgniteJdbcDriver.PROP_MULTIPLE_STMTS; import static org.apache.ignite.IgniteJdbcDriver.PROP_NODE_ID; +import static org.apache.ignite.IgniteJdbcDriver.PROP_SKIP_REDUCER_ON_UPDATE; import static org.apache.ignite.IgniteJdbcDriver.PROP_STREAMING; import static org.apache.ignite.IgniteJdbcDriver.PROP_STREAMING_ALLOW_OVERWRITE; import static org.apache.ignite.IgniteJdbcDriver.PROP_STREAMING_FLUSH_FREQ; import static org.apache.ignite.IgniteJdbcDriver.PROP_STREAMING_PER_NODE_BUF_SIZE; import static org.apache.ignite.IgniteJdbcDriver.PROP_STREAMING_PER_NODE_PAR_OPS; import static org.apache.ignite.IgniteJdbcDriver.PROP_TX_ALLOWED; -import static org.apache.ignite.IgniteJdbcDriver.PROP_SKIP_REDUCER_ON_UPDATE; import static org.apache.ignite.internal.jdbc2.JdbcUtils.convertToSqlException; import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.createJdbcSqlException; @@ -612,10 +613,11 @@ private Ignite getIgnite(String cfgUrl) throws IgniteCheckedException { PreparedStatement nativeStmt = prepareNativeStatement(sql); - if (!idx.isInsertStatement(nativeStmt)) { - throw new SQLException("Only INSERT operations are supported in streaming mode", - SqlStateCode.INTERNAL_ERROR, - IgniteQueryErrorCode.UNSUPPORTED_OPERATION); + try { + idx.checkStatementStreamable(nativeStmt); + } + catch (IgniteSQLException e) { + throw e.toJdbcException(); } IgniteDataStreamer streamer = ignite().dataStreamer(cacheName); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStreamedPreparedStatement.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStreamedPreparedStatement.java index 408f0897cad6f..25f55f2f6d2cd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStreamedPreparedStatement.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcStreamedPreparedStatement.java @@ -55,7 +55,7 @@ class JdbcStreamedPreparedStatement extends JdbcPreparedStatement { /** {@inheritDoc} */ @Override protected void execute0(String sql, Boolean isQuery) throws SQLException { - assert isQuery != null && !isQuery; + assert isQuery == null || !isQuery; long updCnt = conn.ignite().context().query().streamUpdateQuery(conn.cacheName(), conn.schemaName(), streamer, sql, getArgs()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java index 7d58a83028631..70f7adf727416 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java @@ -571,7 +571,7 @@ private QueryCursor> queryContinuous(ContinuousQuery qry, bool boolean keepBinary = opCtxCall != null && opCtxCall.isKeepBinary(); - return ctx.kernalContext().query().querySqlFields(ctx, qry, keepBinary, false); + return ctx.kernalContext().query().querySqlFields(ctx, qry, null, keepBinary, false); } catch (Exception e) { if (e instanceof CacheException) @@ -604,7 +604,7 @@ private QueryCursor> queryContinuous(ContinuousQuery qry, bool if (qry instanceof SqlFieldsQuery) return (FieldsQueryCursor)ctx.kernalContext().query().querySqlFields(ctx, (SqlFieldsQuery)qry, - keepBinary, true).get(0); + null, keepBinary, true).get(0); if (qry instanceof ScanQuery) return query((ScanQuery)qry, null, projection(qry.isLocal())); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java index 5841a4d450e9f..214d006da9956 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java @@ -28,7 +28,7 @@ import org.apache.ignite.internal.util.GridSpinBusyLock; /** - * ODBC Connection Context. + * JDBC Connection Context. */ public class JdbcConnectionContext implements ClientListenerConnectionContext { /** Version 2.1.0. */ @@ -38,13 +38,16 @@ public class JdbcConnectionContext implements ClientListenerConnectionContext { private static final ClientListenerProtocolVersion VER_2_1_5 = ClientListenerProtocolVersion.create(2, 1, 5); /** Version 2.3.1: added "multiple statements query" feature. */ - public static final ClientListenerProtocolVersion VER_2_3_0 = ClientListenerProtocolVersion.create(2, 3, 0); + static final ClientListenerProtocolVersion VER_2_3_0 = ClientListenerProtocolVersion.create(2, 3, 0); /** Version 2.4.0: adds default values for columns feature. */ - public static final ClientListenerProtocolVersion VER_2_4_0 = ClientListenerProtocolVersion.create(2, 4, 0); + static final ClientListenerProtocolVersion VER_2_4_0 = ClientListenerProtocolVersion.create(2, 4, 0); + + /** Version 2.5.0: adds streaming via thin connection. */ + static final ClientListenerProtocolVersion VER_2_5_0 = ClientListenerProtocolVersion.create(2, 5, 0); /** Current version. */ - private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_4_0; + private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_5_0; /** Supported versions. 
*/ private static final Set SUPPORTED_VERS = new HashSet<>(); @@ -66,6 +69,7 @@ public class JdbcConnectionContext implements ClientListenerConnectionContext { static { SUPPORTED_VERS.add(CURRENT_VER); + SUPPORTED_VERS.add(VER_2_4_0); SUPPORTED_VERS.add(VER_2_3_0); SUPPORTED_VERS.add(VER_2_1_5); SUPPORTED_VERS.add(VER_2_1_0); @@ -113,8 +117,23 @@ public JdbcConnectionContext(GridKernalContext ctx, GridSpinBusyLock busyLock, i if (ver.compareTo(VER_2_3_0) >= 0) skipReducerOnUpdate = reader.readBoolean(); + boolean stream = false; + boolean streamAllowOverwrites = false; + int streamParOps = 0; + int streamBufSize = 0; + long streamFlushFreq = 0; + + if (ver.compareTo(VER_2_5_0) >= 0) { + stream = reader.readBoolean(); + streamAllowOverwrites = reader.readBoolean(); + streamParOps = reader.readInt(); + streamBufSize = reader.readInt(); + streamFlushFreq = reader.readLong(); + } + handler = new JdbcRequestHandler(ctx, busyLock, maxCursors, distributedJoins, enforceJoinOrder, - collocated, replicatedOnly, autoCloseCursors, lazyExec, skipReducerOnUpdate, ver); + collocated, replicatedOnly, autoCloseCursors, lazyExec, skipReducerOnUpdate, stream, streamAllowOverwrites, + streamParOps, streamBufSize, streamFlushFreq, ver); parser = new JdbcMessageParser(ctx); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java index fd98a77f9058d..b096cd9256e1d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java @@ -53,6 +53,7 @@ import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor; import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.processors.query.QueryUtils; +import 
org.apache.ignite.internal.processors.query.SqlClientContext; import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.X; @@ -88,6 +89,9 @@ public class JdbcRequestHandler implements ClientListenerRequestHandler { /** Kernel context. */ private final GridKernalContext ctx; + /** Client context. */ + private final SqlClientContext cliCtx; + /** Logger. */ private final IgniteLogger log; @@ -103,24 +107,9 @@ public class JdbcRequestHandler implements ClientListenerRequestHandler { /** Current bulk load processors. */ private final ConcurrentHashMap bulkLoadRequests = new ConcurrentHashMap<>(); - /** Distributed joins flag. */ - private final boolean distributedJoins; - - /** Enforce join order flag. */ - private final boolean enforceJoinOrder; - - /** Collocated flag. */ - private final boolean collocated; - /** Replicated only flag. */ private final boolean replicatedOnly; - /** Lazy query execution flag. */ - private final boolean lazy; - - /** Skip reducer on update flag. */ - private final boolean skipReducerOnUpdate; - /** Automatic close of cursors. */ private final boolean autoCloseCursors; @@ -140,22 +129,38 @@ public class JdbcRequestHandler implements ClientListenerRequestHandler { * @param autoCloseCursors Flag to automatically close server cursors. * @param lazy Lazy query execution flag. * @param skipReducerOnUpdate Skip reducer on update flag. + * @param stream Streaming flag. + * @param streamAllowOverwrites Streaming overwrites flag. + * @param streamParOps Number of parallel ops per cluster node during streaming. + * @param streamBufSize Buffer size per cluster node during streaming. + * @param streamFlushFreq Data streamers' flush timeout. * @param protocolVer Protocol version. 
*/ public JdbcRequestHandler(GridKernalContext ctx, GridSpinBusyLock busyLock, int maxCursors, boolean distributedJoins, boolean enforceJoinOrder, boolean collocated, boolean replicatedOnly, boolean autoCloseCursors, boolean lazy, boolean skipReducerOnUpdate, + boolean stream, boolean streamAllowOverwrites, int streamParOps, int streamBufSize, long streamFlushFreq, ClientListenerProtocolVersion protocolVer) { this.ctx = ctx; + + this.cliCtx = new SqlClientContext( + ctx, + distributedJoins, + enforceJoinOrder, + collocated, + lazy, + skipReducerOnUpdate, + stream, + streamAllowOverwrites, + streamParOps, + streamBufSize, + streamFlushFreq + ); + this.busyLock = busyLock; this.maxCursors = maxCursors; - this.distributedJoins = distributedJoins; - this.enforceJoinOrder = enforceJoinOrder; - this.collocated = collocated; this.replicatedOnly = replicatedOnly; this.autoCloseCursors = autoCloseCursors; - this.lazy = lazy; - this.skipReducerOnUpdate = skipReducerOnUpdate; this.protocolVer = protocolVer; log = ctx.log(getClass()); @@ -301,6 +306,8 @@ public void onDisconnect() { } bulkLoadRequests.clear(); + + U.close(cliCtx, log); } finally { busyLock.leaveBusy(); @@ -326,6 +333,8 @@ private JdbcResponse executeQuery(JdbcQueryExecuteRequest req) { long qryId = QRY_ID_GEN.getAndIncrement(); + assert !cliCtx.isStream(); + try { String sql = req.sqlQuery(); @@ -347,17 +356,17 @@ private JdbcResponse executeQuery(JdbcQueryExecuteRequest req) { qry = new SqlFieldsQueryEx(sql, false); - if (skipReducerOnUpdate) + if (cliCtx.isSkipReducerOnUpdate()) ((SqlFieldsQueryEx)qry).setSkipReducerOnUpdate(true); } qry.setArgs(req.arguments()); - qry.setDistributedJoins(distributedJoins); - qry.setEnforceJoinOrder(enforceJoinOrder); - qry.setCollocated(collocated); + qry.setDistributedJoins(cliCtx.isDistributedJoins()); + qry.setEnforceJoinOrder(cliCtx.isEnforceJoinOrder()); + qry.setCollocated(cliCtx.isCollocated()); qry.setReplicatedOnly(replicatedOnly); - qry.setLazy(lazy); + 
qry.setLazy(cliCtx.isLazy()); if (req.pageSize() <= 0) return new JdbcResponse(IgniteQueryErrorCode.UNKNOWN, "Invalid fetch size: " + req.pageSize()); @@ -371,7 +380,7 @@ private JdbcResponse executeQuery(JdbcQueryExecuteRequest req) { qry.setSchema(schemaName); - List>> results = ctx.query().querySqlFields(qry, true, + List>> results = ctx.query().querySqlFields(null, qry, cliCtx, true, protocolVer.compareTo(VER_2_3_0) < 0); FieldsQueryCursor> fieldsCur = results.get(0); @@ -572,11 +581,11 @@ private ClientListenerResponse executeBatch(JdbcBatchExecuteRequest req) { qry = new SqlFieldsQueryEx(q.sql(), false); - qry.setDistributedJoins(distributedJoins); - qry.setEnforceJoinOrder(enforceJoinOrder); - qry.setCollocated(collocated); + qry.setDistributedJoins(cliCtx.isDistributedJoins()); + qry.setEnforceJoinOrder(cliCtx.isEnforceJoinOrder()); + qry.setCollocated(cliCtx.isCollocated()); qry.setReplicatedOnly(replicatedOnly); - qry.setLazy(lazy); + qry.setLazy(cliCtx.isLazy()); qry.setSchema(schemaName); } @@ -604,10 +613,21 @@ private ClientListenerResponse executeBatch(JdbcBatchExecuteRequest req) { * @param updCntsAcc Per query rows updates counter. * @param firstErr First error data - code and message. 
*/ + @SuppressWarnings("ForLoopReplaceableByForEach") private void executeBatchedQuery(SqlFieldsQueryEx qry, List updCntsAcc, IgniteBiTuple firstErr) { try { - List>> qryRes = ctx.query().querySqlFields(qry, true, true); + if (cliCtx.isStream()) { + List cnt = ctx.query().streamBatchedUpdateQuery(qry.getSchema(), cliCtx, qry.getSql(), + qry.batchedArguments()); + + for (int i = 0; i < cnt.size(); i++) + updCntsAcc.add(cnt.get(i).intValue()); + + return; + } + + List>> qryRes = ctx.query().querySqlFields(null, qry, cliCtx, true, true); for (FieldsQueryCursor> cur : qryRes) { if (cur instanceof BulkLoadContextCursor) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java index 5ac7b8981596c..1c47082706784 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java @@ -78,33 +78,46 @@ public QueryCursor> queryDistributedSql(String schemaNa * Detect whether SQL query should be executed in distributed or local manner and execute it. * @param schemaName Schema name. * @param qry Query. + * @param cliCtx Client context. * @param keepBinary Keep binary flag. * @param failOnMultipleStmts Whether an exception should be thrown for multiple statements query. - * @param cancel Query cancel state handler. - * @return Cursor. + * @param cancel Query cancel state handler. @return Cursor. */ - public List>> querySqlFields(String schemaName, SqlFieldsQuery qry, boolean keepBinary, - boolean failOnMultipleStmts, GridQueryCancel cancel); + public List>> querySqlFields(String schemaName, SqlFieldsQuery qry, + SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts, GridQueryCancel cancel); /** - * Perform a MERGE statement using data streamer as receiver. 
+ * Execute an INSERT statement using data streamer as receiver. * * @param schemaName Schema name. * @param qry Query. * @param params Query parameters. * @param streamer Data streamer to feed data to. - * @return Query result. + * @return Update counter. * @throws IgniteCheckedException If failed. */ public long streamUpdateQuery(String schemaName, String qry, @Nullable Object[] params, IgniteDataStreamer streamer) throws IgniteCheckedException; + /** + * Execute a batched INSERT statement using data streamer as receiver. + * + * @param schemaName Schema name. + * @param qry Query. + * @param params Query parameters. + * @param cliCtx Client connection context. + * @return Update counters. + * @throws IgniteCheckedException If failed. + */ + public List streamBatchedUpdateQuery(String schemaName, String qry, List params, + SqlClientContext cliCtx) throws IgniteCheckedException; + /** * Executes regular query. * * @param schemaName Schema name. * @param cacheName Cache name. - *@param qry Query. + * @param qry Query. * @param filter Cache name and key filter. * @param keepBinary Keep binary flag. @return Cursor. */ @@ -301,10 +314,10 @@ public void remove(String cacheName, GridQueryTypeDescriptor type, KeyCacheObjec public String schema(String cacheName); /** - * Check if passed statement is insert statemtn. + * Check if passed statement is insert statement eligible for streaming, throw an {@link IgniteSQLException} if not. * * @param nativeStmt Native statement. - * @return {@code True} if insert. 
*/ - public boolean isInsertStatement(PreparedStatement nativeStmt); + public void checkStatementStreamable(PreparedStatement nativeStmt); + } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java index af3bd5313feae..5bcf223efdfec 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java @@ -99,6 +99,7 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.T3; +import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.util.worker.GridWorker; @@ -187,6 +188,9 @@ public class GridQueryProcessor extends GridProcessorAdapter { /** Pending status messages. */ private final LinkedList pendingMsgs = new LinkedList<>(); + /** All currently open client contexts. */ + private final Set cliCtxs = Collections.newSetFromMap(new ConcurrentHashMap<>()); + /** Current cache that has a query running on it. 
*/ private final ThreadLocal curCache = new ThreadLocal<>(); @@ -257,11 +261,15 @@ public GridQueryProcessor(GridKernalContext ctx) throws IgniteCheckedException { if (cancel && idx != null) { try { - while (!busyLock.tryBlock(500)) + while (!busyLock.tryBlock(500)) { idx.cancelAllQueries(); + closeAllSqlStreams(); + } + return; - } catch (InterruptedException ignored) { + } + catch (InterruptedException ignored) { U.warn(log, "Interrupted while waiting for active queries cancellation."); Thread.currentThread().interrupt(); @@ -344,6 +352,32 @@ public void onCacheReconnect() throws IgniteCheckedException { } } + /** + * @param cliCtx Client context to register. + */ + void registerClientContext(SqlClientContext cliCtx) { + A.notNull(cliCtx, "cliCtx"); + + cliCtxs.add(cliCtx); + } + + /** + * @param cliCtx Client context to register. + */ + void unregisterClientContext(SqlClientContext cliCtx) { + A.notNull(cliCtx, "cliCtx"); + + cliCtxs.remove(cliCtx); + } + + /** + * Flush streamers on all currently open client contexts. + */ + private void closeAllSqlStreams() { + for (SqlClientContext cliCtx : cliCtxs) + U.close(cliCtx, log); + } + /** * Process schema propose message from discovery thread. * @@ -1858,13 +1892,7 @@ private void checkxEnabled() throws IgniteException { */ public List>> querySqlFields(final SqlFieldsQuery qry, final boolean keepBinary, final boolean failOnMultipleStmts) { - return querySqlFields(null, qry, keepBinary, failOnMultipleStmts); - } - - @SuppressWarnings("unchecked") - public FieldsQueryCursor> querySqlFields(final GridCacheContext cctx, final SqlFieldsQuery qry, - final boolean keepBinary) { - return querySqlFields(cctx, qry, keepBinary, true).get(0); + return querySqlFields(null, qry, null, keepBinary, failOnMultipleStmts); } /** @@ -1875,7 +1903,7 @@ public FieldsQueryCursor> querySqlFields(final GridCacheContext cct * @return Cursor. 
*/ public FieldsQueryCursor> querySqlFields(final SqlFieldsQuery qry, final boolean keepBinary) { - return querySqlFields(null, qry, keepBinary, true).get(0); + return querySqlFields(null, qry, null, keepBinary, true).get(0); } /** @@ -1883,14 +1911,16 @@ public FieldsQueryCursor> querySqlFields(final SqlFieldsQuery qry, final * * @param cctx Cache context. * @param qry Query. + * @param cliCtx Client context. * @param keepBinary Keep binary flag. * @param failOnMultipleStmts If {@code true} the method must throws exception when query contains * more then one SQL statement. * @return Cursor. */ @SuppressWarnings("unchecked") - public List>> querySqlFields(@Nullable final GridCacheContext cctx, - final SqlFieldsQuery qry, final boolean keepBinary, final boolean failOnMultipleStmts) { + public List>> querySqlFields(@Nullable final GridCacheContext cctx, + final SqlFieldsQuery qry, final SqlClientContext cliCtx, final boolean keepBinary, + final boolean failOnMultipleStmts) { checkxEnabled(); validateSqlFieldsQuery(qry); @@ -1918,7 +1948,7 @@ public List>> querySqlFields(@Nullable final GridCache GridQueryCancel cancel = new GridQueryCancel(); List>> res = - idx.querySqlFields(schemaName, qry, keepBinary, failOnMultipleStmts, cancel); + idx.querySqlFields(schemaName, qry, cliCtx, keepBinary, failOnMultipleStmts, cancel); if (cctx != null) sendQueryExecutedEvent(qry.getSql(), qry.getArgs(), cctx.name()); @@ -1957,7 +1987,7 @@ private static void validateSqlFieldsQuery(SqlFieldsQuery qry) { * @param schemaName Schema name. * @param streamer Data streamer. * @param qry Query. - * @return Iterator. + * @return Update counter. */ public long streamUpdateQuery(@Nullable final String cacheName, final String schemaName, final IgniteDataStreamer streamer, final String qry, final Object[] args) { @@ -1983,6 +2013,33 @@ public long streamUpdateQuery(@Nullable final String cacheName, final String sch } } + /** + * @param schemaName Schema name. + * @param cliCtx Client context. 
+ * @param qry Query. + * @param args Query arguments. + * @return Update counters. + */ + public List streamBatchedUpdateQuery(final String schemaName, final SqlClientContext cliCtx, + final String qry, final List args) { + if (!busyLock.enterBusy()) + throw new IllegalStateException("Failed to execute query (grid is stopping)."); + + try { + return executeQuery(GridCacheQueryType.SQL_FIELDS, qry, null, new IgniteOutClosureX>() { + @Override public List applyx() throws IgniteCheckedException { + return idx.streamBatchedUpdateQuery(schemaName, qry, args, cliCtx); + } + }, true); + } + catch (IgniteCheckedException e) { + throw new CacheException(e); + } + finally { + busyLock.leaveBusy(); + } + } + /** * Execute distributed SQL query. * @@ -2253,15 +2310,13 @@ private void processDynamicAddColumn(QueryTypeDescriptorImpl d, List /** * - * @param cacheName Cache name. + * @param schemaName Cache name. * @param sql Query. * @return {@link PreparedStatement} from underlying engine to supply metadata to Prepared - most likely H2. */ - public PreparedStatement prepareNativeStatement(String cacheName, String sql) throws SQLException { + public PreparedStatement prepareNativeStatement(String schemaName, String sql) throws SQLException { checkxEnabled(); - String schemaName = idx.schema(cacheName); - return idx.prepareNativeStatement(schemaName, sql); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/SqlClientContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/SqlClientContext.java new file mode 100644 index 0000000000000..b1855355cab01 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/SqlClientContext.java @@ -0,0 +1,195 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.query; + +import java.util.HashMap; +import java.util.Map; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * Container for connection properties passed by various drivers (JDBC drivers, possibly ODBC) having notion of an + * SQL connection - Ignite basically does not have one.

        + * Also contains anything that a driver may need to share between threads processing queries of logically same client - + * see JDBC thin driver + */ +public class SqlClientContext implements AutoCloseable { + /** Kernal context. */ + private final GridKernalContext ctx; + + /** Distributed joins flag. */ + private final boolean distributedJoins; + + /** Enforce join order flag. */ + private final boolean enforceJoinOrder; + + /** Collocated flag. */ + private final boolean collocated; + + /** Lazy query execution flag. */ + private final boolean lazy; + + /** Skip reducer on update flag. */ + private final boolean skipReducerOnUpdate; + + /** Allow overwrites for duplicate keys on streamed {@code INSERT}s. */ + private final boolean streamAllowOverwrite; + + /** Parallel ops count per node for data streamer. */ + private final int streamNodeParOps; + + /** Node buffer size for data streamer. */ + private final int streamNodeBufSize; + + /** Auto flush frequency for streaming. */ + private final long streamFlushTimeout; + + /** Streamers for various caches. */ + private final Map> streamers; + + /** Logger. */ + private final IgniteLogger log; + + /** + * @param ctx Kernal context. + * @param distributedJoins Distributed joins flag. + * @param enforceJoinOrder Enforce join order flag. + * @param collocated Collocated flag. + * @param lazy Lazy query execution flag. + * @param skipReducerOnUpdate Skip reducer on update flag. + * @param stream Streaming state flag + * @param streamAllowOverwrite Allow overwrites for duplicate keys on streamed {@code INSERT}s. + * @param streamNodeParOps Parallel ops count per node for data streamer. + * @param streamNodeBufSize Node buffer size for data streamer. + * @param streamFlushTimeout Auto flush frequency for streaming. 
+ */ + public SqlClientContext(GridKernalContext ctx, boolean distributedJoins, boolean enforceJoinOrder, + boolean collocated, boolean lazy, boolean skipReducerOnUpdate, boolean stream, boolean streamAllowOverwrite, + int streamNodeParOps, int streamNodeBufSize, long streamFlushTimeout) { + this.ctx = ctx; + this.distributedJoins = distributedJoins; + this.enforceJoinOrder = enforceJoinOrder; + this.collocated = collocated; + this.lazy = lazy; + this.skipReducerOnUpdate = skipReducerOnUpdate; + this.streamAllowOverwrite = streamAllowOverwrite; + this.streamNodeParOps = streamNodeParOps; + this.streamNodeBufSize = streamNodeBufSize; + this.streamFlushTimeout = streamFlushTimeout; + + streamers = stream ? new HashMap<>() : null; + + log = ctx.log(SqlClientContext.class.getName()); + + ctx.query().registerClientContext(this); + } + + /** + * @return Collocated flag. + */ + public boolean isCollocated() { + return collocated; + } + + /** + * @return Distributed joins flag. + */ + public boolean isDistributedJoins() { + return distributedJoins; + } + + /** + * @return Enforce join order flag. + */ + public boolean isEnforceJoinOrder() { + return enforceJoinOrder; + } + + /** + * @return Lazy query execution flag. + */ + public boolean isLazy() { + return lazy; + } + + /** + * @return Skip reducer on update flag, + */ + public boolean isSkipReducerOnUpdate() { + return skipReducerOnUpdate; + } + + /** + * @return Streaming state flag (on or off). + */ + public boolean isStream() { + return streamers != null; + } + + /** + * @param cacheName Cache name. + * @return Streamer for given cache. 
+ */ + public IgniteDataStreamer streamerForCache(String cacheName) { + Map> curStreamers = streamers; + + if (curStreamers == null) + return null; + + IgniteDataStreamer res = curStreamers.get(cacheName); + + if (res != null) + return res; + + res = ctx.grid().dataStreamer(cacheName); + + IgniteDataStreamer exStreamer = curStreamers.putIfAbsent(cacheName, res); + + if (exStreamer == null) { + res.autoFlushFrequency(streamFlushTimeout); + + res.allowOverwrite(streamAllowOverwrite); + + if (streamNodeBufSize > 0) + res.perNodeBufferSize(streamNodeBufSize); + + if (streamNodeParOps > 0) + res.perNodeParallelOperations(streamNodeParOps); + + return res; + } + else { // Someone got ahead of us. + res.close(); + + return exStreamer; + } + } + + /** {@inheritDoc} */ + @Override public void close() throws Exception { + ctx.query().unregisterClientContext(this); + + if (streamers == null) + return; + + for (IgniteDataStreamer s : streamers.values()) + U.close(s, log); + } +} diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java index 4fb62c2b56a55..176a47b3b92dc 100644 --- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java +++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java @@ -51,6 +51,7 @@ import org.apache.ignite.internal.processors.query.GridRunningQueryInfo; import org.apache.ignite.internal.processors.query.QueryField; import org.apache.ignite.internal.processors.query.QueryIndexDescriptorImpl; +import org.apache.ignite.internal.processors.query.SqlClientContext; import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitor; import org.apache.ignite.internal.util.GridSpinBusyLock; import 
org.apache.ignite.internal.util.lang.GridCloseableIterator; @@ -243,12 +244,18 @@ private static class FailedIndexing implements GridQueryIndexing { /** {@inheritDoc} */ @Override public List>> querySqlFields(String schemaName, SqlFieldsQuery qry, - boolean keepBinary, boolean failOnMultipleStmts, GridQueryCancel cancel) { + SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts, GridQueryCancel cancel) { return null; } /** {@inheritDoc} */ - @Override public long streamUpdateQuery(String spaceName, String qry, @Nullable Object[] params, + @Override public List streamBatchedUpdateQuery(String schemaName, String qry, List params, + SqlClientContext cliCtx) throws IgniteCheckedException { + return Collections.emptyList(); + } + + /** {@inheritDoc} */ + @Override public long streamUpdateQuery(String schemaName, String qry, @Nullable Object[] params, IgniteDataStreamer streamer) throws IgniteCheckedException { return 0; } @@ -366,8 +373,8 @@ private static class FailedIndexing implements GridQueryIndexing { } /** {@inheritDoc} */ - @Override public boolean isInsertStatement(PreparedStatement nativeStmt) { - return false; + @Override public void checkStatementStreamable(PreparedStatement nativeStmt) { + // No-op. 
} } } diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java b/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java index 53ca516967201..b88f722a24bb3 100644 --- a/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java +++ b/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java @@ -302,7 +302,7 @@ public static Throwable assertThrows(@Nullable IgniteLogger log, Callable cal call.call(); } catch (Throwable e) { - if (cls != e.getClass()) { + if (cls != e.getClass() && !cls.isAssignableFrom(e.getClass())) { if (e.getClass() == CacheException.class && e.getCause() != null && e.getCause().getClass() == cls) e = e.getCause(); else { diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java index a2435a8f9b26a..9d65ec369bf3c 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java @@ -385,6 +385,7 @@ GridQueryFieldsResult updateSqlFieldsLocal(String schemaName, Connection conn, P /** * Perform given statement against given data streamer. Only rows based INSERT is supported. * + * @param schemaName Schema name. * @param streamer Streamer to feed data to. * @param stmt Statement. * @param args Statement arguments. @@ -392,81 +393,74 @@ GridQueryFieldsResult updateSqlFieldsLocal(String schemaName, Connection conn, P * @throws IgniteCheckedException if failed. 
*/ @SuppressWarnings({"unchecked", "ConstantConditions"}) - long streamUpdateQuery(IgniteDataStreamer streamer, PreparedStatement stmt, final Object[] args) + long streamUpdateQuery(String schemaName, IgniteDataStreamer streamer, PreparedStatement stmt, final Object[] args) throws IgniteCheckedException { + idx.checkStatementStreamable(stmt); + Prepared p = GridSqlQueryParser.prepared(stmt); assert p != null; - final UpdatePlan plan = UpdatePlanBuilder.planForStatement(p, true, idx, null, null, null); + final UpdatePlan plan = getPlanForStatement(schemaName, null, p, null, true, null); - if (!F.eq(streamer.cacheName(), plan.cacheContext().name())) - throw new IgniteSQLException("Cross cache streaming is not supported, please specify cache explicitly" + - " in connection options", IgniteQueryErrorCode.UNSUPPORTED_OPERATION); + assert plan.isLocalSubquery(); - if (plan.mode() == UpdateMode.INSERT && plan.rowCount() > 0) { - assert plan.isLocalSubquery(); + final GridCacheContext cctx = plan.cacheContext(); - final GridCacheContext cctx = plan.cacheContext(); + QueryCursorImpl> cur; - QueryCursorImpl> cur; + final ArrayList> data = new ArrayList<>(plan.rowCount()); - final ArrayList> data = new ArrayList<>(plan.rowCount()); - - QueryCursorImpl> stepCur = new QueryCursorImpl<>(new Iterable>() { - @Override public Iterator> iterator() { - try { - Iterator> it; - - if (!F.isEmpty(plan.selectQuery())) { - GridQueryFieldsResult res = idx.queryLocalSqlFields(idx.schema(cctx.name()), - plan.selectQuery(), F.asList(U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY)), - null, false, 0, null); + QueryCursorImpl> stepCur = new QueryCursorImpl<>(new Iterable>() { + @Override public Iterator> iterator() { + try { + Iterator> it; - it = res.iterator(); - } - else - it = plan.createRows(U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY)).iterator(); + if (!F.isEmpty(plan.selectQuery())) { + GridQueryFieldsResult res = idx.queryLocalSqlFields(idx.schema(cctx.name()), + plan.selectQuery(), 
F.asList(U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY)), + null, false, 0, null); - return new GridQueryCacheObjectsIterator(it, idx.objectContext(), cctx.keepBinary()); + it = res.iterator(); } - catch (IgniteCheckedException e) { - throw new IgniteException(e); - } - } - }, null); + else + it = plan.createRows(U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY)).iterator(); - data.addAll(stepCur.getAll()); - - cur = new QueryCursorImpl<>(new Iterable>() { - @Override public Iterator> iterator() { - return data.iterator(); + return new GridQueryCacheObjectsIterator(it, idx.objectContext(), cctx.keepBinary()); } - }, null); - - if (plan.rowCount() == 1) { - IgniteBiTuple t = plan.processRow(cur.iterator().next()); + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + } + }, null); - streamer.addData(t.getKey(), t.getValue()); + data.addAll(stepCur.getAll()); - return 1; + cur = new QueryCursorImpl<>(new Iterable>() { + @Override public Iterator> iterator() { + return data.iterator(); } + }, null); - Map rows = new LinkedHashMap<>(plan.rowCount()); + if (plan.rowCount() == 1) { + IgniteBiTuple t = plan.processRow(cur.iterator().next()); - for (List row : cur) { - final IgniteBiTuple t = plan.processRow(row); + streamer.addData(t.getKey(), t.getValue()); - rows.put(t.getKey(), t.getValue()); - } + return 1; + } + + Map rows = new LinkedHashMap<>(plan.rowCount()); - streamer.addData(rows); + for (List row : cur) { + final IgniteBiTuple t = plan.processRow(row); - return rows.size(); + rows.put(t.getKey(), t.getValue()); } - else - throw new IgniteSQLException("Only tuple based INSERT statements are supported in streaming mode", - IgniteQueryErrorCode.UNSUPPORTED_OPERATION); + + streamer.addData(rows); + + return rows.size(); } /** @@ -519,7 +513,7 @@ private UpdateResult executeUpdateStatement(String schemaName, final GridCacheCo .setPageSize(fieldsQry.getPageSize()) .setTimeout(fieldsQry.getTimeout(), TimeUnit.MILLISECONDS); - cur = 
(QueryCursorImpl>)idx.querySqlFields(schemaName, newFieldsQry, true, true, + cur = (QueryCursorImpl>)idx.querySqlFields(schemaName, newFieldsQry, null, true, true, cancel).get(0); } else if (plan.hasRows()) @@ -610,7 +604,7 @@ private UpdateResult processDmlSelectResult(GridCacheContext cctx, UpdatePlan pl * @return Update plan. */ @SuppressWarnings({"unchecked", "ConstantConditions"}) - private UpdatePlan getPlanForStatement(String schema, Connection conn, Prepared p, SqlFieldsQuery fieldsQry, + UpdatePlan getPlanForStatement(String schema, Connection conn, Prepared p, SqlFieldsQuery fieldsQry, boolean loc, @Nullable Integer errKeysPos) throws IgniteCheckedException { H2CachedStatementKey planKey = H2CachedStatementKey.forDmlStatement(schema, p.getSQL(), fieldsQry, loc); diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java index b3e140baceb7c..227836437deae 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java @@ -96,6 +96,7 @@ import org.apache.ignite.internal.processors.query.QueryField; import org.apache.ignite.internal.processors.query.QueryIndexDescriptorImpl; import org.apache.ignite.internal.processors.query.QueryUtils; +import org.apache.ignite.internal.processors.query.SqlClientContext; import org.apache.ignite.internal.processors.query.h2.database.H2RowFactory; import org.apache.ignite.internal.processors.query.h2.database.H2TreeIndex; import org.apache.ignite.internal.processors.query.h2.database.io.H2ExtrasInnerIO; @@ -104,6 +105,7 @@ import org.apache.ignite.internal.processors.query.h2.database.io.H2LeafIO; import org.apache.ignite.internal.processors.query.h2.ddl.DdlStatementsProcessor; import 
org.apache.ignite.internal.processors.query.h2.dml.DmlUtils; +import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan; import org.apache.ignite.internal.processors.query.h2.opt.GridH2DefaultTableEngine; import org.apache.ignite.internal.processors.query.h2.opt.GridH2IndexBase; import org.apache.ignite.internal.processors.query.h2.opt.GridH2PlainRowFactory; @@ -151,7 +153,6 @@ import org.h2.api.ErrorCode; import org.h2.api.JavaObjectSerializer; import org.h2.command.Prepared; -import org.h2.command.dml.Insert; import org.h2.command.dml.NoOperation; import org.h2.engine.Session; import org.h2.engine.SysProperties; @@ -494,10 +495,10 @@ private PreparedStatement prepare0(Connection c, String sql) throws SQLException } /** {@inheritDoc} */ - @Override public PreparedStatement prepareNativeStatement(String schemaName, String sql) throws SQLException { + @Override public PreparedStatement prepareNativeStatement(String schemaName, String sql) { Connection conn = connectionForSchema(schemaName); - return prepareStatement(conn, sql, true); + return prepareStatementAndCaches(conn, sql); } /** @@ -983,7 +984,60 @@ else if (DdlStatementsProcessor.isDdlStatement(p)) throw new IgniteSQLException(e); } - return dmlProc.streamUpdateQuery(streamer, stmt, params); + return dmlProc.streamUpdateQuery(schemaName, streamer, stmt, params); + } + + /** {@inheritDoc} */ + @SuppressWarnings("ForLoopReplaceableByForEach") + @Override public List streamBatchedUpdateQuery(String schemaName, String qry, List params, + SqlClientContext cliCtx) throws IgniteCheckedException { + if (cliCtx == null || !cliCtx.isStream()) { + U.warn(log, "Connection is not in streaming mode."); + + return zeroBatchedStreamedUpdateResult(params.size()); + } + + final Connection conn = connectionForSchema(schemaName); + + final PreparedStatement stmt = prepareStatementAndCaches(conn, qry); + + if (GridSqlQueryParser.checkMultipleStatements(stmt)) + throw new IgniteSQLException("Multiple statements queries 
are not supported for streaming mode.", + IgniteQueryErrorCode.UNSUPPORTED_OPERATION); + + checkStatementStreamable(stmt); + + Prepared p = GridSqlQueryParser.prepared(stmt); + + UpdatePlan plan = dmlProc.getPlanForStatement(schemaName, conn, p, null, true, null); + + IgniteDataStreamer streamer = cliCtx.streamerForCache(plan.cacheContext().name()); + + if (streamer != null) { + List res = new ArrayList<>(params.size()); + + for (int i = 0; i < params.size(); i++) + res.add(dmlProc.streamUpdateQuery(schemaName, streamer, stmt, params.get(i))); + + return res; + } + else { + U.warn(log, "Streaming has been turned off by concurrent command."); + + return zeroBatchedStreamedUpdateResult(params.size()); + } + } + + /** + * @param size Result size. + * @return List of given size filled with 0Ls. + */ + private static List zeroBatchedStreamedUpdateResult(int size) { + Long[] res = new Long[size]; + + Arrays.fill(res, 0); + + return Arrays.asList(res); } /** @@ -1361,7 +1415,7 @@ UpdateResult runDistributedUpdate( fqry.setTimeout(qry.getTimeout(), TimeUnit.MILLISECONDS); final QueryCursor> res = - querySqlFields(schemaName, fqry, keepBinary, true, null).get(0); + querySqlFields(schemaName, fqry, null, keepBinary, true, null).get(0); final Iterable> converted = new Iterable>() { @Override public Iterator> iterator() { @@ -1397,19 +1451,19 @@ UpdateResult runDistributedUpdate( * Try executing query using native facilities. * * @param schemaName Schema name. - * @param qry Query. + * @param sql Query. * @return Result or {@code null} if cannot parse/process this query. */ - private List>> tryQueryDistributedSqlFieldsNative(String schemaName, SqlFieldsQuery qry) { + private List>> tryQueryDistributedSqlFieldsNative(String schemaName, String sql) { // Heuristic check for fast return. - if (!INTERNAL_CMD_RE.matcher(qry.getSql().trim()).find()) + if (!INTERNAL_CMD_RE.matcher(sql.trim()).find()) return null; // Parse. 
SqlCommand cmd; try { - SqlParser parser = new SqlParser(schemaName, qry.getSql()); + SqlParser parser = new SqlParser(schemaName, sql); cmd = parser.nextCommand(); @@ -1426,32 +1480,33 @@ private List>> tryQueryDistributedSqlFieldsNative(Stri catch (Exception e) { // Cannot parse, return. if (log.isDebugEnabled()) - log.debug("Failed to parse SQL with native parser [qry=" + qry.getSql() + ", err=" + e + ']'); + log.debug("Failed to parse SQL with native parser [qry=" + sql + ", err=" + e + ']'); if (!IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK)) return null; int code = e instanceof SqlParseException ? ((SqlParseException)e).code() : IgniteQueryErrorCode.PARSING; - throw new IgniteSQLException("Failed to parse DDL statement: " + qry.getSql() + ": " + e.getMessage(), + throw new IgniteSQLException("Failed to parse DDL statement: " + sql + ": " + e.getMessage(), code, e); } // Execute. if (cmd instanceof SqlBulkLoadCommand) { - FieldsQueryCursor> cursor = dmlProc.runNativeDmlStatement(qry.getSql(), cmd); + FieldsQueryCursor> cursor = dmlProc.runNativeDmlStatement(sql, cmd); return Collections.singletonList(cursor); } + else { + try { + FieldsQueryCursor> cursor = ddlProc.runDdlStatement(sql, cmd); - try { - FieldsQueryCursor> res = ddlProc.runDdlStatement(qry.getSql(), cmd); - - return Collections.singletonList(res); - } - catch (IgniteCheckedException e) { - throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + qry.getSql() + "]: " - + e.getMessage(), e); + return Collections.singletonList(cursor); + } + catch (IgniteCheckedException e) { + throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + sql + "]: " + + e.getMessage(), e); + } } } @@ -1473,8 +1528,8 @@ private void checkQueryType(SqlFieldsQuery qry, boolean isQry) { /** {@inheritDoc} */ @SuppressWarnings("StringEquality") @Override public List>> querySqlFields(String schemaName, SqlFieldsQuery qry, - boolean keepBinary, 
boolean failOnMultipleStmts, GridQueryCancel cancel) { - List>> res = tryQueryDistributedSqlFieldsNative(schemaName, qry); + SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts, GridQueryCancel cancel) { + List>> res = tryQueryDistributedSqlFieldsNative(schemaName, qry.getSql()); if (res != null) return res; @@ -1512,8 +1567,8 @@ private void checkQueryType(SqlFieldsQuery qry, boolean isQry) { // We may use this cached statement only for local queries and non queries. if (qry.isLocal() || !prepared.isQuery()) - return (List>>)doRunPrepared(schemaName, prepared, qry, null, null, - keepBinary, cancel); + return (List>>)doRunPrepared(schemaName, prepared, qry, null, cliCtx, + null, keepBinary, cancel); } } @@ -1543,7 +1598,7 @@ private void checkQueryType(SqlFieldsQuery qry, boolean isQry) { firstArg += prepared.getParameters().size(); - res.addAll(doRunPrepared(schemaName, prepared, newQry, twoStepQry, meta, keepBinary, cancel)); + res.addAll(doRunPrepared(schemaName, prepared, newQry, twoStepQry, cliCtx, meta, keepBinary, cancel)); if (parseRes.twoStepQuery() != null && parseRes.twoStepQueryKey() != null && !parseRes.twoStepQuery().explain()) @@ -1559,14 +1614,14 @@ private void checkQueryType(SqlFieldsQuery qry, boolean isQry) { * @param prepared H2 command. * @param qry Fields query with flags. * @param twoStepQry Two-step query if this query must be executed in a distributed way. + * @param cliCtx Client context, or {@code null} if not applicable. * @param meta Metadata for {@code twoStepQry}. * @param keepBinary Whether binary objects must not be deserialized automatically. - * @param cancel Query cancel state holder. - * @return Query result. + * @param cancel Query cancel state holder. @return Query result. 
*/ private List>> doRunPrepared(String schemaName, Prepared prepared, - SqlFieldsQuery qry, GridCacheTwoStepQuery twoStepQry, List meta, boolean keepBinary, - GridQueryCancel cancel) { + SqlFieldsQuery qry, GridCacheTwoStepQuery twoStepQry, @Nullable SqlClientContext cliCtx, + List meta, boolean keepBinary, GridQueryCancel cancel) { String sqlQry = qry.getSql(); boolean loc = qry.isLocal(); @@ -2233,10 +2288,10 @@ private Collection tables(String cacheName) { } /** {@inheritDoc} */ - @Override public boolean isInsertStatement(PreparedStatement nativeStmt) { - Prepared prep = GridSqlQueryParser.prepared(nativeStmt); - - return prep instanceof Insert; + @Override public void checkStatementStreamable(PreparedStatement nativeStmt) { + if (!GridSqlQueryParser.isStreamableInsertStatement(nativeStmt)) + throw new IgniteSQLException("Only tuple based INSERT statements are supported in streaming mode.", + IgniteQueryErrorCode.UNSUPPORTED_OPERATION); } /** diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java index f2119ea9f7dbb..6a933125c9adf 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java @@ -103,7 +103,7 @@ public void start(final GridKernalContext ctx, IgniteH2Indexing idx) { * @throws IgniteCheckedException On error. 
*/ @SuppressWarnings("unchecked") - public FieldsQueryCursor> runDdlStatement(String sql, SqlCommand cmd) throws IgniteCheckedException{ + public FieldsQueryCursor> runDdlStatement(String sql, SqlCommand cmd) throws IgniteCheckedException { IgniteInternalFuture fut; try { @@ -170,12 +170,7 @@ else if (cmd instanceof SqlDropIndexCommand) { if (fut != null) fut.get(); - QueryCursorImpl> resCur = (QueryCursorImpl>)new QueryCursorImpl(Collections.singletonList - (Collections.singletonList(0L)), null, false); - - resCur.fieldsMeta(UPDATE_RESULT_META); - - return resCur; + return zeroCursor(); } catch (SchemaOperationException e) { throw convert(e); @@ -188,6 +183,19 @@ else if (cmd instanceof SqlDropIndexCommand) { } } + /** + * @return Single-column, single-row cursor with 0 as number of updated records. + */ + @SuppressWarnings("unchecked") + public static QueryCursorImpl> zeroCursor() { + QueryCursorImpl> resCur = (QueryCursorImpl>)new QueryCursorImpl(Collections.singletonList + (Collections.singletonList(0L)), null, false); + + resCur.fieldsMeta(UPDATE_RESULT_META); + + return resCur; + } + /** * Execute DDL statement. * diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java index 10d485a84c761..98fbb97eb80ad 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java @@ -505,7 +505,7 @@ public String selectQuery() { /** * @return Local subquery flag. 
*/ - @Nullable public boolean isLocalSubquery() { + public boolean isLocalSubquery() { return isLocSubqry; } } diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java index bced83667aff4..d897ac7e9b84c 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java @@ -85,20 +85,21 @@ private UpdatePlanBuilder() { * @param loc Local query flag. * @param idx Indexing. * @param conn Connection. - * @param fieldsQuery Original query. + * @param fieldsQry Original query. * @return Update plan. */ public static UpdatePlan planForStatement(Prepared prepared, boolean loc, IgniteH2Indexing idx, - @Nullable Connection conn, @Nullable SqlFieldsQuery fieldsQuery, @Nullable Integer errKeysPos) + @Nullable Connection conn, @Nullable SqlFieldsQuery fieldsQry, @Nullable Integer errKeysPos) throws IgniteCheckedException { - assert !prepared.isQuery(); - GridSqlStatement stmt = new GridSqlQueryParser(false).parse(prepared); if (stmt instanceof GridSqlMerge || stmt instanceof GridSqlInsert) - return planForInsert(stmt, loc, idx, conn, fieldsQuery); + return planForInsert(stmt, loc, idx, conn, fieldsQry); + else if (stmt instanceof GridSqlUpdate || stmt instanceof GridSqlDelete) + return planForUpdate(stmt, loc, idx, conn, fieldsQry, errKeysPos); else - return planForUpdate(stmt, loc, idx, conn, fieldsQuery, errKeysPos); + throw new IgniteSQLException("Unsupported operation: " + prepared.getSQL(), + IgniteQueryErrorCode.UNSUPPORTED_OPERATION); } /** diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java 
b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java index 50b090970f33d..299aedab6067a 100644 --- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java +++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java @@ -1949,6 +1949,18 @@ private GridSqlElement parseExpression0(Expression expression, boolean calcTypes expression.getClass().getSimpleName() + ']'); } + /** + * Check if passed statement is insert statement eligible for streaming. + * + * @param nativeStmt Native statement. + * @return {@code True} if streamable insert. + */ + public static boolean isStreamableInsertStatement(PreparedStatement nativeStmt) { + Prepared prep = prepared(nativeStmt); + + return prep instanceof Insert && INSERT_QUERY.get((Insert)prep) == null; + } + /** * @param cond Condition. * @param o Object. diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheCrossCacheQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheCrossCacheQuerySelfTest.java index 069bdd7092d16..cf8bb2ebe6b09 100644 --- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheCrossCacheQuerySelfTest.java +++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheCrossCacheQuerySelfTest.java @@ -140,7 +140,7 @@ public void testTwoStepGroupAndAggregates() throws Exception { SqlFieldsQuery qry = new SqlFieldsQuery("select f.productId, p.name, f.price " + "from FactPurchase f, \"replicated-prod\".DimProduct p where p.id = f.productId "); - for (List o : qryProc.querySqlFields(cache.context(), qry, false, true).get(0).getAll()) { + for (List o : qryProc.querySqlFields(cache.context(), qry, null, false, true).get(0).getAll()) { X.println("___ -> " + o); set1.add((Integer)o.get(0)); @@ -154,7 +154,7 @@ public 
void testTwoStepGroupAndAggregates() throws Exception { qry = new SqlFieldsQuery("select productId from FactPurchase group by productId"); - for (List o : qryProc.querySqlFields(cache.context(), qry, false, true).get(0).getAll()) { + for (List o : qryProc.querySqlFields(cache.context(), qry, null, false, true).get(0).getAll()) { X.println("___ -> " + o); assertTrue(set0.add((Integer) o.get(0))); @@ -173,7 +173,7 @@ public void testTwoStepGroupAndAggregates() throws Exception { "where p.id = f.productId " + "group by f.productId, p.name"); - for (List o : qryProc.querySqlFields(cache.context(), qry, false, true).get(0).getAll()) { + for (List o : qryProc.querySqlFields(cache.context(), qry, null, false, true).get(0).getAll()) { X.println("___ -> " + o); assertTrue(names.add((String)o.get(0))); @@ -190,7 +190,7 @@ public void testTwoStepGroupAndAggregates() throws Exception { "group by f.productId, p.name " + "having s >= 15"); - for (List o : qryProc.querySqlFields(cache.context(), qry, false, true).get(0).getAll()) { + for (List o : qryProc.querySqlFields(cache.context(), qry, null, false, true).get(0).getAll()) { X.println("___ -> " + o); assertTrue(i(o, 1) >= 15); @@ -203,7 +203,7 @@ public void testTwoStepGroupAndAggregates() throws Exception { qry = new SqlFieldsQuery("select top 3 distinct productId " + "from FactPurchase f order by productId desc "); - for (List o : qryProc.querySqlFields(cache.context(), qry, false, true).get(0).getAll()) { + for (List o : qryProc.querySqlFields(cache.context(), qry, null, false, true).get(0).getAll()) { X.println("___ -> " + o); assertEquals(top--, o.get(0)); @@ -216,7 +216,7 @@ public void testTwoStepGroupAndAggregates() throws Exception { qry = new SqlFieldsQuery("select distinct productId " + "from FactPurchase f order by productId desc limit 2 offset 1"); - for (List o : qryProc.querySqlFields(cache.context(), qry, false, true).get(0).getAll()) { + for (List o : qryProc.querySqlFields(cache.context(), qry, null, false, 
true).get(0).getAll()) { X.println("___ -> " + o); assertEquals(top--, o.get(0)); @@ -256,13 +256,13 @@ public void testMultiStatement() throws Exception { GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { - qryProc.querySqlFields(cache.context(), qry, false, true); + qryProc.querySqlFields(cache.context(), qry, null, false, true); return null; } }, IgniteSQLException.class, "Multiple statements queries are not supported"); - List>> cursors = qryProc.querySqlFields(cache.context(), qry, false, false); + List>> cursors = qryProc.querySqlFields(cache.context(), qry, null, false, false); assertEquals(2, cursors.size()); @@ -274,7 +274,7 @@ public void testMultiStatement() throws Exception { GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { - qryProc.querySqlFields(cache.context(), qry, false, false); + qryProc.querySqlFields(cache.context(), qry, null, false, false); return null; } From 812cf741b6fb8439afbcba88e35793116dfb1b05 Mon Sep 17 00:00:00 2001 From: Alexander Paschenko Date: Fri, 16 Feb 2018 23:22:03 +0300 Subject: [PATCH 242/243] Test fix --- .../java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java index 49746b690eed1..20594083ce82e 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java @@ -107,7 +107,7 @@ public void testIndexErrors() throws SQLException { */ public void testDmlErrors() throws SQLException { checkErrorState("INSERT INTO \"test\".INTEGER(_key, _val) values(1, null)", "22004", - "Value for INSERT, MERGE, or UPDATE must not be null"); + "Value for INSERT, COPY, MERGE, or 
UPDATE must not be null"); checkErrorState("INSERT INTO \"test\".INTEGER(_key, _val) values(1, 'zzz')", "0700B", "Value conversion failed [from=java.lang.String, to=java.lang.Integer]"); From e3a6390444ce52a96ef9ae5982c03e55d886b1a0 Mon Sep 17 00:00:00 2001 From: Alexander Paschenko Date: Mon, 19 Feb 2018 13:14:58 +0300 Subject: [PATCH 243/243] Test fix --- .../Cache/Query/CacheDmlQueriesTest.cs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheDmlQueriesTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheDmlQueriesTest.cs index a2584cccbcaae..955a5a7d4dd06 100644 --- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheDmlQueriesTest.cs +++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/CacheDmlQueriesTest.cs @@ -402,8 +402,8 @@ public void TestDefaultValue() Assert.AreEqual(-1, cache.GetConfiguration().QueryEntities.Single().Fields.Single(x => x.Name == "Id").DefaultValue); - cache.Query(new SqlFieldsQuery("insert into foo(_key, id, name) values (?, ?, ?)", 1, 2, "John")).GetAll(); - cache.Query(new SqlFieldsQuery("insert into foo(_key, name) values (?, ?)", 3, "Mary")).GetAll(); + cache.QueryFields(new SqlFieldsQuery("insert into foo(_key, id, name) values (?, ?, ?)", 1, 2, "John")).GetAll(); + cache.QueryFields(new SqlFieldsQuery("insert into foo(_key, name) values (?, ?)", 3, "Mary")).GetAll(); Assert.AreEqual(2, cache[1].Id); Assert.AreEqual(-1, cache[3].Id); @@ -421,8 +421,8 @@ public void TestDefaultValue() var cache2 = Ignition.GetIgnite().CreateCache(cfg).WithKeepBinary(); - cache2.Query(new SqlFieldsQuery("insert into DefValTest(_key, name) values (?, ?)", 1, "John")).GetAll(); - cache2.Query(new SqlFieldsQuery("insert into DefValTest(_key) values (?)", 2)).GetAll(); + cache2.QueryFields(new SqlFieldsQuery("insert into DefValTest(_key, name) values (?, ?)", 1, "John")).GetAll(); + cache2.QueryFields(new 
SqlFieldsQuery("insert into DefValTest(_key) values (?)", 2)).GetAll(); Assert.AreEqual("John", cache2[1].GetField("Name")); Assert.AreEqual("foo", cache2[2].GetField("Name"));