IGNITE-12821 Added check-sizes parameter to validate_indexes cmd
Signed-off-by: Slava Koptilin <slava.koptilin@gmail.com>
tkalkirill authored and sk0x50 committed Apr 14, 2020
1 parent 8a9f8bd commit 54e3aa4
Showing 26 changed files with 1,694 additions and 393 deletions.
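
For orientation, the usage line assembled by usageCache(...) in CacheValidateIndexes below suggests an invocation roughly of this shape (shown with the standard control.sh launcher; the exact help text the utility prints may differ):

    control.sh --cache validate_indexes [cacheName1,...,cacheNameN] [nodeId] [--check-first N|--check-through K|--check-crc|--check-sizes]

The new --check-sizes flag asks the validation task to also verify that cache size and index size match; any mismatches are reported per node under "Size check" in the command output.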
@@ -20,6 +20,7 @@
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.UUID;
import java.util.logging.Logger;
@@ -34,13 +35,16 @@
import org.apache.ignite.internal.util.typedef.internal.U;
import org.apache.ignite.internal.visor.verify.IndexIntegrityCheckIssue;
import org.apache.ignite.internal.visor.verify.IndexValidationIssue;
import org.apache.ignite.internal.visor.verify.ValidateIndexesCheckSizeIssue;
import org.apache.ignite.internal.visor.verify.ValidateIndexesCheckSizeResult;
import org.apache.ignite.internal.visor.verify.ValidateIndexesPartitionResult;
import org.apache.ignite.internal.visor.verify.VisorValidateIndexesJobResult;
import org.apache.ignite.internal.visor.verify.VisorValidateIndexesTaskArg;
import org.apache.ignite.internal.visor.verify.VisorValidateIndexesTaskResult;

import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT;
import static org.apache.ignite.internal.commandline.CommandLogger.INDENT;
import static org.apache.ignite.internal.commandline.CommandLogger.join;
import static org.apache.ignite.internal.commandline.CommandLogger.optional;
import static org.apache.ignite.internal.commandline.CommandLogger.or;
import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode;
@@ -53,6 +57,7 @@
import static org.apache.ignite.internal.commandline.cache.argument.ValidateIndexesCommandArg.CHECK_FIRST;
import static org.apache.ignite.internal.commandline.cache.argument.ValidateIndexesCommandArg.CHECK_THROUGH;
import static org.apache.ignite.internal.commandline.cache.argument.ValidateIndexesCommandArg.CHECK_CRC;
import static org.apache.ignite.internal.commandline.cache.argument.ValidateIndexesCommandArg.CHECK_SIZES;

/**
* Validate indexes command.
@@ -74,43 +79,65 @@ public class CacheValidateIndexes implements Command<CacheValidateIndexes.Argume
map.put(CHECK_FIRST + " N", "validate only the first N keys");
map.put(CHECK_THROUGH + " K", "validate every Kth key");
map.put(CHECK_CRC.toString(), "check the CRC-sum of pages stored on disk");

usageCache(logger, VALIDATE_INDEXES, description, map,
optional(CACHES), OP_NODE_ID, optional(or(CHECK_FIRST + " N", CHECK_THROUGH + " K")));
map.put(CHECK_SIZES.toString(), "check that index size and cache size are the same");

usageCache(
logger,
VALIDATE_INDEXES,
description,
map,
optional(CACHES),
OP_NODE_ID,
optional(or(CHECK_FIRST + " N", CHECK_THROUGH + " K", CHECK_CRC, CHECK_SIZES))
);
}

/**
* Container for command arguments.
*/
public class Arguments {
/** Caches. */
private Set<String> caches;
private final Set<String> caches;

/** Node id. */
private UUID nodeId;
private final UUID nodeId;

/** Max number of entries to be checked. */
private int checkFirst = -1;
private final int checkFirst;

/** Number of entries to check through. */
private int checkThrough = -1;
private final int checkThrough;

/** Check CRC. */
private final boolean checkCrc;

/** Check CRC */
private boolean checkCrc;
/** Check that index size and cache size are same. */
private final boolean checkSizes;

/**
* @param caches Caches to validate.
* @param nodeId Node Id.
* @param checkFirst Max number of entries to be checked..
* Constructor.
*
* @param caches Caches.
* @param nodeId Node id.
* @param checkFirst Max number of entries to be checked.
* @param checkThrough Number of entries to check through.
* @param checkCrc Check CRC.
* @param checkSizes Check that index size and cache size are same.
*/
public Arguments(Set<String> caches, UUID nodeId, int checkFirst, int checkThrough, boolean checkCrc) {
public Arguments(
Set<String> caches,
UUID nodeId,
int checkFirst,
int checkThrough,
boolean checkCrc,
boolean checkSizes
) {
this.caches = caches;
this.nodeId = nodeId;
this.checkFirst = checkFirst;
this.checkThrough = checkThrough;
this.checkCrc = checkCrc;
this.checkSizes = checkSizes;
}

/**
Expand All @@ -127,13 +154,6 @@ public int checkFirst() {
return checkFirst;
}

/**
* @return Check CRC
*/
public boolean checkCrc() {
return checkCrc;
}

/**
* @return Number of entries to check through.
*/
@@ -147,6 +167,23 @@ public int checkThrough() {
public UUID nodeId() {
return nodeId;
}

/**
* @return Check CRC.
*/
public boolean checkCrc() {
return checkCrc;
}

/**
* Returns whether to check that index size and cache size are same.
*
* @return {@code true} if need check that index size and cache size
* are same.
*/
public boolean checkSizes() {
return checkSizes;
}
}

/** Command parsed arguments. */
@@ -164,7 +201,8 @@ public UUID nodeId() {
args.nodeId() != null ? Collections.singleton(args.nodeId()) : null,
args.checkFirst(),
args.checkThrough(),
args.checkCrc()
args.checkCrc(),
args.checkSizes()
);

try (GridClient client = Command.startClient(clientCfg)) {
@@ -173,46 +211,53 @@ public UUID nodeId() {

boolean errors = CommandLogger.printErrors(taskRes.exceptions(), "Index validation failed on nodes:", logger);

for (Map.Entry<UUID, VisorValidateIndexesJobResult> nodeEntry : taskRes.results().entrySet()) {
if (!nodeEntry.getValue().hasIssues())
for (Entry<UUID, VisorValidateIndexesJobResult> nodeEntry : taskRes.results().entrySet()) {
VisorValidateIndexesJobResult jobRes = nodeEntry.getValue();

if (!jobRes.hasIssues())
continue;

errors = true;

logger.info("Index issues found on node " + nodeEntry.getKey() + ":");

Collection<IndexIntegrityCheckIssue> integrityCheckFailures = nodeEntry.getValue().integrityCheckFailures();
for (IndexIntegrityCheckIssue is : jobRes.integrityCheckFailures())
logger.info(INDENT + is);

if (!integrityCheckFailures.isEmpty()) {
for (IndexIntegrityCheckIssue is : integrityCheckFailures)
logger.info(INDENT + is);
}

Map<PartitionKey, ValidateIndexesPartitionResult> partRes = nodeEntry.getValue().partitionResult();

for (Map.Entry<PartitionKey, ValidateIndexesPartitionResult> e : partRes.entrySet()) {
for (Entry<PartitionKey, ValidateIndexesPartitionResult> e : jobRes.partitionResult().entrySet()) {
ValidateIndexesPartitionResult res = e.getValue();

if (!res.issues().isEmpty()) {
logger.info(INDENT + CommandLogger.join(" ", e.getKey(), e.getValue()));
logger.info(INDENT + join(" ", e.getKey(), e.getValue()));

for (IndexValidationIssue is : res.issues())
logger.info(DOUBLE_INDENT + is);
}
}

Map<String, ValidateIndexesPartitionResult> idxRes = nodeEntry.getValue().indexResult();

for (Map.Entry<String, ValidateIndexesPartitionResult> e : idxRes.entrySet()) {
for (Entry<String, ValidateIndexesPartitionResult> e : jobRes.indexResult().entrySet()) {
ValidateIndexesPartitionResult res = e.getValue();

if (!res.issues().isEmpty()) {
logger.info(INDENT + CommandLogger.join(" ", "SQL Index", e.getKey(), e.getValue()));
logger.info(INDENT + join(" ", "SQL Index", e.getKey(), e.getValue()));

for (IndexValidationIssue is : res.issues())
logger.info(DOUBLE_INDENT + is);
}
}

for (Entry<String, ValidateIndexesCheckSizeResult> e : jobRes.checkSizeResult().entrySet()) {
ValidateIndexesCheckSizeResult res = e.getValue();
Collection<ValidateIndexesCheckSizeIssue> issues = res.issues();

if (issues.isEmpty())
continue;

logger.info(INDENT + join(" ", "Size check", e.getKey(), res));

for (ValidateIndexesCheckSizeIssue issue : issues)
logger.info(DOUBLE_INDENT + issue);
}
}

if (!errors)
@@ -230,18 +275,15 @@ public UUID nodeId() {
@Override public void parseArguments(CommandArgIterator argIter) {
int checkFirst = -1;
int checkThrough = -1;
boolean checkCrc = false;
UUID nodeId = null;
Set<String> caches = null;
boolean checkCrc = false;
boolean checkSizes = false;

while (argIter.hasNextSubArg()) {
String nextArg = argIter.nextArg("");

ValidateIndexesCommandArg arg = CommandArgUtils.of(nextArg, ValidateIndexesCommandArg.class);
if (arg == CHECK_CRC) {
checkCrc = true;
continue;
}

if (arg == CHECK_FIRST || arg == CHECK_THROUGH) {
if (!argIter.hasNextSubArg())
@@ -270,6 +312,15 @@ public UUID nodeId() {

continue;
}
else if (arg == CHECK_CRC) {
checkCrc = true;
continue;
}
else if (CHECK_SIZES == arg) {
checkSizes = true;

continue;
}

try {
nodeId = UUID.fromString(nextArg);
@@ -283,7 +334,7 @@ public UUID nodeId() {
caches = argIter.parseStringSet(nextArg);
}

args = new Arguments(caches, nodeId, checkFirst, checkThrough, checkCrc);
args = new Arguments(caches, nodeId, checkFirst, checkThrough, checkCrc, checkSizes);
}

/** {@inheritDoc} */
@@ -27,11 +27,14 @@ public enum ValidateIndexesCommandArg implements CommandArg {
/** Check first. */
CHECK_FIRST("--check-first"),

/** Check crc. */
/** Check through. */
CHECK_THROUGH("--check-through"),

/** Check CRC. */
CHECK_CRC("--check-crc"),

/** Check through. */
CHECK_THROUGH("--check-through");
/** Check sizes. */
CHECK_SIZES("--check-sizes");

/** Option name. */
private final String name;
@@ -56,6 +56,9 @@ public abstract class IgniteDataTransferObject implements Externalizable {
/** Version 6. */
protected static final byte V6 = 6;

/** Version 7. */
protected static final byte V7 = 7;

/**
* @param col Source collection.
* @param <T> Collection type.
@@ -465,13 +465,14 @@ public void remove(GridCacheContext cctx, GridQueryTypeDescriptor type, CacheDat
Collection<ColumnInformation> columnsInformation(String schemaNamePtrn, String tblNamePtrn, String colNamePtrn);

/**
* Return index size by schema name and index name.
* Return index size by schema, table and index name.
*
* @param schemaName Schema name.
* @param tblName Table name.
* @param idxName Index name.
* @return Index size (Number of elements) or {@code 0} if index not found.
*/
default long indexSize(String schemaName, String idxName) throws IgniteCheckedException {
default long indexSize(String schemaName, String tblName, String idxName) throws IgniteCheckedException {
return 0;
}
}
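
The widened default method keeps indexing implementations that do not override it compiling (it still returns 0), while letting callers resolve an index size per table. A minimal caller sketch, mirroring the indexStatStr() change further down; the enclosing SPI interface is assumed to be GridQueryIndexing, since its name is not visible in this excerpt:

    // Sketch only. Assumes `idx` is the indexing SPI instance declaring the default
    // indexSize(schema, table, index) shown above, and `type` describes a query type
    // as elsewhere in this diff (GridQueryTypeDescriptor).
    static void printIndexSizes(GridQueryIndexing idx, GridQueryTypeDescriptor type) throws IgniteCheckedException {
        String schema = type.schemaName();
        String tbl = type.tableName();

        // Primary-key index, using the same "_key_PK" convention as indexStatStr() below.
        System.out.println("_key_PK size=" + idx.indexSize(schema, tbl, "_key_PK"));

        // Secondary indexes declared for the type.
        for (GridQueryIndexDescriptor desc : type.indexes().values())
            System.out.println(desc.name() + " size=" + idx.indexSize(schema, tbl, desc.name()));
    }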
@@ -22,6 +22,7 @@
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
@@ -123,6 +124,7 @@
import org.apache.ignite.thread.IgniteThread;
import org.jetbrains.annotations.Nullable;

import static java.util.Collections.newSetFromMap;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import static org.apache.ignite.events.EventType.EVT_CACHE_QUERY_EXECUTED;
@@ -189,7 +191,7 @@ public class GridQueryProcessor extends GridProcessorAdapter {
private ClusterNode crd;

/** Registered cache names. */
private final Collection<String> cacheNames = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
private final Collection<String> cacheNames = newSetFromMap(new ConcurrentHashMap<String, Boolean>());

/** ID history for index create/drop discovery messages. */
private final GridBoundedConcurrentLinkedHashSet<IgniteUuid> dscoMsgIdHist =
@@ -215,7 +217,7 @@ public class GridQueryProcessor extends GridProcessorAdapter {
private boolean skipFieldLookup;

/** Cache name - value typeId pairs for which type mismatch message was logged. */
private final Set<Long> missedCacheTypes = Collections.newSetFromMap(new ConcurrentHashMap<>());
private final Set<Long> missedCacheTypes = newSetFromMap(new ConcurrentHashMap<>());

/**
* @param ctx Kernal context.
@@ -2906,13 +2908,11 @@ public <K, V> GridCloseableIterator<IgniteBiTuple<K, V>> queryText(final String
* @return Descriptors.
*/
public Collection<GridQueryTypeDescriptor> types(@Nullable String cacheName) {
Collection<GridQueryTypeDescriptor> cacheTypes = new ArrayList<>();
Collection<GridQueryTypeDescriptor> cacheTypes = newSetFromMap(new IdentityHashMap<>());

for (Map.Entry<QueryTypeIdKey, QueryTypeDescriptorImpl> e : types.entrySet()) {
QueryTypeDescriptorImpl desc = e.getValue();

if (F.eq(e.getKey().cacheName(), cacheName))
cacheTypes.add(desc);
cacheTypes.add(e.getValue());
}

return cacheTypes;
@@ -18,7 +18,6 @@
package org.apache.ignite.internal.processors.query.schema;

import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -180,15 +179,14 @@ private String indexStatStr(SchemaIndexCacheStat stat) throws IgniteCheckedExcep
res.a(" Type name=" + type.name());
res.a(U.nl());

final String pk = "_key_PK";
String pk = "_key_PK";
String tblName = type.tableName();

res.a(" Index: name=" + pk + ", size=" + idx.indexSize(type.schemaName(), pk));
res.a(" Index: name=" + pk + ", size=" + idx.indexSize(type.schemaName(), tblName, pk));
res.a(U.nl());

final Map<String, GridQueryIndexDescriptor> indexes = type.indexes();

for (GridQueryIndexDescriptor descriptor : indexes.values()) {
final long size = idx.indexSize(type.schemaName(), descriptor.name());
for (GridQueryIndexDescriptor descriptor : type.indexes().values()) {
long size = idx.indexSize(type.schemaName(), tblName, descriptor.name());

res.a(" Index: name=" + descriptor.name() + ", size=" + size);
res.a(U.nl());