rateLimiters;
/* metrics tracking raw number of delays/DNRs */
@@ -169,10 +169,10 @@ public ErrorManager(SkLogger logger,
dnrPercentage = (double)config.getErrorDnrThreshold() * 100.0 /
(double)delayResponseThreshold;
- /* LRU cache */
+ /* Timeout cache */
this.rateLimiters = CacheBuilder.build(
- new CacheConfig().setCapacity(config.getErrorCacheSize())
- .setLifetime(config.getErrorCacheLifetimeMs()));
+ new CacheConfig().setLifetime(config.getErrorCacheLifetimeMs())
+ .setName("ErrorRateCache"));
this.delayPool = new ScheduledThreadPoolExecutor(
config.getErrorDelayPoolSize());
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/PassThroughTableCache.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/PassThroughTableCache.java
index ef3e4c78..5f43e205 100644
--- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/PassThroughTableCache.java
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/PassThroughTableCache.java
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
*
* This file was distributed by Oracle as part of a version of Oracle NoSQL
* Database made available at:
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ProxyThreadPoolExecutor.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ProxyThreadPoolExecutor.java
new file mode 100644
index 00000000..349c324c
--- /dev/null
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ProxyThreadPoolExecutor.java
@@ -0,0 +1,303 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle NoSQL
+ * Database made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle NoSQL Database for a copy of the license and
+ * additional information.
+ */
+
+package oracle.nosql.proxy.util;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.Executor;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * An implementation of the Java Executor interface that creates and
+ * manages a fixed-size thread pool in a lightweight manner, with as
+ * little locking as possible.
+ *
+ * The implementation uses a non-blocking queue structure,
+ * ConcurrentLinkedQueue, to hold Runnable tasks executed by threads
+ * kept in a pool.
+ *
+ * This makes insertion and removal of tasks very cheap with good
+ * concurrency. If the queue is empty, threads will wait on a separate
+ * lock and condition that are only used when the queue size goes to
+ * zero and increments from zero. This means that locking/waiting only
+ * happens when the queue and Executor are fairly idle, rather than
+ * putting locks in the path of the Executor when the queue is busy.
+ *
+ * Possible enhancements (be careful of performance impact)
+ * o wait time statistics
+ * o allowing for a dynamic vs fixed-size thread pool
+ * o limit queue size. This would likely mean blocking in the execute() path
+ * to slow down producers or rejecting execution
+ *
+ * TODO: consider making this a public, reusable class in this or another
+ * common repository
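+ *
+ * A minimal usage sketch (the pool size, thread-name prefix, and task are
+ * illustrative):
+ * <pre>
+ * ProxyThreadPoolExecutor exec = new ProxyThreadPoolExecutor(4, "Example");
+ * exec.execute(() -> doWork());   // doWork() is a placeholder
+ * exec.shutdown(true);            // graceful: drain the queue first
+ * </pre>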
+ */
+public class ProxyThreadPoolExecutor implements Executor {
+
+ /* simple, concurrent FIFO queue for tasks */
+    private final ConcurrentLinkedQueue<Runnable> taskQueue =
+        new ConcurrentLinkedQueue<>();
+
+ /* track the size of the queue as well as active threads */
+ private final AtomicInteger queueSize = new AtomicInteger();
+ private final AtomicInteger activeThreadCount = new AtomicInteger();
+
+ /* stats for lifetime of queue */
+ private final AtomicLong totalTasksProcessed = new AtomicLong();
+ private final AtomicLong totalThreadWakeups = new AtomicLong();
+
+ /* lock and condition used for waiting on the queue */
+ private final ReentrantLock waitLock = new ReentrantLock();
+ private final Condition available = waitLock.newCondition();
+
+ /* used to shutdown the Executor */
+    private volatile boolean active = true;
+    /*
+     * tracks the queue size on the last execute. This is volatile rather
+     * than Atomic as it's only ever set or read, never read-modify-written
+     */
+    private volatile int lastQueueSize;
+
+ /* the pool */
+ private final Thread[] threadPool;
+ private final int numThreads;
+
+ /**
+ * Create an Executor instance that uses a fixed-size thread pool to
+     * execute the Runnable tasks. The pool must be shut down using the
+ * {@link #shutdown} method to clean up the queue and thread pool.
+ *
+ * @param numThreads the number of threads for the pool
+ * @param namePrefix a string used to name the threads created for the
+ * pool. The name of a thread is the prefix plus ".N" where N is an
+ * integer. If this parameter is null a default prefix ("ProxyPool") is
+ * used
+ */
+ public ProxyThreadPoolExecutor(int numThreads,
+ String namePrefix) {
+ this.numThreads = numThreads;
+ if (namePrefix == null) {
+ namePrefix = "ProxyPool";
+ }
+        if (numThreads <= 0) {
+ throw new IllegalArgumentException(
+ "Pool size must be greater than zero");
+ }
+ threadPool = new Thread[numThreads];
+ for (int i = 0; i < numThreads; i++) {
+ Thread th = new Thread(
+ new ExecutorThread(), (namePrefix + "." + i));
+ threadPool[i] = th;
+ th.start();
+ }
+ }
+
+ /**
+ * Returns the current number of elements in the task queue
+ * @return the size
+ */
+ public int getQueueSize() {
+ return queueSize.get();
+ }
+
+ /**
+ * Returns the number of threads actively handling a task
+ * @return the number
+ */
+ public int getActiveThreadCount() {
+ return activeThreadCount.get();
+ }
+
+ /**
+ * Returns the number of queue tasks processed for the lifetime of
+ * the executor
+ * @return the number
+ */
+ public long getTotalTasksProcessed() {
+ return totalTasksProcessed.get();
+ }
+
+ /**
+ * Returns the number of thread wakeup calls done for the lifetime of
+ * the executor
+ * @return the number
+ */
+ public long getTotalThreadWakeups() {
+ return totalThreadWakeups.get();
+ }
+
+ /**
+     * Shut down the Executor, including waiting for the threads in the
+     * pool, optionally using a graceful shutdown to ensure that all
+     * current tasks in the queue are run. If graceful is false the
+     * threads still exit, but any tasks not yet run are removed from the
+     * queue and ignored.
+ *
+ * @param graceful if true, shut down gracefully
+ */
+ public void shutdown(boolean graceful) {
+ try {
+ active = false;
+ /*
+ * graceful shutdown allows task queue to be emptied
+ */
+ if (graceful && !taskQueue.isEmpty()) {
+ while (!taskQueue.isEmpty()) {
+ wakeupAll();
+ }
+ }
+ /*
+ * these are redundant for graceful shutdown but
+ * do not hurt; efficiency doesn't matter in shutdown
+ */
+ taskQueue.clear();
+ wakeupAll();
+
+ /* wait for threads */
+ for (int i = 0; i < numThreads; i++) {
+ threadPool[i].join();
+ }
+ } catch (InterruptedException e) {
+ /* ignore */
+ }
+ }
+
+ /*
+ * A note on synchronization between the task queue and threads that operate
+ * on it. The goal is to have sufficient threads running to keep up with the
+ * task queue and also avoid excessive await/signal calls that can be
+ * concurrency hotspots.
+ *
+ * This is done by:
+ * 1. tracking the "last" queue size each time a task is added
+     * 2. if, on adding a task, the queue is larger than it was previously,
+     * wake up a thread, but only if there aren't already enough threads
+     * running. "Enough" is currently the size of the queue itself; that
+     * number is perhaps subject to change.
+     *
+     * As the queue grows, additional threads are woken to
+     * hopefully enable the consumer threads to keep up with the producer
+ * calls. At some point if the queue size grows beyond the thread pool
+ * size the queue itself will keep growing, essentially without bound.
+ * This means that the thread pool should be sized with the
+ * producer/consumer paths in mind.
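+     *
+     * A worked example with illustrative numbers: if the previous queue
+     * size was 3, a new task brings it to 4, and 2 of 8 pool threads are
+     * active, then the queue is growing (4 > 3), activeThreads (2) is <=
+     * the queue size (4), and 2 < the pool size (8), so one waiting
+     * thread is signaled.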
+ */
+ @Override
+ public void execute(Runnable r) {
+ if (!active) {
+ throw new RejectedExecutionException(
+ "Executor has been shut down");
+ }
+ taskQueue.add(r);
+ int size = queueSize.getAndIncrement();
+ int lqs = lastQueueSize;
+ lastQueueSize = size;
+ int activeThreads = activeThreadCount.get();
+
+        /*
+         * Wake up a thread if either of these is true:
+         * o there are no active threads
+         * o the queue is growing (i.e. the threads aren't keeping up)
+         * but only while the number of active threads is <= the queue
+         * size.
+         *
+         * If all threads in the pool are active, don't bother with a wakeup
+         */
+        if ((activeThreads == 0 ||
+             (size - lqs) > 0) &&
+            activeThreads <= size &&
+            activeThreads < numThreads) {
+ totalThreadWakeups.getAndIncrement();
+ signalAvailable();
+ }
+ }
+
+ /*
+ * The wait-related methods are used when the queue is empty, allowing
+ * threads to wait for a task. The lock taken in this path should not
+ * be a concurrency hotspot because it's only used when the queue is
+ * lightly used, indicating less load. In a busy system the queue will
+ * have tasks available most of the time.
+ */
+ private void signalAvailable() {
+ /*
+ * there should be waiting threads, wake one up
+ */
+        waitLock.lock();
+        try {
+            available.signal();
+ } finally {
+ waitLock.unlock();
+ }
+ }
+
+ private void waitForAvailable() {
+        waitLock.lock();
+        try {
+            /* re-check under the lock to avoid a lost wakeup */
+            if (!active || !taskQueue.isEmpty()) {
+                return;
+            }
+            available.await();
+        } catch (InterruptedException ie) {
+            /* restore the interrupt status so the run loop can see it */
+            Thread.currentThread().interrupt();
+ } finally {
+ waitLock.unlock();
+ }
+ }
+
+ /*
+ * wakeup all waiting threads
+ */
+ private void wakeupAll() {
+        waitLock.lock();
+        try {
+            available.signalAll();
+ } finally {
+ waitLock.unlock();
+ }
+ }
+
+ /**
+ * This is the class that is run by the pool threads. It looks for
+ * Runnable tasks on the queue and runs them. If no tasks are available
+     * it waits on a condition that is signaled when a task is available.
+ */
+ private class ExecutorThread implements Runnable {
+ @Override
+ public void run() {
+ /*
+             * this conditional allows for graceful shutdown, draining the
+             * queue before exiting. Non-graceful shutdown is done using
+             * the interrupted() status, leaving entries on the queue if
+             * it is not empty
+ */
+ while (!Thread.currentThread().isInterrupted() &&
+ (active || !taskQueue.isEmpty())) {
+ Runnable task = taskQueue.poll();
+ if (task != null) {
+ queueSize.decrementAndGet();
+ activeThreadCount.incrementAndGet();
+ totalTasksProcessed.getAndIncrement();
+                    try {
+                        task.run();
+                    } finally {
+                        /* don't leak the active count if the task throws */
+                        activeThreadCount.decrementAndGet();
+                    }
+ } else {
+ waitForAvailable();
+ }
+ }
+ }
+ }
+}
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ShutdownManager.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ShutdownManager.java
index 0552ae5f..3dda4ec0 100644
--- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ShutdownManager.java
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/ShutdownManager.java
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
*
* This file was distributed by Oracle as part of a version of Oracle NoSQL
* Database made available at:
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/TableCache.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/TableCache.java
index dc57b8ab..4cd93de3 100644
--- a/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/TableCache.java
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/proxy/util/TableCache.java
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2011, 2024 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
*
* This file was distributed by Oracle as part of a version of Oracle NoSQL
* Database made available at:
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/HostNameResolver.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/HostNameResolver.java
new file mode 100644
index 00000000..708b2068
--- /dev/null
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/HostNameResolver.java
@@ -0,0 +1,53 @@
+package oracle.nosql.util;
+
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+/**
+ * A utility for all cloud components to get the hostname for the node that is
+ * running the Docker container.
+ * Nodes managed by OKE are given hostnames that are OCIDs.
+ * After migrating to OKE, it was hard to tell which instance a hostname
+ * referred to; the instance name is easier for humans than the OCID. To
+ * be able to provide the instance name, we save it to the file
+ * /etc/instance-name, which is mounted to /tmp/etc/instance-name inside
+ * the container.
+ * Since we want to use the instance name as the host name if possible,
+ * the host name is resolved in the following order:
+ * 1. Read from the /tmp/etc/instance-name file.
+ * 2. Read from the HOST_NAME environment variable.
+ * 3. Resolve the host name from DNS.
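+ *
+ * Usage sketch:
+ * <pre>
+ * String hostName = HostNameResolver.getHostName();
+ * </pre>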
+ */
+public class HostNameResolver {
+
+ private static final String INSTANCE_NAME_PATH = "/tmp/etc/instance-name";
+    private static final String HOST_NAME_ENV = "HOST_NAME";
+
+ public static String getHostName() {
+ String hostName = readFileLine(INSTANCE_NAME_PATH);
+ if (hostName != null && !hostName.isEmpty()) {
+ return hostName;
+ }
+ hostName = System.getenv(HOST_NAME_ENV);
+ if (hostName != null && !hostName.isEmpty()) {
+ return hostName;
+ }
+ try {
+ return InetAddress.getLocalHost().getHostName();
+ } catch (UnknownHostException e) {
+            throw new RuntimeException("Cannot resolve local host name", e);
+ }
+ }
+
+ private static String readFileLine(String filePath) {
+ try (BufferedReader br = new BufferedReader(new FileReader(filePath))) {
+            return br.readLine();
+ } catch (IOException e) {
+ return null;
+ }
+ }
+}
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/filter/Rule.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/filter/Rule.java
index 4e85f82b..c5699ecd 100644
--- a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/filter/Rule.java
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/filter/Rule.java
@@ -13,28 +13,41 @@
package oracle.nosql.util.filter;
-import java.time.Instant;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.lang.reflect.Type;
+import java.sql.Timestamp;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
-import oracle.nosql.common.JsonBuilder;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.gson.JsonDeserializationContext;
+import com.google.gson.JsonDeserializer;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParseException;
+import com.google.gson.JsonSerializationContext;
+import com.google.gson.JsonSerializer;
+import com.google.gson.JsonSyntaxException;
/*
* Used in definition of the JSON payloads for the REST APIs between the proxy
* and SC filters service.
*
* To serialize a Java object into a Json string:
- * Foo foo;
- * String jsonPayload = JsonUtils.toJson(foo);
+ * Rule rule;
+ * String jsonPayload = rule.toJson();
*
* To deserialize a Json string into this object:
- * Foo foo = JsonUtils.fromJsont(, Foo.class);
+ *    Rule rule = Rule.fromJson(jsonString); // or Rule.fromJson(inputStream)
*
* The Rule class represents the filter rule which has below information:
* o name, the rule name, required.
- * o action, the action type of the rule, default to DROP_REQUEST.
+ * o action, the action of the rule, defaults to DROP_REQUEST.
* o tenant, the principal tenant ocid.
* o user, the principal ocid.
* o table, the target table ocid.
@@ -43,21 +56,32 @@
*/
public class Rule {
- private static final ActionType DEF_ACTION = ActionType.DROP_REQUEST;
+ private static final Gson gson = new GsonBuilder()
+ .registerTypeAdapter(Action.class, new ActionSerializer())
+ .setDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS")
+ .create();
+
+ public static final Action DROP_REQUEST = new DropRequestAction();
+ private static final Action DEFAULT_ACTION = DROP_REQUEST;
public enum OpType {
- ALL,
- DDL,
- WRITE,
- READ
+ ALL, /* all ops */
+ DDL, /* ddl ops */
+ WRITE, /* dml write */
+ READ, /* dml read, read metadata, work-request, usage */
+ CONFIG_READ, /* read configuration */
+ CONFIG_UPDATE /* update configuration */
}
public enum ActionType {
- DROP_REQUEST
+ DROP_REQUEST,
+ RETURN_ERROR
};
+ /* The name of the rule */
private String name;
- private ActionType action;
+    /* The action to take if the rule matches */
+ private Action action;
/* The principal tenant ocid */
private String tenant;
@@ -65,30 +89,32 @@ public enum ActionType {
private String user;
/* The target table ocid */
private String table;
+ /* The operations */
private String[] operations;
- private long createTimeMs;
+ /* The time stamp the rule was created */
+ private Timestamp createTime;
    private transient Set<OpType> opTypes;
public static Rule createRule(String name,
- ActionType action,
- String tenantOcid,
- String userOcid,
+ Action action,
+ String tenantId,
+ String userId,
String tableOcid,
String[] operations) {
- return createRule(name, action, tenantOcid, userOcid, tableOcid,
- operations, 0);
+ return new Rule(name, action, tenantId, userId, tableOcid,
+ operations, null /* createTime */);
}
public static Rule createRule(String name,
- ActionType action,
- String tenantOcid,
- String userOcid,
+ Action action,
+ String tenantId,
+ String userId,
String tableOcid,
String[] operations,
- long createTimeMs) {
- return new Rule(name, action, tenantOcid, userOcid, tableOcid,
- operations, createTimeMs);
+ Timestamp createTime) {
+ return new Rule(name, action, tenantId, userId, tableOcid,
+ operations, createTime);
}
/* Needed for serialization */
@@ -96,22 +122,21 @@ public Rule() {
}
private Rule(String name,
- ActionType action,
+ Action action,
String tenantOcid,
String userOcid,
String tableOcid,
String[] ops,
- long createTime) {
+ Timestamp createTime) {
this.name = name;
this.action = action;
tenant = tenantOcid;
user = userOcid;
table = tableOcid;
- createTimeMs = createTime;
+ this.createTime = createTime;
operations = ops;
validate();
- setOpTypes();
}
public void setName(String name) {
@@ -122,8 +147,12 @@ public String getName() {
return name;
}
- public ActionType getAction() {
- return (action != null) ? action : DEF_ACTION;
+ public Action getAction() {
+ return action;
+ }
+
+ public ActionType getActionType() {
+ return getAction().getType();
}
public String getTenant() {
@@ -143,31 +172,17 @@ public String[] getOperations() {
}
    public Set<OpType> getOpTypes() {
- if (opTypes != null) {
- return opTypes;
- }
- setOpTypes();
- return opTypes;
- }
-
- private void setOpTypes() {
- opTypes = new HashSet<>();
- if (getOperations() != null) {
+ if (opTypes == null) {
+ opTypes = new HashSet<>();
for (String op : getOperations()) {
opTypes.add(parseOpType(op));
}
}
+ return opTypes;
}
- public String getCreateTime() {
- if (createTimeMs > 0) {
- return Instant.ofEpochMilli(createTimeMs).toString();
- }
- return null;
- }
-
- public long getCreateTimeMs() {
- return createTimeMs;
+ public Timestamp getCreateTime() {
+ return createTime;
}
/*
@@ -178,7 +193,8 @@ public boolean attributesEqual(Rule o) {
return stringsEqual(getTenant(), o.getTenant()) &&
stringsEqual(getUser(), o.getUser()) &&
stringsEqual(getTable(), o.getTable()) &&
- operationsEqual(getOpTypes(), o.getOpTypes());
+ operationsEqual(getOpTypes(), o.getOpTypes()) &&
+ getAction().equals(o.getAction());
}
    public boolean operationsEqual(Set<OpType> ops) {
@@ -186,31 +202,48 @@ public boolean operationsEqual(Set<OpType> ops) {
}
public String toJson() {
- JsonBuilder jb = JsonBuilder.create();
- jb.append("name", getName());
- jb.append("action", getAction().name());
- if (getTenant() != null) {
- jb.append("tenant", getTenant());
- }
- if (getUser() != null) {
- jb.append("user", getUser());
- }
- if (getTable() != null) {
- jb.append("table", getTable());
- }
- if (getOperations() != null) {
- jb.startArray("operations");
- for (String op : getOperations()) {
- jb.append(op);
+ return gson.toJson(this);
+ }
+
+ /*
+ * Constructs Rule from JSON stream
+ */
+ public static Rule fromJson(InputStream in) {
+ try (InputStreamReader reader = new InputStreamReader(in)) {
+ Rule rule = gson.fromJson(reader, Rule.class);
+ if (rule == null) {
+ throw new IllegalArgumentException(
+                    "Failed to deserialize JSON to Rule object: JSON is empty");
}
- jb.endArray();
+ rule.validate();
+ return rule;
+ } catch (JsonSyntaxException | IOException ex) {
+ throw new IllegalArgumentException(
+                "Failed to deserialize JSON to Rule object: " + ex.getMessage());
}
+ }
- if (getCreateTimeMs() > 0) {
- jb.append("createTimeMs", getCreateTimeMs());
- jb.append("createTime", getCreateTime());
+ /*
+ * Constructs Rule from JSON string
+ */
+ public static Rule fromJson(String json) {
+ try {
+ Rule rule = gson.fromJson(json, Rule.class);
+ if (rule == null) {
+ throw new IllegalArgumentException(
+                    "Failed to deserialize JSON to Rule object: " + json);
+ }
+ rule.validate();
+ return rule;
+ } catch (JsonSyntaxException jse) {
+ throw new IllegalArgumentException(
+                "Failed to deserialize JSON to Rule object: " +
+ jse.getMessage() + ", json=" + json);
}
- return jb.toString();
+ }
+
+ public static Gson getGson() {
+ return gson;
}
@Override
@@ -218,20 +251,34 @@ public String toString() {
return toJson();
}
- public void validate() {
+ private void validate() {
if (name == null) {
throw new IllegalArgumentException("Rule name should not be null");
}
+
+ if (action != null) {
+ action.validate();
+ } else {
+ action = DEFAULT_ACTION;
+ }
+
if (operations == null || operations.length == 0) {
throw new IllegalArgumentException(
"Rule operations should not be null or empty");
}
+ for (String op : operations) {
+ parseOpType(op);
+ }
+
+ if (createTime == null) {
+ createTime = new Timestamp(System.currentTimeMillis());
+ }
}
private static OpType parseOpType(String name) {
try {
return OpType.valueOf(name.toUpperCase());
- } catch(IllegalArgumentException iae) {
+ } catch (IllegalArgumentException iae) {
throw new IllegalArgumentException("Invalid operation type '" +
name + "', not one of the values accepted for Enum class: " +
Arrays.toString(OpType.values()));
@@ -242,7 +289,7 @@ private static OpType parseOpType(String name) {
     * Checks if the given OpType set represents all the operation types,
     * which is true if it matches either of the below 2 conditions:
* 1. contain OpType.ALL
- * 2. contain all the other OpType except OpType.ALL.s
+ * 2. contain all the other OpType except OpType.ALL
*/
    public static boolean isAllOpType(Set<OpType> ops) {
for (OpType op : OpType.values()) {
@@ -283,4 +330,143 @@ public static boolean operationsEqual(Set<OpType> ops1,
}
return false;
}
+
+ /*
+ * Action to take when the rule is matched.
+ */
+ public static class Action {
+ private ActionType type;
+
+ private Action(ActionType type) {
+ this.type = type;
+ }
+
+ public ActionType getType() {
+ return type;
+ }
+
+ public void validate() {
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof Action)) {
+ return false;
+ }
+ return type == ((Action)obj).getType();
+ }
+ }
+
+ /*
+ * Drops request
+ */
+ public static class DropRequestAction extends Action {
+ public DropRequestAction() {
+ super(ActionType.DROP_REQUEST);
+ }
+ }
+
+ /*
+ * Returns the specified error
+ * - errorCode: the response error code (refer to Response error codes
+ * in httpproxy oracle.nosql.proxy.protocol.Protocol class)
+ * - errorMessage: the returned error message.
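+     *
+     * A construction sketch (the error code and message are illustrative):
+     *   Action a = new ReturnErrorAction(4, "request blocked by rule");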
+ */
+ public static class ReturnErrorAction extends Action {
+ private int errorCode;
+ private String errorMessage;
+
+ public ReturnErrorAction(int errorCode, String errorMessage) {
+ super(ActionType.RETURN_ERROR);
+ this.errorCode = errorCode;
+ this.errorMessage = errorMessage;
+
+ validate();
+ }
+
+ public int getErrorCode() {
+ return errorCode;
+ }
+
+ public String getErrorMessage() {
+ return errorMessage;
+ }
+
+ @Override
+ public void validate() {
+ if (errorCode <= 0) {
+ throw new IllegalArgumentException(
+                    "The errorCode must be a positive int, see error " +
+                    "codes in the oracle.nosql.proxy.protocol.Protocol " +
+                    "class");
+ }
+
+ if (errorMessage == null) {
+ throw new IllegalArgumentException(
+                    "The errorMessage must not be null");
+ }
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof ReturnErrorAction)) {
+ return false;
+ }
+
+ ReturnErrorAction o1 = (ReturnErrorAction)obj;
+ return super.equals(obj) &&
+ (getErrorCode() == o1.getErrorCode()) &&
+ Objects.equals(getErrorMessage(), o1.getErrorMessage());
+ }
+ }
+
+    /* Customized JSON serializer/deserializer for Action */
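+    /*
+     * For example (illustrative values), a ReturnErrorAction serializes as
+     *   {"type":"RETURN_ERROR","errorCode":4,"errorMessage":"blocked"}
+     * and deserialization dispatches on the "type" field to choose the
+     * concrete Action subclass.
+     */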
+ private static class ActionSerializer
+        implements JsonSerializer<Action>, JsonDeserializer<Action> {
+
+ @Override
+ public JsonElement serialize(Action action,
+ Type typeOfSrc,
+ JsonSerializationContext context) {
+ switch(action.getType()) {
+ case DROP_REQUEST:
+ return context.serialize(action, DropRequestAction.class);
+ case RETURN_ERROR:
+ return context.serialize(action, ReturnErrorAction.class);
+ default:
+ throw new JsonParseException("Unknown action: " + action);
+ }
+ }
+
+ @Override
+ public Action deserialize(JsonElement json,
+ Type typeOfT,
+ JsonDeserializationContext context)
+ throws JsonParseException {
+
+ JsonObject jsonObject = json.getAsJsonObject();
+            JsonElement typeElem = jsonObject.get("type");
+            if (typeElem == null) {
+                throw new JsonParseException("Missing action type");
+            }
+            String type = typeElem.getAsString();
+ ActionType actionType;
+
+ try {
+ actionType = ActionType.valueOf(type);
+ } catch (IllegalArgumentException ex) {
+ throw new JsonParseException("Unknown action type: " + type);
+ }
+
+ switch(actionType) {
+ case DROP_REQUEST:
+ return context.deserialize(json, DropRequestAction.class);
+ case RETURN_ERROR:
+ return context.deserialize(json, ReturnErrorAction.class);
+ default:
+ throw new JsonParseException("Unknown action type: " + type);
+ }
+ }
+ }
}
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/http/HttpConstants.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/http/HttpConstants.java
index 83819f11..269db0a9 100644
--- a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/http/HttpConstants.java
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/http/HttpConstants.java
@@ -304,6 +304,9 @@ public class HttpConstants {
* Used for WorkRequest related APIs
*/
public static final String WORK_REQUEST_ID = "workRequestId";
+ public static final String WORK_REQUEST_TYPE = "type";
+ public static final String WORK_REQUEST_DDL = "ddl";
+ public static final String WORK_REQUEST_KMSKEY = "kmskey";
/**
* Used for Backfill to RQS API
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/ph/HealthReportAgent.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/ph/HealthReportAgent.java
index 33028f44..d05176f5 100644
--- a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/ph/HealthReportAgent.java
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/ph/HealthReportAgent.java
@@ -23,6 +23,7 @@
import oracle.nosql.common.sklogger.ScheduleStart;
import oracle.nosql.common.sklogger.SkLogger;
+import oracle.nosql.util.HostNameResolver;
import oracle.nosql.util.HttpRequest;
import oracle.nosql.util.HttpRequest.ConnectionHandler;
import oracle.nosql.util.HttpResponse;
@@ -42,7 +43,6 @@
public class HealthReportAgent {
private static long INTERVAL = 60_000;
- private static String HOST_NAME_ENV = "HOST_NAME";
private static String COMPONENT_NAME_ENV = "COMPONENT_NAME";
private static String COMPONENT_ID_ENV = "COMPONENT_ID";
@@ -74,7 +74,7 @@ public HealthReportAgent(boolean isGlobalComponent,
long interval,
SkLogger logger,
HealthSource source) {
- this.hostName = System.getenv(HOST_NAME_ENV);
+ this.hostName = HostNameResolver.getHostName();
this.componentName = System.getenv(COMPONENT_NAME_ENV);
this.componentId = System.getenv(COMPONENT_ID_ENV);
final String phUrl = URL.getPhUrl();
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/DdlHistoryEntry.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/DdlHistoryEntry.java
index d5a029ca..4f6dcd4f 100644
--- a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/DdlHistoryEntry.java
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/DdlHistoryEntry.java
@@ -14,6 +14,9 @@
import oracle.nosql.util.fault.ErrorCode;
import oracle.nosql.util.fault.RequestFault;
import oracle.nosql.util.tmi.ReplicaInfo.ReplicaState;
+import oracle.nosql.util.tmi.WorkRequest.ActionType;
+import oracle.nosql.util.tmi.WorkRequest.EntityType;
+import oracle.nosql.util.tmi.WorkRequest.OperationType;
/**
* A bean class to record DDL events.
@@ -832,6 +835,69 @@ public String toString() {
return JsonUtils.toJson(this);
}
+ /*
+ * Converts to a WorkRequest object, representing general work request
+ * information.
+ */
+ public WorkRequest toWorkRequest() {
+ OperationType operationType = null;
+ WorkRequest.Status workRequestStatus = null;
+ ActionType actionType = null;
+
+ DdlOp op = getOperationEnum();
+ if (op == DdlOp.createTable) {
+ operationType = OperationType.CREATE_TABLE;
+ } else if (op == DdlOp.dropTable) {
+ operationType = OperationType.DELETE_TABLE;
+ } else {
+ operationType = OperationType.UPDATE_TABLE;
+ }
+
+ long timeFinished = 0;
+ switch (getStatusEnum()) {
+ case ACCEPTED:
+ workRequestStatus = WorkRequest.Status.ACCEPTED;
+ actionType = ActionType.IN_PROGRESS;
+ break;
+ case INPROGRESS:
+ workRequestStatus = WorkRequest.Status.IN_PROGRESS;
+ actionType = ActionType.IN_PROGRESS;
+ break;
+ case SUCCEEDED:
+ workRequestStatus = WorkRequest.Status.SUCCEEDED;
+ if (op == DdlOp.createTable) {
+ actionType = ActionType.CREATED;
+ } else if (op == DdlOp.dropTable) {
+ actionType = ActionType.DELETED;
+ } else {
+ actionType = ActionType.UPDATED;
+ }
+ timeFinished = updateTime.getTime();
+ break;
+ case FAILED:
+ workRequestStatus = WorkRequest.Status.FAILED;
+ actionType = ActionType.UPDATED;
+ timeFinished = updateTime.getTime();
+ break;
+ }
+
+ return new WorkRequest(workRequestId,
+ operationType,
+ workRequestStatus,
+ compartmentId,
+ tableOcid,
+ tableName,
+ EntityType.TABLE,
+ getTags(),
+ actionType,
+ createTime.getTime(),
+ (startTime != null ?
+ startTime.getTime() : 0),
+ timeFinished,
+ getErrorCodeEnum(),
+ resultMsg);
+ }
+
/* The local sub ddl request information */
public static class SubRequest {
private String workRequestId;
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/KmsKeyInfo.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/KmsKeyInfo.java
new file mode 100644
index 00000000..ea38b1d3
--- /dev/null
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/KmsKeyInfo.java
@@ -0,0 +1,99 @@
+/*-
+ * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ */
+package oracle.nosql.util.tmi;
+
+import java.nio.ByteBuffer;
+
+import oracle.nosql.common.json.JsonUtils;
+
+/**
+ * Used in defining the response payload for the REST API get-kms-key from SC
+ * to proxy.
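+ *
+ * An abridged example of the JSON shape (assuming JsonUtils.print emits
+ * the bean fields directly; all values are illustrative):
+ * <pre>
+ * {"isHostedEnv":true,"dedicatedTenantId":"ocid1.tenancy...",
+ *  "keyId":"ocid1.key...","vaultId":"ocid1.vault...","state":"ACTIVE",
+ *  "createTime":1700000000000,"updateTime":1700000360000}
+ * </pre>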
+ */
+public class KmsKeyInfo {
+
+    public enum KeyState {
+ UPDATING,
+ REVERTING,
+ ACTIVE,
+ DELETED,
+ FAILED,
+ DISABLED
+ }
+
+ private final Boolean isHostedEnv;
+ private final String dedicatedTenantId;
+ private final String keyId;
+ private final String vaultId;
+ private final KeyState state;
+ private final long createTime;
+ private final long updateTime;
+
+ public KmsKeyInfo(Boolean isHostedEnv,
+ String dedicatedTenantId,
+ String keyId,
+ String vaultId,
+ KeyState state,
+ long createTime,
+ long updateTime) {
+ this.isHostedEnv = isHostedEnv;
+ this.dedicatedTenantId = dedicatedTenantId;
+ this.keyId = keyId;
+ this.vaultId = vaultId;
+ this.state = state;
+ this.createTime = createTime;
+ this.updateTime = updateTime;
+ }
+
+ public KmsKeyInfo(Boolean isHostedEnv,
+ String dedicatedTenantId,
+ KeyState state) {
+ this(isHostedEnv, dedicatedTenantId, null /* keyId */,
+ null /* vaultId */, state, 0 /* createTime */,
+ 0 /* updateTime */);
+ }
+
+ public String getDedicatedTenantId() {
+ return dedicatedTenantId;
+ }
+
+ public Boolean isHostedEnv() {
+ return isHostedEnv;
+ }
+
+ public String getKeyId() {
+ return keyId;
+ }
+
+ public String getVaultId() {
+ return vaultId;
+ }
+
+ public KeyState getState() {
+ return state;
+ }
+
+ public long getCreateTime() {
+ return createTime;
+ }
+
+ public long getUpdateTime() {
+ return updateTime;
+ }
+
+ public byte[] getETag() {
+ /*
+         * The "updateTime" reflects the last change to the KmsKeyInfo, so
+         * use it as the ETag of the KmsKeyInfo.
+ */
+ final ByteBuffer buffer = ByteBuffer.allocate(8);
+ buffer.putLong(updateTime > 0 ? updateTime : createTime);
+ return buffer.array();
+ }
+
+ @Override
+ public String toString() {
+ return JsonUtils.print(this);
+ }
+}
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/ListWorkRequestsResult.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/ListWorkRequestsResult.java
new file mode 100644
index 00000000..e4d35f08
--- /dev/null
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/ListWorkRequestsResult.java
@@ -0,0 +1,32 @@
+/*-
+ * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ */
+package oracle.nosql.util.tmi;
+
+/**
+ * Used in defining the response payload for the REST API list-work-requests
+ * from SC to proxy.
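+ *
+ * A pagination sketch (fetchPage and process are hypothetical caller-side
+ * helpers; fetchPage issues the list-work-requests call):
+ * <pre>
+ * String token = null;
+ * do {
+ *     ListWorkRequestsResult page = fetchPage(token);
+ *     for (WorkRequest wr : page.getWorkRequests()) {
+ *         process(wr);
+ *     }
+ *     token = page.getNextPageToken();
+ * } while (token != null);
+ * </pre>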
+ */
+public class ListWorkRequestsResult {
+ /* The array of WorkRequests */
+ private final WorkRequest[] workRequests;
+ /*
+     * The page token represents the starting point for retrieving the next
+     * batch of results.
+ */
+ private final String nextPageToken;
+
+ public ListWorkRequestsResult(WorkRequest[] requests,
+ String nextPageToken) {
+ this.workRequests = requests;
+ this.nextPageToken = nextPageToken;
+ }
+
+ public WorkRequest[] getWorkRequests() {
+ return workRequests;
+ }
+
+ public String getNextPageToken() {
+ return nextPageToken;
+ }
+}
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/TableInfo.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/TableInfo.java
index 9b80c22f..dffc68d0 100644
--- a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/TableInfo.java
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/TableInfo.java
@@ -147,6 +147,9 @@ public enum SchemaState {
/* schema state */
private SchemaState schemaState;
+    /* Indicates whether FORCE is used when freezing the table schema in DDL */
+ private boolean freezeForce;
+
/*
* MR table information
*/
@@ -558,6 +561,14 @@ public SchemaState getSchemaState() {
return schemaState;
}
+ public void setFreezeForce(boolean value) {
+ freezeForce = value;
+ }
+
+ public boolean getFreezeForce() {
+ return freezeForce;
+ }
+
public boolean isFrozen() {
return schemaState == SchemaState.FROZEN;
}
diff --git a/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/WorkRequest.java b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/WorkRequest.java
new file mode 100644
index 00000000..1d665f0c
--- /dev/null
+++ b/httpproxy/httpproxy/src/main/java/oracle/nosql/util/tmi/WorkRequest.java
@@ -0,0 +1,176 @@
+/*-
+ * Copyright (C) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ */
+package oracle.nosql.util.tmi;
+
+import oracle.nosql.common.json.JsonUtils;
+import oracle.nosql.util.fault.ErrorCode;
+
+/**
+ * Used in defining the response payload for the REST API get-work-request and
+ * list-work-requests from SC to proxy.
+ *
+ * It represents general work request information for DDL operation and CMEK
+ * operation.
+ */
+public class WorkRequest {
+
+ public enum EntityType {
+ TABLE,
+ CONFIGURATION
+ }
+
+ public enum OperationType {
+ CREATE_TABLE,
+ UPDATE_TABLE,
+ DELETE_TABLE,
+ UPDATE_KMS_KEY,
+ REMOVE_KMS_KEY
+ };
+
+ public enum Status {
+ ACCEPTED,
+ IN_PROGRESS,
+ FAILED,
+ SUCCEEDED,
+ CANCELING,
+ CANCELED
+ };
+
+ public enum ActionType {
+ CREATED,
+ UPDATED,
+ DELETED,
+ IN_PROGRESS
+ }
+
+ /* The work request Id */
+ private final String id;
+ /* The operation type */
+ private final OperationType type;
+ /* The status of work request */
+ private final Status status;
+    /* The OCID of the compartment that contains the work request */
+ private final String compartmentId;
+
+ /*
+ * The resource affected by this work request.
+ */
+
+ /* The resource identifier */
+ private final String entityId;
+ /* The resource name */
+ private final String entityName;
+ /* The resource type */
+ private final EntityType entityType;
+ /* The action type */
+ private final ActionType actionType;
+ /* The tags of the resource */
+ private final byte[] tags;
+
+ /* The time stamp the request was created */
+ private final long timeAccepted;
+ /* The time stamp the request was started */
+ private final long timeStarted;
+ /* The time stamp the request was finished */
+ private final long timeFinished;
+
+ /* The error encountered while executing a work request */
+ private final ErrorCode errorCode;
+ /* The description of the issue encountered */
+ private final String errorMessage;
+
+ public WorkRequest(String id,
+ OperationType type,
+ Status status,
+ String compartmentId,
+ String entityId,
+ String entityName,
+ EntityType entityType,
+ byte[] tags,
+ ActionType actionType,
+ long timeAccepted,
+ long timeStarted,
+ long timeFinished,
+ ErrorCode errorCode,
+ String errorMessage) {
+ this.id = id;
+ this.type = type;
+ this.status = status;
+ this.compartmentId = compartmentId;
+
+ this.entityId = entityId;
+ this.entityName = entityName;
+ this.entityType = entityType;
+ this.actionType = actionType;
+ this.tags = tags;
+
+ this.timeAccepted = timeAccepted;
+ this.timeStarted = timeStarted;
+ this.timeFinished = timeFinished;
+
+ this.errorCode = errorCode;
+ this.errorMessage = errorMessage;
+ }
+
+ public String getId() {
+ return id;
+ }
+
+ public OperationType getType() {
+ return type;
+ }
+
+ public Status getStatus() {
+ return status;
+ }
+
+ public String getCompartmentId() {
+ return compartmentId;
+ }
+
+ public String getEntityId() {
+ return entityId;
+ }
+
+ public String getEntityName() {
+ return entityName;
+ }
+
+ public EntityType getEntityType() {
+ return entityType;
+ }
+
+ public byte[] getTags() {
+ return tags;
+ }
+
+ public ActionType getActionType() {
+ return actionType;
+ }
+
+ public long getTimeAccepted() {
+ return timeAccepted;
+ }
+
+ public long getTimeStarted() {
+ return timeStarted;
+ }
+
+ public long getTimeFinished() {
+ return timeFinished;
+ }
+
+ public ErrorCode getErrorCode() {
+ return errorCode;
+ }
+
+ public String getErrorMessage() {
+ return errorMessage;
+ }
+
+ @Override
+ public String toString() {
+ return JsonUtils.print(this);
+ }
+}
diff --git a/httpproxy/pom.xml b/httpproxy/pom.xml
index 07460140..6fad8d9c 100644
--- a/httpproxy/pom.xml
+++ b/httpproxy/pom.xml
@@ -7,7 +7,7 @@
   <groupId>com.oracle.nosql</groupId>
   <artifactId>kv</artifactId>
-  <version>25.1.13</version>
+  <version>25.3.21</version>
   <artifactId>proxy</artifactId>
diff --git a/httpproxy/tests/src/assembly/test.xml b/httpproxy/tests/src/assembly/test.xml
new file mode 100644
index 00000000..2a864b5b
--- /dev/null
+++ b/httpproxy/tests/src/assembly/test.xml
@@ -0,0 +1,21 @@
+<assembly>
+    <id>test</id>
+    <formats>
+        <format>tar.gz</format>
+        <format>zip</format>
+    </formats>
+    <baseDirectory>oracle-nosql-proxy-tests-${project.version}</baseDirectory>
+    <includeBaseDirectory>true</includeBaseDirectory>
+    <dependencySets>
+        <dependencySet>
+            <outputDirectory>/lib</outputDirectory>
+            <useProjectArtifact>true</useProjectArtifact>
+            <useTransitiveDependencies>true</useTransitiveDependencies>
+            <unpack>false</unpack>
+            <scope>runtime</scope>
+        </dependencySet>
+    </dependencySets>
+</assembly>
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/AsyncLatencyTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/AsyncLatencyTest.java
new file mode 100644
index 00000000..655c92cf
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/AsyncLatencyTest.java
@@ -0,0 +1,120 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assume.assumeTrue;
+
+import org.junit.Test;
+import org.junit.BeforeClass;
+
+
+/**
+ * Verify that latencies remain stable for asynchronous operations when the
+ * number of concurrent requests is higher than the number of proxy
+ * worker threads.
+ *
+ * These tests run only against a local server, not the minicloud.
+ *
+ * The tests use a KVLite that has a test hook that injects long
+ * latencies into all requests.
+ */
+public class AsyncLatencyTest extends LatencyTestBase {
+
+ /*
+ * This test manages its own kvlite/proxy startup to control specific
+ * setup properties to allow for a test hook that injects latency into
+     * setup properties, allowing for a test hook that injects latency into
+     * kvlite, and starts the proxy with only 2 worker threads.
+ * note this hides the superclass static method so it won't be called
+ */
+ @BeforeClass
+ public static void staticSetUp()
+ throws Exception {
+
+ // this test doesn't run on minicloud or cloud
+ assumeTrue(!Boolean.getBoolean(USEMC_PROP) &&
+ !Boolean.getBoolean(USECLOUD_PROP));
+
+ latencySetUp(true /*useAsync*/, 100 /*delayMs*/);
+ }
+
+ @Test
+ public void testAsyncGetPutLatency() throws Exception {
+
+ // skip this test if running on minicloud
+ assumeTrue(cloudRunning == false);
+
+ // with async, we should be able to keep the same latencies
+ // even when using more client threads than proxy threads
+ testLatency("asyncGetPutLatency",
+ 3 /*readThreads*/,
+ 3 /*writeThreads*/,
+ 3 /*rwThreads*/,
+ 0 /*qThreads*/,
+ 10 /*runSeconds*/,
+ 90 /*minReadLatencyMs*/,
+ 150 /*maxReadLatencyMs*/,
+ 90 /*minWriteLatencyMs*/,
+ 150 /*maxWriteLatencyMs*/,
+ 0 /*minQueryLatencyMs*/,
+ 0 /*maxQueryLatencyMs*/);
+ }
+
+
+ @Test
+ public void testAsyncQueryLatency() throws Exception {
+
+ // skip this test if running on minicloud
+ assumeTrue(cloudRunning == false);
+
+ // This test has too many random failures in jenkins to be
+ // useful. Most are due to lack of CPU or resources in those
+ // test environments. So only run this test if verbose is
+ // enabled, which isn't by default in jenkins.
+ assumeTrue(verbose);
+
+ // with async, we should be able to keep the same latencies
+ // even when using more client threads than proxy threads
+ testLatency("asyncQueryLatency",
+ 0 /*readThreads*/,
+ 0 /*writeThreads*/,
+ 0 /*rwThreads*/,
+ 8 /*qThreads*/,
+ 10 /*runSeconds*/,
+ 0 /*minReadLatencyMs*/,
+ 0 /*maxReadLatencyMs*/,
+ 0 /*minWriteLatencyMs*/,
+ 0 /*maxWriteLatencyMs*/,
+ 90 /*minQueryLatencyMs*/,
+ 250 /*maxQueryLatencyMs*/);
+ }
+
+
+ @Test
+ public void testAsyncGetPutQueryLatency() throws Exception {
+
+ // skip this test if running on minicloud
+ assumeTrue(cloudRunning == false);
+
+ // with async, we should be able to keep the same latencies
+ // even when using more client threads than proxy threads
+ testLatency("asyncGetPutQueryLatency",
+ 2 /*readThreads*/,
+ 2 /*writeThreads*/,
+ 2 /*rwThreads*/,
+ 4 /*qThreads*/,
+ 10 /*runSeconds*/,
+ 90 /*minReadLatencyMs*/,
+ 170 /*maxReadLatencyMs*/,
+ 90 /*minWriteLatencyMs*/,
+ 170 /*maxWriteLatencyMs*/,
+ 90 /*minQueryLatencyMs*/,
+ 250 /*maxQueryLatencyMs*/);
+ }
+
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/BadProtocolTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/BadProtocolTest.java
new file mode 100644
index 00000000..10ef2637
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/BadProtocolTest.java
@@ -0,0 +1,1879 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static io.netty.handler.codec.http.HttpMethod.POST;
+import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.BAD_PROTOCOL_MESSAGE;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.ILLEGAL_ARGUMENT;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.REQUEST_SIZE_LIMIT_EXCEEDED;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_ARRAY;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_BINARY;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_BOOLEAN;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_INTEGER;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_MAP;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_STRING;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.UNSUPPORTED_PROTOCOL;
+import static oracle.nosql.proxy.protocol.HttpConstants.ACCEPT;
+import static oracle.nosql.proxy.protocol.HttpConstants.AUTHORIZATION;
+import static oracle.nosql.proxy.protocol.HttpConstants.CONNECTION;
+import static oracle.nosql.proxy.protocol.HttpConstants.CONTENT_LENGTH;
+import static oracle.nosql.proxy.protocol.HttpConstants.CONTENT_TYPE;
+import static oracle.nosql.proxy.protocol.HttpConstants.NOSQL_DATA_PATH;
+import static oracle.nosql.proxy.protocol.HttpConstants.NOSQL_VERSION;
+import static oracle.nosql.proxy.protocol.HttpConstants.REQUEST_ID_HEADER;
+import static oracle.nosql.proxy.protocol.HttpConstants.REQUEST_COMPARTMENT_ID;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.UUID;
+
+import oracle.kv.impl.topo.RepNodeId;
+import oracle.nosql.driver.Consistency;
+import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.TimeToLive;
+import oracle.nosql.driver.Version;
+import oracle.nosql.driver.http.NoSQLHandleImpl;
+import oracle.nosql.driver.httpclient.HttpClient;
+import oracle.nosql.driver.httpclient.ResponseHandler;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.GetIndexesRequest;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetTableRequest;
+import oracle.nosql.driver.ops.ListTablesRequest;
+import oracle.nosql.driver.ops.MultiDeleteRequest;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutRequest.Option;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.Request;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableRequest;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.TableUsageRequest;
+import oracle.nosql.driver.ops.WriteMultipleRequest;
+import oracle.nosql.driver.ops.serde.BinarySerializerFactory;
+import oracle.nosql.driver.ops.serde.Serializer;
+import oracle.nosql.driver.query.QueryDriver;
+import oracle.nosql.driver.util.ByteInputStream;
+import oracle.nosql.driver.util.ByteOutputStream;
+import oracle.nosql.driver.util.NettyByteInputStream;
+import oracle.nosql.driver.util.NettyByteOutputStream;
+import oracle.nosql.driver.util.SerializationUtil;
+import oracle.nosql.driver.values.ArrayValue;
+import oracle.nosql.driver.values.IntegerValue;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.proxy.protocol.Protocol.OpCode;
+import oracle.nosql.proxy.security.SecureTestUtil;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.Channel;
+import io.netty.handler.codec.http.DefaultFullHttpRequest;
+import io.netty.handler.codec.http.FullHttpRequest;
+import io.netty.handler.codec.http.HttpHeaderNames;
+import io.netty.handler.codec.http.HttpHeaders;
+import io.netty.handler.codec.http.HttpResponseStatus;
+
+/**
+ * Tests on handling bad protocol on proxy side
+ */
+public class BadProtocolTest extends ProxyTestBase {
+
+ private static final short PROXY_SERIAL_VERSION =
+ oracle.nosql.proxy.protocol.BinaryProtocol.SERIAL_VERSION;
+
+ private final static String tableName = "users";
+
+ private final BinarySerializerFactory factory =
+ new BinarySerializerFactory();
+
+ private final MapValue key = createTestKey(1);
+ private final MapValue record = createTestValue();
+
+ private final GetRequest getRequest = new GetRequest()
+ .setTableName(tableName)
+ .setConsistency(Consistency.ABSOLUTE)
+ .setKey(key);
+
+ private final PutRequest putRequest = new PutRequest()
+ .setTableName(tableName)
+ .setValue(record)
+ .setTTL(TimeToLive.ofDays(1));
+
+ private final PutRequest putIfVersionRequest = new PutRequest()
+ .setOption(Option.IfVersion)
+ .setTableName(tableName)
+ .setValue(record)
+ .setMatchVersion(genVersion());
+
+ private final DeleteRequest deleteRequest = new DeleteRequest()
+ .setTableName(tableName)
+ .setKey(key);
+
+ private final DeleteRequest deleteIfVersionRequest = new DeleteRequest()
+ .setTableName(tableName)
+ .setMatchVersion(genVersion())
+ .setKey(key);
+
+ private final MultiDeleteRequest multiDeleteRequest =
+ new MultiDeleteRequest()
+ .setTableName(tableName)
+ .setKey(key)
+ .setMaxWriteKB(1024)
+ .setContinuationKey(genBytes(20, null));
+
+ private final WriteMultipleRequest writeMultipleRequest =
+ new WriteMultipleRequest()
+ .add(putRequest, false);
+
+ private final String statement = "select * from users";
+ private final PrepareRequest prepareRequest = new PrepareRequest()
+ .setStatement(statement);
+
+ private final String boundStatement = "declare $id integer; " +
+ "select * from users where id = $id";
+ private final PrepareRequest prepareBoundStmtRequest = new PrepareRequest()
+ .setStatement(boundStatement);
+
+ private final TableRequest tableRequest = new TableRequest()
+ .setStatement(createTableDDL)
+ .setTableLimits(new TableLimits(50, 50, 50));
+
+ private final TableRequest tableSetLimitsRequest = new TableRequest()
+ .setTableName(tableName)
+ .setTableLimits(new TableLimits(50, 50, 50));
+
+ private final GetIndexesRequest getIndexesRequest = new GetIndexesRequest()
+ .setTableName(tableName)
+ .setIndexName("idx1");
+
+ private final GetTableRequest getTableRequest = new GetTableRequest()
+ .setTableName(tableName)
+ .setOperationId("1");
+
+ private final ListTablesRequest listTablesRequest = new ListTablesRequest()
+ .setStartIndex(0)
+ .setLimit(0);
+
+ private final TableUsageRequest tableUsageRequest = new TableUsageRequest()
+ .setTableName(tableName)
+ .setStartTime(System.currentTimeMillis())
+ .setEndTime(System.currentTimeMillis() + 3600_000)
+ .setLimit(10);
+
+ /* Create a table */
+ private final static String createTableDDL =
+ "CREATE TABLE IF NOT EXISTS " + tableName + "(" +
+ "id INTEGER, " +
+ "name STRING, " +
+ "count LONG, " +
+ "avg DOUBLE, " +
+ "sum NUMBER, " +
+ "exp BOOLEAN, " +
+ "key BINARY, " +
+ "map MAP(INTEGER), " +
+ "array ARRAY(STRING), " +
+ "record RECORD(rid INTEGER, rs STRING), " +
+ "PRIMARY KEY(id))";
+
+ private final static String createIndexDDL =
+ "CREATE INDEX IF NOT EXISTS idx1 ON " + tableName + "(name)";
+
+ private ByteBuf buf;
+ private HttpClient httpClient;
+ private NoSQLHandleConfig httpConfig;
+ private String kvRequestURI;
+ private int timeoutMs;
+ private int requestId = 0;
+
+ @BeforeClass
+ public static void staticSetUp()
+ throws Exception {
+
+ /*
+         * This test composes requests and sends them to the proxy without
+         * using the driver; the requests are not signed and therefore
+         * cannot be run in the cloud test.
+ */
+        assumeTrue("Skip BadProtocolTest in cloud test",
+ !Boolean.getBoolean(USECLOUD_PROP));
+
+ ProxyTestBase.staticSetUp();
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+
+ buf = Unpooled.buffer();
+
+ URL url = new URL("http", getProxyHost(), getProxyPort(), "/");
+ httpConfig = new NoSQLHandleConfig(url);
+
+ kvRequestURI = httpConfig.getServiceURL().toString() +
+ NOSQL_VERSION + "/" + NOSQL_DATA_PATH;
+ timeoutMs = httpConfig.getDefaultRequestTimeout();
+
+ httpClient = createHttpClient(getProxyHost(),
+ getProxyPort(),
+ httpConfig.getNumThreads(),
+ "BadProtocolTest",
+ null /* Logger */);
+ assertNotNull(httpClient);
+
+ createTable();
+
+ if (isSecure()) {
+ /* warm up security caches */
+ handle.put(putRequest);
+ handle.get(getRequest);
+ handle.delete(deleteRequest);
+ handle.getTable(new GetTableRequest().setTableName(tableName));
+ handle.getTableUsage(tableUsageRequest);
+ handle.getIndexes(getIndexesRequest);
+ handle.query(createQueryWithBoundStmtRequest());
+ }
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+
+ if (buf != null) {
+ buf.release(buf.refCnt());
+ }
+
+ if (httpClient != null) {
+ httpClient.shutdown();
+ }
+ super.tearDown();
+ }
+
+ @Before
+ public void setVersion() throws Exception {
+ /*
+         * This test suite is completely V2/V3-centric, so
+         * set the serial version to 3 if it is higher.
+ */
+ forceV3((NoSQLHandleImpl)handle);
+ }
+
+ /**
+ * Test bad protocol data on below values:
+ * 1. SerialVersion
+ * 2. OpCode
+ * 3. RequestTimeout
+ * 4. TableName
+ * 5. ReturnRowFlag
+ * 6. MapValue
+ * 7. IfUpdateTTL
+ * 8. TTLValue
+ */
+ @Test
+ public void testPutRequest() {
+
+ final int[] lengths = {
+ 2 /* SerialVersion: short */,
+ 1 /* OpCode: byte */,
+ 3 /* RequestTimeout: packed int */,
+ 6 /* TableName: String */,
+ 1 /* ReturnRowFlag: boolean */,
+ 1 /* Durability: one byte */,
+ 1 /* ExactMatch: boolean */,
+ 1 /* IdentityCacheSize: packed int */,
+ 248 /* Record: MapValue */,
+ 1 /* IfUpdateTTL: boolean */,
+ 2 /* TTL: value(packed long) + unit(byte)*/
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, putRequest);
+
+ try {
+ String test;
+ int offset = 0;
+ int pos = 0;
+
+ test = "OK test";
+ executeRequest(test, buf, 0);
+
+ /*
+ * SerialVersion
+ */
+
+ /* SerialVersion: 0 */
+ test = "Bad serialVersion: 0";
+ buf.setShort(offset, 0);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* SerialVersion: PROXY_SERIAL_VERSION + 1 */
+ test = "Bad serialVersion: PROXY_SERIAL_VERSION + 1";
+ refillBuffer(buf, bufBytes);
+ buf.setShort(offset, PROXY_SERIAL_VERSION + 1);
+ executeRequest(test, buf, UNSUPPORTED_PROTOCOL);
+
+ /*
+ * OpCode
+ */
+
+ /* Invalid OpCode */
+ offset += lengths[pos++];
+ test = "Bad OpCode";
+ int invalidOpCode = OpCode.values().length;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, invalidOpCode);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * RequestTimeout
+ */
+
+ /* requestTimeout: -5000 */
+ test = "Bad requestTimeout: -5000";
+ offset += lengths[pos++];
+ int invalidTimeout = -5000;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, invalidTimeout);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * TableName
+ */
+
+ /* Invalid TableName: null or empty string */
+ String invalidTableName = null;
+ test = "TableName: " + invalidTableName;
+ offset += lengths[pos++];
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, invalidTableName);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ invalidTableName = "";
+ test = "TableName: " + invalidTableName;
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, invalidTableName);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * ReturnRowFlag
+ */
+ offset += lengths[pos++];
+
+ /*
+ * Durability
+ * Only in V3 and above
+ */
+ short serialVersion = ((NoSQLHandleImpl)handle).getSerialVersion();
+ if (serialVersion > 2) {
+ offset += lengths[pos++];
+ } else {
+ pos++;
+ }
+
+ /*
+ * ExactMatch
+ */
+ offset += lengths[pos++];
+
+ /*
+ * IdentityCacheSize
+ */
+ offset += lengths[pos++];
+
+ /*
+ * MapValue
+ */
+ offset += lengths[pos++];
+ testMapValue(buf, out, bufBytes, offset, lengths[pos]);
+
+ /*
+ * IfUpdateTTLFlag
+ */
+ offset += lengths[pos++];
+
+ /*
+ * TTL
+ */
+ long invalidTTL = -2;
+ offset += lengths[pos++];
+ test = "TTL: " + invalidTTL;
+ refillBuffer(buf, bufBytes);
+ setPackedLong(out, offset, invalidTTL);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ test = "TTL: invalid ttl unit";
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset + 1, -1);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ } catch (IOException ioe) {
+ fail("Write failed: " + ioe.getMessage());
+ } finally {
+ out.close();
+ }
+ }
+
+ private void testMapValue(ByteBuf buffer,
+ ByteOutputStream out,
+ byte[] bufBytes,
+ int baseOffset,
+ int length) throws IOException {
+        final int headerLen = 9; /* 1(type) + 4(length) + 4(size) */
+ final String[] fields = new String[] {
+ "avg",
+ "array",
+ "record",
+ "name",
+ "count",
+ "sum",
+ "id",
+ "exp",
+ "map",
+ "key"
+ };
+ final int[] lengths = new int[] {
+ 13, /* avg: DOUBLE, 4(name) + 1(type) + 8(double) */
+ 36, /* array: ARRAY, 6(name) + 1(type) + 29(value) */
+ 34, /* record: RECORD, 7(name) + 1(type) + 26(value) */
+ 19, /* name: STRING, 5(name) + 1(type) + 13(value) */
+ 16, /* count: LONG, 6(name) + 1(type) + 9(value) */
+ 44, /* sum: NUMBER, 4(name) + 1(type) + 39(value) */
+ 5, /* id: INTEGER, 3(name) + 1(type) + 1(value) */
+ 6, /* exp: BOOLEAN, 4(name) + 1(type) + 1(value) */
+ 30, /* map: MAP, 4(name) + 1(type) + 25(value) */
+ 36 /* key: BINARY, 4(name) + 1(type) + 31(value) */
+ };
+
+ final Map<String, Integer> offsets = new HashMap<>();
+ int offset = baseOffset + headerLen;
+ for (int i = 0; i < fields.length; i++) {
+ offsets.put(fields[i], offset);
+ offset += lengths[i];
+ }
+
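+ /*
+ * offsets now maps each field name to the byte offset where that
+ * field's name string begins within the serialized MapValue.
+ */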
+ offset = baseOffset;
+ String test;
+ ByteInputStream in;
+ int pos = 0;
+ int value;
+ String svalue;
+
+ /* Corrupted type of top MapValue */
+ value = -1;
+ test = "MapValue: corrupted type of top MapValue, " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = TYPE_INTEGER;
+ test = "MapValue: corrupted type of top MapValue, " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Wrong length value */
+ offset += 1;
+ refillBuffer(buffer, bufBytes);
+ in = new NettyByteInputStream(buffer);
+ value = bufBytes.length + 1;
+ setInt(out, offset, value);
+ test = "MapValue: wrong length value, " + value ;
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ refillBuffer(buffer, bufBytes);
+ value = -1;
+ setInt(out, offset, value);
+ test = "MapValue: wrong length value, " + value ;
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Wrong size value */
+ offset += 4;
+ refillBuffer(buffer, bufBytes);
+ value = -1;
+ setInt(out, offset, value);
+ test = "MapValue: wrong size value, " + value ;
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * Field: avg
+ */
+ String fname = "avg";
+ offset = offsets.get(fname);
+ svalue = null;
+ refillBuffer(buffer, bufBytes);
+ setString(out, offset, svalue);
+ test = "MapValue: field name is null" ;
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ svalue = "";
+ refillBuffer(buffer, bufBytes);
+ setString(out, offset, svalue);
+ test = "MapValue: field name is empty string" ;
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Corrupted value type */
+ value = 100;
+ offset += fname.length() + 1;
+ test = "MapValue: corrupted type of field \"avg\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid value type for DOUBLE */
+ value = TYPE_BOOLEAN;
+ test = "MapValue: invalid value type for field \"avg\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * Field: array
+ */
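+ /*
+ * Serialized ARRAY layout: 4-byte length, 4-byte element count,
+ * then each element as a type byte followed by its value.
+ */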
+ final int[] arrayElemLens = new int[] {
+ 4 /* length */,
+ 4 /* size */,
+ 1 /* 1st value's type */,
+ 6 /* 1st value */,
+ 1 /* 2nd value's type */,
+ 6 /* 2nd value */,
+ 1 /* 3rd value's type */,
+ 6 /* 3rd value */,
+ };
+
+ pos = 0;
+ fname = "array";
+ offset = offsets.get(fname);
+
+ /* Invalid value type for array value */
+ offset += fname.length() + 1;
+ value = TYPE_MAP;
+ test = "MapValue: invalid value type for field \"array\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, ILLEGAL_ARGUMENT);
+
+ value = TYPE_INTEGER;
+ test = "MapValue: invalid value type for field \"array\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid length value of array value */
+ offset++;
+ length = readInt(in, offset);
+ value = -1;
+ test = "MapValue: invalid length of \"array\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = length + 1;
+ test = "MapValue: invalid length of \"array\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = length - 1;
+ test = "MapValue: invalid length of \"array\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid size value of array value */
+ offset += arrayElemLens[pos++];
+ int size = readInt(in, offset);
+ value = -1;
+ test = "MapValue: invalid size of \"array\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = size + 2;
+ test = "MapValue: invalid size of \"array\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid element type */
+ offset += arrayElemLens[pos++];
+ value = -1;
+ test = "MapValue: invalid element type of \"array\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = TYPE_BINARY;
+ test = "MapValue: invalid element type of \"array\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, ILLEGAL_ARGUMENT);
+
+ /* Invalid element value */
+ offset += arrayElemLens[pos++];
+ test = "MapValue: invalid element value of \"array\"";
+ refillBuffer(buffer, bufBytes);
+ setPackedInt(out, offset, -1);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ refillBuffer(buffer, bufBytes);
+ setPackedInt(out, offset, 100);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
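+ /*
+ * Field: map
+ * Serialized MAP layout: 4-byte length, 4-byte entry count, then
+ * each entry as a key string, a type byte and the value.
+ */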
+ final int[] mapElemLens = new int[] {
+ 4 /* length */,
+ 4 /* size */,
+ 3 /* k1 */,
+ 1 /* type */,
+ 1 /* k1's value */,
+ 3 /* k2 */,
+ 1 /* type */,
+ 2 /* k2's value */,
+ 3 /* k3 */,
+ 1 /* type */,
+ 2 /* k3's value */,
+ };
+
+ pos = 0;
+ fname = "map";
+ offset = offsets.get(fname);
+
+ /* Invalid value type for map value */
+ offset += fname.length() + 1;
+ value = TYPE_ARRAY;
+ test = "MapValue: invalid value type for field \"map\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, ILLEGAL_ARGUMENT);
+
+ value = TYPE_INTEGER;
+ test = "MapValue: invalid value type for field \"map\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid length value of map value */
+ offset++;
+ length = readInt(in, offset);
+ value = -1;
+ test = "MapValue: invalid length of \"map\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = length + 1;
+ test = "MapValue: invalid length of \"map\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = length - 1;
+ test = "MapValue: invalid length of \"map\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid size value of map value */
+ offset += mapElemLens[pos++];
+ size = readInt(in, offset);
+ value = -1;
+ test = "MapValue: invalid size of \"map\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = size + 2;
+ test = "MapValue: invalid size of \"map\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid key */
+ offset += mapElemLens[pos++];
+ svalue = null;
+ test = "MapValue: invalid key \"map\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setString(out, offset, svalue);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid element type */
+ offset += mapElemLens[pos++];
+ value = -1;
+ test = "MapValue: invalid element type of \"map\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = TYPE_BINARY;
+ test = "MapValue: invalid element type of \"map\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, ILLEGAL_ARGUMENT);
+
+ /* Invalid element value */
+ offset += mapElemLens[pos++];
+ test = "MapValue: invalid element value of \"map\"";
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, -1);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ test = "MapValue: invalid element value of \"map\"";
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, 0);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = TYPE_STRING;
+ test = "MapValue: invalid element value of \"map\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset - 1, value);
+ setString(out, offset, null);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = TYPE_STRING;
+ test = "MapValue: invalid element value of \"map\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset - 1, value);
+ setString(out, offset, "");
+ executeRequest(test, buf, ILLEGAL_ARGUMENT);
+
+ /* Record value */
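+ /*
+ * Serialized RECORD layout mirrors MAP: 4-byte length, 4-byte
+ * field count, then each field as a name string, a type byte and
+ * the value.
+ */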
+ final int[] recordElemLens = new int[] {
+ 4 /*length*/,
+ 4 /*size*/,
+ 3 /*record.ri's name*/,
+ 1 /*record.ri's type*/,
+ 2 /*record.ri's value*/,
+ 3 /*record.rs's name*/,
+ 1 /*record.rs's type*/,
+ 13 /*record.rs's value*/,
+ };
+
+ pos = 0;
+ fname = "record";
+ offset = offsets.get(fname);
+
+ /* Invalid value type for RECORD */
+ offset += fname.length() + 1;
+ value = TYPE_INTEGER;
+ test = "MapValue: invalid value type for field \"record\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid length value of RECORD */
+ offset++;
+ length = readInt(in, offset);
+ value = -1;
+ test = "MapValue: invalid length of \"record\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = length + 1;
+ test = "MapValue: invalid length of \"record\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = length - 1;
+ test = "MapValue: invalid length of \"record\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid size value of record */
+ offset += recordElemLens[pos++];
+ size = readInt(in, offset);
+ value = -1;
+ test = "MapValue: invalid size of \"record\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = size + 2;
+ test = "MapValue: invalid size of \"record\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid field name */
+ offset += recordElemLens[pos++];
+ svalue = null;
+ test = "MapValue: invalid field name of \"record\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setString(out, offset, svalue);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ svalue = "";
+ test = "MapValue: invalid field name of \"record\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setString(out, offset, svalue);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+ }
+
+ /**
+ * Test bad protocol data for the following values:
+ * 1. Version
+ */
+ @Test
+ public void testPutIfVersionRequest() {
+
+ final int[] lengths = {
+ 2 /* SerialVersion: short */,
+ 1 /* OpCode: byte */,
+ 3 /* RequestTimeout: packed int */,
+ 6 /* TableName: String */,
+ 1 /* ReturnRowFlag: boolean */,
+ 1 /* Durability: one byte */,
+ 1 /* ExactMatch: boolean */,
+ 1 /* IdentityCacheSize: packed int */,
+ 248 /* Record: MapValue */,
+ 1 /* IfUpdateTTL: boolean */,
+ 1 /* TTL: packed long (-1) */,
+ 51 /* Version: byte array */
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, putIfVersionRequest);
+
+ try {
+ String test;
+ int offset = 0;
+
+ test = "OK test";
+ executeRequest(test, buf, 0);
+
+ /*
+ * Version
+ */
+ for (int i = 0; i < lengths.length - 1; i++) {
+ offset += lengths[i];
+ }
+ byte[] versionBytes = null;
+ test = "Version: null";
+ refillBuffer(buf, bufBytes);
+ setByteArray(out, offset, versionBytes);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ versionBytes = new byte[0];
+ test = "Version: empty byte array";
+ refillBuffer(buf, bufBytes);
+ setByteArray(out, offset, versionBytes);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ versionBytes = genBytes(10, null);
+ test = "Version: invalid binary format";
+ refillBuffer(buf, bufBytes);
+ setByteArray(out, offset, versionBytes);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ } catch (IOException ioe) {
+ fail("Write failed: " + ioe.getMessage());
+ } finally {
+ out.close();
+ }
+ }
+
+ /**
+ * Test bad protocol data for the following values:
+ * 1. Consistency
+ * 2. PrimaryKey type
+ */
+ @Test
+ public void testGetRequest() {
+
+ final int[] lengths = {
+ 2 /* SerialVersion: short*/,
+ 1 /* OpCode: byte*/,
+ 3 /* RequestTimeout: packed int */,
+ 6 /* TableName: string */,
+ 1 /* Consistency: byte */,
+ 14 /* Key: 1(TYPE_MAP) + 4(length) + 4(size) + 3("id") +
+ 1(TYPE_INT) + 1(1-value) */
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, getRequest);
+
+ try {
+ String test;
+ int pos;
+ int offset = 0;
+
+ test = "OK test";
+ executeRequest(test, buf, 0);
+
+ /*
+ * Consistency
+ */
+
+ /* Move to offset of consistency */
+ for (pos = 0; pos < 4; pos++) {
+ offset += lengths[pos];
+ }
+
+ /* Invalid consistency type */
+ int value = -1;
+ test = "Invalid consistency type: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = 3;
+ test = "Invalid consistency type: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * PrimaryKey
+ */
+ offset += lengths[pos++];
+
+ value = -1;
+ test = "Invalid value type of PrimaryKey: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = TYPE_ARRAY;
+ test = "Invalid value type of PrimaryKey: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ } finally {
+ out.close();
+ }
+ }
+
+ /**
+ * Test bad protocol data for the following values:
+ * 1. MaxWriteKB
+ * 2. ContinuationKey
+ */
+ @Test
+ public void testMultiDeleteRequest() {
+ final int[] lengths = {
+ 2 /* SerialVersion: short */,
+ 1 /* OpCode: byte */,
+ 3 /* RequestTimeout: packed int */,
+ 6 /* TableName: string */,
+ 1 /* Durability: one byte */,
+ 14 /* FieldValue: MapValue */,
+ 1 /* HasFieldRange: boolean */,
+ 3 /* MaxWriteKB: packed int */,
+ 21 /* ContinuationKey: byte array */
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, multiDeleteRequest);
+
+ try {
+ String test;
+ int pos;
+ int offset = 0;
+
+ test = "OK test";
+ executeRequest(test, buf, 0);
+
+ /* Move to offset of MaxWriteKB */
+ for (pos = 0; pos < 7; pos++) {
+ offset += lengths[pos];
+ }
+
+ /*
+ * MaxWriteKB
+ */
+ int value = -1;
+ test = "Invalid maxWriteKB: " + value;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ if (!onprem) {
+ value = rlimits.getRequestWriteKBLimit() + 1;
+ test = "Invalid maxWriteKB: " + value;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+ }
+
+ /*
+ * Continuation Key
+ */
+ offset += lengths[pos];
+ value = -2;
+ test = "Invalid continuation key: " + value;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = 100;
+ test = "Invalid continuation key: " + value;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ } catch (IOException ioe) {
+ fail("Failed to write to buffer: " + ioe);
+ } finally {
+ out.close();
+ }
+ }
+
+ /**
+ * Test bad protocol data for the following values:
+ * 1. Statement
+ */
+ @Test
+ public void testPrepareStatement() {
+ final int[] lengths = new int[] {
+ 2 /* SerialVersion: short */,
+ 1 /* OpCode: byte */,
+ 3 /* RequestTimeout: packed int */,
+ 20 /* Statement: string */
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, prepareRequest);
+
+ try {
+ String test;
+ int pos;
+ int offset = 0;
+
+ test = "OK test";
+ executeRequest(test, buf, 0);
+
+ /*
+ * Statement
+ */
+ for (pos = 0; pos < 3; pos++) {
+ offset += lengths[pos];
+ }
+
+ String svalue = null;
+ test = "Invalid statement: " + svalue;
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, svalue);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ svalue = "";
+ test = "Invalid statement: " + svalue;
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, svalue);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ int value = statement.length() + 1;
+ test = "Invalid statement value, its length is " + value;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = -2;
+ test = "Invalid statement value, its length is " + value;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ } catch (IOException ioe) {
+ fail("Failed to write to buffer: " + ioe);
+ } finally {
+ out.close();
+ }
+ }
+
+ /**
+ * Test bad protocol data for the following values:
+ * 1. PreparedStatement
+ * 2. Variables Number
+ * 3. Variable Name
+ * 4. Variable Value
+ */
+ @Test
+ public void testQueryRequest() {
+ final QueryRequest queryReq = createQueryWithBoundStmtRequest();
+
+ final int prepStmtLen =
+ 4 /* int, length of PreparedStatement */ +
+ queryReq.getPreparedStatement().getStatement().length;
+
+ final int[] lengths = {
+ 2 /* SerialVersion: short*/,
+ 1 /* OpCode: byte */,
+ 3 /* RequestTimeout: packed int */,
+ 1 /* Consistency: byte */,
+ 1 /* NumberLimit: packed int */,
+ 3 /* MaxReadKB: packed int */,
+ 1 /* ContinuationKey: byte array */,
+ 1 /* IsPreparedStatement: boolean */,
+ 2 /* QueryVersion: short */,
+ 1 /* traceLevel: packed int */,
+ 1 /* MaxWriteKB: packed int */,
+ 1 /* MathContext: byte */,
+ 1 /* TopologySeqNum: packed int */,
+ 1 /* ShardId: packed int */,
+ 1 /* isSimpleQuery: boolean */,
+ prepStmtLen /* PreparedStatement: byte array */,
+ 1 /* VariablesNumber: packed int */,
+ 4 /* VariableName: string */,
+ 2 /* VariableValue: INT_TYPE + packed int */
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, queryReq);
+
+ try {
+ String test;
+ int pos;
+ int offset = 0;
+
+ test = "OK test";
+ executeRequest(test, buf, 0);
+
+ /*
+ * PreparedStatement
+ */
+ for (pos = 0; pos < 15; pos++) {
+ offset += lengths[pos];
+ }
+
+ int value = -1;
+ test = "Invalid prepared Statement";
+ refillBuffer(buf, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = 0;
+ test = "Invalid prepared Statement";
+ refillBuffer(buf, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * Variables number
+ */
+ value = -1;
+ offset += lengths[pos++];
+ test = "Invalid variable number: " + value;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = 2;
+ test = "Invalid variable number: " + value;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * Variable name
+ */
+ offset += lengths[pos++];
+ String svalue = null;
+ test = "Invalid variable name: " + svalue;
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, svalue);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ svalue = "";
+ test = "Invalid variable name: " + svalue;
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, svalue);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * Variable value
+ */
+ offset += lengths[pos++];
+ value = -1;
+ test = "Invalid variable value type: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = TYPE_ARRAY;
+ test = "Invalid variable value type: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeRequest(test, buf, ILLEGAL_ARGUMENT);
+
+ } catch (IOException ioe) {
+ fail("Failed to write to buffer: " + ioe);
+ } finally {
+ out.close();
+ }
+ }
+
+ /**
+ * Test bad protocol data for the following values:
+ * 1. Operation number
+ * 2. OpCode of sub request
+ */
+ @Test
+ public void testWriteMultipleRequest() {
+ final int[] lengths = {
+ 2 /* SerialVersion: short */,
+ 1 /* OpCode: byte */,
+ 3 /* RequestTimeout: packed int */,
+ 6 /* TableName: string */,
+ 1 /* OperationNum: packed int */,
+ 1 /* Durability: one byte */,
+ 1 /* isAbortIfUnsuccessful: boolean */,
+ 253 /* Request */
+ };
+
+ final WriteMultipleRequest umReq = writeMultipleRequest;
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, umReq);
+
+ try {
+ String test;
+ int pos;
+ int offset = 0;
+
+ test = "OK test";
+ executeRequest(test, buf, 0);
+
+ /*
+ * Operation number
+ */
+ for (pos = 0; pos < 4; pos++) {
+ offset += lengths[pos];
+ }
+
+ int value = -1;
+ test = "Invalid operation number: " + value;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = 3;
+ test = "Invalid operation number: " + value;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* durability */
+ value = 4; /* bad: only one of three values set */
+ offset += lengths[pos++];
+ test = "Invalid durability: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+ /*
+ * OpCode of sub request
+ */
+ offset += lengths[pos++]; /* isAbortIfUnsuccessful */
+ offset += lengths[pos++];
+
+ value = -1;
+ test = "Invalid operation code: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = OpCode.GET.ordinal();
+ test = "Invalid operation code: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+ } catch (IOException ioe) {
+ fail("Failed to write to buffer: " + ioe);
+ } finally {
+ out.close();
+ }
+ }
+
+ /**
+ * Test bad protocol data for the following values:
+ * 1. ReadKB
+ * 2. WriteKB
+ * 3. StorageGB
+ */
+ @Test
+ public void testTableRequest() {
+ final int[] lengths = {
+ 2 /* SerialVersion: short */,
+ 1 /* OpCode: byte */,
+ 3 /* RequestTimeout: packed int */,
+ 215 /* Statement: string */,
+ 1 /* HasLimit: boolean */,
+ 4 /* ReadKB: int */,
+ 4 /* WriteKB: int */,
+ 4 /* StorageGB: int */,
+ 1 /* LimitsMode: byte */,
+ 1 /* HasTableName: boolean */
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, tableRequest);
+
+ try {
+ String test;
+ int pos;
+ int offset = 0;
+
+ test = "OK test";
+ executeRequest(test, buf, 0);
+
+ /*
+ * ReadKB
+ */
+ for (pos = 0; pos < 5; pos++) {
+ offset += lengths[pos];
+ }
+
+ int value = 0;
+ test = "Invalid readKB: " + value;
+ refillBuffer(buf, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, ILLEGAL_ARGUMENT);
+
+ /*
+ * WriteKB
+ */
+ offset += lengths[pos++];
+ value = 0;
+ test = "Invalid writeKB: " + value;
+ refillBuffer(buf, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, ILLEGAL_ARGUMENT);
+
+ /*
+ * StorageGB
+ */
+ offset += lengths[pos++];
+ value = 0;
+ test = "Invalid StorageMaxGB: " + value;
+ refillBuffer(buf, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, ILLEGAL_ARGUMENT);
+
+ } catch (Exception ioe) {
+ fail("Failed to write to buffer: " + ioe);
+ } finally {
+ out.close();
+ }
+ }
+
+ @Test
+ public void testGetIndexesRequest() {
+ final int[] lengths = {
+ 2 /* SerialVersion: short */,
+ 1 /* OpCode: byte */,
+ 3 /* RequestTimeout: packed int */,
+ 6 /* TableName: string */,
+ 1 /* HasIndex: boolean */,
+ 5 /* IndexName: string */
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, getIndexesRequest);
+
+ try {
+ String test;
+ int pos;
+ int offset = 0;
+
+ test = "OK test";
+ executeRequest(test, buf, 0);
+
+ /*
+ * Index name
+ */
+ for (pos = 0; pos < 5; pos++) {
+ offset += lengths[pos];
+ }
+
+ String svalue = null;
+ test = "Invalid Index name: " + svalue;
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, svalue);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ svalue = "";
+ test = "Invalid Index name: " + svalue;
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, svalue);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ } catch (Exception ioe) {
+ fail("Failed to write to buffer: " + ioe);
+ } finally {
+ out.close();
+ }
+ }
+
+ @Test
+ public void testListTablesRequest() {
+ final int[] lengths = {
+ 2 /* SerialVersion: short */,
+ 1 /* OpCode: byte */,
+ 3 /* RequestTimeout: packed int */,
+ 4 /* StartIndex: int */,
+ 4 /* Limit: int*/
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, listTablesRequest);
+
+ try {
+ String test;
+ int pos;
+ int offset = 0;
+
+ test = "OK test";
+ executeRequest(test, buf, 0);
+
+ /*
+ * Start index
+ */
+ for (pos = 0; pos < 3; pos++) {
+ offset += lengths[pos];
+ }
+
+ int value = -1;
+ test = "Invalid start index: " + value;
+ refillBuffer(buf, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * Limit
+ */
+ offset += lengths[pos++];
+ test = "Invalid limit: " + value;
+ refillBuffer(buf, bufBytes);
+ setInt(out, offset, value);
+ executeRequest(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ } catch (Exception ioe) {
+ fail("Failed to write to buffer: " + ioe);
+ } finally {
+ out.close();
+ }
+ }
+
+ @Test
+ public void testBrokenRequest() {
+ final Request[] requests = new Request[] {
+ getRequest,
+ putRequest,
+ putIfVersionRequest,
+ deleteRequest,
+ deleteIfVersionRequest,
+ multiDeleteRequest,
+ writeMultipleRequest,
+ prepareRequest,
+ createQueryWithBoundStmtRequest(),
+ tableRequest,
+ tableSetLimitsRequest,
+ getIndexesRequest,
+ getTableRequest,
+ listTablesRequest,
+ tableUsageRequest
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ for (Request request : requests) {
+ buf.clear();
+ serializeRequest(out, request);
+ testBrokenMessage(request.getClass().getName(), buf, 1,
+ BAD_PROTOCOL_MESSAGE);
+ }
+ out.close();
+ }
+
+ /*
+ * TODO: Enable this test after enhancing the validation checks,
+ * especially for serialized PreparedStatement.
+ */
+ @Ignore
+ public void testRandomCorruptedRequest() {
+ final Request[] requests = new Request[] {
+ getRequest,
+ putRequest,
+ putIfVersionRequest,
+ deleteRequest,
+ deleteIfVersionRequest,
+ multiDeleteRequest,
+ writeMultipleRequest,
+ prepareRequest,
+ createQueryWithBoundStmtRequest(),
+ tableRequest,
+ tableSetLimitsRequest,
+ getIndexesRequest,
+ getTableRequest,
+ listTablesRequest,
+ tableUsageRequest
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final Random rand = new Random(System.currentTimeMillis());
+ final int round = 10;
+
+ for (Request request : requests) {
+ buf.clear();
+ byte[] bufBytes = serializeRequest(out, request);
+ for (int i = 0; i < round; i++) {
+ final int offset = rand.nextInt(buf.writerIndex() - 2);
+ byte[] corruptedBytes = corruptBuffer(buf, rand, offset);
+ executeRequest(request.getClass().getName(),
+ buf,
+ -1/* don't check error code */,
+ new ExecuteFailHandler() {
+ @Override
+ public void fail(String test, Throwable t) {
+ printBytes(test + " offset=" + offset,
+ corruptedBytes);
+ }
+ }
+ );
+ refillBuffer(buf, bufBytes);
+ }
+ }
+ out.close();
+ }
+
+ /*
+ * Test the request size limit check on the proxy.
+ */
+ @Test
+ public void testRequestSizeLimit() {
+ assumeTrue(!onprem);
+
+ int limit = rlimits.getRequestSizeLimit();
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+
+ try {
+ String test = "Put request with size > " + limit;
+ PutRequest putReq = new PutRequest()
+ .setTableName(tableName)
+ .setValue(new MapValue()
+ .put("id", 0)
+ .put("key", genBytes(limit, null)));
+
+ serializeRequest(out, putReq);
+ executeRequest(test, buf, REQUEST_SIZE_LIMIT_EXCEEDED);
+ } finally {
+ out.close();
+ }
+ }
+
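+ /*
+ * Overwrites a random-length span of the buffer, starting at offset,
+ * with generated bytes; returns the bytes written for diagnostics.
+ */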
+ private byte[] corruptBuffer(ByteBuf buffer, Random rand, int offset) {
+ int len = rand.nextInt(buffer.writerIndex() - offset);
+ byte[] bytes = genBytes(len, rand);
+ buffer.setBytes(offset, bytes);
+ return bytes;
+ }
+
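+ /*
+ * Prepares a statement and binds $id so the resulting QueryRequest
+ * carries a serialized prepared statement plus one bound variable.
+ */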
+ private QueryRequest createQueryWithBoundStmtRequest() {
+ final PrepareResult prepRet = handle.prepare(prepareBoundStmtRequest);
+ prepRet.getPreparedStatement()
+ .setVariable("$id", new IntegerValue(1));
+
+ final QueryRequest queryReq = new QueryRequest()
+ .setPreparedStatement(prepRet)
+ .setMaxReadKB(1024)
+ .setLimit(100);
+ return queryReq;
+ }
+
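+ /*
+ * Serializes the request into buf and returns a copy of the bytes
+ * so tests can restore the pristine payload after each corruption.
+ */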
+ private byte[] serializeRequest(ByteOutputStream out, Request request) {
+
+ request.setDefaults(httpConfig);
+
+ Serializer ser = request.createSerializer(factory);
+ try {
+ short serialVersion = ((NoSQLHandleImpl)handle).getSerialVersion();
+ out.writeShort(serialVersion);
+ if (request instanceof QueryRequest ||
+ request instanceof PrepareRequest) {
+ ser.serialize(request, serialVersion,
+ QueryDriver.QUERY_V3, out);
+ } else {
+ ser.serialize(request, serialVersion, out);
+ }
+ } catch (IOException e) {
+ fail("Failed to serialize put request");
+ }
+
+ final byte[] bytes = new byte[buf.writerIndex()];
+ System.arraycopy(buf.array(), 0, bytes, 0, bytes.length);
+
+ return bytes;
+ }
+
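+ /*
+ * Replays the request truncated at every possible writer index to
+ * verify that the proxy rejects incomplete messages.
+ */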
+ private void testBrokenMessage(String name,
+ ByteBuf buffer,
+ int offset,
+ int errCode) {
+ for (int i = 0; i < buffer.writerIndex() - 1; i++) {
+ buffer.readerIndex(0);
+ buffer.writerIndex(offset + i);
+ executeRequest("testBrokenMessage - " + name + ": " +
+ buffer.writerIndex(), buffer, errCode);
+ }
+ }
+
+ private void executeRequest(String test, ByteBuf buffer, int expErrCode) {
+ executeRequest(test, buffer, expErrCode, null);
+ }
+
+ private void executeRequest(String test,
+ ByteBuf buffer,
+ int expErrCode,
+ ExecuteFailHandler failHandler) {
+
+ ResponseHandler responseHandler = null;
+ ByteInputStream bis = null;
+
+ /* Increase reference count of buffer by 1 */
+ buffer.retain();
+
+ try {
+ Channel channel = httpClient.getChannel(timeoutMs);
+ responseHandler = new ResponseHandler(httpClient, null, channel);
+
+ final FullHttpRequest request =
+ new DefaultFullHttpRequest(HTTP_1_1, POST, kvRequestURI,
+ buffer,
+ false /* Don't validate hdrs */);
+ HttpHeaders headers = request.headers();
+ headers.add(HttpHeaderNames.HOST, getProxyHost())
+ .add(REQUEST_ID_HEADER, nextRequestId())
+ .set(CONTENT_TYPE, "application/octet-stream")
+ .set(CONNECTION, "keep-alive")
+ .set(ACCEPT, "application/octet-stream")
+ .setInt(CONTENT_LENGTH, buffer.readableBytes());
+
+ if (!onprem) {
+ headers.set(AUTHORIZATION, SecureTestUtil.getAuthHeader(
+ getTenantId(), isSecure()));
+ }
+ if (isSecure()) {
+ headers.add(REQUEST_COMPARTMENT_ID, getTenantId());
+ }
+
+ httpClient.runRequest(request, responseHandler, channel);
+
+ assertFalse("Request timed out after " + timeoutMs + " ms",
+ responseHandler.await(timeoutMs));
+ /* Validates the response from proxy */
+ assertEquals(HttpResponseStatus.OK, responseHandler.getStatus());
+ bis = new NettyByteInputStream(responseHandler.getContent());
+ int errCode = bis.readByte();
+ if (expErrCode >= 0) {
+ if (expErrCode == errCode) {
+ return;
+ }
+ /* support V4 server error codes */
+ if (errCode == 6) { /* nson MAP */
+ errCode = getV4ErrorCode(responseHandler.getContent());
+ }
+ assertEquals(test + " failed", expErrCode, errCode);
+ }
+ } catch (Throwable t) {
+ if (failHandler != null) {
+ failHandler.fail(test, t);
+ }
+ fail(test + " failed: " + t);
+ } finally {
+ if (bis != null) {
+ bis.close();
+ }
+ if (responseHandler != null) {
+ responseHandler.close();
+ }
+ }
+ }
+
+ private String nextRequestId() {
+ return String.valueOf(requestId++);
+ }
+
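+ /* Restores the buffer to the original serialized request bytes. */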
+ private void refillBuffer(ByteBuf buffer, byte[] bytes) {
+ buffer.setBytes(0, bytes);
+ buffer.readerIndex(0);
+ buffer.writerIndex(bytes.length);
+ }
+
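+ /*
+ * The setXXX/readInt helpers below overwrite or read a value at a
+ * fixed offset while preserving the stream's current position.
+ */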
+ private void setPackedInt(ByteOutputStream out, int offset, int value)
+ throws IOException {
+
+ int savedOffset = out.getOffset();
+ out.setWriteIndex(offset);
+ SerializationUtil.writePackedInt(out, value);
+ out.setWriteIndex(savedOffset);
+ }
+
+ private void setInt(ByteOutputStream out, int offset, int value)
+ throws IOException {
+
+ int savedOffset = out.getOffset();
+ out.setWriteIndex(offset);
+ out.writeInt(value);
+ out.setWriteIndex(savedOffset);
+ }
+
+ private void setPackedLong(ByteOutputStream out, int offset, long value)
+ throws IOException {
+
+ int savedOffset = out.getOffset();
+ out.setWriteIndex(offset);
+ SerializationUtil.writePackedLong(out, value);
+ out.setWriteIndex(savedOffset);
+ }
+
+ private void setString(ByteOutputStream out, int offset, String value)
+ throws IOException {
+
+ int savedOffset = out.getOffset();
+ out.setWriteIndex(offset);
+ SerializationUtil.writeString(out, value);
+ out.setWriteIndex(savedOffset);
+ }
+
+ private void setByteArray(ByteOutputStream out, int offset, byte[] bytes)
+ throws IOException {
+
+ int savedOffset = out.getOffset();
+ out.setWriteIndex(offset);
+ SerializationUtil.writeByteArray(out, bytes);
+ out.setWriteIndex(savedOffset);
+ }
+
+ private int readInt(ByteInputStream in, int offset)
+ throws IOException {
+
+ int savedOffset = in.getOffset();
+ in.setOffset(offset);
+ int value = in.readInt();
+ in.setOffset(savedOffset);
+ return value;
+ }
+
+ private void createTable() {
+ tableOperation(handle, createTableDDL,
+ new TableLimits(20000, 20000, 50),
+ TableResult.State.ACTIVE, 10000);
+ tableOperation(handle, createIndexDDL, null,
+ TableResult.State.ACTIVE, 10000);
+ }
+
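+ /*
+ * Builds a structurally valid Version from fabricated KV internals
+ * (UUID, VLSN, LSN, RepNodeId) for use in version-based tests.
+ */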
+ private Version genVersion() {
+ final UUID uuid = UUID.randomUUID();
+ final long vlsn = 123456789;
+ final long lsn = 0x1234567812345678L;
+ final RepNodeId repNodeId = new RepNodeId(1234, 1234);
+ final oracle.kv.Version kvVersion =
+ new oracle.kv.Version(uuid, vlsn, repNodeId, lsn);
+ return Version.createVersion(kvVersion.toByteArray());
+ }
+
+ private MapValue createTestValue() {
+ MapValue row = new MapValue();
+ row.put("id", 1);
+ row.put("name", "string value");
+ row.put("count", Long.MAX_VALUE);
+ row.put("avg", Double.MAX_VALUE);
+ row.put("sum", new BigDecimal("12345678901234567890123456789012345678"));
+ row.put("exp", true);
+ row.put("key", genBytes(30, null));
+
+ MapValue map = new MapValue();
+ map.put("k1", 100);
+ map.put("k2", 200);
+ map.put("k3", 300);
+ row.put("map", map);
+
+ ArrayValue array = new ArrayValue();
+ array.add("elem1");
+ array.add("elem2");
+ array.add("elem3");
+ row.put("array", array);
+
+ MapValue rec = new MapValue();
+ rec.put("rid", 1024);
+ rec.put("rs", "nosql");
+ row.put("record", rec);
+
+ return row;
+ }
+
+ private MapValue createTestKey(int id) {
+ return new MapValue().put("id", id);
+ }
+
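+ /*
+ * Generates length bytes: a deterministic ascending pattern when
+ * rand is null, random bytes otherwise.
+ */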
+ private byte[] genBytes(int length, Random rand) {
+ byte[] bytes = new byte[length];
+ for (int i = 0; i < bytes.length; i++) {
+ bytes[i] = (rand == null)? (byte)(i % 256) :
+ (byte)rand.nextInt(256);
+ }
+ return bytes;
+ }
+
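+ /* Hex-dumps bytes as Java byte literals, five per line. */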
+ private static void printBytes(String title, byte[] bytes) {
+ final char[] hexArray = "0123456789ABCDEF".toCharArray();
+ StringBuilder sb = new StringBuilder(title);
+ sb.append("[");
+ sb.append(bytes.length);
+ sb.append("]");
+ for (int j = 0; j < bytes.length; j++ ) {
+ int v = bytes[j] & 0xFF;
+ if (j % 5 == 0) {
+ sb.append("\n\t");
+ }
+ sb.append("(byte)0x");
+ sb.append(hexArray[v >>> 4]);
+ sb.append(hexArray[v & 0x0F]);
+ sb.append(", ");
+ }
+ System.out.println(sb.toString());
+ }
+
+ /**
+ * For debugging purposes.
+ */
+ @SuppressWarnings("unused")
+ private byte[] corruptBuffer(ByteBuf buffer) {
+ int offset = 18;
+ byte[] bytes = new byte[] {
+(byte)0xA1, (byte)0x76, (byte)0x46, (byte)0x11, (byte)0x0C,
+(byte)0xD8, (byte)0x25, (byte)0x66,
+ };
+ buffer.setBytes(offset, bytes);
+ return bytes;
+ }
+
+ /*
+ * Callback invoked when executeRequest() fails.
+ */
+ @FunctionalInterface
+ private interface ExecuteFailHandler {
+ void fail(String test, Throwable ex);
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ChildTableTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ChildTableTest.java
new file mode 100644
index 00000000..32a02544
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ChildTableTest.java
@@ -0,0 +1,1588 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2011, 2020 Oracle and/or its affiliates. All rights reserved.
+ *
+ */
+package oracle.nosql.proxy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.junit.Test;
+
+import oracle.nosql.driver.IndexExistsException;
+import oracle.nosql.driver.IndexLimitException;
+import oracle.nosql.driver.IndexNotFoundException;
+import oracle.nosql.driver.KeySizeLimitException;
+import oracle.nosql.driver.RowSizeLimitException;
+import oracle.nosql.driver.TableExistsException;
+import oracle.nosql.driver.TableLimitException;
+import oracle.nosql.driver.TableNotFoundException;
+import oracle.nosql.driver.TimeToLive;
+import oracle.nosql.driver.Version;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.ListTablesResult;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutRequest.Option;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.TableResult.State;
+import oracle.nosql.driver.ops.TableUsageRequest;
+import oracle.nosql.driver.ops.WriteMultipleRequest;
+import oracle.nosql.driver.ops.WriteMultipleRequest.OperationRequest;
+import oracle.nosql.driver.ops.WriteMultipleResult;
+import oracle.nosql.driver.ops.WriteMultipleResult.OperationResult;
+import oracle.nosql.driver.values.MapValue;
+
+/**
+ * Test child table operations and data access operations.
+ */
+public class ChildTableTest extends ProxyTestBase {
+ private final static TableLimits limits = new TableLimits(500, 500, 1);
+ private final static int WAIT_MS = 10000;
+
+ private final static String createTDdl =
+ "create table t(id integer, name string, s string, primary key(id))";
+ private final static String createTADdl =
+ "create table t.a(ida integer, name string, s string, primary key(ida))";
+ private final static String createIfNotExistsTADdl =
+ "create table if not exists t.a(" +
+ "ida integer, name string, s string, primary key(ida))";
+ private final static String createTABDdl =
+ "create table t.a.b(idb integer, name string, s string, primary key(idb))";
+ private final static String createTGDdl =
+ "create table t.g(idg integer, name string, s string, primary key(idg))";
+ private final static String createXDdl =
+ "create table x(id integer, name string, s string, primary key(id))";
+
+ /**
+ * Test child table related table operations:
+ * 1. create table
+ * 2. get table
+ * 3. list tables
+ * 4. create/drop index
+ * 5. alter table
+ * 6. drop table
+ */
+ @Test
+ public void testBasicTableOps() {
+
+ TableResult tr;
+
+ /*
+ * Create table
+ */
+ tr = tableOperation(handle, createTDdl, limits, WAIT_MS);
+ if (!onprem) {
+ assertNotNull(tr.getTableLimits());
+ }
+ checkTableInfo(tr, "t", limits);
+
+ tr = tableOperation(handle, createTADdl, null, WAIT_MS);
+ checkTableInfo(tr, "t.a", null);
+
+ /* create table t.a again, expect to get TableExistsException */
+ tableOperation(handle, createTADdl,
+ null /* tableLimits */,
+ null /* tableName */,
+ TableResult.State.ACTIVE,
+ TableExistsException.class);
+
+ /* creating table with if not exists should succeed */
+ tr = tableOperation(handle, createIfNotExistsTADdl, null, WAIT_MS);
+
+ tr = tableOperation(handle, createTABDdl, null, WAIT_MS);
+ checkTableInfo(tr, "t.a.b", null);
+
+ tr = tableOperation(handle, createTGDdl, null, WAIT_MS);
+ checkTableInfo(tr, "t.g", null);
+
+ /*
+ * Get table
+ */
+ tr = getTable("t.a", handle);
+ checkTableInfo(tr, "t.a", null);
+
+ tr = getTable("t.a.b", handle);
+ checkTableInfo(tr, "t.a.b", null);
+
+ /*
+ * List tables
+ */
+ if (!onprem) {
+ ListTablesResult lsr = listTables(handle);
+ assertEquals(4, lsr.getTables().length);
+ String[] tables = lsr.getTables();
+ Arrays.sort(tables);
+ assertTrue(Arrays.equals(new String[] {"t", "t.a", "t.a.b", "t.g"},
+ tables));
+ }
+
+ /*
+ * Create/Drop index
+ */
+ String ddl;
+
+ ddl = "create index idx1 on t.a.b(s)";
+ tr = tableOperation(handle, ddl, null, WAIT_MS);
+
+ /* create index again, expect to get IndexExistsException */
+ tableOperation(handle, ddl, null, null, TableResult.State.ACTIVE,
+ IndexExistsException.class);
+
+ ddl = "create index if not exists idx1 on t.a.b(s)";
+ tr = tableOperation(handle, ddl, null, WAIT_MS);
+
+ ddl = "drop index idx1 on t.a.b";
+ tr = tableOperation(handle, ddl, null, WAIT_MS);
+
+ /* drop index again, expect to get IndexNotFoundException */
+ tableOperation(handle, ddl, null, null, TableResult.State.ACTIVE,
+ IndexNotFoundException.class);
+
+ ddl = "drop index if exists idx1 on t.a.b";
+ tr = tableOperation(handle, ddl, null, WAIT_MS);
+
+ /*
+ * Alter table
+ */
+ ddl = "alter table t.a (add i integer)";
+ tr = tableOperation(handle, ddl, null, WAIT_MS);
+
+ ddl = "alter table t.g (drop s)";
+ tr = tableOperation(handle, ddl, null, WAIT_MS);
+
+ /*
+ * Drop table
+ */
+
+ ddl = "drop table t.a.b";
+ tableOperation(handle, ddl, null, WAIT_MS);
+
+ ddl = "drop table if exists t.a.b";
+ tableOperation(handle, ddl, null, WAIT_MS);
+ }
+
+ /**
+ * Test that child tables are counted against the tenancy's total
+ * table limit.
+ */
+ @Test
+ public void testLimitTables() {
+ assumeTrue("Skipping testLimitTables if not minicloud or cloud test " +
+ "or tenantLimits is not provided",
+ cloudRunning && tenantLimits != null);
+
+ final int tableLimit = tenantLimits.getNumTables();
+ if (tableLimit > NUM_TABLES) {
+ /*
+ * To prevent this test from running too long, skip the test if the
+ * table number limit > ProxyTestBase.NUM_TABLES
+ */
+ return;
+ }
+
+ String ddl = "create table p(id integer, s string, primary key(id))";
+ tableOperation(handle, ddl, new TableLimits(50, 50, 1), WAIT_MS);
+
+ String fmt = "create table %s(%s integer, s string, primary key(%s))";
+ for (int i = 0; i < tableLimit; i++) {
+ String table = "p.c" + i;
+ ddl = String.format(fmt, table, "ck", "ck");
+ try {
+ tableOperation(handle, ddl, null, WAIT_MS);
+ if (i == tableLimit - 1) {
+ fail("create table should have failed, num create table: "
+ + i + ", limit: " + tableLimit);
+ }
+ } catch (TableLimitException tle) {
+ if (i < tableLimit - 1) {
+ fail("create table should succeed: " + table);
+ }
+ }
+ }
+
+ /*
+ * List tables
+ */
+ ListTablesResult lsr = listTables(handle);
+ assertEquals(tableLimit, lsr.getTables().length);
+ }
+
+ /**
+ * Test that the number of columns in a child table is subject to
+ * TableRequestLimits.columnsPerTable.
+ *
+ * The child table inherits the primary key of its parent, so the
+ * parent table's primary key fields count toward the limit.
+ */
+ @Test
+ public void testLimitColumns() {
+ assumeTrue("Skipping testLimitColumns if not minicloud or cloud test " +
+ "or tenantLimits is not provided",
+ cloudRunning && tenantLimits != null);
+
+ final int columnLimit = tenantLimits.getStandardTableLimits().
+ getColumnsPerTable();
+
+ String ddl = "create table p(" +
+ " k1 integer, " +
+ " k2 integer, " +
+ " k3 integer, " +
+ " s string, " +
+ " primary key(k1, k2, k3))";
+ tableOperation(handle, ddl, new TableLimits(50, 50, 1), WAIT_MS);
+
+ /*
+ * Create table p.c with N columns, where N is the column-per-table limit.
+ */
+ StringBuilder sb;
+ sb = new StringBuilder("create table p.c(c1 integer, primary key(c1)");
+ for (int i = 4; i < columnLimit; i++) {
+ sb.append(", s").append(i).append(" string");
+ }
+ sb.append(")");
+ tableOperation(handle, sb.toString(), null, WAIT_MS);
+
+ /*
+ * Create table p.c.d with N + 1 columns, where N is the
+ * column-per-table limit.
+ */
+ sb = new StringBuilder("create table p.c.d(d1 integer, primary key(d1)");
+ for (int i = 5; i < columnLimit + 1; i++) {
+ sb.append(", s").append(i).append(" string");
+ }
+ sb.append(")");
+ try {
+ tableOperation(handle, sb.toString(), null, WAIT_MS);
+ fail("create table should have failed as its column number " +
+ "exceeded the max number of column per table: " + columnLimit);
+ } catch (IllegalArgumentException iae) {
+ /* expected */
+ }
+
+ /*
+ * Adding another field to p.c should fail because the column count
+ * would exceed the limit
+ */
+ ddl = "alter table p.c(add n1 integer)";
+ try {
+ tableOperation(handle, ddl, null, WAIT_MS);
+ fail("adding column should have failed as its column number " +
+ "exceeded the max number of column per table: " + columnLimit);
+ } catch (IllegalArgumentException iae) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testLimitIndexes() {
+ assumeTrue("Skipping testLimitIndexes if not minicloud or cloud test " +
+ "or tenantLimits is not provided",
+ cloudRunning && tenantLimits != null);
+
+ final int indexLimit = tenantLimits.getStandardTableLimits().
+ getIndexesPerTable();
+
+ /* Create table t */
+ tableOperation(handle, createTDdl, limits, WAIT_MS);
+
+ /* Create table t.c and N indexes (N = indexLimit) */
+ StringBuilder sb = new StringBuilder("create table t.c(idc integer, ");
+ for (int i = 0; i < indexLimit; i++) {
+ sb.append("c").append(i).append(" integer, ");
+ }
+ sb.append("primary key(idc))");
+ tableOperation(handle, sb.toString(), null, WAIT_MS);
+
+ for (int i = 0; i < indexLimit; i++) {
+ sb.setLength(0);
+ sb.append("create index idx").append(i)
+ .append(" on t.c(c").append(i).append(")");
+
+ tableOperation(handle, sb.toString(), null, WAIT_MS);
+ }
+
+ String ddl = "create index idxC0C1 on t.c(c0, c1)";
+ tableOperation(handle, ddl, null, null, TableResult.State.ACTIVE,
+ IndexLimitException.class);
+ }
+
+ @Test
+ public void testLimitKeyValueSize() {
+
+ assumeTrue("Skipping testLimitKeyValueSize if onprem test",
+ !onprem && tenantLimits != null);
+
+ String ddl;
+
+ ddl = "create table t(k1 string, s string, primary key(k1))";
+ tableOperation(handle, ddl, limits, WAIT_MS);
+
+ ddl = "create table t.c(k2 string, s string, primary key(k2))";
+ tableOperation(handle, ddl, null, WAIT_MS);
+
+ ddl = "create table t.c.g(k3 string, s string, primary key(k3))";
+ tableOperation(handle, ddl, null, WAIT_MS);
+
+ MapValue row = new MapValue();
+ String s1 = "a";
+
+ final int maxPKeySize = tenantLimits.getStandardTableLimits().
+ getPrimaryKeySizeLimit();
+ final int maxValSize = tenantLimits.getStandardTableLimits().
+ getRowSizeLimit();
+ final int maxIdxKeySize =
+ (cloudRunning ? tenantLimits.getStandardTableLimits().
+ getIndexKeySizeLimit() : 64);
+
+ PutRequest req;
+ String sval;
+
+ /*
+ * Primary key size exceeds the size limit
+ */
+
+ /* Put row to t.c with max key size, should succeed */
+ sval = genString(maxPKeySize - 1);
+ row.put("k1", s1)
+ .put("k2", sval)
+ .put("s", s1);
+ req = new PutRequest().setTableName("t.c").setValue(row);
+ handle.put(req);
+
+ /*
+ * Put row with max key size + 1, should have failed with
+ * KeySizeLimitException
+ */
+ row.put("k1", sval + "a");
+ try {
+ handle.put(req);
+ fail("Expect to catch KeySizeLimitException but not");
+ } catch (KeySizeLimitException ex) {
+ /* expected */
+ }
+
+ /* Put row to t.c.g with max key size, should succeed */
+ sval = genString(maxPKeySize - 2);
+ row.put("k1", s1)
+ .put("k2", s1)
+ .put("k3", sval)
+ .put("s", s1);
+ req = new PutRequest().setTableName("t.c.g").setValue(row);
+ handle.put(req);
+
+ /*
+ * Put row to t.c.g with max key size + 1, should have failed with
+ * KeySizeLimitException
+ */
+ row.put("k3", sval + "a");
+ req = new PutRequest().setTableName("t.c.g").setValue(row);
+ try {
+ handle.put(req);
+ fail("Expect to catch KeySizeLimitException but not");
+ } catch (KeySizeLimitException ex) {
+ /* expected */
+ }
+
+ /*
+ * Row size exceeds the size limit
+ */
+
+ /*
+ * Put row to t.c with length > max value size, should fail with
+ * RowSizeLimitException
+ */
+ sval = genString(maxValSize);
+ row.put("k1", s1)
+ .put("k2", s1)
+ .put("s", sval);
+ req = new PutRequest().setTableName("t.c").setValue(row);
+ try {
+ handle.put(req);
+ fail("Expect to catch RowSizeLimitException but not");
+ } catch (RowSizeLimitException ex) {
+ /* expected */
+ }
+
+ /*
+ * Put row to t.c.g with length > max value size, should fail with
+ * RowSizeLimitException
+ */
+ row.put("k1", s1)
+ .put("k2", s1)
+ .put("k3", s1)
+ .put("s", sval);
+ req = new PutRequest().setTableName("t.c.g").setValue(row);
+ try {
+ handle.put(req);
+ fail("Expect to catch RowSizeLimitException but not");
+ } catch (RowSizeLimitException ex) {
+ /* expected */
+ }
+
+ /*
+ * Index key size exceeds the size limit
+ */
+ String[] indexDdls = new String[] {
+ "create index idxc1 on t.c(s)",
+ "create index idxg1 on t.c.g(s)",
+ };
+
+ for (String idxDdl : indexDdls) {
+ tableOperation(handle, idxDdl, null, WAIT_MS);
+ }
+
+ /* Put row to t.c with max index key size, should succeed */
+ sval = genString(maxIdxKeySize);
+ row.put("k1", s1)
+ .put("k2", s1)
+ .put("s", sval);
+ req = new PutRequest().setTableName("t.c").setValue(row);
+ handle.put(req);
+
+ /*
+ * Put row to t.c with max index key size + 1, should fail with
+ * KeySizeLimitException
+ */
+ row.put("s", sval + "a");
+ try {
+ handle.put(req);
+ fail("Expected to catch KeySizeLimitException");
+ } catch (KeySizeLimitException ex) {
+ /* expected */
+ }
+
+ /* Put row to t.c.g with max index key size, should succeed */
+ row.put("k1", s1)
+ .put("k2", s1)
+ .put("k3", s1)
+ .put("s", sval);
+ req = new PutRequest().setTableName("t.c.g").setValue(row);
+ handle.put(req);
+
+ /*
+ * Put row to t.c.g with max index key size + 1, should fail with
+ * KeySizeLimitException
+ */
+ row.put("s", sval + "a");
+ try {
+ handle.put(req);
+ fail("Expect to catch KeySizeLimitException but not");
+ } catch (KeySizeLimitException ex) {
+ /* expected */
+ }
+ }
+
+ /**
+ * Test invalid table operations on child tables:
+ * 1. Limits cannot be set on a child table at creation time
+ * 2. A child table cannot be created if its parent doesn't exist
+ * 3. Limits of a child table cannot be updated
+ * 4. A parent table cannot be dropped while referenced by any child
+ * 5. Table usage cannot be retrieved for a child table
+ */
+ @Test
+ public void testInvalidTableOps() {
+ /* Cannot set limits on child table */
+ tableOperation(handle, createTADdl, limits, null,
+ TableResult.State.ACTIVE,
+ (cloudRunning ? IllegalArgumentException.class :
+ TableNotFoundException.class));
+
+ /* The parent table of t.a does not exist */
+ tableOperation(handle, createTADdl, null, null,
+ TableResult.State.ACTIVE,
+ (cloudRunning ? IllegalArgumentException.class :
+ TableNotFoundException.class));
+
+ tableOperation(handle, createTDdl, limits, WAIT_MS);
+ tableOperation(handle, createTADdl, null, WAIT_MS);
+
+ /* Updating limits of a child table is not allowed */
+ if (!onprem) {
+ tableOperation(handle, null, new TableLimits(600, 400, 1), "t.a",
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+ }
+
+ /* Cannot drop the parent table still referenced by child table */
+ String ddl = "drop table t";
+ tableOperation(handle, ddl, null, null, TableResult.State.DROPPED,
+ IllegalArgumentException.class);
+
+ if (cloudRunning) {
+ /* Getting table usage of a child table is not allowed */
+ TableUsageRequest tuReq = new TableUsageRequest().setTableName("t.a");
+ try {
+ handle.getTableUsage(tuReq);
+ fail("GetTableUsage on child table should have failed");
+ } catch (IllegalArgumentException iae) {
+ /* expected */
+ }
+ }
+ }
+
+ /**
+ * Test put/get/delete of rows in child tables.
+ */
+ @Test
+ public void testPutGetDelete() {
+ int recordKB = 2;
+ tableOperation(handle, createTDdl, limits, WAIT_MS);
+ tableOperation(handle, createTADdl, null, WAIT_MS);
+ tableOperation(handle, createTABDdl, null, WAIT_MS);
+
+ MapValue row;
+ MapValue key;
+
+ String longStr = genString((recordKB - 1) * 1024);
+ /* put a row to table t */
+ row = makeTRow(1, longStr);
+ doPutRow("t", row, recordKB);
+
+ /* put a row to table t.a */
+ row = makeTARow(1, 2, longStr);
+ doPutRow("t.a", row, recordKB);
+ key = new MapValue().put("id", 1).put("ida", 2);
+ doGetRow("t.a", key, row, recordKB, null);
+ doDeleteRow("t.a", key, recordKB);
+
+ /* put a row to table t.a.b */
+ row = makeTABRow(1, 2, 3, longStr);
+ doPutRow("t.a.b", row, recordKB);
+ key = new MapValue().put("id", 1).put("ida", 2).put("idb", 3);
+ doGetRow("t.a.b", key, row, recordKB, null);
+ doDeleteRow("t.a.b", key, recordKB);
+ }
+
+ /**
+ * Test queries against child tables.
+ */
+ @Test
+ public void testQuery() {
+ tableOperation(handle, createTDdl, limits, WAIT_MS);
+ tableOperation(handle, createTADdl, null, WAIT_MS);
+ tableOperation(handle, createTABDdl, null, WAIT_MS);
+
+ final int keyCost = getMinRead();
+
+ final int numT = 30;
+ final int numAPerT = 2;
+ final int numBPerA = 1;
+
+ final int numA = numT * numAPerT;
+ final int numB = numT * numAPerT * numBPerA;
+
+ final int rkbT = 1;
+ final int rkbA = 2;
+ final int rkbB = 2;
+ final int rkbMaxTA = Math.max(rkbT, rkbA);
+ final int rkbMax = Math.max(Math.max(rkbT, rkbA), rkbB);
+
+ final String s1 = genString(1);
+ final String s1K = genString(1024);
+
+ for (int i = 0; i < numT; i++) {
+ doPutRow("t", makeTRow(i, s1), rkbT);
+ for (int j = 0; j < numAPerT; j++) {
+ doPutRow("t.a", makeTARow(i, j, s1K), rkbA);
+ for (int k = 0; k < numBPerA; k++) {
+ doPutRow("t.a.b", makeTABRow(i, j, k, s1K), rkbB);
+ }
+ }
+ }
+
+ String query;
+ int count;
+ int cost;
+ int limit = 10;
+
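+ /*
+ * Expected read cost: key-only projections charge keyCost per row
+ * scanned; "select *" charges each row's record KB, plus keyCost
+ * per row on configurations that double-charge key reads.
+ */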
+ query = "select id, ida from t.a";
+ count = numA;
+ cost = numA * keyCost;
+ runQueryWithLimit(query, cost/5, limit, count, cost, keyCost);
+
+ query = "select * from t.a";
+ count = numA;
+ cost = numA * rkbA;
+ if (!dontDoubleChargeKey()) {
+ cost += numA * keyCost;
+ }
+ runQueryWithLimit(query, cost/5, limit, count, cost, rkbA);
+
+ query = "select id, ida, idb from t.a.b where idb = 100";
+ count = 0;
+ cost = numB * keyCost;
+ runQueryWithLimit(query, cost/5, limit, count, cost, keyCost);
+
+ query = "select * from t.a.b where s is null";
+ count = 0;
+ cost = numB * rkbB;
+ if (!dontDoubleChargeKey()) {
+ cost += numB * keyCost;
+ }
+ runQueryWithLimit(query, cost/5, limit, count, cost, rkbB);
+
+ query = "select t.id, a.ida from nested tables(t descendants(t.a a))";
+ count = numA;
+ cost = (numT + numA) * keyCost;
+ runQueryWithLimit(query, cost/5, limit, count, cost, keyCost);
+
+ query = "select * from nested tables(t descendants(t.a a))";
+ count = numA;
+ cost = numT * rkbT + numA * rkbA;
+ if (!dontDoubleChargeKey()) {
+ cost += (numT + numA) * keyCost;
+ }
+ runQueryWithLimit(query, cost/5, limit, count, cost, rkbMaxTA);
+
+ query = "select t.id, a.ida, b.idb " +
+ "from nested tables(t descendants(t.a a, t.a.b b))";
+ count = numB;
+ cost = (numT + numA + numB) * keyCost;
+ runQueryWithLimit(query, cost/5, limit, count, cost, keyCost);
+
+ query = "select * from nested tables(t descendants(t.a a, t.a.b b))";
+ count = numB;
+ cost = numT * rkbT + numA * rkbA + numB * rkbB;
+ if (!dontDoubleChargeKey()) {
+ cost += (numT + numA + numB) * keyCost;
+ }
+ runQueryWithLimit(query, cost/5, limit, count, cost, rkbMax);
+
+ query = "select a.ida, t.id from nested tables(t.a a ancestors(t))";
+ count = numA;
+ cost = (numA + numT) * keyCost;
+ runQueryWithLimit(query, cost/5, limit, count, cost, 2 * keyCost);
+
+ query = "select * from nested tables(t.a a ancestors(t))";
+ count = numA;
+ cost = numA * rkbA + numT * rkbT;
+ if (!dontDoubleChargeKey()) {
+ cost += (numA + numT) * keyCost;
+ }
+ runQueryWithLimit(query, cost/5, limit, count, cost, rkbT + rkbA);
+
+ query = "select b.idb, a.ida, t.id " +
+ "from nested tables(t.a.b b ancestors(t, t.a a))";
+ count = numB;
+ cost = (numB + numA + numT) * keyCost;
+ runQueryWithLimit(query, cost/5, limit, count, cost, 3 * keyCost);
+
+ query = "select * from nested tables(t.a.b b ancestors(t, t.a a))";
+ count = numB;
+ cost = numB * rkbB + numA * rkbA + numT * rkbT;
+ if (!dontDoubleChargeKey()) {
+ cost += (numB + numA + numT) * keyCost;
+ }
+ runQueryWithLimit(query, cost/5, limit, count, cost, rkbT + rkbA + rkbB);
+
+ query = "select a.ida, t.id, b.idb " +
+ "from nested tables(t.a a ancestors(t) descendants(t.a.b b))";
+ count = numA;
+ cost = (numA + numT + numB) * keyCost;
+ runQueryWithLimit(query, cost/5, limit, count, cost, 2 * keyCost);
+
+ query = "select * " +
+ "from nested tables(t.a a ancestors(t) descendants(t.a.b b))";
+ count = numB;
+ cost = numA * rkbA + numT * rkbT + numB * rkbB;
+ if (!dontDoubleChargeKey()) {
+ cost += (numA + numT + numB) * keyCost;
+ }
+ runQueryWithLimit(query, cost/5, limit, count, cost, rkbT + rkbA);
+
+ String ddl = "create index if not exists idxName on t.a(name)";
+ tableOperation(handle, ddl, null, WAIT_MS);
+
+ query = "select a.id, a.ida, a.name, b.idb " +
+ "from nested tables(t.a a ancestors(t) descendants(t.a.b b)) " +
+ "where a.name > 'n'";
+ count = numB;
+ /*
+ * TODO: NOSQL-719
+         * Enable the cost check in the cloud test after it is fixed
+ */
+ if (useCloudService) {
+ cost = 0;
+ } else {
+ cost = (numA + numT + numB) * keyCost;
+ if (!dontDoubleChargeKey()) {
+ cost += numA * keyCost;
+ }
+ }
+ runQueryWithLimit(query, cost/5, limit, count, cost, 2 * keyCost);
+
+ query = "select * " +
+ "from nested tables(t.a a ancestors(t) descendants(t.a.b b)) " +
+ "where a.name > 'n'";
+ count = numB;
+ cost = numA * rkbA + numT * rkbT + numB * rkbB;
+ if (!dontDoubleChargeKey()) {
+ cost += (2 * numA + numT + numB) * keyCost;
+ }
+ runQueryWithLimit(query, cost/5, limit, count, cost, rkbT + rkbA);
+ }
+
+ @Test
+ public void testWriteMultiple()
+ throws Exception {
+
+ tableOperation(handle, createTDdl, limits, WAIT_MS);
+ tableOperation(handle, createTADdl, null, WAIT_MS);
+ tableOperation(handle, createTABDdl, null, WAIT_MS);
+ tableOperation(handle, createXDdl, limits, WAIT_MS);
+
+ final int rkb = 2;
+ final String s1K = genString((rkb - 1) * 1024 + 1);
+
+ WriteMultipleRequest req = new WriteMultipleRequest();
+ WriteMultipleResult res;
+ MapValue key;
+ MapValue row;
+ PutRequest put;
+ DeleteRequest del;
+
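+        /*
+         * The boolean passed to WriteMultipleRequest.add() is
+         * abortIfUnsuccessful: when true, a failed sub-operation aborts
+         * the whole batch.
+         */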
+ /* Put operations */
+ int id = 1;
+ row = makeTRow(id, s1K);
+ put = new PutRequest().setTableName("t").setValue(row);
+ req.add(put, true);
+
+ row = makeTARow(id, 1, s1K);
+ put = new PutRequest().setTableName("t.a").setValue(row);
+ req.add(put, true);
+
+ row = makeTABRow(id, 1, 1, s1K);
+ put = new PutRequest().setTableName("t.a.b").setValue(row);
+ req.add(put, true);
+
+ row = makeTABRow(id, 1, 2, s1K);
+ put = new PutRequest().setTableName("t.a.b").setValue(row);
+ req.add(put, true);
+
+ res = handle.writeMultiple(req);
+ assertTrue(res.getSuccess());
+ if (!onprem) {
+ assertEquals(rkb * req.getNumOperations(), res.getWriteKB());
+ }
+
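+        /*
+         * Save the versions returned by the puts; they drive the
+         * conditional (ifVersion) operations and ReturnInfo checks below.
+         */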
+ Version verT11 = res.getResults().get(0).getVersion();
+ Version verTA11 = res.getResults().get(1).getVersion();
+ Version verTAB11 = res.getResults().get(2).getVersion();
+
+ /*
+ * Test ReturnInfo
+ */
+ req.clear();
+
+ /* putIfAbsent with existing row */
+ row = makeTRow(id, s1K);
+ put = new PutRequest().setTableName("t")
+ .setOption(Option.IfAbsent)
+ .setReturnRow(true)
+ .setValue(row);
+ req.add(put, false);
+
+ /* putIfVersion with unmatched version */
+ row = makeTARow(id, 1, s1K);
+ put = new PutRequest()
+ .setTableName("t.a")
+ .setMatchVersion(verT11)
+ .setReturnRow(true)
+ .setValue(row);
+ req.add(put, false);
+
+ /* deleteIfVersion with unmatched version*/
+ key = makeTABKey(id, 1, 1);
+ del = new DeleteRequest()
+ .setTableName("t.a.b")
+ .setMatchVersion(verTA11)
+ .setReturnRow(true)
+ .setKey(key);
+ req.add(del, false);
+
+ res = handle.writeMultiple(req);
+ assertTrue(res.getSuccess());
+ assertEquals(0, res.getWriteKB());
+
+        List<OperationResult> results = res.getResults();
+ OperationResult r = results.get(0);
+ assertFalse(r.getSuccess());
+ assertEquals(makeTRow(id, s1K), r.getExistingValue());
+ assertTrue(Arrays.equals(verT11.getBytes(),
+ r.getExistingVersion().getBytes()));
+ assertTrue(r.getExistingModificationTime() > 0);
+
+ r = results.get(1);
+ assertFalse(r.getSuccess());
+ assertEquals(makeTARow(id, 1, s1K), r.getExistingValue());
+ assertTrue(Arrays.equals(verTA11.getBytes(),
+ r.getExistingVersion().getBytes()));
+ assertTrue(r.getExistingModificationTime() > 0);
+
+ r = results.get(2);
+ assertFalse(r.getSuccess());
+ assertEquals(makeTABRow(id, 1, 1, s1K), r.getExistingValue());
+ assertTrue(Arrays.equals(verTAB11.getBytes(),
+ r.getExistingVersion().getBytes()));
+ assertTrue(r.getExistingModificationTime() > 0);
+
+ /*
+ * abortIfUnsuccessful = true, check failedOperation only.
+ */
+ req.clear();
+ row = makeTRow(id, s1K);
+ put = new PutRequest().setTableName("t")
+ .setReturnRow(true)
+ .setValue(row);
+ req.add(put, true);
+
+        row = makeTABRow(id, 1, 1, s1K + "_u");
+        put = new PutRequest().setTableName("t.a.b")
+                              .setOption(Option.IfAbsent)
+                              .setReturnRow(true)
+                              .setValue(row);
+ req.add(put, true);
+
+ res = handle.writeMultiple(req);
+ assertFalse(res.getSuccess());
+ assertEquals(1, res.getFailedOperationIndex());
+ r = res.getFailedOperationResult();
+ assertFalse(r.getSuccess());
+ assertEquals(makeTABRow(id, 1, 1, s1K), r.getExistingValue());
+        assertTrue(Arrays.equals(verTAB11.getBytes(),
+                                 r.getExistingVersion().getBytes()));
+ assertTrue(r.getExistingModificationTime() > 0);
+
+ /*
+ * Delete operations
+ */
+ req.clear();
+ key = makeTKey(id);
+ del = new DeleteRequest().setTableName("t").setKey(key);
+ req.add(del, true);
+
+ key = makeTAKey(id, 1);
+ del = new DeleteRequest().setTableName("t.a").setKey(key);
+ req.add(del, true);
+
+ key = makeTABKey(id, 1, 1);
+ del = new DeleteRequest()
+ .setTableName("t.a.b")
+ .setMatchVersion(verTAB11)
+ .setKey(key);
+ req.add(del, true);
+
+ key = makeTABKey(id, 1, 2);
+ del = new DeleteRequest().setTableName("t.a.b").setKey(key);
+ req.add(del, true);
+
+ res = handle.writeMultiple(req);
+ assertTrue(res.getSuccess());
+ if (!onprem) {
+ assertEquals(rkb * req.getNumOperations(), res.getWriteKB());
+ }
+
+ /*
+ * Test GeneratedValue for Identity columns
+ */
+ req.clear();
+
+ String ddl;
+ ddl = "alter table t(add seq integer generated always as identity)";
+ tableOperation(handle, ddl, null, WAIT_MS);
+ ddl = "alter table t.a.b(add seq long generated always as identity" +
+ "(start with 100 increment by -2))";
+ tableOperation(handle, ddl, null, WAIT_MS);
+
+ id++;
+ row = makeTRow(id, s1K);
+ put = new PutRequest().setTableName("T").setValue(row);
+ req.add(put, true);
+
+ row = makeTARow(id, 1, s1K);
+ put = new PutRequest().setTableName("T.A").setValue(row);
+ req.add(put, true);
+
+ int numRows = 3;
+ for (int i = 0; i < numRows; i++) {
+ row = makeTABRow(id, 1, i, s1K);
+ put = new PutRequest().setTableName("T.A.B").setValue(row);
+ req.add(put, true);
+ }
+
+ res = handle.writeMultiple(req);
+ assertTrue(res.getSuccess());
+ if (!onprem) {
+ assertEquals(rkb * req.getNumOperations(), res.getWriteKB());
+ }
+
+        List<WriteMultipleRequest.OperationRequest> ops = req.getOperations();
+ String tname;
+ int seqT = 1;
+ int seqTAB = 100;
+ int seqStep = -2;
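+        /*
+         * Identity expectations: t's seq starts at 1 (a single row is put
+         * to t), t.a has no identity column so no value is generated, and
+         * t.a.b's seq starts at 100 and decrements by 2 per row.
+         */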
+ for (int i = 0; i < res.getResults().size(); i++) {
+ r = res.getResults().get(i);
+ tname = ops.get(i).getRequest().getTableName();
+ if (tname.equalsIgnoreCase("t.a")) {
+ assertNull(r.getGeneratedValue());
+ } else {
+ /*
+ * TODO: NOSQL-720
+                 * enable the check below in the cloud test after it is fixed
+ */
+ if (!useCloudService) {
+ assertNotNull(r.getGeneratedValue());
+ if (tname.equalsIgnoreCase("t")) {
+ assertEquals(seqT, r.getGeneratedValue().getInt());
+ } else {
+                        /* t.a.b: seq is a LONG identity column */
+                        assertEquals(seqTAB, r.getGeneratedValue().getLong());
+ seqTAB += seqStep;
+ }
+ }
+ }
+ }
+
+ /* Test puts to single table */
+ req.clear();
+
+ id++;
+ row = makeTARow(id, 0, s1K);
+ put = new PutRequest().setTableName("t.a").setValue(row);
+ req.add(put, true);
+
+ row = makeTARow(id, 1, s1K);
+ put = new PutRequest().setTableName("T.a").setValue(row);
+ req.add(put, true);
+
+ row = makeTARow(id, 2, s1K);
+ put = new PutRequest()
+ .setTableName("T.A")
+ .setOption(Option.IfAbsent)
+ .setValue(row);
+ req.add(put, true);
+
+ key = makeTAKey(id, 3);
+ del = new DeleteRequest().setTableName("t.A").setKey(key);
+ req.add(del, false);
+
+ key = makeTAKey(id, 4);
+ del = new DeleteRequest().setTableName("t.a").setKey(key);
+ req.add(del, false);
+
+ res = handle.writeMultiple(req);
+ assertTrue(res.getSuccess());
+ int i = 0;
+ for (OperationResult or : res.getResults()) {
+ if (i++ < 3) {
+ assertTrue(or.getSuccess());
+ } else {
+ assertFalse(or.getSuccess());
+ }
+ }
+
+ /*
+ * Negative cases
+ */
+
+ /*
+ * Table not found: t.unknown.
+ *
+ * Sub requests:
+ * put -> t.unknown
+ * put -> t
+ */
+ req.clear();
+
+ row = makeTRow(1, s1K);
+ put = new PutRequest().setTableName("t.unknown").setValue(row);
+ req.add(put, true);
+
+ put = new PutRequest().setTableName("t").setValue(row);
+ req.add(put, true);
+ try {
+ handle.writeMultiple(req);
+ fail("Operation should have failed with TableNotFoundException");
+ } catch (TableNotFoundException e) {
+ /* expected */
+ checkErrorMessage(e);
+ }
+
+ /*
+ * Table not found: t.unknown.
+ *
+ * Sub requests:
+ * put -> t
+ * put -> t.unknown
+ */
+ req.clear();
+
+ row = makeTRow(1, s1K);
+ put = new PutRequest().setTableName("t").setValue(row);
+ req.add(put, true);
+
+ put = new PutRequest().setTableName("t.unknown").setValue(row);
+ req.add(put, true);
+
+ try {
+ handle.writeMultiple(req);
+ fail("Operation should have failed with TableNotFoundException");
+ } catch (TableNotFoundException e) {
+ /* expected */
+ checkErrorMessage(e);
+ }
+
+ /*
+ * IllegalArgumentException: Tables not related: t x
+ *
+ * Sub requests:
+ * put -> t
+ * put -> x
+ */
+ req.clear();
+
+ try {
+ row = makeTRow(1, s1K);
+ put = new PutRequest().setTableName("t").setValue(row);
+ req.add(put, true);
+
+ put = new PutRequest().setTableName("x").setValue(row);
+ req.add(put, true);
+ handle.writeMultiple(req);
+ fail("Operation should have failed with IllegalArgumentException");
+ } catch (IllegalArgumentException e) {
+ /* expected */
+ checkErrorMessage(e);
+ }
+
+ /*
+ * IllegalArgumentException: Shard key does not match
+ *
+ * Sub requests:
+ * put {id=1,..} -> t.a
+ * put {id=1,..} -> t.a.b
+ * put {id=2,..} -> T.A
+ * put {id=2,..} -> T.A.B
+ */
+ req.clear();
+
+ row = makeTARow(1, 1, s1K);
+ put = new PutRequest().setTableName("t.a").setValue(row);
+ req.add(put, true);
+
+ row = makeTABRow(1, 1, 1, s1K);
+ put = new PutRequest().setTableName("t.a.b").setValue(row);
+ req.add(put, true);
+
+ key = makeTAKey(2, 1);
+ del = new DeleteRequest().setTableName("T.A").setKey(key);
+ req.add(del, true);
+
+ key = makeTABKey(2, 1, 1);
+ del = new DeleteRequest().setTableName("T.A.B").setKey(key);
+ req.add(del, true);
+
+ try {
+ handle.writeMultiple(req);
+ fail("Operation should have failed with IllegalArgumentException");
+ } catch (IllegalArgumentException e) {
+ /* expected */
+ checkErrorMessage(e);
+ }
+
+ /*
+ * IllegalArgumentException: Missing primary key field: idb
+ *
+ * Sub requests:
+ * put {id=1,ida=1} -> t.a
+ * put {id=1,ida=1} -> t.a.b
+ */
+ req.clear();
+
+ row = makeTARow(1, 1, s1K);
+ put = new PutRequest().setTableName("t.a").setValue(row);
+ req.add(put, true);
+
+ put = new PutRequest().setTableName("t.a.b").setValue(row);
+ req.add(put, true);
+
+ try {
+ handle.writeMultiple(req);
+ fail("Operation should have failed with IllegalArgumentException");
+ } catch (IllegalArgumentException e) {
+ /* expected */
+ checkErrorMessage(e);
+ }
+ }
+
+ /* Test using table ocid in put/delete operations */
+ @Test
+ public void testWriteMultipleWithOcid() throws Exception {
+ assumeTrue("Skipping testWriteMulitpleWithOcid if not minicloud test",
+ cloudRunning);
+
+ tableOperation(handle, createTDdl, limits, WAIT_MS);
+ tableOperation(handle, createTADdl, null, WAIT_MS);
+ tableOperation(handle, createTABDdl, null, WAIT_MS);
+
+ String ddl = "create table t1(id integer, s string, primary key(id))";
+ tableOperation(handle, ddl, limits, WAIT_MS);
+
+ int rkb = 1;
+ final String s1 = genString(1);
+
+ WriteMultipleRequest req = new WriteMultipleRequest();
+ WriteMultipleResult res;
+ MapValue key;
+ MapValue row;
+ PutRequest put;
+ DeleteRequest del;
+
+ String ocidT = getTable("t", handle).getTableId();
+ String ocidTA = getTable("t.a", handle).getTableId();
+ String ocidTAB = getTable("t.a.b", handle).getTableId();
+ String ocidT1 = getTable("t1", handle).getTableId();
+
+ /* put ops */
+ int id = 1;
+ row = makeTRow(id, s1);
+ put = new PutRequest().setTableName(ocidT).setValue(row);
+ req.add(put, true);
+
+ row = makeTARow(id, 1, s1);
+ put = new PutRequest().setTableName(ocidTA).setValue(row);
+ req.add(put, true);
+
+ row = makeTARow(id, 2, s1);
+ put = new PutRequest().setTableName(ocidTA).setValue(row);
+ req.add(put, true);
+
+ row = makeTABRow(id, 1, 1, s1);
+ put = new PutRequest().setTableName(ocidTAB).setValue(row);
+ req.add(put, true);
+
+ res = handle.writeMultiple(req);
+ assertTrue(res.getSuccess());
+ if (!onprem) {
+ assertEquals(rkb * req.getNumOperations(), res.getWriteKB());
+ }
+
+ /* delete ops */
+ req.clear();
+
+ key = makeTKey(id);
+ del = new DeleteRequest().setTableName(ocidT).setKey(key);
+ req.add(del, true);
+
+ key = makeTAKey(id, 1);
+ del = new DeleteRequest().setTableName(ocidTA).setKey(key);
+ req.add(del, true);
+
+ key = makeTAKey(id, 2);
+ del = new DeleteRequest().setTableName(ocidTA).setKey(key);
+ req.add(del, true);
+
+ key = makeTABKey(id, 1, 1);
+ del = new DeleteRequest().setTableName(ocidTAB).setKey(key);
+ req.add(del, true);
+
+ res = handle.writeMultiple(req);
+ assertTrue(res.getSuccess());
+ if (!onprem) {
+ assertEquals(rkb * req.getNumOperations(), res.getWriteKB());
+ }
+
+ /*
+         * All operations should be on the same table or on tables that
+         * belong to the same top-level parent
+ */
+ req.clear();
+
+ row = makeTABRow(id, 1, 1, s1);
+ put = new PutRequest().setTableName(ocidTAB).setValue(row);
+ req.add(put, true);
+
+ row = new MapValue().put("id", 1).put("s", s1);
+ put = new PutRequest().setTableName(ocidT1).setValue(row);
+ req.add(put, true);
+
+ try {
+ res = handle.writeMultiple(req);
+ fail("Operation should have failed with IAE");
+ } catch (IllegalArgumentException ex) {
+ checkErrorMessage(ex);
+ }
+ }
+
+ @Test
+ public void testWriteMultipleTTL() {
+
+ final TimeToLive tTTL = TimeToLive.ofDays(1);
+ tableOperation(handle, addUsingTTL(createTDdl, tTTL), limits, WAIT_MS);
+
+ final TimeToLive aTTL = TimeToLive.ofDays(3);
+ tableOperation(handle, addUsingTTL(createTADdl, aTTL), null, WAIT_MS);
+
+ final TimeToLive bTTL = TimeToLive.ofDays(5);
+ tableOperation(handle, addUsingTTL(createTABDdl, bTTL), null, WAIT_MS);
+
+ final TimeToLive userTTL = TimeToLive.ofDays(10);
+
+ final String s1 = genString(1);
+ final int rowKB = 1;
+ WriteMultipleRequest req = new WriteMultipleRequest();
+ WriteMultipleResult res;
+ PutRequest put;
+
+ /* Use table default TTL */
+ int id = 1;
+ MapValue trow = makeTRow(id, s1);
+ put = new PutRequest()
+ .setTableName("t")
+ .setValue(trow);
+ req.add(put, true);
+
+ MapValue arow = makeTARow(id, 1, s1);
+ put = new PutRequest()
+ .setTableName("t.a")
+ .setValue(arow);
+ req.add(put, true);
+
+ MapValue brow = makeTABRow(id, 1, 1, s1);
+ put = new PutRequest()
+ .setTableName("t.a.b")
+ .setValue(brow);
+ req.add(put, true);
+
+ res = handle.writeMultiple(req);
+ assertTrue(res.getSuccess());
+
+ doGetRow("t", makeTKey(id), trow, rowKB, tTTL);
+ doGetRow("t.a", makeTAKey(id, 1), arow, rowKB, aTTL);
+ doGetRow("t.a.b", makeTABKey(id, 1, 1), brow, rowKB, bTTL);
+
+ /* Update to user specified TTL */
+ req.clear();
+
+ put = new PutRequest()
+ .setTableName("t")
+ .setValue(trow)
+ .setTTL(userTTL);
+ req.add(put, true);
+
+ put = new PutRequest()
+ .setTableName("t.a")
+ .setValue(arow)
+ .setTTL(userTTL);
+ req.add(put, true);
+
+ put = new PutRequest()
+ .setTableName("t.a.b")
+ .setValue(brow)
+ .setTTL(userTTL);
+ req.add(put, true);
+
+ res = handle.writeMultiple(req);
+ assertTrue(res.getSuccess());
+
+ doGetRow("t", makeTKey(id), trow, rowKB, userTTL);
+ doGetRow("t.a", makeTAKey(id, 1), arow, rowKB, userTTL);
+ doGetRow("t.a.b", makeTABKey(id, 1, 1), brow, rowKB, userTTL);
+
+ /* Update back to default TTL */
+ req.clear();
+
+ put = new PutRequest()
+ .setTableName("t")
+ .setValue(trow)
+ .setUseTableDefaultTTL(true);
+ req.add(put, true);
+
+ put = new PutRequest()
+ .setTableName("t.a")
+ .setValue(arow)
+ .setUseTableDefaultTTL(true);
+ req.add(put, true);
+
+ put = new PutRequest()
+ .setTableName("t.a.b")
+ .setValue(brow)
+ .setUseTableDefaultTTL(true);
+ req.add(put, true);
+
+ res = handle.writeMultiple(req);
+ assertTrue(res.getSuccess());
+
+ doGetRow("t", makeTKey(id), trow, rowKB, tTTL);
+ doGetRow("t.a", makeTAKey(id, 1), arow, rowKB, aTTL);
+ doGetRow("t.a.b", makeTABKey(id, 1, 1), brow, rowKB, bTTL);
+ }
+
+ private void runQueryWithLimit(String query, int maxReadKB, int limit,
+ int expCount, int expReadKB, int recordKB) {
+
+ runQuery(query, 0, 0, expCount, expReadKB, recordKB);
+
+ if (maxReadKB > 0) {
+ runQuery(query, maxReadKB, 0, expCount, expReadKB, recordKB);
+ }
+
+ if (limit > 0) {
+ runQuery(query, 0, limit, expCount, expReadKB, recordKB);
+ }
+
+ if (checkKVVersion(21, 2, 18)) {
+ /* Query should always make progress with small limit */
+ for (int kb = 1; kb <= 5; kb++) {
+ runQuery(query, kb, 0, expCount, expReadKB, recordKB);
+ }
+ }
+ }
+
+ private void runQuery(String statement,
+ int maxReadKB,
+ int limit,
+ int expCount,
+ int expCost,
+ int recordKB) {
+
+ final boolean dispResult = false;
+
+ QueryRequest req = new QueryRequest();
+ PrepareRequest prepReq = new PrepareRequest()
+ .setStatement(statement);
+ PrepareResult prepRet = handle.prepare(prepReq);
+ req.setPreparedStatement(prepRet);
+
+ if (maxReadKB > 0) {
+ req.setMaxReadKB(maxReadKB);
+ }
+ if (limit > 0) {
+ req.setLimit(limit);
+ }
+
+ QueryResult ret;
+ int cnt = 0;
+ int batches = 0;
+ int cost = 0;
+ int batchSize = 0;
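+        /*
+         * Re-issue the query until the request reports completion,
+         * accumulating the row count and read cost across batches.
+         */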
+ do {
+ ret = handle.query(req);
+ batches++;
+ batchSize = ret.getResults().size();
+
+ if (maxReadKB > 0) {
+ if (checkKVVersion(21, 2, 18)) {
+ /*
+                     * The query should suspend after reading the table row,
+                     * or the key for a key-only query, once the read cost
+                     * exceeds the size limit, so readKB may overshoot the
+                     * limit by at most one record.
+ */
+ assertTrue("The read cost should be at most " + recordKB +
+ " kb beyond the maximum readKB " + maxReadKB +
+ ", but actual " + ret.getReadKB(),
+ ret.getReadKB() <= maxReadKB + recordKB);
+ } else {
+ assertTrue("The read cost should be at most 1" +
+ " kb beyond the maximum readKB " + maxReadKB +
+ ", but actual " + ret.getReadKB(),
+ ret.getReadKB() <= maxReadKB + 1);
+ }
+ }
+
+ if (limit > 0) {
+ assertTrue("The record count should not exceed the limit of " +
+ limit + ": " + batchSize, batchSize <= limit);
+ }
+
+ cost += ret.getReadKB();
+ cnt += batchSize;
+
+ for (MapValue mv : ret.getResults()) {
+ if (dispResult) {
+ String json = mv.toJson();
+ if (json.length() > 50) {
+ json = json.substring(0, 50) + "..." +
+ (json.length() - 50) + " bytes ...";
+ }
+ System.out.println(json);
+ }
+ }
+ } while(!req.isDone());
+
+ if (expCount > 0) {
+ assertEquals("'" + statement + "'\nshould return " + expCount +
+ " rows but actual got " + cnt + " rows",
+ expCount, cnt);
+ }
+
+ if (maxReadKB == 0 && limit == 0 && expCost < 2 * 1024 * 1024) {
+ assertEquals("'" + statement + "' + " +
+ "should be done in single batch but actual " +
+ batches + " batches", 1, batches);
+ }
+
+        if (!checkKVVersion(22, 1, 1)) {
+ return;
+ }
+
+ if (!onprem) {
+ assertTrue(cost > 0);
+
+ if (expCost > 0) {
+ if (batches == 1) {
+ assertEquals("'" + statement + "'\nexpect read cost " +
+ expCost + "kb, but actual " + cost + " kb",
+ expCost, cost);
+ }
+ }
+ }
+ }
+
+ private void doPutRow(String tableName, MapValue row, int recordKB) {
+ PutRequest req = new PutRequest()
+ .setTableName(tableName)
+ .setValue(row);
+ PutResult ret = handle.put(req);
+ assertNotNull(ret.getVersion());
+ assertCost(ret, 0, recordKB);
+ }
+
+ private void doGetRow(String tableName,
+ MapValue key,
+ MapValue expRow,
+ int recordKB,
+ TimeToLive ttl) {
+ GetRequest req = new GetRequest()
+ .setTableName(tableName)
+ .setKey(key);
+ GetResult ret = handle.get(req);
+ assertEquals(expRow, ret.getValue());
+ assertCost(ret, recordKB, 0);
+ if (ttl != null) {
+ assertTimeToLive(ttl, ret.getExpirationTime());
+ }
+ }
+
+ private void doDeleteRow(String tableName, MapValue key, int readKB) {
+ DeleteRequest req = new DeleteRequest()
+ .setTableName(tableName)
+ .setKey(key);
+ DeleteResult ret = handle.delete(req);
+ assertTrue(ret.getSuccess());
+ assertCost(ret, getMinRead() * 2 /* key read in absolute consistency */,
+ readKB);
+ }
+
+ @Override
+ void dropAllTables() {
+ dropAllTables(handle, true);
+ }
+
+ private MapValue makeTRow(int id, String longStr) {
+ MapValue row = makeTKey(id);
+ row.put("name", "n" + id)
+ .put("s", longStr);
+ return row;
+ }
+
+ private MapValue makeTKey(int id) {
+ MapValue row = new MapValue();
+ row.put("id", id);
+ return row;
+ }
+
+ private MapValue makeTARow(int id, int ida, String longStr) {
+ MapValue row = makeTAKey(id, ida);
+ row.put("name", "n" + id + "_" + ida)
+ .put("s", longStr);
+ return row;
+ }
+
+ private MapValue makeTAKey(int id, int ida) {
+ MapValue row = new MapValue();
+ row.put("id", id)
+ .put("ida", ida);
+ return row;
+ }
+
+ private MapValue makeTABRow(int id, int ida, int idb, String longStr) {
+ MapValue row = makeTABKey(id, ida, idb);
+ row.put("name", "n" + id + "_" + ida + "_" + idb)
+ .put("s", longStr);
+ return row;
+ }
+
+ private MapValue makeTABKey(int id, int ida, int idb) {
+ MapValue row = new MapValue();
+ row.put("id", id)
+ .put("ida", ida)
+ .put("idb", idb);
+ return row;
+ }
+
+ private void checkTableInfo(TableResult tr,
+ String tableName,
+ TableLimits limits) {
+ assertEquals(tableName, tr.getTableName());
+ assertEquals(State.ACTIVE, tr.getTableState());
+ if (onprem) {
+ return;
+ }
+ if (limits != null) {
+ TableLimits tl = tr.getTableLimits();
+ assertEquals(limits.getReadUnits(), tl.getReadUnits());
+ assertEquals(limits.getWriteUnits(), tl.getWriteUnits());
+ assertEquals(limits.getStorageGB(), tl.getStorageGB());
+ } else {
+ assertNull(tr.getTableLimits());
+ }
+ }
+
+ private static String genString(int len) {
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < len; i++) {
+ sb.append('a');
+ }
+ return sb.toString();
+ }
+
+ private static String addUsingTTL(final String ddl, TimeToLive ttl) {
+ String newDdl = ddl;
+ if (ttl != null) {
+ newDdl += " using ttl " + ttl.getValue() + " " + ttl.getUnit();
+ }
+ return newDdl;
+ }
+
+ private void assertTimeToLive(TimeToLive ttl, long actual) {
+ final long DAY_IN_MILLIS = 24 * 60 * 60 * 1000;
+ long expected = ttl.toExpirationTime(System.currentTimeMillis());
+ assertTrue("Actual TTL duration " + actual + "ms differs by " +
+ "more than a day from expected duration of " + expected +"ms",
+ Math.abs(actual - expected) < DAY_IN_MILLIS);
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ConcurrentDDLTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ConcurrentDDLTest.java
new file mode 100644
index 00000000..78dd736f
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ConcurrentDDLTest.java
@@ -0,0 +1,1329 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import java.lang.Thread.UncaughtExceptionHandler;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
+
+//import oracle.nosql.driver.IndexLimitException;
+import oracle.nosql.driver.NoSQLHandle;
+import oracle.nosql.driver.NoSQLHandleConfig;
+//import oracle.nosql.driver.TableLimitException;
+import oracle.nosql.driver.ops.GetIndexesRequest;
+import oracle.nosql.driver.ops.GetIndexesResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.ListTablesRequest;
+import oracle.nosql.driver.ops.ListTablesResult;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableRequest;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.TableResult.State;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.driver.values.NullValue;
+import oracle.nosql.proxy.security.SecureTestUtil;
+import oracle.nosql.util.tmi.TableRequestLimits;
+
+import org.junit.Test;
+
+/**
+ * Concurrent DDL tests:
+ *  o testSingleTable:
+ *     Execute DDLs asynchronously against a single table.
+ *  o testMultipleTables:
+ *     Execute DDLs on multiple tables.
+ *  o testMultiTenants:
+ *     Execute DDLs on tables in multiple tenants.
+ *  o testTableIndexLimits:
+ *     Create tables/indexes to reach the limits of tables/indexes.
+ */
+public class ConcurrentDDLTest extends ProxyTestBase {
+
+ /*
+     * The number of threads used to run the DDL tests.
+ */
+ private final static int CONCURRENT_NUM = 3;
+ private final static int waitMillis = 60_000;
+
+    private final Map<String, NoSQLHandle> handleCache =
+        new HashMap<String, NoSQLHandle>();
+
+ private DDLExecutor ddlExecutor;
+
+ @Override
+ public void setUp() throws Exception{
+ super.setUp();
+ ddlExecutor = new ClientExecutor();
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ clearHandleCache();
+ super.tearDown();
+ }
+
+ private void clearHandleCache() {
+        final Iterator<Entry<String, NoSQLHandle>> iter =
+            handleCache.entrySet().iterator();
+
+        while (iter.hasNext()) {
+            Entry<String, NoSQLHandle> e = iter.next();
+ String tenantId = e.getKey();
+ if (tenantId.equals(getTenantId())) {
+ continue;
+ }
+ NoSQLHandle nosqlHandle = e.getValue();
+ dropAllTables(nosqlHandle, false);
+
+ if (getSCURL() != null) {
+ deleteTier(tenantId);
+ }
+ nosqlHandle.close();
+ iter.remove();
+ }
+ }
+
+ @Test
+ public void testSingleTable() {
+ assumeTrue("Skip the test if not minicloud or cloud test or " +
+ "tenantLimits is not provided",
+ cloudRunning && tenantLimits != null);
+
+ TableRequestLimits requestLimits =
+ tenantLimits.getStandardTableLimits();
+ final int numFields = Math.min(requestLimits.getColumnsPerTable(), 10);
+ final int numIndexes = Math.min(requestLimits.getIndexesPerTable(), 3);
+ final int evolveLimit = requestLimits.getSchemaEvolutions();
+ final int numAddField = Math.min((evolveLimit + 1) / 2, 3);
+ final int numDropField = Math.min(((evolveLimit > numAddField) ?
+ (evolveLimit - numAddField) : 0),
+ numAddField);
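+        /*
+         * Each ALTER TABLE counts as one schema evolution, so keep
+         * numAddField + numDropField within the tenant's evolution limit.
+         */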
+
+ final int numRows = 100;
+ final TableLimits tableLimits = new TableLimits(1000, 500, 10);
+ final String tenantId = getTenantId();
+ final NoSQLHandle nosqlHandle = getTenantHandle(tenantId);
+
+ final DDLGenerator plan = new DDLGenerator(tenantId);
+ final String tableName = makeTableName(tenantId, 0);
+        List<DDLInfo> ddls;
+
+ /*
+ * Create table
+ */
+ ddls = plan.createTable(numFields, 1, tableLimits).build(true);
+ execAsyncAndWait(ddls);
+
+ /* Load rows to table */
+ loadRows(nosqlHandle, tableName, numFields, numRows);
+
+ /*
+ * Create indexes, alter table add field ..
+ */
+ ddls = plan.createIndex(tableName, numIndexes)
+ .addField(tableName, numAddField)
+ .build(true);
+ execAsyncAndWait(ddls);
+
+ /* Verify the existence of indexes and do counting by index */
+ assertNumIndexes(tenantId, tableName, numIndexes);
+ for (int idxIndex = 0; idxIndex < numIndexes; idxIndex++) {
+ assertRowCountByIndex(nosqlHandle, tableName, idxIndex,
+ numRows);
+ }
+ /* Check row value and expiration time */
+ putRow(nosqlHandle, tableName, numRows, numFields);
+ checkRow(nosqlHandle, tableName, numRows, numFields, numAddField);
+
+ /*
+ * Drop index, alter table drop field ..
+ */
+ ddls = plan.dropIndex(tableName, numIndexes)
+ .dropField(tableName, numDropField)
+ .build(true);
+ execAsyncAndWait(ddls);
+
+ /*
+ * Verify no index existed
+ */
+ assertNumIndexes(tenantId, tableName, 0);
+ checkRow(nosqlHandle, tableName, numRows - 1, numFields,
+ (numAddField - numDropField));
+ }
+
+ @Test
+ public void testMultipleTables() {
+ assumeTrue("Skip the test if not minicloud or cloud test or " +
+ "tenantLimits is not provided",
+ cloudRunning && tenantLimits != null);
+
+ TableRequestLimits requestLimits =
+ tenantLimits.getStandardTableLimits();
+ final int numTables = Math.min(tenantLimits.getNumTables(), 3);
+ final int numFields = Math.min(requestLimits.getColumnsPerTable(), 5);
+ final int numIndexes = Math.min(requestLimits.getIndexesPerTable(), 3);
+ final int evolveLimit = requestLimits.getSchemaEvolutions();
+ final int numAddField = Math.min((evolveLimit + 1) / 2, 3);
+ final int numDropField = Math.min(((evolveLimit > numAddField) ?
+ (evolveLimit - numAddField) : 0),
+ numAddField);
+
+ final int numRows = 100;
+ final TableLimits tableLimits = new TableLimits(1000, 500, 10);
+
+ final NoSQLHandle nosqlHandle = getTenantHandle(getTenantId());
+ final String tenantId = getTenantId();
+ final String[] tableNames = getTableNames(tenantId, numTables);
+
+ final DDLGenerator plan = new DDLGenerator(tenantId);
+        List<DDLInfo> ddls;
+
+ /*
+ * Create tables
+ */
+ ddls = plan.createTable(numFields, numTables, tableLimits).build(true);
+ execWithThreads(CONCURRENT_NUM, ddls);
+
+        /* Load rows to tables */
+ loadRowsToTables(nosqlHandle, CONCURRENT_NUM, tableNames,
+ numFields, numRows);
+
+ /*
+ * Create index, alter table add field ..
+ */
+ for (String tableName : tableNames) {
+ plan.createIndex(tableName, numIndexes)
+ .addField(tableName, numAddField);
+ }
+ ddls = plan.build(true);
+ execWithThreads(CONCURRENT_NUM, ddls);
+
+ /* Check index and row value */
+ for (String tableName : tableNames) {
+ assertNumIndexes(tenantId, tableName, numIndexes);
+ assertRowCountByIndex(nosqlHandle, tableName,
+ numIndexes - 1, numRows);
+ checkRow(nosqlHandle, tableName, numRows - 1,
+ numFields, numAddField);
+ }
+
+ /*
+ * Drop index, drop field
+ */
+ for (String tableName : tableNames) {
+ plan.dropIndex(tableName, numIndexes)
+ .dropField(tableName, numDropField);
+
+ }
+ ddls = plan.build(true);
+ execWithThreads(CONCURRENT_NUM, ddls);
+
+ /* Check index and row value */
+ for (String tableName : tableNames) {
+ assertNumIndexes(tenantId, tableName, 0);
+ checkRow(nosqlHandle, tableName, numRows - 1, numFields,
+ (numAddField - numDropField));
+ }
+
+ /*
+ * Drop tables
+ */
+ ddls = plan.dropTable(numTables).build(true);
+ execWithThreads(CONCURRENT_NUM, ddls);
+ assertNumTables(tenantId, 0);
+ }
+
+ @Test
+ public void testMultiTenants() {
+        /* This test needs 3 tenants; it is not applicable to cloud tests */
+ assumeTrue("Skip this test if not minicloud test", useMiniCloud);
+
+ final int numTenants = 3;
+
+ TableRequestLimits requestLimits =
+ tenantLimits.getStandardTableLimits();
+ final int numTables = Math.min(tenantLimits.getNumTables(), 3);
+ final int numFields = Math.min(requestLimits.getColumnsPerTable(), 5);
+ final int numIndexes = Math.min(requestLimits.getIndexesPerTable(), 3);
+ final int evolveLimit = requestLimits.getSchemaEvolutions();
+ final int numAddField = Math.min((evolveLimit + 1) / 2, 3);
+ final int numDropField = Math.min(((evolveLimit > numAddField) ?
+ (evolveLimit - numAddField) : 0),
+ numAddField);
+
+ final int numRows = 100;
+ final TableLimits tableLimits = new TableLimits(1000, 500, 10);
+ final String[] tenantIds = new String[numTenants];
+ for (int i = 0; i < numTenants; i++) {
+ String tenantId = makeTenantId(i);
+ if (getSCURL() != null) {
+ addTier(tenantId, tenantLimits);
+ }
+ tenantIds[i] = tenantId;
+ }
+
+ final DDLGenerator plan = new DDLGenerator(getTenantId());
+        List<DDLInfo> ddls = null;
+
+ /*
+ * Create tables
+ */
+ for (String tenantId : tenantIds) {
+ plan.setTenantId(tenantId)
+ .createTable(numFields, numTables, tableLimits);
+ }
+ ddls = plan.build(true);
+ execWithThreads(CONCURRENT_NUM, ddls);
+
+ /* Load rows to tables */
+ for (String tenantId : tenantIds) {
+ NoSQLHandle nosqlHandle = getTenantHandle(tenantId);
+ String[] tableNames = getTableNames(tenantId, numTables);
+ loadRowsToTables(nosqlHandle, CONCURRENT_NUM, tableNames,
+ numFields, numRows);
+ }
+
+ /*
+ * Create Indexes, alter table add fields
+ */
+ for (int tableIndex = 0; tableIndex < numTables; tableIndex++) {
+ for (String tenantId : tenantIds) {
+ String tableName = makeTableName(tenantId, tableIndex);
+ plan.setTenantId(tenantId)
+ .createIndex(tableName, numIndexes)
+ .addField(tableName, numAddField);
+ }
+ }
+ ddls = plan.build(true);
+ execWithThreads(CONCURRENT_NUM, ddls);
+
+ /* Verify after create index, alter table add field */
+ for (String tenantId : tenantIds) {
+ NoSQLHandle nosqlHandle = getTenantHandle(tenantId);
+ for (int tableIndex = 0; tableIndex < numTables; tableIndex++) {
+ String tableName = makeTableName(tenantId, tableIndex);
+ assertNumIndexes(tenantId, tableName, numIndexes);
+ assertRowCountByIndex(nosqlHandle, tableName,
+ numIndexes - 1, numRows);
+ /* Check row value */
+ checkRow(nosqlHandle, tableName, numRows - 1, numFields,
+ numAddField);
+ }
+ }
+
+ /*
+ * Drop Indexes and drop field
+ */
+ for (int tableIndex = 0; tableIndex < numTables; tableIndex++) {
+ for (String tenantId : tenantIds) {
+ String tableName = makeTableName(tenantId, tableIndex);
+ plan.setTenantId(tenantId)
+ .dropIndex(tableName, numIndexes)
+ .dropField(tableName, numDropField);
+ }
+ }
+ ddls = plan.build(true);
+ execWithThreads(CONCURRENT_NUM, ddls);
+
+ /* Verify after drop index and drop field */
+ for (String tenantId : tenantIds) {
+ NoSQLHandle nosqlHandle = getTenantHandle(tenantId);
+ for (int tableIndex = 0; tableIndex < numTables; tableIndex++) {
+ String tableName = makeTableName(tenantId, tableIndex);
+ assertNumIndexes(tenantId, tableName, 0);
+ /* Check row value */
+ checkRow(nosqlHandle, tableName, numRows - 1, numFields,
+ (numAddField - numDropField));
+ }
+ }
+
+ /*
+ * Drop tables
+ */
+ for (String tenantId : tenantIds) {
+ plan.setTenantId(tenantId).dropTable(numTables);
+ }
+ ddls = plan.build(true);
+ execWithThreads(CONCURRENT_NUM, ddls);
+ /* No table exists */
+ for (String tenantId : tenantIds) {
+ assertNumTables(tenantId, 0);
+ }
+ }
+
+ @Test
+ public void testTableIndexLimits() {
+ /*
+         * This test aims to create the maximum number of tables in a
+         * tenant; it is not applicable to cloud testing.
+ */
+ assumeTrue("Skip this test if not minicloud test", useMiniCloud);
+
+ TableRequestLimits requestLimits =
+ tenantLimits.getStandardTableLimits();
+ final int numTables = tenantLimits.getNumTables();
+ final int numIndexes = requestLimits.getIndexesPerTable();
+ final int numFields = numIndexes + 1;
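+        /*
+         * One column per index (index idx_i is built on field f_i), plus
+         * a spare.
+         */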
+
+ final TableLimits tableLimits = new TableLimits(1000, 500, 10);
+ final String tenantId = getTenantId();
+
+ final DDLGenerator plan = new DDLGenerator(tenantId);
+        List<DDLInfo> ddls = null;
+
+        /* Create 2 tables, each with numFields columns */
+ ddls = plan.createTable(numFields, 2, tableLimits).build(true);
+ execAsyncAndWait(ddls);
+ assertNumTables(tenantId, 2);
+
+ /*
+ * Create M indexes on a table, M is the index number limit per table.
+ */
+ final String table0Name = makeTableName(tenantId, 0);
+ ddls = plan.createIndex(table0Name, numIndexes).build(true);
+ execAsyncAndWait(ddls);
+ assertNumIndexes(tenantId, table0Name, numIndexes);
+
+ /*
+         * Create M + 1 indexes on a table, where M is the per-table index
+         * limit. Creating the last index should fail.
+ *
+ * TODO: bug?
+ */
+ /*final String table1Name = makeTableName(tenantId, 1);
+ ddls = plan.createIndex(table1Name, numIndexes + 1).build(true);
+ execWithThreads(numThreads, ddls, IndexLimitException.class, 1);
+ assertNumIndexes(tenantId, table1Name, numIndexes); */
+
+ /* drop tables */
+ ddls = plan.dropTable(2).build(true);
+ execAsyncAndWait(ddls);
+ assertNumTables(tenantId, 0);
+
+ /*
+ * Create N tables, N is the table number limit.
+ */
+ ddls = plan.createTable(numFields, numTables, tableLimits).build(true);
+ execWithThreads(CONCURRENT_NUM, ddls);
+ assertNumTables(tenantId, numTables);
+
+ /*
+         * Drop the last i tables, then create i + 1 tables concurrently;
+         * the total number of tables becomes N + 1, which exceeds the
+         * limit N, so creating the last table should fail.
+ *
+ * TODO: bug?
+ */
+ /*
+ int nd = 2;
+ String[] tableNames = getTableNames(tenantId, numTables - nd, nd);
+ ddls = plan.dropTable(tableNames).build(true);
+ execWithThreads(numThreads, ddls);
+ assertNumTables(tenantId, numTables - nd);
+
+ tableNames = getTableNames(tenantId, numTables - nd, nd + 1);
+ ddls = plan.createTable(numFields, tableNames, tableLimits).build(true);
+ execWithThreads(numThreads, ddls, TableLimitException.class, 1);
+ assertNumTables(tenantId, numTables); */
+
+ /* Drop all tables */
+ ddls = plan.dropTable(numTables).build(true);
+ execWithThreads(CONCURRENT_NUM, ddls);
+ assertNumTables(tenantId, 0);
+ }
+
+ private static String[] getTableNames(String tenantId, int numTables) {
+ return getTableNames(tenantId, 0, numTables);
+ }
+
+ private static String[] getTableNames(String tenantId, int from, int num) {
+ final String[] tableNames = new String[num];
+ for (int i = 0; i < tableNames.length; i++) {
+ tableNames[i] = makeTableName(tenantId, from + i);
+ }
+ return tableNames;
+ }
+
+ private void loadRows(NoSQLHandle nosqlHandle,
+ String tableName,
+ int numFields,
+ int numRows) {
+ final PutRequest putReq = new PutRequest().setTableName(tableName);
+
+ PutResult putRet;
+ for (int i = 0; i < numRows; i++) {
+ MapValue row = createRow(i, numFields);
+ putReq.setValue(row);
+ try {
+ putRet = nosqlHandle.put(putReq);
+ assertNotNull(putRet.getVersion());
+ } catch (Exception ex) {
+ fail("Failed to put row to table " + tableName + ": " +
+ ex.getMessage());
+ }
+ }
+ }
+
+    private void execAsyncAndWait(List<DDLInfo> ddls) {
+
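+        /*
+         * Issue every DDL without waiting, grouping the returned
+         * TableResults by tenant, then wait for each to complete.
+         */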
+        final Map<String, List<TableResult>> results =
+            new HashMap<String, List<TableResult>>();
+
+ String tenantId;
+ TableResult tret;
+        List<TableResult> trets;
+ for (DDLInfo ddl : ddls) {
+ tenantId = ddl.getTenantId();
+ try {
+ tret = ddlExecutor.execNoWait(ddl);
+ } catch (Throwable t) {
+ fail("Execute " + ddl + " failed: " + t);
+ return;
+ }
+ if (results.containsKey(tenantId)) {
+ trets = results.get(tenantId);
+ } else {
+                trets = new ArrayList<TableResult>();
+ results.put(tenantId, trets);
+ }
+ trets.add(tret);
+ }
+
+ /* Wait for completion of ddls' executions */
+        for (Entry<String, List<TableResult>> e : results.entrySet()) {
+ for (TableResult ret : e.getValue()) {
+ try {
+ ddlExecutor.waitForDone(e.getKey(), waitMillis, ret);
+ } catch (Throwable t) {
+ fail("WaitForDone failed: " + t);
+ }
+ }
+ }
+ }
+
+    private void execWithThreads(int numThreads, List<DDLInfo> ddls) {
+ execWithThreads(numThreads, ddls, null, 0);
+ }
+
+ private void execWithThreads(int numThreads,
+                                 List<DDLInfo> ddls,
+                                 Class<?> expectedExceptionClass,
+ int expNumException) {
+
+        final ArrayBlockingQueue<DDLInfo> ddlQueue =
+            new ArrayBlockingQueue<DDLInfo>(ddls.size(), true, ddls);
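+        /*
+         * Worker threads drain this shared queue until it is empty; the
+         * queue is created fair and pre-loaded with every DDL to execute.
+         */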
+
+ /* Start threads */
+        final List<Thread> threads = new ArrayList<Thread>(numThreads);
+ final TestExceptionHandler handler =
+ new TestExceptionHandler(expectedExceptionClass);
+
+ for (int i = 0; i < numThreads; i++) {
+ Thread thd = new DDLThread(ddlQueue, "ddlThd" + i);
+ threads.add(thd);
+ thd.setUncaughtExceptionHandler(handler);
+ thd.start();
+ }
+
+ /* Join all threads */
+ for (Thread thread : threads) {
+ try {
+ thread.join();
+ } catch (Exception ex) {
+ fail("Wait for thread " + thread + ": " + ex);
+ }
+ }
+
+ /* Check if expect to catch exception */
+        Map<String, Throwable> failures = handler.getUnexpectedException();
+ if (!failures.isEmpty()) {
+            for (Entry<String, Throwable> e : failures.entrySet()) {
+ System.err.println(
+ "Unexpected exception caught from " + e.getKey());
+ e.getValue().printStackTrace();
+ }
+ fail("DDLThreads execute failed " + failures.keySet());
+ }
+
+ if (expectedExceptionClass != null) {
+ String exCls = expectedExceptionClass.getName();
+ int actNumEx = handler.getNumExpectedException();
+ if (expNumException > 0) {
+ assertEquals("Expect to catch " + exCls + " " +
+ expNumException + " times but caught " +
+ actNumEx + " times", expNumException, actNumEx);
+ } else {
+ assertTrue("Expect to catch " + exCls + " " + expNumException +
+ " times but not",
+ actNumEx > 0);
+ }
+ }
+ }
+
+ /**
+ * Load rows to the specified tables
+ */
+ private void loadRowsToTables(NoSQLHandle nosqlHandle,
+ int numThreads,
+ String[] tableNames,
+ int numFields,
+ int numRows) {
+
+ final ExecutorService executor =
+ Executors.newFixedThreadPool(numThreads);
+        final ArrayList<Future<Integer>> futures =
+            new ArrayList<Future<Integer>>(numThreads);
+
+ for (String tableName : tableNames) {
+ LoadTask task = new LoadTask(nosqlHandle, tableName, numFields,
+ numRows);
+ futures.add(executor.submit(task));
+ }
+ executor.shutdown();
+
+        for (Future<Integer> f : futures) {
+ try {
+ int count = f.get().intValue();
+ assertEquals(numRows, count);
+ } catch (Exception ex) {
+ fail("LoadTask failed: " + ex);
+ }
+ }
+ }
+
+ private void putRow(NoSQLHandle nosqlHandle,
+ String tableName,
+ int id,
+ int numFields) {
+ final MapValue row = createRow(id, numFields, 0);
+ PutRequest putReq = new PutRequest()
+ .setTableName(tableName)
+ .setValue(row);
+ try {
+ PutResult putRet = nosqlHandle.put(putReq);
+ assertNotNull(putRet.getVersion());
+ } catch (Exception ex) {
+ fail("Failed to put row to table " + tableName + ": " +
+ ex.getMessage());
+
+ }
+ }
+
+ private void checkRow(NoSQLHandle nosqlHandle,
+ String tableName,
+ int id,
+ int numFields,
+ int numNewFields) {
+
+ final MapValue expRow = createRow(id, numFields, numNewFields);
+
+ final MapValue key = createKey(id);
+ final GetRequest getReq = new GetRequest()
+ .setTableName(tableName)
+ .setKey(key);
+
+ try {
+ GetResult getRet = nosqlHandle.get(getReq);
+ assertEquals(expRow, getRet.getValue());
+ } catch (Exception ex) {
+ fail("Failed to get row: " + ex.getMessage());
+ }
+ }
+
+ private void assertRowCountByIndex(NoSQLHandle nosqlHandle,
+ String tableName,
+ int idxIndex,
+ int expCount) {
+ final String fieldName = makeFieldName(idxIndex);
+ final String query = "SELECT count(" + fieldName+ ") FROM " + tableName;
+ long ret = execCountQuery(nosqlHandle, query);
+ assertEquals(expCount, ret);
+ }
+
+ private long countRows(NoSQLHandle nosqlHandle, String tableName) {
+ final String query = "SELECT count(*) FROM " + tableName;
+ return execCountQuery(nosqlHandle, query);
+ }
+
+ private long execCountQuery(NoSQLHandle nosqlHandle, String query) {
+
+ try {
+ PrepareRequest prepReq = new PrepareRequest()
+ .setStatement(query);
+ PrepareResult prepRet = nosqlHandle.prepare(prepReq);
+
+ QueryRequest queryReq = new QueryRequest()
+ .setPreparedStatement(prepRet);
+            List<MapValue> results = new ArrayList<MapValue>();
+ do {
+ QueryResult result = nosqlHandle.query(queryReq);
+ if (!result.getResults().isEmpty()) {
+ results.addAll(result.getResults());
+ }
+ } while (!queryReq.isDone());
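+            /*
+             * A single row is expected; the unaliased aggregate comes back
+             * in the generated column "Column_1".
+             */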
+ assertEquals(1, results.size());
+ MapValue value = results.get(0);
+ return value.get("Column_1").asLong().getLong();
+ } catch (Exception ex) {
+ fail("Failed to execute [" + query + "]: " + ex.getMessage());
+ }
+ return -1;
+ }
+
+ private void assertNumTables(String tenantId, int exp) {
+ assertEquals(exp, getNumTables(tenantId));
+ }
+
+ private int getNumTables(String tenantId) {
+ final ListTablesRequest ltReq = new ListTablesRequest();
+ try {
+ NoSQLHandle nosqlHandle = getTenantHandle(tenantId);
+ ListTablesResult ltRet = nosqlHandle.listTables(ltReq);
+ String[] tables = ltRet.getTables();
+ return tables.length;
+ } catch (Exception ex) {
+ fail("Failed to get index: " + ex.getMessage());
+ }
+ return -1;
+ }
+
+ private void assertNumIndexes(String tenantId, String tableName, int exp) {
+ assertEquals(exp, getNumIndexes(tenantId, tableName));
+ }
+
+ private int getNumIndexes(String tenantId, String tableName) {
+ final NoSQLHandle nosqlHandle = getTenantHandle(tenantId);
+ final GetIndexesRequest giReq = new GetIndexesRequest()
+ .setTableName(tableName);
+ try {
+ GetIndexesResult giRet = nosqlHandle.getIndexes(giReq);
+ return giRet.getIndexes().length;
+ } catch (Exception ex) {
+ fail("Failed to get index: " + ex.getMessage());
+ }
+ return -1;
+ }
+
+ private TableResult execTableRequestNoWait(NoSQLHandle nosqlHandle,
+ String statement,
+ TableLimits limits) {
+ TableRequest request = new TableRequest().setStatement(statement).
+ setTableLimits(limits).
+ setTimeout(waitMillis);
+ return nosqlHandle.tableRequest(request);
+ }
+
+ void waitForDone(NoSQLHandle nosqlHandle, int waitMs, TableResult result) {
+ if (result.getTableState() == State.ACTIVE ||
+ result.getTableState() == State.DROPPED) {
+ return;
+ }
+ result.waitForCompletion(nosqlHandle, waitMs, 1500);
+ }
+
+ /* Create Row value */
+ private MapValue createRow(int id, int numFields) {
+ return createRow(id, numFields, 0);
+ }
+
+ private MapValue createRow(int id, int numFields, int numNewFields) {
+ MapValue row = createKey(id);
+ for (int i = 0; i < numFields; i++) {
+ String value = makeString((id % (63 - numFields)) + i + 1, i);
+ row.put(makeFieldName(i), value);
+ }
+ for (int i = 0; i < numNewFields; i++) {
+ row.put(makeNewFieldName(i), NullValue.getInstance());
+ }
+ return row;
+ }
+
+ /* Create primary key value */
+ private MapValue createKey(int id) {
+ return new MapValue().put("id", id);
+ }
+
+ /* Create tenantId */
+ private static String makeTenantId(int index) {
+ return "CDTTenant" + index;
+ }
+
+ /* Create table name */
+ private static String makeTableName(String tenantId, int index) {
+ return tenantId + "T" + index;
+ }
+
+ /* Create index name */
+ private static String makeIndexName(int index, int ... fieldIndex) {
+ final StringBuilder sb = new StringBuilder();
+ sb.append("idx");
+ sb.append(index);
+ for (int f : fieldIndex) {
+ sb.append("_f");
+ sb.append(f);
+ }
+ return sb.toString();
+ }
+
+ private static String makeFieldName(int index) {
+ return "f" + index;
+ }
+
+ private static String makeNewFieldName(int index) {
+ return "nf" + index;
+ }
+
+ private static String makeString(int length, int from) {
+ StringBuilder sb = new StringBuilder();
+ for (int i = from; i < length + from; i++) {
+ sb.append((char)('a' + (i % 26)));
+ }
+ return sb.toString();
+ }
+
+ private NoSQLHandle getTenantHandle(String tenantId) {
+ NoSQLHandle nosqlHandle = handleCache.get(tenantId);
+ if (nosqlHandle != null) {
+ return nosqlHandle;
+ }
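+        /*
+         * Double-checked locking: re-check the cache under the lock
+         * before creating a new handle for the tenant.
+         */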
+ synchronized(handleCache) {
+ nosqlHandle = handleCache.get(tenantId);
+ if (nosqlHandle == null) {
+ if (tenantId.equals(getTenantId())) {
+ nosqlHandle = handle;
+ } else {
+ try {
+ nosqlHandle = configHandle(getProxyURL(), tenantId);
+ } catch (Exception ex) {
+ fail("Failed to get nosql handle for tenant: " +
+ tenantId);
+ return null;
+ }
+ }
+ handleCache.put(tenantId, nosqlHandle);
+ }
+ return nosqlHandle;
+ }
+ }
+
+ private NoSQLHandle configHandle(URL url, String tenantId) {
+
+ NoSQLHandleConfig hconfig = new NoSQLHandleConfig(url);
+ hconfig.configureDefaultRetryHandler(5, 0);
+ hconfig.setRequestTimeout(10_000);
+ SecureTestUtil.setAuthProvider(hconfig, isSecure(), onprem, tenantId);
+ return getHandle(hconfig);
+ }
+
+ /**
+ * A class to generate the sequence of DDL objects.
+ */
+ private static class DDLGenerator {
+        private final List<DDLInfo[]> ddlsList;
+ private String tenantId;
+
+ DDLGenerator(String defaultTenantId) {
+            ddlsList = new ArrayList<DDLInfo[]>();
+ tenantId = defaultTenantId;
+ }
+
+ DDLGenerator createTable(int numFields, int num, TableLimits limits) {
+ if (num > 0) {
+ String[] tableNames = getTableNames(tenantId, num);
+ createTable(numFields, tableNames, limits);
+ }
+ return this;
+ }
+
+ DDLGenerator createTable(int numFields,
+ String[] tableNames,
+ TableLimits limits) {
+ if (tableNames != null && tableNames.length > 0) {
+ addDDLs(DDLType.CREATE_TABLE, tableNames,
+ makeCreateTableDDLs(tableNames, numFields),
+ limits);
+ }
+ return this;
+ }
+
+ DDLGenerator createIndex(String tableName, int num) {
+ if (num > 0) {
+ addDDLs(DDLType.CREATE_INDEX, tableName,
+ makeCreateIndexDDLs(tableName, num));
+ }
+ return this;
+ }
+
+ DDLGenerator addField(String tableName, int num) {
+ if (num > 0) {
+ addDDLs(DDLType.ALTER_TABLE, tableName,
+ makeAddFieldDDLs(tableName, num));
+ }
+ return this;
+ }
+
+ DDLGenerator dropField(String tableName, int num) {
+ if (num > 0) {
+ addDDLs(DDLType.ALTER_TABLE, tableName,
+ makeDropFieldDDLs(tableName, num));
+ }
+ return this;
+ }
+
+ DDLGenerator dropIndex(String tableName, int num) {
+ if (num > 0) {
+ addDDLs(DDLType.DROP_INDEX, tableName,
+ makeDropIndexDDLs(tableName, num));
+ }
+ return this;
+ }
+
+ DDLGenerator dropTable(int num) {
+ if (num > 0) {
+ String[] tableNames = getTableNames(tenantId, num);
+ dropTable(tableNames);
+ }
+ return this;
+ }
+
+ DDLGenerator dropTable(String[] tableNames) {
+ if (tableNames != null && tableNames.length > 0) {
+ addDDLs(DDLType.DROP_TABLE, tableNames,
+ makeDropTableDDLs(tableNames),
+ null);
+ }
+ return this;
+ }
+
+ private void addDDLs(DDLType type, String tableName, String[] ddls) {
+ DDLInfo[] ddlInfos = new DDLInfo[ddls.length];
+ for (int i = 0; i < ddls.length; i++) {
+ String ddl = ddls[i];
+ String indexName = null;
+ if (type == DDLType.CREATE_INDEX ||
+ type == DDLType.DROP_INDEX) {
+ indexName = makeIndexName(i, i);
+ }
+ if (tableName == null) {
+ if (type == DDLType.CREATE_TABLE ||
+ type == DDLType.DROP_TABLE) {
+ tableName = makeTableName(getTenantId(), i);
+ }
+ }
+ DDLInfo info = new DDLInfo(getTenantId(), tableName,
+ indexName, type, ddl);
+ ddlInfos[i] = info;
+ }
+ ddlsList.add(ddlInfos);
+ }
+
+ private void addDDLs(DDLType type,
+ String[] tableNames,
+ String[] ddls,
+ TableLimits limits) {
+ DDLInfo[] ddlInfos = new DDLInfo[ddls.length];
+ for (int i = 0; i < ddls.length; i++) {
+ String tableName = tableNames[i];
+ String ddl = ddls[i];
+ DDLInfo info = new DDLInfo(getTenantId(), tableName, type,
+ ddl, limits);
+ ddlInfos[i] = info;
+ }
+ ddlsList.add(ddlInfos);
+ }
+
+ DDLGenerator setTenantId(String tenantId) {
+ this.tenantId = tenantId;
+ return this;
+ }
+
+ String getTenantId() {
+ return tenantId;
+ }
+
+ /*
+         * Merges the accumulated DDLInfo[] arrays into a single list in
+         * round-robin order, so DDLs for different tables interleave.
+         */
+        List<DDLInfo> build(boolean clear) {
+            final List<DDLInfo> queue = new ArrayList<DDLInfo>();
+ @SuppressWarnings("unchecked")
+            Iterator<DDLInfo>[] ddlIters = new Iterator[ddlsList.size()];
+            List<Integer> indexes = new ArrayList<Integer>(ddlIters.length);
+ for (int i = 0; i < ddlIters.length; i++) {
+ DDLInfo[] ddls = ddlsList.get(i);
+ ddlIters[i] = Arrays.asList(ddls).iterator();
+ indexes.add(i);
+ }
+
+            Iterator<Integer> indIter = indexes.iterator();
+            while (indIter.hasNext()) {
+                int ind = indIter.next();
+                Iterator<DDLInfo> ddlIter = ddlIters[ind];
+
+ if (ddlIter.hasNext()) {
+ queue.add(ddlIter.next());
+ } else {
+ indIter.remove();
+ }
+ if (!indIter.hasNext()) {
+ if (indexes.isEmpty()) {
+ break;
+ }
+ indIter = indexes.iterator();
+ }
+ }
+ if (clear) {
+ clear();
+ }
+ return queue;
+ }
+
+ void clear() {
+ ddlsList.clear();
+ }
+
+ private static String[] makeCreateTableDDLs(String[] tableNames,
+ int numFields) {
+ final String[] ddls = new String[tableNames.length];
+ int i = 0;
+ for (String tableName : tableNames) {
+ ddls[i++] = makeCreateTableDDL(tableName, numFields);
+ }
+ return ddls;
+ }
+
+ private static String makeCreateTableDDL(String tableName,
+ int numFields) {
+ final StringBuilder sb = new StringBuilder("CREATE TABLE ");
+ sb.append(tableName);
+ sb.append("(");
+
+ sb.append("id INTEGER, ");
+ for (int i = 0; i < numFields; i++) {
+ sb.append(makeFieldName(i));
+ sb.append(" STRING, ");
+ }
+ sb.append("PRIMARY KEY(id))");
+ return sb.toString();
+ }
+
+ private static String[] makeDropTableDDLs(String[] tableNames) {
+ final String[] ddls = new String[tableNames.length];
+ int i = 0;
+ for (String table : tableNames) {
+ ddls[i++] = makeDropTableDDL(table);
+ }
+ return ddls;
+ }
+
+ private static String makeDropTableDDL(String tableName) {
+ final StringBuilder sb = new StringBuilder("DROP TABLE ");
+ sb.append(tableName);
+ return sb.toString();
+ }
+
+ private static String[] makeCreateIndexDDLs(String tableName, int num) {
+ final String[] ddls = new String[num];
+ for (int i = 0; i < ddls.length; i++) {
+ final int fidx = i;
+ ddls[i] = makeCreateIndexDDL(tableName, i, fidx);
+ }
+ return ddls;
+ }
+
+ private static String makeCreateIndexDDL(String tableName,
+ int idxIndex,
+ int... fieldIndex) {
+ final StringBuilder sb = new StringBuilder("CREATE INDEX ");
+ sb.append(makeIndexName(idxIndex, fieldIndex));
+ sb.append(" on ");
+ sb.append(tableName);
+ sb.append("(");
+
+ boolean firstField = true;
+ for (int fidx : fieldIndex) {
+ if (firstField) {
+ firstField = false;
+ } else {
+ sb.append(", ");
+ }
+ sb.append(makeFieldName(fidx));
+ }
+ sb.append(")");
+ return sb.toString();
+ }
+
+ private static String[] makeAddFieldDDLs(String tableName, int num) {
+ final String[] ddls = new String[num];
+ for (int i = 0; i < ddls.length; i++) {
+ ddls[i] = makeAddDropFieldDDL(tableName, true, i);
+ }
+ return ddls;
+ }
+
+ private static String[] makeDropFieldDDLs(String tableName, int num) {
+ final String[] ddls = new String[num];
+ for (int i = 0; i < ddls.length; i++) {
+ ddls[i] = makeAddDropFieldDDL(tableName, false, i);
+ }
+ return ddls;
+ }
+
+ private static String makeAddDropFieldDDL(String tableName,
+ boolean addField,
+ int newFieldIndex) {
+ final StringBuilder sb = new StringBuilder("ALTER TABLE ");
+ sb.append(tableName);
+ sb.append("(");
+ sb.append(addField ? "ADD " : "DROP ");
+ sb.append(makeNewFieldName(newFieldIndex));
+ if (addField) {
+ sb.append(" STRING");
+ }
+ sb.append(")");
+ return sb.toString();
+ }
+
+ private static String[] makeDropIndexDDLs(String tableName, int num) {
+ final String[] ddls = new String[num];
+ for (int i = 0; i < ddls.length; i++) {
+ final int fidx = i;
+ ddls[i] = makeDropIndexDDL(tableName, i, fidx);
+ }
+ return ddls;
+ }
+
+ private static String makeDropIndexDDL(String tableName,
+ int idxIndex,
+ int ... fieldIndex) {
+ final StringBuilder sb = new StringBuilder("DROP INDEX ");
+ sb.append(makeIndexName(idxIndex, fieldIndex));
+ sb.append(" ON ");
+ sb.append(tableName);
+ return sb.toString();
+ }
+ }
+
+ /* DDL types */
+ static enum DDLType {
+ CREATE_TABLE,
+ CREATE_INDEX,
+ DROP_TABLE,
+ DROP_INDEX,
+ ALTER_TABLE
+ }
+
+ /**
+     * A thread that executes DDL statements read from a queue.
+ */
+ private class DDLThread extends Thread {
+        private final BlockingQueue<DDLInfo> queue;
+
+        DDLThread(BlockingQueue<DDLInfo> queue,
+ String name) {
+ this.queue = queue;
+ setName(name);
+ }
+
+ @Override
+ public void run() {
+ DDLInfo ddlInfo;
+ while((ddlInfo = queue.poll()) != null) {
+ TableResult ret = ddlExecutor.execNoWait(ddlInfo);
+ ddlExecutor.waitForDone(ddlInfo.getTenantId(), waitMillis, ret);
+ }
+ }
+ }
+
+ /**
+     * A class that encapsulates a DDL statement and its target tenantId.
+ */
+ static class DDLInfo {
+ private final String tenantId;
+ private final String tableName;
+ private final String indexName;
+ private final String ddl;
+ private final TableLimits limits;
+ private final DDLType type;
+
+ DDLInfo(String tenantId,
+ String tableName,
+ DDLType type,
+ String ddl) {
+ this(tenantId, tableName, null, type, ddl);
+ }
+
+ DDLInfo(String tenantId,
+ String tableName,
+ DDLType type,
+ String ddl,
+ TableLimits limits) {
+ this(tenantId, tableName, null, type, ddl, limits);
+ }
+
+ DDLInfo(String tenantId,
+ String tableName,
+ String indexName,
+ DDLType type,
+ String ddl) {
+ this(tenantId, tableName, indexName, type, ddl, null);
+ }
+
+ DDLInfo(String tenantId,
+ String tableName,
+ String indexName,
+ DDLType type,
+ String ddl,
+ TableLimits limits) {
+ this.tenantId = tenantId;
+ this.tableName = tableName;
+ this.indexName = indexName;
+ this.ddl = ddl;
+ this.type = type;
+ this.limits = limits;
+ }
+
+ String getTenantId() {
+ return tenantId;
+ }
+
+ String getTableName() {
+ return tableName;
+ }
+
+ String getIndexName() {
+ return indexName;
+ }
+
+ String getDDL() {
+ return ddl;
+ }
+
+ TableLimits getTableLimits() {
+ return limits;
+ }
+
+ DDLType getType() {
+ return type;
+ }
+
+ @Override
+ public String toString() {
+ return "tenantId=" + tenantId + "; tableName=" + tableName +
+ "; type=" + type + "; ddl=" + ddl;
+ }
+ }
+
+ /**
+ * Load N rows to the specified table.
+ */
+    private class LoadTask implements Callable<Integer> {
+
+ private final NoSQLHandle nosqlHandle;
+ private final String tableName;
+ private final int nFields;
+ private final int nRows;
+
+ LoadTask(NoSQLHandle nosqlHandle,
+ String tableName,
+ int nFields,
+ int nRows) {
+ this.nosqlHandle = nosqlHandle;
+ this.tableName = tableName;
+ this.nFields = nFields;
+ this.nRows = nRows;
+ }
+
+ @Override
+ public Integer call() throws Exception {
+ loadRows(nosqlHandle, tableName, nFields, nRows);
+ return (int)countRows(nosqlHandle, tableName);
+ }
+ }
+
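+    /**
+     * Abstraction over how a DDL statement is executed and waited on,
+     * keeping DDLThread independent of the execution path;
+     * ClientExecutor routes execution through the tenant's driver handle.
+     */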
+    interface DDLExecutor<T> {
+ T execNoWait(DDLInfo ddlInfo);
+ void waitForDone(String tenantId, int waitMs, T result);
+ }
+
+    private class ClientExecutor implements DDLExecutor<TableResult> {
+ @Override
+ public TableResult execNoWait(DDLInfo ddl) {
+ NoSQLHandle nosqlHandle = getTenantHandle(ddl.getTenantId());
+ return execTableRequestNoWait(nosqlHandle, ddl.getDDL(),
+ ddl.getTableLimits());
+ }
+
+ @Override
+ public void waitForDone(String tenantId,
+ int waitMs,
+ TableResult result) {
+ NoSQLHandle nosqlHandle = getTenantHandle(tenantId);
+ ConcurrentDDLTest.this.waitForDone(nosqlHandle, waitMs, result);
+ }
+ }
+
+    /* UncaughtExceptionHandler for DDLThread */
+ private class TestExceptionHandler implements UncaughtExceptionHandler {
+
+        private Class<?> expectedExceptionCls;
+        private AtomicInteger numExpectedException;
+        private Map<String, Throwable> unexpectedExceptions;
+
+        public TestExceptionHandler(Class<?> expExCls) {
+            expectedExceptionCls = expExCls;
+            numExpectedException = new AtomicInteger();
+            unexpectedExceptions =
+                Collections.synchronizedMap(new HashMap<String, Throwable>());
+ }
+
+ @Override
+ public void uncaughtException(Thread t, Throwable e) {
+ if (e.getClass() == expectedExceptionCls) {
+ numExpectedException.incrementAndGet();
+ } else {
+ unexpectedExceptions.put(t.getName(), e);
+ }
+ }
+
+ public int getNumExpectedException() {
+ return numExpectedException.get();
+ }
+
+        public Map<String, Throwable> getUnexpectedException() {
+ return unexpectedExceptions;
+ }
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/CreationTimeTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/CreationTimeTest.java
new file mode 100644
index 00000000..2f059557
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/CreationTimeTest.java
@@ -0,0 +1,1520 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import oracle.nosql.driver.Consistency;
+import oracle.nosql.driver.Durability;
+import oracle.nosql.driver.Durability.ReplicaAckPolicy;
+import oracle.nosql.driver.Durability.SyncPolicy;
+import oracle.nosql.driver.SystemException;
+import oracle.nosql.driver.TimeToLive;
+import oracle.nosql.driver.Version;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PreparedStatement;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutRequest.Option;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.WriteMultipleRequest;
+import oracle.nosql.driver.ops.WriteMultipleResult;
+import oracle.nosql.driver.ops.WriteRequest;
+import oracle.nosql.driver.ops.WriteResult;
+import oracle.nosql.driver.values.IntegerValue;
+import oracle.nosql.driver.values.JsonNullValue;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.driver.values.NullValue;
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+
+/*
+ * Tests run in name order (FixMethodOrder) so that results are
+ * deterministic across runs.
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public class CreationTimeTest extends ProxyTestBase {
+
+ @Test
+ public void smokeTest() {
+
+ try {
+
+ MapValue key = new MapValue().put("id", 10);
+
+ MapValue value = new MapValue().put("id", 10).put("name", "jane");
+
+ /* drop a table */
+ TableResult tres = tableOperation(handle,
+ "drop table if exists testusers",
+ null, TableResult.State.DROPPED,
+ 20000);
+ assertNotNull(tres.getTableName());
+ assertTrue(tres.getTableState() == TableResult.State.DROPPED);
+ assertNull(tres.getTableLimits());
+
+ /* Create a table */
+ tres = tableOperation(
+ handle,
+ "create table if not exists testusers(id integer, " +
+ "name string, primary key(id))",
+ new TableLimits(500, 500, 50),
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ /* Create an index */
+ tres = tableOperation(
+ handle,
+ "create index if not exists Name on testusers(name)",
+ null,
+ TableResult.State.ACTIVE,
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ /* PUT */
+ PutRequest putRequest = new PutRequest()
+ .setValue(value) // key is 10
+ .setTableName("testusers");
+
+ long startTime1 = System.currentTimeMillis();
+
+ PutResult res = handle.put(putRequest);
+ assertNotNull("Put failed", res.getVersion());
+ long interval1 = System.currentTimeMillis() - startTime1;
+ // no return row so creation time is 0
+ checkCreationTime(res.getExistingCreationTime(), 0, 0);
+
+
+ long startTime2 = System.currentTimeMillis();
+ /* put another one. set TTL to test that path */
+ putRequest.setTTL(TimeToLive.ofHours(2));
+ value.put("id", 20); // key is 20
+            res = handle.put(putRequest);
+ long interval2 = System.currentTimeMillis() - startTime2;
+ // no return row so creation time is 0
+ checkCreationTime(res.getExistingCreationTime(), 0, 0);
+
+ /*
+ * Test ReturnRow for simple put of a row that exists. 2 cases:
+ * 1. unconditional (will return info)
+ * 2. if absent (will return info)
+ */
+ value.put("id", 20);
+ // turn on returning row
+ putRequest.setReturnRow(true);
+
+ PutResult pr = handle.put(putRequest);
+
+ assertNotNull(pr.getVersion()); /* success */
+ assertNotNull(pr.getExistingVersion());
+ assertNotNull(pr.getExistingValue());
+            assertTrue(pr.getExistingCreationTime() != 0);
+ checkCreationTime(pr.getExistingCreationTime(), startTime2, interval2);
+ assertTrue(pr.getExistingModificationTime() != 0);
+
+
+ putRequest.setOption(Option.IfAbsent);
+ pr = handle.put(putRequest);
+ assertNull(pr.getVersion()); /* failure */
+ assertNotNull(pr.getExistingVersion());
+ assertNotNull(pr.getExistingValue());
+ checkCreationTime(pr.getExistingCreationTime(), startTime2, interval2);
+ assertTrue(pr.getExistingModificationTime() != 0);
+
+ /* clean up */
+ putRequest.setReturnRow(false);
+ putRequest.setOption(null);
+
+ /* GET first row, id: 10 */
+ GetRequest getRequest = new GetRequest()
+ .setKey(key)
+ .setTableName("testusers");
+
+ GetResult res1 = handle.get(getRequest);
+ assertNotNull("Get failed", res1.getJsonValue());
+ assertReadKB(res1);
+
+ assertTrue(res1.getCreationTime() > 0);
+ assertTrue(res1.getCreationTime() - startTime1 <= interval1);
+ checkCreationTime(res1.getCreationTime(), startTime1, interval1);
+
+
+ /* DELETE same key, id: 10 */
+ DeleteRequest delRequest = new DeleteRequest()
+ .setKey(key)
+ .setTableName("testusers")
+ .setReturnRow(true);
+
+ DeleteResult del = handle.delete(delRequest);
+ assertTrue("Delete failed", del.getSuccess());
+ checkCreationTime(del.getExistingCreationTime(), startTime1, interval1);
+
+ /* GET -- no row, it was removed above */
+ getRequest.setTableName("testusers");
+ res1 = handle.get(getRequest);
+ assertNull(res1.getValue());
+ // no row hence creationTime is 0
+ assertEquals(0, res1.getCreationTime());
+ } catch (Exception e) {
+ checkErrorMessage(e);
+ e.printStackTrace();
+ fail("Exception in test");
+ }
+ }
+
+ @Test
+ public void testPutGetDelete() {
+
+ final String tableName = "testusers";
+ final int recordKB = 2;
+
+ /* Create a table */
+ TableResult tres = tableOperation(
+ handle,
+ "create table if not exists testusers(id integer, " +
+ "name string, primary key(id))",
+ new TableLimits(500, 500, 50),
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ final String name = genString((recordKB - 1) * 1024);
+ MapValue value = new MapValue().put("id", 10).put("name", name);
+ MapValue newValue = new MapValue().put("id", 11).put("name", name);
+ MapValue newValue1 = new MapValue().put("id", 12).put("name", name);
+ MapValue newValue2 = new MapValue().put("id", 13).put("name", name);
+
+ /* Durability will be ignored unless run with -Donprem=true */
+ Durability dur = new Durability(SyncPolicy.WRITE_NO_SYNC,
+ SyncPolicy.NO_SYNC,
+ ReplicaAckPolicy.NONE);
+
+
+ /* Put a row */
+ long startTime = System.currentTimeMillis();
+ PutRequest putReq = new PutRequest()
+ .setValue(value) // key is 10
+ .setDurability(dur)
+ .setTableName(tableName)
+ .setReturnRow(true);
+ PutResult putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ true /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB,
+ false /* put overWrite */ );
+ long interval = System.currentTimeMillis() - startTime;
+ // no return row hence creationTime is 0
+ assertEquals(0, putRes.getExistingCreationTime());
+
+
+ /* Put a row again with SetReturnRow(false).
+ * expect no row returned
+ */
+ putReq.setReturnRow(false);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ true /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB,
+ true /* put over write */);
+ Version oldVersion = putRes.getVersion();
+ // no return row
+ assertEquals(0, putRes.getExistingCreationTime());
+
+ /*
+ * Put row again with SetReturnRow(true),
+ * expect existing row returned.
+ */
+ putReq.setReturnRow(true);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ true /* shouldSucceed */,
+ true /* rowPresent */,
+ value /* expPrevValue */,
+ oldVersion /* expPrevVersion */,
+                       true, /* modtime should be recent */
+ recordKB,
+ true /* put overWrite */);
+ oldVersion = putRes.getVersion();
+ checkCreationTime(putRes.getExistingCreationTime(), startTime, interval);
+
+ /*
+ * Put a new row with SetReturnRow(true),
+ * expect no existing row returned.
+ */
+ putReq = new PutRequest()
+ .setValue(newValue)
+ .setDurability(dur)
+ .setTableName(tableName)
+ .setReturnRow(true);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ true /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB,
+ false /* put overWrite */);
+ // no return row
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+
+ /* PutIfAbsent an existing row, it should fail */
+ putReq = new PutRequest()
+ .setOption(Option.IfAbsent)
+ .setValue(value)
+ .setDurability(dur)
+ .setTableName(tableName);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ false /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB,
+ false /* put overWrite */);
+ // no return row
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+
+ /*
+ * PutIfAbsent fails + SetReturnRow(true),
+ * return existing value and version
+ */
+ putReq.setReturnRow(true);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ false /* shouldSucceed */,
+ true /* rowPresent */,
+ value /* expPrevValue */,
+ oldVersion /* expPrevVersion */,
+ true, /* modtime should be recent */
+ recordKB,
+ false /* put overWrite */);
+ checkCreationTime(putRes.getExistingCreationTime(), startTime, interval);
+
+ /* PutIfPresent an existing row, it should succeed */
+ putReq = new PutRequest()
+ .setOption(Option.IfPresent)
+ .setValue(value)
+ .setDurability(dur)
+ .setTableName(tableName);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ true /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB,
+ false /* put overWrite */);
+ // no return row
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+ oldVersion = putRes.getVersion();
+
+ /*
+ * PutIfPresent succeed + SetReturnRow(true),
+ * expect existing row returned.
+ */
+ putReq.setReturnRow(true);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ true /* shouldSucceed */,
+ true /* rowPresent */,
+ value /* expPrevValue */,
+ oldVersion /* expPrevVersion */,
+                       true, /* modtime should be recent */
+ recordKB,
+ false /* put overWrite */);
+ checkCreationTime(putRes.getExistingCreationTime(), startTime, interval);
+ Version ifVersion = putRes.getVersion();
+
+ /* PutIfPresent a new row, it should fail */
+ putReq = new PutRequest()
+ .setOption(Option.IfPresent)
+ .setValue(newValue1)
+ .setDurability(dur)
+ .setTableName(tableName);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ false /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB,
+ false /* put overWrite */);
+ // op didn't succeed
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+
+ /*
+ * PutIfPresent fail + SetReturnRow(true),
+ * expect no existing row returned.
+ */
+ putReq.setReturnRow(true);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ false /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB,
+ false /* put overWrite */);
+ // op didn't succeed, no return row
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+
+ /* PutIfAbsent a new row, it should succeed */
+ putReq = new PutRequest()
+ .setOption(Option.IfAbsent)
+ .setValue(newValue1)
+ .setDurability(dur)
+ .setTableName(tableName);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ true /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB,
+ false /* put overWrite */);
+ assertNull(putRes.getExistingRowMetadata());
+ // no returnRow
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+
+ /* PutIfAbsent success + SetReturnRow(true) */
+ putReq.setValue(newValue2).setReturnRow(true);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ true /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB,
+ false /* put overWrite */);
+ // no return row
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+
+ /*
+ * PutIfVersion an existing row with unmatched version, it should fail.
+ */
+ putReq = new PutRequest()
+ .setOption(Option.IfVersion)
+ .setMatchVersion(oldVersion)
+ .setValue(value)
+ .setDurability(dur)
+ .setTableName(tableName);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ false /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB,
+ false /* put overWrite */);
+ // op didn't succeed
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+
+ /*
+ * PutIfVersion fails + SetReturnRow(true),
+ * expect existing row returned.
+ */
+ putReq.setReturnRow(true);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ false /* shouldSucceed */,
+ true /* rowPresent */,
+ value /* expPrevValue */,
+ ifVersion /* expPrevVersion */,
+ true, /* modtime should be recent */
+ recordKB,
+ false /* put overWrite */);
+ checkCreationTime(putRes.getExistingCreationTime(), startTime, interval);
+
+
+ /*
+ * Put an existing row with matching version, it should succeed.
+ */
+ putReq = new PutRequest()
+ .setOption(Option.IfVersion)
+ .setMatchVersion(ifVersion)
+ .setValue(value)
+ .setDurability(dur)
+ .setTableName(tableName);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ true /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB,
+ false /* put overWrite */);
+ // no return row
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+ ifVersion = putRes.getVersion();
+
+ /*
+ * PutIfVersion succeed + SetReturnRow(true),
+ * expect no existing row returned.
+ */
+ putReq.setMatchVersion(ifVersion).setReturnRow(true);
+ putRes = handle.put(putReq);
+ checkPutResult(putReq, putRes,
+ true /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB,
+ false /* put overWrite */);
+ // no return row
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+ Version newVersion = putRes.getVersion();
+
+
+ /*
+ * Get
+ */
+ MapValue key = new MapValue().put("id", 10);
+
+ /* Get a row */
+ GetRequest getReq = new GetRequest()
+ .setKey(key)
+ .setTableName(tableName);
+ GetResult getRes = handle.get(getReq);
+ checkGetResult(getReq, getRes,
+ true /* rowPresent*/,
+ value,
+ null, /* Don't check version if Consistency.EVENTUAL */
+ true, /* modtime should be recent */
+ recordKB);
+ checkCreationTime(getRes.getCreationTime(), startTime, interval);
+
+ /* Get a row with ABSOLUTE consistency */
+ getReq.setConsistency(Consistency.ABSOLUTE);
+ getRes = handle.get(getReq);
+ checkGetResult(getReq, getRes,
+ true /* rowPresent*/,
+ value,
+ newVersion,
+ true, /* modtime should be recent */
+ recordKB);
+ checkCreationTime(getRes.getCreationTime(), startTime, interval);
+
+ getReq = new GetRequest()
+ .setKey(key)
+ .setTableName(tableName);
+ getRes = handle.get(getReq);
+ checkGetResult(getReq, getRes,
+ true /* rowPresent*/,
+ value,
+ null, /* Don't check version if Consistency.EVENTUAL */
+ true, /* modtime should be recent */
+ recordKB);
+ checkCreationTime(getRes.getCreationTime(), startTime, interval);
+
+
+ /* Get non-existing row */
+ key = new MapValue().put("id", 100);
+ getReq = new GetRequest()
+ .setKey(key)
+ .setTableName(tableName);
+ getRes = handle.get(getReq);
+ checkGetResult(getReq, getRes,
+ false /* rowPresent*/,
+ null /* expValue */,
+ null /* expVersion */,
+ false, /* modtime should be zero */
+ recordKB);
+ // no row
+ assertEquals(0, getRes.getCreationTime());
+
+ /* Get a row with ABSOLUTE consistency */
+ getReq.setConsistency(Consistency.ABSOLUTE);
+ getRes = handle.get(getReq);
+ checkGetResult(getReq, getRes,
+ false /* rowPresent*/,
+ null /* expValue */,
+ null /* expVersion */,
+ false, /* modtime should be zero */
+ recordKB);
+ // no row
+ assertEquals(0, getRes.getCreationTime());
+
+ /* Delete a row */
+ key = new MapValue().put("id", 10);
+ DeleteRequest delReq = new DeleteRequest()
+ .setKey(key)
+ .setTableName(tableName);
+ DeleteResult delRes = handle.delete(delReq);
+ checkDeleteResult(delReq, delRes,
+ true /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB);
+ // no return row
+ checkCreationTime(delRes.getExistingCreationTime(), 0, 0);
+
+ /* Put the row back to store */
+ startTime = System.currentTimeMillis();
+ putReq = new PutRequest()
+ .setValue(value)
+ .setReturnRow(true)
+ .setTableName(tableName);
+ putRes = handle.put(putReq);
+ oldVersion = putRes.getVersion();
+ assertNotNull(oldVersion);
+ interval = System.currentTimeMillis() - startTime;
+        // In NsonProtocol.writeReturnRow():1344 the result contains
+        // creationTime and modificationTime, but the existing version is
+        // null, so they are all skipped: no return row.
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+
+ /* Delete succeed + setReturnRow(true), existing row returned. */
+ delReq.setReturnRow(true);
+ delRes = handle.delete(delReq);
+ checkDeleteResult(delReq, delRes,
+ true /* shouldSucceed */,
+ true /* rowPresent */,
+ value /* expPrevValue */,
+ oldVersion /* expPrevVersion */,
+                          true, /* modtime should be recent */
+ recordKB);
+ checkCreationTime(delRes.getExistingCreationTime(), startTime, interval);
+
+ /* Delete fail + setReturnRow(true), no existing row returned. */
+ delRes = handle.delete(delReq);
+ checkDeleteResult(delReq, delRes,
+ false /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB);
+ assertEquals(0, delRes.getExistingCreationTime());
+
+ /* Put the row back to store */
+ startTime = System.currentTimeMillis();
+ putReq = new PutRequest()
+ .setValue(value)
+ .setTableName(tableName);
+ putRes = handle.put(putReq);
+ ifVersion = putRes.getVersion();
+ interval = System.currentTimeMillis() - startTime;
+ // no return row
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+
+
+ /* DeleteIfVersion with unmatched version, it should fail */
+ delReq = new DeleteRequest()
+ .setMatchVersion(oldVersion)
+ .setKey(key)
+ .setTableName(tableName);
+ delRes = handle.delete(delReq);
+ checkDeleteResult(delReq, delRes,
+ false /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB);
+ // no return row
+ assertEquals(0, delRes.getExistingCreationTime());
+
+ /*
+ * DeleteIfVersion with unmatched version + setReturnRow(true),
+ * the existing row returned.
+ */
+ delReq.setReturnRow(true);
+ delRes = handle.delete(delReq);
+ checkDeleteResult(delReq, delRes,
+ false /* shouldSucceed */,
+ true /* rowPresent */,
+ value /* expPrevValue */,
+ ifVersion /* expPrevVersion */,
+ true, /* modtime should be recent */
+ recordKB);
+ checkCreationTime(delRes.getExistingCreationTime(), startTime, interval);
+
+ /* DeleteIfVersion with matched version, it should succeed. */
+ delReq = new DeleteRequest()
+ .setMatchVersion(ifVersion)
+ .setKey(key)
+ .setTableName(tableName);
+ delRes = handle.delete(delReq);
+ checkDeleteResult(delReq, delRes,
+ true /* shouldSucceed */,
+ false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB);
+ // no return row
+ checkCreationTime(delRes.getExistingCreationTime(), 0, 0);
+
+ /* Put the row back to store */
+ startTime = System.currentTimeMillis();
+ putReq = new PutRequest()
+ .setValue(value)
+ .setTableName(tableName);
+ putRes = handle.put(putReq);
+ ifVersion = putRes.getVersion();
+ interval = System.currentTimeMillis() - startTime;
+ // no return row
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+
+ /*
+ * DeleteIfVersion with matched version + setReturnRow(true),
+ * it should succeed but no existing row returned.
+ */
+ delReq.setMatchVersion(ifVersion).setReturnRow(true);
+ delRes = handle.delete(delReq);
+ checkDeleteResult(delReq, delRes,
+ true /* shouldSucceed */,
+                          false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB);
+ // no return row
+ checkCreationTime(delRes.getExistingCreationTime(), 0, 0);
+
+ /* DeleteIfVersion with a key not existing, it should fail. */
+ delReq = new DeleteRequest()
+ .setMatchVersion(ifVersion)
+ .setKey(key)
+ .setTableName(tableName);
+ delRes = handle.delete(delReq);
+ checkDeleteResult(delReq, delRes,
+ false /* shouldSucceed */,
+                          false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB);
+ assertEquals(0, delRes.getExistingCreationTime());
+
+ /*
+ * DeleteIfVersion with a key not existing + setReturnRow(true),
+ * it should fail and no existing row returned.
+ */
+ delReq.setReturnRow(true);
+ delRes = handle.delete(delReq);
+ checkDeleteResult(delReq, delRes,
+ false /* shouldSucceed */,
+                          false /* rowPresent */,
+ null /* expPrevValue */,
+ null /* expPrevVersion */,
+ false, /* modtime should be zero */
+ recordKB);
+ assertEquals(0, delRes.getExistingCreationTime());
+ }
+
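+    /**
+     * Asserts that creationTime falls within [startTime, startTime + interval].
+     * Callers pass (0, 0) to assert that no creation time was returned,
+     * i.e. the value is exactly zero.
+     */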
+    private void checkCreationTime(long creationTime,
+                                   long startTime,
+                                   long interval) {
+        assertTrue("creationTime should be >= " + startTime + ", diff: " +
+                   (creationTime - startTime),
+                   creationTime >= startTime);
+
+        assertTrue("creationTime not in interval: " + interval,
+                   creationTime - startTime <= interval);
+    }
+
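+    /**
+     * Asserts that modTime is recent (within the last 2 seconds) when
+     * modTimeRecent is true, and exactly zero otherwise.
+     */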
+ private void checkModTime(long modTime, boolean modTimeRecent) {
+ if (modTimeRecent) {
+ if (modTime < (System.currentTimeMillis() - 2000)) {
+ fail("Expected modtime to be recent, got " + modTime);
+ }
+ } else {
+ if (modTime != 0) {
+ fail("Expected modtime to be zero, got " + modTime);
+ }
+ }
+ }
+
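+    /**
+     * Checks a PutResult: version presence (success/failure), returned
+     * existing row info, existing modification time, and, when not onprem,
+     * the expected read/write cost in KB.
+     */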
+ private void checkPutResult(PutRequest request,
+ PutResult result,
+ boolean shouldSucceed,
+ boolean rowPresent,
+ MapValue expPrevValue,
+ Version expPrevVersion,
+ boolean modTimeRecent,
+ int recordKB,
+ boolean putOverWrite) {
+ if (shouldSucceed) {
+ assertNotNull("Put should succeed", result.getVersion());
+ } else {
+ assertNull("Put should fail", result.getVersion());
+ }
+ checkExistingValueVersion(request, result, shouldSucceed, rowPresent,
+ expPrevValue, expPrevVersion);
+
+ checkModTime(result.getExistingModificationTime(), modTimeRecent);
+
+ int[] expCosts = getPutReadWriteCost(request,
+ shouldSucceed,
+ rowPresent,
+ recordKB,
+ putOverWrite);
+
+ if (onprem == false) {
+ assertReadKB(result, expCosts[0], true /* isAbsolute */);
+ assertWriteKB(result, expCosts[1]);
+ }
+ }
+
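+    /**
+     * Checks a DeleteResult: success flag, returned existing row info,
+     * existing modification time, and, when not onprem, the expected
+     * read/write cost in KB.
+     */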
+ private void checkDeleteResult(DeleteRequest request,
+ DeleteResult result,
+ boolean shouldSucceed,
+ boolean rowPresent,
+ MapValue expPrevValue,
+ Version expPrevVersion,
+ boolean modTimeRecent,
+ int recordKB) {
+
+ assertEquals("Delete should " + (shouldSucceed ? "succeed" : " fail"),
+ shouldSucceed, result.getSuccess());
+ checkExistingValueVersion(request, result, shouldSucceed, rowPresent,
+ expPrevValue, expPrevVersion);
+
+ checkModTime(result.getExistingModificationTime(), modTimeRecent);
+
+ int[] expCosts = getDeleteReadWriteCost(request,
+ shouldSucceed,
+ rowPresent,
+ recordKB);
+
+ if (onprem == false) {
+ assertReadKB(result, expCosts[0], true /* isAbsolute */);
+ assertWriteKB(result, expCosts[1]);
+ }
+ }
+
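+    /**
+     * Checks a GetResult: value and version when the row is present,
+     * modification time, and, when not onprem, the expected read cost
+     * (minRead for a miss, recordKB for a hit).
+     */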
+ private void checkGetResult(GetRequest request,
+ GetResult result,
+ boolean rowPresent,
+ MapValue expValue,
+ Version expVersion,
+ boolean modTimeRecent,
+ int recordKB) {
+
+
+ if (rowPresent) {
+ if (expValue != null) {
+ assertEquals("Unexpected value", expValue, result.getValue());
+ } else {
+ assertNotNull("Unexpected value", expValue);
+ }
+ if (expVersion != null) {
+ assertArrayEquals("Unexpected version",
+ expVersion.getBytes(),
+ result.getVersion().getBytes());
+ } else {
+ assertNotNull("Unexpected version", result.getVersion());
+ }
+ } else {
+ assertNull("Unexpected value", expValue);
+ assertNull("Unexpected version", result.getVersion());
+ }
+
+ checkModTime(result.getModificationTime(), modTimeRecent);
+
+ final int minRead = getMinRead();
+ int expReadKB = rowPresent ? recordKB : minRead;
+
+ if (onprem == false) {
+ assertReadKB(result, expReadKB,
+ (request.getConsistencyInternal() == Consistency.ABSOLUTE));
+ assertWriteKB(result, 0);
+ }
+ }
+
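+    /**
+     * Verifies that the existing (previous) value and version are returned
+     * only when a return row is expected, and that they match the expected
+     * values when those are provided.
+     */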
+ private void checkExistingValueVersion(WriteRequest request,
+ WriteResult result,
+ boolean shouldSucceed,
+ boolean rowPresent,
+ MapValue expPrevValue,
+ Version expPrevVersion) {
+
+ boolean hasReturnRow = rowPresent;
+ if (hasReturnRow) {
+ assertNotNull("PrevValue should be non-null",
+ result.getExistingValueInternal());
+ if (expPrevValue != null) {
+ assertEquals("Unexpected PrevValue",
+ expPrevValue, result.getExistingValueInternal());
+ }
+ assertNotNull("PrevVersion should be non-null",
+ result.getExistingVersionInternal());
+ if (expPrevVersion != null) {
+ assertNotNull(result.getExistingVersionInternal());
+ assertArrayEquals("Unexpected PrevVersion",
+ expPrevVersion.getBytes(),
+ result.getExistingVersionInternal().getBytes());
+ }
+ } else {
+ assertNull("PrevValue should be null",
+ result.getExistingValueInternal());
+ assertNull("PrevVersion should be null",
+ result.getExistingVersionInternal());
+ }
+ }
+
+ @Test
+ public void testReadQuery() throws InterruptedException {
+ final String createTable1 =
+ "create table tjson(id integer, info json, primary key(id))";
+ final String createTable2 =
+ "create table trecord(id integer, " +
+ "info record(name string, age integer), " +
+ "primary key(id))";
+
+ tableOperation(handle, createTable1, new TableLimits(10, 10, 1),
+ null, TableResult.State.ACTIVE, null);
+ tableOperation(handle, createTable2, new TableLimits(10, 10, 1),
+ null, TableResult.State.ACTIVE, null);
+
+ MapValue rowNull = new MapValue()
+ .put("id", 0)
+ .put("info",
+ new MapValue()
+ .put("name", NullValue.getInstance())
+ .put("age", 20));
+ MapValue rowJsonNull = new MapValue()
+ .put("id", 0)
+ .put("info",
+ new MapValue()
+ .put("name", JsonNullValue.getInstance())
+ .put("age", 20));
+
+ MapValue[] rows = new MapValue[] {rowNull, rowJsonNull};
+        Map<String, MapValue> tableExpRows = new HashMap<>();
+ tableExpRows.put("tjson", rowJsonNull);
+ tableExpRows.put("trecord", rowNull);
+
+ long startTime = System.currentTimeMillis();
+ /*
+ * Put rows with NullValue or JsonNullValue, they should be converted
+ * to the right value for the target type.
+ */
+        for (Map.Entry<String, MapValue> e : tableExpRows.entrySet()) {
+ String table = e.getKey();
+ MapValue expRow = e.getValue();
+
+ for (MapValue row : rows) {
+ PutRequest putReq = new PutRequest()
+ .setTableName(table)
+ .setValue(row);
+
+ PutResult putRet = handle.put(putReq);
+ Version pVersion = putRet.getVersion();
+ assertNotNull(pVersion);
+ long interval = System.currentTimeMillis() - startTime;
+
+ MapValue key = new MapValue().put("id", row.get("id"));
+ GetRequest getReq = new GetRequest()
+ .setTableName(table)
+ .setConsistency(Consistency.ABSOLUTE)
+ .setKey(key);
+ GetResult getRet = handle.get(getReq);
+ assertEquals(expRow, getRet.getValue());
+ assertNotNull(getRet.getVersion());
+ assertTrue(Arrays.equals(pVersion.getBytes(),
+ getRet.getVersion().getBytes()));
+ checkCreationTime(getRet.getCreationTime(), startTime, interval);
+ assertTrue(getRet.getModificationTime() > 0);
+ }
+ }
+ long interval = System.currentTimeMillis() - startTime;
+
+        /*
+         * Query the creation_time, creation_time_millis and
+         * modification_time functions and verify the returned values.
+         */
+ String query =
+ "select id, info, creation_time($t) as ct, " +
+ "creation_time_millis($t) as ctm, " +
+ "modification_time($t) as mt " +
+ "from tjson $t ";
+ PrepareRequest prepReq = new PrepareRequest().setStatement(query);
+ PrepareResult prepRet = handle.prepare(prepReq);
+ PreparedStatement prepStmt = prepRet.getPreparedStatement();
+
+ QueryRequest queryReq = new QueryRequest()
+ .setPreparedStatement(prepStmt);
+
+ boolean shouldRetry = false;
+        do {
+            shouldRetry = false; /* reset so a successful retry exits */
+            try {
+ QueryResult queryRet = handle.query(queryReq);
+ assertEquals(1, queryRet.getResults().size());
+
+ for (MapValue v : queryRet.getResults()) {
+ assertTrue(v.get("ct").isTimestamp());
+ checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval);
+ assertTrue(v.get("ctm").isLong());
+ checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval);
+ assertTrue(v.get("mt").isTimestamp());
+ assertTrue(v.get("mt").asTimestamp().getValue().getTime() > 0);
+ }
+
+ } catch (SystemException e) {
+ shouldRetry = e.okToRetry();
+ System.out.println("Caught " + (e.okToRetry() ? "retryable" :
+ "") + " ex: " + e.getMessage());
+ System.out.println(
+ "Retrying query: " + queryReq.getStatement());
+ e.printStackTrace();
+ Thread.sleep(300);
+ }
+ } while (shouldRetry);
+ }
+
+ @Test
+ public void testTableMultiWrite() {
+ final String createTable =
+ "create table tMW(s integer, id integer, info json, primary key(shard(s), id))";
+
+ tableOperation(handle, createTable, new TableLimits(10, 10, 1),
+ null, TableResult.State.ACTIVE, null);
+
+ /* multi write */
+ WriteMultipleRequest wmReq = new WriteMultipleRequest();
+ String tableName = "tMW";
+
+ for (int i = 0; i < 10; i++) {
+ PutRequest pr = new PutRequest()
+ .setTableName(tableName)
+ .setRowMetadata("{\"n\":" + i + "}")
+ .setValue(new MapValue()
+ .put("s", 1)
+ .put("id", i)
+ .put("info", new MapValue().put("name", "John")));
+ wmReq.add(pr, true);
+ }
+
+ long startTime = System.currentTimeMillis();
+ WriteMultipleResult wmRes = handle.writeMultiple(wmReq);
+ assertEquals(10, wmRes.getResults().size());
+ long interval = System.currentTimeMillis() - startTime;
+
+
+ QueryRequest queryReq = new QueryRequest()
+ .setStatement("select s, id, $t.info.name as name, " +
+ "creation_time($t) as ct, creation_time_millis($t) as ctm " +
+ "from " + tableName + " $t ORDER BY id ASC");
+ QueryResult qRes = handle.query(queryReq);
+
+ int i = 0;
+ for (MapValue v : qRes.getResults()) {
+ assertEquals(1, v.get("s").asInteger().getInt());
+ assertEquals(i, v.get("id").asInteger().getInt());
+ assertEquals("John", v.get("name").asString().getString());
+ assertTrue(v.get("ct").isTimestamp());
+ checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval);
+ assertTrue(v.get("ctm").isLong());
+ checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval);
+ i++;
+ }
+ assertEquals(10, qRes.getResults().size());
+ assertEquals(10, i);
+
+ wmReq.getOperations().forEach((req) -> {
+ PutRequest put = (PutRequest)req.getRequest();
+ put.setReturnRow(true);
+ });
+
+ wmRes = handle.writeMultiple(wmReq);
+ wmRes.getResults().forEach((res) -> {
+ checkCreationTime(res.getExistingCreationTime(), startTime, interval);
+ });
+ }
+
+ @Test
+ public void testNullJsonNull() throws InterruptedException {
+ final String createTable1 =
+ "create table tjson(id integer, info json, primary key(id))";
+ final String createTable2 =
+ "create table trecord(id integer, " +
+ "info record(name string, age integer), " +
+ "primary key(id))";
+
+ tableOperation(handle, createTable1, new TableLimits(10, 10, 1),
+ null, TableResult.State.ACTIVE, null);
+ tableOperation(handle, createTable2, new TableLimits(10, 10, 1),
+ null, TableResult.State.ACTIVE, null);
+
+ MapValue rowNull = new MapValue()
+ .put("id", 0)
+ .put("info",
+ new MapValue()
+ .put("name", NullValue.getInstance())
+ .put("age", 20));
+ MapValue rowJsonNull = new MapValue()
+ .put("id", 0)
+ .put("info",
+ new MapValue()
+ .put("name", JsonNullValue.getInstance())
+ .put("age", 20));
+
+ MapValue[] rows = new MapValue[] {rowNull, rowJsonNull};
+        Map<String, MapValue> tableExpRows = new HashMap<>();
+ tableExpRows.put("tjson", rowJsonNull);
+ tableExpRows.put("trecord", rowNull);
+
+ long startTime = System.currentTimeMillis();
+ /*
+ * Put rows with NullValue or JsonNullValue, they should be converted
+ * to the right value for the target type.
+ */
+        for (Map.Entry<String, MapValue> e : tableExpRows.entrySet()) {
+ String table = e.getKey();
+ MapValue expRow = e.getValue();
+
+ for (MapValue row : rows) {
+ PutRequest putReq = new PutRequest()
+ .setTableName(table)
+ .setValue(row);
+
+ PutResult putRet = handle.put(putReq);
+ Version pVersion = putRet.getVersion();
+ assertNotNull(pVersion);
+ long interval = System.currentTimeMillis() - startTime;
+
+ MapValue key = new MapValue().put("id", row.get("id"));
+ GetRequest getReq = new GetRequest()
+ .setTableName(table)
+ .setConsistency(Consistency.ABSOLUTE)
+ .setKey(key);
+ GetResult getRet = handle.get(getReq);
+ assertEquals(expRow, getRet.getValue());
+ assertNotNull(getRet.getVersion());
+ assertTrue(Arrays.equals(pVersion.getBytes(),
+ getRet.getVersion().getBytes()));
+ checkCreationTime(getRet.getCreationTime(), startTime, interval);
+ }
+ }
+ long interval = System.currentTimeMillis() - startTime;
+
+ String query =
+ "select id, info, creation_time($t) as ct, " +
+ "creation_time_millis($t) as ctm " +
+ "from tjson $t";
+ PrepareRequest prepReq = new PrepareRequest().setStatement(query);
+ PrepareResult prepRet = handle.prepare(prepReq);
+ PreparedStatement prepStmt = prepRet.getPreparedStatement();
+
+ QueryRequest queryReq = new QueryRequest()
+ .setPreparedStatement(prepStmt);
+
+ boolean shouldRetry = false;
+        do {
+            shouldRetry = false;
+            try {
+ QueryResult queryRet = handle.query(queryReq);
+ assertEquals(1, queryRet.getResults().size());
+
+ MapValue v = queryRet.getResults().get(0);
+ assertTrue(v.get("ct").isTimestamp());
+ checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval);
+ assertTrue(v.get("ctm").isLong());
+ checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval);
+ } catch (SystemException e) {
+ shouldRetry = e.okToRetry();
+ System.out.println("Caught " + (e.okToRetry() ? "retryable" : "") + " ex: " + e.getMessage());
+ System.out.println("Retrying query: " + queryReq.getStatement());
+ e.printStackTrace();
+ Thread.sleep(300);
+ }
+ } while (shouldRetry);
+
+
+ query =
+ "select id, info, creation_time($t) as ct, " +
+ "creation_time_millis($t) as ctm " +
+ "from trecord $t";
+ prepReq = new PrepareRequest().setStatement(query);
+ prepRet = handle.prepare(prepReq);
+ prepStmt = prepRet.getPreparedStatement();
+
+ queryReq = new QueryRequest()
+ .setPreparedStatement(prepStmt);
+
+ shouldRetry = false;
+        do {
+            shouldRetry = false;
+            try {
+ QueryResult queryRet = handle.query(queryReq);
+ assertEquals(1, queryRet.getResults().size());
+
+ MapValue v = queryRet.getResults().get(0);
+ assertTrue(v.get("ct").isTimestamp());
+ checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval);
+ assertTrue(v.get("ctm").isLong());
+ checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval);
+ } catch (SystemException e) {
+ shouldRetry = e.okToRetry();
+ System.out.println("Caught " + (e.okToRetry() ? "retryable" : "") + " ex: " + e.getMessage());
+ System.out.println("Retrying query: " + queryReq.getStatement());
+ e.printStackTrace();
+ Thread.sleep(300);
+ }
+ } while (shouldRetry);
+ }
+
+ private String genString(int length) {
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < length; i++) {
+ sb.append((char)('A' + i % 26));
+ }
+ return sb.toString();
+ }
+
+ @Test
+ public void testCollection() {
+ final String tableName = "testusersColl";
+
+ /* Create a table */
+ TableResult tres = tableOperation(
+ handle,
+ "create table if not exists " + tableName +
+ "(id integer, primary key(id)) as json collection",
+ new TableLimits(500, 500, 50),
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ MapValue value = new MapValue().put("id", 10).put("name", "John");
+
+ /* Put row */
+ long startTime = System.currentTimeMillis();
+ PutRequest putReq = new PutRequest()
+ .setValue(value)
+ .setTableName(tableName);
+ PutResult putRes = handle.put(putReq);
+ long interval = System.currentTimeMillis() - startTime;
+        // no return row
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+
+ /* Get the row back */
+ GetRequest getReq = new GetRequest()
+ .setTableName(tableName)
+ .setKey(value);
+ GetResult getRet = handle.get(getReq);
+ checkCreationTime(getRet.getCreationTime(), startTime, interval);
+
+ /* Delete row check prev/existing is still null */
+ DeleteRequest delReq = new DeleteRequest()
+ .setKey(value)
+ .setTableName(tableName)
+ .setReturnRow(true);
+ DeleteResult delRes = handle.delete(delReq);
+ checkCreationTime(delRes.getExistingCreationTime(), startTime, interval);
+
+
+ /* Put again */
+ startTime = System.currentTimeMillis();
+ putReq = new PutRequest()
+ .setValue(value)
+ .setTableName(tableName);
+ putRes = handle.put(putReq);
+ interval = System.currentTimeMillis() - startTime;
+        // no return row
+ checkCreationTime(putRes.getExistingCreationTime(), 0, 0);
+
+ /* Query */
+ QueryRequest queryReq = new QueryRequest()
+ .setStatement("select id, name, creation_time($t) as ct," +
+ "creation_time_millis($t) as ctm from " +
+ tableName + " $t");
+ QueryResult qRes = handle.query(queryReq);
+
+ assertEquals(1, qRes.getResults().size());
+ assertEquals(10, qRes.getResults().get(0).get("id").getInt());
+ assertEquals("John", qRes.getResults().get(0).get("name").getString());
+ assertTrue(qRes.getResults().get(0).get("ct").isTimestamp());
+ checkCreationTime(qRes.getResults().get(0).get("ct").asTimestamp().getValue().getTime(), startTime, interval);
+ assertTrue(qRes.getResults().get(0).get("ctm").isLong());
+ checkCreationTime(qRes.getResults().get(0).get("ctm").asLong().getValue(), startTime, interval);
+ }
+
+ @Test
+ public void testCollectionMultiWrite() {
+ final String tableName = "testusersCollMWrite";
+
+ /* Create a table */
+ TableResult tres = tableOperation(
+ handle,
+ "create table if not exists " + tableName +
+ "(s integer, id integer, primary key(shard(s), id)) as json collection",
+ new TableLimits(500, 500, 50),
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ /* multi write */
+ WriteMultipleRequest wmReq = new WriteMultipleRequest();
+
+ for (int i = 0; i < 10; i++) {
+ PutRequest pr = new PutRequest()
+ .setTableName(tableName)
+ .setValue(new MapValue()
+ .put("s", 1)
+ .put("id", i)
+ .put("name", "John"));
+ wmReq.add(pr, true);
+ }
+ long startTime = System.currentTimeMillis();
+ WriteMultipleResult wmRes = handle.writeMultiple(wmReq);
+ assertEquals(10, wmRes.getResults().size());
+ long interval = System.currentTimeMillis() - startTime;
+
+        /* query the creation times */
+ QueryRequest queryReq = new QueryRequest()
+ .setStatement("select s, id, name, creation_time($t) as ct," +
+ "creation_time_millis($t) as ctm from " +
+ tableName + " $t ORDER BY id ASC");
+ QueryResult qRes = handle.query(queryReq);
+
+ int i = 0;
+ for (MapValue v : qRes.getResults()) {
+ assertEquals(1, v.get("s").asInteger().getInt());
+ assertEquals(i, v.get("id").asInteger().getInt());
+ assertEquals("John", v.get("name").asString().getString());
+ assertTrue(v.get("ct").isTimestamp());
+ checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval);
+ assertTrue(v.get("ctm").isLong());
+ checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval);
+ i++;
+ }
+ assertEquals(10, qRes.getResults().size());
+ assertEquals(10, i);
+ }
+
+ @Test
+ public void testWriteQuery() throws InterruptedException {
+ final String tableName = "t";
+ final String createTable1 =
+ "create table "+ tableName +" (s integer, id integer, info json, primary key(shard(s), id))";
+
+ tableOperation(handle, createTable1, new TableLimits(10, 10, 1),
+ null, TableResult.State.ACTIVE, null);
+
+ // do a few inserts
+ long startTime = System.currentTimeMillis();
+ String query = "declare $id integer; insert into " + tableName + " values( 0, $id, {})";
+ for (int i = 0; i < 10; i++) {
+ PrepareRequest prepReq = new PrepareRequest().setStatement(query);
+ PrepareResult prepRet = handle.prepare(prepReq);
+
+ PreparedStatement prepStmt = prepRet.getPreparedStatement();
+ prepStmt.setVariable("$id", new IntegerValue(i));
+
+ QueryRequest queryReq = new QueryRequest()
+ .setPreparedStatement(prepStmt);
+
+ QueryResult queryRes = handle.query(queryReq);
+ assertNotNull(queryRes);
+ assertEquals(1, queryRes.getResults().get(0).asMap().get("NumRowsInserted").asInteger().getInt());
+ }
+ long interval = System.currentTimeMillis() - startTime;
+
+        // check they have the correct creation times
+ query =
+ "select $t.s, $t.id, $t.info, creation_time($t) as ct, " +
+ "creation_time_millis($t) as ctm from " +
+ tableName + " $t order by $t.id";
+ PrepareRequest prepReq = new PrepareRequest().setStatement(query);
+ PrepareResult prepRet = handle.prepare(prepReq);
+ PreparedStatement prepStmt = prepRet.getPreparedStatement();
+
+ QueryRequest queryReq = new QueryRequest()
+ .setPreparedStatement(prepStmt);
+
+ QueryResult qRes = handle.query(queryReq);
+ int i = 0;
+ for (MapValue v : qRes.getResults()) {
+ assertEquals(0, v.get("s").asInteger().getInt());
+ assertEquals(i, v.get("id").asInteger().getInt());
+ assertTrue(v.get("ct").isTimestamp());
+ checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval);
+ assertTrue(v.get("ctm").isLong());
+ checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval);
+ i++;
+ }
+ assertEquals(10, i);
+
+
+ // update many
+ query = "update " + tableName + " t SET t.info = t.info where t.s = 0";
+
+ queryReq = new QueryRequest()
+ .setStatement(query);
+
+ qRes = handle.query(queryReq);
+ assertEquals(1, qRes.getResults().size());
+ assertEquals(10, qRes.getResults().get(0).asMap().get("NumRowsUpdated").asInteger().getInt());
+
+
+        // check they have the correct creation times
+ query =
+ "select $t.s, $t.id, $t.info, creation_time($t) as ct, " +
+ "creation_time_millis($t) as ctm from " +
+ tableName + " $t order by $t.id";
+ prepReq = new PrepareRequest().setStatement(query);
+ prepRet = handle.prepare(prepReq);
+ prepStmt = prepRet.getPreparedStatement();
+
+ queryReq = new QueryRequest()
+ .setPreparedStatement(prepStmt);
+
+ qRes = handle.query(queryReq);
+ i = 0;
+ for (MapValue v : qRes.getResults()) {
+ assertEquals(0, v.get("s").asInteger().getInt());
+ assertEquals(i, v.get("id").asInteger().getInt());
+ assertTrue(v.get("ct").isTimestamp());
+ checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval);
+ assertTrue(v.get("ctm").isLong());
+ checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval);
+ i++;
+ }
+ assertEquals(10, i);
+ }
+
+ @Test
+ public void testWriteQueryCollection() throws InterruptedException {
+ final String tableName = "t";
+ final String createTable1 =
+ "create table "+ tableName +" (s integer, id integer, primary key(shard(s), id)) as json collection";
+
+ tableOperation(handle, createTable1, new TableLimits(10, 10, 1),
+ null, TableResult.State.ACTIVE, null);
+
+ // do a few inserts
+ long startTime = System.currentTimeMillis();
+ String query = "declare $id integer; insert into " + tableName + " values( 0, $id, {\"info\":1})";
+ for (int i = 0; i < 10; i++) {
+ PrepareRequest prepReq = new PrepareRequest().setStatement(query);
+ PrepareResult prepRet = handle.prepare(prepReq);
+
+ PreparedStatement prepStmt = prepRet.getPreparedStatement();
+ prepStmt.setVariable("$id", new IntegerValue(i));
+
+ QueryRequest queryReq = new QueryRequest()
+ .setPreparedStatement(prepStmt);
+
+ QueryResult queryRes = handle.query(queryReq);
+ assertNotNull(queryRes);
+ assertEquals(1, queryRes.getResults().get(0).asMap().get("NumRowsInserted").asInteger().getInt());
+ }
+ long interval = System.currentTimeMillis() - startTime;
+
+        // check they have the correct creation and modification times
+ query =
+ "select $t.s, $t.id, $t.info, creation_time($t) as ct, " +
+ "creation_time_millis($t) as ctm," +
+ "modification_time($t) as mt " +
+ "from " + tableName + " $t order by $t.id";
+ PrepareRequest prepReq = new PrepareRequest().setStatement(query);
+ PrepareResult prepRet = handle.prepare(prepReq);
+ PreparedStatement prepStmt = prepRet.getPreparedStatement();
+
+ QueryRequest queryReq = new QueryRequest()
+ .setPreparedStatement(prepStmt);
+
+ QueryResult qRes = handle.query(queryReq);
+ int i = 0;
+ for (MapValue v : qRes.getResults()) {
+ assertEquals(0, v.get("s").asInteger().getInt());
+ assertEquals(i, v.get("id").asInteger().getInt());
+ assertTrue(v.get("ct").isTimestamp());
+ checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval);
+ assertTrue(v.get("ctm").isLong());
+ checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval);
+ assertTrue(v.get("mt").isTimestamp());
+ assertTrue(v.get("mt").asTimestamp().getValue().getTime() > 0);
+ i++;
+ }
+ assertEquals(10, i);
+
+
+ // update many
+ query = "update " + tableName + " t SET t.info=3 where t.s = 0";
+
+ queryReq = new QueryRequest()
+ .setStatement(query);
+
+ qRes = handle.query(queryReq);
+ assertEquals(1, qRes.getResults().size());
+ assertEquals(10, qRes.getResults().get(0).asMap().get("NumRowsUpdated").asInteger().getInt());
+
+
+        // check they have the correct creation and modification times
+ query =
+ "select $t.s, $t.id, $t.info,creation_time($t) as ct, " +
+ "creation_time_millis($t) as ctm, " +
+ "modification_time($t) as mt " +
+ "from " + tableName + " $t order by $t.id";
+ prepReq = new PrepareRequest().setStatement(query);
+ prepRet = handle.prepare(prepReq);
+ prepStmt = prepRet.getPreparedStatement();
+
+ queryReq = new QueryRequest()
+ .setPreparedStatement(prepStmt);
+
+ qRes = handle.query(queryReq);
+ i = 0;
+ for (MapValue v : qRes.getResults()) {
+ assertEquals(0, v.get("s").asInteger().getInt());
+ assertEquals(i, v.get("id").asInteger().getInt());
+ assertTrue(v.get("ct").isTimestamp());
+ checkCreationTime(v.get("ct").asTimestamp().getValue().getTime(), startTime, interval);
+ assertTrue(v.get("ctm").isLong());
+ checkCreationTime(v.get("ctm").asLong().getValue(), startTime, interval);
+ assertTrue(v.get("mt").isTimestamp());
+ assertTrue(v.get("mt").asTimestamp().getValue().getTime() > 0);
+ i++;
+ }
+ assertEquals(10, i);
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/DDosTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/DDosTest.java
new file mode 100644
index 00000000..c2a35fcb
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/DDosTest.java
@@ -0,0 +1,1053 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static io.netty.handler.codec.http.HttpMethod.POST;
+import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_ARRAY;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_BOOLEAN;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_INTEGER;
+import static oracle.nosql.proxy.protocol.BinaryProtocol.TYPE_MAP;
+import static oracle.nosql.proxy.protocol.HttpConstants.ACCEPT;
+import static oracle.nosql.proxy.protocol.HttpConstants.AUTHORIZATION;
+import static oracle.nosql.proxy.protocol.HttpConstants.CONNECTION;
+import static oracle.nosql.proxy.protocol.HttpConstants.CONTENT_LENGTH;
+import static oracle.nosql.proxy.protocol.HttpConstants.CONTENT_TYPE;
+import static oracle.nosql.proxy.protocol.HttpConstants.NOSQL_DATA_PATH;
+import static oracle.nosql.proxy.protocol.HttpConstants.NOSQL_VERSION;
+import static oracle.nosql.proxy.protocol.HttpConstants.REQUEST_COMPARTMENT_ID;
+import static oracle.nosql.proxy.protocol.HttpConstants.REQUEST_ID_HEADER;
+import static oracle.nosql.proxy.protocol.Protocol.BAD_PROTOCOL_MESSAGE;
+import static oracle.nosql.proxy.protocol.Protocol.ILLEGAL_ARGUMENT;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import java.io.IOException;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.TimeoutException;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.Channel;
+import io.netty.handler.codec.http.DefaultFullHttpRequest;
+import io.netty.handler.codec.http.FullHttpRequest;
+import io.netty.handler.codec.http.HttpHeaderNames;
+import io.netty.handler.codec.http.HttpHeaders;
+import io.netty.handler.codec.http.HttpResponseStatus;
+import oracle.nosql.driver.Consistency;
+import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.TimeToLive;
+import oracle.nosql.driver.http.NoSQLHandleImpl;
+import oracle.nosql.driver.httpclient.HttpClient;
+import oracle.nosql.driver.httpclient.ResponseHandler;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.GetIndexesRequest;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetTableRequest;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.Request;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.TableUsageRequest;
+import oracle.nosql.driver.ops.serde.BinarySerializerFactory;
+import oracle.nosql.driver.ops.serde.Serializer;
+import oracle.nosql.driver.query.QueryDriver;
+import oracle.nosql.driver.util.ByteInputStream;
+import oracle.nosql.driver.util.ByteOutputStream;
+import oracle.nosql.driver.util.NettyByteInputStream;
+import oracle.nosql.driver.util.NettyByteOutputStream;
+import oracle.nosql.driver.util.SerializationUtil;
+import oracle.nosql.driver.values.ArrayValue;
+import oracle.nosql.driver.values.IntegerValue;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.proxy.protocol.Protocol.OpCode;
+import oracle.nosql.proxy.security.SecureTestUtil;
+
+/**
+ * Tests the proxy's handling of bad protocol data and its error (DDoS)
+ * limiting.
+ */
+public class DDosTest extends ProxyTestBase {
+
+ private final static String tableName = "users";
+
+ private final BinarySerializerFactory factory =
+ new BinarySerializerFactory();
+
+ private final MapValue key = createTestKey(1);
+ private final MapValue record = createTestValue();
+
+ private final GetRequest getRequest = new GetRequest()
+ .setTableName(tableName)
+ .setConsistency(Consistency.ABSOLUTE)
+ .setKey(key);
+
+ private final PutRequest putRequest = new PutRequest()
+ .setTableName(tableName)
+ .setValue(record)
+ .setTTL(TimeToLive.ofDays(1));
+
+ private final DeleteRequest deleteRequest = new DeleteRequest()
+ .setTableName(tableName)
+ .setKey(key);
+
+ private final String statement = "select * from users";
+ private final PrepareRequest prepareRequest = new PrepareRequest()
+ .setStatement(statement);
+
+ private final String boundStatement = "declare $id integer; " +
+ "select * from users where id = $id";
+ private final PrepareRequest prepareBoundStmtRequest = new PrepareRequest()
+ .setStatement(boundStatement);
+
+ private final GetIndexesRequest getIndexesRequest = new GetIndexesRequest()
+ .setTableName(tableName)
+ .setIndexName("idx1");
+
+ private final TableUsageRequest tableUsageRequest = new TableUsageRequest()
+ .setTableName(tableName)
+ .setStartTime(System.currentTimeMillis())
+ .setEndTime(System.currentTimeMillis() + 3600_000)
+ .setLimit(10);
+
+ /* Create a table */
+ private final static String createTableDDL =
+ "CREATE TABLE IF NOT EXISTS " + tableName + "(" +
+ "id INTEGER, " +
+ "name STRING, " +
+ "count LONG, " +
+ "avg DOUBLE, " +
+ "sum NUMBER, " +
+ "exp BOOLEAN, " +
+ "key BINARY, " +
+ "map MAP(INTEGER), " +
+ "array ARRAY(STRING), " +
+ "record RECORD(rid INTEGER, rs STRING), " +
+ "PRIMARY KEY(id))";
+
+ private final static String createIndexDDL =
+ "CREATE INDEX IF NOT EXISTS idx1 ON " + tableName + "(name)";
+
+ private ByteBuf buf;
+ private HttpClient httpClient;
+ private NoSQLHandleConfig httpConfig;
+ private String kvRequestURI;
+ private int timeoutMs;
+ private int requestId = 0;
+
+ @BeforeClass
+ public static void staticSetUp()
+ throws Exception {
+
+ assumeTrue("Skip DDosTest in onprem or minicloud or cloud test",
+ !Boolean.getBoolean(ONPREM_PROP) &&
+ !Boolean.getBoolean(USEMC_PROP) &&
+ !Boolean.getBoolean(USECLOUD_PROP));
+
+ /* this test requires error limiting */
+ System.setProperty(PROXY_ERROR_LIMITING_PROP, "true");
+
+ staticSetUp(tenantLimits);
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ if (onprem || cloudRunning) {
+ return;
+ }
+ super.setUp();
+
+ buf = Unpooled.buffer();
+
+ URL url = new URL("http", getProxyHost(), getProxyPort(), "/");
+ httpConfig = new NoSQLHandleConfig(url);
+
+ httpConfig.configureDefaultRetryHandler(0, 0);
+ timeoutMs = 1000;
+ httpConfig.setRequestTimeout(timeoutMs);
+
+ kvRequestURI = httpConfig.getServiceURL().toString() +
+ NOSQL_VERSION + "/" + NOSQL_DATA_PATH;
+
+ httpClient = createHttpClient(getProxyHost(),
+ getProxyPort(),
+ httpConfig.getNumThreads(),
+ "DDosTest",
+ null /* Logger */);
+ assertNotNull(httpClient);
+ createTable();
+
+ if (isSecure()) {
+ /* warm up security caches */
+ handle.put(putRequest);
+ handle.get(getRequest);
+ handle.delete(deleteRequest);
+ handle.getTable(new GetTableRequest().setTableName(tableName));
+ handle.getTableUsage(tableUsageRequest);
+ handle.getIndexes(getIndexesRequest);
+ handle.query(createQueryWithBoundStmtRequest());
+ }
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ if (onprem || cloudRunning) {
+ return;
+ }
+
+ if (buf != null) {
+ buf.release(buf.refCnt());
+ }
+
+ if (httpClient != null) {
+ httpClient.shutdown();
+ }
+ super.tearDown();
+ }
+
+ @Before
+ public void setVersion() throws Exception {
+ /*
+         * This test suite is somewhat V2/V3-centric, so cap the
+         * serial version at 3 if it is currently higher.
+ */
+ forceV3((NoSQLHandleImpl)handle);
+ }
+
+ /*
+     * Test bad protocol data on the following values:
+ * 1. SerialVersion
+ * 2. OpCode
+ * 3. RequestTimeout
+ * 4. TableName
+ * 5. ReturnRowFlag
+ * 6. MapValue
+ * 7. IfUpdateTTL
+ * 8. TTLValue
+ */
+ @Test
+ public void testPutDDoS() {
+
+ assumeTrue(onprem == false);
+ assumeTrue(cloudRunning == false);
+
+ final int[] lengths = {
+ 2 /* SerialVersion: short */,
+ 1 /* OpCode: byte */,
+ 3 /* RequestTimeout: packed int */,
+ 6 /* TableName: String */,
+ 1 /* ReturnRowFlag: boolean */,
+ 1 /* Durability: one byte */,
+ 1 /* ExactMatch: boolean */,
+ 1 /* IdentityCacheSize: packed int */,
+ 248 /* Record: MapValue */,
+ 1 /* IfUpdateTTL: boolean */,
+ 2 /* TTL: value(packed long) + unit(byte)*/
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, putRequest);
+
+ try {
+ String test;
+ int offset = 0;
+ int pos = 0;
+
+ test = "PUT OK test";
+ executeDDoSRequests(test, buf, 0);
+
+ /*
+ * SerialVersion
+ */
+
+ /* SerialVersion: 0 */
+ test = "PUT Bad serialVersion: 0";
+ buf.setShort(offset, 0);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * OpCode
+ */
+
+ /* Invalid OpCode */
+ offset += lengths[pos++];
+ test = "PUT Bad OpCode";
+ int invalidOpCode = OpCode.values().length;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, invalidOpCode);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * RequestTimeout
+ */
+
+ /* requestTimeout: -5000 */
+ test = "PUT Bad requestTimeout: -5000";
+ offset += lengths[pos++];
+ int invalidTimeout = -5000;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, invalidTimeout);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * TableName
+ */
+
+ /* Invalid TableName: empty string */
+ String invalidTableName = "";
+ test = "PUT empty TableName";
+ offset += lengths[pos++];
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, invalidTableName);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * ReturnRowFlag
+ */
+ offset += lengths[pos++];
+
+ /*
+ * Durability
+ * Only in V3 and above
+ */
+ short serialVersion = ((NoSQLHandleImpl)handle).getSerialVersion();
+ if (serialVersion > 2) {
+ offset += lengths[pos++];
+ } else {
+ pos++;
+ }
+
+ /*
+ * ExactMatch
+ */
+ offset += lengths[pos++];
+
+ /*
+ * IdentityCacheSize
+ */
+ offset += lengths[pos++];
+
+ /*
+ * MapValue
+ */
+ offset += lengths[pos++];
+ testMapValue(buf, out, bufBytes, offset, lengths[pos]);
+
+ /*
+ * IfUpdateTTLFlag
+ */
+ offset += lengths[pos++];
+
+ /*
+ * TTL
+ */
+ long invalidTTL = -2;
+ offset += lengths[pos++];
+ test = "PUT TTL: " + invalidTTL;
+ refillBuffer(buf, bufBytes);
+ setPackedLong(out, offset, invalidTTL);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ test = "PUT TTL: invalid ttl unit";
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset + 1, -1);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ } catch (IOException ioe) {
+ fail("Write failed: " + ioe.getMessage());
+ } finally {
+ out.close();
+ }
+ }
+
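+    /*
+     * Corrupts individual pieces of the serialized MapValue record
+     * (top-level type, length and size fields, field names, and field
+     * value types) and verifies that each corruption is rejected with
+     * either a bad protocol message or an illegal argument error.
+     */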
+ private void testMapValue(ByteBuf buffer,
+ ByteOutputStream out,
+ byte[] bufBytes,
+ int baseOffset,
+ int length) throws IOException {
+        final int headerLen = 9; /* 1(type) + 4(length) + 4(size) */
+ final String[] fields = new String[] {
+ "avg",
+ "array",
+ "record",
+ "name",
+ "count",
+ "sum",
+ "id",
+ "exp",
+ "map",
+ "key"
+ };
+ final int[] lengths = new int[] {
+ 13, /* avg: DOUBLE, 4(name) + 1(type) + 8(double) */
+ 36, /* array: ARRAY, 6(name) + 1(type) + 29(value) */
+ 34, /* record: RECORD, 7(name) + 1(type) + 26(value) */
+ 19, /* name: STRING, 5(name) + 1(type) + 13(value) */
+ 16, /* count: LONG, 6(name) + 1(type) + 9(value) */
+ 44, /* sum: NUMBER, 4(name) + 1(type) + 39(value) */
+ 5, /* id: INTEGER, 3(name) + 1(type) + 1(value) */
+ 6, /* exp: BOOLEAN, 4(name) + 1(type) + 1(value) */
+ 30, /* map: MAP, 4(name) + 1(type) + 25(value) */
+ 36 /* key: BINARY, 4(name) + 1(type) + 31(value) */
+ };
+
+        final Map<String, Integer> offsets = new HashMap<>();
+ int offset = baseOffset + headerLen;
+ for (int i = 0; i < fields.length; i++) {
+ offsets.put(fields[i], offset);
+ offset += lengths[i];
+ }
+
+ offset = baseOffset;
+ String test;
+ ByteInputStream in;
+ int value;
+ String svalue;
+
+ /* Corrupted type of top MapValue */
+ value = -1;
+ test = "MapValue: corrupted type of top MapValue, " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Wrong length value */
+ offset += 1;
+ refillBuffer(buffer, bufBytes);
+ in = new NettyByteInputStream(buffer);
+ value = bufBytes.length + 1;
+ setInt(out, offset, value);
+ test = "MapValue: wrong length value, " + value ;
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Wrong size value */
+ offset += 4;
+ refillBuffer(buffer, bufBytes);
+ value = -1;
+ setInt(out, offset, value);
+ test = "MapValue: wrong size value, " + value ;
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * Field: avg
+ */
+ String fname = "avg";
+ offset = offsets.get(fname);
+ svalue = null;
+ refillBuffer(buffer, bufBytes);
+ setString(out, offset, svalue);
+ test = "MapValue: field name is null" ;
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Corrupted value type */
+ value = 100;
+ offset += fname.length() + 1;
+ test = "MapValue: corrupted type of field \"avg\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid value type for DOUBLE */
+ value = TYPE_BOOLEAN;
+ test = "MapValue: invalid value type for field \"avg\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ fname = "array";
+ offset = offsets.get(fname);
+
+ /* Invalid value type for array value */
+ offset += fname.length() + 1;
+ value = TYPE_MAP;
+ test = "MapValue: invalid value type for field \"array\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeDDoSRequests(test, buf, ILLEGAL_ARGUMENT);
+
+ value = TYPE_INTEGER;
+ test = "MapValue: invalid value type for field \"array\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ buffer.setByte(offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /* Invalid length value of array value */
+ length = readInt(in, offset);
+ offset++;
+ value = -1;
+ test = "MapValue: invalid length of \"array\", " + value ;
+ refillBuffer(buffer, bufBytes);
+ setInt(out, offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+ }
+
+
+ /*
+     * Test bad protocol data on the following values:
+ * 1. Consistency
+ * 2. PrimaryKey type
+ */
+ @Test
+ public void testGetDDoS() {
+
+ assumeTrue(onprem == false);
+ assumeTrue(cloudRunning == false);
+
+ final int[] lengths = {
+ 2 /* SerialVersion: short*/,
+ 1 /* OpCode: byte*/,
+ 3 /* RequestTimeout: packed int */,
+ 6 /* TableName: string */,
+ 1 /* Consistency: boolean */,
+ 14 /* Key: 1(TYPE_MAP) + 4(length) + 4(size) + 3("id") +
+ 1(TYPE_INT) + 1(1-value) */
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, getRequest);
+
+ try {
+ String test;
+ int pos;
+ int offset = 0;
+
+ test = "GET OK test";
+ executeDDoSRequests(test, buf, 0);
+
+ /*
+ * Consistency
+ */
+
+ /* Move to offset of consistency */
+ for (pos = 0; pos < 4; pos++) {
+ offset += lengths[pos];
+ }
+
+ /* Invalid consistency type */
+ int value = -1;
+ test = "GET Invalid consistency type: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = 3;
+ test = "GET Invalid consistency type: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * PrimaryKey
+ */
+ offset += lengths[pos++];
+
+ value = -1;
+ test = "GET Invalid value type of PrimaryKey: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = TYPE_ARRAY;
+ test = "GET Invalid value type of PrimaryKey: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ } finally {
+ out.close();
+ }
+ }
+
+ /*
+     * Test bad protocol data on the following values:
+ * 1. Statement
+ */
+ @Test
+ public void testPrepareDDoS() {
+
+ assumeTrue(onprem == false);
+ assumeTrue(cloudRunning == false);
+
+ final int[] lengths = new int[] {
+ 2 /* SerialVersion: short */,
+ 1 /* OpCode: byte */,
+ 3 /* RequestTimeout: packed int */,
+ 20 /* Statement: string */
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, prepareRequest);
+
+ try {
+ String test;
+ int pos;
+ int offset = 0;
+
+ test = "PREPARE OK test";
+ executeDDoSRequests(test, buf, 0);
+
+ /*
+ * Statement
+ */
+ for (pos = 0; pos < 3; pos++) {
+ offset += lengths[pos];
+ }
+
+ String svalue = null;
+ test = "PREPARE Invalid statement: " + svalue;
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, svalue);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ svalue = "";
+ test = "PREPARE Invalid statement: " + svalue;
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, svalue);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ } catch (IOException ioe) {
+ fail("Failed to write to buffer: " + ioe);
+ } finally {
+ out.close();
+ }
+ }
+
+ /*
+     * Test bad protocol data on the following values:
+ * 1. PreparedStatement
+ * 2. Variables Number
+ * 3. Variable Name
+ * 4. Variable Value
+ */
+ @Test
+ public void testQueryDDoS() {
+
+ assumeTrue(onprem == false);
+ assumeTrue(cloudRunning == false);
+
+ final QueryRequest queryReq = createQueryWithBoundStmtRequest();
+
+ final int prepStmtLen =
+            4 /* int, length of PreparedStatement */ +
+ queryReq.getPreparedStatement().getStatement().length;
+
+ final int[] lengths = {
+ 2 /* SerialVersion: short*/,
+ 1 /* OpCode: byte */,
+ 3 /* RequestTimeout: packed int */,
+ 1 /* Consistency: byte */,
+ 1 /* NumberLimit: packed int */,
+ 3 /* MaxReadKB: packed int */,
+ 1 /* ContinuationKey: byte array */,
+ 1 /* IsPreparedStatement: boolean */,
+ 2 /* QueryVersion: short */,
+ 1 /* traceLevel: packed int */,
+ 1 /* MaxWriteKB: packed int */,
+ 1 /* MathContext: byte */,
+            1 /* TopologySeqNum: packed int */,
+ 1 /* ShardId: packed int */,
+ 1 /* isSimpleQuery: boolean */,
+ prepStmtLen /* PreparedStatement: byte array */,
+ 1 /* VariablesNumber: packed int */,
+ 4 /* VariableName: string */,
+ 2 /* VariableValue: INT_TYPE + packed int */
+ };
+
+ final ByteOutputStream out = new NettyByteOutputStream(buf);
+ final byte[] bufBytes = serializeRequest(out, queryReq);
+
+ try {
+ String test;
+ int pos;
+ int offset = 0;
+
+ test = "QUERY OK test";
+ executeDDoSRequests(test, buf, 0);
+
+ /*
+ * PreparedStatement
+ */
+ for (pos = 0; pos < 15; pos++) {
+ offset += lengths[pos];
+ }
+
+ int value = -1;
+ test = "QUERY Invalid prepared Statement";
+ refillBuffer(buf, bufBytes);
+ setInt(out, offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = 0;
+ test = "QUERY Invalid prepared Statement";
+ refillBuffer(buf, bufBytes);
+ setInt(out, offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * Variables number
+ */
+ value = -1;
+ offset += lengths[pos++];
+ test = "QUERY Invalid variable number: " + value;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = 2;
+ test = "QUERY Invalid variable number: " + value;
+ refillBuffer(buf, bufBytes);
+ setPackedInt(out, offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * Variable name
+ */
+ offset += lengths[pos++];
+ String svalue = null;
+ test = "QUERY Invalid variable name: " + svalue;
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, svalue);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ svalue = "";
+ test = "QUERY Invalid variable name: " + svalue;
+ refillBuffer(buf, bufBytes);
+ setString(out, offset, svalue);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ /*
+ * Variable value
+ */
+ offset += lengths[pos++];
+ value = -1;
+ test = "QUERY Invalid variable value type: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeDDoSRequests(test, buf, BAD_PROTOCOL_MESSAGE);
+
+ value = TYPE_ARRAY;
+ test = "QUERY Invalid variable value type: " + value;
+ refillBuffer(buf, bufBytes);
+ buf.setByte(offset, value);
+ executeDDoSRequests(test, buf, ILLEGAL_ARGUMENT);
+
+ } catch (IOException ioe) {
+ fail("Failed to write to buffer: " + ioe);
+ } finally {
+ out.close();
+ }
+ }
+
+ private QueryRequest createQueryWithBoundStmtRequest() {
+ final PrepareResult prepRet = handle.prepare(prepareBoundStmtRequest);
+ prepRet.getPreparedStatement()
+ .setVariable("$id", new IntegerValue(1));
+
+ final QueryRequest queryReq = new QueryRequest()
+ .setPreparedStatement(prepRet)
+ .setMaxReadKB(1024)
+ .setLimit(100);
+ return queryReq;
+ }
+
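+    /*
+     * Serializes the request into the shared Netty buffer and returns a
+     * copy of the serialized bytes so that individual fields can be
+     * corrupted in place and the buffer later restored via refillBuffer().
+     */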
+ private byte[] serializeRequest(ByteOutputStream out, Request request) {
+
+ request.setDefaults(httpConfig);
+
+ Serializer ser = request.createSerializer(factory);
+ try {
+ short serialVersion = ((NoSQLHandleImpl)handle).getSerialVersion();
+ out.writeShort(serialVersion);
+ if (request instanceof QueryRequest ||
+ request instanceof PrepareRequest) {
+ ser.serialize(request, serialVersion,
+ QueryDriver.QUERY_V3, out);
+ } else {
+ ser.serialize(request, serialVersion, out);
+ }
+ } catch (IOException e) {
+ fail("Failed to serialize put request");
+ }
+
+ final byte[] bytes = new byte[buf.writerIndex()];
+ System.arraycopy(buf.array(), 0, bytes, 0, bytes.length);
+
+ return bytes;
+ }
+
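+    /*
+     * Sends a single raw HTTP request built from the (possibly corrupted)
+     * buffer, then verifies the returned error code and that the observed
+     * latency falls within [minLatencyMs, maxLatencyMs]; the latency bounds
+     * are how the error-limiting delays are detected.
+     */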
+ private void executeRequest(String test,
+ ByteBuf buffer,
+ int expErrCode,
+ int minLatencyMs,
+ int maxLatencyMs,
+ int requestNum) {
+
+ ResponseHandler responseHandler = null;
+ ByteInputStream bis = null;
+
+ try {
+ Channel channel = httpClient.getChannel(timeoutMs);
+ responseHandler = new ResponseHandler(httpClient, null, channel);
+
+ final FullHttpRequest request =
+ new DefaultFullHttpRequest(HTTP_1_1, POST, kvRequestURI,
+ buffer,
+ false /* Don't validate hdrs */);
+ HttpHeaders headers = request.headers();
+ headers.add(HttpHeaderNames.HOST, getProxyHost())
+ .add(REQUEST_ID_HEADER, nextRequestId())
+ .set(CONTENT_TYPE, "application/octet-stream")
+ .set(CONNECTION, "keep-alive")
+ .set(ACCEPT, "application/octet-stream")
+ .setInt(CONTENT_LENGTH, buffer.readableBytes());
+
+ if (!onprem) {
+ headers.set(AUTHORIZATION, SecureTestUtil.getAuthHeader(
+ getTenantId(), isSecure()));
+ }
+ if (isSecure()) {
+ headers.add(REQUEST_COMPARTMENT_ID, getTenantId());
+ }
+
+ long startMs = System.currentTimeMillis();
+ httpClient.runRequest(request, responseHandler, channel);
+
+ if (responseHandler.await(timeoutMs)) {
+ throw new TimeoutException();
+ }
+
+ long endMs = System.currentTimeMillis();
+ int latencyMs = (int)(endMs - startMs);
+
+ if (latencyMs < minLatencyMs || latencyMs > maxLatencyMs) {
+ fail("Request " + requestNum + " took " + latencyMs +
+ "ms, expected between " + minLatencyMs + "ms and " +
+ maxLatencyMs + "ms");
+ }
+
+ if (verbose) {
+ System.out.println("Request " + requestNum + " took " +
+ latencyMs + "ms");
+ }
+
+ /* Validates the response from proxy */
+ assertEquals(HttpResponseStatus.OK, responseHandler.getStatus());
+ bis = new NettyByteInputStream(responseHandler.getContent());
+ int errCode = bis.readByte();
+ if (expErrCode >= 0) {
+ if (expErrCode == errCode) {
+ return;
+ }
+ /* support V4 server error codes */
+ if (errCode == 6) { /* nson MAP */
+ errCode = getV4ErrorCode(responseHandler.getContent());
+ }
+ assertEquals(test + " failed", expErrCode, errCode);
+ }
+ } catch (Throwable t) {
+ if (t instanceof TimeoutException) {
+ /* did we expect a timeout? */
+ /* if timeoutMs is within min/max latency, yes */
+                if (maxLatencyMs > timeoutMs) {
+ /* all good, expected */
+ if (verbose) {
+ System.out.println("Request " + requestNum +
+ " timed out (expected)");
+ }
+ } else {
+ fail(test + " Request " + requestNum +
+ " timed out after " + timeoutMs + " ms");
+ }
+ } else {
+ fail(test + " failed: " + t);
+ }
+ } finally {
+ if (bis != null) {
+ bis.close();
+ }
+ if (responseHandler != null) {
+ responseHandler.close();
+ }
+ }
+ }
+
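+    /*
+     * Drives repeated requests with the same payload to exercise the
+     * proxy's error limiting: the first few requests should return the
+     * expected error quickly, the next few should be delayed, and a burst
+     * of parallel errors should push the limiter into delay/DNR timeouts.
+     */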
+ private void executeDDoSRequests(String test,
+ ByteBuf buffer,
+ int expErrCode) {
+ if (expErrCode != 0) {
+ /* sleep to cool down error limiters */
+ try {
+ if (verbose) {
+ System.out.println(test + " Sleeping for 4 seconds...");
+ }
+ Thread.sleep(4000);
+ } catch (Exception e) {
+ fail(e.getMessage());
+ }
+ }
+
+ /* first 5 should return expected error code */
+ /* latency should be in single-digit ms, after first */
+ for (int x=0; x<5; x++) {
+ executeRequest(test, buffer.retainedDuplicate(), expErrCode, 0,
+ (x==0) ? 500 : 100, x);
+ }
+
+ /* next 5 should be slowed to >200ms latency */
+ for (int x=0; x<5; x++) {
+ executeRequest(test, buffer.retainedDuplicate(), expErrCode,
+ (expErrCode==0) ? 0 : 200,
+ (expErrCode==0) ? 100 : 500, x);
+ }
+
+ if (expErrCode == 0) {
+ return;
+ }
+
+ /* at this point we expect requests to mostly timeout */
+
+ /* fire off parallel threads to effect >10 errs/sec */
+ Thread threads[] = new Thread[5];
+ for(int x=0; x<5; x++) {
+ threads[x] = new Thread(() ->
+ {
+ for (int y=0; y<3; y++) {
+ executeRequest(test, buffer.retainedDuplicate(),
+ expErrCode, 200, timeoutMs + 100, y);
+ }
+ });
+ threads[x].start();
+ }
+ /* wait for threads to finish */
+ for(int x=0; x<5; x++) {
+ try {
+ threads[x].join();
+ } catch (Exception ignored) {}
+ }
+ }
+
+ private String nextRequestId() {
+ return String.valueOf(requestId++);
+ }
+
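+    /* Restores the original serialized request bytes and resets the
+     * reader/writer indices before the next corruption. */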
+ private void refillBuffer(ByteBuf buffer, byte[] bytes) {
+ buffer.setBytes(0, bytes);
+ buffer.readerIndex(0);
+ buffer.writerIndex(bytes.length);
+ }
+
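+    /*
+     * The helpers below overwrite a single field at a fixed offset in the
+     * serialized request while preserving the stream's write index.
+     */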
+ private void setPackedInt(ByteOutputStream out, int offset, int value)
+ throws IOException {
+
+ int savedOffset = out.getOffset();
+ out.setWriteIndex(offset);
+ SerializationUtil.writePackedInt(out, value);
+ out.setWriteIndex(savedOffset);
+ }
+
+ private void setInt(ByteOutputStream out, int offset, int value)
+ throws IOException {
+
+ int savedOffset = out.getOffset();
+ out.setWriteIndex(offset);
+ out.writeInt(value);
+ out.setWriteIndex(savedOffset);
+ }
+
+ private void setPackedLong(ByteOutputStream out, int offset, long value)
+ throws IOException {
+
+ int savedOffset = out.getOffset();
+ out.setWriteIndex(offset);
+ SerializationUtil.writePackedLong(out, value);
+ out.setWriteIndex(savedOffset);
+ }
+
+ private void setString(ByteOutputStream out, int offset, String value)
+ throws IOException {
+
+ int savedOffset = out.getOffset();
+ out.setWriteIndex(offset);
+ SerializationUtil.writeString(out, value);
+ out.setWriteIndex(savedOffset);
+ }
+
+ private int readInt(ByteInputStream in, int offset)
+ throws IOException {
+
+ int savedOffset = in.getOffset();
+ in.setOffset(offset);
+ int value = in.readInt();
+ in.setOffset(savedOffset);
+ return value;
+ }
+
+ private void createTable() {
+ tableOperation(handle, createTableDDL,
+ new TableLimits(20000, 20000, 50),
+ TableResult.State.ACTIVE, 10000);
+ tableOperation(handle, createIndexDDL, null,
+ TableResult.State.ACTIVE, 10000);
+ }
+
+ private MapValue createTestValue() {
+ MapValue row = new MapValue();
+ row.put("id", 1);
+ row.put("name", "string value");
+ row.put("count", Long.MAX_VALUE);
+ row.put("avg", Double.MAX_VALUE);
+ row.put("sum", new BigDecimal("12345678901234567890123456789012345678"));
+ row.put("exp", true);
+ row.put("key", genBytes(30, null));
+
+ MapValue map = new MapValue();
+ map.put("k1", 100);
+ map.put("k2", 200);
+ map.put("k3", 300);
+ row.put("map", map);
+
+ ArrayValue array = new ArrayValue();
+ array.add("elem1");
+ array.add("elem2");
+ array.add("elem3");
+ row.put("array", array);
+
+ MapValue rec = new MapValue();
+ rec.put("rid", 1024);
+ rec.put("rs", "nosql");
+ row.put("record", rec);
+
+ return row;
+ }
+
+ private MapValue createTestKey(int id) {
+ return new MapValue().put("id", id);
+ }
+
+ private byte[] genBytes(int length, Random rand) {
+ byte[] bytes = new byte[length];
+ for (int i = 0; i < bytes.length; i++) {
+ bytes[i] = (rand == null)? (byte)(i % 256) :
+ (byte)rand.nextInt(256);
+ }
+ return bytes;
+ }
+
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/DistributedRateLimitingTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/DistributedRateLimitingTest.java
new file mode 100644
index 00000000..0c3e5742
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/DistributedRateLimitingTest.java
@@ -0,0 +1,791 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assume.assumeTrue;
+import static org.junit.Assert.fail;
+
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import oracle.nosql.driver.NoSQLHandle;
+import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.ReadThrottlingException;
+import oracle.nosql.driver.WriteThrottlingException;
+import oracle.nosql.driver.RequestTimeoutException;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.Request;
+import oracle.nosql.driver.ops.RetryStats;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableRequest;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.proxy.security.SecureTestUtil;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests for distributed rate limiting.
+ *
+ * These tests use many client threads and multiple client handles to
+ * simulate many different clients. In all cases, overall read/write
+ * unit usage is tracked for each table used and compared periodically,
+ * and at the end of each test, to verify it is reasonably close to
+ * the specified table limits. The tests also verify that there are
+ * very few or no throttling exceptions.
+ *
+ * Note that this test does NOT use driver-side rate limiters.
+ * TODO: add that as a config, verify use of both works OK
+ */
+public class DistributedRateLimitingTest extends ProxyTestBase {
+
+ protected static int numTables = Integer.getInteger("test.numtables", 3);
+ protected static int baseUnits = Integer.getInteger("test.baseunits", 10);
+ protected static Random rand = new Random(System.currentTimeMillis());
+ protected static int maxRowSize = 20000;
+ protected static int maxRows = 10000;
+ protected static int readTimeoutMs = 3000;
+ protected static int writeTimeoutMs = 3000;
+
+ protected static int writerIntervalMs =
+ Integer.getInteger("test.outintervalms", 1000);
+ protected static PrintWriter printWriter;
+ static {
+ String outFile = System.getProperty("test.outfile");
+ if (outFile == null) {
+ printWriter = null;
+ } else {
+ try {
+ printWriter = new PrintWriter(new FileWriter(outFile));
+ } catch (Exception e) {
+ printWriter = null;
+ }
+ }
+ }
+
+ @BeforeClass
+ public static void staticSetUp()
+ throws Exception {
+
+ assumeTrue("Skipping DistributedRateLimitingTest for minicloud or " +
+ "cloud or onprem runs",
+ !Boolean.getBoolean(USEMC_PROP) &&
+ !Boolean.getBoolean(USECLOUD_PROP) &&
+ !Boolean.getBoolean(ONPREM_PROP));
+
+ staticSetUp(tenantLimits);
+ }
+
+ /* note this overrides base class @Before */
+ @Override
+ @Before
+ public void setUp() throws Exception {
+
+ /*
+ * Configure the endpoint
+ */
+ if (handles == null) {
+ handles = new NoSQLHandle[numProxies];
+            Thread[] threads = new Thread[numTables];
+            for (int x = 0; x < numTables; x++) {
+                final int tNum = x;
+                threads[x] = new Thread(() -> {doPopulateThread(tNum);});
+ threads[x].start();
+ }
+ /* wait for threads to finish */
+ verbose("Waiting for population threads to finish...");
+            for (int x = 0; x < numTables; x++) {
+                if (x > 0) rc.writer.printf(",");
+ rc.writer.printf("%.2f,%.2f", rrate, wrate);
+ }
+ }
+ if (rc.writer != null) {
+ rc.writer.printf("\n");
+ rc.writer.flush();
+ }
+ }
+ }
+
+ protected static void doReadThread(RunConfig rc, int tableNum) {
+ try {
+ doReads(rc, tableNum);
+ } catch (IOException e) {
+ }
+ }
+
+ protected static void doReads(RunConfig rc, int tableNum)
+ throws IOException {
+
+ MapValue key = new MapValue();
+ GetRequest getRequest = new GetRequest();
+ getRequest.setTimeout(rc.readTimeoutMs);
+ if (rc.preferThrottling) {
+ if (setPreferThrottling(getRequest) == false) {
+ return;
+ }
+ }
+
+ long endMillis = System.currentTimeMillis() + (rc.runSeconds * 1000);
+
+ while (System.currentTimeMillis() < endMillis) {
+ long id = rand.nextLong() % rc.maxID;
+ key.put("cookie_id", id);
+ getRequest.setTableName("RLTable" + tableNum);
+ getRequest.setKey(key);
+ NoSQLHandle handle = handles[rand.nextInt(handles.length)];
+ try {
+ GetResult getRes = handle.get(getRequest);
+ rc.readCollectors[tableNum].collect(getRes.getReadUnits());
+ rc.writeCollectors[tableNum].collect(getRes.getWriteUnits());
+ } catch (Exception e) {
+ verbose(" " + e);
+ if (e instanceof ReadThrottlingException) {
+ rc.readCollectors[tableNum].addError();
+ }
+ }
+ RetryStats rs = getRequest.getRetryStats();
+ if (rs != null) {
+ rc.readCollectors[tableNum].addErrors(
+ rs.getNumExceptions(ReadThrottlingException.class));
+ }
+ }
+
+ }
+
+ protected static void doWriteThread(RunConfig rc, int tableNum) {
+ try {
+ doWrites(rc, tableNum);
+ } catch (IOException e) {
+ }
+ }
+
+ protected static void doWrites(RunConfig rc, int tableNum)
+ throws IOException {
+
+ /* generate random data for puts */
+ final int leftLimit = 32; /* space */
+ final int rightLimit = 126; /* tilde */
+ String generatedString = rand.ints(leftLimit, rightLimit + 1)
+ .limit(rc.maxSize+1)
+ .collect(StringBuilder::new,
+ StringBuilder::appendCodePoint, StringBuilder::append)
+ .toString();
+
+ MapValue value = new MapValue();
+ PutRequest putRequest = new PutRequest();
+ putRequest.setTimeout(rc.writeTimeoutMs);
+ if (rc.preferThrottling) {
+ if (setPreferThrottling(putRequest) == false) {
+ return;
+ }
+ }
+
+ long endMillis = System.currentTimeMillis() + (rc.runSeconds * 1000);
+
+ while (System.currentTimeMillis() < endMillis) {
+ /* set up random data */
+ long id = rand.nextLong() % rc.maxID;
+
+ value.put("cookie_id", id);
+ int begin = 0;
+ int end = begin + rand.nextInt(rc.maxSize);
+ //int begin = rand.nextInt(rc.maxSize / 4);
+ //int end = begin + rand.nextInt((rc.maxSize * 3) / 4);
+ String sub = generatedString.substring(begin, end);
+ value.put("audience_data", sub);
+
+ putRequest.setTableName("RLTable" + tableNum);
+ putRequest.setValue(value);
+ NoSQLHandle handle = handles[rand.nextInt(handles.length)];
+ try {
+ PutResult putRes = handle.put(putRequest);
+ if (putRes.getVersion() == null) {
+ verbose("put failed!");
+ }
+ rc.readCollectors[tableNum].collect(putRes.getReadUnits());
+ rc.writeCollectors[tableNum].collect(putRes.getWriteUnits());
+ } catch (Exception e) {
+ verbose(" " + e);
+ if (e instanceof WriteThrottlingException) {
+ rc.writeCollectors[tableNum].addError();
+ }
+ }
+ RetryStats rs = putRequest.getRetryStats();
+ if (rs != null) {
+ rc.writeCollectors[tableNum].addErrors(
+ rs.getNumExceptions(WriteThrottlingException.class));
+ }
+ }
+ }
+
+ protected static void doQueryThread(RunConfig rc, int tableNum) {
+ try {
+ runOneQueryClient(rc, tableNum);
+ } catch (IOException e) {
+ } catch (InterruptedException ie) {
+ return;
+ }
+ }
+
+ private static void runQuery(RunConfig rc,
+ String query,
+ int tableNum,
+ NoSQLHandle handle,
+ long endMillis) {
+ QueryRequest qreq = null;
+ try {
+            List<MapValue> allResults = new ArrayList<>();
+            /* TODO: in proxy: check current rate for table. if over, reduce maxReadKB */
+ int maxKB = getTableUnits(tableNum) / 10;
+ if (maxKB < 5) maxKB = 5;
+ qreq = new QueryRequest().setStatement(query)
+ .setTimeout(10000)
+ .setMaxReadKB(maxKB);
+ if (rc.preferThrottling) {
+ if (setPreferThrottling(qreq) == false) {
+ return;
+ }
+ }
+ do {
+ QueryResult qr = handle.query(qreq);
+ List results = qr.getResults();
+ for (MapValue mv : results) {
+ /* need to walk values, in case iteration triggers */
+ /* more requests internally */
+ allResults.add(mv);
+ }
+ rc.readCollectors[tableNum].collect(qr.getReadUnits());
+ rc.writeCollectors[tableNum].collect(qr.getWriteUnits());
+ /* this must be called _after_ getResults() */
+ RetryStats rs = qr.getRetryStats();
+ if (rs != null) {
+ int ne = rs.getNumExceptions(ReadThrottlingException.class);
+ if (ne > 0) {
+ rc.readCollectors[tableNum].addErrors(ne);
+ }
+ }
+ if (System.currentTimeMillis() > endMillis) {
+ break;
+ }
+ } while (!qreq.isDone());
+ } catch (RequestTimeoutException rte) {
+ verbose("query '" + query + "' timed out: " + rte);
+ } catch (Exception e) {
+ verbose("query '" + query + "' got error: " + e);
+ RetryStats rs = qreq.getRetryStats();
+ if (rs != null) {
+ int ne = rs.getNumExceptions(ReadThrottlingException.class);
+ if (ne > 0) {
+ rc.readCollectors[tableNum].addErrors(ne);
+ }
+ }
+ }
+ }
+
+
+ private static void runOneQueryClient(RunConfig rc, int tableNum)
+ throws IOException, InterruptedException {
+
+ verbose("Driver thread " + Thread.currentThread().getId() +
+ " performing query operations...");
+
+ long endMillis = System.currentTimeMillis() + (rc.runSeconds * 1000);
+
+ NoSQLHandle handle;
+
+ while (System.currentTimeMillis() < endMillis) {
+
+ /* simple count */
+ handle = handles[rand.nextInt(handles.length)];
+ runQuery(rc, "select count(*) from RLTable" + tableNum,
+ tableNum, handle, endMillis);
+
+ /* full scan/dump */
+ handle = handles[rand.nextInt(handles.length)];
+ runQuery(rc, "select * from RLTable" + tableNum,
+ tableNum, handle, endMillis);
+
+ /* more complex, with sort */
+ handle = handles[rand.nextInt(handles.length)];
+ runQuery(rc, "select audience_data from RLTable" + tableNum +
+ " where cookie_id > 1000 and cookie_id < 10000" +
+ " order by audience_data", tableNum, handle, endMillis);
+ }
+ }
+
+ protected static void runTest(
+ int readThreads,
+ int writeThreads,
+ int qThreads,
+ int runSeconds)
+ throws Exception {
+
+ /* skip this test if running on minicloud */
+ assumeTrue(cloudRunning == false);
+
+ boolean preferThrottling = Boolean.getBoolean("test.preferthrottling");
+
+ final int totalThreads =
+ readThreads + writeThreads + qThreads;
+
+ TPCollector[] readCollectors = new TPCollector[numTables];
+ TPCollector[] writeCollectors = new TPCollector[numTables];
+        for (int x = 0; x < numTables; x++) {
+            readCollectors[x] = new TPCollector();
+            writeCollectors[x] = new TPCollector();
+        }
+
+        Thread[] threads = new Thread[totalThreads + 1];
+        int numThreads = 0;
+
+        threads[numThreads] = new Thread(() -> {collecterWatcher(rc);});
+ threads[numThreads].start();
+ numThreads++;
+
+        for (int x = 0; x < writeThreads; x++) {
+            final int tNum = x % numTables;
+            threads[numThreads] = new Thread(() -> {doWriteThread(rc, tNum);});
+            threads[numThreads].start();
+            numThreads++;
+        }
+        for (int x = 0; x < readThreads; x++) {
+            final int tNum = x % numTables;
+            threads[numThreads] = new Thread(() -> {doReadThread(rc, tNum);});
+            threads[numThreads].start();
+            numThreads++;
+        }
+        for (int x = 0; x < qThreads; x++) {
+            final int tNum = x % numTables;
+            threads[numThreads] = new Thread(() -> {doQueryThread(rc, tNum);});
+            threads[numThreads].start();
+            numThreads++;
+        }
+
+ /* wait for threads to finish */
+        for (int x = 0; x < numThreads; x++) {
+            threads[x].join();
+        }
+
+        StringBuilder sb = new StringBuilder();
+        for (int x = 0; x < numTables; x++) {
+            if (readThreads > 0 || qThreads > 0) {
+ double RUs = readCollectors[x].getOverallRate();
+ System.out.println("RUs=" + getTableUnits(x) + " actual=" + RUs);
+ System.out.println(" read throttling errors = " +
+ readCollectors[x].getErrors());
+ if (RUs > max || RUs < min) {
+ sb.append("RUs for " + getTableUnits(x) +
+ "RUs table failed: " + "min=" + min + ", max=" +
+ max + ", actual=" + RUs + "\n");
+ }
+ }
+ if (writeThreads > 0) {
+ double WUs = writeCollectors[x].getOverallRate();
+ System.out.println("WUs=" + getTableUnits(x) + " actual=" + WUs);
+ System.out.println(" write throttling errors = " +
+ writeCollectors[x].getErrors());
+ if (WUs > max || WUs < min) {
+ sb.append("WUs for " + getTableUnits(x) +
+ "WUs table failed: " + "min=" + min + ", max=" +
+ max + ", actual=" + WUs + "\n");
+ }
+// TODO: error counts, maybe less than 1/sec? some threshold?
+// Only if preferThrottling == false
+ }
+ }
+ if (sb.length() > 0) {
+ fail(sb.toString());
+ }
+ }
+
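+    /*
+     * Calls Request.setPreferThrottlingExceptions(true) via reflection so
+     * this test still compiles and runs against older drivers that lack
+     * the method; returns false so callers can skip the test in that case.
+     */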
+ private static boolean setPreferThrottling(Request req) {
+        Class<?> requestClass = null;
+ try {
+ requestClass = Class.forName("oracle.nosql.driver.ops.Request");
+ } catch (Throwable e) {
+ System.out.println("Could not find Request class:" + e);
+ return false;
+ }
+ Method setThrottleFunction = null;
+ try {
+ setThrottleFunction = requestClass.getMethod(
+ "setPreferThrottlingExceptions",
+ boolean.class);
+ } catch (Throwable e) {
+ verbose("Could not find " +
+ "Request.setPreferThrottlingExceptions(): " + e);
+ verbose("Skipping test");
+ return false;
+ }
+ try {
+ setThrottleFunction.invoke(req, true);
+ } catch (Exception e) {
+ verbose("Could not invoke " +
+ "Request.setPreferThrottlingExceptions(): " + e);
+ verbose("Skipping test");
+ return false;
+ }
+ return true;
+ }
+
+ @Test
+ public void basicWriteTest() throws Exception {
+ runTest(0, 15, 0, 15);
+ }
+
+ @Test
+ public void basicReadTest() throws Exception {
+ runTest(numTables * 5, 0, 0, 15);
+ }
+
+ @Test
+ public void basicReadWriteTest() throws Exception {
+ runTest(numTables * 5, numTables * 5, 0, 15);
+ }
+
+ @Test
+ public void basicQueryTest() throws Exception {
+ runTest(0, 0, numTables * 5, 20);
+ }
+
+ @Test
+ public void readWriteQueryTest() throws Exception {
+ runTest(numTables * 4, numTables * 4, numTables * 4, 30);
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ElasticityTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ElasticityTest.java
new file mode 100644
index 00000000..a806296e
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ElasticityTest.java
@@ -0,0 +1,1403 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2011, 2023 Oracle and/or its affiliates. All rights reserved.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import java.io.File;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Iterator;
+import java.util.Random;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+
+import oracle.kv.KVStore;
+import oracle.kv.KVStoreConfig;
+import oracle.kv.KVStoreFactory;
+import oracle.kv.StatementResult;
+import oracle.kv.impl.admin.CommandServiceAPI;
+import oracle.kv.impl.api.KVStoreImpl;
+import oracle.kv.impl.api.table.TableKey;
+import oracle.kv.impl.sna.StorageNodeAgent;
+import oracle.kv.impl.topo.DatacenterId;
+import oracle.kv.impl.topo.PartitionId;
+import oracle.kv.impl.topo.StorageNodeId;
+import oracle.kv.impl.util.CommonLoggerUtils;
+import oracle.kv.impl.util.FileUtils;
+import oracle.kv.impl.util.FormatUtils;
+import oracle.kv.impl.util.PollCondition;
+import oracle.kv.table.Index;
+import oracle.kv.table.IndexKey;
+import oracle.kv.table.PrimaryKey;
+import oracle.kv.table.ReadOptions;
+import oracle.kv.table.RecordValue;
+import oracle.kv.table.Row;
+import oracle.kv.table.Table;
+import oracle.kv.table.TableAPI;
+
+import oracle.nosql.driver.NoSQLHandle;
+import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.NoSQLHandleFactory;
+import oracle.nosql.driver.RequestTimeoutException;
+import oracle.nosql.driver.TableNotFoundException;
+import oracle.nosql.driver.ops.GetTableRequest;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PreparedStatement;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.driver.values.IntegerValue;
+import oracle.nosql.driver.values.StringValue;
+
+import oracle.nosql.proxy.security.SecureTestUtil;
+import oracle.nosql.proxy.util.PortFinder;
+import oracle.nosql.proxy.util.CreateStoreUtils;
+import oracle.nosql.proxy.util.CreateStore;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+/**
+ * Tests the correctness of queries under elasticity operations.
+ *
+ * Two kinds of elasticity operations are used in these tests. A store
+ * expansion expands a 3x1 store into a 6x1 store. A store contraction
+ * contracts a 6x1 store into a 3x1 store. A series of such expansions and
+ * contractions can be conducted in a test.
+ *
+ * The secondary query tests work in the following pattern. The table rows
+ * have the following schema:
+ *     userId name count
+ * where userId is an integer primary key, name is a non-unique string, and
+ * count is the number of rows that share the same name. When we insert the
+ * rows, we group rows of the same name together, i.e., into consecutive
+ * userId blocks. The index is on the name field and the tests query the
+ * rows with a specified name. The test then starts up two threads, an
+ * elasticity thread and a query thread. The elasticity thread performs a
+ * series of elasticity operations described above while the query thread
+ * concurrently runs the query and verifies the results. We also verify the
+ * query results before and after the elasticity operations to make sure
+ * there is no problem with the test insertion or the elasticity operations.
+ *
+ * [KVSTORE-1518]
+ */
+public class ElasticityTest extends ProxyTestBase {
+
+ private static boolean trace = false;
+
+ private static final int startPort = 5000;
+ private static final int haRange = 5;
+ private static final Random rand = new Random();
+
+ private static final int POLL_CONDITION_INTERVAL = 1000;
+ private static final int NUM_ERRORS_TO_DISPLAY = 10;
+ private static final AtomicInteger topoCandidateSequencer =
+ new AtomicInteger(0);
+
+ private final Logger logger = Logger.getLogger(getClass().getName());
+ private CreateStore createStore = null;
+ private KVStore kvstore = null;
+ private StorageNodeAgent[] extraSNAs = new StorageNodeAgent[3];
+
+ private Proxy proxy;
+ private NoSQLHandle handle = null;
+
+ private int maxReadKB = 25;
+
+ String usersDDL =
+ "CREATE TABLE IF NOT EXISTS users ( " +
+ " uid integer, " +
+ " name string, " +
+ " int integer, " +
+ " count long, " +
+ " PRIMARY KEY (uid))";
+
+ String childDDL =
+ "CREATE TABLE IF NOT EXISTS users.child ( " +
+ " cid integer, " +
+ " cname string, " +
+ " cint integer, " +
+ " count integer, " +
+ " PRIMARY KEY (cid))";
+
+ String idxNameDDL =
+ "CREATE INDEX IF NOT EXISTS idx_name ON users(name)";
+
+ String idxIntDDL =
+ "CREATE INDEX IF NOT EXISTS idx_int ON users(int)";
+
+ String users2DDL =
+ "CREATE TABLE IF NOT EXISTS users2 ( " +
+ " uid1 integer, " +
+ " uid2 integer, " +
+ " name string, " +
+ " int integer, " +
+ " count long, " +
+ " PRIMARY KEY(shard(uid1), uid2))";
+
+ String idx2IntDDL =
+ "CREATE INDEX IF NOT EXISTS idx_int ON users2(int)";
+
+ String[] queries = {
+ // 0
+ "declare $name string; " +
+ "select * from users where name = $name",
+ // 1
+ "declare $low integer; $high integer; " +
+ "select * from users where $low <= int and int <= $high " +
+ "order by int",
+ // 2
+ "declare $low integer; $high integer; " +
+ "select * from users where $low <= int and int <= $high " +
+ "order by int desc",
+ // 3
+ "declare $low integer; $high integer; " +
+ "select int, count(*) as count " +
+ "from users " +
+ "where $low <= int and int <= $high " +
+ "group by int",
+ // 4
+ "declare $low integer; $high integer; " +
+ "select int, count(*) as count " +
+ "from users " +
+ "group by int",
+ // 5
+ "declare $low integer; $high integer; " +
+ "select * from users where $low <= uid and uid <= $high ",
+ // 6
+ "declare $uid1 integer; $low integer; $high integer; " +
+ "select * from users2 where uid1 = $uid1 and $low <= int and int <= $high",
+ // 7
+ "declare $name string; " +
+ "select p.uid, p.name, p.count as pcount, c.cid, c.count as ccount " +
+ "from nested tables(users p descendants(users.child c)) " +
+ "where p.name = $name",
+ // 8
+ "declare $name string; " +
+ "select p.uid, p.name, p.count as pcount, c.cid, c.count as ccount " +
+ "from users p, users.child c " +
+ "where p.uid = c.uid and p.name = $name",
+ };
+
+ int numRows;
+ int maxNameId1;
+ int maxNameId2;
+ int minNumRowsPerName = 1;
+
+ final int maxChildRows = 30;
+
+ // Maps nameId to count
+    final Map<Integer, Long> countMap = new HashMap<>();
+
+ /** Represents the test state. */
+ private class TestState {
+
+ TestState(int numQueryThreads) {
+ this.numQueryThreads = numQueryThreads;
+ }
+
+ int numQueryThreads;
+
+ private final AtomicInteger elasticityCount =
+ new AtomicInteger(0);
+ private final AtomicBoolean elasticityDone =
+ new AtomicBoolean(false);
+ private final AtomicInteger queryThreadDoneCount =
+ new AtomicInteger(0);
+        private final ConcurrentLinkedQueue<Throwable> errors =
+ new ConcurrentLinkedQueue<>();
+
+ private int getElasticityCount() {
+ return elasticityCount.get();
+ }
+
+ private void incElasticityCount() {
+ elasticityCount.getAndIncrement();
+ }
+
+ private boolean isElasticityDone() {
+ return elasticityDone.get();
+ }
+
+ private void setElasticityDone() {
+ elasticityDone.set(true);
+ }
+
+ private boolean areQueriesDone() {
+ return queryThreadDoneCount.get() >= numQueryThreads;
+ }
+
+ private void setQueryThreadDone() {
+            queryThreadDoneCount.getAndIncrement();
+ }
+
+ private void reportError(Throwable t) {
+ errors.add(t);
+ }
+
+        private Collection<Throwable> getErrors() {
+ return errors;
+ }
+ }
+
+ private class QueryException extends RuntimeException {
+
+        private static final long serialVersionUID = 1L;
+
+ private long timestamp;
+ private String qname;
+
+ private QueryException(String qname, String message) {
+ super(message);
+ this.timestamp = System.currentTimeMillis();
+ this.qname = qname;
+ }
+
+ @Override
+ public String toString() {
+ return String.format(
+ "Error executing query at %s with name=%s : %s",
+ FormatUtils.formatDateTimeMillis(timestamp),
+ qname,
+ getMessage());
+ }
+ }
+
+ private class ElasticityException extends RuntimeException {
+
+        private static final long serialVersionUID = 1L;
+
+ private ElasticityException(Throwable cause) {
+ super(cause);
+ }
+ }
+
+ private static void trace(String msg) {
+
+ if (trace) {
+ System.out.println(msg);
+ }
+ }
+
+
+ /* these override the Before/AfterClass methods in ProxyTestBase */
+ @BeforeClass
+ public static void staticSetUp() {
+ assumeTrue("Skipping ElasticityTest for minicloud or cloud test",
+ !Boolean.getBoolean(USEMC_PROP) &&
+ !Boolean.getBoolean(USECLOUD_PROP));
+ }
+
+ @AfterClass
+ public static void staticTearDown() {}
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+
+ if (proxy != null) {
+ proxy.shutdown(3, TimeUnit.SECONDS);
+ proxy = null;
+ }
+
+ if (handle != null) {
+ handle.close();
+ handle = null;
+ }
+
+ if (kvstore != null) {
+ kvstore.close();
+ kvstore = null;
+ }
+
+ if (createStore != null) {
+ createStore.shutdown();
+ createStore = null;
+ }
+ }
+
+ private static void cleanupTestDir(String testDir) {
+ File testDirFile = new File(testDir);
+ if (!testDirFile.exists()) {
+ return;
+ }
+ clearDirectory(testDirFile);
+ }
+
+ private void createStore(
+ String testSubDir,
+ int capacity,
+ int partitions) throws Exception {
+
+ int port = getKVPort();
+ String testDir = getTestDir() + "/" + testSubDir;
+
+ cleanupTestDir(testDir);
+
+ createStore =
+ new CreateStore(
+ testDir,
+ getStoreName(),
+ port,
+ 3, /* nsns */
+ 3, /* rf */
+ partitions,
+ capacity,
+ 256, /* mb */
+ false, /* use threads */
+ null);
+ final File root = new File(testDir);
+ root.mkdirs();
+ createStore.start();
+
+ kvstore = KVStoreFactory.getStore(
+ new KVStoreConfig(getStoreName(),
+ String.format("%s:%s", getHostName(), port)));
+
+ proxy = ProxyTestBase.startProxy();
+
+ handle = createHandle();
+ }
+
+ private NoSQLHandle createHandle() {
+
+ NoSQLHandleConfig hconfig =
+ new NoSQLHandleConfig(ProxyTestBase.getProxyEndpoint());
+
+ /* 5 retries, default retry algorithm */
+ hconfig.configureDefaultRetryHandler(5, 0);
+
+ hconfig.setRequestTimeout(30000);
+ //hconfig.setNumThreads(20);
+
+ SecureTestUtil.setAuthProvider(hconfig,
+ ProxyTestBase.SECURITY_ENABLED,
+ ProxyTestBase.onprem(),
+ ProxyTestBase.getTenantId());
+ hconfig.setLogger(logger);
+
+ /* Open the handle */
+ NoSQLHandle h = NoSQLHandleFactory.createNoSQLHandle(hconfig);
+
+ /* do a simple op to set the protocol version properly */
+ try {
+ GetTableRequest getTable =
+ new GetTableRequest().setTableName("noop");
+ h.getTable(getTable);
+ } catch (TableNotFoundException e) {}
+
+ return h;
+ }
+
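+    /*
+     * Expands the store by deploying three additional storage nodes,
+     * adding them to the pool, and redistributing the topology.
+     */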
+ private void expandStore(int capacity) throws Exception {
+
+ final CommandServiceAPI cs = createStore.getAdmin();
+ final String hostname = createStore.getHostname();
+ final String poolname = CreateStore.STORAGE_NODE_POOL_NAME;
+ final int portsPerFinder = 20;
+
+ /* deploy 3 more sns */
+ for (int i = 0; i < 3; ++i) {
+ int sid = i + 4;
+ PortFinder pf = new PortFinder(
+ startPort + (3 + i) * portsPerFinder, haRange, hostname);
+ int port = pf.getRegistryPort();
+
+ extraSNAs[i] = CreateStoreUtils.createUnregisteredSNA(
+ createStore.getRootDir(),
+ pf,
+ capacity,
+ String.format("config%s.xml", i + 3),
+ false /* useThreads */,
+ false /* createAdmin */,
+ 2 /* mb */,
+ null /* extra params */);
+
+ CreateStoreUtils.waitForAdmin(hostname, port, 20, logger);
+ createStore.setExpansionSnas(extraSNAs);
+
+ int planId = cs.createDeploySNPlan(
+ String.format("deploy sn%s", sid),
+ new DatacenterId(1),
+ hostname,
+ port,
+ "comment");
+
+ runPlan(planId);
+
+ StorageNodeId snid = extraSNAs[i].getStorageNodeId();
+ cs.addStorageNodeToPool(poolname, snid);
+ }
+
+ String expandTopoName =
+ String.format("expand-%s",
+ topoCandidateSequencer.getAndIncrement());
+ cs.copyCurrentTopology(expandTopoName);
+ cs.redistributeTopology(expandTopoName, poolname);
+
+ int planId = cs.createDeployTopologyPlan(
+ "deploy expansion", expandTopoName, null);
+ runPlan(planId);
+ }
+
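+    /*
+     * Contracts the store back to its original size: removes the three
+     * extra storage nodes from the pool, deploys the contracted topology,
+     * then removes the SNs and cleans up their on-disk state.
+     */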
+ private void contractStore() throws Exception {
+
+ final CommandServiceAPI cs = createStore.getAdmin();
+ final String poolname = CreateStore.STORAGE_NODE_POOL_NAME;
+
+ for (int i = 0; i < 3; ++i) {
+ cs.removeStorageNodeFromPool(poolname, new StorageNodeId(i + 4));
+ }
+
+ String contractTopoName =
+ String.format("contract-%s",
+ topoCandidateSequencer.getAndIncrement());
+ cs.copyCurrentTopology(contractTopoName);
+ cs.contractTopology(contractTopoName, poolname);
+
+ verbose("Elasticity: Starting deploy-contraction plan");
+
+ int planId = cs.createDeployTopologyPlan(
+ "deploy contraction", contractTopoName, null);
+ runPlan(planId);
+
+ for (int i = 0; i < 3; ++i) {
+
+ verbose("Elasticity: Starting remove SN plan for SN " + (i+4));
+
+ planId = cs.createRemoveSNPlan(
+ String.format("remove sn%s", i + 4),
+ new StorageNodeId(i + 4));
+ runPlan(planId);
+
+            extraSNAs[i].shutdown(true, true, "contraction");
+ extraSNAs[i] = null;
+
+ Files.deleteIfExists(
+ Paths.get(createStore.getRootDir(),
+ String.format("config%s.xml", i + 3)));
+ FileUtils.deleteDirectory(
+ Paths.get(createStore.getRootDir(),
+ createStore.getStoreName(),
+ String.format("sn%s", i + 4))
+ .toFile());
+ }
+ }
+
+ private void runPlan(int planId) throws Exception {
+ final CommandServiceAPI cs = createStore.getAdmin();
+ cs.approvePlan(planId);
+ cs.executePlan(planId, false);
+ cs.awaitPlan(planId, 0, null);
+ cs.assertSuccess(planId);
+ }
+
+ private void createTableAndIndex() {
+
+ TableLimits limits = new TableLimits(90000, 15000, 50);
+ int timeout = 20000;
+
+ ProxyTestBase.tableOperation(handle, usersDDL, limits,
+ TableResult.State.ACTIVE, timeout);
+ ProxyTestBase.tableOperation(handle, childDDL, null,
+ TableResult.State.ACTIVE, timeout);
+ ProxyTestBase.tableOperation(handle, idxNameDDL, null,
+ TableResult.State.ACTIVE, timeout);
+ ProxyTestBase.tableOperation(handle, idxIntDDL, null,
+ TableResult.State.ACTIVE, timeout);
+ ProxyTestBase.tableOperation(handle, users2DDL, limits,
+ TableResult.State.ACTIVE, timeout);
+ ProxyTestBase.tableOperation(handle, idx2IntDDL, null,
+ TableResult.State.ACTIVE, timeout);
+ }
+
+ /**
+     * Populates {@code numRows} rows and records the maximum name ID.
+     *
+     * The userID is the natural series. The name is in the form of name.id,
+     * where id is also the natural series but multiple rows can have the
+     * same id. The number of rows having the same name.id is picked at
+     * random between 1 and 5% of {@code numRows}. The count field stores
+     * the total number of rows that have the same name.id as that row, so
+     * that by looking at the count field we know how many rows should be
+     * returned for a query on that name.id.
+ */
+ private void populateRows(String tableName, String testName) {
+
+ TableAPI tableAPI = kvstore.getTableAPI();
+ Table table = tableAPI.getTable("in.valid.iac.name.space:" + tableName);
+ Table childTable = tableAPI.getTable("in.valid.iac.name.space:users.child");
+ boolean users2 = tableName.equals("users2");
+ boolean innerJoin = testName.contains("InnerJoin");
+
+ MapValue row = new MapValue();
+ int maxRowsPerNameId = Math.max(1, numRows * 5 / 100);
+ int uid1 = 0;
+ int uid2 = 0;
+ int nameId = 0;
+ int nrows = 0;
+
+ Row kvrow = null;
+ if (table != null) {
+ kvrow = table.createRow();
+ }
+
+ PutRequest putRequest = new PutRequest()
+ .setValue(row)
+ .setTableName(tableName);
+
+ if (users2) {
+ uid1 = 2; // all rows will go to partition 4
+ }
+
+ while (nrows < numRows) {
+ int maxRowsPerNameId2 = Math.min(numRows - nrows, maxRowsPerNameId);
+ long rowsPerNameId = (
+ maxRowsPerNameId2 <= minNumRowsPerName ?
+ minNumRowsPerName :
+ minNumRowsPerName
+ + rand.nextInt(maxRowsPerNameId2 - minNumRowsPerName));
+
+ String name = ("name." + nameId);
+ if (!users2) {
+ countMap.put(nameId, rowsPerNameId);
+ }
+
+ for (int i = 0; i < rowsPerNameId; ++i) {
+ if (users2) {
+ row.put("uid1", uid1);
+ row.put("uid2", uid2);
+ if (kvrow != null) {
+ kvrow.put("uid1", uid1);
+ kvrow.put("uid2", uid2);
+ }
+ } else {
+ row.put("uid", uid1);
+ if (kvrow != null) {
+ kvrow.put("uid", uid1);
+ }
+ }
+ row.put("name", name);
+ row.put("int", nameId);
+ row.put("count", rowsPerNameId);
+
+ PutResult res = handle.put(putRequest);
+ assertNotNull("Put failed", res.getVersion());
+
+ if (users2) {
+ ++uid2;
+ } else {
+ int numChildRows = rand.nextInt(maxChildRows);
+ if (numChildRows == 0 && innerJoin) {
+ numChildRows = 1;
+ }
+
+ if (numChildRows > 0) {
+
+ MapValue childRow = new MapValue();
+ PutRequest cputRequest = new PutRequest()
+ .setValue(childRow)
+ .setTableName("users.child");
+
+ childRow.put("uid", uid1);
+
+ for (int j = 0; j < numChildRows; ++j) {
+ childRow.put("cid", j);
+ childRow.put("cint", rand.nextInt(10));
+ childRow.put("count", numChildRows);
+
+ PutResult cres = handle.put(cputRequest);
+ assertNotNull("Put failed", cres.getVersion());
+ }
+ }
+
+ ++uid1;
+ }
+ ++nrows;
+
+ /*
+ if (!users2) {
+ PartitionId pid = ((KVStoreImpl)kvstore).
+ getPartitionId(TableKey.createKey(table, kvrow, false).getKey());
+ trace("Inserted row " + row +
+ " in P-" + pid.getPartitionId());
+ }
+ */
+ }
+
+ ++nameId;
+ }
+
+ if (users2) {
+ maxNameId2 = nameId;
+ } else {
+ maxNameId1 = nameId;
+ }
+ }
+
+ private int getUserId(MapValue row) {
+ return row.get("uid").getInt();
+ }
+
+ private int getUserId2(MapValue row) {
+ return row.get("uid2").getInt();
+ }
+
+ private String getName(MapValue row) {
+ return row.get("name").getString();
+ }
+
+ private int getInt(MapValue row) {
+ return row.get("int").getInt();
+ }
+
+ private long getCount(MapValue row) {
+ return row.get("count").getLong();
+ }
+
+ private PartitionId getPartition(Table table, RecordValue rec) {
+ Row row = table.createRow(rec);
+ return ((KVStoreImpl) kvstore).getPartitionId(
+ TableKey.createKey(table, row, false).getKey());
+ }
+
+ /** Executes a secondary query and verifies the result. */
+ private void queryAndVerify(
+ TestState testState,
+ int qid,
+ int qcount) {
+
+ String qname = ("Q" + qid + "-" + qcount);
+
+ PrepareRequest preq = new PrepareRequest();
+ preq.setGetQueryPlan(true);
+ preq.setStatement(queries[qid]);
+ PrepareResult pres = handle.prepare(preq);
+ PreparedStatement prep = pres.getPreparedStatement();
+
+ //verbose("driver topo seq num = " + prep.topologySeqNum());
+
+ QueryRequest qreq = new QueryRequest();
+ qreq.setPreparedStatement(prep);
+ qreq.setTimeout(30000);
+ qreq.setQueryName(qname);
+ qreq.setMaxReadKB(maxReadKB);
+ qreq.setTraceLevel(3);
+ //if (qcount > 4800) {
+ // qreq.setTraceLevel(0);
+ //}
+
+ int maxNameId = maxNameId1;
+ int searchPKey = -1;
+ if (qid == 5) {
+ searchPKey = rand.nextInt(numRows);
+ } else if (qid == 6) {
+ searchPKey = 2;
+ maxNameId = maxNameId2;
+ }
+
+ int searchNameId = rand.nextInt(maxNameId);
+
+ String searchKey = ("name." + searchNameId);
+
+ //verbose("Executing query " + qname + " with search pkey " +
+ // searchPKey + " and search key " + searchKey);
+
+ if (qid == 0 || qid == 7 || qid == 8) {
+ prep.setVariable("$name", new StringValue(searchKey));
+ } else if (qid == 5) {
+ int lowKey = searchPKey - 20;
+ int highKey = searchPKey + 20;
+ prep.setVariable("$low", new IntegerValue(lowKey));
+ prep.setVariable("$high", new IntegerValue(highKey));
+ } else if (qid == 6) {
+ int lowKey = searchNameId - 100;
+ int highKey = searchNameId + 100;
+ prep.setVariable("$uid1", new IntegerValue(searchPKey));
+ prep.setVariable("$low", new IntegerValue(lowKey));
+ prep.setVariable("$high", new IntegerValue(highKey));
+ } else {
+ int lowKey = searchNameId - 2;
+ int highKey = searchNameId + 2;
+ prep.setVariable("$low", new IntegerValue(lowKey));
+ prep.setVariable("$high", new IntegerValue(highKey));
+ }
+
+        List<MapValue> results = new ArrayList<>();
+
+ try {
+ do {
+ QueryResult res = handle.query(qreq);
+                List<MapValue> list = res.getResults();
+ for (MapValue val : list) {
+ results.add(val);
+ }
+ } while (!qreq.isDone());
+
+ if (results.isEmpty()) {
+ throw new QueryException(qname, "no records found");
+ }
+
+ verifyQueryResults(qid, qname,
+ maxNameId, searchNameId, searchPKey,
+ results);
+ } catch (QueryException e) {
+ testState.reportError(e);
+ if (testState.errors.size() == 1) {
+ trace("Elasticity:-1 query " + qname + " failed");
+ trace(CommonLoggerUtils.getStackTrace(e));
+ if (trace) {
+ qreq.printTrace(System.out);
+ }
+ }
+ throw e;
+ } catch (RequestTimeoutException rte) {
+            /* Don't fail the test. Due to the high load imposed on the store
+             * by ElasticityTest, it is possible for queries to time out or
+             * to get "failed to read base topology" errors. We don't want
+             * to consider such errors as failures; only wrong query results
+             * are considered failures. */
+ testState.reportError(rte);
+ verbose("Elasticity:-2 query " + qname + " failed");
+ verbose(CommonLoggerUtils.getStackTrace(rte));
+ } catch (TableNotFoundException tnfe) {
+ /* Don't fail the test. Same reason as above */
+ testState.reportError(tnfe);
+ verbose("Elasticity:-2 query " + qname + " failed");
+ verbose(CommonLoggerUtils.getStackTrace(tnfe));
+ } catch (Throwable t) {
+ testState.reportError(t);
+ if (testState.errors.size() <= 10) {
+ trace("Elasticity:-2 query " + qname + " failed: " + t);
+ }
+ verbose("Elasticity:-2 query " + qname + " failed");
+ verbose(CommonLoggerUtils.getStackTrace(t));
+ throw new QueryException(qname, t.getMessage());
+ }
+ }
+
+ /**
+ * Verifies that the query result consists of a block of consecutive rows
+ * and the count matches the specified value.
+ */
+ private void verifyQueryResults(
+ int qid,
+ String qname,
+ int maxNameId,
+ int searchNameId,
+ int searchPKey,
+        List<MapValue> results) {
+
+ if (qid == 7 || qid == 8) {
+ verifyQ7Results(qid, qname, results);
+ return;
+ }
+
+ long expectedCount = 0;
+ long actualCount = 0;
+ int prevInt = -1;
+
+        /* Queries with range scans: compute the expected count and
+         * make sure the results are properly sorted. */
+ if (qid == 1 || qid == 2 || qid == 6) {
+
+ for (MapValue row : results) {
+
+ ++actualCount;
+ int currInt = getInt(row);
+
+ if (prevInt < 0) {
+ expectedCount += getCount(row);
+ prevInt = currInt;
+ continue;
+ }
+
+ if (qid == 1 && prevInt > currInt) {
+ throw new QueryException(qname, "Query results are out of order");
+ }
+
+ if (qid == 2 && prevInt < currInt) {
+ throw new QueryException(qname, "Query results are out of order");
+ }
+
+ if (prevInt != currInt) {
+ expectedCount += getCount(row);
+ prevInt = currInt;
+ }
+ }
+ }
+
+ /* Group-by queries */
+ if (qid == 3 || qid == 4) {
+ if (qid == 4) {
+ expectedCount = maxNameId;
+ } else {
+ expectedCount = 5;
+ if (searchNameId == 0 || searchNameId == maxNameId - 1) {
+ expectedCount = 3;
+ } else if (searchNameId == 1 || searchNameId == maxNameId - 2) {
+ expectedCount = 4;
+ }
+ }
+
+ for (MapValue row : results) {
+
+ ++actualCount;
+ int currInt = getInt(row);
+ long currCount = getCount(row);
+
+ if (countMap.get(currInt) != currCount) {
+ throw new QueryException(qname,
+ "Unexpected group count. Expected = " + countMap.get(currInt) +
+ " actual = " + currCount);
+ }
+
+ if (prevInt < 0) {
+ prevInt = currInt;
+ continue;
+ }
+
+ if (prevInt > currInt) {
+ throw new QueryException(qname, "Query results are out of order");
+ }
+
+ if (prevInt != currInt) {
+ prevInt = currInt;
+ }
+ }
+ }
+
+ if (qid == 5) {
+ expectedCount = 41;
+
+ if (searchPKey - 20 < 0) {
+ expectedCount -= (20 - searchPKey);
+ } else if (searchPKey + 20 >= numRows) {
+ expectedCount -= (searchPKey + 20 - numRows + 1);
+ }
+ }
+
+ if (qid < 3 || qid == 5) {
+ /* Make sure the row Ids are consecutive without missing or duplicate */
+ Collections.sort(results, Comparator.comparingInt((r) -> getUserId(r)));
+
+ MapValue firstRow = results.get(0);
+ if (qid == 0) {
+ expectedCount = getCount(firstRow);
+ }
+ int startId = getUserId(firstRow);
+ int prevId = startId - 1;
+
+ actualCount = 0;
+ for (MapValue row : results) {
+ ++actualCount;
+ int currId = getUserId(row);
+ if (prevId < currId - 1) {
+ notifyIncorrectRows("missing", qid, qname,
+ currId-1, prevId, currId);
+ } else if (prevId == currId) {
+
+ notifyIncorrectRows("duplicating", qid, qname,
+ currId, prevId, currId);
+ }
+ prevId = currId;
+ }
+ }
+
+ if (qid == 6) {
+ /* Make sure the row Ids are consecutive without missing or duplicate */
+ Collections.sort(results, Comparator.comparingInt((r) -> getUserId2(r)));
+
+ MapValue firstRow = results.get(0);
+ int startId = getUserId2(firstRow);
+ int prevId = startId - 1;
+
+ actualCount = 0;
+ for (MapValue row : results) {
+ ++actualCount;
+ int currId = getUserId2(row);
+ if (prevId < currId - 1) {
+ notifyIncorrectRows("missing", qid, qname,
+ currId-1, prevId, currId);
+ } else if (prevId == currId) {
+
+ notifyIncorrectRows("duplicating", qid, qname,
+ currId, prevId, currId);
+ }
+ prevId = currId;
+ }
+ }
+
+ /* Make sure the count is correct. */
+ if (actualCount != expectedCount) {
+ throw new QueryException(
+ qname, "incorrect count, expected = " +
+ expectedCount + " actual = " + actualCount);
+ }
+ }
+
+ private void verifyQ7Results(
+ int qid,
+ String qname,
+        List<MapValue> results) {
+
+ /* Make sure the row Ids are consecutive without missing or duplicate */
+ Collections.sort(results, Comparator.comparingInt((r) -> getUserId(r)));
+
+ MapValue firstRow = results.get(0);
+ int startUid = getUserId(firstRow);
+ int prevUid = startUid - 1;
+ long numExpectedParentRows = firstRow.get("pcount").getLong();
+ long numActualParentRows = 0;
+ int prevCid = -1;
+ int numExpectedChildRows = -1;
+ int numActualChildRows = -1;
+
+ for (MapValue row : results) {
+
+ //verbose(row);
+
+ int currUid = getUserId(row);
+
+ if (prevUid < currUid - 1) {
+ notifyIncorrectRows("missing", qid, qname,
+ currUid-1, prevUid, currUid);
+ }
+
+ if (prevUid == currUid) {
+
+ if (prevCid == -1) {
+ notifyIncorrectRows("duplicating", qid, qname,
+ currUid, prevUid, currUid);
+ }
+
+ ++numActualChildRows;
+ int currCid = row.get("cid").getInt();
+ if (prevCid < currCid - 1) {
+ throw new QueryException(
+ qname,
+ "missing row (" + currUid + ", " + (prevCid+1) + ")" +
+ "prevCid = " + prevCid + " currCid = " + currCid);
+ } else if (prevCid == currCid) {
+ throw new QueryException(
+ qname,
+ "duplicating row (" + currUid + ", " + currCid + ")");
+ }
+
+ prevCid = currCid;
+ } else {
+ if (numActualChildRows != numExpectedChildRows) {
+ throw new QueryException(
+ qname,
+ "incorrect number of child rows for parent uid : " +
+ currUid + " expected = " + numExpectedChildRows +
+ " actual = " + numActualChildRows);
+ }
+
+ ++numActualParentRows;
+
+ if (row.get("ccount").isNull()) {
+ prevCid = -1;
+ numExpectedChildRows = 0;
+ numActualChildRows = 0;
+ } else {
+ int currCid = row.get("cid").getInt();
+ if (currCid != 0) {
+ throw new QueryException(
+ qname,
+ "missing row (" + currUid + ", " + 0 + ")" +
+ " currCid = " + currCid);
+ }
+ prevCid = 0;
+ numExpectedChildRows = row.get("ccount").getInt();
+ numActualChildRows = 1;
+ }
+ }
+
+ prevUid = currUid;
+ }
+
+ if (numActualParentRows != numExpectedParentRows) {
+ throw new QueryException(
+ qname,
+ "incorrect number of parent rows: expected = " +
+ numExpectedParentRows + " actual = " +
+ numActualParentRows);
+ }
+ }
+
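+    /**
+     * Reports a missing or duplicated row by throwing a QueryException
+     * that includes the partition the problem row maps to.
+     */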
+ private void notifyIncorrectRows(String cause,
+ int qid,
+ String qname,
+ int problemId,
+ int prevId,
+ int currId) {
+
+ String tableName;
+ if (qid == 6) {
+ tableName = "users2";
+ } else {
+ tableName = "users";
+ }
+
+ Table table = kvstore.getTableAPI().getTable("in.valid.iac.name.space:" +
+ tableName);
+ final Row row = table.createRow();
+        row.put("uid", problemId);
+ throw new QueryException(
+ qname,
+ String.format(
+ "%s row with userId %s, "
+ + "prevId=%s, currId=%s, "
+ + "partition of the %s row: %s",
+ cause, problemId, prevId, currId, cause,
+ getPartition(table, row)));
+
+ }
+
+ /**
+ * Verifies the test state after elasticity ops and queries are done.
+ */
+ private void verifyTestState(TestState testState) {
+
+ if (testState.isElasticityDone() &&
+ testState.areQueriesDone() &&
+ testState.getErrors().isEmpty()) {
+ return;
+ }
+
+        List<String> errorMessages = new ArrayList<>();
+
+ /*
+ Predicate unexpectedError =
+ (t) -> (!(t instanceof ElasticityException)) &&
+ (!(t instanceof QueryException));
+
+ if (testState.getErrors().stream()
+ .filter(unexpectedError).count() != 0) {
+ errorMessages.add(
+ String.format(
+ "Unexpected exceptions. %s",
+ toErrorString(testState, unexpectedError)));
+ }
+ */
+
+        Predicate<Throwable> elasticityError =
+            (t) -> (t instanceof ElasticityException);
+
+ if (testState.getErrors().stream()
+ .filter(elasticityError).count() != 0) {
+ errorMessages.add(
+ String.format(
+ "Unexpected elasticity exceptions. "
+ + "Total elasticity routines done: %s. "
+ + "%s",
+ testState.getElasticityCount(),
+ toErrorString(testState, elasticityError)));
+ }
+
+ collectQueryErrors(testState, errorMessages);
+
+ assertTrue(errorMessages.stream()
+ .collect(Collectors.joining("\n")),
+ errorMessages.isEmpty());
+ }
+
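+    /* Formats up to NUM_ERRORS_TO_DISPLAY errors matching the given
+     * filter, with stack traces, into a single string. */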
+    private String toErrorString(TestState testState,
+                                 Predicate<Throwable> filter) {
+ StringBuilder sb = new StringBuilder();
+ long totalCount = testState.getErrors().stream().filter(filter).count();
+ sb.append("Total number of errors: ").append(totalCount).append("\n");
+
+ testState.getErrors().stream().filter(filter)
+ .limit(NUM_ERRORS_TO_DISPLAY)
+ .forEach((t) -> {
+ sb.append("> ").append(CommonLoggerUtils.getStackTrace(t)).
+ append("\n");
+ });
+ return sb.toString();
+ }
+
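+    /* Collects QueryExceptions from the test state into a single
+     * error message. */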
+    private void collectQueryErrors(TestState testState,
+                                    List<String> errorMessages) {
+
+        List<QueryException> queryErrors = new ArrayList<>();
+ for (Throwable t : testState.getErrors()) {
+ if (!(t instanceof QueryException)) {
+ continue;
+ }
+ QueryException qe = (QueryException) t;
+ queryErrors.add(qe);
+ }
+
+ if (queryErrors.isEmpty()) {
+ return;
+ }
+
+ StringBuilder sb = new StringBuilder();
+ queryErrors.stream()
+ .limit(NUM_ERRORS_TO_DISPLAY)
+ .forEach((qe) -> { sb.append("> ").append(qe).append("\n"); });
+ errorMessages.add(
+ String.format("Unexpected query exceptions. " +
+ "Total number of failures: %s.\n" + "%s",
+ queryErrors.size(),
+ sb.toString()));
+ }
+
+ /* Start a thread to execute queries and verify the results. */
+ private void startQueryThread(TestState testState, int qid) {
+
+ Thread th = new Thread(() -> {
+ try {
+ int count = 0;
+ //while (count < 200) {
+ while (!testState.isElasticityDone()) {
+ queryAndVerify(testState, qid, count);
+ count++;
+ }
+ testState.setQueryThreadDone();
+ } catch (Throwable t) {
+ /* make sure all query threads exit on failure */
+ testState.setQueryThreadDone();
+ testState.setElasticityDone();
+ fail(t.getMessage());
+ }
+ });
+ th.setDaemon(true);
+ th.start();
+ }
+
+    private void startElasticityThread(TestState testState,
+                                       List<ElasticityRoutine> routines) {
+ Thread th = new Thread(() -> {
+ try {
+ for (ElasticityRoutine routine : routines) {
+ routine.run();
+ testState.incElasticityCount();
+ }
+ } catch (Throwable t) {
+ testState.reportError(new ElasticityException(t));
+ } finally {
+ testState.setElasticityDone();
+ }
+ });
+ th.setDaemon(true);
+ th.start();
+ }
+
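+    /** A single elasticity operation, e.g. store expansion or contraction. */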
+ private interface ElasticityRoutine {
+ void run() throws Exception;
+ }
+
+ /**
+ * Tests the basic case that a query is executed under store expansion.
+ * This test is expected to exercise the most basic interaction between
+ * query and partition migration.
+ */
+ @Test
+ public void testSmallExpansion() throws Exception {
+ int[] qids = { 0 };
+ numRows = 1000;
+ testExpansion("smallExpansion", 1, 10, qids, "users");
+ }
+
+ @Test
+ public void testSmallExpansionSort() throws Exception {
+ int[] qids = { 1 };
+ numRows = 1000;
+ testExpansion("smallExpansionSort", 1, 10, qids, "users");
+ }
+
+ @Test
+ public void testSmallExpansionSortDesc() throws Exception {
+ int[] qids = { 2 };
+ numRows = 1000;
+ testExpansion("smallExpansionSortDesc", 1, 10, qids, "users");
+ }
+
+ @Test
+ public void testSmallExpansionGroup() throws Exception {
+ int[] qids = { 3 };
+ numRows = 1000;
+ testExpansion("smallExpansionGroup", 1, 10, qids, "users");
+ }
+
+ @Test
+ public void testSmallExpansionGroup2() throws Exception {
+ int[] qids = { 4 };
+ numRows = 1000;
+ testExpansion("smallExpansionGroup2", 1, 10, qids, "users");
+ }
+
+ @Test
+ public void testSmallExpansionAllPartitions() throws Exception {
+ int[] qids = { 5 };
+ numRows = 1000;
+ testExpansion("smallExpansionAllParititions", 1, 10, qids, "users");
+ }
+
+ @Test
+ public void testSmallExpansionSinglePartition() throws Exception {
+ int[] qids = { 6, 6, 6, 6, 6 };
+ numRows = 3000;
+ testExpansion("smallExpansionSinglePartition", 1, 10, qids, "users2");
+ }
+
+ @Test
+ public void testSmallExpansionJoin() throws Exception {
+ int[] qids = { 7 };
+ numRows = 1000;
+ testExpansion("smallExpansionJoin", 1, 10, qids, "users");
+ }
+
+ @Test
+ public void testSmallExpansionInnerJoin() throws Exception {
+ int[] qids = { 8 };
+ numRows = 1000;
+ testExpansion("smallExpansionInnerJoin", 1, 10, qids, "users");
+ }
+
+ @Test
+ public void testBigExpansionJoin() throws Exception {
+ org.junit.Assume.assumeTrue(!isLinux); /* skip if linux */
+ int[] qids = { 7 };
+ numRows = 10000;
+ testExpansion("bigExpansionJoin", 3, 20, qids, "users");
+ }
+
+ @Test
+ public void testBigExpansion() throws Exception {
+ org.junit.Assume.assumeTrue(!isLinux); /* skip if linux */
+ int[] qids = { 0, 1, 4 };
+ numRows = 10000;
+ testExpansion("bigExpansion", 3, 20, qids, "users");
+ }
+
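+    /*
+     * Creates a store, loads rows, runs the given queries concurrently
+     * with a store expansion, then verifies results and error state.
+     */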
+ private void testExpansion(
+ String testSubDir,
+ int capacity,
+ int partitions,
+ int[] qids,
+ String tableName)
+ throws Exception {
+
+ createStore(testSubDir, capacity, partitions);
+ createTableAndIndex();
+ populateRows(tableName, testSubDir);
+
+ TestState testState = new TestState(qids.length);
+
+ startElasticityThread(testState, Arrays.asList(() -> expandStore(capacity)));
+ //testState.setElasticityDone();
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e) {
+ }
+
+ for (int qid : qids) {
+ startQueryThread(testState, qid);
+ }
+
+ final long timeoutMillis = 600 * 1000;
+ PollCondition.await(
+ POLL_CONDITION_INTERVAL, timeoutMillis,
+ () ->
+ testState.isElasticityDone() && testState.areQueriesDone());
+
+ verifyTestState(testState);
+ }
+
+ /**
+ * Tests the basic case that a query is executed under store contraction.
+ * This test is expected to exercise the most basic interaction between
+ * query and partition migration.
+ */
+ @Test
+ public void testSmallContraction() throws Exception {
+ int[] qids = { 0 };
+ numRows = 1000;
+ testContraction("smallContraction", 1, 10, qids);
+ }
+
+ @Test
+ public void testSmallContractionSort() throws Exception {
+ int[] qids = { 1 };
+ numRows = 1000;
+ testContraction("smallContractionSort", 1, 10, qids);
+ }
+
+    private void testContraction(
+ String testSubDir,
+ int capacity,
+ int partitions,
+ int[] qids) throws Exception {
+
+ createStore(testSubDir, capacity, partitions);
+ expandStore(capacity);
+ createTableAndIndex();
+ populateRows("users", testSubDir);
+ TestState testState = new TestState(qids.length);
+
+ verbose("Elasticity: Starting store contraction");
+
+ for (int qid : qids) {
+ startQueryThread(testState, qid);
+ }
+ //testState.setQueryAndVerifyDone();
+ startElasticityThread(testState, Arrays.asList(() -> contractStore()));
+
+ /* Waits for both to finish. */
+ final long timeoutMillis = 60 * 1000;
+ PollCondition.await(
+ POLL_CONDITION_INTERVAL, timeoutMillis,
+ () ->
+ testState.isElasticityDone() && testState.areQueriesDone());
+
+ verbose("Elasticity: Store contraction done");
+
+ /* Verify the results. */
+ verifyTestState(testState);
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/JsonCollectionTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/JsonCollectionTest.java
new file mode 100644
index 00000000..98777aca
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/JsonCollectionTest.java
@@ -0,0 +1,846 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.sql.Timestamp;
+import java.util.ArrayList;
+
+
+import oracle.nosql.driver.Version;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.WriteMultipleRequest;
+import oracle.nosql.driver.ops.WriteMultipleResult;
+import oracle.nosql.driver.values.BinaryValue;
+import oracle.nosql.driver.values.FieldValue;
+import oracle.nosql.driver.values.JsonNullValue;
+import oracle.nosql.driver.values.JsonUtils;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.driver.values.TimestampValue;
+
+import org.junit.Ignore;
+import org.junit.Test;
+
+
+public class JsonCollectionTest extends ProxyTestBase {
+
+ /*
+ * Basic data tests for JSON Collection
+ * put, get, writemultiple, simple query
+ */
+ @Test
+ public void testJsonCollection() {
+ TableLimits limits = new TableLimits(10, 10, 1);
+ String createTable = "create table noschema " +
+ "(id integer, sid integer, primary key(shard(id), sid)) " +
+ "as json collection";
+ String insertQ = "insert into noschema(id, sid, name) values(5, 6, 'jack') returning *";
+
+ tableOperation(handle, createTable, limits, null,
+ TableResult.State.ACTIVE, null);
+ PrepareRequest prepReq = new PrepareRequest()
+ .setStatement(insertQ);
+ PrepareResult prepRet = handle.prepare(prepReq);
+
+
+ QueryRequest qReq = new QueryRequest().setPreparedStatement(prepRet);
+ QueryResult qRes = handle.query(qReq);
+
+ /* expected value from above insert query */
+ MapValue value = new MapValue()
+ .put("id", 5)
+ .put("sid", 6)
+ .put("name", "jack");
+
+ for (MapValue res : qRes.getResults()) {
+ assertEquals(value, res);
+ }
+
+ qReq = new QueryRequest()
+ .setStatement("select * from noschema");
+
+ qRes = handle.query(qReq);
+ for (MapValue res : qRes.getResults()) {
+ assertEquals(value, res);
+ }
+
+ value = new MapValue()
+ .put("a", "aval")
+ .put("id", 10)
+ .put("name", "jane")
+ .put("sid", 7);
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName("noschema");
+
+ PutResult pres = handle.put(putRequest);
+ assertNotNull("Put failed", pres.getVersion());
+
+ qReq = new QueryRequest()
+ .setStatement("select * from noschema");
+
+ qRes = handle.query(qReq);
+ for (MapValue res : qRes.getResults()) {
+ if (res.get("id").getInt() == 10) {
+                assertEquals(value, res);
+ }
+ }
+
+ /* do a write multiple */
+ WriteMultipleRequest wmReq = new WriteMultipleRequest();
+ for (int i = 0; i < 10; i++) {
+ /* need new MapValue -- it is not copied */
+ value = new MapValue()
+ .put("a", "aval")
+ .put("name", "jane")
+ .put("id", 10) // needs to be the same, it's shard key
+ .put("sid", i + 20)
+ .put("multindex", i);
+ PutRequest pr = new PutRequest()
+ .setValue(value)
+ .setTableName("noschema");
+ wmReq.add(pr, false);
+ }
+
+ WriteMultipleResult wmRes = handle.writeMultiple(wmReq);
+ assertEquals(10, wmRes.getResults().size());
+ qReq = new QueryRequest()
+ .setStatement("select * from noschema");
+
+ int count = 0;
+ do {
+ qRes = handle.query(qReq);
+ for (MapValue res : qRes.getResults()) {
+ ++count;
+ }
+ } while (!qReq.isDone());
+        assertEquals(12, count); // 12 rows
+
+ /* do a delete query */
+ qReq = new QueryRequest()
+ .setStatement("delete from noschema where id = 10 and sid = 20");
+ qRes = handle.query(qReq);
+ for (MapValue res : qRes.getResults()) {
+ assertEquals(1, res.get("numRowsDeleted").getInt());
+ }
+ }
+
+ /*
+ * Exercise code that allows valid non-information-losing casts for
+ * primary key fields
+ */
+ @Test
+ public void testJsonCollectionKeyCast() {
+ TableLimits limits = new TableLimits(10, 10, 1);
+ String createTable = "create table noschema " +
+ "(id integer, id1 long, id2 number, id3 string, id4 double, " +
+ "primary key(shard(id), id1, id2, id3, id4)) " +
+ "as json collection";
+ tableOperation(handle, createTable, limits, null,
+ TableResult.State.ACTIVE, null);
+
+ /* normal types */
+ MapValue value = new MapValue()
+ .put("id", 5)
+ .put("id1", 6L)
+ .put("id2", 7.6)
+ .put("id3", "jack")
+ .put("id4", 5.6);
+
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName("noschema");
+
+ PutResult pres = handle.put(putRequest);
+ assertNotNull("Put failed", pres.getVersion());
+
+ qry("select * from noschema");
+
+ /* use some coercion, success means no exception */
+ value.put("id", "6") // string to int
+ .put("id1", "6789") // string to long
+ .put("id2", 5L) // long to number
+ .put("id3", "joe") // strings must be strings
+ .put("id4", 7L); // long to double
+ pres = handle.put(putRequest);
+ assertNotNull("Put failed", pres.getVersion());
+ qry("select * from noschema");
+
+ value.put("id", 5678L) // long to int, no loss
+ .put("id1", 1.0) // float/double to long
+ .put("id2", 56.67F) // float to number
+ .put("id3", "jane") // strings must be strings
+ .put("id4", "56.0005"); // string to double
+ pres = handle.put(putRequest);
+ assertNotNull("Put failed", pres.getVersion());
+ qry("select * from noschema");
+
+ /* invalid coercion */
+ value.put("id", 56780000000L) // long to int, data loss
+ .put("id1", 1.0) // float/double to long
+ .put("id2", 56.67F) // float to number
+ .put("id3", "jane") // strings must be strings
+ .put("id4", true);
+ try {
+ pres = handle.put(putRequest);
+ fail("should have failed");
+ } catch (IllegalArgumentException iae) {
+ }
+
+ /* valid, except for non-string for string */
+ value.put("id", 7);
+ value.put("id3", 8);
+ value.put("id4", 5.6);
+ try {
+ pres = handle.put(putRequest);
+ fail("should have failed");
+ } catch (IllegalArgumentException iae) {
+ }
+ }
+
+ private void qry(String query) {
+ if (!verbose) {
+ return;
+ }
+ QueryRequest qReq = new QueryRequest()
+ .setStatement(query);
+ QueryResult qRes = handle.query(qReq);
+ System.out.println("Results of " + query + ":");
+ for (MapValue res : qRes.getResults()) {
+ System.out.println("\t" + res);
+ }
+ }
+
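+    /*
+     * Exercises put variants (unconditional, IfAbsent, IfPresent,
+     * IfVersion) with and without setReturnRow, verifying the returned
+     * versions/values and the read/write cost accounting.
+     */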
+ @Test
+ public void testPutIf() {
+ TableLimits limits = new TableLimits(10, 10, 1);
+ String createTable = "create table noschema(" +
+ "majorKey1 STRING, " +
+ "majorKey2 STRING, " +
+ "minorKey STRING, " +
+ "PRIMARY KEY (SHARD(majorKey1, majorKey2), minorKey))" +
+ "as json collection";
+
+ tableOperation(handle, createTable, limits, null,
+ TableResult.State.ACTIVE, null);
+
+ final MapValue mapVal = new MapValue()
+ .put("majorKey1", "k020f3dd0")
+ .put("majorKey2", "80")
+ .put("minorKey", "1e")
+ .put("firstThread", false)
+ .put("operation", "POPULATE")
+ .put("index", 27777);
+
+ final MapValue mapVal1 = new MapValue()
+ .put("majorKey1", "k020f3dd1")
+ .put("majorKey2", "81")
+ .put("minorKey", "1f")
+ .put("firstThread", false)
+ .put("operation", "POPULATE")
+ .put("index", 27777);
+
+ final MapValue mapVal2 = new MapValue()
+ .put("majorKey1", "k020f3dd2")
+ .put("majorKey2", "81")
+ .put("minorKey", "1f")
+ .put("firstThread", false)
+ .put("operation", "POPULATE")
+ .put("index", 27777);
+
+ final MapValue mapVal3 = new MapValue()
+ .put("majorKey1", "k020f3dd3")
+ .put("majorKey2", "81")
+ .put("minorKey", "1f")
+ .put("firstThread", false)
+ .put("operation", "POPULATE")
+ .put("index", 27777);
+
+ /* Put a row */
+ PutRequest putReq = new PutRequest()
+ .setValue(mapVal)
+ .setTableName("noschema");
+ PutResult putRes = handle.put(putReq);
+ assertNotNull(putRes.getVersion());
+ assertNull(putRes.getExistingVersion());
+ assertNull(putRes.getExistingValue());
+ assertReadKB(0, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(1, putRes.getWriteKB(), putRes.getWriteUnits());
+
+ /* Put a row again with SetReturnRow(false).
+ * expect no row returned.
+ */
+ putReq.setReturnRow(false);
+ putRes = handle.put(putReq);
+ assertNotNull(putRes.getVersion());
+ assertNull(putRes.getExistingVersion());
+ assertNull(putRes.getExistingValue());
+ assertReadKB(0, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(2, putRes.getWriteKB(), putRes.getWriteUnits());
+ Version oldVersion = putRes.getVersion();
+
+ /*
+ * Put row again with SetReturnRow(true),
+ * expect existing row returned.
+ */
+ putReq.setReturnRow(true);
+ putRes = handle.put(putReq);
+ assertNotNull(putRes.getVersion());
+ assertEquals(oldVersion, putRes.getExistingVersion());
+ assertNotNull(putRes.getExistingValue());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(2, putRes.getWriteKB(), putRes.getWriteUnits());
+ oldVersion = putRes.getVersion();
+
+ /*
+ * Put a new row with SetReturnRow(true),
+ * expect no existing row returned.
+ */
+ putReq = new PutRequest()
+ .setValue(mapVal1)
+ .setTableName("noschema")
+ .setReturnRow(true);
+ putRes = handle.put(putReq);
+ assertNotNull(putRes.getVersion());
+ assertNull(putRes.getExistingVersion());
+ assertNull(putRes.getExistingValue());
+ assertReadKB(0, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(1, putRes.getWriteKB(), putRes.getWriteUnits());
+
+ /* PutIfAbsent an existing row, it should fail */
+ putReq = new PutRequest()
+ .setValue(mapVal)
+ .setTableName("noschema")
+ .setOption(PutRequest.Option.IfAbsent);
+ putRes = handle.put(putReq);
+ assertNull(putRes.getVersion());
+ assertNull(putRes.getExistingVersion());
+ assertNull(putRes.getExistingValue());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(0, putRes.getWriteKB(), putRes.getWriteUnits());
+
+
+ /*
+ * PutIfAbsent fails + SetReturnRow(true),
+ * return existing value and version
+ */
+ putReq.setReturnRow(true);
+ putRes = handle.put(putReq);
+ assertNull(putRes.getVersion());
+ assertEquals(mapVal, putRes.getExistingValue());
+ assertEquals(oldVersion, putRes.getExistingVersion());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(0, putRes.getWriteKB(), putRes.getWriteUnits());
+
+ /* PutIfPresent an existing row, it should succeed */
+ putReq = new PutRequest()
+ .setValue(mapVal)
+ .setTableName("noschema")
+ .setOption(PutRequest.Option.IfPresent);
+ putRes = handle.put(putReq);
+ assertNotNull(putRes.getVersion());
+ assertNull(putRes.getExistingVersion());
+ assertNull(putRes.getExistingValue());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(2, putRes.getWriteKB(), putRes.getWriteUnits());
+ oldVersion = putRes.getVersion();
+
+        /*
+         * PutIfPresent succeeds + SetReturnRow(true),
+         * expect existing row returned.
+         */
+ putReq.setReturnRow(true);
+ putRes = handle.put(putReq);
+ assertNotNull(putRes.getVersion());
+ assertEquals(mapVal, putRes.getExistingValue());
+ assertEquals(oldVersion, putRes.getExistingVersion());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(2, putRes.getWriteKB(), putRes.getWriteUnits());
+ Version ifVersion = putRes.getVersion();
+
+ /* PutIfPresent a new row, it should fail */
+ putReq = new PutRequest()
+ .setValue(mapVal2)
+ .setTableName("noschema")
+ .setOption(PutRequest.Option.IfPresent);
+ putRes = handle.put(putReq);
+ assertNull(putRes.getVersion());
+ assertNull(putRes.getExistingVersion());
+ assertNull(putRes.getExistingValue());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(0, putRes.getWriteKB(), putRes.getWriteUnits());
+
+        /*
+         * PutIfPresent fails + SetReturnRow(true),
+         * expect no existing row returned.
+         */
+ putReq.setReturnRow(true);
+ putRes = handle.put(putReq);
+ assertNull(putRes.getVersion());
+ assertNull(putRes.getExistingVersion());
+ assertNull(putRes.getExistingValue());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(0, putRes.getWriteKB(), putRes.getWriteUnits());
+
+ /* PutIfAbsent a new row, it should succeed */
+ putReq = new PutRequest()
+ .setOption(PutRequest.Option.IfAbsent)
+ .setValue(mapVal2)
+ .setTableName("noschema");
+ putRes = handle.put(putReq);
+ assertNotNull(putRes.getVersion());
+ assertNull(putRes.getExistingVersion());
+ assertNull(putRes.getExistingValue());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(1, putRes.getWriteKB(), putRes.getWriteUnits());
+
+ /* PutIfAbsent success + SetReturnRow(true) */
+ putReq = new PutRequest()
+ .setOption(PutRequest.Option.IfAbsent)
+ .setValue(mapVal3)
+ .setTableName("noschema");
+ putRes = handle.put(putReq);
+ assertNotNull(putRes.getVersion());
+ assertNull(putRes.getExistingVersion());
+ assertNull(putRes.getExistingValue());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(1, putRes.getWriteKB(), putRes.getWriteUnits());
+
+ /*
+ * PutIfVersion an existing row with unmatched version, it should fail.
+ */
+ putReq = new PutRequest()
+ .setOption(PutRequest.Option.IfVersion)
+ .setMatchVersion(oldVersion)
+ .setValue(mapVal)
+ .setTableName("noschema");
+ putRes = handle.put(putReq);
+ assertNull(putRes.getVersion());
+ assertNull(putRes.getExistingVersion());
+ assertNull(putRes.getExistingValue());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(0, putRes.getWriteKB(), putRes.getWriteUnits());
+
+ /*
+ * PutIfVersion fails + SetReturnRow(true),
+ * expect existing row returned.
+ */
+ putReq.setReturnRow(true);
+ putRes = handle.put(putReq);
+ assertNull(putRes.getVersion());
+ assertEquals(ifVersion, putRes.getExistingVersion());
+ assertEquals(mapVal, putRes.getExistingValue());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(0, putRes.getWriteKB(), putRes.getWriteUnits());
+
+ /*
+ * Put an existing row with matching version, it should succeed.
+ */
+ putReq = new PutRequest()
+ .setOption(PutRequest.Option.IfVersion)
+ .setMatchVersion(ifVersion)
+ .setValue(mapVal)
+ .setTableName("noschema");
+ putRes = handle.put(putReq);
+ assertNotNull(putRes.getVersion());
+ assertNull(putRes.getExistingVersion());
+ assertNull(putRes.getExistingValue());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(2, putRes.getWriteKB(), putRes.getWriteUnits());
+ ifVersion = putRes.getVersion();
+
+        /*
+         * PutIfVersion succeeds + SetReturnRow(true),
+         * expect no existing row returned.
+         */
+ putReq.setMatchVersion(ifVersion).setReturnRow(true);
+ putRes = handle.put(putReq);
+ assertNotNull(putRes.getVersion());
+ assertNull(putRes.getExistingVersion());
+ assertNull(putRes.getExistingValue());
+ assertReadKB(1, putRes.getReadKB(), putRes.getReadUnits(), true);
+ assertWriteKB(2, putRes.getWriteKB(), putRes.getWriteUnits());
+ }
+
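+    /*
+     * Verifies secondary indexes on JSON collection fields and that
+     * field name case is preserved in query results.
+     */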
+ @Test
+ public void testIndexes() {
+ TableLimits limits = new TableLimits(10, 10, 1);
+ String createTable = "create table noschema " +
+ "(iD integer, SiD integer, primary key(shard(id), sid)) " +
+ "as json collection";
+ String createIndex = "create index idx on noschema(name as string)";
+ String createIndex1 = "create index idx1 on noschema(age as integer)";
+
+ tableOperation(handle, createTable, limits, null,
+ TableResult.State.ACTIVE, null);
+ tableOperation(handle, createIndex, null, null,
+ TableResult.State.ACTIVE, null);
+ tableOperation(handle, createIndex1, null, null,
+ TableResult.State.ACTIVE, null);
+
+ MapValue value = new MapValue()
+ .put("a", "aval")
+ .put("sid", 7);
+
+ PutRequest putRequest = new PutRequest()
+ .setTableName("noschema");
+ for (int i = 0; i < 10; i++) {
+ value.put("id", i)
+ .put("nAme", ("jane" + i))
+ .put("age", i);
+ putRequest.setValue(value);
+ PutResult pres = handle.put(putRequest);
+ assertNotNull("Put failed", pres.getVersion());
+ }
+
+ QueryRequest qReq =
+ new QueryRequest().setStatement("select * from noschema");
+ QueryResult qRes = handle.query(qReq);
+ assertEquals(10, qRes.getResults().size());
+ for (MapValue res : qRes.getResults()) {
+ /* assert case-preservation */
+ assertTrue(res.toString().contains("SiD"));
+ assertTrue(res.toString().contains("nAme"));
+ }
+
+ qReq =
+ new QueryRequest().setStatement(
+ "select * from noschema where age > 3 order by age");
+
+        ArrayList<MapValue> results = new ArrayList<>();
+ do {
+ qRes = handle.query(qReq);
+ results.addAll(qRes.getResults());
+ } while (!qReq.isDone());
+ assertEquals(6, results.size());
+ for (MapValue res : results) {
+ /* assert case-preservation */
+ assertTrue(res.toString().contains("nAme"));
+ assertTrue(res.toString().contains("SiD"));
+ }
+ }
+
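+    /* Geo index queries on a JSON collection (currently disabled). */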
+ @Ignore
+ public void testGeoIndexes() {
+ TableLimits limits = new TableLimits(10, 10, 1);
+ String createTable = "create table geo " +
+ "(id integer, primary key(id)) as json collection";
+ final String pointIndex =
+ "create index idx_kind_ptn on geo(info.kind as string," +
+ "info.point as point)";
+ final String geoIndex =
+ "create index idx_geom on geo(info.geom as geometry " +
+ "{\"max_covering_cells\":400})";
+
+ final String[] data = new String[] {
+ "insert into geo values(1, {\"info\": { " +
+ "\"kind\": \"farm\", \"point\": {\"type\":\"point\", " +
+ "\"coordinates\": [23.549, 35.2908]}}})",
+ "insert into geo values(2, {\"info\": { " +
+ "\"kind\": \"park\", \"point\": {\"type\":\"point\", " +
+ "\"coordinates\": [24.9, 35.4]}}})"
+ };
+
+ tableOperation(handle, createTable, limits, null,
+ TableResult.State.ACTIVE, null);
+ tableOperation(handle, pointIndex, null, null,
+ TableResult.State.ACTIVE, null);
+ tableOperation(handle, geoIndex, null, null,
+ TableResult.State.ACTIVE, null);
+
+ for (String q : data) {
+ QueryRequest qReq = new QueryRequest().setStatement(q);
+ QueryResult qRes = handle.query(qReq);
+ System.out.println(qRes);
+ }
+
+ QueryRequest qReq =
+ new QueryRequest().setStatement(
+ "select /* FORCE_PRIMARY_INDEX(geo) */ * from geo g where geo_near(g.info.point, " +
+ "{\"type\": \"point\", \"coordinates\": [24.0175, 35.5156]}," +
+ "5000)");
+ QueryResult qRes = handle.query(qReq);
+ for (MapValue val : qRes.getResults()) {
+ System.out.println(val);
+ }
+ }
+
+ /*
+ * Check edge and invalid situations for JSON Collection.
+ * Invalid:
+ * o invalid (not JSON) types
+ * o attempt to schema evolve
+ * o bad key
+ * Edge:
+ * o identity column as key, evolve sequence
+ * o TTL, with evolution
+ */
+ @Test
+ public void testJsonCollectionEdge() {
+ TableLimits limits = new TableLimits(10, 10, 1);
+ String createTable = "create table noschema " +
+ "(id integer, primary key(id)) as json collection";
+ tableOperation(handle, createTable, limits, null,
+ TableResult.State.ACTIVE, null);
+
+ /*
+ * Bad types
+ */
+ final Timestamp ts = Timestamp.valueOf("2018-05-02 10:23:42.123");
+ final FieldValue tsVal = new TimestampValue(ts);
+ badType("time", tsVal, "noschema");
+
+ badType("bin", new BinaryValue(new byte[4]), "noschema");
+
+ /*
+ * Try to evolve in an illegal manner
+ */
+ final String alter = "alter table noschema(add name string)";
+ TableResult tres = tableOperation(handle, alter, null, null,
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /*
+ * new table, with identity col, evolve it to change sequence start
+ */
+ createTable = "create table noschema1 " +
+ "(id integer generated always as identity, " +
+ "primary key(id)) as json collection";
+ tres = tableOperation(handle, createTable, limits, 5000);
+
+ tres = tableOperation(handle, "alter table noschema1 (modify id " +
+ "generated always as identity(start with 1002))",
+ null, 5000);
+
+ /*
+ * Put a row and verify that the generated value is 1002
+ */
+ MapValue value = new MapValue()
+ .put("name", "myname")
+ .put("nullval", JsonNullValue.getInstance());
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName("noschema1");
+ PutResult pres = handle.put(putRequest);
+
+ QueryRequest qReq = new QueryRequest()
+ .setStatement("select * from noschema1");
+ QueryResult qRes = handle.query(qReq);
+ for (MapValue res : qRes.getResults()) {
+ assertTrue(res.get("id").getInt() == 1002);
+ }
+
+ /*
+ * Add a TTL
+ */
+ tres = tableOperation(handle, "alter table noschema1 using TTL 5 days",
+ null, 5000);
+ assertTrue(tres.getDdl().toLowerCase().contains("5 days"));
+
+ tres = tableOperation(handle, "alter table noschema1 using TTL 2 hours",
+ null, 5000);
+ assertTrue(tres.getDdl().toLowerCase().contains("2 hours"));
+ }
+
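+    /*
+     * Round-trips nested JSON documents (arrays and nested objects)
+     * through put and get.
+     */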
+ @Test
+ public void testNested() {
+ TableLimits limits = new TableLimits(10, 10, 1);
+ final String createTable = "create table noschema(id long, " +
+ "primary key(id)) as json collection";
+ TableResult tres = tableOperation(handle, createTable, limits, 5000);
+ String json = "{" +
+ "\"id\":0," +
+ "\"name\": \"Foo\"," +
+ "\"tags\": [\"rock\",\"metal\",\"bar\"]" +
+ "}";
+
+ String json1 = "{" +
+ "\"id\":1," +
+ "\"name\": \"Foo\"," +
+ "\"obj\": {\"a\":1,\"b\":2,\"c\":3, " +
+ "\"tags\": [\"rock\",\"metal\",\"bar\"]" +
+ "}}";
+
+ String json2 = "{" +
+ "\"id\":2," +
+ "\"obj\": {\"a\":1,\"b\":2,\"c\":3, " +
+ "\"obj1\": {\"d\":1,\"e\":2,\"f\":3} " +
+ "}}";
+
+ String[] docs = new String[]{json, json1, json2};
+ int i = 0;
+ for (String doc : docs) {
+            MapValue val = (MapValue) FieldValue.createFromJson(doc, null);
+ PutRequest pr = new PutRequest()
+ .setValue(val)
+ .setTableName("noschema");
+ PutResult pres = handle.put(pr);
+ assertNotNull("Put failed", pres.getVersion());
+
+ GetRequest gr = new GetRequest()
+ .setKey(new MapValue().put("id", i++))
+ .setTableName("noschema");
+ GetResult gres = handle.get(gr);
+ assertTrue(JsonUtils.jsonEquals(val.toString(),
+ gres.getValue().toString()));
+ }
+ }
+
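+    /*
+     * Exercises delete variants (unconditional and IfVersion) with and
+     * without setReturnRow, verifying the returned versions/values and
+     * the read/write cost accounting.
+     */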
+ @Test
+ public void testDelete() {
+ TableLimits limits = new TableLimits(10, 10, 1);
+ String createTable = "create table noschema(" +
+ "majorKey1 STRING, " +
+ "majorKey2 STRING, " +
+ "minorKey STRING, " +
+ "PRIMARY KEY (SHARD(majorKey1, majorKey2), minorKey))" +
+ "as json collection";
+
+ tableOperation(handle, createTable, limits, null,
+ TableResult.State.ACTIVE, null);
+
+ final MapValue key = new MapValue()
+ .put("majorKey1", "k020f3dd0")
+ .put("majorKey2", "80")
+ .put("minorKey", "1e");
+
+ /* put a row */
+ PutRequest putReq = new PutRequest()
+ .setTableName("noschema")
+ .setValue(key);
+ PutResult putRes = handle.put(putReq);
+ assertNotNull(putRes.getVersion());
+
+ /* Delete a row */
+ DeleteRequest delReq = new DeleteRequest()
+ .setKey(key)
+ .setTableName("noschema");
+ DeleteResult delRes = handle.delete(delReq);
+ assertTrue(delRes.getSuccess());
+ assertNull(delRes.getExistingVersion());
+ assertNull(delRes.getExistingValue());
+ assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true);
+ assertWriteKB(1, delRes.getWriteKB(), delRes.getWriteUnits());
+
+ /* Put the row back to store */
+ putReq = new PutRequest().setValue(key).setTableName("noschema");
+ putRes = handle.put(putReq);
+ Version oldVersion = putRes.getVersion();
+ assertNotNull(oldVersion);
+
+ /* Delete succeed + setReturnRow(true), existing row returned. */
+ delReq.setReturnRow(true);
+ delRes = handle.delete(delReq);
+ assertTrue(delRes.getSuccess());
+ assertEquals(oldVersion, delRes.getExistingVersion());
+ assertEquals(key, delRes.getExistingValue());
+ assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true);
+ assertWriteKB(1, delRes.getWriteKB(), delRes.getWriteUnits());
+
+ /* Delete fail + setReturnRow(true), no existing row returned. */
+ delRes = handle.delete(delReq);
+ assertFalse(delRes.getSuccess());
+ assertNull(delRes.getExistingVersion());
+ assertNull(delRes.getExistingValue());
+ assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true);
+ assertWriteKB(0, delRes.getWriteKB(), delRes.getWriteUnits());
+
+ /* Put the row back to store */
+ putReq = new PutRequest().setValue(key).setTableName("noschema");
+ putRes = handle.put(putReq);
+ Version ifVersion = putRes.getVersion();
+ assertNotNull(ifVersion);
+
+ /* DeleteIfVersion with unmatched version, it should fail */
+ delReq = new DeleteRequest()
+ .setMatchVersion(oldVersion)
+ .setKey(key)
+ .setTableName("noschema");
+ delRes = handle.delete(delReq);
+ assertFalse(delRes.getSuccess());
+ assertNull(delRes.getExistingVersion());
+ assertNull(delRes.getExistingValue());
+ assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true);
+ assertWriteKB(0, delRes.getWriteKB(), delRes.getWriteUnits());
+
+        /*
+         * DeleteIfVersion with unmatched version + setReturnRow(true),
+         * the existing row is returned.
+         */
+ delReq.setReturnRow(true);
+ delRes = handle.delete(delReq);
+ assertFalse(delRes.getSuccess());
+ assertEquals(ifVersion, delRes.getExistingVersion());
+ assertEquals(key, delRes.getExistingValue());
+ assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true);
+ assertWriteKB(0, delRes.getWriteKB(), delRes.getWriteUnits());
+
+ /* DeleteIfVersion with matched version, it should succeed. */
+ delReq = new DeleteRequest()
+ .setMatchVersion(ifVersion)
+ .setKey(key)
+ .setTableName("noschema");
+ delRes = handle.delete(delReq);
+ assertTrue(delRes.getSuccess());
+ assertNull(delRes.getExistingVersion());
+ assertNull(delRes.getExistingValue());
+ assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true);
+ assertWriteKB(1, delRes.getWriteKB(), delRes.getWriteUnits());
+
+ /* Put the row back to store */
+ putReq = new PutRequest().setValue(key).setTableName("noschema");
+ putRes = handle.put(putReq);
+ ifVersion = putRes.getVersion();
+ assertNotNull(ifVersion);
+
+ /*
+ * DeleteIfVersion with matched version + setReturnRow(true),
+ * it should succeed but no existing row returned.
+ */
+ delReq.setMatchVersion(ifVersion).setReturnRow(true);
+ delRes = handle.delete(delReq);
+ assertTrue(delRes.getSuccess());
+ assertNull(delRes.getExistingVersion());
+ assertNull(delRes.getExistingValue());
+ assertReadKB(1, delRes.getReadKB(), delRes.getReadUnits(), true);
+ assertWriteKB(1, delRes.getWriteKB(), delRes.getWriteUnits());
+ }
+
+ private void badType(String fieldName, FieldValue val, String tableName) {
+ MapValue value = new MapValue()
+ .put("id", 10)
+ .put(fieldName, val);
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName(tableName);
+
+ try {
+ PutResult pres = handle.put(putRequest);
+ fail("operation should have thrown IAE");
+ } catch (IllegalArgumentException iae) {
+ assertTrue(iae.getMessage().contains("Invalid JSON type"));
+ }
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/LatencyTestBase.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/LatencyTestBase.java
new file mode 100644
index 00000000..98db1cf3
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/LatencyTestBase.java
@@ -0,0 +1,671 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assume.assumeTrue;
+import static org.junit.Assert.fail;
+
+import java.net.URL;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import oracle.kv.impl.api.Request;
+import oracle.kv.impl.api.RequestHandlerImpl;
+import oracle.kv.impl.rep.RepNodeService;
+import oracle.kv.impl.sna.ManagedRepNode;
+import oracle.kv.impl.sna.ManagedService;
+import oracle.kv.impl.test.TestHook;
+import oracle.kv.impl.test.TestStatus;
+import oracle.kv.util.kvlite.KVLite;
+
+import oracle.nosql.driver.NoSQLHandle;
+import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.NoSQLHandleFactory;
+import oracle.nosql.driver.RequestTimeoutException;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableRequest;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.proxy.security.SecureTestUtil;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+
+
+/**
+ * A base class for running tests with artificially injected latency.
+ *
+ * These tests run only against a local server, not against minicloud.
+ *
+ * The tests use a KVLite instance with a test hook that injects
+ * latencies (specified in latencySetUp) into all requests.
+ */
+public class LatencyTestBase extends ProxyTestBase implements TestHook {
+
+ private static int requestDelayMs;
+ private static int previousRequestThreads;
+ private static int previousPoolThreads;
+ private static boolean previousUseThreads;
+ private static boolean previousUseAsync;
+
+    // Note: this hides the superclass static method so it won't be called,
+    // and it intentionally does nothing. Subclasses call latencySetUp()
+    // from their BeforeClass methods instead.
+ @BeforeClass
+ public static void staticSetUp()
+ throws Exception {
+ }
+
+ // This can be called in subclass BeforeClass methods
+ public static void latencySetUp(boolean useAsync, int delayMs)
+ throws Exception {
+
+ requestDelayMs = delayMs;
+
+ /*
+ * Run kvlite in this jvm, so we can set a testHook on each
+ * request to inject latency.
+ * Note: it is currently impossible to create a thread-based kvlite
+ * in anything other than a 1x1 (single node, single shard)
+ * configuration. But that's OK for the purposes of this test.
+ */
+ previousUseThreads = Boolean.getBoolean(KVLITE_USETHREADS_PROP);
+ System.setProperty(KVLITE_USETHREADS_PROP, "true");
+
+ /*
+ * configure the proxy to use sync or async calls (note this
+ * overrides any given cmdline parameters, which will get reset after
+ * this testcase finishes)
+ */
+ previousUseAsync = Boolean.getBoolean(PROXY_ASYNC_PROP);
+ System.setProperty(PROXY_ASYNC_PROP, Boolean.toString(useAsync));
+
+ /*
+ * set the number of request processing threads very low, so
+ * we can verify no additional latency when RCn > PTn
+ * (RCn = number of concurrent requests)
+ * (PTn = number of proxy request threads)
+ *
+ * store the old value so we can reset it, if it was set
+ */
+ previousRequestThreads = Integer.getInteger(
+ PROXY_REQUEST_THREADS_PROP, 0);
+ System.setProperty(PROXY_REQUEST_THREADS_PROP, "2");
+
+ previousPoolThreads = Integer.getInteger(
+ PROXY_REQUEST_POOL_SIZE_PROP, 0);
+ System.setProperty(PROXY_REQUEST_POOL_SIZE_PROP, "0");
+
+ /* this will silence stderr from kvlite */
+ TestStatus.setActive(true);
+
+ staticSetUp(tenantLimits);
+ }
+
+ @AfterClass
+ public static void resetProperties() {
+ System.setProperty(KVLITE_USETHREADS_PROP,
+ Boolean.toString(previousUseThreads));
+ System.setProperty(PROXY_ASYNC_PROP,
+ Boolean.toString(previousUseAsync));
+ if (previousRequestThreads <= 0) {
+ System.clearProperty(PROXY_REQUEST_THREADS_PROP);
+ } else {
+ System.setProperty(PROXY_REQUEST_THREADS_PROP,
+ Integer.toString(previousRequestThreads));
+ }
+ System.setProperty(PROXY_REQUEST_POOL_SIZE_PROP,
+ Integer.toString(previousPoolThreads));
+ }
+
+ @Before
+ public void asyncSetUp()
+ throws Exception {
+
+ // set a test hook such that every request takes at least Nms
+ setRequestDelayHook(kvlite);
+ }
+
+ @Override
+ public void doHook(Request r) {
+ // this will be run at the beginning of each request in kvlite
+ try {
+ Thread.sleep(requestDelayMs);
+ } catch (InterruptedException e) {
+ }
+ }
+
+ /**
+ * set a per-request test hook, if defined.
+ */
+ private void setRequestDelayHook(KVLite kvlite) {
+ if (requestDelayMs <= 0) {
+ return;
+ }
+
+ /*
+ * KVLite runs one RN with useThreads=true. So we can get its
+ * ManagedRepNode service from the static ManagedService class.
+ */
+ ManagedRepNode mrn = (ManagedRepNode)ManagedService.getMainService();
+ if (mrn == null) {
+ throw new RuntimeException(
+ "Error: can't set request delay hook: no ManagedRepNode");
+ }
+
+ RepNodeService rns = mrn.getRepNodeService();
+ if (rns == null) {
+ throw new RuntimeException(
+ "Error: can't set request delay hook: no RepNodeService");
+ }
+
+ RequestHandlerImpl rhi = rns.getReqHandler();
+ if (rhi == null) {
+ throw new RuntimeException(
+ "Error: can't set request delay hook: no RequestHandlerImpl");
+ }
+
+ rhi.setTestHook(this);
+ if (verbose) {
+ System.out.println("Set request delay hook " +
+ "for " + requestDelayMs + "ms delay on " +
+ rns.getRepNodeParams().getRepNodeId().getFullName());
+ }
+ }
+
+
+ protected static NoSQLHandle createClientHandleAndTestTable(
+ String tableName, int numThreads)
+ throws Exception {
+
+ URL serviceURL =
+ new URL("http", getProxyHost(), getProxyPort(), "/");
+ NoSQLHandleConfig config = new NoSQLHandleConfig(serviceURL);
+ SecureTestUtil.setAuthProvider(config, isSecure(),
+ onprem, getTenantId());
+ config.configureDefaultRetryHandler(0, 0);
+ NoSQLHandle myhandle = NoSQLHandleFactory.createNoSQLHandle(config);
+
+ /*
+ * Create a simple table with an integer key and a single
+ * string field
+ */
+ final String createTableStatement =
+ "CREATE TABLE IF NOT EXISTS " + tableName +
+ "(cookie_id LONG, audience_data STRING, PRIMARY KEY(cookie_id))";
+
+ TableRequest tableRequest = new TableRequest()
+ .setStatement(createTableStatement)
+ .setTableLimits(new TableLimits(100000, 100000, 50));
+ TableResult tres = myhandle.tableRequest(tableRequest);
+ if (verbose) {
+ System.out.println("Creating table " + tableName);
+ }
+ /*
+ * The table request is asynchronous, so wait for the operation
+ * to complete.
+ */
+ tres.waitForCompletion(myhandle,
+ 60000, /* wait 60 sec */
+ 100); /* delay ms for poll */
+ if (verbose) {
+ System.out.println("Created table " + tableName);
+ }
+ /*
+ * Ideally this would be done earlier but at this time kv
+ * requires that a table be created before the system table
+ * is initialized. TODO: watch kv for changes in this area
+ */
+ waitForStoreInit(20); // wait 20s for init
+ return myhandle;
+ }
+
+ protected static void dropTableAndCloseHandle(
+ NoSQLHandle myhandle, String tableName)
+ throws Exception {
+
+ // drop the table
+ if (verbose) {
+ System.out.println("Dropping table " + tableName);
+ }
+ TableRequest tableRequest = new TableRequest()
+ .setStatement("DROP TABLE IF EXISTS " + tableName);
+ myhandle.tableRequest(tableRequest);
+
+ // close handle
+ if (verbose) {
+ System.out.println("Closing handle...");
+ }
+ myhandle.close();
+ }
+
+
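+    /**
+     * Collects per-operation latencies (nanoseconds) across driver
+     * threads. Raw samples are kept in a fixed-size ring buffer for
+     * percentile reporting; a running total in microseconds backs the
+     * averages.
+     *
+     * Typical usage (illustrative sketch only):
+     *
+     *   LatencyCollector lc = new LatencyCollector("get", 100000);
+     *   long start = System.nanoTime();
+     *   // ... perform one operation ...
+     *   lc.collect(System.nanoTime() - start);
+     *   long avgMs = lc.avgLatencyMs();
+     */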
+ protected static class LatencyCollector {
+ String ltype;
+ long[] latencies;
+ AtomicInteger sampleNum;
+ AtomicLong total_us;
+
+ LatencyCollector(String ltype, int numSamples) {
+ this.ltype = ltype;
+ this.latencies = new long[numSamples];
+ this.sampleNum = new AtomicInteger(0);
+ this.total_us = new AtomicLong(0);
+ }
+
+ void collect(long lat_ns) {
+ if (lat_ns==0) return;
+ int sample = sampleNum.incrementAndGet();
+ latencies[sample % latencies.length] = lat_ns;
+ total_us.addAndGet(lat_ns / 1000);
+ }
+
+ long avgLatencyUs() {
+ return total_us.get() / totalSamples();
+ }
+
+ int totalSamples() {
+ return sampleNum.get();
+ }
+
+ long avgLatencyMs() {
+ return avgLatencyUs() / 1000;
+ }
+
+ void dumpLatencies() {
+ int totalSamples = totalSamples();
+ if (totalSamples > latencies.length) {
+ totalSamples = latencies.length;
+ }
+ System.out.println("latencies: " + totalSamples + " samples:");
+            for (int i = 0; i < totalSamples; i++) {
+                System.out.println("\t" + latencies[i]);
+            }
+        }
+
+        long percentileLatencyUs(int pct) {
+            int totalSamples = totalSamples();
+            if (totalSamples > latencies.length) {
+ totalSamples = latencies.length;
+ }
+ Arrays.sort(latencies, 0, totalSamples);
+ if (pct >= 100) {
+ return latencies[(totalSamples - 1)] / 1000;
+ }
+ return latencies[(totalSamples * pct) / 100] / 1000;
+ }
+
+ long percentileLatencyMs(int pct) {
+ return percentileLatencyUs(pct) / 1000;
+ }
+ }
+
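+    /**
+     * Bundles the client run parameters (handle, table, run duration, key
+     * space, payload size, timeouts) with the latency collectors shared
+     * by the driver threads.
+     */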
+ protected static class RunConfig {
+ NoSQLHandle handle;
+ String tableName;
+ int runSeconds;
+ long maxID;
+ int maxSize;
+ int readTimeoutMs;
+ int writeTimeoutMs;
+ LatencyCollector readLatencyCollector;
+ LatencyCollector writeLatencyCollector;
+ LatencyCollector queryLatencyCollector;
+
+ protected RunConfig(
+ NoSQLHandle handle,
+ String tableName,
+ int runSeconds,
+ long maxID,
+ int maxSize,
+ int readTimeoutMs,
+ int writeTimeoutMs) {
+ this.handle = handle;
+ this.tableName = tableName;
+ this.runSeconds = runSeconds;
+ this.maxID = maxID;
+ this.maxSize = maxSize;
+ this.readTimeoutMs = readTimeoutMs;
+ this.writeTimeoutMs = writeTimeoutMs;
+ readLatencyCollector = new LatencyCollector("get", 100000);
+ writeLatencyCollector = new LatencyCollector("put", 100000);
+ queryLatencyCollector = new LatencyCollector("query", 100000);
+ }
+ }
+
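+    /* Entry point for read/write driver threads; get_pct and put_pct are
+     * the percentages of iterations that issue each operation. */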
+ protected static void runClient(RunConfig rc, int get_pct, int put_pct) {
+ try {
+ runOneClient(rc, get_pct, put_pct);
+ } catch (IOException e) {
+ }
+ }
+
+ protected static void runOneClient(RunConfig rc, int get_pct, int put_pct)
+ throws IOException {
+
+ Random rand = new Random(System.currentTimeMillis());
+
+ // generate random data for puts
+ final int leftLimit = 32; // space
+ final int rightLimit = 126; // tilde
+ String generatedString = rand.ints(leftLimit, rightLimit + 1)
+ .limit(rc.maxSize)
+ .collect(StringBuilder::new,
+ StringBuilder::appendCodePoint, StringBuilder::append)
+ .toString();
+
+ MapValue value = new MapValue();
+ MapValue key = new MapValue();
+
+ PutRequest putRequest = new PutRequest()
+ .setTableName(rc.tableName);
+ putRequest.setTimeout(rc.writeTimeoutMs);
+ GetRequest getRequest = new GetRequest()
+ .setTableName(rc.tableName);
+ getRequest.setTimeout(rc.readTimeoutMs);
+
+ if (verbose) {
+ System.out.println("Driver thread " +
+ Thread.currentThread().getId() + " performing " +
+ get_pct + "% get, " + put_pct + "% put operations...");
+ }
+
+ /* factor out proxy warmup for table and store */
+ boolean done = false;
+ while (!done) {
+ try {
+ key.put("cookie_id", 0L);
+ getRequest.setKey(key);
+ rc.handle.get(getRequest);
+ done = true;
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+
+ long endMillis = System.currentTimeMillis() + (rc.runSeconds * 1000);
+
+ while (System.currentTimeMillis() < endMillis) {
+
+            boolean do_get = (rand.nextInt(100) >= (100 - get_pct));
+            boolean do_put = (rand.nextInt(100) >= (100 - put_pct));
+
+            // if neither a get nor a put was chosen, try again
+            if (!do_get && !do_put) {
+                continue;
+            }
+
+            // pick a random, non-negative id in [0, maxID)
+            long id = Math.floorMod(rand.nextLong(), rc.maxID);
+
+ if (do_put) {
+ value.put("cookie_id", id);
+ int begin = rand.nextInt(rc.maxSize / 4);
+ int end = begin + rand.nextInt((rc.maxSize * 3) / 4);
+ String sub = generatedString.substring(begin, end);
+ value.put("audience_data", sub);
+
+ long start = System.nanoTime();
+ putRequest.setValue(value);
+ try {
+ PutResult putRes = rc.handle.put(putRequest);
+ if (putRes.getVersion() == null) {
+ System.err.println("put failed!");
+ }
+ } catch (Exception e) {
+ System.err.println(System.currentTimeMillis() + " PUT E");
+ if (verbose) {
+ System.err.println(" " + e);
+ }
+ }
+ long elapsed = System.nanoTime() - start;
+ rc.writeLatencyCollector.collect(elapsed);
+ }
+
+ if (do_get) {
+ long start = System.nanoTime();
+ key.put("cookie_id", id);
+ getRequest.setKey(key);
+ try {
+ rc.handle.get(getRequest);
+ } catch (Exception e) {
+ System.err.println(System.currentTimeMillis() + " GET E");
+ if (verbose) {
+ System.err.println(" " + e);
+ }
+ }
+ long elapsed = System.nanoTime() - start;
+ rc.readLatencyCollector.collect(elapsed);
+ }
+
+ }
+
+ }
+
+ protected static void runQueries(RunConfig rc) {
+ try {
+ runOneQueryClient(rc);
+ } catch (IOException e) {
+ } catch (InterruptedException ie) {
+ return;
+ }
+ }
+
+ private static void runQuery(RunConfig rc, String query) {
+ try {
+ List<MapValue> allResults = new ArrayList<MapValue>();
+ /* factor out the one-time cost of prepare */
+ PrepareRequest preq = new PrepareRequest().setStatement(query);
+ PrepareResult pres = rc.handle.prepare(preq);
+ QueryRequest qreq = new QueryRequest().
+ setPreparedStatement(pres.getPreparedStatement());
+ long start = System.nanoTime();
+ do {
+ QueryResult qr = rc.handle.query(qreq);
+ List<MapValue> results = qr.getResults();
+ for (MapValue mv : results) {
+ // need to walk values, in case iteration triggers
+ // more requests internally
+ allResults.add(mv);
+ }
+ } while (!qreq.isDone());
+ long elapsed = System.nanoTime() - start;
+ rc.queryLatencyCollector.collect(elapsed);
+ //System.err.println("query '" + query + "' ran to completion, " +
+ //"numResults=" + allResults.size());
+ } catch (RequestTimeoutException rte) {
+ System.err.println("query '" + query + "' timed out: " + rte);
+ } catch (Exception e) {
+ System.err.println("query '" + query + "' got error: " + e);
+ }
+ }
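+
+ /*
+ * Minimal sketch, not called by the tests: the driver's pagination
+ * idiom used in runQuery above. query() is reissued on the same
+ * QueryRequest until isDone() returns true, one result batch per
+ * call. The helper name is ours, not part of the test.
+ */
+ private static int countQueryResults(RunConfig rc, QueryRequest qreq) {
+ int n = 0;
+ do {
+ QueryResult qr = rc.handle.query(qreq);
+ n += qr.getResults().size();
+ } while (!qreq.isDone());
+ return n;
+ }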
+
+ private static void runOneQueryClient(RunConfig rc)
+ throws IOException, InterruptedException {
+
+ if (verbose) {
+ System.out.println("Driver thread " +
+ Thread.currentThread().getId() +
+ " performing query operations...");
+ }
+
+ long endMillis = System.currentTimeMillis() + (rc.runSeconds * 1000);
+
+ while (System.currentTimeMillis() < endMillis) {
+
+ // simple count
+ runQuery(rc, "select count(*) from " + rc.tableName);
+
+ // full scan/dump
+ runQuery(rc, "select * from " + rc.tableName);
+
+ // more complex, with sort
+ runQuery(rc, "select audience_data from " + rc.tableName +
+ " where cookie_id > 1000 and cookie_id < 10000" +
+ " order by audience_data");
+
+ TimeUnit.MILLISECONDS.sleep(10);
+ }
+ }
+
+ private static void checkOpLatencies(
+ long minLatencyMs, long maxLatencyMs,
+ final String opType, LatencyCollector lc) {
+
+ if (minLatencyMs <= 0 || maxLatencyMs <= 0) {
+ return;
+ }
+
+ long latencyMs = lc.avgLatencyMs();
+ if (latencyMs < minLatencyMs || latencyMs > maxLatencyMs) {
+ if (verbose) {
+ lc.dumpLatencies();
+ }
+ long max = lc.percentileLatencyMs(100);
+ long lat99 = lc.percentileLatencyMs(99);
+ long lat95 = lc.percentileLatencyMs(95);
+ fail(opType + " latency of " + latencyMs +
+ "ms is out of range.\n" +
+ "Expected average latency is between " +
+ minLatencyMs + "ms and " +
+ maxLatencyMs + "ms. 95th=" + lat95 + " 99th=" + lat99 +
+ " max=" + max + " (samples=" + lc.totalSamples() + ")");
+ }
+ }
+
+ protected static void testLatency(
+ String tableName,
+ int readThreads,
+ int writeThreads,
+ int rwThreads,
+ int qThreads,
+ int runSeconds,
+ int minReadLatencyMs,
+ int maxReadLatencyMs,
+ int minWriteLatencyMs,
+ int maxWriteLatencyMs,
+ int minQueryLatencyMs,
+ int maxQueryLatencyMs)
+ throws Exception {
+
+ // skip this test if running on minicloud
+ assumeTrue(cloudRunning == false);
+
+ /*
+ * create threads, have them all hit the proxy as fast as
+ * possible with get/put requests for runSeconds seconds.
+ * Verify that the resulting average latency falls within the
+ * expected range.
+ * (in the sync case, this will be much higher)
+ */
+
+ final int totalThreads =
+ readThreads + writeThreads + rwThreads + qThreads;
+
+ NoSQLHandle myhandle =
+ createClientHandleAndTestTable(tableName, totalThreads);
+
+ RunConfig rc = new RunConfig(
+ myhandle,
+ tableName,
+ runSeconds,
+ 10000 /*maxID*/,
+ 5000 /*maxSize*/,
+ 2000 /*readTimeoutMs*/,
+ 2000 /*writeTimeoutMs*/);
+
+ Thread threads[] = new Thread[totalThreads];
+
+ if (qThreads == totalThreads) {
+ // run puts to prepopulate data
+ for(int x=0; x<totalThreads; x++) {
+ threads[x] = new Thread(() -> {runClient(rc, 0, 100);});
+ threads[x].start();
+ }
+ for(int x=0; x<totalThreads; x++) {
+ threads[x].join();
+ }
+ }
+
+ int numThreads = 0;
+ for(int x=0; x<readThreads; x++) {
+ threads[numThreads] = new Thread(() -> {runClient(rc, 100, 0);});
+ threads[numThreads].start();
+ numThreads++;
+ }
+ for(int x=0; x<writeThreads; x++) {
+ threads[numThreads] = new Thread(() -> {runClient(rc, 0, 100);});
+ threads[numThreads].start();
+ numThreads++;
+ }
+ for(int x=0; x<rwThreads; x++) {
+ threads[numThreads] = new Thread(() -> {runClient(rc, 50, 50);});
+ threads[numThreads].start();
+ numThreads++;
+ }
+ for(int x=0; x<qThreads; x++) {
+ threads[numThreads] = new Thread(() -> {runQueries(rc);});
+ threads[numThreads].start();
+ numThreads++;
+ }
+
+ // wait for threads to finish
+ for(int x=0; x<totalThreads; x++) {
+ threads[x].join();
+ }
+
+ checkOpLatencies(minReadLatencyMs, maxReadLatencyMs,
+ "get", rc.readLatencyCollector);
+ checkOpLatencies(minWriteLatencyMs, maxWriteLatencyMs,
+ "put", rc.writeLatencyCollector);
+ checkOpLatencies(minQueryLatencyMs, maxQueryLatencyMs,
+ "query", rc.queryLatencyCollector);
+
+ if (readThreads > 0 || rwThreads > 0) {
+ System.out.println("average latency for get ops: " +
+ rc.readLatencyCollector.avgLatencyMs() + "ms");
+ System.out.println("99th percentile latency for get ops: " +
+ rc.readLatencyCollector.percentileLatencyMs(99) + "ms");
+ }
+ if (writeThreads > 0 || rwThreads > 0) {
+ System.out.println("average latency for put ops: " +
+ rc.writeLatencyCollector.avgLatencyMs() + "ms");
+ System.out.println("99th percentile latency for put ops: " +
+ rc.writeLatencyCollector.percentileLatencyMs(99) + "ms");
+ }
+ if (qThreads > 0) {
+ System.out.println("average latency for query ops: " +
+ rc.queryLatencyCollector.avgLatencyMs() + "ms");
+ System.out.println("99th percentile latency for query ops: " +
+ rc.queryLatencyCollector.percentileLatencyMs(99) + "ms");
+ }
+
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/LimitsTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/LimitsTest.java
new file mode 100644
index 00000000..06b7e02c
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/LimitsTest.java
@@ -0,0 +1,1312 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static oracle.nosql.driver.ops.TableLimits.CapacityMode.ON_DEMAND;
+import static oracle.nosql.driver.ops.TableLimits.CapacityMode.PROVISIONED;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import java.util.UUID;
+
+import org.junit.Test;
+
+import oracle.kv.impl.topo.RepNodeId;
+import oracle.nosql.driver.DeploymentException;
+import oracle.nosql.driver.EvolutionLimitException;
+import oracle.nosql.driver.IndexLimitException;
+import oracle.nosql.driver.KeySizeLimitException;
+import oracle.nosql.driver.OperationThrottlingException;
+import oracle.nosql.driver.RequestSizeLimitException;
+import oracle.nosql.driver.RowSizeLimitException;
+import oracle.nosql.driver.TableLimitException;
+import oracle.nosql.driver.TableNotFoundException;
+import oracle.nosql.driver.Version;
+import oracle.nosql.driver.ops.GetTableRequest;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PreparedStatement;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutRequest.Option;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.TableResult.State;
+import oracle.nosql.driver.ops.WriteMultipleRequest;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.driver.values.StringValue;
+import oracle.nosql.util.tmi.TableRequestLimits;
+import oracle.nosql.util.tmi.TenantLimits;
+
+/**
+ * Test various limits:
+ * DDL limits (minicloud only)
+ * o num indexes
+ * o num tables
+ * o num schema evolutions
+ * Data limits (minicloud and not)
+ * o key size
+ * o index key size
+ */
+public class LimitsTest extends ProxyTestBase {
+
+ final static int INDEX_KEY_SIZE_LIMIT = 64;
+ final static int KEY_SIZE_LIMIT = rlimits.getPrimaryKeySizeLimit();
+ final static int QUERY_SIZE_LIMIT = rlimits.getQueryStringSizeLimit();
+ final static int REQUEST_SIZE_LIMIT = rlimits.getRequestSizeLimit();
+ final static int ROW_SIZE_LIMIT = rlimits.getRowSizeLimit();
+ final static int BATCH_REQUEST_SIZE_LIMIT =
+ rlimits.getBatchRequestSizeLimit();
+
+ final static String tableName = "limitTable";
+
+ /* Create a table */
+ final static String createTableDDL =
+ "CREATE TABLE IF NOT EXISTS limitTable (" +
+ "sid INTEGER, " +
+ "id INTEGER, " +
+ "name STRING, " +
+ "json JSON, " +
+ "PRIMARY KEY(SHARD(sid), id))";
+
+ /* Create a table used for key limits */
+ final static String createKeyTable1DDL =
+ "CREATE TABLE IF NOT EXISTS keyLimitTable1 (" +
+ "name STRING, " +
+ "city STRING, " +
+ "PRIMARY KEY(name))";
+
+ /* Create a table used for key limits */
+ final static String createKeyTable2DDL =
+ "CREATE TABLE IF NOT EXISTS keyLimitTable2 (" +
+ "name STRING, " +
+ "city STRING, " +
+ "address STRING, " +
+ "PRIMARY KEY(shard(name), city))";
+
+ /* Create an index for key limits */
+ final static String createKeyTableIndexDDL =
+ "CREATE INDEX CityIndex on keyLimitTable1 (city)";
+
+ /**
+ * Test limit on number of indexes
+ */
+ @Test
+ public void testIndexLimit() throws Exception {
+ assumeTrue("Skip the test if not minicloud or cloud test or " +
+ "tenantLimits is not provided",
+ cloudRunning && tenantLimits != null);
+
+ TableRequestLimits limits = tenantLimits.getStandardTableLimits();
+ int indexLimit = limits.getIndexesPerTable();
+
+ /* create a table with a bunch of fields */
+ StringBuilder sb = new StringBuilder();
+ sb.append("create table limitTable(id integer, ");
+ for (int i = 0; i < indexLimit + 1; i++) {
+ sb.append("name").append(i).append(" string,");
+ }
+ sb.append("primary key(id))");
+
+ tableOperation(handle, sb.toString(),
+ new TableLimits(20000, 20000, 50),
+ TableResult.State.ACTIVE, 20000);
+
+ for (int i = 0; i < indexLimit + 1; i++) {
+ sb = new StringBuilder();
+ sb.append("create index idx").append(i).
+ append(" on limitTable(name").append(i).append(")");
+ final String statement = sb.toString();
+ if (i == indexLimit) {
+ try {
+ tableOperation(handle, statement, null,
+ TableResult.State.ACTIVE, 20000);
+ fail("Adding index should have failed");
+ } catch (IndexLimitException ile) {}
+ } else {
+ tableOperation(handle, statement, null,
+ TableResult.State.ACTIVE, 20000);
+ }
+ }
+
+ /* listIndexes is a test-only method right now */
+ String[] indexes = listIndexes(handle, tableName);
+ assertEquals("Unexpected number of indexes", indexLimit, indexes.length);
+ }
+
+ /**
+ * Test limit on index size
+ */
+ @Test
+ public void testIndexSizeLimit() throws Exception {
+ assumeTrue(onprem == false); /* not for onprem */
+
+ /* create a table add some long-ish fields */
+ final String tableDDL = "create table limitTable(id integer, " +
+ "data string, primary key(id))";
+
+ tableOperation(handle, tableDDL,
+ new TableLimits(20000, 20000, 50),
+ TableResult.State.ACTIVE, 20000);
+
+ /* add some rows with data that exceeds default 64 bytes */
+ String data = makeString(400);
+ for (int i = 0; i < 20; i++) {
+ PutRequest prq = new PutRequest().setTableName("limitTable")
+ .setValue(new MapValue().put("id", i).put("data", data));
+ PutResult prs = handle.put(prq);
+ assertNotNull(prs.getVersion());
+ }
+
+ /*
+ * Create an index that should fail because of key size limit
+ */
+ try {
+ final String statement = "create index idx on limitTable(data)";
+ tableOperation(handle, statement, null,
+ TableResult.State.ACTIVE, 20000);
+ fail("Adding index should have failed");
+ } catch (IllegalArgumentException iae) {
+ assertTrue(iae.getMessage().contains("KeySizeLimitException"));
+ /* expected */
+ }
+ }
+
+ /**
+ * Test limit on number of schema evolutions
+ */
+ @Test
+ public void testEvolutionLimit() throws Exception {
+ assumeTrue("Skip the test if not minicloud or cloud test or " +
+ "tenantLimits is not provided",
+ cloudRunning && tenantLimits != null);
+
+ TableRequestLimits limits = tenantLimits.getStandardTableLimits();
+ int evoLimit = limits.getSchemaEvolutions();
+ if (evoLimit > NUM_SCHEMA_EVOLUTIONS) {
+ /*
+ * To prevent this test from running too long, skip the test if the
+ * table evolution times limit > ProxyTestBase.NUM_SCHEMA_EVOLUTIONS
+ */
+ return;
+ }
+
+ createTable();
+
+ for (int i = 0; i < evoLimit + 1; i++) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("alter table limitTable(add name").
+ append(i).append(" string)");
+ final String statement = sb.toString();
+ if (i == evoLimit) {
+ try {
+ tableOperation(handle, statement, null,
+ TableResult.State.ACTIVE, 20000);
+ fail("Alter table should have failed, num alter table: "
+ + i + ", limit: " + evoLimit);
+ } catch (EvolutionLimitException ele) {}
+ } else {
+ tableOperation(handle, statement, null,
+ TableResult.State.ACTIVE, 20000);
+ }
+ }
+ }
+
+ /**
+ * Test limit on number of tables
+ */
+ @Test
+ public void testNumTablesLimit() throws Exception {
+ assumeTrue("Skip the test if not minicloud or cloud test or " +
+ "tenantLimits is not provided",
+ cloudRunning && tenantLimits != null);
+
+ TenantLimits limits = tenantLimits;
+ int tableLimit = limits.getNumTables();
+ if (tableLimit > NUM_TABLES) {
+ /*
+ * To prevent this test from running too long, skip the test if the
+ * table number limit > ProxyTestBase.NUM_TABLES
+ */
+ return;
+ }
+
+ for (int i = 0; i < tableLimit + 1; i++) {
+ StringBuilder sb = new StringBuilder();
+ sb.append("create table t").append(i).append("(id integer, ").
+ append("primary key(id))");
+ final String statement = sb.toString();
+ if (i == tableLimit) {
+ /*
+ * Make the limits on these small so they don't trip
+ * the size/throughput per-tenant limits before hitting the
+ * number of tables limits
+ */
+ try {
+ tableOperation(handle, statement,
+ new TableLimits(500, 500, 10),
+ TableResult.State.ACTIVE, 20000);
+ fail("create table should have failed, num create table: "
+ + i + ", limit: " + tableLimit);
+ } catch (TableLimitException iae) {}
+ } else {
+ tableOperation(handle, statement,
+ new TableLimits(500, 500, 10),
+ TableResult.State.ACTIVE, 20000);
+ }
+ }
+ }
+
+ /*
+ * Not strictly a limit, but test the case where an index is created on
+ * existing JSON and there is a type mismatch.
+ */
+ @Test
+ public void testBadIndexType() {
+ final String jsonRow =
+ "{\"sid\": 1, \"id\": 1, \"name\": \"joe\", \"json\":" +
+ "{\"age\":5}"+
+ "}";
+ final String conflictingIndex =
+ "create index idx on limitTable(json.age as string)";
+
+ createTable();
+
+ /* put the JSON */
+ PutRequest prq = new PutRequest().setValueFromJson(jsonRow, null).
+ setTableName(tableName);
+ PutResult prs = handle.put(prq);
+ assertNotNull(prs.getVersion());
+
+ /* create a conflicting index, string index on integer field */
+ try {
+ tableOperation(handle, conflictingIndex, null,
+ TableResult.State.ACTIVE, 20000);
+ fail("Attempt to add a conflicting index should have failed");
+ } catch (IllegalArgumentException iae) {
+ // success
+ }
+ }
+
+ /**
+ * The query size limit is artificially 200. See ProxyTestBase
+ */
+ @Test
+ public void testQuerySizeLimit() {
+ assumeTrue(onprem == false);
+ createTable();
+ final StringBuilder sb = new StringBuilder();
+ sb.append("select aaaaa,bbbbb,ccccccccccc, dddddddd,")
+ .append("eeeeeee,ffffff ")
+ .append("from limitTable ")
+ .append("where xxxxxxxxxxxxxxxx = yyyyyyyyyyyyyyyyyyy");
+ while (sb.toString().length() < QUERY_SIZE_LIMIT) {
+ sb.append(" and xxxxxxxxxxxxxxx = yyyyyyyyyyyyyyyyyyy");
+ }
+
+ final String longQuery = sb.toString();
+
+ QueryRequest qr = new QueryRequest().setStatement(longQuery);
+ PrepareRequest pr = new PrepareRequest().setStatement(longQuery);
+
+ try {
+ handle.query(qr);
+ fail("Query should have failed");
+ } catch (IllegalArgumentException iae) {
+ // success
+ }
+
+ try {
+ handle.prepare(pr);
+ fail("Prepare should have failed");
+ } catch (IllegalArgumentException iae) {
+ // success
+ }
+ }
+
+ /**
+ * Test key size limits (primary, index) and value size limit
+ */
+ @Test
+ public void testKeyValueSizeLimit() {
+ assumeTrue("Skip the test if onprem test or tenantLimits is not provided",
+ !onprem && tenantLimits != null);
+
+ final oracle.kv.Version kvver =
+ new oracle.kv.Version(UUID.randomUUID(), 1, new RepNodeId(1, 1), 1);
+ final Version dummyVersion = Version.createVersion(kvver.toByteArray());
+
+ createKeyTable();
+
+ /*
+ * PrimaryKey size limit
+ */
+ final int keySizeLimit = tenantLimits.getStandardTableLimits()
+ .getPrimaryKeySizeLimit();
+ PutRequest putReq;
+ WriteMultipleRequest umReq = new WriteMultipleRequest();
+ int expFailIndex = keySizeLimit + 1;
+ for (int i = 32; i <= keySizeLimit + 1; i++) {
+ String name = makeName(i);
+ /* Put */
+ putReq = new PutRequest()
+ .setTableName("keyLimitTable1")
+ .setValue(new MapValue()
+ .put("name", name)
+ .put("city", "Omaha"));
+ try {
+ handle.put(putReq);
+ if (i > keySizeLimit) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertEquals(expFailIndex, i);
+ }
+
+ /* WriteMultipleRequest(Put) */
+ umReq.clear();
+ umReq.add(putReq, true);
+ try {
+ handle.writeMultiple(umReq);
+ if (i > keySizeLimit) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertEquals(expFailIndex, i);
+ }
+
+ /* PutIfAbsent */
+ putReq.setOption(Option.IfAbsent);
+ try {
+ handle.put(putReq);
+ if (i > keySizeLimit) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertEquals(expFailIndex, i);
+ }
+
+ /* WriteMultipleRequest(PutIfAbsent) */
+ umReq.clear();
+ umReq.add(putReq, true);
+ try {
+ handle.writeMultiple(umReq);
+ if (i > keySizeLimit) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertEquals(expFailIndex, i);
+ }
+
+ /* PutIfPresent */
+ putReq.setOption(Option.IfPresent);
+ try {
+ handle.put(putReq);
+ if (i > keySizeLimit) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertEquals(expFailIndex, i);
+ }
+
+ /* WriteMultipleRequest(PutIfPresent) */
+ umReq.clear();
+ umReq.add(putReq, true);
+ try {
+ handle.writeMultiple(umReq);
+ if (i > keySizeLimit) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertEquals(expFailIndex, i);
+ }
+
+ /* PutIfVersion */
+ putReq.setOption(Option.IfVersion)
+ .setMatchVersion(dummyVersion);
+ try {
+ handle.put(putReq);
+ if (i > keySizeLimit) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertEquals(expFailIndex, i);
+ }
+
+ /* WriteMultipleRequest(PutIfVersion) */
+ umReq.clear();
+ umReq.add(putReq, true);
+ try {
+ handle.writeMultiple(umReq);
+ if (i > keySizeLimit) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertEquals(expFailIndex, i);
+ }
+ }
+
+ /* Primary key contains 2 fields: 1 shard key + 1 minor key */
+ String name = makeName(32);
+ expFailIndex = keySizeLimit - name.length() + 1;
+ for (int i = 0; i <= keySizeLimit - name.length() + 1; i++) {
+ String city = makeName(i);
+ /* Put */
+ putReq = new PutRequest()
+ .setTableName("keyLimitTable2")
+ .setValue(new MapValue().put("name", name)
+ .put("city", city));
+
+ try {
+ handle.put(putReq);
+ if (i + name.length() > keySizeLimit) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertEquals(expFailIndex, i);
+ }
+
+ /* PutIfAbsent */
+ putReq.setOption(Option.IfAbsent);
+ try {
+ handle.put(putReq);
+ if (i + name.length() > keySizeLimit) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertEquals(expFailIndex, i);
+ }
+
+ /* PutIfPresent */
+ putReq.setOption(Option.IfPresent);
+ try {
+ handle.put(putReq);
+ if (i + name.length() > keySizeLimit) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertEquals(expFailIndex, i);
+ }
+
+ /* PutIfVersion */
+ putReq.setOption(Option.IfVersion)
+ .setMatchVersion(dummyVersion);
+ try {
+ handle.put(putReq);
+ if (i + name.length() > keySizeLimit) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertEquals(expFailIndex, i);
+ }
+ }
+
+ /*
+ * IndexKey size limit
+ */
+ final int indexKeyLimit = INDEX_KEY_SIZE_LIMIT;
+
+ /*
+ * Expect index size limit exception
+ */
+ name = makeName(62);
+ for (int i = 1; i < indexKeyLimit + 1; i++) {
+ String city = makeName(i);
+ try {
+ putReq = new PutRequest()
+ .setTableName("keyLimitTable1")
+ .setValue(new MapValue().put("name", name)
+ .put("city", city));
+ handle.put(putReq);
+ if (i > indexKeyLimit) {
+ fail("Put should have failed");
+ }
+ } catch (Exception e) {
+ assertEquals(indexKeyLimit, i);
+ }
+ }
+
+ /*
+ * Value size limit
+ */
+ final int rowSizeLimit = ROW_SIZE_LIMIT;
+ String address = makeName(rowSizeLimit);
+ try {
+ putReq = new PutRequest()
+ .setTableName("keyLimitTable2")
+ .setValue(new MapValue().put("name", "aaaa")
+ .put("city", "Omaha")
+ .put("address", address));
+ handle.put(putReq);
+ fail("Put should have failed");
+ } catch (RowSizeLimitException ex) {
+ }
+
+ /*
+ * Value size limit check on WriteMultiple sub request.
+ */
+ address = makeName(ROW_SIZE_LIMIT);
+ umReq = new WriteMultipleRequest();
+ umReq.add(new PutRequest()
+ .setTableName("keyLimitTable2")
+ .setValue(new MapValue().put("name", "aaaa")
+ .put("city", "Omaha")
+ .put("address", address)),
+ false);
+ try {
+ handle.writeMultiple(umReq);
+ fail("WriteMultiple should have failed");
+ } catch (RowSizeLimitException ex) {
+ }
+ }
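+
+ /*
+ * Illustrative sketch only, not used above: the repeated
+ * try/fail/catch pattern in testKeyValueSizeLimit could be collapsed
+ * into a helper like this. The helper and parameter names are ours,
+ * not the original author's.
+ */
+ private void expectKeySizeLimit(PutRequest req, boolean shouldFail) {
+ try {
+ handle.put(req);
+ if (shouldFail) {
+ fail("Put should have failed");
+ }
+ } catch (KeySizeLimitException ex) {
+ assertTrue("unexpected KeySizeLimitException", shouldFail);
+ }
+ }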
+
+ @Test
+ public void testInsertKeyValueSize() {
+ assumeTrue("Skip testKeyValueSizeInsert() if run against on-prem",
+ onprem == false);
+ assumeKVVersion("testInsertKeyValueSize", 21, 3, 5);
+
+ final int maxKeySize = rlimits.getPrimaryKeySizeLimit();
+ final int maxRowSize = rlimits.getRowSizeLimit();
+
+ tableOperation(handle, createKeyTable1DDL,
+ new TableLimits(1000, 1000, 5),
+ TableResult.State.ACTIVE, 10000);
+
+ String ddl = "CREATE TABLE IF NOT EXISTS testId(" +
+ "id INTEGER GENERATED ALWAYS AS IDENTITY, pk STRING, " +
+ "s STRING, PRIMARY KEY(pk, id))";
+ tableOperation(handle, ddl, new TableLimits(1000, 1000, 5),
+ TableResult.State.ACTIVE, 10000);
+
+
+ ddl = "CREATE TABLE IF NOT EXISTS test2pk(" +
+ "sk STRING, pk STRING, s STRING, PRIMARY KEY(shard(sk), pk))";
+ tableOperation(handle, ddl, new TableLimits(1000, 1000, 5),
+ TableResult.State.ACTIVE, 10000);
+
+ /* Test insert query */
+ PrepareRequest preq;
+ PrepareResult pret;
+
+ PreparedStatement pstmt;
+ QueryRequest qreq;
+ QueryResult qret;
+ String insert;
+
+ String fmt = "insert into keyLimitTable1 values('%s', '%s')";
+ String name64 = makeName(maxKeySize);
+ String city512K = makeName(maxRowSize - 5); /* 5 - overhead */
+
+ insert = String.format(fmt, name64, city512K);
+ preq = new PrepareRequest().setStatement(insert);
+ pret = handle.prepare(preq);
+ qreq = new QueryRequest().setPreparedStatement(pret);
+ qret = handle.query(qreq);
+ assertEquals(1, qret.getResults().size());
+
+ insert = String.format(fmt, name64 + "a", city512K);
+ preq = new PrepareRequest().setStatement(insert);
+ try {
+ handle.prepare(preq);
+ fail("Prepare should fail: key size exceeded");
+ } catch (KeySizeLimitException ex) {
+ /* expected */
+ checkErrorMessage(ex);
+ }
+
+ qreq = new QueryRequest().setStatement(insert);
+ try {
+ handle.query(qreq);
+ fail("Query should fail: key size exceeded");
+ } catch (KeySizeLimitException ex) {
+ /* expected */
+ checkErrorMessage(ex);
+ }
+
+ insert = String.format(fmt, name64, city512K + "a");
+ preq = new PrepareRequest().setStatement(insert);
+ try {
+ handle.prepare(preq);
+ fail("Prepare should fail: value size exceeded");
+ } catch (RowSizeLimitException ex) {
+ /* expected */
+ checkErrorMessage(ex);
+ }
+
+ qreq = new QueryRequest().setStatement(insert);
+ try {
+ handle.query(qreq);
+ fail("Query should fail: value size exceeded");
+ } catch (RowSizeLimitException ex) {
+ /* expected */
+ checkErrorMessage(ex);
+ }
+
+ insert = "declare $name string; $city string; " +
+ "insert into keyLimitTable1(name, city) values($name, $city)";
+ preq = new PrepareRequest().setStatement(insert);
+ pret = handle.prepare(preq);
+ pstmt = pret.getPreparedStatement();
+
+ pstmt.setVariable("$name", new StringValue(name64));
+ pstmt.setVariable("$city", new StringValue(city512K));
+ qreq = new QueryRequest().setPreparedStatement(pstmt);
+ qret = handle.query(qreq);
+ assertEquals(1, qret.getResults().size());
+
+ pstmt.clearVariables();
+ pstmt.setVariable("$name", new StringValue(name64 + "a"));
+ pstmt.setVariable("$city", new StringValue(city512K));
+ qreq = new QueryRequest().setPreparedStatement(pstmt);
+ try {
+ qret = handle.query(qreq);
+ fail("Query should fail: key size exceeded");
+ } catch (KeySizeLimitException ex) {
+ /* expected */
+ checkErrorMessage(ex);
+ }
+
+ pstmt.clearVariables();
+ pstmt.setVariable("$name", new StringValue(name64));
+ pstmt.setVariable("$city", new StringValue(city512K + "a"));
+ qreq = new QueryRequest().setPreparedStatement(pstmt);
+ try {
+ qret = handle.query(qreq);
+ fail("Query should fail: value size exceeded");
+ } catch (RowSizeLimitException ex) {
+ /* expected */
+ checkErrorMessage(ex);
+ }
+
+ fmt = "insert into testId(pk, s) values('%s', '%s')";
+ insert = String.format(fmt, name64, city512K);
+ preq = new PrepareRequest().setStatement(insert);
+ pret = handle.prepare(preq);
+
+ qreq = new QueryRequest().setPreparedStatement(pret);
+ try {
+ qret = handle.query(qreq);
+ fail("Query should fail: key size exceeded");
+ } catch (KeySizeLimitException ex) {
+ /* expected */
+ checkErrorMessage(ex);
+ }
+
+ /* PK has 2 components */
+ String s32 = makeName(32);
+ String s33 = makeName(33);
+ fmt = "insert into test2pk values('%s', '%s', 'a')";
+ insert = String.format(fmt, s32, s32);
+ preq = new PrepareRequest().setStatement(insert);
+ pret = handle.prepare(preq);
+ qreq = new QueryRequest().setPreparedStatement(pret);
+ qret = handle.query(qreq);
+ assertEquals(1, qret.getResults().size());
+
+ /* Key size exceeded, sk: 33, pk: 32 */
+ insert = String.format(fmt, s33, s32);
+ preq = new PrepareRequest().setStatement(insert);
+ try {
+ handle.prepare(preq);
+ fail("Prepare should fail: key size exceeded");
+ } catch (KeySizeLimitException ex) {
+ /* expected */
+ checkErrorMessage(ex);
+ }
+
+ /* Key size exceeded, sk: 32, pk: 33 */
+ insert = String.format(fmt, s32, s33);
+ preq = new PrepareRequest().setStatement(insert);
+ try {
+ handle.prepare(preq);
+ fail("Prepare should fail: key size exceeded");
+ } catch (KeySizeLimitException ex) {
+ /* expected */
+ checkErrorMessage(ex);
+ }
+
+ /* Query with variables */
+ insert = "declare $sk string; $pk string; " +
+ "upsert into test2pk(sk, pk, s) values($sk, $pk, 'a')";
+ preq = new PrepareRequest().setStatement(insert);
+ pret = handle.prepare(preq);
+ pstmt = pret.getPreparedStatement();
+
+ pstmt.setVariable("$sk", new StringValue(s32));
+ pstmt.setVariable("$pk", new StringValue(s32));
+ qreq = new QueryRequest().setPreparedStatement(pstmt);
+ qret = handle.query(qreq);
+ assertEquals(1, qret.getResults().size());
+
+ /* Key size exceeded, sk: 33, pk: 32 */
+ pstmt.clearVariables();
+ pstmt.setVariable("$sk", new StringValue(s33));
+ pstmt.setVariable("$pk", new StringValue(s32));
+ qreq = new QueryRequest().setPreparedStatement(pstmt);
+ try {
+ qret = handle.query(qreq);
+ fail("Query should fail: key size exceeded");
+ } catch (KeySizeLimitException ex) {
+ /* expected */
+ checkErrorMessage(ex);
+ }
+
+ /* Key size exceeded, sk: 32, pk: 33 */
+ pstmt.clearVariables();
+ pstmt.setVariable("$sk", new StringValue(s32));
+ pstmt.setVariable("$pk", new StringValue(s33));
+ qreq = new QueryRequest().setPreparedStatement(pstmt);
+ try {
+ qret = handle.query(qreq);
+ fail("Query should fail: key size exceeded");
+ } catch (KeySizeLimitException ex) {
+ /* expected */
+ checkErrorMessage(ex);
+ }
+ }
+
+ /**
+ * Test index key size by populating a table before creating the
+ * index. Index creation should fail.
+ */
+ @Test
+ public void testCreateIndexFail() {
+ assumeTrue("Skip the test if onprem test or tenantLimits is not provided",
+ !onprem && tenantLimits != null);
+
+ final int indexKeyLimit = tenantLimits.getStandardTableLimits()
+ .getIndexKeySizeLimit();
+ createTable();
+
+ /*
+ * Populate with a bunch of values that will fail an index key size
+ * limit check. Use the name field.
+ */
+ MapValue value = new MapValue()
+ .put("id", 1)
+ .put("name", makeName(indexKeyLimit+1))
+ .putFromJson("json", "{\"a\": \"boo\"}", null);
+ PutRequest putReq = new PutRequest()
+ .setTableName("limitTable")
+ .setValue(value);
+
+ for (int i = 0; i < 500; i++) {
+ value.put("sid", i);
+ handle.put(putReq);
+ }
+
+ final String statementSize = "create index name on limitTable(name)";
+ final String statementType =
+ "create index name on limitTable(json.a as integer)";
+ try {
+ tableOperation(handle, statementSize, null,
+ TableResult.State.ACTIVE, 20000);
+ fail("Adding index should have failed");
+ } catch (IllegalArgumentException iae) {
+ }
+
+ /*
+ * Try adding an index on JSON with the wrong type. This will
+ * also fail. The rows have a string in that field.
+ */
+ try {
+ tableOperation(handle, statementType, null,
+ TableResult.State.ACTIVE, 20000);
+ fail("Adding index should have failed");
+ } catch (IllegalArgumentException iae) {
+ }
+ }
+
+ @Test
+ public void testRequestSizeLimit() {
+ assumeTrue("Skip the test if onprem test or tenantLimits is not provided",
+ !onprem && tenantLimits != null);
+
+ final TableRequestLimits limits = tenantLimits.getStandardTableLimits();
+ final int reqSizeLimit = limits.getRequestSizeLimit();
+ final int batchReqSizeLimit = limits.getBatchRequestSizeLimit();
+
+ createTable();
+
+ MapValue value = new MapValue()
+ .put("sid", 0)
+ .put("id", 1)
+ .put("name", "jack.smith")
+ .put("json", makeName(reqSizeLimit));
+ PutRequest putReq = new PutRequest()
+ .setTableName("limitTable")
+ .setValue(value);
+ try {
+ handle.put(putReq);
+ fail("Put should have failed");
+ } catch (RequestSizeLimitException ex) {
+ }
+
+ /*
+ * A WriteMultipleRequest with the maximum number of operations and
+ * per-op data size is expected to succeed; the total stays within
+ * the batch request size limit.
+ */
+ final int numOps = 50;
+ int dataSizePerOp = ROW_SIZE_LIMIT - 1024;
+ WriteMultipleRequest umReq = new WriteMultipleRequest();
+ for (int i = 0; i < numOps; i++) {
+ value = new MapValue()
+ .put("sid", 0)
+ .put("id", i)
+ .put("name", "jack.smith")
+ .put("json", makeName(dataSizePerOp));
+ umReq.add(new PutRequest()
+ .setTableName("limitTable")
+ .setValue(value),
+ false);
+ }
+ try {
+ handle.writeMultiple(umReq);
+ } catch (Exception ex) {
+ fail("WriteMultiple failed: " + ex.getMessage());
+ }
+
+ /*
+ * A WriteMultipleRequest whose total request size exceeds
+ * batchReqSizeLimit is expected to fail.
+ */
+ dataSizePerOp = batchReqSizeLimit/numOps;
+ umReq = new WriteMultipleRequest();
+ for (int i = 0; i < numOps; i++) {
+ value = new MapValue()
+ .put("sid", 0)
+ .put("id", i)
+ .put("name", "jack.smith")
+ .put("json", makeName(dataSizePerOp));
+ umReq.add(new PutRequest()
+ .setTableName("limitTable")
+ .setValue(value),
+ false);
+ }
+ try {
+ handle.writeMultiple(umReq);
+ fail("WriteMultiple should have failed");
+ } catch (RequestSizeLimitException ex) {
+ }
+
+ /*
+ * Each sub request size should not exceed REQUEST_SIZE_LIMIT.
+ */
+ dataSizePerOp = REQUEST_SIZE_LIMIT;
+ umReq = new WriteMultipleRequest();
+ value = new MapValue()
+ .put("sid", 0)
+ .put("id", 0)
+ .put("name", "jack.smith")
+ .put("json", makeName(dataSizePerOp));
+ umReq.add(new PutRequest()
+ .setTableName("limitTable")
+ .setValue(value),
+ false);
+ try {
+ handle.writeMultiple(umReq);
+ fail("WriteMultiple should have failed");
+ } catch (RequestSizeLimitException ex) {
+ }
+ }
+
+ /**
+ * Test the number of column limit per table.
+ */
+ @Test
+ public void testColumnLimit() {
+ assumeTrue("Skip the test if not minicloud or cloud test or " +
+ "tenantLimits is not provided",
+ cloudRunning && tenantLimits != null);
+
+ TableRequestLimits requestLimits =
+ tenantLimits.getStandardTableLimits();
+ final int numFields = requestLimits.getColumnsPerTable();
+ final TableLimits tableLimits = new TableLimits(1000, 1000, 1);
+
+ String ddl = makeCreateTableDDL("columnLimitOK", numFields);
+ tableOperation(handle, ddl, tableLimits, State.ACTIVE, 20_000);
+
+ /*
+ * Creating a table whose column count exceeds the limit is expected
+ * to fail.
+ */
+ ddl = makeCreateTableDDL("bad", numFields + 1);
+ try {
+ tableOperation(handle, ddl, tableLimits, State.ACTIVE, 20_000);
+ fail("Creating table with the number of columns that exceeded " +
+ "the limit should have fail");
+ } catch (IllegalArgumentException iae) {
+ }
+
+ /*
+ * Enforcing the # of columns limit via alter table is only
+ * supported by the "real" cloud -- minicloud or level 3 tests
+ */
+ if (cloudRunning) {
+ /*
+ * Adding one more field to a table that already has the maximum
+ * number of columns is expected to fail.
+ */
+ ddl = "ALTER TABLE columnLimitOK(ADD nc1 INTEGER)";
+ try {
+ tableOperation(handle, ddl, null, State.ACTIVE, 20_000);
+ fail("Adding an new field to a table with max number of " +
+ "columns should have failed.");
+ } catch (IllegalArgumentException iae) {
+ }
+
+ /* Drop a column, then add a new column. */
+ ddl = "ALTER TABLE columnLimitOK(drop c0)";
+ tableOperation(handle, ddl, null, State.ACTIVE, 20_000);
+ ddl = "ALTER TABLE columnLimitOK(ADD nc1 INTEGER)";
+ tableOperation(handle, ddl, null, State.ACTIVE, 20_000);
+ }
+ }
+
+ /**
+ * Tests limits on total size and throughput allowed per-table and
+ * per-tenant.
+ */
+ @Test
+ public void testTableProvisioningLimits() {
+ /*
+ * This test creates tables that exceed the tenant capacity, so it
+ * is not applicable in a cloud test
+ */
+ assumeTrue(useMiniCloud);
+
+ TableRequestLimits requestLimits =
+ tenantLimits.getStandardTableLimits();
+ final int maxRead = requestLimits.getTableReadUnits();
+ final int maxWrite = requestLimits.getTableWriteUnits();
+ final int maxSize = requestLimits.getTableSize();
+
+ /* TODO: when per-tenant limits are available get them */
+ final int maxTenantRead = tenantLimits.getTenantReadUnits();
+ final int maxTenantWrite = tenantLimits.getTenantWriteUnits();
+ final int maxTenantSize = tenantLimits.getTenantSize();
+
+ TableLimits limits = new TableLimits(maxRead + 1, 1, 1);
+ String ddl = makeCreateTableDDL("limits", 2);
+
+ assertDeploymentException(ddl, limits, null, "read");
+
+ limits = new TableLimits(1, maxWrite + 1, 1);
+ assertDeploymentException(ddl, limits, null, "write");
+
+ limits = new TableLimits(1, 1, maxSize + 1);
+ assertDeploymentException(ddl, limits, null, "size");
+
+ /* make a table and try to evolve it past read limit */
+ limits = new TableLimits(maxRead, maxWrite, maxSize);
+ tableOperation(handle, ddl, limits, State.ACTIVE, 20_000);
+
+ limits = new TableLimits(maxRead+1, maxWrite, maxSize);
+ assertDeploymentException(null, limits, "limits", "read");
+
+ /*
+ * Test per-tenant limits by trying to create another table. If it's one
+ * table this only works if the per-table limit is >= 1/2 of the
+ * tenant limit. See ProxyTestBase's TenantLimits.
+ */
+ limits = new TableLimits(maxTenantRead - maxRead + 1, 1, 1);
+ ddl = makeCreateTableDDL("limits1", 2);
+ assertDeploymentException(ddl, limits, null,
+ new String[] {"read", "tenant"});
+
+ limits = new TableLimits(1, maxTenantWrite - maxWrite + 1, 1);
+ assertDeploymentException(ddl, limits, null,
+ new String[] {"write", "tenant"});
+
+ limits = new TableLimits(1, 1, maxTenantSize - maxSize + 1);
+ assertDeploymentException(ddl, limits, null,
+ new String[] {"size", "tenant"});
+ }
+
+ /**
+ * Use a special tier and tenant for this test; otherwise operations
+ * performed by other tests would interfere with this test case.
+ */
+ @Test
+ public void testOperationLimits() {
+ /*
+ * This test needs to adjust the rate of DDL execution and table
+ * limit reductions, so it is not applicable in a cloud test
+ */
+ assumeTrue(useMiniCloud);
+
+ /*
+ * In order to isolate this test from others as well as allowing it
+ * to run more than once/day, use a timestamp on the test tier to
+ * make it unique.
+ */
+ String suffix = Long.toString(System.currentTimeMillis());
+ final String limitsTenant = "LimitsTenant." + suffix;
+ final int ddlRate = 4;
+ final int redRate = 2;
+
+ /*
+ * Throttling exceptions are retryable, so use a no-retry handle to
+ * surface the exception itself (vs a timeout)
+ */
+ handle = configNoRetryHandle(limitsTenant);
+
+ /* run a few operations first to warm up the security cache */
+ if (isSecure()) {
+ try {
+ createTable(limitsTenant);
+ handle.getTable(new GetTableRequest()
+ .setTableName(tableName));
+ } catch (TableNotFoundException e) {
+ }
+ }
+
+ int origDDLRate = tenantLimits.getDdlRequestsRate();
+ int origReductionRate = tenantLimits.getTableLimitReductionsRate();
+ tenantLimits.setDdlRequestsRate(ddlRate);
+ tenantLimits.setTableLimitReductionsRate(redRate);
+ addTier(limitsTenant, tenantLimits);
+ try {
+ for (int i = 0; i < 10; i++) {
+ try {
+ createTable(limitsTenant);
+ if (i == 5) {
+ fail("DDL operation should have failed");
+ }
+ } catch (OperationThrottlingException e) {
+ // success
+ break;
+ }
+ }
+ /* reset DDL rate to avoid failure for that reason */
+ tenantLimits.setDdlRequestsRate(origDDLRate);
+ addTier(limitsTenant, tenantLimits);
+
+ /*
+ * 2 reductions are allowed. The 3rd should throw
+ */
+ TableLimits limits = new TableLimits(10000, 20000, 50);
+ tableOperation(handle, null, limits, limitsTenant, tableName,
+ null /* matchETag */, State.ACTIVE, 20_000);
+ limits = new TableLimits(20000, 10000, 50);
+ tableOperation(handle, null, limits, limitsTenant, tableName,
+ null /* matchETag */, State.ACTIVE, 20_000);
+ limits = new TableLimits(19000, 10000, 50);
+ failLimitsChange(limits, limitsTenant);
+
+ /* read */
+ limits = new TableLimits(10000, 10000, 50);
+ failLimitsChange(limits, limitsTenant);
+
+ /* write */
+ limits = new TableLimits(20000, 1000, 50);
+ failLimitsChange(limits, limitsTenant);
+
+ /* size */
+ limits = new TableLimits(20000, 10000, 30);
+ failLimitsChange(limits, limitsTenant);
+ } finally {
+ /* cleanup */
+ tenantLimits.setDdlRequestsRate(origDDLRate);
+ tenantLimits.setTableLimitReductionsRate(origReductionRate);
+ handle = configHandle(getProxyURL());
+ deleteTier(limitsTenant);
+ }
+ }
+
+ /**
+ * Test tenant max auto scaling table count and limits mode max change per
+ * day.
+ */
+ @Test
+ public void testAutoScalingTableLimits() {
+ assumeTrue("Skip the test if not minicloud or cloud test or " +
+ "tenantLimits is not provided",
+ cloudRunning && tenantLimits != null);
+
+ /*
+ * Create 3 auto scaling tables.
+ */
+ final String CREATE_TABLEX = "create table if not exists testusersX(" +
+ "id integer, name string, primary key(id))";
+ final String CREATE_TABLE1 = "create table if not exists testusers1(" +
+ "id integer, name string, primary key(id))";
+ final String CREATE_TABLE2 = "create table if not exists testusers2(" +
+ "id integer, name string, primary key(id))";
+ TableResult tres = tableOperation(handle,
+ CREATE_TABLEX,
+ new TableLimits(20),
+ 20000);
+ verifyAutoScalingResult(tres, 20);
+ tres = tableOperation(handle,
+ CREATE_TABLE1,
+ new TableLimits(30),
+ 20000);
+ verifyAutoScalingResult(tres, 30);
+ tres = tableOperation(handle,
+ CREATE_TABLE2,
+ new TableLimits(40),
+ 20000);
+ verifyAutoScalingResult(tres, 40);
+
+ /*
+ * Cannot create more than 3 auto scaling tables.
+ */
+ final String CREATE_TABLE3 = "create table if not exists testusers3(" +
+ "id integer, name string, primary key(id))";
+ tableOperation(handle,
+ CREATE_TABLE3,
+ new TableLimits(50),
+ null,
+ TableResult.State.ACTIVE,
+ TableLimitException.class);
+
+ /*
+ * Alter the table limits mode from AUTO_SCALING to PROVISIONED
+ */
+ tres = tableOperation(handle,
+ null,
+ new TableLimits(30, 40, 50),
+ "testusersX",
+ TableResult.State.ACTIVE,
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+ assertEquals(30, tres.getTableLimits().getReadUnits());
+ assertEquals(40, tres.getTableLimits().getWriteUnits());
+ assertEquals(50, tres.getTableLimits().getStorageGB());
+ assertEquals(PROVISIONED, tres.getTableLimits().getMode());
+
+ if (tenantLimits.getBillingModeChangeRate() == 1) {
+ /*
+ * Cannot change the limits mode any more after reaching mode max
+ * allowed changes per day.
+ */
+ tableOperation(handle,
+ null,
+ new TableLimits(10),
+ "testusersX",
+ TableResult.State.ACTIVE,
+ OperationThrottlingException.class);
+ } else {
+ /*
+ * Alter the table limits mode from PROVISIONED to AUTO_SCALING
+ */
+ tres = tableOperation(handle,
+ null,
+ new TableLimits(10),
+ "testusersX",
+ TableResult.State.ACTIVE,
+ 20000);
+ verifyAutoScalingResult(tres, 10);
+
+ /*
+ * Cannot change the limits mode any more after reaching mode max
+ * allowed changes per day.
+ */
+ tableOperation(handle,
+ null,
+ new TableLimits(300, 400, 500),
+ "testusersX",
+ TableResult.State.ACTIVE,
+ OperationThrottlingException.class);
+ }
+ }
+
+ private void verifyAutoScalingResult(TableResult tres, int tableSize) {
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+ assertEquals(tenantLimits.getAutoScalingTableReadUnits(),
+ tres.getTableLimits().getReadUnits());
+ assertEquals(tenantLimits.getAutoScalingTableWriteUnits(),
+ tres.getTableLimits().getWriteUnits());
+ assertEquals(tableSize, tres.getTableLimits().getStorageGB());
+ assertEquals(ON_DEMAND, tres.getTableLimits().getMode());
+ }
+
+ private void failLimitsChange(TableLimits limits, String compartmentId) {
+ try {
+ tableOperation(handle, null, limits, compartmentId, tableName,
+ null /* matchETag */, State.ACTIVE, 20_000);
+ fail("Attempt at reduction should have failed");
+ } catch (IllegalArgumentException | OperationThrottlingException iae) {
+ // success
+ }
+ }
+
+ private void assertDeploymentException(String statement,
+ TableLimits limits,
+ String name,
+ String ... contains) {
+ try {
+ tableOperation(handle, statement, limits,
+ name, State.ACTIVE, 20_000);
+ fail("Operation should have thrown");
+ } catch (DeploymentException de) {
+ for (String s : contains) {
+ assertTrue(de.getMessage(), de.getMessage().contains(s));
+ }
+ }
+ }
+
+ private String makeCreateTableDDL(String name, int numFields) {
+ final StringBuilder sb = new StringBuilder("CREATE TABLE ");
+ sb.append(name);
+ sb.append("(id INTEGER, ");
+ for (int i = 0; i < numFields - 1; i++) {
+ sb.append("c");
+ sb.append(i);
+ sb.append(" STRING, ");
+ }
+ sb.append("PRIMARY KEY(id))");
+ return sb.toString();
+ }
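+
+ /*
+ * For reference, makeCreateTableDDL("t", 3) above produces:
+ * CREATE TABLE t(id INTEGER, c0 STRING, c1 STRING, PRIMARY KEY(id))
+ */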
+
+ private String makeName(int len) {
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < len; i++) {
+ sb.append("a");
+ }
+ return sb.toString();
+ }
+
+ /* shared by test cases */
+ private void createTable() {
+ tableOperation(handle, createTableDDL,
+ new TableLimits(20000, 20000, 50),
+ TableResult.State.ACTIVE, 10000);
+ }
+
+ /* shared by test cases */
+ private void createTable(String compartmentId) {
+ tableOperation(handle, createTableDDL,
+ new TableLimits(20000, 20000, 50),
+ compartmentId, null /* tableName */,
+ null /* matchETag */, TableResult.State.ACTIVE, 10000);
+ }
+
+ /* shared by test cases */
+ private void createKeyTable() {
+ tableOperation(handle, createKeyTable1DDL,
+ new TableLimits(20000, 20000, 50),
+ TableResult.State.ACTIVE, 10000);
+ tableOperation(handle, createKeyTableIndexDDL,
+ null,
+ TableResult.State.ACTIVE, 10000);
+ tableOperation(handle, createKeyTable2DDL,
+ new TableLimits(20000, 20000, 50),
+ TableResult.State.ACTIVE, 10000);
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/MonitorStatsTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/MonitorStatsTest.java
new file mode 100644
index 00000000..a2999044
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/MonitorStatsTest.java
@@ -0,0 +1,590 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static oracle.nosql.proxy.MonitorStats.ACTIVE_REQUEST_NAME;
+import static oracle.nosql.proxy.MonitorStats.DATA_RESPONSE_READ_SIZE_NAME;
+import static oracle.nosql.proxy.MonitorStats.DATA_RESPONSE_WRITE_SIZE_NAME;
+import static oracle.nosql.proxy.MonitorStats.REQUEST_LABELS;
+import static oracle.nosql.proxy.MonitorStats.REQUEST_LATENCY_NAME;
+import static oracle.nosql.proxy.MonitorStats.REQUEST_SERVER_FAILED_NAME;
+import static oracle.nosql.proxy.MonitorStats.REQUEST_THROTTLING_FAILED_NAME;
+import static oracle.nosql.proxy.MonitorStats.REQUEST_TOTAL_NAME;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
+
+import oracle.nosql.common.sklogger.Counter;
+import oracle.nosql.common.sklogger.LongGauge;
+import oracle.nosql.common.sklogger.MetricFamilySamples;
+import oracle.nosql.common.sklogger.MetricFamilySamples.Sample;
+import oracle.nosql.common.sklogger.MetricRegistry;
+import oracle.nosql.common.sklogger.PerfQuantile;
+import oracle.nosql.common.sklogger.SizeQuantile;
+import oracle.nosql.common.sklogger.StatsData;
+import oracle.nosql.driver.TableNotFoundException;
+import oracle.nosql.driver.TimeToLive;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.GetIndexesRequest;
+import oracle.nosql.driver.ops.GetIndexesResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.GetTableRequest;
+import oracle.nosql.driver.ops.ListTablesRequest;
+import oracle.nosql.driver.ops.ListTablesResult;
+import oracle.nosql.driver.ops.MultiDeleteRequest;
+import oracle.nosql.driver.ops.MultiDeleteResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutRequest.Option;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.proxy.MonitorStats.OperationType;
+
+import org.junit.Test;
+
+/**
+ * This is a Proxy concurrent smoke test along with monitor stats checking.
+ * It can also be used to generate monitor data samples by setting log
+ * configuration through System property java.util.logging.config.file and
+ * a longer test time through System property monitorSeconds.
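+ *
+ * For example, a hypothetical invocation (adapt to the local build):
+ *   mvn test -Dtest=MonitorStatsTest -DmonitorSeconds=600 \
+ *     -Djava.util.logging.config.file=logging.properties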
+ */
+public class MonitorStatsTest extends ProxyTestBase {
+
+ private static final String WATCHER_NAME = "MonitorStatsTest";
+ /*
+ * The number of threads used to run the smoke test. This number should
+ * match the connection pool size in the NoSQLHandle config.
+ */
+ private static final int CONCURRENT_NUM = 3;
+ private static final String TABLE_PREFIX = "userStats";
+ private ExecutorService executor =
+ Executors.newFixedThreadPool(CONCURRENT_NUM);
+
+ /*
+ * Total requests for each type
+ */
+ private final Map<OperationType, AtomicLong> requestTotal;
+ /*
+ * Failed data requests for each type
+ */
+ private final Map<OperationType, AtomicLong> serverFailed;
+ private final Map<OperationType, AtomicLong> userFailed;
+ private final Map<OperationType, AtomicLong> throttlingFailed;
+ /*
+ * Total data operations for each type. A failed data request counts as
+ * 0 ops, and a multi-operation request counts as N ops.
+ */
+ private final Map<OperationType, AtomicLong> operationTotal;
+ /*
+ * Data operation charged metrics
+ */
+ private final AtomicLong writeKBCharged = new AtomicLong(0);
+ private final AtomicLong readKBCharged = new AtomicLong(0);
+
+ public MonitorStatsTest() {
+ requestTotal = new HashMap<OperationType, AtomicLong>();
+ serverFailed = new HashMap<OperationType, AtomicLong>();
+ userFailed = new HashMap<OperationType, AtomicLong>();
+ throttlingFailed = new HashMap<OperationType, AtomicLong>();
+ operationTotal = new HashMap<OperationType, AtomicLong>();
+ reset();
+ }
+
+ @Test
+ public void smokeTest() {
+
+ final long startTime = System.nanoTime();
+ /*
+ * Set monitorSeconds to a longer time if this test is used to generate
+ * monitor data samples that are collected by MetricRegistry in the
+ * background.
+ */
+ int seconds = Integer.parseInt(
+ System.getProperty("monitorSeconds", "30"));
+ long totalTime = seconds * 1_000_000_000L;
+
+ while(true) {
+ /*
+ * Submit CONCURRENT_NUM smokeTest tasks.
+ */
+ Collection<Callable<Void>> tasks = new ArrayList<Callable<Void>>();
+ for (int k = 0; k < CONCURRENT_NUM; k++) {
+ final String tableName = TABLE_PREFIX + k;
+ tasks.add(new Callable<Void>() {
+ @Override
+ public Void call() {
+ smokeTest(tableName);
+ return null;
+ }
+ });
+ }
+ try {
+ reset();
+ List<Future<Void>> futures = executor.invokeAll(tasks);
+ for(Future<Void> f : futures) {
+ f.get();
+ }
+ } catch (InterruptedException e) {
+ fail("unexpected interrupt");
+ } catch (ExecutionException e) {
+ fail("unexpected ExecutionException: " + e.getCause());
+ }
+ checkMonitorData();
+ if (System.nanoTime() - startTime > totalTime) {
+ break;
+ }
+ }
+ }
+
+ private void reset() {
+ for(OperationType type : OperationType.values()) {
+ requestTotal.put(type, new AtomicLong(0));
+ serverFailed.put(type, new AtomicLong(0));
+ userFailed.put(type, new AtomicLong(0));
+ throttlingFailed.put(type, new AtomicLong(0));
+ operationTotal.put(type, new AtomicLong(0));
+ }
+ writeKBCharged.set(0);
+ readKBCharged.set(0);
+
+ // Reset metrics by fetching them with the same watcher name.
+ MetricRegistry.defaultRegistry.getAllMetricFactory(WATCHER_NAME);
+ }
+
+ private void smokeTest(String tableName) {
+
+ try {
+
+ MapValue key = new MapValue().put("id", 10);
+
+ MapValue value = new MapValue().put("id", 10).put("name", "jane");
+
+ TableResult tres;
+ /* DDL language error */
+ try {
+ tres = tableOperation(handle,
+ "adrop table if exists " + tableName,
+ null, 20000);
+ fail("Expected IAE");
+ } catch (IllegalArgumentException iae) {
+ }
+ requestTotal.get(OperationType.TABLE_REQUEST).incrementAndGet();
+ userFailed.get(OperationType.TABLE_REQUEST).incrementAndGet();
+
+ /* drop a table */
+ tres = tableOperation(handle,
+ "drop table if exists " +
+ tableName,
+ null, TableResult.State.DROPPED, 40000);
+ requestTotal.get(OperationType.TABLE_REQUEST).incrementAndGet();
+ operationTotal.get(OperationType.TABLE_REQUEST).incrementAndGet();
+ assertNotNull(tres.getTableName());
+
+ /* Create a table */
+ tres = tableOperation(
+ handle,
+ "create table if not exists " + tableName + "(id integer, " +
+ "name string, primary key(id))",
+ new TableLimits(1000, 500, 50),
+ TableResult.State.ACTIVE,
+ 30000);
+ requestTotal.get(OperationType.TABLE_REQUEST).incrementAndGet();
+ operationTotal.get(OperationType.TABLE_REQUEST).incrementAndGet();
+ /*
+ * TODO
+ * There is a loop to get and wait table status, so we don't know
+ * the exact get table request count.
+ */
+ requestTotal.get(OperationType.GET_TABLE).incrementAndGet();
+ operationTotal.get(OperationType.GET_TABLE).incrementAndGet();
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ /* Create an index */
+ tres = tableOperation(
+ handle,
+ "create index if not exists Name on " + tableName + "(name)",
+ null,
+ TableResult.State.ACTIVE,
+ 50000);
+ requestTotal.get(OperationType.TABLE_REQUEST).incrementAndGet();
+ operationTotal.get(OperationType.TABLE_REQUEST).incrementAndGet();
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ /* GetTableRequest for table that doesn't exist */
+ try {
+ GetTableRequest getTable =
+ new GetTableRequest()
+ .setTableName("not_a_table");
+ tres = handle.getTable(getTable);
+ fail("Table should not be found");
+ } catch (TableNotFoundException tnfe) {}
+ requestTotal.get(OperationType.GET_TABLE).incrementAndGet();
+ userFailed.get(OperationType.GET_TABLE).incrementAndGet();
+
+ /* list tables */
+ ListTablesRequest listTables = new ListTablesRequest();
+
+ /* ListTablesRequest returns ListTablesResult */
+ ListTablesResult lres = handle.listTables(listTables);
+ assertNotNull(lres.toString());
+ requestTotal.get(OperationType.LIST_TABLES).incrementAndGet();
+ operationTotal.get(OperationType.LIST_TABLES).incrementAndGet();
+
+ /* Get indexes */
+ GetIndexesRequest getIndexes = new GetIndexesRequest()
+ .setTableName(tableName);
+
+ /* GetIndexesRequest returns GetIndexesResult */
+ GetIndexesResult giRes = handle.getIndexes(getIndexes);
+ if (testV3) {
+ /*
+ * TODO: GetIndexesResult.toString() in v5.4 might need an
+ * enhancement to handle a null String[] fieldTypes. Otherwise,
+ * when forcing the V3 protocol, giRes.toString() causes an NPE.
+ */
+ assertNotNull(giRes);
+ } else {
+ assertNotNull(giRes.toString());
+ }
+ requestTotal.get(OperationType.GET_INDEXES).incrementAndGet();
+ operationTotal.get(OperationType.GET_INDEXES).incrementAndGet();
+
+ /* PUT */
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName(tableName);
+
+ PutResult res = handle.put(putRequest);
+ requestTotal.get(OperationType.PUT).incrementAndGet();
+ operationTotal.get(OperationType.PUT).incrementAndGet();
+ readKBCharged.addAndGet(res.getReadUnitsInternal());
+ writeKBCharged.addAndGet(res.getWriteUnitsInternal());
+ assertNotNull("Put failed", res.getVersion());
+ assertWriteKB(res);
+ assertNull(res.getExistingValue());
+ assertNull(res.getExistingVersion());
+
+ /* put a few more. set TTL to test that path */
+ putRequest.setTTL(TimeToLive.ofHours(2));
+ for (int i = 20; i < 30; i++) {
+ value.put("id", i);
+ res = handle.put(putRequest);
+ requestTotal.get(OperationType.PUT).incrementAndGet();
+ operationTotal.get(OperationType.PUT).incrementAndGet();
+ readKBCharged.addAndGet(res.getReadUnitsInternal());
+ writeKBCharged.addAndGet(res.getWriteUnitsInternal());
+ }
+
+ /*
+ * Test ReturnRow for simple put of a row that exists. 2 cases:
+ * 1. unconditional (will return info)
+ * 2. if absent (will return info)
+ */
+ value.put("id", 20);
+ putRequest.setReturnRow(true);
+ PutResult pr = handle.put(putRequest);
+ requestTotal.get(OperationType.PUT).incrementAndGet();
+ operationTotal.get(OperationType.PUT).incrementAndGet();
+ readKBCharged.addAndGet(pr.getReadUnitsInternal());
+ writeKBCharged.addAndGet(pr.getWriteUnitsInternal());
+ assertNotNull(pr.getVersion()); // success
+ assertNotNull(pr.getExistingVersion());
+ assertNotNull(pr.getExistingValue());
+ assertTrue(pr.getExistingModificationTime() != 0);
+ assertReadKB(pr);
+
+ putRequest.setOption(Option.IfAbsent);
+ pr = handle.put(putRequest);
+ requestTotal.get(OperationType.PUT).incrementAndGet();
+ operationTotal.get(OperationType.PUT).incrementAndGet();
+ readKBCharged.addAndGet(pr.getReadUnitsInternal());
+ writeKBCharged.addAndGet(pr.getWriteUnitsInternal());
+ assertNull(pr.getVersion()); // failure
+ assertNotNull(pr.getExistingVersion());
+ assertNotNull(pr.getExistingValue());
+ assertTrue(pr.getExistingModificationTime() != 0);
+ assertReadKB(pr);
+
+ /* clean up */
+ putRequest.setReturnRow(false);
+ putRequest.setOption(null);
+
+ /* GET */
+ GetRequest getRequest = new GetRequest()
+ .setKey(key)
+ .setTableName(tableName);
+
+ GetResult res1 = handle.get(getRequest);
+ requestTotal.get(OperationType.GET).incrementAndGet();
+ operationTotal.get(OperationType.GET).incrementAndGet();
+ readKBCharged.addAndGet(res1.getReadUnitsInternal());
+ writeKBCharged.addAndGet(res1.getWriteUnitsInternal());
+ assertNotNull("Get failed", res1.getJsonValue());
+ assertReadKB(res1);
+
+ /* DELETE */
+ DeleteRequest delRequest = new DeleteRequest()
+ .setKey(key)
+ .setTableName(tableName);
+
+ DeleteResult del = handle.delete(delRequest);
+ requestTotal.get(OperationType.DELETE).incrementAndGet();
+ operationTotal.get(OperationType.DELETE).incrementAndGet();
+ readKBCharged.addAndGet(del.getReadUnitsInternal());
+ writeKBCharged.addAndGet(del.getWriteUnitsInternal());
+ assertTrue("Delete failed", del.getSuccess());
+ assertWriteKB(del);
+
+ /* GET -- no row, it was removed above */
+ getRequest.setTableName(tableName);
+ res1 = handle.get(getRequest);
+ requestTotal.get(OperationType.GET).incrementAndGet();
+ operationTotal.get(OperationType.GET).incrementAndGet();
+ readKBCharged.addAndGet(res1.getReadUnitsInternal());
+ writeKBCharged.addAndGet(res1.getWriteUnitsInternal());
+ assertNull(res1.getValue());
+ assertReadKB(res1);
+
+ /* MULTIDELETE */
+ MultiDeleteRequest multiDelRequest = new MultiDeleteRequest();
+ multiDelRequest.setKey(new MapValue().put("id", 21));
+ multiDelRequest.setTableName(tableName);
+ MultiDeleteResult multiRes = handle.multiDelete(multiDelRequest);
+ requestTotal.get(OperationType.MULTI_DELETE).incrementAndGet();
+ operationTotal.get(OperationType.MULTI_DELETE).incrementAndGet();
+ readKBCharged.addAndGet(multiRes.getReadUnitsInternal());
+ writeKBCharged.addAndGet(multiRes.getWriteUnitsInternal());
+ assertWriteKB(multiRes);
+
+ /* MULTIDELETE -- no table */
+ multiDelRequest.setKey(new MapValue().put("id", 0));
+ multiDelRequest.setTableName("InvalidTable");
+ try {
+ handle.multiDelete(multiDelRequest);
+ fail("Attempt to access missing table should have thrown");
+ } catch (TableNotFoundException nse) {
+ // success
+ }
+ requestTotal.get(OperationType.MULTI_DELETE).incrementAndGet();
+ userFailed.get(OperationType.MULTI_DELETE).incrementAndGet();
+
+ /* GET -- no table */
+ try {
+ getRequest.setTableName("foo");
+ res1 = handle.get(getRequest);
+ fail("Attempt to access missing table should have thrown");
+ } catch (TableNotFoundException nse) {
+ // success
+ }
+ requestTotal.get(OperationType.GET).incrementAndGet();
+ userFailed.get(OperationType.GET).incrementAndGet();
+
+ /* PUT -- invalid row -- this will throw */
+ try {
+ value.remove("id");
+ value.put("not_a_field", 1);
+ res = handle.put(putRequest);
+ fail("Attempt to put invalid row should have thrown");
+ } catch (IllegalArgumentException iae) {
+ // success
+ }
+ requestTotal.get(OperationType.PUT).incrementAndGet();
+ userFailed.get(OperationType.PUT).incrementAndGet();
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail("Exception in test: " + e);
+ }
+ }
+
+ /*
+ * Check collected metrics are expected after executing some SC requests
+ * and data requests.
+ */
+ @SuppressWarnings("unchecked")
+ private void checkMonitorData() {
+        for (MetricFamilySamples<?> metricFamily :
+ MetricRegistry.defaultRegistry.getAllMetricFactory(
+ WATCHER_NAME)) {
+ final String metricName = metricFamily.getName();
+ if (metricName.equals(ACTIVE_REQUEST_NAME)) {
+ assertEquals(StatsData.Type.LONG_GAUGE, metricFamily.getType());
+ assertEquals(0, metricFamily.getLabelNames().size());
+                for (Sample<?> s : metricFamily.getSamples()) {
+                    Sample sample = (Sample) s;
+ assertEquals(0, sample.labelValues.size());
+ assertEquals(0, sample.dataValue.getGaugeVal());
+ }
+ } else if (metricName.equals(REQUEST_TOTAL_NAME)) {
+ assertEquals(StatsData.Type.COUNTER, metricFamily.getType());
+ assertArrayEquals(REQUEST_LABELS,
+ metricFamily.getLabelNames().toArray());
+                for (Sample<?> s : metricFamily.getSamples()) {
+                    Sample sample = (Sample) s;
+ assertEquals(REQUEST_LABELS.length,
+ sample.labelValues.size());
+ OperationType opType = getOperationType(sample.labelValues);
+ if (opType.equals(OperationType.GET_TABLE)) {
+ /*
+ * TODO
+ * There is a loop to get and wait table status, so we
+ * don't know the exact get table request count.
+ */
+ assertTrue(sample.labelValues + " reqTotal error",
+ sample.dataValue.getCount() >=
+ operationTotal.get(opType).get());
+ } else {
+ assertEquals(sample.labelValues + " reqTotal error",
+ requestTotal.get(opType).get(),
+ sample.dataValue.getCount());
+ }
+ }
+ } else if (metricName.equals(REQUEST_LATENCY_NAME)) {
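+                /*
+                 * For each operation type the successful request count is
+                 * requestTotal minus the server, throttling and user
+                 * failures recorded by the test; GET_TABLE is checked with
+                 * >= because the table-wait loop issues extra requests.
+                 */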
+                assertEquals(StatsData.Type.PERF_QUANTILE,
+                             metricFamily.getType());
+ assertArrayEquals(REQUEST_LABELS,
+ metricFamily.getLabelNames().toArray());
+                for (Sample<?> s : metricFamily.getSamples()) {
+                    Sample sample = (Sample) s;
+ assertEquals(REQUEST_LABELS.length,
+ sample.labelValues.size());
+ assertEquals(0, sample.dataValue.getOverflowCount());
+ assertTrue("95th can't be negative",
+ sample.dataValue.get99th() >= 0);
+ assertTrue("99th can't be negative",
+ sample.dataValue.get95th() >= 0);
+ assertTrue("min can't be negative",
+ sample.dataValue.getMin() >= 0);
+ assertTrue("max can't be negative",
+ sample.dataValue.getMax() >=
+ sample.dataValue.getMin());
+ OperationType opType = getOperationType(sample.labelValues);
+ if (opType.equals(OperationType.GET_TABLE)) {
+ /*
+ * TODO
+ * There is a loop to get and wait table status, so we
+ * don't know the exact get table request count.
+ */
+ assertTrue(sample.labelValues + " opsTotal error",
+ sample.dataValue.getOperationCount() >=
+ operationTotal.get(opType).get());
+ assertTrue(sample.labelValues + " requestCount error",
+ sample.dataValue.getRequestCount() >=
+ requestTotal.get(opType).get() -
+ serverFailed.get(opType).get() -
+ throttlingFailed.get(opType).get() -
+ userFailed.get(opType).get());
+ } else {
+ assertEquals(sample.labelValues + " opsTotal error",
+ operationTotal.get(opType).get(),
+ sample.dataValue.getOperationCount());
+ assertEquals(sample.labelValues + " requestCount error",
+ requestTotal.get(opType).get() -
+ serverFailed.get(opType).get() -
+ throttlingFailed.get(opType).get() -
+ userFailed.get(opType).get(),
+ sample.dataValue.getRequestCount());
+ }
+ }
+ } else if (metricName.equals(DATA_RESPONSE_READ_SIZE_NAME)) {
+ assertEquals(StatsData.Type.SIZE_QUANTILE,
+ metricFamily.getType());
+ assertEquals(0, metricFamily.getLabelNames().size());
+                for (Sample<?> s : metricFamily.getSamples()) {
+                    Sample sample = (Sample) s;
+ assertEquals(0, sample.labelValues.size());
+ assertEquals(readKBCharged.get(),
+ sample.dataValue.getSum());
+                    for (double perfVal : sample.dataValue.getQuantileValues()) {
+ assertTrue("perf value can't be negative",
+ perfVal >= 0);
+ }
+ }
+ } else if (metricName.equals(DATA_RESPONSE_WRITE_SIZE_NAME)) {
+ assertEquals(StatsData.Type.SIZE_QUANTILE,
+ metricFamily.getType());
+ assertEquals(0, metricFamily.getLabelNames().size());
+                for (Sample<?> s : metricFamily.getSamples()) {
+                    Sample sample = (Sample) s;
+ assertEquals(0, sample.labelValues.size());
+ assertEquals(writeKBCharged.get(),
+ sample.dataValue.getSum());
+                    for (double perfVal : sample.dataValue.getQuantileValues()) {
+ assertTrue("perf value can't be negative",
+ perfVal >= 0);
+ }
+ }
+ } else if (metricName.equals(REQUEST_SERVER_FAILED_NAME)) {
+ assertEquals(StatsData.Type.COUNTER, metricFamily.getType());
+ assertArrayEquals(REQUEST_LABELS,
+ metricFamily.getLabelNames().toArray());
+                for (Sample<?> s : metricFamily.getSamples()) {
+                    Sample sample = (Sample) s;
+ assertEquals(REQUEST_LABELS.length,
+ sample.labelValues.size());
+ OperationType opType = getOperationType(sample.labelValues);
+ assertEquals(serverFailed.get(opType).get(),
+ sample.dataValue.getCount());
+ }
+ } else if (metricName.equals(REQUEST_THROTTLING_FAILED_NAME)) {
+ assertEquals(StatsData.Type.COUNTER, metricFamily.getType());
+ assertArrayEquals(REQUEST_LABELS,
+ metricFamily.getLabelNames().toArray());
+                for (Sample<?> s : metricFamily.getSamples()) {
+                    Sample sample = (Sample) s;
+ assertEquals(REQUEST_LABELS.length,
+ sample.labelValues.size());
+ OperationType opType = getOperationType(sample.labelValues);
+ assertEquals(throttlingFailed.get(opType).get(),
+ sample.dataValue.getCount());
+ }
+ } else if (metricName.startsWith(KVHandleStats.KV_HANDLE_NAME)) {
+ /* TODO: check kvstore handle metrics? */
+ } else {
+ fail("unkown metric name: " + metricName);
+ }
+ }
+ }
+
+ /*
+ * Check and convert operation label values to OperationType.
+ */
+    private OperationType getOperationType(List<String> opLabelValues) {
+        assertEquals(1, opLabelValues.size());
+        for (OperationType type : OperationType.values()) {
+            if (type.getValue()[0].equals(opLabelValues.get(0))) {
+ return type;
+ }
+ }
+ fail("Unknown label values: " + opLabelValues.get(0));
+ return null;
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiDeleteTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiDeleteTest.java
new file mode 100644
index 00000000..5ae4e73d
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiDeleteTest.java
@@ -0,0 +1,306 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import oracle.nosql.driver.FieldRange;
+import oracle.nosql.driver.TableNotFoundException;
+import oracle.nosql.driver.ops.MultiDeleteRequest;
+import oracle.nosql.driver.ops.MultiDeleteResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.IntegerValue;
+import oracle.nosql.driver.values.MapValue;
+
+import org.junit.Test;
+
+/**
+ * Tests the MultiDelete operation.
+ */
+public class MultiDeleteTest extends ProxyTestBase {
+ final static int WRITE_KB_LIMIT = rlimits.getRequestWriteKBLimit();
+
+ final static String tableName = "multiDeleteTable";
+
+ /* Create a table */
+ final static String createTableDDL =
+ "CREATE TABLE IF NOT EXISTS multiDeleteTable(" +
+ "sid INTEGER, id INTEGER, name STRING, longString STRING, " +
+ "PRIMARY KEY(SHARD(sid), id))";
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ tableOperation(handle, createTableDDL,
+ new TableLimits(10000, 10000, 50),
+ TableResult.State.ACTIVE, 10000);
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ deleteTable(tableName);
+ super.tearDown();
+ }
+
+ /**
+     * Tests the success cases.
+ */
+ @Test
+ public void testMultiDelete() {
+ final int numMajor = 5;
+ final int numPerMajor = 100;
+ final int recordKB = 2;
+
+ loadRows(numMajor, numPerMajor, recordKB);
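+        /*
+         * loadRows creates numMajor * numPerMajor rows (5 x 100 here),
+         * each about recordKB (2KB) in size, so every shard key below
+         * starts with 100 rows to delete.
+         */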
+
+ /* Deletes rows with the shard key {"sid":0}, maxWriteKB = 0 */
+ int maxWriteKB = 0;
+ MapValue pKey = new MapValue().put("sid", 0);
+ runMultiDelete(pKey, null, maxWriteKB, numPerMajor, recordKB);
+
+ /* Deletes rows with shard key {"sid":1}, maxWriteKB = 10 */
+ maxWriteKB = 10;
+ pKey.put("sid", 1);
+ runMultiDelete(pKey, null, maxWriteKB, numPerMajor, recordKB);
+
+ /* Deletes rows with shard key {"sid":3}, maxWriteKB = 51 */
+ maxWriteKB = 51;
+ pKey.put("sid", 2);
+ runMultiDelete(pKey, null, maxWriteKB, numPerMajor, recordKB);
+
+ /*
+ * Deletes rows with shard key {"sid":3} and "id" < 10,
+ * maxWriteKB = 8.
+ */
+ FieldRange range;
+ maxWriteKB = 8;
+ range = new FieldRange("id").setEnd(new IntegerValue(10), false);
+ pKey.put("sid", 3);
+ runMultiDelete(pKey, range, maxWriteKB, 10, recordKB);
+
+ /*
+ * Deletes rows with shard key {"sid":3} and 10 <= "id" <= 19,
+ * maxWriteKB = 18
+ */
+ maxWriteKB = 18;
+ range = new FieldRange("id")
+ .setStart(new IntegerValue(10), true)
+ .setEnd(new IntegerValue(19), true);
+ runMultiDelete(pKey, range, maxWriteKB, 10, recordKB);
+
+ /*
+ * Deletes rows with shard key {"sid":3} and 20 <= "id" < 31,
+ * maxWriteKB = 20
+ */
+ maxWriteKB = 20;
+ range = new FieldRange("id")
+ .setStart(new IntegerValue(20), true)
+ .setEnd(new IntegerValue(31), false);
+ runMultiDelete(pKey, range, maxWriteKB, 11, recordKB);
+
+ /*
+ * Deletes rows with shard key {"sid":3} and "id" >= 31,
+ * maxWriteKB = 25
+ */
+ maxWriteKB = 25;
+ range = new FieldRange("id").setStart(new IntegerValue(31), true);
+ runMultiDelete(pKey, range, maxWriteKB, numPerMajor - 31, recordKB);
+ runMultiDelete(pKey, range, maxWriteKB, 0, recordKB);
+
+ /*
+ * Deletes rows with shard key {"sid":4} and 10 <= "id" <= 19,
+ * maxWriteKB = 0
+ */
+ maxWriteKB = 0;
+ pKey.put("sid", 4);
+ range = new FieldRange("id").setStart(new IntegerValue(10), true)
+ .setEnd(new IntegerValue(19), true);
+ runMultiDelete(pKey, range, maxWriteKB, 10, recordKB);
+ }
+
+    /* Test MultiDelete failures due to invalid arguments */
+ @Test
+ public void testInvalidArgument() {
+
+ MultiDeleteRequest req = new MultiDeleteRequest();
+
+ /* Missing tableName */
+ execMultiDeleteExpIAE(req);
+
+ /* Missing a key */
+ req.setTableName(tableName);
+ execMultiDeleteExpIAE(req);
+
+ /* Invalid primary key */
+ req.setKey(new MapValue().put("name", 0));
+ execMultiDeleteExpIAE(req);
+
+ /* Missing shard field from the primary key */
+ req.setKey(new MapValue().put("id", 0));
+ execMultiDeleteExpIAE(req);
+
+ /* Invalid FieldRange */
+ req.setKey(new MapValue().put("sid", 0));
+ FieldRange range = new FieldRange("name")
+ .setStart(new IntegerValue(1), false);
+ req.setRange(range);
+ execMultiDeleteExpIAE(req);
+
+ /* Invalid FieldRange */
+ range = new FieldRange("id")
+ .setStart(new IntegerValue(1), false)
+ .setEnd(new IntegerValue(0), true);
+ req.setRange(range);
+ execMultiDeleteExpIAE(req);
+
+ /* maxWriteKB should be >= 0 */
+ try {
+ req.setMaxWriteKB(-1);
+ fail("Expect to catch IAE but not");
+ } catch (IllegalArgumentException ignored) {
+ }
+
+ /* maxWriteKB can not exceed WRITE_KB_LIMIT */
+ req.setMaxWriteKB(WRITE_KB_LIMIT + 1);
+ execMultiDeleteExpIAE(req);
+
+ /* Table not found */
+ req.setTableName("InvalidTable");
+ try {
+ execMultiDeleteExpIAE(req);
+ } catch (TableNotFoundException ignored) {
+ }
+ }
+
+ /**
+     * Runs a MultiDelete request and verifies its result.
+ */
+ private void runMultiDelete(MapValue pKey,
+ FieldRange range,
+ int maxWriteKB,
+ int expNumDeleted,
+ int recordKB) {
+
+ int nDeleted = 0;
+ int totalReadKB = 0;
+ int totalWriteKB = 0;
+ int totalReadUnits = 0;
+ int totalWriteUnits = 0;
+
+ byte[] continuationKey = null;
+
+ final int minRead = getMinRead();
+ int expWriteKB = 0;
+ int expReadKB = 0;
+ int writeKBLimit = (maxWriteKB != 0) ? maxWriteKB : WRITE_KB_LIMIT;
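+        /*
+         * Each iteration deletes at most writeKBLimit KB of rows; the row
+         * that crosses the limit may push a batch slightly over, so the
+         * per-batch check below allows up to writeKBLimit + recordKB.
+         */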
+
+        while (true) {
+ MultiDeleteResult ret = execMultiDelete(pKey, continuationKey,
+ range, maxWriteKB);
+ nDeleted += ret.getNumDeletions();
+ totalReadKB += ret.getReadKB();
+ totalWriteKB += ret.getWriteKB();
+ totalReadUnits += ret.getReadUnits();
+ totalWriteUnits += ret.getWriteUnits();
+
+ if (!onprem) {
+ if (ret.getNumDeletions() > 0) {
+ assertTrue(ret.getWriteKB() > 0 && ret.getReadKB() > 0);
+ } else {
+ expReadKB += minRead;
+ }
+ }
+
+ if (ret.getContinuationKey() == null) {
+ break;
+ }
+
+ if (!onprem) {
+ assertTrue(ret.getWriteUnits() >= writeKBLimit &&
+ ret.getWriteUnits() < writeKBLimit + recordKB);
+ }
+ }
+
+ assertTrue(nDeleted == expNumDeleted);
+
+ if (onprem) {
+ return;
+ }
+
+ expWriteKB += nDeleted * recordKB;
+ expReadKB += nDeleted * minRead;
+
+ assertReadKB(expReadKB, totalReadKB, totalReadUnits,
+ true /* isAbsolute */);
+
+ assertWriteKB(expWriteKB, totalWriteKB, totalWriteUnits);
+ }
+
+ /**
+ * Executes the MultiDelete request.
+ */
+ private MultiDeleteResult execMultiDelete(MapValue key,
+ byte[] continuationKey,
+ FieldRange range,
+ int maxWriteKB) {
+
+ MultiDeleteRequest mdReq = new MultiDeleteRequest()
+ .setTableName(tableName)
+ .setKey(key)
+ .setContinuationKey(continuationKey)
+ .setRange(range)
+ .setMaxWriteKB(maxWriteKB);
+
+ return handle.multiDelete(mdReq);
+ }
+
+ /**
+     * Executes the MultiDelete request, expecting an IllegalArgumentException.
+ */
+ private void execMultiDeleteExpIAE(MultiDeleteRequest mdReq) {
+ try {
+ handle.multiDelete(mdReq);
+ fail("Expect to catch IAE but not");
+ } catch (IllegalArgumentException ignored) {
+ }
+ }
+
+ private void loadRows(int numMajor, int numPerMajor, int nKB) {
+
+ MapValue value = new MapValue();
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName(tableName);
+
+ /* Load rows */
+ final String longString = genString((nKB - 1) * 1024);
+ for (int i = 0; i < numMajor; i++) {
+ value.put("sid", i);
+ for (int j = 0; j < numPerMajor; j++) {
+ value.put("id", j);
+ value.put("name", "name_" + i + "_" + j);
+ value.put("longString", longString);
+ PutResult res = handle.put(putRequest);
+ assertNotNull("Put failed", res.getVersion());
+ }
+ }
+ }
+
+ private String genString(int len) {
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < len; i++) {
+ sb.append((char)('A' + i % 26));
+ }
+ return sb.toString();
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiRegionTableTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiRegionTableTest.java
new file mode 100644
index 00000000..4c8533e8
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/MultiRegionTableTest.java
@@ -0,0 +1,675 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.MultiDeleteRequest;
+import oracle.nosql.driver.ops.MultiDeleteResult;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PreparedStatement;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutRequest.Option;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryIterableResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.WriteMultipleRequest;
+import oracle.nosql.driver.ops.WriteMultipleResult;
+import oracle.kv.Consistency;
+import oracle.kv.KVStore;
+import oracle.kv.KVStoreConfig;
+import oracle.kv.KVStoreFactory;
+import oracle.kv.Value;
+import oracle.kv.ValueVersion;
+import oracle.kv.impl.api.table.FieldDefImpl;
+import oracle.kv.impl.api.table.FieldValueImpl;
+import oracle.kv.impl.api.table.PrimaryKeyImpl;
+import oracle.kv.impl.api.table.RowImpl;
+import oracle.kv.impl.api.table.TableAPIImpl;
+import oracle.kv.impl.api.table.TableImpl;
+import oracle.kv.impl.api.table.TablePath;
+import oracle.kv.table.PrimaryKey;
+import oracle.kv.table.ReadOptions;
+import oracle.nosql.driver.Version;
+import oracle.nosql.driver.values.FieldValue;
+import oracle.nosql.driver.values.IntegerValue;
+import oracle.nosql.driver.values.LongValue;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.driver.values.NumberValue;
+import oracle.nosql.driver.values.StringValue;
+
+public class MultiRegionTableTest extends ProxyTestBase {
+ private static final ReadOptions readOptions =
+ new ReadOptions(Consistency.ABSOLUTE, 0, null);
+
+ private static KVStore store;
+ private static TableAPIImpl tableAPI;
+
+ private static final String MR_TABLE = "mrtable";
+
+ private final TableLimits limits = new TableLimits(100, 100, 1);
+ private final int WAIT_MS = 10000;
+
+ @BeforeClass
+ public static void staticSetUp()
+ throws Exception {
+
+ assumeTrue("Skipping MultiRegionTableTest if run in onprem mode",
+ !Boolean.getBoolean(ONPREM_PROP));
+
+ System.setProperty(TEST_MRTABLE_PROP, MR_TABLE);
+ ProxyTestBase.staticSetUp();
+
+ if (!cloudRunning) {
+ store = getStore(false /* excludeTombstone */);
+ tableAPI = (TableAPIImpl)store.getTableAPI();
+ }
+ }
+
+ @Test
+ public void testBasicOps() {
+ /* local proxy test only */
+ assumeTrue("Skipping testBasicOps in minicloud test", !cloudRunning);
+ runBasicOps(MR_TABLE, true);
+ runBasicOps("singleton", false);
+ }
+
+ public void runBasicOps(String tableName, boolean isMRTable) {
+ String tableDdl = "CREATE TABLE IF NOT EXISTS " + tableName +
+ "(sid INTEGER, " +
+ " id INTEGER, " +
+ " s STRING, " +
+ " j JSON, " +
+ " PRIMARY KEY(SHARD(sid), id))";
+
+ TableResult tret = tableOperation(handle, tableDdl, limits, WAIT_MS);
+ String kvTableName = getKVTableName(tret);
+
+ int sid = 0;
+ int id = 1;
+ MapValue row = createRow(sid, id);
+ Version ver;
+
+ /* put */
+ ver = doPut(tableName, row, null /* Option */, null /* matchVersion */);
+ assertNotNull(ver);
+ checkRow(row, tableName, kvTableName);
+
+ /* putIfPresent */
+ ver = doPut(tableName, row, Option.IfPresent, null /* matchVersion */);
+ assertNotNull(ver);
+ checkRow(row, tableName, kvTableName);
+
+ /* putIfVersion */
+ ver = doPut(tableName, row, Option.IfVersion, ver);
+ assertNotNull(ver);
+ checkRow(row, tableName, kvTableName);
+
+ /* delete */
+ boolean deleted = doDelete(tableName, row, null /* matchVersion */);
+ assertTrue(deleted);
+ checkRowDeleted(row, isMRTable, tableName, kvTableName);
+
+ /* putIfAbsent */
+ ver = doPut(tableName, row, Option.IfAbsent, null /* matchVersion */);
+ assertNotNull(ver);
+ checkRow(row, tableName, kvTableName);
+
+ /* deleteIfVersion */
+ deleted = doDelete(tableName, row, ver);
+ assertTrue(deleted);
+ checkRowDeleted(row, isMRTable, tableName, kvTableName);
+
+ /* writeMultiple */
+ sid++;
+ int numOps = 3;
+        List<MapValue> rows = new ArrayList<>();
+ for (int i = 0; i < numOps; i++) {
+ rows.add(createRow(sid, i));
+ }
+ doWriteMultiple(tableName, rows, true /* putOp */);
+ for (MapValue val : rows) {
+ checkRow(val, tableName, kvTableName);
+ }
+ doWriteMultiple(tableName, rows, false /* putOp */);
+ for (MapValue val : rows) {
+ checkRowDeleted(val, isMRTable, tableName, kvTableName);
+ }
+
+ /* multiDelete */
+ sid++;
+ rows.clear();
+ for (int i = 0; i < numOps; i++) {
+ row = createRow(sid, i);
+ assertNotNull(doPut(tableName, row, null, null));
+ rows.add(createRow(sid, i));
+ }
+
+ MapValue key = new MapValue().put("sid", sid);
+ int ndel = doMultiDelete(tableName, key);
+ assertEquals(numOps, ndel);
+ for (MapValue val : rows) {
+ checkRowDeleted(val, isMRTable, tableName, kvTableName);
+ }
+
+ /* query */
+ String query;
+        List<MapValue> results;
+        Map<String, FieldValue> values = new HashMap<>();
+
+ sid++;
+ row = createRow(sid, 0);
+ values.put("$sid", row.get("sid"));
+ values.put("$id", row.get("id"));
+ values.put("$s", row.get("s"));
+ values.put("$j", row.get("j"));
+
+ query = "DECLARE $sid INTEGER; $id INTEGER; $s STRING; $j JSON; " +
+ "INSERT INTO " + tableName +
+ "(sid, id, s, j) VALUES($sid, $id, $s, $j)";
+ results = doQuery(query, values);
+ checkRow(row, tableName, kvTableName);
+
+ FieldValue sval = new StringValue(row.get("s").getString() + "_upd");
+ row.put("s", sval);
+ values.put("$s", sval);
+ values.remove("$j");
+ query = "DECLARE $sid INTEGER; $id INTEGER; $s STRING;" +
+ "UPDATE " + tableName +
+ " SET s = $s WHERE sid = $sid and id = $id";
+ results = doQuery(query, values);
+ assertEquals(1, results.size());
+ checkRow(row, tableName, kvTableName);
+
+ query = "SELECT * FROM " + tableName;
+ results = doQuery(query);
+ assertEquals(1, results.size());
+ assertEquals(row, results.get(0));
+
+ values.remove("$s");
+ values.remove("$j");
+ query = "DECLARE $sid INTEGER; $id INTEGER; DELETE FROM " +
+ tableName + " WHERE sid = $sid and id = $id";
+ results = doQuery(query, values);
+ assertEquals(1, results.size());
+ checkRowDeleted(row, isMRTable, tableName, kvTableName);
+
+ /* query without being prepared */
+ sid++;
+ id = 1;
+ row = createRow(sid, id);
+ assertNotNull(doPut(tableName, row, null, null));
+ query = "DELETE FROM " + tableName + " WHERE sid = " + sid +
+ " and id = " + id;
+ results = doQuery(query);
+ assertEquals(1, results.size());
+ checkRowDeleted(row, isMRTable, tableName, kvTableName);
+
+ dropTable(handle, tableName);
+ }
+
+ @Test
+ public void testCRDT() {
+ runCRDTTest("testCRDT");
+ }
+
+ private void runCRDTTest(String tableName) {
+
+ String tableDdl = "CREATE TABLE IF NOT EXISTS " + tableName +
+ "(sid INTEGER, " +
+ " id INTEGER, " +
+ " s STRING, " +
+ " ci INTEGER AS MR_COUNTER, " +
+ " cn NUMBER AS MR_COUNTER, " +
+ " j JSON(ci AS INTEGER MR_COUNTER, " +
+ " cl as LONG MR_COUNTER), " +
+ " PRIMARY KEY(SHARD(sid), id))";
+
+ TableResult tr = tableOperation(handle, tableDdl, limits, WAIT_MS);
+ String kvTableName = getKVTableName(tr);
+
+ int sid = 0;
+ int id = 1;
+ int step = 2;
+
+ MapValue row = createRow(sid, id);
+ assertNotNull(doPut(tableName, row, null, null));
+
+        List<MapValue> results;
+        Map<String, FieldValue> values = new HashMap<>();
+ values.put("$sid", new IntegerValue(sid));
+ values.put("$id", new IntegerValue(id));
+
+ String query = "DECLARE $sid INTEGER; $id INTEGER; " +
+ "UPDATE " + tableName +
+ "$t SET ci = ci + " + step +
+ ", cn = cn + " + step +
+ ", $t.j.ci = $t.j.ci - " + step +
+ ", $t.j.cl = $t.j.cl + " + step +
+ " WHERE sid = $sid and id = $id " +
+ " RETURNING ci, cn, $t.j.ci as jci, $t.j.cl as jcl";
+ results = doQuery(query, values);
+ assertEquals(1, results.size());
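+        /*
+         * All counters start at 0, so after a single update the expected
+         * values are ci = step = 2, cn = 2, j.ci = -step = -2 and
+         * j.cl = step = 2.
+         */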
+
+ FieldValue ciVal = new IntegerValue(2);
+ FieldValue cnVal = new NumberValue("2");
+ FieldValue jciVal = new IntegerValue(-2);
+ FieldValue jclVal = new LongValue(2);
+
+ MapValue rec = results.get(0);
+ assertEquals(ciVal, rec.get("ci"));
+ assertEquals(cnVal, rec.get("cn"));
+ assertEquals(jciVal, rec.get("jci"));
+ assertEquals(jclVal, rec.get("jcl"));
+
+ row.put("ci", ciVal);
+ row.put("cn", cnVal);
+ row.get("j").asMap().put("ci", jciVal).put("cl", jclVal);
+ checkRow(row, tableName, kvTableName);
+
+ dropTable(handle, tableName);
+ }
+
+ @Test
+ public void testFreezeTable() {
+ /*
+ * Skip this test in local proxy test, because freeze/unfreeze schema
+ * is managed by SC
+ */
+ assumeTrue("Skipping testBasicOps in local test", cloudRunning);
+
+ final String tableName = "testFreezeTable";
+ String freezeDdl = "ALTER TABLE " + tableName + " FREEZE SCHEMA";
+ String unfreezeDdl = "ALTER TABLE " + tableName + " UNFREEZE SCHEMA";
+ TableLimits limits = new TableLimits(100, 100, 1);
+ TableLimits newLimits = new TableLimits(200, 150, 1);
+ TableResult tr;
+ String ddl;
+
+ /*
+ * 0. Create table with schema frozen.
+ */
+ String tableDdl = "CREATE TABLE " + tableName +
+ "(id INTEGER, s STRING, j JSON, PRIMARY KEY(id)) " +
+ "WITH SCHEMA FROZEN";
+ tableOperation(handle, tableDdl, limits, WAIT_MS);
+
+ /* freeze schema of the table already frozen, do nothing */
+ tableOperation(handle, freezeDdl, null, WAIT_MS);
+
+ /*
+ * 1. Test update table after freeze table: cannot alter table schema
+ * but able to update ttl or limits
+ */
+
+ /* altering table schema should fail */
+ ddl = "ALTER TABLE " + tableName + "(ADD i INTEGER)";
+ tableOperation(handle, ddl, null /* limits */, null /* tableName */,
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /* updating ttl or limits should succeed */
+ ddl = "ALTER TABLE " + tableName + " USING TTL 1 days";
+ tr = tableOperation(handle, ddl, null, WAIT_MS);
+ assertTrue(tr.getSchema().contains("\"ttl\":\"1 DAYS\""));
+
+ ddl = "ALTER TABLE " + tableName + " USING TTL 1 days";
+ tr = tableOperation(handle, null /* statement */, newLimits, tableName,
+ TableResult.State.ACTIVE, WAIT_MS);
+ assertEquals(newLimits.getWriteUnits(),
+ tr.getTableLimits().getWriteUnits());
+
+ /*
+ * 2. Test alter table after unfreeze schema.
+ */
+
+ /* unfreeze schema */
+ tableOperation(handle, unfreezeDdl, null, WAIT_MS);
+ /* unfreeze schema again, do nothing */
+ tableOperation(handle, unfreezeDdl, null, WAIT_MS);
+
+        /* dropping the JSON field should succeed once the schema is unfrozen */
+ ddl = "ALTER TABLE " + tableName + "(DROP j)";
+ tableOperation(handle, ddl, null, WAIT_MS);
+
+ /*
+ * 3. Test cannot freeze table without a JSON field
+ */
+
+ /* freezing table without a JSON field should fail */
+ tableOperation(handle, freezeDdl, null /* limits */,
+ null /* tableName */, TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /* Add a JSON field, freezing table should succeed */
+ ddl = "ALTER TABLE " + tableName + "(ADD j1 JSON)";
+ tableOperation(handle, ddl, null, WAIT_MS);
+ tableOperation(handle, freezeDdl, null, WAIT_MS);
+
+ /*
+ * Creating table with schema frozen but without a JSON field
+ * should fail
+ */
+ ddl = "CREATE TABLE tnojson(id INTEGER, s STRING, PRIMARY KEY(id)) " +
+ "WITH SCHEMA FROZEN";
+ tableOperation(handle, ddl, null /* limits */,
+ null /* tableName */, TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /*
+ * Test freeze table force
+ */
+
+ /*
+ * Create a table without a JSON field and freeze it using
+ * "with schema frozen force"
+ */
+ ddl = "CREATE TABLE tnojson(id INTEGER, s STRING, PRIMARY KEY(id)) " +
+ "WITH SCHEMA FROZEN FORCE";
+ tableOperation(handle, ddl, limits, WAIT_MS);
+
+ /* Alter the TTL of the frozen table */
+ ddl = "ALTER TABLE tnojson USING TTL 3 days";
+        tableOperation(handle, ddl, null, WAIT_MS);
+
+ /* Fail: can't alter table schema */
+ ddl = "ALTER TABLE tnojson (ADD i INTEGER)";
+ tableOperation(handle, ddl, null /* limits */,
+ null /* tableName */, TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /* unfreeze table */
+ ddl = "ALTER TABLE tnojson UNFREEZE SCHEMA";
+ tableOperation(handle, ddl, null, WAIT_MS);
+ /* unfreeze table again, do nothing */
+ tableOperation(handle, ddl, null, WAIT_MS);
+
+ /* The table is mutable now, add a new field */
+ ddl = "ALTER TABLE tnojson (ADD i INTEGER)";
+ tableOperation(handle, ddl, null /* limits */, WAIT_MS);
+
+ /* freeze table using "freeze schema force" */
+ ddl = "ALTER TABLE tnojson FREEZE SCHEMA FORCE";
+ tableOperation(handle, ddl, null /* limits */, WAIT_MS);
+ /* freeze table again, do nothing */
+ tableOperation(handle, ddl, null /* limits */, WAIT_MS);
+
+ /* Fail: can't alter frozen table's schema */
+ ddl = "ALTER TABLE tnojson (DROP i)";
+ tableOperation(handle, ddl, null /* limits */,
+ null /* tableName */, TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+ }
+
+ private void checkRow(MapValue row,
+ String tableName,
+ String kvTableName) {
+
+ MapValue retRow = doGet(tableName, row);
+ assertEquals(row, retRow);
+
+ /* skip checking the raw value if not local test */
+ if (store == null) {
+ return;
+ }
+
+ final int regionId = getRegionId();
+ PrimaryKeyImpl pkey = getKVPrimaryKey(kvTableName, row);
+ Value value = getKVValue(pkey);
+ assertEquals(Value.Format.TABLE_V1, value.getFormat());
+ assertTrue(value.getValue().length > 0);
+
+ TableImpl table = pkey.getTableImpl();
+ if (table.hasSchemaMRCounters()) {
+ RowImpl kvRow = getKVRow(pkey);
+ for (int i = 0; i < table.getFields().size(); i++) {
+ if (table.isPrimKeyAtPos(i)) {
+ continue;
+ }
+
+ FieldDefImpl fdef = table.getFieldDef(i);
+ FieldValueImpl fval;
+ if (fdef.isMRCounter()) {
+ fval = kvRow.get(table.getFields().get(i));
+ checkMRCounterValue(fval, regionId);
+ } else if (fdef.hasJsonMRCounter()) {
+ for (TablePath path : table.getSchemaMRCounterPaths(i)) {
+ fval = kvRow.evaluateScalarPath(path, 0);
+ checkMRCounterValue(fval, regionId);
+ }
+ }
+ }
+ }
+ }
+
+ private void checkMRCounterValue(FieldValueImpl fval, int regionId) {
+ if (fval != null && !fval.isNull()) {
+ assertTrue(fval.isMRCounter());
+ assertTrue(!fval.getMRCounterMap().isEmpty());
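+            /*
+             * Decremented MR_COUNTER values appear to be stored under the
+             * negated region id in the per-region map, so flip the sign
+             * before the lookup.
+             */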
+ if (fval.toString().startsWith("-")) {
+ regionId = -regionId;
+ }
+ assertTrue(fval.getMRCounterMap().containsKey(regionId));
+ }
+ }
+
+ private void checkRowDeleted(MapValue key,
+ boolean isMRTable,
+ String tableName,
+ String kvTableName) {
+ assertNull(doGet(tableName, key));
+
+ /* skip checking the raw value if not local test */
+ if (store == null) {
+ return;
+ }
+
+ PrimaryKeyImpl pkey = getKVPrimaryKey(kvTableName, key);
+ Value value = getKVValue(pkey);
+ if (isMRTable) {
+ checkTombStoneNone(value);
+ } else {
+ assertNull(value);
+ }
+ }
+
+ private void checkTombStoneNone(Value value) {
+ assertEquals(Value.Format.NONE, value.getFormat());
+ assertTrue(value.getValue().length == 0);
+ }
+
+ private PrimaryKeyImpl getKVPrimaryKey(String kvTableName, MapValue key) {
+ TableImpl table = getKVTable(kvTableName);
+ assertNotNull("table not found: " + kvTableName, table);
+ return table.createPrimaryKeyFromJson(key.toJson(), false);
+ }
+
+ private Value getKVValue(PrimaryKeyImpl pkey) {
+ ValueVersion vv = store.get(pkey.getPrimaryKey(false),
+ readOptions.getConsistency(),
+ readOptions.getTimeout(),
+ readOptions.getTimeoutUnit());
+ if (vv != null) {
+ return vv.getValue();
+ }
+ return null;
+ }
+
+ private RowImpl getKVRow(PrimaryKey pkey) {
+ return (RowImpl)tableAPI.get(pkey, readOptions);
+ }
+
+ private TableImpl getKVTable(String kvTableName) {
+ return (TableImpl)tableAPI.getTable(kvTableName);
+ }
+
+ private Version doPut(String tableName,
+ MapValue row,
+ Option option,
+ Version matchVersion) {
+
+ PutRequest req = new PutRequest()
+ .setTableName(tableName)
+ .setOption(option)
+ .setValue(row);
+ if (matchVersion != null) {
+ req.setMatchVersion(matchVersion);
+ }
+
+ PutResult ret = handle.put(req);
+ return ret.getVersion();
+ }
+
+ private MapValue doGet(String tableName, MapValue key) {
+ GetRequest req = new GetRequest()
+ .setTableName(tableName)
+ .setKey(key);
+ GetResult ret = handle.get(req);
+ assertTrue(ret.getReadKB() > 0);
+ return ret.getValue();
+ }
+
+ private boolean doDelete(String tableName,
+ MapValue key,
+ Version matchVersion) {
+
+ DeleteRequest req = new DeleteRequest()
+ .setTableName(tableName)
+ .setKey(key)
+ .setMatchVersion(matchVersion);
+ DeleteResult ret = handle.delete(req);
+ assertTrue(ret.getWriteKB() > 0);
+ return ret.getSuccess();
+ }
+
+    private void doWriteMultiple(String tableName,
+                                 List<MapValue> rows,
+                                 boolean putOp) {
+
+ WriteMultipleRequest req = new WriteMultipleRequest();
+ for (MapValue row : rows) {
+ if (putOp) {
+ req.add(new PutRequest()
+ .setTableName(tableName)
+                    .setValue(row), true /* abortIfUnsuccessful */);
+ } else {
+ req.add(new DeleteRequest()
+ .setTableName(tableName)
+                    .setKey(row), true /* abortIfUnsuccessful */);
+ }
+ }
+
+ WriteMultipleResult ret = handle.writeMultiple(req);
+ assertTrue(ret.getSuccess());
+        assertTrue(ret.getWriteKB() > 0);
+ }
+
+ private int doMultiDelete(String tableName, MapValue key) {
+ MultiDeleteRequest req = new MultiDeleteRequest()
+ .setTableName(tableName)
+ .setKey(key);
+ MultiDeleteResult ret = handle.multiDelete(req);
+ return ret.getNumDeletions();
+ }
+
+    private List<MapValue> doQuery(String query,
+                                   Map<String, FieldValue> values) {
+
+ PrepareRequest prepReq = new PrepareRequest().setStatement(query);
+ PrepareResult prepRet = handle.prepare(prepReq);
+ PreparedStatement pstmt = prepRet.getPreparedStatement();
+
+ if (values != null) {
+            for (Entry<String, FieldValue> e : values.entrySet()) {
+ pstmt.setVariable(e.getKey(), e.getValue());
+ }
+ }
+
+        List<MapValue> results = new ArrayList<>();
+ try (@SuppressWarnings("resource")
+ QueryRequest req = new QueryRequest().setPreparedStatement(pstmt)) {
+ try (QueryIterableResult ret = handle.queryIterable(req)) {
+ for (MapValue row : ret) {
+ results.add(row);
+ }
+ }
+ }
+ return results;
+ }
+
+    private List<MapValue> doQuery(String query) {
+        List<MapValue> results = new ArrayList<>();
+ try (@SuppressWarnings("resource")
+ QueryRequest req = new QueryRequest().setStatement(query)) {
+ try (QueryIterableResult ret = handle.queryIterable(req)) {
+ for (MapValue row : ret) {
+ results.add(row);
+ }
+ }
+ }
+ return results;
+ }
+
+ private MapValue createRow(int sid, int id) {
+ MapValue row = createPrimaryKey(sid, id);
+ row.put("s", "s" + sid + id);
+
+ String json = "{\"cl\":0, \"ci\":0}";
+ FieldValue jval = MapValue.createFromJson(json, null);
+ row.put("j", jval);
+ return row;
+ }
+
+ private MapValue createPrimaryKey(int sid, int id) {
+ MapValue row = new MapValue();
+ row.put("sid", sid);
+ row.put("id", id);
+ return row;
+ }
+
+ private String getKVTableName(TableResult ret) {
+ String tid = ret.getTableId();
+ return (cloudRunning ? tid.replace(".", "_") : tid);
+ }
+
+ private static KVStore getStore(boolean excludeTombstone) {
+ if (kvlite == null) {
+ KVStoreConfig config = new KVStoreConfig(getStoreName(),
+ "localhost:5000");
+ config.setExcludeTombstones(excludeTombstone);
+ config.setEnableTableCache(false);
+ return KVStoreFactory.getStore(config);
+ }
+
+ String hostPort = getHostName() + ":" + getKVPort();
+ KVStoreConfig config = new KVStoreConfig(getStoreName(), hostPort);
+ config.setExcludeTombstones(excludeTombstone);
+ config.setEnableTableCache(false);
+ return KVStoreFactory.getStore(config);
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/NumericTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/NumericTest.java
new file mode 100644
index 00000000..600392aa
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/NumericTest.java
@@ -0,0 +1,535 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.FieldValue;
+import oracle.nosql.driver.values.FieldValue.Type;
+import oracle.nosql.driver.values.JsonOptions;
+import oracle.nosql.driver.values.MapValue;
+
+import org.junit.Test;
+
+public class NumericTest extends ProxyTestBase {
+
+ final String tableName = "numericTest";
+ final String createTableDdl =
+ "create table if not exists numericTest (" +
+ "id integer, " +
+ "i integer, " +
+ "l long, " +
+ "f float," +
+ "d double, " +
+ "n number, " +
+ "primary key(id))";
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+
+ tableOperation(handle, createTableDdl,
+ new TableLimits(20000, 20000, 50),
+ TableResult.State.ACTIVE, 10000);
+ }
+
+ @Override
+ public void tearDown() throws Exception {
+ deleteTable(tableName);
+ super.tearDown();
+ }
+
+ /**
+     * Tests putting numeric values that are parsed from JSON.
+ */
+ @Test
+ public void testPutWithJson() {
+
+ final int intVal = 123456;
+ final long longVal = 987654321012345678L;
+ final float fltVal = 1.234567f;
+ final double dblVal = 9.8765432123456d;
+
+ int[] ints = new int[] {
+ Integer.MIN_VALUE,
+ Integer.MAX_VALUE,
+ 0,
+ intVal,
+ };
+
+ long [] longs = new long[] {
+ Long.MIN_VALUE,
+ Long.MAX_VALUE,
+ 0L,
+ longVal,
+ };
+
+ float[] flts = new float[] {
+ Float.MIN_VALUE,
+ Float.MAX_VALUE,
+ 0.0f,
+ fltVal
+ };
+
+ double[] dbls = new double[] {
+ Double.MIN_VALUE,
+ Double.MAX_VALUE,
+ 0.0d,
+ dblVal
+ };
+
+ BigInteger bint =
+ new BigInteger("98765432109876543210987654321098765432109876543210");
+ BigDecimal[] decs = new BigDecimal[] {
+ new BigDecimal(bint, -1024),
+ new BigDecimal(bint, 1024),
+ BigDecimal.ZERO,
+ BigDecimal.valueOf(longVal)
+ };
+
+ for (int i = 0; i < ints.length; i++) {
+ runPutGetTest(ints[i], longs[i], flts[i], dbls[i], decs[i], false);
+ runPutGetTest(ints[i], longs[i], flts[i], dbls[i], decs[i], true);
+ }
+ }
+
+ private void runPutGetTest(int i, long l, float f, double d, BigDecimal dec,
+ boolean numericAsNumber) {
+
+ final MapValue key = new MapValue().put("id", 1);
+ final MapValue row = new MapValue()
+ .put("id", 1)
+ .put("i", i)
+ .put("l", l)
+ .put("f", f)
+ .put("d", d)
+ .put("n", dec);
+
+ final JsonOptions jsonOpts =
+ new JsonOptions().setNumericAsNumber(numericAsNumber);
+ String jsonStr = row.toJson(jsonOpts);
+
+ PutRequest putReq = new PutRequest()
+ .setTableName(tableName)
+ .setValueFromJson(jsonStr, jsonOpts);
+ PutResult putRet = handle.put(putReq);
+ assertTrue(putRet != null && putRet.getVersion() != null);
+
+ GetRequest getReq = new GetRequest()
+ .setTableName(tableName)
+ .setKey(key);
+ GetResult getRet = handle.get(getReq);
+ assertTrue(getRet != null && getRet.getValue() != null);
+
+ MapValue value = getRet.getValue();
+ assertType(value.get("i"), Type.INTEGER);
+ assertTrue("Wrong value of \"i\"", value.getInt("i") == i);
+
+ assertType(value.get("l"), Type.LONG);
+ assertTrue("Wrong value of \"l\"", value.getLong("l") == l);
+
+ assertType(value.get("f"), Type.DOUBLE);
+ assertTrue("Wrong value of \"f\"", value.getDouble("f") == f);
+
+ assertType(value.get("d"), Type.DOUBLE);
+ assertTrue("Wrong value of \"d\"", value.getDouble("d") == d);
+
+ assertType(value.get("n"), Type.NUMBER);
+ assertTrue("Wrong value of \"n\"",
+ value.getNumber("n").compareTo(dec) == 0);
+ }
+
+ /**
+     * Puts numeric values using other compatible numeric types.
+ */
+ @Test
+ public void testCompatibleTypes() {
+
+ final MapValue value = new MapValue().put("id", 1);
+        Map<String, Object> expValues = new HashMap<>();
+
+ /*
+ * Target KV field type: Integer
+ * Value types: LONG, DOUBLE, NUMBER
+ */
+ String fname = "i";
+
+ /* Use LONG for Integer type */
+ final long longToIntOK = Integer.MAX_VALUE;
+ final long longToIntFail = (long)Integer.MAX_VALUE + 1;
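+        /* (long)Integer.MAX_VALUE + 1 no longer fits in an int */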
+
+ value.put(fname, longToIntOK);
+ expValues.put(fname, (int)longToIntOK);
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ value.put(fname, longToIntFail);
+        putAsOtherNumericTypeTest(value, false);
+
+ /* Use DOUBLE for Integer type */
+ final double doubleToIntOK = 1.2345678E7d;
+ final double doubleToIntFail = -1.1d;
+
+ value.put(fname, doubleToIntOK);
+ expValues.put(fname, (int)doubleToIntOK);
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ value.put(fname, doubleToIntFail);
+        putAsOtherNumericTypeTest(value, false);
+
+ /* Use NUMBER for Integer type */
+ final BigDecimal decimalToIntOK = BigDecimal.valueOf(Integer.MIN_VALUE);
+ final BigDecimal decimalToIntFail = BigDecimal.valueOf(Long.MAX_VALUE);
+
+ value.put(fname, decimalToIntOK);
+ expValues.put(fname, decimalToIntOK.intValue());
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ value.put(fname, decimalToIntFail);
+        putAsOtherNumericTypeTest(value, false);
+
+ /*
+ * Target KV field type: Long
+ * Value types: INTEGER, DOUBLE, NUMBER
+ */
+ expValues.clear();
+ value.remove(fname);
+ fname = "l";
+
+ /* Use INTEGER for Long type */
+ final int intToLongOK = Integer.MAX_VALUE;
+
+ value.put(fname, intToLongOK);
+ expValues.put(fname, (long)intToLongOK);
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ /* Use DOUBLE for Long type */
+ final double doubleToLongOK = 1.234567890123E12d;
+ final double doubleToLongFail = -1.1d;
+
+ value.put(fname, doubleToLongOK);
+ expValues.put(fname, (long)doubleToLongOK);
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ value.put(fname, doubleToLongFail);
+        putAsOtherNumericTypeTest(value, false);
+
+ /* Use NUMBER for Long type */
+ final BigDecimal decimalToLongOK = BigDecimal.valueOf(Long.MAX_VALUE);
+ final BigDecimal decimalToLongFail = new BigDecimal("1234567890.1");
+
+ value.put(fname, decimalToLongOK);
+ expValues.put(fname, decimalToLongOK.longValue());
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ value.put(fname, decimalToLongFail);
+        putAsOtherNumericTypeTest(value, false);
+
+ /*
+ * Target KV field type: Float
+ * Value types: INTEGER, LONG, DOUBLE, NUMBER
+ */
+ expValues.clear();
+ value.remove(fname);
+ fname = "f";
+
+ /* Use INTEGER for Float type */
+ final int intToFloatOK = 16777216;
+ final int intToFloatFail = 16777217;
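+        /*
+         * 16777216 (2^24) is the largest int a float represents exactly;
+         * 2^24 + 1 does not survive an int -> float -> int round trip.
+         */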
+
+ value.put(fname, intToFloatOK);
+ expValues.put(fname, (double)intToFloatOK);
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ value.put(fname, intToFloatFail);
+        putAsOtherNumericTypeTest(value, false);
+
+ /* Use LONG for Float type */
+ final long longToFloatOK = Long.MAX_VALUE;
+ final long longToFloatFail = Long.MAX_VALUE - 1;
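+        /*
+         * Long.MAX_VALUE passes, presumably because casting its float
+         * value (2^63) back to long saturates at Long.MAX_VALUE, making
+         * the round trip value-preserving; MAX_VALUE - 1 rounds to the
+         * same float but does not survive the round trip.
+         */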
+
+ value.put(fname, longToFloatOK);
+ expValues.put(fname, (double)longToFloatOK);
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ value.put(fname, longToFloatFail);
+        putAsOtherNumericTypeTest(value, false);
+
+ /* Use DOUBLE for Float type */
+ final double doubleToFloatOK = -Float.MAX_VALUE;
+ final double doubleToFloatFail = Double.MAX_VALUE;
+
+ value.put(fname, doubleToFloatOK);
+ expValues.put(fname, doubleToFloatOK);
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ value.put(fname, doubleToFloatFail);
+        putAsOtherNumericTypeTest(value, false);
+
+ /* Use NUMBER for Float type */
+ float flt = 1.23456E2f;
+ final BigDecimal decimalToFloatOK = BigDecimal.valueOf(flt);
+ final BigDecimal decimalToFloatFail =
+ BigDecimal.valueOf(Double.MAX_VALUE);
+
+ value.put(fname, decimalToFloatOK);
+ expValues.put(fname, (double)flt);
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ value.put(fname, decimalToFloatFail);
+        putAsOtherNumericTypeTest(value, false);
+
+ /*
+ * Target KV field type: Double
+ * Value types: INTEGER, LONG, NUMBER
+ */
+ expValues.clear();
+ value.remove(fname);
+ fname = "d";
+
+ /* Use INTEGER for Double type */
+ final int intToDoubleOK = Integer.MAX_VALUE;
+
+ value.put(fname, intToDoubleOK);
+ expValues.put(fname, (double)intToDoubleOK);
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ /* Use LONG for Double type */
+ final long longToDoubleOK = Long.MAX_VALUE;
+ final long longToDoubleFail = Long.MAX_VALUE - 1;
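+        /*
+         * Same saturation effect as the float case above: only
+         * Long.MAX_VALUE survives the long -> double -> long round trip.
+         */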
+
+ value.put(fname, longToDoubleOK);
+ expValues.put(fname, (double)longToDoubleOK);
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ value.put(fname, longToDoubleFail);
+        putAsOtherNumericTypeTest(value, false);
+
+ /* Use NUMBER for Double type */
+ double dbl = Double.MAX_VALUE;
+ final BigDecimal decimalToDoubleOK = BigDecimal.valueOf(dbl);
+ final BigDecimal decimalToDoubleFail =
+ BigDecimal.valueOf(Long.MAX_VALUE - 1);
+
+ value.put(fname, decimalToDoubleOK);
+ expValues.put(fname, dbl);
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ value.put(fname, decimalToDoubleFail);
+        putAsOtherNumericTypeTest(value, false);
+
+ /*
+ * Target KV field type: Number
+ * Value types: INTEGER, LONG, DOUBLE
+ */
+ expValues.clear();
+ value.remove(fname);
+ fname = "n";
+
+ /* Use INTEGER for Number type */
+ final int intToNumberOK = Integer.MAX_VALUE;
+ value.put(fname, intToNumberOK);
+ expValues.put(fname, BigDecimal.valueOf(intToNumberOK));
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ /* Use LONG for Number type */
+ final long longToNumberOK = Long.MAX_VALUE;
+ value.put(fname, longToNumberOK);
+ expValues.put(fname, BigDecimal.valueOf(longToNumberOK));
+        putAsOtherNumericTypeTest(value, true, expValues);
+
+ /* Use DOUBLE for Number type */
+ final double doubleToNumberOK = Double.MAX_VALUE;
+ value.put(fname, doubleToNumberOK);
+ expValues.put(fname, BigDecimal.valueOf(doubleToNumberOK));
+        putAsOtherNumericTypeTest(value, true, expValues);
+ }
+
+ /*
+ * Test Get/Delete op with a key parsed from JSON.
+ */
+ @Test
+ public void testGetDeleteWithKeyFromJson() {
+ String tabName = "tableWithNumberKey";
+ String ddl = "create table if not exists " + tabName + "(" +
+ "pk number, " +
+ "str string, " +
+ "primary key(pk))";
+
+ tableOperation(handle, ddl,
+ new TableLimits(1000, 1000, 50),
+ TableResult.State.ACTIVE, 10000);
+
+ JsonOptions optNumericAsNumber =
+ new JsonOptions().setNumericAsNumber(true);
+
+ BigDecimal bd = new BigDecimal("123456789012345678901234567890");
+ /* this used to fail but with driver 5.4.11 it works */
+ runGetDeleteTest(tabName, bd, null, true);
+ runGetDeleteTest(tabName, bd, optNumericAsNumber, true);
+
+ bd = BigDecimal.valueOf(Integer.MAX_VALUE);
+ runGetDeleteTest(tabName, bd, null, true);
+ runGetDeleteTest(tabName, bd, optNumericAsNumber, true);
+
+ bd = BigDecimal.valueOf(Long.MAX_VALUE);
+ runGetDeleteTest(tabName, bd, null, true);
+ runGetDeleteTest(tabName, bd, optNumericAsNumber, true);
+
+ bd = BigDecimal.valueOf(Float.MAX_VALUE);
+ runGetDeleteTest(tabName, bd, null, true);
+ runGetDeleteTest(tabName, bd, optNumericAsNumber, true);
+
+ bd = BigDecimal.valueOf(Double.MAX_VALUE);
+ runGetDeleteTest(tabName, bd, null, true);
+ runGetDeleteTest(tabName, bd, optNumericAsNumber, true);
+ }
+
+ private void runGetDeleteTest(String tname,
+ BigDecimal bd,
+ JsonOptions jsonOpts,
+ boolean expSucceed) {
+
+ /* Put a row */
+ MapValue mapVal = new MapValue()
+ .put("pk", bd)
+ .put("str", "strdata");
+ PutRequest putReq = new PutRequest()
+ .setTableName(tname)
+ .setValue(mapVal);
+ PutResult putRes = handle.put(putReq);
+ assertNotNull(putRes.getVersion());
+
+ mapVal = new MapValue().put("pk", bd);
+ String pkJson = mapVal.toJson(jsonOpts);
+
+ /*
+ * Get the row, the key is parsed from JSON string with the
+ * specified options.
+ */
+ GetRequest getReq = new GetRequest()
+ .setKeyFromJson(pkJson, jsonOpts)
+ .setTableName(tname);
+ GetResult getRes = handle.get(getReq);
+ if (expSucceed) {
+ assertNotNull(getRes.getValue());
+ } else {
+ assertNull(getRes.getValue());
+ }
+
+ /*
+ * Delete the row, the key is parsed from JSON string with the
+ * specified options.
+ */
+ DeleteRequest delReq = new DeleteRequest()
+ .setKeyFromJson(pkJson, jsonOpts)
+ .setTableName(tname);
+ DeleteResult delRes = handle.delete(delReq);
+ assertTrue(expSucceed == delRes.getSuccess());
+ }
+
+    private void putAsOtherNumericTypeTest(MapValue value,
+                                           boolean shouldSucceed) {
+
+        putAsOtherNumericTypeTest(value, shouldSucceed, null);
+ }
+
+    private void putAsOtherNumericTypeTest(MapValue value,
+                                           boolean shouldSucceed,
+                                           Map<String, Object> expValues) {
+
+        runPutAsOtherNumericTypeTest(value, false, shouldSucceed, expValues);
+        runPutAsOtherNumericTypeTest(value, true, shouldSucceed, expValues);
+ }
+
+    private void runPutAsOtherNumericTypeTest(MapValue value,
+                                              boolean numericAsNumber,
+                                              boolean shouldSucceed,
+                                              Map<String, Object> expValues) {
+
+ final JsonOptions jsonOpts =
+ new JsonOptions().setNumericAsNumber(numericAsNumber);
+ final String jsonStr = value.toJson(jsonOpts);
+ PutRequest putReq = new PutRequest()
+ .setTableName(tableName)
+ .setValueFromJson(jsonStr, jsonOpts);
+ try {
+ PutResult putRet = handle.put(putReq);
+ if (shouldSucceed) {
+ assertTrue("Put failed",
+ putRet != null && putRet.getVersion() != null);
+ MapValue key = new MapValue().put("id", 1);
+ GetRequest getReq = new GetRequest()
+ .setTableName(tableName)
+ .setKey(key);
+ GetResult getRet = handle.get(getReq);
+ assertTrue(getRet != null);
+ if (expValues != null) {
+ checkValue(getRet.getValue(), expValues);
+ }
+ } else {
+ fail("Put should have failed");
+ }
+ } catch (Exception ex) {
+ if (shouldSucceed) {
+ fail("Put failed: " + ex.getMessage());
+ }
+ //System.out.println(ex.getMessage());
+ }
+ }
+
+    private void checkValue(MapValue value, Map<String, Object> expValues) {
+
+        for (Entry<String, Object> e : expValues.entrySet()) {
+ String fname = e.getKey();
+ Object fval = e.getValue();
+
+ if (fval instanceof Integer) {
+ FieldValue fieldValue = value.get(fname);
+ assertType(fieldValue, Type.INTEGER);
+ assertTrue(fieldValue.getInt() == (int)fval);
+ } else if (fval instanceof Long) {
+ FieldValue fieldValue = value.get(fname);
+ assertType(fieldValue, Type.LONG);
+ assertTrue(fieldValue.getLong() == (long)fval);
+ } else if (fval instanceof Double) {
+ FieldValue fieldValue = value.get(fname);
+ assertType(fieldValue, Type.DOUBLE);
+ assertTrue(fieldValue.getDouble() == (double)fval);
+ } else if (fval instanceof BigDecimal) {
+ FieldValue fieldValue = value.get(fname);
+ assertType(fieldValue, Type.NUMBER);
+ assertTrue(
+ fieldValue.getNumber().compareTo((BigDecimal)fval) == 0);
+ } else {
+ fail("Unexpected value: " + fval);
+ }
+ }
+ }
+
+ private void assertType(FieldValue value, FieldValue.Type type) {
+ assertTrue("Wrong type. expect " + type + " actual " + value.getType(),
+ value.getType() == type);
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ParallelQueryTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ParallelQueryTest.java
new file mode 100644
index 00000000..e3b81c99
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ParallelQueryTest.java
@@ -0,0 +1,453 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import oracle.nosql.driver.NoSQLHandle;
+import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.NoSQLHandleFactory;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PreparedStatement;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.Result;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.MapValue;
+
+import oracle.nosql.proxy.kv.KVTenantManager;
+import oracle.nosql.proxy.sc.TenantManager;
+import oracle.nosql.proxy.sc.LocalTenantManager;
+import oracle.nosql.proxy.security.AccessCheckerFactory;
+import oracle.nosql.proxy.security.SecureTestUtil;
+
+/**
+ * Test parallel queries. This is a separate test from QueryTest
+ * because it requires use of a multi-shard store to test parallel
+ * indexed queries
+ */
+public class ParallelQueryTest extends ProxyTestBase {
+
+ protected static TenantManager tm;
+ protected static String proxyEndpoint;
+ protected static String parallelStoreName = "ParallelQueryStore";
+
+ @BeforeClass
+ public static void staticSetUp()
+ throws Exception {
+
+ assumeTrue("Skip ParallelQuery in minicloud or cloud test",
+ !Boolean.getBoolean("usemc") &&
+ !Boolean.getBoolean("usecloud"));
+ startKV();
+ startLocalProxy();
+ }
+
+ @AfterClass
+ public static void staticTearDown()
+ throws Exception {
+
+ stopProxy();
+
+ if (kvlite != null) {
+ kvlite.stop(false);
+ }
+
+ cleanupTestDir();
+ }
+
+ private static void startKV() {
+ verbose = Boolean.getBoolean(VERBOSE_PROP);
+ /*
+ * use a different store name to avoid conflicts in topology
+ */
+ kvlite = startKVLite(hostName,
+ parallelStoreName,
+ false, /* don't useThreads */
+ verbose,
+ true, /* multishard true */
+ 0, /* default memory MB */
+ false, /* not secure */
+ getKVPort(), /* default */
+ getPortRange(), /* default */
+ getTestDir());
+ }
+
+ protected static void startLocalProxy() {
+ onprem = Boolean.getBoolean(ONPREM_PROP);
+
+ Properties commandLine = new Properties();
+
+ commandLine.setProperty(Config.STORE_NAME.paramName,
+ parallelStoreName);
+ commandLine.setProperty(Config.HELPER_HOSTS.paramName,
+ (hostName + ":" + getKVPort()));
+ Config.ProxyType ptype = (onprem ? Config.ProxyType.KVPROXY :
+ Config.ProxyType.CLOUDTEST);
+ commandLine.setProperty(Config.PROXY_TYPE.paramName, ptype.name());
+ commandLine.setProperty(Config.VERBOSE.paramName,
+ Boolean.toString(verbose));
+ /* use a non-privileged port */
+ commandLine.setProperty(Config.HTTP_PORT.paramName, "8095");
+
+ /* allow query tracing */
+ commandLine.setProperty(Config.QUERY_TRACING.paramName, "true");
+
+ /* use defaults for thread pools and sizes */
+
+ /* simple access checker */
+ ac = AccessCheckerFactory.createInsecureAccessChecker();
+ Config cfg = new Config(commandLine);
+ /* create an appropriate TenantManager */
+ if (onprem) {
+ /* note: in KVPROXY mode the proxy *requires* a KVTenantManager */
+ tm = KVTenantManager.createTenantManager(cfg);
+ } else {
+ tm = LocalTenantManager.createTenantManager(cfg);
+ }
+ proxy = Proxy.initialize(cfg, tm, ac, null);
+ proxyEndpoint = "http://" + hostName + ":" + cfg.getHttpPort();
+ }
+
+ protected static void stopProxy() throws Exception {
+ if (proxy != null) {
+ proxy.shutdown(3, TimeUnit.SECONDS);
+ proxy = null;
+ }
+ if (tm != null) {
+ tm.close();
+ tm = null;
+ }
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ /*
+ * Configure the endpoint
+ */
+ if (handle == null) {
+ NoSQLHandleConfig config =
+ new NoSQLHandleConfig(proxyEndpoint);
+ SecureTestUtil.setAuthProvider(config, false,
+ onprem, getTenantId());
+ handle = getHandle(config); /* see ProxyTestBase */
+ }
+ dropAllTables(handle, true);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (handle != null) {
+ dropAllTables(handle, true);
+ handle.close();
+ handle = null;
+ }
+ }
+
+ @Test
+ public void testParallelQueryArgs() {
+ final String tableName = "ParallelQuery";
+ final String createTable = "create table " + tableName +
+ "(id integer, primary key(id)) as json collection";
+ final String query = "select * from " + tableName;
+ tableOperation(handle, createTable,
+ new TableLimits(4, 1000, 1000),
+ TableResult.State.ACTIVE, 10000);
+ PreparedStatement ps =
+ handle.prepare(new PrepareRequest().setStatement(query))
+ .getPreparedStatement();
+
+ QueryRequest qr = new QueryRequest().setStatement(query).
+ setNumberOfOperations(1);
+ failParallelQuery(qr, "not prepared1", IllegalArgumentException.class);
+ qr.setNumberOfOperations(0).setOperationNumber(1);
+ failParallelQuery(qr, "not prepared2", IllegalArgumentException.class);
+
+ /* use prepared statement now to check other params */
+ qr.setPreparedStatement(ps).setStatement(null);
+ failParallelQuery(qr, "numops set, opnum not set",
+ IllegalArgumentException.class);
+
+ qr.setNumberOfOperations(1).setOperationNumber(0);
+ failParallelQuery(qr, "opnum set, numops not set",
+ IllegalArgumentException.class);
+
+ qr.setNumberOfOperations(1).setOperationNumber(2);
+ failParallelQuery(qr, "opnum too large",
+ IllegalArgumentException.class);
+
+ qr.setNumberOfOperations(ps.getMaximumParallelism() + 1);
+ failParallelQuery(qr, "numops too large",
+ IllegalArgumentException.class);
+
+ qr.setNumberOfOperations(-1);
+ failParallelQuery(qr, "negative numops",
+ IllegalArgumentException.class);
+
+ qr.setNumberOfOperations(1).setOperationNumber(-1);
+ failParallelQuery(qr, "negative opnum",
+ IllegalArgumentException.class);
+
+ String upd = "insert into " + tableName + "(id) values (2000)";
+ ps = handle.prepare(new PrepareRequest().setStatement(upd))
+ .getPreparedStatement();
+ assertEquals(0, ps.getMaximumParallelism());
+ /* any non-zero value is illegal for updates */
+ qr.setPreparedStatement(ps).setOperationNumber(1).
+ setNumberOfOperations(1);
+ failParallelQuery(qr, "cannot insert/update",
+ IllegalArgumentException.class);
+ }
+
+ @Test
+ public void testParallelMisc() {
+ final int numRows = 1000;
+ final String tableName = "ParallelQuery";
+ final String createTable = "create table " + tableName +
+ "(id integer, primary key(id)) as json collection";
+ String createIndex = "create index idx on " + tableName +
+ "(name as string)";
+ tableOperation(handle, createTable,
+ new TableLimits(4, 1000, 1000),
+ TableResult.State.ACTIVE, 10000);
+ tableOperation(handle, createIndex, null, null,
+ TableResult.State.ACTIVE, null);
+
+        final String query1 = "select * from " + tableName;  /* parallel */
+        final String query2 = "select * from " + tableName +  /* no: sorted */
+            " order by id";
+        final String query3 = "select * from " + tableName +  /* parallel */
+            " where name = 'joe'";
+        final String query4 =                                 /* no: aggregate */
+            "select count(*) from " + tableName;
+
+ final String[] queries = new String[]{query1, query2, query3, query4};
+ /*
+ * These answers rely on the default configuration of a multishard
+ * KVLite
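+         * (a full table scan parallelizes into one operation per
+         * partition, an all-shard index query into one per shard, and
+         * sorted or aggregate queries cannot be split at all)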
+ */
+ final int[] answers = new int[]{multishardPartitions,
+ 0, multishardShards, 0};
+ for (int i = 0; i < queries.length; i++) {
+ assertEquals(answers[i], maxParallel(queries[i]));
+ }
+ }
+
+ /*
+ * Use query parallelism.
+ * 1. in a non-threaded fashion to test that the use of subsets of a
+ * table return complete, non-intersecting results
+ * 2. in a threaded, truly parallel scenario
+ *
+ * Start with all partition parallelism and use a JSON collection table
+ * TODO:
+ * o parallel indexed queries
+ * o queries that cannot be parallel (max 1)
+ */
+ @Test
+ public void testParallelQuery() {
+ final int numRows = 1000;
+ final String tableName = "ParallelQuery";
+ final String createTable = "create table " + tableName +
+ "(id integer, primary key(id)) as json collection";
+ String createIndex = "create index idx on " + tableName +
+ "(name as string)";
+ final String query = "select * from " + tableName;
+ /* use an index query that will still return all results */
+ final String indexQuery = "select * from " + tableName +
+ " where name > 'm'";
+ tableOperation(handle, createTable,
+ new TableLimits(10000, 10000, 1000),
+ TableResult.State.ACTIVE, 10000);
+ tableOperation(handle, createIndex, null, null,
+ TableResult.State.ACTIVE, null);
+ PreparedStatement ps =
+ handle.prepare(new PrepareRequest().setStatement(query))
+ .getPreparedStatement();
+ int max = ps.getMaximumParallelism();
+ assertEquals(multishardPartitions, max);
+
+ /* load rows sufficient to cover all partitions */
+ PutRequest preq = new PutRequest().setTableName(tableName);
+ putRowsInParallelTable(preq, numRows);
+
+ final AtomicInteger readKB = new AtomicInteger();
+        final Set<Integer> keys = ConcurrentHashMap.newKeySet();
+ for (int i = 0; i < max; i++) {
+ doSubsetQuery(ps, max, i + 1, keys, readKB);
+ }
+ /* did all of the results get read and are they unique? */
+ assertEquals(numRows, keys.size());
+ assertEquals(numRows, readKB.get());
+
+ /*
+ * do another "parallel" query but with 3 subsets and
+ * make sure that all rows are read, with no duplicates
+ */
+ readKB.set(0);
+ keys.clear();
+ for (int i = 0; i < 3; i++) {
+ doSubsetQuery(ps, 3, i + 1, keys, readKB);
+ }
+ assertEquals(numRows, keys.size());
+ assertEquals(numRows, readKB.get());
+
+ /* use indexed, all shard query that returns all results */
+ PreparedStatement psIndex =
+ handle.prepare(new PrepareRequest().setStatement(indexQuery))
+ .getPreparedStatement();
+ max = psIndex.getMaximumParallelism();
+ assertEquals(multishardShards, max);
+
+ readKB.set(0);
+ keys.clear();
+ for (int i = 0; i < max; i++) {
+ doSubsetQuery(psIndex, max, i + 1, keys, readKB);
+ }
+ assertEquals(numRows, keys.size());
+ assertEquals(numRows, readKB.get());
+
+ /*
+         * all-shard query again, this time split into only 2 operations
+ */
+ readKB.set(0);
+ keys.clear();
+ for (int i = 0; i < 2; i++) {
+ doSubsetQuery(psIndex, 2, i + 1, keys, readKB);
+ }
+ assertEquals(numRows, keys.size());
+ assertEquals(numRows, readKB.get());
+
+ /*
+ * Rather than create a new test, tables, etc. reuse the existing
+ * table and data and run these queries in threads vs sequentially
+ */
+ doQueryInThreads(ps, numRows);
+ doQueryInThreads(psIndex, numRows);
+ }
+
+ private void doQueryInThreads(final PreparedStatement ps, int numRows) {
+ /*
+         * Use min(maxParallelism, 10) as the number of operations (and
+         * threads)
+ */
+ final AtomicInteger readKB = new AtomicInteger();
+        final Set<Integer> keys = ConcurrentHashMap.newKeySet();
+ final int max = Math.min(ps.getMaximumParallelism(), 10);
+ assertTrue(max >= multishardShards);
+
+ ExecutorService executor = Executors.newFixedThreadPool(max);
+ /* create a list of callables and start them at the same time */
+        Collection<Callable<Void>> tasks = new ArrayList<>();
+ for (int i = 0; i < max; i++) {
+ final int opNum = i + 1;
+            tasks.add(new Callable<Void>() {
+ @Override
+ public Void call() {
+ doSubsetQuery(ps, max, opNum, keys, readKB);
+ return null;
+ }
+ });
+ }
+ try {
+            List<Future<Void>> futures = executor.invokeAll(tasks);
+            for (Future<Void> f : futures) {
+ f.get();
+ }
+        } catch (Exception e) {
+            fail("Exception: " + e);
+        } finally {
+            executor.shutdown();
+        }
+ /* did all of the results get read and are they unique? */
+ assertEquals(numRows, keys.size());
+ assertEquals(numRows, readKB.get());
+ }
+
+ /*
+ * Do a single portion (operation) of a parallel query
+ */
+ private void doSubsetQuery(PreparedStatement ps,
+ int numOperations,
+ int operationNumber,
+                               Set<Integer> keys,
+ final AtomicInteger readKB) {
+ QueryRequest qr = new QueryRequest().setPreparedStatement(ps);
+ qr.setNumberOfOperations(numOperations);
+ qr.setOperationNumber(operationNumber);
+ QueryResult qres = null;
+        do {
+            qres = handle.query(qr);
+            for (MapValue v : qres.getResults()) {
+                keys.add(v.get("id").getInt());
+            }
+            /* accumulate the read cost of each batch */
+            readKB.addAndGet(qres.getReadKB());
+        } while (!qr.isDone());
+ }
+
+ private void putRowsInParallelTable(PutRequest preq, int numRows) {
+ for (int id = 0; id < numRows; id++) {
+ MapValue row = new MapValue()
+ .put("id", id)
+ .put("name", ("name_" + id))
+ .put("age", (id % 25));
+ preq.setValue(row);
+ PutResult pret = handle.put(preq);
+ assertNotNull(pret.getVersion());
+ }
+ }
+
+ /*
+ * Return the max amount of parallelism
+ */
+ private int maxParallel(String query) {
+ return handle.prepare(new PrepareRequest().setStatement(query))
+ .getPreparedStatement().getMaximumParallelism();
+ }
+
+ private void failParallelQuery(QueryRequest qr,
+ final String msg,
+                                   Class<? extends Exception> expected) {
+ try {
+ handle.query(qr);
+ fail("Expected exception on parallel query for : " +
+ msg);
+ } catch (Exception e) {
+ if (!expected.equals(e.getClass())) {
+ fail("Unexpected exception. Expected " + expected + ", got " +
+ e + " for case: " + msg);
+ }
+ }
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyConfigTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyConfigTest.java
new file mode 100644
index 00000000..9b057a0b
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyConfigTest.java
@@ -0,0 +1,159 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+
+import oracle.kv.Consistency;
+import oracle.kv.KVStoreConfig;
+import oracle.kv.Durability;
+import oracle.nosql.proxy.util.TestBase;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Validate the proxy configuration.
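+ * Command-line properties take precedence over properties read from the
+ * config file (exercised by testPrecedence).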
+ */
+public class ProxyConfigTest extends TestBase {
+ private static PrintStream original;
+
+ @BeforeClass
+ public static void staticSetUp() throws Exception {
+ /*
+ * Filter out the stderr output from proxy startup
+ */
+        original = System.err;
+ System.setErr(new PrintStream(new OutputStream() {
+ @Override
+ public void write(int b) throws IOException {}
+ }));
+ }
+
+ @AfterClass
+ public static void staticTearDown() throws Exception {
+ if (original != null) {
+ System.setErr(original);
+ }
+ }
+
+ @Test
+ public void testPrecedence() throws Exception {
+
+ Properties fileProps = new Properties();
+ fileProps.setProperty(Config.NUM_REQUEST_THREADS.paramName, "123");
+ fileProps.setProperty(Config.MONITOR_STATS_ENABLED.paramName, "true");
+ fileProps.setProperty(Config.IDLE_READ_TIMEOUT.paramName, "8888");
+ String configFileName = createConfigFile(fileProps);
+
+
+ Properties commandLine = new Properties();
+ commandLine.setProperty(Config.NUM_REQUEST_THREADS.paramName, "456");
+ commandLine.setProperty(Config.NUM_ACCEPT_THREADS.paramName, "5");
+ commandLine.setProperty(Config.KV_REQUEST_TIMEOUT.paramName, "4000");
+ commandLine.setProperty(Config.KV_CONSISTENCY.paramName, "ABSOLUTE");
+ commandLine.setProperty(Config.CONFIG_FILE.paramName, configFileName);
+
+ Config config = new Config(commandLine);
+ KVStoreConfig kvConfig = config.getTemplateKVStoreConfig();
+
+ /* don't validate all fields, just a few */
+ assertEquals(456, config.getNumRequestThreads());
+ assertEquals(5, config.getNumAcceptThreads());
+ assertEquals(true, config.isMonitorStatsEnabled());
+ assertEquals(8888, config.getIdleReadTimeout());
+ assertEquals(4000, kvConfig.getRequestTimeout(TimeUnit.MILLISECONDS));
+ assertEquals(Consistency.ABSOLUTE, kvConfig.getConsistency());
+ }
+
+ @Test
+ public void testSslProtocols() throws Exception {
+ Properties commandLine = new Properties();
+ commandLine.setProperty(Config.SSL_PROTOCOLS.paramName,
+ "TLSv1.3,TLSv1.1");
+ Config config = new Config(commandLine);
+        assertEquals(2, config.getSSLProtocols().length);
+
+ commandLine.setProperty(Config.SSL_PROTOCOLS.paramName,
+ "TLSv1.4,TLSv1.1");
+        try {
+            new Config(commandLine);
+            fail("Expected IllegalArgumentException for invalid protocol");
+        } catch (IllegalArgumentException e) {
+            /* expected */
+        }
+
+ commandLine.setProperty(Config.SSL_PROTOCOLS.paramName,
+ "TLSv1,");
+        try {
+            new Config(commandLine);
+            fail("Expected IllegalArgumentException for trailing comma");
+        } catch (IllegalArgumentException e) {
+            /* expected */
+        }
+ }
+
+ @Test
+ public void testKVDurability() throws Exception {
+ String[] s = new String[]{"-" + Config.KV_DURABILITY.paramName, "COMMIT_ALL_SYNC"};
+ Config c = new Config(s);
+ KVStoreConfig kvConfig = c.makeTemplateKVStoreConfig();
+ Durability durability = kvConfig.getDurability();
+        assertEquals(Durability.SyncPolicy.SYNC, durability.getMasterSync());
+        assertEquals(Durability.SyncPolicy.SYNC, durability.getReplicaSync());
+        assertEquals(Durability.ReplicaAckPolicy.SIMPLE_MAJORITY,
+                     durability.getReplicaAck());
+
+ s = new String[]{"-" + Config.KV_DURABILITY.paramName, "COMMIT_ALL_WRITE_NO_SYNC"};
+ c = new Config(s);
+ kvConfig = c.makeTemplateKVStoreConfig();
+ durability = kvConfig.getDurability();
+        assertEquals(Durability.SyncPolicy.WRITE_NO_SYNC,
+                     durability.getMasterSync());
+        assertEquals(Durability.SyncPolicy.WRITE_NO_SYNC,
+                     durability.getReplicaSync());
+        assertEquals(Durability.ReplicaAckPolicy.SIMPLE_MAJORITY,
+                     durability.getReplicaAck());
+ }
+
+ @Test
+ public void testCommandLine() throws Exception {
+
+ String[] s = new String[] {"-foo"};
+        try {
+            new Config(s);
+            fail("Expected IllegalArgumentException for unknown flag");
+        } catch (IllegalArgumentException e) {
+            /* expected */
+        }
+
+ s = new String[] {Config.STORE_NAME.paramName, "StagingStore"};
+        try {
+            new Config(s);
+            fail("Expected IllegalArgumentException for missing \"-\" prefix");
+        } catch (IllegalArgumentException e) {
+            /* expected */
+        }
+
+ s = new String[] {"-" + Config.STORE_NAME.paramName, "StagingStore"};
+ Config config = new Config(s);
+ assertEquals("StagingStore", config.getStoreName());
+
+ }
+
+
+ private String createConfigFile(Properties fileContents)
+ throws Exception {
+
+ File configFile = new File(getTestDir(), "mock.config.props");
+        try (OutputStream output = new FileOutputStream(configFile)) {
+            fileContents.store(output, "Mock config file");
+        }
+        return configFile.getAbsolutePath();
+ }
+
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyHealthSourceTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyHealthSourceTest.java
new file mode 100644
index 00000000..3e27a7e9
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyHealthSourceTest.java
@@ -0,0 +1,131 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+import org.junit.Test;
+
+import oracle.nosql.common.sklogger.SkLogger;
+import oracle.nosql.proxy.cloud.ProxyHealthSource;
+import oracle.nosql.proxy.sc.SCTenantManager;
+import oracle.nosql.proxy.security.AccessChecker;
+import oracle.nosql.proxy.security.AccessCheckerFactory;
+import oracle.nosql.util.HostPort;
+import oracle.nosql.util.ServiceDirectory;
+import oracle.nosql.util.ph.HealthStatus;
+
+public class ProxyHealthSourceTest extends ProxyTestBase {
+
+ private static final SkLogger logger = new SkLogger(
+ ProxyHealthSourceTest.class.getName(),
+ "proxy", "proxytest.log");
+
+ @Test
+ public void scConnectivityTest() throws Exception {
+ /*
+ * 1. Set up a test Proxy with Wrong SC IP address.
+ */
+ SCTenantManager testTM =
+ new SCTenantManager("V0",
+ 0, 0, true /* isChildTableEnabled */, 0,
+ new TestServiceDirectory());
+ AccessChecker checker =
+ AccessCheckerFactory.createInsecureAccessChecker();
+ HostPort hp = new HostPort("errorhost", 8888);
+ testTM.establishURLBase(hp.toUrl(false), true /* reset */);
+ Properties commandLine = new Properties();
+ commandLine.setProperty(Config.PROXY_TYPE.paramName,
+ Config.ProxyType.CLOUDTEST.name());
+
+ commandLine.setProperty(Config.HTTP_PORT.paramName,
+ Integer.toString(9095));
+
+ commandLine.setProperty(Config.HTTPS_PORT.paramName,
+ Integer.toString(9096));
+ commandLine.setProperty(Config.NUM_REQUEST_THREADS.paramName,
+ Integer.toString(1));
+ /* Disable pulling rules thread in FilterHandler */
+ commandLine.setProperty(Config.PULL_RULES_INTERVAL_SEC.paramName,
+ Integer.toString(0));
+
+ Proxy testProxy = ProxyMain.startProxy(commandLine, testTM,
+ checker, audit);
+
+ /*
+         * 2. Check Proxy HealthStatus is YELLOW since the SC can't be
+         * reached.
+ */
+ ProxyHealthSource healthSource = testProxy.getHealthSource();
+        List<String> errors = new ArrayList<>();
+ HealthStatus status = healthSource.getStatus("Proxy",
+ "Proxy0",
+ "localhost",
+ logger,
+ errors);
+ assertEquals(HealthStatus.YELLOW, status);
+ assertEquals(1, errors.size());
+ /*
+ * Minicloud test only
+ */
+ if (useMiniCloud) {
+ /*
+ * 3. Set TM to the real SC IP address.
+ */
+ hp = new HostPort(scHost, scPort);
+ testTM.establishURLBase(hp.toUrl(false), true /* reset */);
+ /*
+             * Wait more than 1 minute for the last failed SC request to
+             * expire.
+ */
+ try {
+ Thread.sleep(61_000);
+ } catch (InterruptedException e) {
+ }
+ /*
+ * 4. Check Proxy HealthStatus is GREEN now.
+ */
+ healthSource = testProxy.getHealthSource();
+ errors = new ArrayList<>();
+ status = healthSource.getStatus("Proxy", "Proxy0", "localhost",
+ logger, errors);
+ assertEquals(errors.toString(), HealthStatus.GREEN, status);
+ assertEquals(errors.toString(), 0, errors.size());
+ }
+ }
+
+ class TestServiceDirectory implements ServiceDirectory {
+
+ /**
+         * Returns a positive value as the service (region) identifier.
+         * A positive value is required to indicate that this proxy
+         * is in a "cloud" environment - whether it's cloudsim or a unit test.
+ */
+ @Override
+ public int getLocalServiceInteger() {
+ return 1;
+ }
+
+ @Override
+ public String getLocalServiceName() {
+ return "localPP";
+ }
+
+ @Override
+ public String translateToRegionName(String serviceName) {
+ return serviceName + "-region";
+ }
+
+ @Override
+ public String validateRemoteReplica(String targetRegionName) {
+ return targetRegionName + "-servicename";
+ }
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyI18NTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyI18NTest.java
new file mode 100644
index 00000000..b59be760
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyI18NTest.java
@@ -0,0 +1,436 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * This file was distributed by Oracle as part of a version of Oracle NoSQL
+ * Database made available at:
+ *
+ * http://www.oracle.com/technetwork/database/database-technologies/nosqldb/downloads/index.html
+ *
+ * Please see the LICENSE file included in the top-level directory of the
+ * appropriate version of Oracle NoSQL Database for a copy of the license and
+ * additional information.
+ */
+
+package oracle.nosql.proxy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.util.UUID;
+
+import oracle.nosql.driver.NoSQLException;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.FieldValue;
+import oracle.nosql.driver.values.JsonOptions;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.driver.values.TimestampValue;
+
+import org.junit.Test;
+
+/**
+ * Test I18N in the cloud driver. This test and associated data files were
+ * supplied by the i18n group.
+ */
+public class ProxyI18NTest extends ProxyTestBase {
+ private static final String basePath =
+ getProxyBase() + "/oracle/nosql/proxy/";
+ private static final String[] Jsonfilesname = {
+ "utf8.json","utf8bom.json",
+ "utf16le.json", "utf16lebom.json",
+ "utf16be.json","utf16bebom.json"
+ };
+
+ private static final String expfile = "utf8_testdata.txt";
+ private static final String outputres = "results_testdata.txt";
+ private static final String jsondata = "utf8_jsondata.txt";
+ private String expstr = null;
+
+ /*
+ * Test creation from various json encodings
+ */
+ @Test
+    public void createJsonTest() {
+        try {
+            for (String element : Jsonfilesname) {
+                File file = new File(basePath + element);
+                /* try-with-resources closes the stream on any path */
+                try (FileInputStream fis = new FileInputStream(file)) {
+                    MapValue mv = FieldValue.createFromJson(
+                        fis, new JsonOptions()).asMap();
+                    expstr = readexpfile("fr", 1);
+                    assertEquals(expstr, mv.getString("name"));
+                }
+            }
+        } catch (Exception e) {
+            fail("Exception: " + e);
+        }
+    }
+
+ /*
+ * Test French input
+ */
+ @Test
+ public void fr_simpleTest() throws Exception {
+
+ try {
+ /*
+ * Create a simple table with an integer key and a single
+ * name field
+ */
+ TableResult tres = tableOperation(
+ handle,
+ "create table if not exists users(id integer, " +
+ "name string, primary key(id))",
+ new TableLimits(500, 500, 50),
+ TableResult.State.ACTIVE,
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ /* PUT a row */
+
+ /* construct a simple row */
+ MapValue value = new MapValue().put("id", 1).
+ put("name", "\u00E7\u00E9_myname");
+
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName("users");
+
+ PutResult putRes = handle.put(putRequest);
+ assertNotNull("Put failed", putRes.getVersion());
+ assertWriteKB(putRes);
+
+ /* GET the row */
+ MapValue key = new MapValue().put("id", 1);
+ GetRequest getRequest = new GetRequest()
+ .setKey(key)
+ .setTableName("users");
+
+ GetResult getRes = handle.get(getRequest);
+
+ String getstr = getRes.getValue().toString();
+
+ expstr = readexpfile("fr",2);
+
+ assertEquals(expstr, getstr);
+
+ /* PUT a row using JSON */
+
+ /* construct a simple row */
+ String jsonString = readexpfile("fr",3);
+ putRequest = new PutRequest()
+ .setValueFromJson(jsonString, null) // no options
+ .setTableName("users");
+
+ putRes = handle.put(putRequest);
+
+ /* GET the new row */
+ key = new MapValue().put("id", 2);
+ getRequest = new GetRequest()
+ .setKey(key)
+ .setTableName("users");
+
+ getRes = handle.get(getRequest);
+
+ expstr = readexpfile("fr",3);
+ getstr = getRes.getValue().toString();
+
+ assertEquals(expstr, getstr);
+
+ /* DELETE a row */
+ DeleteRequest delRequest = new DeleteRequest()
+ .setKey(key)
+ .setTableName("users");
+
+ handle.delete(delRequest);
+        } catch (NoSQLException nse) {
+            fail("Op failed: " + nse.getMessage());
+        } catch (Exception e) {
+            e.printStackTrace();
+            fail("Exception processing msg: " + e);
+ }
+
+ @Test
+    public void jsontableTest() {
+
+ /* Create a table */
+ TableResult tres = tableOperation(
+ handle,
+ "create table if not exists restaurants(uid string, " +
+ "restaurantJSON JSON, primary key(uid))",
+ new TableLimits(500, 500, 50),
+ TableResult.State.ACTIVE,
+ 20000);
+
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ /* Create an index */
+ tres = tableOperation(
+ handle,
+ "CREATE INDEX IF NOT EXISTS idx_json_name on restaurants " +
+ " (restaurantJSON.name as string)",
+ null,
+ TableResult.State.ACTIVE,
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+        /* populate JSON data into the table */
+ BufferedReader br = null;
+ FileReader fr = null;
+
+ try {
+ String jsonString = "";
+ String currLine;
+ int pCount = 0;
+ boolean buildObj = false;
+ boolean beganParsing = false;
+ String jsonfilePath = basePath + jsondata;
+
+ fr = new FileReader(jsonfilePath);
+ br = new BufferedReader(fr);
+
+
+ /*
+             * Parse the sample JSON file, balancing open and close braces
+             * to assemble one complete JSON object string at a time
+ */
+ while ((currLine = br.readLine()) != null) {
+ pCount += countParens(currLine, '{');
+
+ // Empty line in the data file
+ if (currLine.length() == 0) {
+ continue;
+ }
+
+ // Comments must start at column 0 in the
+ // data file.
+ if (currLine.charAt(0) == '#') {
+ continue;
+ }
+
+                // If we've found at least one open brace, it's time to
+                // start collecting data
+ if (pCount > 0) {
+ buildObj = true;
+ beganParsing = true;
+ }
+
+ if (buildObj) {
+ jsonString += currLine;
+ }
+
+ /*
+                 * If our open and close braces balance (the count
+                 * is zero) then we've collected an entire object
+ */
+ pCount -= countParens(currLine, '}');
+ if (pCount < 1)
+ {
+ buildObj = false;
+ /*
+ * If we started parsing data, but buildObj is false
+ * then that means we've reached the end of a JSON
+ * object in the input file. So write the object
+ * to the table, using the PutRequest
+ */
+ }
+
+ if (beganParsing && !buildObj) {
+
+ /*
+ * Use the putFromJSON to automatically convert JSON string
+ * into JSON object
+ */
+ MapValue value = new MapValue().put("uid", generateUUID()).
+ putFromJson("restaurantJSON", jsonString, null);
+ PutRequest putRequest = new PutRequest().setValue(value).
+ setTableName("restaurants");
+ PutResult putRes = handle.put(putRequest);
+ assertNotNull(putRes.getVersion());
+ jsonString = "";
+ }
+ }
+
+ /* query json table */
+            /* line 4 of the "fr" block holds the indexed name */
+            String idxname = readexpfile("fr", 4);
+ String predQuery = "SELECT * FROM restaurants r WHERE " +
+ " r.restaurantJSON.name = \"" + idxname + "\"";
+
+ //Create the Query Request
+ QueryRequest queryRequest = new QueryRequest().
+ setStatement(predQuery);
+
+ //Execute the query and get the response
+ QueryResult queryRes = handle.query(queryRequest);
+            if (queryRes.getResults().size() > 0) {
+ String name;
+ String address;
+ String phonenumber;
+ String mobile_reserve_url;
+
+ for (MapValue record : queryRes.getResults()) {
+ MapValue jsonValue = record.get("restaurantJSON").asMap();
+
+ name = jsonValue.getString("name");
+ address = jsonValue.getString("address");
+ phonenumber = jsonValue.getString("phone");
+ mobile_reserve_url =
+ jsonValue.getString("mobile_reserve_url");
+
+ // write the result data to an outputfile
+ String oputrespath = basePath + outputres;
+                    FileOutputStream fs =
+                        new FileOutputStream(oputrespath, true);
+                    OutputStreamWriter pw =
+                        new OutputStreamWriter(fs, "UTF8");
+ pw.write(name + "\t");
+ pw.write(address + "\t");
+ pw.write(phonenumber + "\t");
+ pw.write(mobile_reserve_url + "\n");
+ pw.close();
+ /* delete output file */
+ new File(oputrespath).delete();
+ }
+ }
+ } catch (FileNotFoundException fnfe) {
+            fail("File not found: " + fnfe);
+        } catch (IOException ioe) {
+            fail("IOException: " + ioe);
+ } catch (NoSQLException nse) {
+ fail("jsontableTest Op failed: " + nse.getMessage());
+ } catch (Exception e) {
+            fail("Exception processing msg: " + e);
+ } finally {
+ try {
+ if (br != null) {
+ br.close();
+ }
+ if (fr != null) {
+ fr.close();
+ }
+ } catch (IOException iox) {
+ // ignore
+ }
+ }
+ }
+
+ @Test
+    public void timestampTest() {
+ String timesp1 = "1970-01-01T00:00:00Z";
+ String timesp2 = "1970-01-01T00:00:00+00:00";
+ timestamp(timesp1);
+ timestamp(timesp2);
+ }
+
+ /*
+     * Used by jsontableTest() to know when a JSON object
+     * begins and ends in the input data file (counts occurrences of p).
+ */
+ private static int countParens(String line, char p) {
+ int c = 0;
+ for (int i = 0; i < line.length(); i++) {
+ if (line.charAt(i) == p) {
+ c++;
+ }
+ }
+ return c;
+ }
+
+ private static String generateUUID() {
+ return UUID.randomUUID().toString().replace("-", "");
+ }
+
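+    /*
+     * Return entry <line> of the given language block from the expected
+     * data file. A sketch of the layout this parser assumes (illustrative
+     * only; the real file is supplied by the i18n group):
+     *
+     *   # comments start at column 0
+     *   fr{
+     *   1: expected name
+     *   2: expected row JSON
+     *   ...
+     */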
+    private static String readexpfile(String lang, int line) {
+        BufferedReader br = null;
+        InputStreamReader isr = null;
+        String currLine;
+        String expstr = null;
+        String langfg = lang + "{";
+
+ try {
+ //Get the expected data file
+ File file = new File(basePath + expfile);
+
+ //Read file content using utf8
+ isr = new InputStreamReader(new FileInputStream(file), "utf8");
+
+ br = new BufferedReader(isr);
+
+ while ((currLine = br.readLine()) != null) {
+
+ // Empty line in the data file
+ if (currLine.length() == 0) {
+ continue;
+ }
+
+ // Comments must start at column 0 in the data file.
+ if (currLine.charAt(0) == '#') {
+ continue;
+ }
+
+ /*
+ * If we've found the expected language, it's time to start
+ * collecting data
+ */
+ if (currLine.equals(langfg)) {
+ int i = 0;
+                    while (i != line) {
+                        i++;
+                        currLine = br.readLine();
+                    }
+                    String currstr =
+                        currLine.substring(currLine.indexOf(":") + 1);
+ expstr = currstr.trim();
+ break;
+ }
+ }
+ } catch (Exception e) {
+ fail("Exception: " + e);
+ } finally {
+ try {
+ if (isr != null) {
+ isr.close();
+ }
+ if (br != null) {
+ br.close();
+ }
+ } catch (IOException ioe) {
+ // ignore
+ }
+ }
+
+ return expstr;
+ }
+
+    /* Used by timestampTest() */
+ private static void timestamp(String s) {
+ try {
+ @SuppressWarnings("unused")
+ TimestampValue v = new TimestampValue(s);
+ // System.out.println(s +" = long(" + v.getLong() + ")");
+ } catch (Exception e) {
+ fail("test timestamp failed: " + e.getMessage());
+ }
+ }
+}
diff --git a/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyTest.java b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyTest.java
new file mode 100644
index 00000000..c2dfb8a6
--- /dev/null
+++ b/httpproxy/tests/src/main/java/oracle/nosql/proxy/ProxyTest.java
@@ -0,0 +1,3228 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * See the file LICENSE for redistribution information.
+ *
+ */
+
+package oracle.nosql.proxy;
+
+import static oracle.nosql.driver.ops.TableLimits.CapacityMode.ON_DEMAND;
+import static oracle.nosql.proxy.protocol.Protocol.TABLE_USAGE_NUMBER_LIMIT;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+import java.net.URL;
+import java.sql.Timestamp;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.ZoneId;
+import java.time.ZoneOffset;
+import java.time.format.DateTimeFormatter;
+import java.time.format.DateTimeParseException;
+import java.time.temporal.ChronoField;
+import java.time.temporal.TemporalAccessor;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import oracle.nosql.driver.Consistency;
+import oracle.nosql.driver.DefinedTags;
+import oracle.nosql.driver.Durability;
+import oracle.nosql.driver.Durability.SyncPolicy;
+import oracle.nosql.driver.FreeFormTags;
+import oracle.nosql.driver.Durability.ReplicaAckPolicy;
+import oracle.nosql.driver.IndexExistsException;
+import oracle.nosql.driver.IndexNotFoundException;
+import oracle.nosql.driver.KeySizeLimitException;
+import oracle.nosql.driver.NoSQLHandle;
+import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.OperationThrottlingException;
+import oracle.nosql.driver.ReadThrottlingException;
+import oracle.nosql.driver.RowSizeLimitException;
+import oracle.nosql.driver.TableExistsException;
+import oracle.nosql.driver.TableNotFoundException;
+import oracle.nosql.driver.TimeToLive;
+import oracle.nosql.driver.Version;
+import oracle.nosql.driver.WriteThrottlingException;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.GetIndexesRequest;
+import oracle.nosql.driver.ops.GetIndexesResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.GetTableRequest;
+import oracle.nosql.driver.ops.ListTablesRequest;
+import oracle.nosql.driver.ops.ListTablesResult;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PreparedStatement;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutRequest.Option;
+import oracle.nosql.driver.ops.TableUsageResult.TableUsage;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableRequest;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.TableUsageRequest;
+import oracle.nosql.driver.ops.TableUsageResult;
+import oracle.nosql.driver.ops.WriteMultipleRequest;
+import oracle.nosql.driver.ops.WriteMultipleResult;
+import oracle.nosql.driver.ops.WriteMultipleResult.OperationResult;
+import oracle.nosql.driver.ops.WriteRequest;
+import oracle.nosql.driver.ops.WriteResult;
+import oracle.nosql.driver.values.BinaryValue;
+import oracle.nosql.driver.values.BooleanValue;
+import oracle.nosql.driver.values.FieldValue;
+import oracle.nosql.driver.values.IntegerValue;
+import oracle.nosql.driver.values.JsonNullValue;
+import oracle.nosql.driver.values.MapValue;
+import oracle.nosql.driver.values.NullValue;
+import oracle.nosql.driver.values.StringValue;
+import oracle.nosql.driver.values.TimestampValue;
+import oracle.nosql.util.HttpResponse;
+
+import org.junit.FixMethodOrder;
+import org.junit.Test;
+import org.junit.runners.MethodSorters;
+
+/*
+ * The tests are ordered so that the zzz* test goes last so it picks up
+ * DDL history reliably.
+ */
+@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+public class ProxyTest extends ProxyTestBase {
+ /*
+ * The time stamp string pattern used to parse start/end range parameter
+ * of table usage.
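+     * The bracketed sections of the pattern are optional, so (for
+     * example) "2024-01-01", "2024-01-01T10:30:00" and
+     * "2024-01-01T10:30:00.123" all parse.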
+ */
+ private final static String TimestampPattern =
+ "yyyy-MM-dd['T'HH:mm:ss[.SSS]]";
+ private final static ZoneId UTCZone = ZoneId.of(ZoneOffset.UTC.getId());
+ private final static DateTimeFormatter timestampFormatter =
+ DateTimeFormatter.ofPattern(TimestampPattern).withZone(UTCZone);
+
+ private final static int USAGE_TIME_SLICE_MS = 60 * 1000;
+ final static int KEY_SIZE_LIMIT = rlimits.getPrimaryKeySizeLimit();
+ final static int ROW_SIZE_LIMIT = rlimits.getRowSizeLimit();
+
+ @Test
+ public void smokeTest() {
+
+ try {
+
+ MapValue key = new MapValue().put("id", 10);
+
+ MapValue value = new MapValue().put("id", 10).put("name", "jane");
+
+ /* drop a table */
+ TableResult tres = tableOperation(handle,
+ "drop table if exists testusers",
+ null, TableResult.State.DROPPED,
+ 20000);
+ assertNotNull(tres.getTableName());
+ assertTrue(tres.getTableState() == TableResult.State.DROPPED);
+ assertNull(tres.getTableLimits());
+
+ /* Create a table */
+ tres = tableOperation(
+ handle,
+ "create table if not exists testusers(id integer, " +
+ "name string, primary key(id))",
+ new TableLimits(500, 500, 50),
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ /* Create an index */
+ tres = tableOperation(
+ handle,
+ "create index if not exists Name on testusers(name)",
+ null,
+ TableResult.State.ACTIVE,
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ /* GetTableRequest for table that doesn't exist */
+ try {
+ GetTableRequest getTable =
+ new GetTableRequest()
+ .setTableName("not_a_table");
+ tres = handle.getTable(getTable);
+ fail("Table should not be found");
+ } catch (TableNotFoundException tnfe) {}
+
+ /* list tables */
+ ListTablesRequest listTables =
+ new ListTablesRequest();
+ ListTablesResult lres = handle.listTables(listTables);
+ /*
+ * the test cases don't yet clean up so there may be additional
+ * tables present, be flexible in this assertion.
+ */
+ assertTrue(lres.getTables().length >= 1);
+ assertNotNull(lres.toString());
+
+ /* getTableUsage. It won't return much in test mode */
+ if (!onprem) {
+ TableUsageRequest gtu = new TableUsageRequest()
+ .setTableName("testusers").setLimit(2)
+ .setEndTime(System.currentTimeMillis());
+ TableUsageResult gtuRes = handle.getTableUsage(gtu);
+ assertNotNull(gtuRes);
+ assertNotNull(gtuRes.getUsageRecords());
+ }
+
+ /* PUT */
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName("testusers");
+
+ PutResult res = handle.put(putRequest);
+ assertNotNull("Put failed", res.getVersion());
+ assertWriteKB(res);
+ /* put a few more. set TTL to test that path */
+ putRequest.setTTL(TimeToLive.ofHours(2));
+ for (int i = 20; i < 30; i++) {
+ value.put("id", i);
+ handle.put(putRequest);
+ }
+
+ /*
+ * Test ReturnRow for simple put of a row that exists. 2 cases:
+ * 1. unconditional (will return info)
+ * 2. if absent (will return info)
+ */
+ value.put("id", 20);
+ putRequest.setReturnRow(true);
+ PutResult pr = handle.put(putRequest);
+ assertNotNull(pr.getVersion()); /* success */
+ assertNotNull(pr.getExistingVersion());
+ assertNotNull(pr.getExistingValue());
+ assertTrue(pr.getExistingModificationTime() != 0);
+ assertReadKB(pr);
+ assertWriteKB(pr);
+
+ putRequest.setOption(Option.IfAbsent);
+ pr = handle.put(putRequest);
+ assertNull(pr.getVersion()); /* failure */
+ assertNotNull(pr.getExistingVersion());
+ assertNotNull(pr.getExistingValue());
+ assertTrue(pr.getExistingModificationTime() != 0);
+ assertReadKB(pr);
+
+ /* clean up */
+ putRequest.setReturnRow(false);
+ putRequest.setOption(null);
+
+ /* GET */
+ GetRequest getRequest = new GetRequest()
+ .setKey(key)
+ .setTableName("testusers");
+
+ GetResult res1 = handle.get(getRequest);
+ assertNotNull("Get failed", res1.getJsonValue());
+ assertReadKB(res1);
+
+ /* DELETE */
+ DeleteRequest delRequest = new DeleteRequest()
+ .setKey(key)
+ .setTableName("testusers");
+
+ DeleteResult del = handle.delete(delRequest);
+ assertTrue("Delete failed", del.getSuccess());
+ assertWriteKB(del);
+
+ /* GET -- no row, it was removed above */
+ getRequest.setTableName("testusers");
+ res1 = handle.get(getRequest);
+ assertNull(res1.getValue());
+ assertReadKB(res1);
+
+ /* GET -- no table */
+ try {
+ getRequest.setTableName("not_a_table");
+ res1 = handle.get(getRequest);
+ fail("Attempt to access missing table should have thrown");
+ } catch (TableNotFoundException nse) {
+ /* success */
+ }
+
+ /* PUT -- invalid row -- this will throw */
+ try {
+ value.remove("id");
+ value.put("not_a_field", 1);
+ res = handle.put(putRequest);
+ fail("Attempt to put invalid row should have thrown");
+ } catch (IllegalArgumentException iae) {
+ /* success */
+ }
+ } catch (Exception e) {
+ checkErrorMessage(e);
+ e.printStackTrace();
+ fail("Exception in test");
+ }
+ }
+
+ @Test
+ public void testCaseSensitivity()
+ throws Exception {
+
+ assumeKVVersion("testCaseSensitivity", 23, 3, 0);
+
+ String ddl =
+ "create table foo(id integer, S string, " +
+ "primary key(Id, s))";
+ tableOperation(
+ handle,
+ ddl,
+ new TableLimits(500, 500, 50),
+ TableResult.State.ACTIVE,
+ 20000);
+
+ MapValue val = new MapValue().put("id", 1).put("s", "xyz");
+ PutRequest putReq = new PutRequest()
+ .setTableName("foo")
+ .setValue(val);
+ handle.put(putReq);
+
+ GetRequest getRequest = new GetRequest()
+ .setKey(new MapValue().put("id", 1).put("s", "xyz"))
+ .setTableName("foo");
+ GetResult res = handle.get(getRequest);
+ /*
+ * "Id" in pkey should have been turned into "id" and "s" to "S"
+ */
+ assertTrue(res.getValue().contains("id"));
+ assertFalse(res.getValue().contains("Id"));
+ assertTrue(res.getValue().contains("S"));
+ assertFalse(res.getValue().contains("s"));
+ }
+
+ @Test
+ public void testSimpleThroughput() throws Exception {
+
+ assumeTrue(onprem == false);
+
+ final String create = "create table testusersTp(id integer," +
+ "name string, primary key(id))";
+
+ /* Create a table */
+ TableResult tres = tableOperation(
+ handle,
+ create,
+ new TableLimits(500, 500, 50),
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ MapValue value = new MapValue().put("id", 10).put("name", "jane");
+
+ /*
+ * Handle some Put cases
+ */
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName("testusersTp");
+
+
+ PutResult res = handle.put(putRequest);
+ assertNotNull("Put failed", res.getVersion());
+ int origRead = res.getReadKB();
+ int origWrite = res.getWriteKB();
+ assertEquals(1, origWrite);
+ assertEquals(0, origRead);
+
+ /*
+         * do a second put. Read should still be 0; write doubles because
+         * an update is charged for both deleting the old record and
+         * writing the new one
+ */
+ res = handle.put(putRequest);
+ int newRead = res.getReadKB();
+ int newWrite = res.getWriteKB();
+ assertEquals(2*origWrite, newWrite);
+ assertEquals(0, newRead);
+
+ /* set return row and expect read unit is 1 */
+ putRequest.setReturnRow(true);
+ res = handle.put(putRequest);
+ newRead = res.getReadKB();
+ newWrite = res.getWriteKB();
+ assertEquals(2*origWrite, newWrite);
+ assertEquals(1, newRead);
+
+ /* make it ifAbsent and verify read and write consumption */
+ putRequest.setOption(PutRequest.Option.IfAbsent);
+ res = handle.put(putRequest);
+ assertNull("Put should have failed", res.getVersion());
+ /* use read units because in a write, readKB != readUnits */
+ newRead = res.getReadUnits();
+ newWrite = res.getWriteKB();
+ /*
+         * no write, but the read charge is the minimum read (1KB, for the
+         * version check) plus the record size (for returning the existing
+         * value), hence 1 + origWrite
+ */
+ assertEquals(0, newWrite);
+ assertEquals(1 + origWrite, newRead);
+ }
+
+ /**
+ * Test bad urls.
+ */
+ @Test
+ public void testBadURL() throws Exception {
+ /* bad port */
+ tryURL(new URL("http", getProxyHost(), getProxyPort() + 7, "/"));
+ /* bad host */
+ tryURL(new URL("http", "nohost", getProxyPort(), "/"));
+ }
+
+ private void tryURL(URL url) {
+ try {
+ NoSQLHandleConfig config = new NoSQLHandleConfig(url);
+ setHandleConfig(config);
+ NoSQLHandle myhandle = getHandle(config);
+ myhandle.close();
+ fail("Connection should have failed");
+ } catch (Exception e) {
+ /* TODO: check for specific exception */
+ /* success */
+ }
+ }
+
+ /**
+ * Test that throttling happens. This requires its own table and
+ * handle.
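+     * The private handle is configured with retries disabled so that
+     * throttling exceptions surface instead of being retried internally.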
+ */
+ @Test
+ public void throttleTest() throws Exception {
+
+ assumeTrue(onprem == false);
+
+ /* this test is invalid with proxy-level rate limiting */
+ assumeTrue(Boolean.getBoolean(PROXY_DRL_ENABLED_PROP) == false);
+
+ /*
+ * Create a new handle configured with no retries
+ */
+ NoSQLHandleConfig config = new NoSQLHandleConfig(getProxyEndpoint());
+ setHandleConfig(config);
+
+ /*
+ * no retries
+ */
+ config.configureDefaultRetryHandler(0, 0);
+
+ /*
+ * Open the handle
+ */
+ NoSQLHandle myhandle = getHandle(config);
+
+ MapValue key = new MapValue().put("id", 10);
+ MapValue value = new MapValue().put("id", 10).put("name", "jane");
+
+ /* Create a table with small throughput */
+ TableResult tres = tableOperation(
+ myhandle,
+ "create table testusersThrottle(id integer, " +
+ "name string, primary key(id))",
+ new TableLimits(1, 1, 50),
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+
+ int num = 0;
+ try {
+ while (true) {
+ /* PUT */
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName("testusersThrottle");
+ myhandle.put(putRequest);
+ num++;
+ if (num > 1000) {
+ fail("Throttling exception should have been thrown");
+ }
+ }
+ } catch (WriteThrottlingException wte) {
+ checkErrorMessage(wte);
+ /* success */
+ }
+ num = 0;
+ try {
+ while (true) {
+ /* GET */
+ GetRequest getRequest = new GetRequest()
+ .setKey(key)
+ .setTableName("testusersThrottle");
+ GetResult gres = myhandle.get(getRequest);
+ assertNotNull(gres.getValue());
+ num++;
+ if (num > 1000) {
+ fail("Throttling exception should have been thrown");
+ }
+ }
+ } catch (ReadThrottlingException wte) {
+ checkErrorMessage(wte);
+ /* success */
+ }
+ /* Query based on single partition scanning */
+ String query = "select * from testusersThrottle where id = 10";
+ PrepareRequest prepReq = new PrepareRequest().setStatement(query);
+ PrepareResult prepRes = handle.prepare(prepReq);
+ assertTrue("Prepare statement failed",
+ prepRes.getPreparedStatement() != null);
+
+ /* Query with size limit */
+ num = 0;
+ try {
+ while (true) {
+ QueryRequest queryReq = new QueryRequest()
+ .setPreparedStatement(prepRes)
+ .setMaxReadKB(3);
+ /* Query */
+ QueryResult res = myhandle.query(queryReq);
+ assertTrue(res.getResults().size() == 1);
+ num++;
+ if (num > 1000) {
+ fail("Throttling exception should have been thrown");
+ }
+ }
+ } catch (ReadThrottlingException rte) {
+ checkErrorMessage(rte);
+ /* success */
+ }
+ /* Alter table limit to increase read limit */
+ tres = tableOperation(
+ myhandle,
+ null,
+ new TableLimits(10, 200, 50),
+ "testusersThrottle",
+ TableResult.State.ACTIVE,
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName("testusersThrottle");
+ for (int i = 0; i < 200; i++) {
+ value.put("id", 100 + i);
+ myhandle.put(putRequest);
+ }
+
+ /* prepare should get throttled */
+ try {
+ for (int i = 0; i < 1000; i++) {
+ query = "select * from testusersThrottle where name = \"jane\"";
+ prepReq = new PrepareRequest().setStatement(query);
+ prepRes = myhandle.prepare(prepReq);
+ }
+ fail("Throttling exception should have been thrown");
+ } catch (Exception rte) {
+ checkErrorMessage(rte);
+ }
+
+ /* Query based on all partitions scanning */
+ /* Use the original handle to get throttling retries */
+ query = "select * from testusersThrottle where name = \"jane\"";
+ prepReq = new PrepareRequest().setStatement(query);
+ prepRes = handle.prepare(prepReq);
+ assertTrue("Prepare statement failed",
+ prepRes.getPreparedStatement() != null);
+
+ /* Query with size limit */
+ Thread.sleep(2000); /* try to avoid previous throttling */
+ num = 0;
+ try {
+ QueryRequest queryReq = new QueryRequest()
+ .setPreparedStatement(prepRes)
+ .setMaxReadKB(20);
+ do {
+ /* Query */
+ QueryResult res = myhandle.query(queryReq);
+
+ /* it's possible to get 0 results and continuation key */
+ num += res.getResults().size();
+ if (num > 1000) {
+ fail("Throttling exception should have been thrown");
+ }
+ } while (!queryReq.isDone());
+ } catch (ReadThrottlingException rte) {
+ /* success */
+ checkErrorMessage(rte);
+ }
+ assertTrue(num > 0);
+
+ /* Query without limits */
+ Thread.sleep(1000);
+ num = 0;
+ try {
+ while (true) {
+ QueryRequest queryReq = new QueryRequest()
+ .setPreparedStatement(prepRes);
+ /* Query */
+ QueryResult res = myhandle.query(queryReq);
+ assertTrue(res.getResults().size() > 0);
+ num++;
+ if (num > 1000) {
+ fail("Throttling exception should have been thrown");
+ }
+ }
+ } catch (ReadThrottlingException rte) {
+ /* success */
+ checkErrorMessage(rte);
+ }
+ }
+
+ @Test
+ public void droppedTableTest() throws Exception {
+ assumeTrue("Skipping droppedTableTest for minicloud test",
+ !cloudRunning);
+
+ final String CREATE_TABLE = "create table if not exists testDropped(" +
+ "id integer, name string, primary key(id))";
+
+ /* create a table */
+ TableResult tres;
+ tres = tableOperation(handle,
+ CREATE_TABLE,
+ new TableLimits(500, 500, 5),
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ /* start a background thread to drop the table after 5 seconds */
+ Thread bg = new Thread(()-> {
+ try {
+ Thread.sleep(5_000);
+ } catch (Exception e) {}
+ tableOperation(handle, "drop table testDropped",
+ null, 20000);
+ });
+ bg.start();
+
+ /*
+ * Run gets/puts for 10 seconds. After about 5 seconds they should
+ * start failing and consistently fail thereafter.
+ * Note: this test is mainly designed to exercise the
+ * MetadataNotFoundException retry logic in the proxy
+ */
+ MapValue key = new MapValue().put("id", 10);
+ MapValue value = new MapValue().put("id", 10).put("name", "jane");
+ long endTimeMs = System.currentTimeMillis() + 10_000;
+ while (true) {
+ /* PUT */
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTimeout(1000)
+ .setTableName("testDropped");
+
+ try {
+ handle.put(putRequest);
+ } catch (TableNotFoundException tnfe) {
+ /* expected */
+ }
+
+ /* GET */
+ GetRequest getRequest = new GetRequest()
+ .setKey(key)
+ .setTimeout(2000)
+ .setTableName("testDropped");
+
+ try {
+ handle.get(getRequest);
+ } catch (TableNotFoundException tnfe) {
+ /* expected */
+ }
+
+ if (System.currentTimeMillis() > endTimeMs) {
+ break;
+ }
+
+ try {
+ Thread.sleep(50);
+ } catch (Exception e) {
+ break;
+ }
+ }
+ bg.join(10000);
+ }
+
+ @Test
+ public void ddlTest() throws Exception {
+ final String CREATE_TABLE = "create table if not exists testusersX(" +
+ "id integer, name string, primary key(id))";
+ final String CREATE_TABLE_NO_IFNOTEXISTS = "create table testusersX(" +
+ "id integer, name string, primary key(id))";
+ final String CREATE_TABLE_SCHEMA_DIFF =
+ "create table if not exists testusersX(" +
+ "id integer, name string, age integer, primary key(id))";
+
+ final String BAD_DDL = "create tab x(id integer, " +
+ "name string, primary key(id))";
+ final String ALTER_DDL = "alter table testusersX(add name1 string)";
+ final String BAD_ADD_INDEX =
+ "create index idx on testusers_not_here(name)";
+ final String BAD_ADD_TEXT_INDEX =
+ "create fulltext index idxText on testusersX(name)";
+ final String ADD_INDEX = "create index idx on testusersX(name)";
+ final String DROP_INDEX = "drop index idx on testusersX";
+ final String DROP_INDEX_IFX = "drop index if exists idx on testusersX";
+ final String DROP_DDL = "drop table testusersX";
+
+ TableResult tres;
+
+ /*
+ * Bad syntax
+ */
+ try {
+ tres = tableOperation(handle,
+ BAD_DDL,
+ null,
+ 20000);
+ fail("Expected IAE");
+ } catch (IllegalArgumentException iae) {
+ checkErrorMessage(iae);
+ }
+
+ /*
+ * Table doesn't exist
+ */
+ tres = tableOperation(handle,
+ BAD_ADD_INDEX,
+ TableResult.State.ACTIVE,
+ TableNotFoundException.class);
+
+ /*
+ * create the table to alter it
+ */
+ tres = tableOperation(handle,
+ CREATE_TABLE,
+ new TableLimits(5000, 5000, 50),
+ 20000);
+
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ /*
+ * Table already exists.
+ */
+ tres = tableOperation(handle,
+ CREATE_TABLE_NO_IFNOTEXISTS,
+ new TableLimits(5000, 5000, 50),
+ null, TableResult.State.ACTIVE,
+ TableExistsException.class);
+
+ /*
+ * "create table if not exists" should not check schema for existing
+ * table.
+ */
+ tres = tableOperation(handle,
+ CREATE_TABLE_SCHEMA_DIFF,
+ new TableLimits(5000, 5000, 50),
+ 20000);
+
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ /*
+ * Add index
+ */
+ tres = tableOperation(handle,
+ ADD_INDEX,
+ null,
+ 20000);
+
+ /*
+ * Index already exists.
+ */
+ tres = tableOperation(handle,
+ ADD_INDEX,
+ TableResult.State.ACTIVE,
+ IndexExistsException.class);
+
+ /*
+ * FullText index is not allowed.
+ */
+ tres = tableOperation(handle,
+ BAD_ADD_TEXT_INDEX,
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /*
+ * Drop index
+ */
+ tres = tableOperation(handle,
+ DROP_INDEX,
+ null,
+ 20000);
+
+ /*
+ * Drop index again, using if exists
+ */
+ tres = tableOperation(handle,
+ DROP_INDEX_IFX,
+ null,
+ 20000);
+
+ /*
+ * Alter the table
+ */
+ tres = tableOperation(handle,
+ ALTER_DDL,
+ null,
+ 20000);
+ /*
+ * Alter the table limits
+ */
+ if (!onprem) {
+ tres = tableOperation(handle,
+ null,
+ new TableLimits(50, 50, 10),
+ "testusersX",
+ TableResult.State.ACTIVE,
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+ assertEquals(50, tres.getTableLimits().getReadUnits());
+ }
+
+ /*
+ * drop the table
+ * NOTE: this sequence may, or may not work with the real Tenant
+ * Manager. The local/test version only updates the state of its
+ * cached tables on demand. I.e. if a table is dropped and never
+ * "gotten" again, it could live in the cache in DROPPING state for
+ * a very long time. TODO: time out cache entries.
+ */
+ tres = tableOperation(handle,
+ DROP_DDL,
+ null,
+ 20000);
+
+ /*
+ * the table should be gone now
+ */
+ try {
+ GetTableRequest getTable =
+ new GetTableRequest().setTableName("testusersX");
+ tres = handle.getTable(getTable);
+ fail("Table should not be found");
+ } catch (TableNotFoundException tnfe) {
+ checkErrorMessage(tnfe);
+ }
+
+ /*
+ * Unsupported ddl operations
+ */
+
+        /* CREATE USER */
+ tres = tableOperation(handle,
+ "CREATE USER guest IDENTIFIED BY \"welcome\"",
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+ /* ALTER USER */
+ tres = tableOperation(handle,
+ "ALTER USER guest ACCOUNT LOCK",
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /* DROP USER */
+ tres = tableOperation(handle,
+ "DROP USER guest",
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /* CREATE ROLE */
+ tres = tableOperation(handle,
+ "CREATE ROLE employee",
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /* DROP ROLE */
+ tres = tableOperation(handle,
+ "DROP ROLE employee",
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /* GRANT */
+ tres = tableOperation(handle,
+ "GRANT readwrite TO USER guest",
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /* REVOKE */
+ tres = tableOperation(handle,
+ "REVOKE readwrite FROM USER guest",
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /* SHOW */
+ tres = tableOperation(handle,
+ "SHOW TABLES",
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /* DESCRIBE */
+ tres = tableOperation(handle,
+ "DESCRIBE TABLE testusersX",
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ /*
+ * DML operation with TableRequest
+ */
+ tres = tableOperation(handle,
+ "SELECT * FROM testusersX",
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+
+ tres = tableOperation(handle,
+ "UPDATE testusersX SET name = \"test\" " +
+ "where id = 1",
+ TableResult.State.ACTIVE,
+ IllegalArgumentException.class);
+ }
+
+ @Test
+ public void testGetProvisionedTable() throws Exception {
+ TableLimits tableLimits = new TableLimits(10, 20, 1);
+ testGetTable(tableLimits, tableLimits);
+ }
+
+ @Test
+ public void testGetAutoScalingTable() throws Exception {
+ if (cloudRunning && tenantLimits == null) {
+ /* Skip this test if tenantLimits is not provided */
+ return;
+ }
+
+ TableLimits tableLimits = new TableLimits(1);
+ TableLimits expectedLimits;
+ if (cloudRunning) {
+ expectedLimits = new TableLimits(
+ tenantLimits.getAutoScalingTableReadUnits(),
+ tenantLimits.getAutoScalingTableWriteUnits(),
+ tableLimits.getStorageGB(),
+ ON_DEMAND);
+ } else {
+ expectedLimits = new TableLimits(
+ Integer.MAX_VALUE - 1,
+ Integer.MAX_VALUE - 1,
+ tableLimits.getStorageGB(),
+ ON_DEMAND);
+ }
+ testGetTable(tableLimits, expectedLimits);
+ }
+
+ private void testGetTable(TableLimits tableLimits,
+ TableLimits expectedLimits) throws Exception {
+ final String tableName = "getTableTest";
+ final String statement = "create table if not exists " + tableName +
+ "(id integer, name string, primary key(id))";
+
+ TableRequest tableRequest = new TableRequest()
+ .setStatement(statement)
+ .setTableLimits(tableLimits)
+ .setTimeout(15000);
+
+ TableResult tres = handle.tableRequest(tableRequest);
+ TableLimits resultLimits = tres.getTableLimits();
+ if (resultLimits != null) {
+ assertEquals(expectedLimits.getStorageGB(),
+ resultLimits.getStorageGB());
+ assertEquals(expectedLimits.getMode(),
+ resultLimits.getMode());
+ }
+
+ /*
+         * Get table with an operation id but an invalid table name;
+         * expect IllegalArgumentException.
+ */
+ GetTableRequest getReq = new GetTableRequest()
+ .setTableName("invalid")
+ .setOperationId(tres.getOperationId());
+ try {
+ tres = handle.getTable(getReq);
+            fail("Expected IllegalArgumentException for invalid table name");
+ } catch (IllegalArgumentException ex) {
+ /* expected */
+ }
+ tres.waitForCompletion(handle, 20000, 1000);
+
+ /*
+ * Get table, check the schema text contains the table name.
+ */
+ getReq = new GetTableRequest().setTableName(tableName);
+ tres = handle.getTable(getReq);
+ assertTableOcid(tres.getTableId());
+ assertNotNull(tres.getSchema());
+ resultLimits = tres.getTableLimits();
+ if (resultLimits != null) {
+ assertEquals(expectedLimits.getReadUnits(),
+ resultLimits.getReadUnits());
+ assertEquals(expectedLimits.getWriteUnits(),
+ resultLimits.getWriteUnits());
+ assertEquals(expectedLimits.getStorageGB(),
+ resultLimits.getStorageGB());
+ assertEquals(expectedLimits.getMode(),
+ resultLimits.getMode());
+ }
+ assertTrue(tres.getSchema().contains(tableName));
+ }
+
+ @Test
+ public void testListTables() {
+ final int numTables = 8;
+ final String ddlFmt =
+ "create table %s (id integer, name string, primary key(id))";
+ final TableLimits tableLimits = new TableLimits(10, 10, 1);
+ final String[] namePrefix = new String[] {"USERB", "userA", "userC"};
+
+ if (onprem) {
+ handle.doSystemRequest("create namespace NS001", 20000, 1000);
+ }
+
+ /*
+ * create tables
+ */
+ TableResult tres;
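+        /*
+         * A TreeSet keeps the names sorted; the cloud service returns
+         * tables sorted by name, which is verified below.
+         */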
+        Set<String> nameSorted = new TreeSet<>();
+ for (int i = 0; i < numTables; i++) {
+ /* if onprem, create a mix of tables, some with namespaces */
+ String tableName;
+ if (onprem && (i % 2) == 1) {
+ tableName = "NS001:" + namePrefix[i % namePrefix.length] + i;
+ } else {
+ tableName = namePrefix[i % namePrefix.length] + i;
+ }
+ tres = tableOperation(handle,
+ String.format(ddlFmt, tableName),
+ tableLimits,
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+ nameSorted.add(tableName);
+ }
+        List<String> nameSortedList = new ArrayList<>(nameSorted);
+
+ /*
+ * List all tables
+ */
+ ListTablesRequest req = new ListTablesRequest();
+ ListTablesResult res = handle.listTables(req);
+        List<String> returnedTableNames = Arrays.asList(res.getTables());
+ if (cloudRunning) {
+ /* verify tables sorted by name */
+ assertEquals(nameSortedList, returnedTableNames);
+ } else {
+ /* verify all added tables are in list */
+ for (String name : nameSorted) {
+ assertTrue("Table " + name + " missing from listTables",
+ returnedTableNames.contains(name));
+ }
+ }
+
+ /*
+ * List all tables with limit
+ */
+ int[] values = new int[] {0, 6, 2, 1};
+        List<String> tables;
+ for (int limit : values) {
+ tables = doListTables(limit);
+ if (cloudRunning) {
+ /* verify tables sorted by name */
+ assertEquals(nameSortedList, tables);
+ }
+ }
+ }
+
+ /* Run list tables with limit specified */
+    private List<String> doListTables(int limit) {
+        List<String> tables = new ArrayList<>();
+
+ ListTablesRequest req = new ListTablesRequest();
+ req.setLimit(limit);
+ ListTablesResult res;
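+        /*
+         * Page through the results: each call returns at most 'limit'
+         * tables, continuing from lastReturnedIndex until a short page
+         * is returned. A limit of 0 means unlimited, so a single call
+         * returns everything.
+         */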
+        while (true) {
+ res = handle.listTables(req);
+ if (res.getTables().length > 0) {
+ tables.addAll(Arrays.asList(res.getTables()));
+ }
+
+ if (limit == 0 || res.getTables().length < limit) {
+ break;
+ }
+ assertEquals(limit, res.getTables().length);
+ req.setStartIndex(res.getLastReturnedIndex());
+ }
+ return tables;
+ }
+
+ /**
+ * Tests serialization of types, including some coercion to schema
+ * types in the proxy.
+ */
+ @Test
+ public void typeTest() throws Exception {
+
+ final String TABLE_CREATE =
+ "create table if not exists Types( " +
+ "id integer, " +
+ "primary key(id), " +
+ "longField long, " +
+ "doubleField double, " +
+ "stringField string, " +
+ "numberField number, " +
+ "enumField enum(a,b,c)" +
+ ")";
+
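+        /*
+         * Each JSON value below is coerced by the proxy to the schema
+         * type noted in the trailing comment.
+         */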
+ final String jsonString =
+ "{" +
+ "\"id\":1, " +
+ "\"longField\": 123 ," + // int => long
+ "\"doubleField\":4 ," + // int => double
+ "\"stringField\":\"abc\" ," + // no coercion
+ "\"numberField\":4.5 ," + // double => number
+ "\"enumField\":\"b\"" + // string => enum
+ "}";
+ TableResult tres;
+
+ tres = tableOperation(handle,
+ TABLE_CREATE,
+ new TableLimits(50, 50, 50),
+ TableResult.State.ACTIVE,
+ 20000);
+
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+        PutRequest pr = new PutRequest()
+            .setValueFromJson(jsonString, null)
+            .setTableName("Types");
+ PutResult pres = handle.put(pr);
+ assertNotNull(pres.getVersion());
+ }
+
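+    /**
+     * Drops and recreates a table, then verifies that puts succeed both
+     * before and after the recreate (likely exercising any cached table
+     * state in the proxy).
+     */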
+ @Test
+ public void recreateTest() throws Exception {
+ final String CREATE_TABLE =
+ "create table recreate( " +
+ "id integer, " +
+ "primary key(id), " +
+ "name string)";
+ final String DROP_TABLE = "drop table recreate";
+ TableResult tres = tableOperation(handle,
+ CREATE_TABLE,
+ new TableLimits(50, 50, 50),
+ 20000);
+
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ PutRequest pr = new PutRequest()
+ .setTableName("recreate")
+ .setValue(new MapValue().put("id", 1).put("name", "joe"));
+ PutResult pres = handle.put(pr);
+ assertNotNull(pres.getVersion());
+
+ tres = tableOperation(handle,
+ DROP_TABLE,
+ null,
+ 20000);
+
+ tres = tableOperation(handle,
+ CREATE_TABLE,
+ new TableLimits(50, 50, 50),
+ 20000);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+
+ pres = handle.put(pr);
+ assertNotNull(pres.getVersion());
+ }
+
+ /**
+ * This test does a lot of simple operations in a loop in multiple threads,
+ * looking for HTTP transport problems. This is probably temporary.
+ */
+ @Test
+ public void httpTest() {
+ ExecutorService executor = Executors.newFixedThreadPool(3);
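+        /* queue 6 identical tasks on 3 threads to keep requests overlapping */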
+        Collection<Callable<Void>> tasks = new ArrayList<Callable<Void>>();
+ for (int i = 0; i < 6; i++) {
+            tasks.add(new Callable<Void>() {
+ @Override
+ public Void call() {
+ doHttpTest();
+ return null;
+ }
+ });
+ }
+ try {
+            List<Future<Void>>