diff --git a/patches/nonexistant-rr.patch b/patches/nonexistant-rr.patch
new file mode 100644
index 0000000..980bfbf
--- /dev/null
+++ b/patches/nonexistant-rr.patch
@@ -0,0 +1,22 @@
+Reject operations on a nonexistent key when the RejectRules specify versionLeGiven or versionNeGiven
+
+From: nobody
+
+
+---
+ src/ObjectManager.cc |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/ObjectManager.cc b/src/ObjectManager.cc
+index 9c2d3b0c..5f4a6e79 100644
+--- a/src/ObjectManager.cc
++++ b/src/ObjectManager.cc
+@@ -2893,7 +2893,7 @@ Status
+ ObjectManager::rejectOperation(const RejectRules* rejectRules, uint64_t version)
+ {
+     if (version == VERSION_NONEXISTENT) {
+-        if (rejectRules->doesntExist)
++        if (rejectRules->doesntExist || rejectRules->versionLeGiven || rejectRules->versionNeGiven)
+             return STATUS_OBJECT_DOESNT_EXIST;
+         return STATUS_OK;
+     }
diff --git a/patches/series b/patches/series
index a4f6898..c290fb6 100644
--- a/patches/series
+++ b/patches/series
@@ -13,3 +13,4 @@ bindings-migration.patch
 log-sse42-status.patch
 rpcwrapper-logging-fix.patch
 table-enumerator.patch
+nonexistant-rr.patch
diff --git a/testing/cluster_test_utils.py b/testing/cluster_test_utils.py
index 48ef5ed..a3da277 100644
--- a/testing/cluster_test_utils.py
+++ b/testing/cluster_test_utils.py
@@ -149,6 +149,75 @@ def drop_tables(ensemble, table_names):
     for table_name in table_names:
         r.drop_table(table_name)
 
+def output_logs_detached(docker_containers, path="/src/tmp"):
+    if not os.path.exists(path):
+        os.makedirs(path)
+    for container in docker_containers:
+        outfile = '%s/%s.out' % (path, container.name)
+        f = open(outfile, 'wb')
+        # Stream the log and write it out line by line; this uses less
+        # memory than fetching the whole container log as a single string.
+        # follow=False closes the stream at the current end of the log, so
+        # we finish this container's log before moving on to the next one.
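+        # docker-py yields the log as raw bytes, hence the 'wb' file mode.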
+        for line in container.logs(stream=True, follow=False):
+            f.write(line)
+        f.close()
+
+def output_zk_detached(ensemble, path="/src/tmp"):
+    if not os.path.exists(path):
+        os.makedirs(path)
+    zk_client = get_zookeeper_client(ensemble)
+    zk_table_configs = [
+        ZkTableConfiguration(
+            outfile = "config.out",
+            zk_path = "/zookeeper/config",
+            proto = "string",
+            is_leaf = True),
+        ZkTableConfiguration(
+            outfile = "quota.out",
+            zk_path = "/zookeeper/quota",
+            proto = "string",
+            is_leaf = True),
+        ZkTableConfiguration(
+            outfile = "coordinatorClusterClock.out",
+            zk_path = "/ramcloud/main/coordinatorClusterClock",
+            proto = CoordinatorClusterClock_pb2.CoordinatorClusterClock(),
+            is_leaf = True),
+        ZkTableConfiguration(
+            outfile = "tables.out",
+            zk_path = "/ramcloud/main/tables",
+            proto = Table_pb2.Table(),
+            is_leaf = False),
+        ZkTableConfiguration(
+            outfile = "tableManager.out",
+            zk_path = "/ramcloud/main/tableManager",
+            proto = TableManager_pb2.TableManager(),
+            is_leaf = True),
+        ZkTableConfiguration(
+            outfile = "coordinator.out",
+            zk_path = "/ramcloud/main/coordinator",
+            proto = "string",
+            is_leaf = True),
+        ZkTableConfiguration(
+            outfile = "servers.out",
+            zk_path = "/ramcloud/main/servers",
+            proto = ServerListEntry_pb2.ServerListEntry(),
+            is_leaf = False),
+        ZkTableConfiguration(
+            outfile = "coordinatorUpdateManager.out",
+            zk_path = "/ramcloud/main/coordinatorUpdateManager",
+            proto = CoordinatorUpdateInfo_pb2.CoordinatorUpdateInfo(),
+            is_leaf = True),
+        ZkTableConfiguration(
+            outfile = "clientLeaseAuthority.out",
+            zk_path = "/ramcloud/main/clientLeaseAuthority",
+            proto = "string",
+            is_leaf = False),
+    ]
+    for zk_table_config in zk_table_configs:
+        zk_table_config.dump(path, zk_client)
+
 # ClusterTest Usage in Python interpreter:
 # >>> import cluster_test_utils as ctu
 # >>> x = ctu.ClusterTest()
diff --git a/testing/ramcloud_test_cluster.py b/testing/ramcloud_test_cluster.py
index 44843a8..aa1c3cf 100644
--- a/testing/ramcloud_test_cluster.py
+++ b/testing/ramcloud_test_cluster.py
@@ -2,17 +2,32 @@
 import argparse
 import sys
 
+# If you're trying to make fake data in RAMCloud, this works from the Python 3
+# interpreter, assuming you started up the default 3-node test cluster:
+#
+# >>> import ramcloud
+# >>> import cluster_test_utils as ctu
+# >>> rc = ramcloud.RAMCloud()
+# >>> rc.connect('zk:10.0.1.1:2181,10.0.1.2:2181,10.0.1.3:2181', 'main')
+# >>> rc.create_table('test')
+# >>> tid = rc.get_table_id('test')
+# >>> rc.write(tid, 'testKey', 'testValue')
+# >>> rc.read(tid, 'testKey')
+
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--action', '-a', metavar='A', type=str, default="status",
-                        help="Defines the action to take: status, reset, start, stop")
+                        help="Defines the action to take: status, reset, log, start, stop")
     parser.add_argument('--nodes', '-n', type=int, default=3,
                         help="Number of zk, rc-coordinator, and rc-server instances to bring up. Only relevant when there's no cluster up yet. Default is 3")
Default is 3") + parser.add_argument('--path', '-p', type=str, default="/src/tmp", + help="Path to place logs in when action is set to \"log\"") args = parser.parse_args() print("action =",args.action) print("nodes =",args.nodes) +print("path =",args.path) if (args.action == "start"): x = ctu.ClusterTest() x.setUp(num_nodes = args.nodes) @@ -21,6 +36,14 @@ elif (args.action == "stop"): docker_network, docker_containers = ctu.get_status() ctu.destroy_network_and_containers(docker_network, docker_containers) +elif (args.action == "log"): + docker_network, docker_containers = ctu.get_status() + if (not docker_network or not docker_containers): + print("No network or containers currently up to log") + exit() + ensemble = ctu.get_ensemble(len(docker_containers)) + ctu.output_logs_detached(docker_containers, args.path) + ctu.output_zk_detached(ensemble, args.path) elif (args.action == "reset"): docker_network, docker_containers = ctu.get_status() if (not docker_network):