diff --git a/TestInput.py b/TestInput.py
index d31993952..6b89e1029 100644
--- a/TestInput.py
+++ b/TestInput.py
@@ -27,6 +27,7 @@ def __init__(self):
         self.test_params = {}
         self.tuq_client = {}
         self.elastic = []
+        self.cbas = []
 #servers , each server can have u,p,port,directory

     def param(self, name, *args):
@@ -192,6 +193,7 @@ def parse_from_file(file):
        client_ips = []
        input.dashboard = []
        input.ui_conf = {}
+       cbas = []
        for section in sections:
            result = re.search('^cluster', section)
            if section == 'servers':
@@ -214,6 +216,8 @@ def parse_from_file(file):
                input.tuq_client = TestInputParser.get_tuq_config(config, section)
            elif section == 'elastic':
                input.elastic = TestInputParser.get_elastic_config(config, section)
+           elif section == 'cbas':
+               input.cbas = TestInputParser.get_cbas_config(config, section)
            elif result is not None:
                cluster_list = TestInputParser.get_server_ips(config, section)
                cluster_ips.extend(cluster_list)
@@ -340,6 +344,15 @@ def get_elastic_config(config, section):
                server.es_password = config.get(section, option)
        return server

+    @staticmethod
+    def get_cbas_config(config, section):
+        server = TestInputServer()
+        options = config.options(section)
+        for option in options:
+            if option == 'ip':
+                server.ip = config.get(section, option)
+        return server
+
    @staticmethod
    def get_server(ip, config):
        server = TestInputServer()
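Note that the new get_cbas_config mirrors get_elastic_config but, as added here, consumes only the ip option of a [cbas] section; port and credentials would have to be added later if CBAS nodes need them. A minimal, self-contained sketch of the path such a section takes through the parser (Python 3's configparser stands in for the ConfigParser instance that parse_from_file builds; the IP is a made-up example):

    import configparser

    INI = "[cbas]\nip:172.23.0.100\n"

    config = configparser.ConfigParser()
    config.read_string(INI)

    # Mirrors the loop in get_cbas_config: only 'ip' is consumed for now.
    for option in config.options('cbas'):
        if option == 'ip':
            cbas_ip = config.get('cbas', option)

    print(cbas_ip)  # -> 172.23.0.100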
diff --git a/b/resources/1-node-fts-rqgp1-template.ini b/b/resources/1-node-fts-rqgp1-template.ini
index 984a0dbb6..110af46e5 100644
--- a/b/resources/1-node-fts-rqgp1-template.ini
+++ b/b/resources/1-node-fts-rqgp1-template.ini
@@ -12,7 +12,7 @@ rest_password:password
 1:_1

 [elastic]
-ip:172.23.106.245
+ip:172.23.109.54
 port:9200
 es_username:Administrator
 es_password:password
diff --git a/b/resources/1-node-template-all-services.ini b/b/resources/1-node-template-all-services.ini
new file mode 100644
index 000000000..7cd394e7a
--- /dev/null
+++ b/b/resources/1-node-template-all-services.ini
@@ -0,0 +1,14 @@
+[global]
+username:root
+password:couchbase
+
+[membase]
+rest_username:Administrator
+rest_password:password
+
+[servers]
+1:_1
+
+[_1]
+ip:dynamic
+services:kv,index,n1ql
diff --git a/b/resources/4-nodes-n1ql-template.ini b/b/resources/4-nodes-n1ql-template.ini
new file mode 100644
index 000000000..cce664b33
--- /dev/null
+++ b/b/resources/4-nodes-n1ql-template.ini
@@ -0,0 +1,34 @@
+[global]
+port:8091
+username:root
+password:couchbase
+
+[servers]
+1:vm1
+2:vm2
+3:vm3
+4:vm4
+
+[vm1]
+ip:dynamic
+services:kv,index,n1ql
+
+[vm2]
+ip:dynamic
+services:kv,index,n1ql
+
+[vm3]
+ip:dynamic
+services:kv,n1ql
+
+[vm4]
+ip:dynamic
+services:kv,n1ql
+
+[membase]
+rest_username:Administrator
+rest_password:password
+
+[tuq_client]
+goroot:/root/n1ql/go
+sherlock_path:/opt/couchbase/bin
diff --git a/b/resources/5-nodes-template-dataservices.ini b/b/resources/5-nodes-template-dataservices.ini
new file mode 100644
index 000000000..a37474b44
--- /dev/null
+++ b/b/resources/5-nodes-template-dataservices.ini
@@ -0,0 +1,36 @@
+[global]
+port:8091
+username:root
+password:couchbase
+index_port:9102
+
+[membase]
+rest_username:Administrator
+rest_password:password
+
+[servers]
+1:_1
+2:_2
+3:_3
+4:_4
+5:_5
+
+[_1]
+ip:dynamic
+services:n1ql,kv,index
+
+[_2]
+ip:dynamic
+services:kv
+
+[_3]
+ip:dynamic
+services:kv
+
+[_4]
+ip:dynamic
+services:index
+
+[_5]
+ip:dynamic
+services:index,kv
diff --git a/b/resources/rqg/multiple_table_db/grammar/subquery_fields_multiple_table.yy b/b/resources/rqg/multiple_table_db/grammar/subquery_fields_multiple_table.yy
index 182e5fb2a..83633ce86 100644
--- a/b/resources/rqg/multiple_table_db/grammar/subquery_fields_multiple_table.yy
+++ b/b/resources/rqg/multiple_table_db/grammar/subquery_fields_multiple_table.yy
@@ -3,7 +3,8 @@ query:

 select:
 #  SELECT OUTER_BUCKET_NAME.* FROM BUCKET_NAME WHERE subquery_fields_comparisons |
-   SELECT OUTER_BUCKET_NAME.* FROM BUCKET_NAME WHERE subquery_condition_exists ;
+   SELECT OUTER_BUCKET_NAME.* FROM BUCKET_NAME WHERE subquery_condition_exists |
+   SELECT OUTER_BUCKET_NAME.* FROM BUCKET_NAME WHERE subquery_condition_exists_limit_offset limit 2 offset 1;
 #SELECT OUTER_BUCKET_NAME.* FROM BUCKET_NAME WHERE subquery_agg_exists |
 #SELECT OUTER_BUCKET_NAME.* FROM BUCKET_NAME WHERE subquery_in;

@@ -13,6 +14,9 @@ subquery_fields_comparisons:
 subquery_condition_exists:
    exists_operator_type START_EXISTS_SUBQUERY ( rule_subquery_exists ) END_EXISTS_SUBQUERY ;

+subquery_condition_exists_limit_offset:
+   exists_operator_type START_EXISTS_SUBQUERY ( rule_subquery_exists_limit_offset ) END_EXISTS_SUBQUERY ;
+
 subquery_agg_exists:
    INNER_SUBQUERY_AGG_FIELD comparison_operator START_AGG_SUBQUERY ( rule_subquery_agg_exists ) END_AGG_SUBQUERY ;

@@ -31,6 +35,10 @@ use_key_conditions:
 rule_subquery_exists:
    SELECT * FROM BUCKET_NAME use_key_conditions WHERE AND_OUTER_INNER_TABLE_PRIMARY_KEY_COMPARISON MYSQL_OPEN_PAR subquery_where_condition MYSQL_CLOSED_PAR;

+rule_subquery_exists_limit_offset:
+   SELECT * FROM BUCKET_NAME use_key_conditions WHERE AND_OUTER_INNER_TABLE_PRIMARY_KEY_COMPARISON MYSQL_OPEN_PAR subquery_where_condition MYSQL_CLOSED_PAR limit 10 offset 0 ;
+
+
 rule_subquery_fields_comparisons:
    SELECT OUTER_SUBQUERY_FIELDS FROM BUCKET_NAME use_key_conditions WHERE AND_OUTER_INNER_TABLE_PRIMARY_KEY_COMPARISON complex_condition ;

diff --git a/b/resources/rqg/multiple_table_db/query_test_using_templates/query_10000_subqueries-limitoffset.txt.zip b/b/resources/rqg/multiple_table_db/query_test_using_templates/query_10000_subqueries-limitoffset.txt.zip
new file mode 100644
index 000000000..8c8b18769
Binary files /dev/null and b/b/resources/rqg/multiple_table_db/query_test_using_templates/query_10000_subqueries-limitoffset.txt.zip differ
diff --git a/b/resources/rqg/multiple_table_db/query_test_using_templates/query_5000_subqueries-limit.txt.zip b/b/resources/rqg/multiple_table_db/query_test_using_templates/query_5000_subqueries-limit.txt.zip
new file mode 100644
index 000000000..b6820481e
Binary files /dev/null and b/b/resources/rqg/multiple_table_db/query_test_using_templates/query_5000_subqueries-limit.txt.zip differ
diff --git a/b/resources/rqg/multiple_table_db/query_test_using_templates/query_5000_subqueries.txt.zip b/b/resources/rqg/multiple_table_db/query_test_using_templates/query_5000_subqueries.txt.zip
index 62badc9c4..ecbf0ba2b 100644
Binary files a/b/resources/rqg/multiple_table_db/query_test_using_templates/query_5000_subqueries.txt.zip and b/b/resources/rqg/multiple_table_db/query_test_using_templates/query_5000_subqueries.txt.zip differ
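The added subquery_condition_exists_limit_offset branch paginates at both levels: the outer SELECT gets `limit 2 offset 1` while the inner EXISTS subquery gets `limit 10 offset 0`, and the regenerated query zip files above hold the resulting templates. Once RQG substitutes the placeholder tokens, a generated query has roughly this shape (table, alias, and field names below are illustrative, not grammar tokens):

    # Illustrative shape of one query the new grammar branch can emit.
    sample_query = (
        "SELECT t1.* FROM simple_table_1 t1 "
        "WHERE EXISTS (SELECT * FROM simple_table_2 t2 "
        "WHERE t2.primary_key_id = t1.primary_key_id AND (t2.int_field1 > 0) "
        "LIMIT 10 OFFSET 0) "
        "LIMIT 2 OFFSET 1;"
    )
    print(sample_query)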
diff --git a/conf/2i/py-index_drop_based_recovery.conf b/conf/2i/py-index_drop_based_recovery.conf
index 0d732f846..04d420bdb 100644
--- a/conf/2i/py-index_drop_based_recovery.conf
+++ b/conf/2i/py-index_drop_based_recovery.conf
@@ -4,7 +4,7 @@
     # test with warm up
     test_warmup,nodes_init=5,nodes_in=1,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,services_init=n1ql:kv-kv-kv-index-index,GROUP=WARMUP;P0
     # test with autofailover
-    test_autofailover,nodes_init=5,nodes_out=1,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,services_init=n1ql:kv-kv-kv-index-index,nodes_out_dist=kv:1,GROUP=AUTOFAILOVER;P1
+    test_autofailover,nodes_init=5,nodes_out=1,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,services_init=kv-kv-kv-index:n1ql-index,nodes_out_dist=kv:1,GROUP=AUTOFAILOVER;P1
     # test with rebalance-in
     test_rebalance_in,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,nodes_init=5,nodes_in=1,services_in=kv,services_init=n1ql:kv-kv-kv-index-index,GROUP=REB-IN;P0
     test_rebalance_in,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,nodes_init=5,nodes_in=1,services_in=index,services_init=n1ql:kv-kv-kv-index-index,GROUP=REB-IN;P0
@@ -14,22 +14,22 @@
     # test with rebalance-in-out
     test_rebalance_in_out,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,nodes_in=1,nodes_out=1,services_init=kv-kv-kv-index-index:n1ql,services_in=kv,nodes_out_dist=kv:1,nodes_init=5,GROUP=REB-IN-OUT;P0
     # test with server crash by killing memcached
-    test_server_crash,initial=create_index,in_between=,after=drop_index,groups=simple,dataset=default,doc-per-day=10,services_init=kv-kv-kv-index-index:n1ql,nodes_init=5,nodes_out=1,targetMaster=True,targetProcess=memcached,GROUP=KILL-PROCESS;P0
-    test_server_crash,initial=create_index,in_between=,after=drop_index,groups=simple,dataset=default,doc-per-day=10,services_init=n1ql:kv-kv-kv-index-index,nodes_init=5,nodes_out=1,nodes_out_dist=kv:1,targetProcess=memcached,GROUP=KILL-PROCESS;P0
+    test_server_crash,initial=create_index,in_between=,after=drop_index,groups=simple,dataset=default,doc-per-day=10,services_init=kv-kv-kv-index-index:n1ql,nodes_init=5,nodes_out=1,nodes_out_dist=kv:1,targetMaster=True,targetProcess=memcached,GROUP=KILL-PROCESS;P0
+    test_server_crash,initial=create_index,in_between=,after=drop_index,groups=simple,dataset=default,doc-per-day=10,services_init=kv-kv-kv-index:n1ql-index,nodes_init=5,nodes_out=1,nodes_out_dist=kv:1,targetProcess=memcached,GROUP=KILL-PROCESS;P0
     test_server_crash,initial=create_index,in_between=,after=drop_index,groups=simple,dataset=default,doc-per-day=10,services_init=n1ql:kv-kv-kv-index-index,nodes_init=5,nodes_out=1,nodes_out_dist=index:1,targetProcess=memcached,GROUP=KILL-PROCESS;P0
     # test with network partitioning
-    test_network_partitioning,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,services_init=n1ql:kv-kv-kv-index-index,nodes_init=5,nodes_out=1,nodes_out_dist=kv:1,GROUP=NETWORK-PARTITIONING;P0
+    test_network_partitioning,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,services_init=kv-kv-kv-index:n1ql-index,nodes_init=5,nodes_out=1,nodes_out_dist=kv:1,GROUP=NETWORK-PARTITIONING;P0
     # test with server crash by killing indexer
     test_server_crash,initial=create_index,in_between=,after=drop_index,groups=simple,dataset=default,doc-per-day=10,services_init=n1ql:kv-kv-kv-index-index,nodes_init=5,nodes_out=1,nodes_out_dist=index:1,targetProcess=indexer,GROUP=KILL-PROCESS;P0
     # test with server crash by killing projector
     test_server_crash,initial=create_index,in_between=,after=drop_index,groups=simple,dataset=default,doc-per-day=10,services_init=kv-kv-kv-index-index:n1ql,nodes_init=5,nodes_out=1,nodes_out_dist=kv:1,targetMaster=True,targetProcess=projector,GROUP=KILL-PROCESS;P0
-    test_server_crash,initial=create_index,in_between=,after=drop_index,groups=simple,dataset=default,doc-per-day=10,services_init=n1ql:kv-kv-kv-index-index,nodes_init=5,nodes_out=1,nodes_out_dist=kv:1,targetProcess=projector,GROUP=KILL-PROCESS;P0
+    test_server_crash,initial=create_index,in_between=,after=drop_index,groups=simple,dataset=default,doc-per-day=10,services_init=kv-kv-kv-index:n1ql-index,nodes_init=5,nodes_out=1,nodes_out_dist=kv:1,targetProcess=projector,GROUP=KILL-PROCESS;P0
     test_server_crash,initial=create_index,in_between=,after=,groups=simple,dataset=default,doc-per-day=10,services_init=n1ql:kv-kv-kv-index-index,nodes_init=5,nodes_out=1,nodes_out_dist=index:1,targetProcess=projector,GROUP=KILL-PROCESS;P0
     # test with server restart
-    test_server_restart,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,services_init=n1ql:kv-kv-kv-index-index,nodes_init=5,nodes_out=1,nodes_out_dist=kv:1,GROUP=SERVER-RESTART;P0
+    test_server_restart,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,services_init=kv-kv-kv-index:n1ql-index,nodes_init=5,nodes_out=1,nodes_out_dist=kv:1,GROUP=SERVER-RESTART;P0
     # test with hard failover
     test_failover,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,services_init=kv-kv-kv-index-index:n1ql,nodes_init=5,nodes_out=1,nodes_out_dist=kv:1,targetMaster=True,GROUP=HARD-FAILOVER;P0
     # test with graceful failover
     test_failover,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,services_init=kv-kv-kv-index-index:n1ql,nodes_init=5,nodes_out=1,nodes_out_dist=kv:1,graceful=True,targetMaster=True,GROUP=GRACEFUL;P0
     # test with failover and add-back recovery
-    test_failover_add_back,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,services_init=n1ql:kv-kv:n1ql-index-index,nodes_init=4,nodes_out=1,nodes_out_dist=kv:1,graceful=False,GROUP=FAILOVER-ADDBACK;P0
\ No newline at end of file
+    test_failover_add_back,initial=create_index,in_between=drop_index,after=,groups=simple,dataset=default,doc-per-day=10,services_init=kv-kv-index:n1ql-index,nodes_init=4,nodes_out=1,nodes_out_dist=kv:1,graceful=False,GROUP=FAILOVER-ADDBACK;P0
\ No newline at end of file
diff --git a/conf/2i/py-upgrade2i.conf b/conf/2i/py-upgrade2i.conf
index 197d708d3..c71a985a5 100644
--- a/conf/2i/py-upgrade2i.conf
+++ b/conf/2i/py-upgrade2i.conf
@@ -3,18 +3,26 @@
     test_online_upgrade,doc_ops=True,update_ops_per=.5,nodes_out=2,nodes_out_dist=index:2,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=5,init_nodes=False,defer_build=false
     test_upgrade_with_memdb,nodes_init=4,services_init=kv-kv-index-n1ql,standard_buckets=1,doc_ops=True,doc-per-day=10,dataset=default,scan_consistency=request_plus,groups=simple,init_nodes=False,gsi_type=memory_optimized
     test_upgrade_with_memdb,nodes_init=4,services_init=kv-kv-index-n1ql,standard_buckets=1,doc_ops=True,doc-per-day=10,dataset=default,scan_consistency=request_plus,groups=simple,init_nodes=False,gsi_type=forestdb
+    test_upgrade_with_memdb,nodes_init=4,services_init=kv-kv-index-n1ql,standard_buckets=1,doc_ops=True,doc-per-day=10,dataset=default,scan_consistency=request_plus,groups=simple,init_nodes=False,gsi_type=forestdb,set_circular_compaction=True
+    test_online_upgrade_with_mixed_mode_cluster,before=create_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,dataset=default,groups=simple,nodes_init=2,services_init=kv:index:n1ql-kv:index:n1ql,doc-per-day=10,init_nodes=False,services_in=kv:n1ql:index
     ##Add Based tests
-    test_online_upgrade,doc_ops=True,update_ops_per=.5,index_op=create,nodes_out=1,nodes_out_dist=kv:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
-    test_online_upgrade,doc_ops=True,update_ops_per=.5,index_op=create,nodes_out=1,nodes_out_dist=index:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,verify_query_result=False,init_nodes=False,defer_build=false
-    test_online_upgrade,doc_ops=True,update_ops_per=.5,index_op=create,nodes_out=2,nodes_out_dist=kv:1-index:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,verify_query_result=False,init_nodes=False,defer_build=false
-    test_online_upgrade_swap_rebalance,doc_ops=True,update_ops_per=.5,index_op=create,nodes_in=1,nodes_out=1,nodes_out_dist=kv:1,services_in=kv,dataset=default,groups=simple,nodes_init=3,services_init=kv:n1ql-kv-index,doc-per-day=5,init_nodes=False
+    test_online_upgrade,in_between=create_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,nodes_out=1,nodes_out_dist=kv:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade,in_between=create_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,nodes_out=2,nodes_out_dist=kv:1-index:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade,in_between=create_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,nodes_out=1,nodes_out_dist=index:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade,in_between=create_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,nodes_out=1,nodes_out_dist=n1ql:1,dataset=default,groups=simple,nodes_init=4,services_init=kv-kv-n1ql-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade_swap_rebalance,in_between=create_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,nodes_in=1,nodes_out=1,services_in=kv,nodes_out_dist=kv:1,dataset=default,groups=simple,nodes_init=3,services_init=kv:n1ql-index-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade_with_two_query_nodes,in_between=create_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,dataset=default,groups=simple,nodes_init=4,services_init=kv:index-n1ql-n1ql-kv:index,doc-per-day=10,init_nodes=False
     ##Query Based tests
-    test_online_upgrade,doc_ops=True,update_ops_per=.5,index_op=query,nodes_out=1,nodes_out_dist=kv:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
-    test_online_upgrade,doc_ops=True,update_ops_per=.5,index_op=query,nodes_out=1,nodes_out_dist=index:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
-    test_online_upgrade,doc_ops=True,update_ops_per=.5,index_op=query,nodes_out=2,nodes_out_dist=kv:1-index:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
-    test_online_upgrade_swap_rebalance,doc_ops=True,update_ops_per=.5,index_op=query,nodes_in=1,nodes_out=1,nodes_out_dist=kv:1,services_in=kv,dataset=default,groups=simple,nodes_init=3,services_init=kv:n1ql-kv-index,doc-per-day=5,init_nodes=False
+    test_online_upgrade,before=create_index,in_between=query:verify_query_result-query_with_explain,after=query:verify_query_result-drop_index,doc_ops=True,update_ops_per=.5,nodes_out=1,nodes_out_dist=kv:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade,before=create_index,in_between=query:verify_query_result-query_with_explain,after=query:verify_query_result-drop_index,doc_ops=True,update_ops_per=.5,nodes_out=2,nodes_out_dist=kv:1-index:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade,before=create_index,in_between=query:verify_query_result-query_with_explain,after=query:verify_query_result-drop_index,doc_ops=True,update_ops_per=.5,nodes_out=1,nodes_out_dist=index:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade,before=create_index,in_between=query:verify_query_result-query_with_explain,after=query:verify_query_result-drop_index,doc_ops=True,update_ops_per=.5,nodes_out=1,nodes_out_dist=n1ql:1,dataset=default,groups=simple,nodes_init=4,services_init=kv-kv-n1ql-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade_swap_rebalance,before=create_index,in_between=query:verify_query_result-query_with_explain,after=query:verify_query_result-drop_index,doc_ops=True,update_ops_per=.5,nodes_in=1,nodes_out=1,services_in=kv,nodes_out_dist=kv:1,dataset=default,groups=simple,nodes_init=3,services_init=kv:n1ql-index-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade_with_two_query_nodes,before=create_index,in_between=query:verify_query_result-query_with_explain,after=query:verify_query_result-drop_index,doc_ops=True,update_ops_per=.5,dataset=default,groups=simple,nodes_init=4,services_init=kv:index-n1ql-n1ql-kv:index,doc-per-day=10,init_nodes=False
     ##Drop Based tests
-    #test_online_upgrade,doc_ops=True,update_ops_per=.5,index_op=drop,nodes_out=1,nodes_out_dist=kv:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
-    #test_online_upgrade,doc_ops=True,update_ops_per=.5,index_op=drop,nodes_out=1,nodes_out_dist=index:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
-    #test_online_upgrade,doc_ops=True,update_ops_per=.5,index_op=drop,nodes_out=2,nodes_out_dist=kv:1-index:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
-    #test_online_upgrade_swap_rebalance,doc_ops=True,update_ops_per=.5,index_op=drop,nodes_in=1,nodes_out=1,nodes_out_dist=kv:1,services_in=kv,dataset=default,groups=simple,nodes_init=3,services_init=kv:n1ql-kv-index,doc-per-day=5,init_nodes=False
\ No newline at end of file
+    test_online_upgrade,before=create_index,in_between=drop_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,nodes_out=1,nodes_out_dist=kv:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade,before=create_index,in_between=drop_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,nodes_out=2,nodes_out_dist=kv:1-index:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade,before=create_index,in_between=drop_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,nodes_out=1,nodes_out_dist=index:1,dataset=default,groups=simple,nodes_init=4,services_init=kv:n1ql-kv-index-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade,before=create_index,in_between=drop_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,nodes_out=1,nodes_out_dist=n1ql:1,dataset=default,groups=simple,nodes_init=4,services_init=kv-kv-index-n1ql,doc-per-day=10,init_nodes=False
+    test_online_upgrade_swap_rebalance,before=create_index,in_between=drop_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,nodes_in=1,nodes_out=1,services_in=kv,nodes_out_dist=kv:1,dataset=default,groups=simple,nodes_init=3,services_init=kv:n1ql-index-index,doc-per-day=10,init_nodes=False
+    test_online_upgrade_with_two_query_nodes,before=create_index,in_between=drop_index,after=query:verify_query_result-query_with_explain,doc_ops=True,update_ops_per=.5,dataset=default,groups=simple,nodes_init=4,services_init=kv:index-n1ql-n1ql-kv:index,doc-per-day=10,init_nodes=False
\ No newline at end of file
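A reading aid for the services_init strings these conf edits keep reshuffling: each dash-separated entry describes one node, and a colon joins the services co-located on that node, so kv-kv-kv-index:n1ql-index is a five-node layout whose fourth node runs index and n1ql together. A small sketch of that decoding, assuming exactly this reading of the notation (the helper name is made up, not the framework's):

    def parse_services_init(spec):
        # 'kv-kv-kv-index:n1ql-index' -> one service list per node
        return [node.split(':') for node in spec.split('-')]

    print(parse_services_init('kv-kv-kv-index:n1ql-index'))
    # [['kv'], ['kv'], ['kv'], ['index', 'n1ql'], ['index']]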
diff --git a/conf/cbas/py-analytics-nulls.conf b/conf/cbas/py-analytics-nulls.conf
new file mode 100644
index 000000000..205fa3621
--- /dev/null
+++ b/conf/cbas/py-analytics-nulls.conf
@@ -0,0 +1,29 @@
+tuqquery.tuq_nulls.NULLTests:
+    test_null_query,GROUP=NULLS;P0
+    test_not_null_query,GROUP=NULLS;P0
+    test_missing_query,GROUP=NULLS;P0
+    test_not_missing_query,GROUP=NULLS;P0
+    test_nulls_over,GROUP=NULLS;P0
+    #test_ifnan,GROUP=NULLS;SCALAR;P0
+    #test_ifnull,GROUP=NULLS;SCALAR;P0
+    #test_nanif,GROUP=NULLS;SCALAR;P0
+    #test_ifposinf,GROUP=NULLS;SCALAR;P0
+    #test_ifinf,GROUP=NULLS;SCALAR;P0
+    #test_ifmissing,GROUP=NULLS;SCALAR;P0
+    #test_missingif,GROUP=NULLS;SCALAR;P0
+    test_null_query_any
+    test_not_null_query_any
+    test_prepared_null_query
+    test_prepared_not_null_query
+    test_let_null
+    test_let_not_null
+    test_missing_query_any
+    test_not_missing_query_any
+    test_prepared_not_missing_query
+    test_let_missing
+    test_let_not_missing
+    test_not_valued_query_any
+    test_prepared_valued_query
+    test_prepared_not_valued_query
+    test_let_valued
+    test_let_not_valued
\ No newline at end of file
diff --git a/conf/cbas/py-analytics-ro.conf b/conf/cbas/py-analytics-ro.conf
new file mode 100644
index 000000000..49e5b4b69
--- /dev/null
+++ b/conf/cbas/py-analytics-ro.conf
@@ -0,0 +1,7 @@
+tuqquery.n1ql_ro_user.ReadOnlyUserTests:
+    test_select,GROUP=P0,start_cmd=false,to_run=test_array
+    test_select,GROUP=P0,start_cmd=false,to_run=test_like_wildcards
+    test_select,GROUP=P0,start_cmd=false,to_run=test_group_by_satisfy
+    test_select,GROUP=P0,start_cmd=false,to_run=test_comparition_more_and_less_equal
+    test_select,GROUP=P0,start_cmd=false,to_run=test_check_types
+    test_readonly,GROUP=P0,start_cmd=false
\ No newline at end of file
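Every non-comment line in these conf files names a test method followed by comma-separated key=value parameters; testrunner stores them in TestInput.test_params and tests read them back through the param() helper declared in TestInput.py above. A rough sketch of the consuming side (the class and body are hypothetical stand-ins; the real tests live under cbas/ and tuqquery/):

    class DemoQueryTestSketch:
        """Hypothetical stand-in for a conf-driven test class."""
        def __init__(self, test_input):
            self.input = test_input  # a TestInput built by parse_from_file()

        def test_demo_query(self):
            # 'test_demo_query,query_id=grouping_limits,cluster=D' arrives here
            query_id = self.input.param("query_id", None)
            cluster = self.input.param("cluster", "D")
            assert query_id is not None, "the conf line must supply query_id"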
diff --git a/conf/cbas/py-cbas-demo-queries.conf b/conf/cbas/py-cbas-demo-queries.conf
new file mode 100644
index 000000000..226e63dd4
--- /dev/null
+++ b/conf/cbas/py-cbas-demo-queries.conf
@@ -0,0 +1,13 @@
+cbas.cbas_demo_queries.CBASDemoQueries:
+
+    # Tests to run demo queries
+    test_demo_query,query_id=extential_quantification,cluster=D
+    test_demo_query,query_id=universal_quantification,cluster=D
+    test_demo_query,query_id=lookup_table_metadata,cluster=D
+    test_demo_query,query_id=simple_aggregation,cluster=D
+    test_demo_query,query_id=simple_aggregation_unwrapped,cluster=D
+    test_demo_query,query_id=aggregation_array_count,cluster=D
+    test_demo_query,query_id=grouping_aggregation,cluster=D
+    test_demo_query,query_id=hash_based_grouping_aggregation,cluster=D
+    test_demo_query,query_id=grouping_limits,cluster=D
+    test_demo_query,query_id=equijoin_limits,cluster=D
\ No newline at end of file
diff --git a/conf/cbas/py-cbas-dynamic.conf b/conf/cbas/py-cbas-dynamic.conf
new file mode 100644
index 000000000..87152d7bd
--- /dev/null
+++ b/conf/cbas/py-cbas-dynamic.conf
@@ -0,0 +1,53 @@
+tuqquery.newtuq.QueryTests:
+    test_simple_check,GROUP=SIMPLE;P1,primary_indx_drop=True
+    # test_simple_negative_check,GROUP=SIMPLE;P0
+    test_consistent_simple_check,GROUP=SIMPLE;P0
+    test_simple_nulls
+    test_limit_offset
+    test_limit_offset_zero
+    # test_limit_offset_negative_check
+    # test_limit_offset_sp_char_check
+    test_simple_alias,GROUP=ALIAS;P0,BUGS=MB-8967
+    # test_simple_negative_alias,GROUP=ALIAS;P0
+    # test_alias_from_clause,GROUP=ALIAS;P1
+    test_alias_from_clause_group,GROUP=ALIAS;P0
+    # test_alias_order_desc,GROUP=ALIAS;P0
+    # test_alias_order_asc,GROUP=ALIAS;P1
+    test_alias_aggr_fn,GROUP=ALIAS;P0
+#   test_alias_unnest,GROUP=ALIAS;UNNEST;P1
+    test_order_by_check,GROUP=ORDER;P1
+    test_order_by_alias,GROUP=ORDER;P1,BUGS=MB-9304
+    test_order_by_alias_arrays,GROUP=ORDER;P0
+    # test_order_by_aggr_fn,GROUP=ORDER;P0
+    # test_order_by_alias_aggr_fn,GROUP=ORDER;P1,BUGS=MB-9106_alias_allowed_in_order_by
+    test_order_by_precedence,GROUP=ORDER;P1
+    test_order_by_satisfy,GROUP=ORDER;SATISFY;P0
+    # test_distinct,GROUP=DISTINCT;P0
+    # test_distinct_nested,GROUP=DISTINCT;P0
+    test_simple_alias,dataset=bigdata,value_size=1024,GROUP=ALIAS;P1,primary_indx_drop=True,reload_data=True
+    # test_alias_order_desc,GROUP=ALIAS;P0,dataset=bigdata,value_size=1024
+    # test_alias_order_asc,GROUP=ALIAS;P1,dataset=bigdata,value_size=1024
+    test_alias_aggr_fn,GROUP=ALIAS;P0,dataset=bigdata,value_size=1024
+    # test_order_by_aggr_fn,GROUP=ORDER;P0,dataset=bigdata,value_size=1024
+    test_distinct,GROUP=DISTINCT;P0,dataset=bigdata,value_size=1024
+    test_simple_check,skip_build_tuq=True,cbq_version=sherlock,dataset=sabre,reload_data=True
+    test_simple_nulls,skip_build_tuq=True,cbq_version=sherlock,dataset=sabre
+    test_limit_offset,skip_build_tuq=True,cbq_version=sherlock,dataset=sabre
+    test_simple_alias,GROUP=ALIAS;P0,BUGS=MB-8967,dataset=sabre
+    # test_alias_from_clause,GROUP=ALIAS;P1,dataset=sabre
+    # test_alias_from_clause_group,GROUP=ALIAS;P0,dataset=sabre
+    # test_alias_order_desc,GROUP=ALIAS;P0,dataset=sabre
+    # test_alias_order_asc,GROUP=ALIAS;P1,dataset=sabre
+    test_alias_aggr_fn,GROUP=ALIAS;P0,dataset=sabre
+##  test_alias_unnest,GROUP=ALIAS;UNNEST;P1,dataset=sabre
+    test_order_by_check,GROUP=ORDER;P1,dataset=sabre
+    test_order_by_alias,GROUP=ORDER;P1,BUGS=MB-9304,dataset=sabre
+    test_order_by_alias_arrays,GROUP=ORDER;P0,dataset=sabre
+    # test_order_by_aggr_fn,GROUP=ORDER;P0,dataset=sabre
+    # test_order_by_alias_aggr_fn,GROUP=ORDER;P1,BUGS=MB-9106_alias_allowed_in_order_by,dataset=sabre
+    test_order_by_precedence,GROUP=ORDER;P1,dataset=sabre
+    test_distinct,GROUP=DISTINCT;P0,dataset=sabre
+    test_distinct_nested,GROUP=DISTINCT;P0,dataset=sabre
+    test_unnest,dataset=sabre
+##  test_subquery_select,dataset=sabre
+##  test_subquery_from,dataset=sabre
diff --git a/conf/cbas/py-cbas-functional-tests.conf b/conf/cbas/py-cbas-functional-tests.conf
new file mode 100644
index 000000000..21ef5da34
--- /dev/null
+++ b/conf/cbas/py-cbas-functional-tests.conf
@@ -0,0 +1,70 @@
+cbas.cbas_functional_tests.CBASFunctionalTests:
+
+    # Create Bucket tests
+    test_create_bucket_on_cbas,cluster=D
+    test_create_another_bucket_on_cbas,error=A bucket with this name travel already exists.,cluster=D
+    test_create_bucket_on_cbas,cb_bucket_name=travel,cluster=D
+    test_create_bucket_on_cbas,cb_server_ip=INVALID_IP,cluster=D
+
+    # Create dataset tests
+    test_create_dataset_on_bucket,cluster=D
+    test_create_dataset_on_bucket,cbas_bucket_name_invalid=travel-invalid,error=Cannot find dataset invalid in dataverse Default nor an alias with name invalid!,cluster=D
+    test_create_another_dataset_on_bucket,cbas_dataset2_name=travel_ds1,cluster=D
+    test_create_another_dataset_on_bucket,cbas_dataset2_name=travel_ds,error=A dataset with this name travel_ds already exists.,cluster=D
+
+    # Connect Bucket tests
+    test_connect_bucket,cluster=D
+    test_connect_bucket,cb_bucket_password=password,error=Unauthorized (bucket/password invalid) - please check credentials!,cluster=D
+    test_connect_bucket,cbas_bucket_name_invalid=travel_invalid,error=A bucket with this name travel_invalid doesn't exist.,cluster=D
+    test_connect_bucket_on_a_connected_bucket,error=The bucket travel is already connected.,cluster=D
+    test_connect_bucket,skip_create_dataset=True,error=The bucket travel doesn't have any shadow datasets.,cluster=D
+    test_connect_bucket,cb_bucket_name=travel,error=Unauthorized (bucket/password invalid) - please check credentials!,cluster=D
+    test_connect_bucket,cb_server_ip=INVALID_IP,error=connection timed out: /INVALID_IP:PORT,cluster=D
+
+    # Disconnect Bucket tests
+    test_disconnect_bucket,cluster=D
+    test_disconnect_bucket,disconnect_if_connected=True,cluster=D
+    test_disconnect_bucket,cbas_bucket_name_invalid=travel_invalid,error=A bucket with this name travel_invalid doesn't exist.,cluster=D
+    test_disconnect_bucket,cbas_bucket_name_invalid=travel_invalid,disconnect_if_connected=True,error=A bucket with this name travel_invalid doesn't exist.,cluster=D
+    test_disconnect_bucket_already_disconnected,error=The bucket travel is not connected.,cluster=D
+    test_disconnect_bucket_already_disconnected,disconnect_if_connected=True,cluster=D
+
+    # Drop Dataset tests
+    test_drop_dataset_on_bucket,cluster=D
+    test_drop_dataset_on_bucket,cbas_dataset_name_invalid=travel_ds_invalid,error=There is no dataset with this name travel_ds_invalid in dataverse Default.,cluster=D
+    test_drop_dataset_on_bucket,skip_drop_connection=True,error=Can't drop shadow dataset because its bucket is in the connected state,cluster=D
+
+    # Drop Bucket tests
+    test_drop_cbas_bucket,cluster=D
+    test_drop_cbas_bucket,skip_drop_connection=True,error=The bucket travel can't be dropped because it is in the connected state.,cluster=D
+    test_drop_cbas_bucket,skip_drop_dataset=True,error=The bucket travel can't be dropped because the following datasets shadow it: travel_ds.,cluster=D
+    test_drop_cbas_bucket,skip_drop_connection=True,skip_drop_dataset=True,error=The bucket travel can't be dropped because it is in the connected state.,cluster=D
+    test_drop_cbas_bucket,cbas_bucket_name_invalid=travel_invalid,error=A bucket with this name travel_invalid doesn't exist.,cluster=D
+
+cbas.cbas_bucket_operations.CBASBucketOperations:
+
+    # Bucket Operations tests
+    load_docs_in_cb_bucket_before_cbas_connect,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=100000,cluster=D
+    load_docs_in_cb_bucket_before_and_after_cbas_connect,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=100000,cluster=D
+    load_docs_in_cb_bucket_after_cbas_connect,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=100000,cluster=D
+    delete_some_docs_in_cb_bucket,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=100000,cluster=D
+    delete_all_docs_in_cb_bucket,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=100000,cluster=D
+    compact_cb_bucket_with_cbas_connected,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10000,cluster=D
+    compact_cb_bucket_then_cbas_connect,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10000,cluster=D
+    # These tests will fail because of MB-20912
+    flush_cb_bucket_with_cbas_connected,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10000,cluster=D
+    flush_cb_bucket_then_cbas_connect,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10000,cluster=D
+    # These tests will fail because of MB-20914
+    delete_cb_bucket_with_cbas_connected,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10000,cluster=D
+    delete_cb_bucket_then_cbas_connect,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=10000,cluster=D
+    # These tests might fail because of MB-21385
+    update_some_docs_in_cb_bucket,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=100000,cluster=D
+    update_all_docs_in_cb_bucket,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=100000,cluster=D
+    create_update_delete_cb_bucket_then_cbas_connect,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=100000,cluster=D
+    create_update_delete_cb_bucket_with_cbas_connected,cb_bucket_name=default,cbas_bucket_name=default_bucket,cbas_dataset_name=default_ds,items=100000,cluster=D
+
+
+
+
+
+
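The CBASBucketOperations cases above all follow one arc: mutate the Couchbase bucket (load, update, delete, compact, flush), then wait until the shadow dataset on the analytics side has ingested the same item count. A minimal sketch of that convergence check (helper names are illustrative, not the suite's own):

    import time

    def wait_for_ingestion(get_dataset_count, expected_items, timeout=300):
        # Poll the CBAS dataset until it catches up with the CB bucket.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if get_dataset_count() == expected_items:
                return True
            time.sleep(5)
        return False

    # e.g. wait_for_ingestion(lambda: count_default_ds(), 100000)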
diff --git a/conf/cbas/py-tuq-analytics.conf b/conf/cbas/py-tuq-analytics.conf
new file mode 100644
index 000000000..9cb02115c
--- /dev/null
+++ b/conf/cbas/py-tuq-analytics.conf
@@ -0,0 +1,169 @@
+tuqquery.tuq.QueryTests:
+    test_all,GROUP=P0
+    test_all_nested,GROUP=P0
+    #test_any,GROUP=ANY;SATISFY;P0
+    #test_any_external,GROUP=ANY;SATISFY;P1,BUGS=MB-9188_coll_doesnt_allow_external_docs
+    #test_any_no_in_clause,GROUP=ANY;SATISFY;P1,BUGS=MB-9136_over_clause_without_in_clause
+    #test_every,GROUP=ALL;SATISFY;P0
+    #test_array,GROUP=SATISFY;P0
+    test_like,GROUP=LIKE;P0
+    test_like_every,GROUP=LIKE;SATISFY;P1
+    test_like_aliases,GROUP=LIKE;P1
+    test_like_wildcards,GROUP=LIKE;P0
+    #test_distinct_negative,GROUP=DISTINCT;P1
+    test_group_by,GROUP=GROUP;P0
+    #test_group_by_having,GROUP=GROUP;P0
+    #test_group_by_aggr_fn,GROUP=GROUP;P0
+    test_group_by_satisfy,GROUP=GROUP;SATISFY;P1
+    #test_group_by_negative,GROUP=GROUP;P1
+    test_group_by_satisfy,GROUP=GROUP;P1
+    test_ceil,GROUP=SCALAR;P0
+    #test_floor,GROUP=SCALAR;P0
+    test_greatest,GROUP=SCALAR;P0
+    test_least,GROUP=SCALAR;P0
+    #test_meta,GROUP=SCALAR;META;P0
+    #test_meta_flags,item_flag=4042322160,GROUP=SCALAR;META;P0
+    #test_meta_cas,GROUP=SCALAR;META;P1
+    test_length,GROUP=SCALAR;P0
+    test_upper,GROUP=SCALAR;P0
+    test_round,GROUP=SCALAR;P0
+    test_lower,GROUP=SCALAR;P0
+    #test_substr,GROUP=SCALAR;P0
+    test_trunc,GROUP=SCALAR;P0
+    test_first,GROUP=SCALAR;P0
+    test_sum,GROUP=AGGREGATE;P0
+    test_avg,GROUP=AGGREGATE;P1
+    test_case,GROUP=EXPRESSIONS;P0
+    test_case_expr,GROUP=EXPRESSIONS;P1
+    test_case_arithm,GROUP=EXPRESSIONS;P1
+    test_in_int,GROUP=EXPRESSIONS;P0
+    test_in_str,GROUP=EXPRESSIONS;P1
+    #test_arithm,GROUP=EXPRESSIONS;P1
+    test_logic_expr,GROUP=EXPRESSIONS;P1,BUGS=MB-9191_incorrect_scan_range
+    test_comparition_expr,GROUP=EXPRESSIONS;P0
+    #test_array_agg_distinct,GROUP=SCALAR;P1
+    #test_array_agg,GROUP=SCALAR;P0
+    #test_array_length,GROUP=SCALAR;ARRAY_FN;P1
+    #test_array_append,GROUP=SCALAR;ARRAY_FN;P0
+    #test_array_prepend,GROUP=SCALAR;ARRAY_FN;P1
+    #test_array_concat,GROUP=SCALAR;ARRAY_FN;P1
+    #test_array_remove,GROUP=SCALAR;ARRAY_FN;P1
+    #test_type,GROUP=TYPE;P0
+    test_check_types,GROUP=TYPE;P0
+    test_types_in_satisfy,GROUP=TYPE;P1
+    #test_to_num,GROUP=TYPE;P1
+    #test_to_str,GROUP=TYPE;P1
+    #test_to_bool,GROUP=TYPE;P1
+    test_between,GROUP=P0;DP4
+    test_concatenation,GROUP=P0
+    test_concatenation_where,GROUP=P1
+    #test_now,GROUP=DATE;P0
+    #test_hours,GROUP=DATE;P0
+    #test_where,GROUP=DATE;P0
+    #test_now_millis,GROUP=DATE;P0
+    #test_str_to_millis,GROUP=DATE;P0
+    #test_millis_to_str,GROUP=DATE;P0
+    #test_date_part_millis,GROUP=DATE;P0
+    #test_where_millis,GROUP=DATE;P0
+    #test_order_by_dates,GROUP=DATE;P0
+    #test_escaped_identifiers,GROUP=P0
+    test_select_split_fn,GROUP=P0;DP4
+    test_split_where,GROUP=P0;DP4
+    #test_union,GROUP=UNION;DP4
+    #test_union_multiply_buckets,standard_buckets=1,GROUP=UNION;DP4
+    test_union_all,GROUP=UNION;DP4
+    test_union_all_multiply_buckets,standard_buckets=1,GROUP=UNION;DP4
+    #test_union_where,GROUP=UNION;DP4
+    #test_union_aggr_fns,GROUP=UNION;DP4
+    #test_intersect
+    #test_intersect_all
+    #test_except_secondsetempty
+    #test_except
+    #test_except_all
+    #test_within_list_object,GROUP=WITHIN;DP4
+    #test_within_list_of_lists,GROUP=WITHIN;DP4
+    #test_within_object,GROUP=WITHIN;DP4
+    #test_within_array,GROUP=WITHIN;DP4
+    test_raw,GROUP=DP4
+    test_raw_limit,GROUP=DP4
+    test_raw_order
+    #test_push_limit
+    #test_clock_millis,GROUP=DATE;DP4
+    #test_clock_str,GROUP=DATE;DP4
+    #test_date_add_millis,GROUP=DATE;DP4
+    #test_date_add_str,GROUP=DATE;DP4
+    #test_date_diff_millis,GROUP=DATE;DP4
+    #test_date_diff_str,GROUP=DATE;DP4
+    #test_array_avg,GROUP=DP4
+    #test_array_contains,GROUP=DP4
+    #test_array_count,GROUP=DP4
+    #test_array_distinct,GROUP=DP4
+    #test_array_max,GROUP=DP4
+    #test_array_min,GROUP=DP4
+    #test_array_position,GROUP=DP4
+    #test_array_put,GROUP=DP4
+    #test_array_range,GROUP=DP4
+    #test_array_remove,GROUP=DP4
+    #test_array_insert,GROUP=DP4
+    #test_array_replace,GROUP=DP4
+    #test_array_repeat,GROUP=DP4
+    #test_array_reverse,GROUP=DP4
+    #test_array_sort,GROUP=DP4
+    #test_array_sum,GROUP=DP4
+    test_comparition_more_less_equal,GROUP=DP4
+    test_comparition_not_equal,GROUP=DP4
+    #test_comparition_equal_int,GROUP=DP4
+    #test_comparition_equal_str,GROUP=DP4
+    #test_nanif,GROUP=DP4
+    #test_posinf,GROUP=DP4
+    test_contains,GROUP=DP4
+    test_initcap,GROUP=DP4
+    test_title,GROUP=DP4
+    test_position,GROUP=DP4
+    test_regex_contains,GROUP=DP4
+    test_regex_like,GROUP=DP4
+    test_regex_position,GROUP=DP4
+    #test_regex_replace,GROUP=DP4
+    #test_replace,GROUP=DP4
+    test_repeat,GROUP=DP4
+    #test_string_fn_negative
+    #test_meta_negative
+    #test_sum_negative
+    #test_all_negative
+    #test_keywords
+    #test_satisfy_negative
+    #test_arrays_negative
+    #test_between_negative
+    #test_check_is_isnot_negative
+    test_let_nums
+    #test_let_string
+    test_uuid
+    test_letting
+    test_comparition_not_equal
+    test_comparition_not_equal_more_less
+    test_every_comparision_not_equal_less_more
+    test_every_comparision_not_equal
+    #test_prepared_comparision_not_equal
+    #test_prepared_comparision_not_equal_less_more
+    test_let_not_equal
+    test_let_not_equal_less_more
+    test_any_between
+    #test_prepared_between
+    test_let_between
+    test_any_less_equal
+    test_any_more_equal
+    #test_prepared_more_equal
+    #test_prepared_less_equal
+    test_let_more_equal
+    test_let_less_equal
+tuqquery.tuq_precedence.PrecedenceTests:
+    #test_case_and_like,GROUP=PRECEDENCE;P0
+    #test_case_and_logic_exp,GROUP=PRECEDENCE;P0
+    #test_case_and_comparision_exp,GROUP=PRECEDENCE;P0
+    test_arithm_and_comparision_exp,GROUP=PRECEDENCE;P0
+    #test_arithm_and_like_exp,GROUP=PRECEDENCE;P0
+    test_logic_exp,GROUP=PRECEDENCE;P0
+    test_logic_exp_nulls,GROUP=PRECEDENCE;P0
+#tuqquery.tuq_system.SysCatalogTests:
+#    test_memcached_buckets,doc-per-day=1,nodes_init=1,memcached_buckets=1,skip_index=True,skip_load=True
+#    test_negative_buckets,doc-per-day=1,nodes_init=1,memcached_buckets=1,skip_index=True,skip_load=True
diff --git a/conf/couchbase-cli/py-bucket-compact.conf b/conf/couchbase-cli/py-bucket-compact.conf
index 9df917ec8..9b8708636 100644
--- a/conf/couchbase-cli/py-bucket-compact.conf
+++ b/conf/couchbase-cli/py-bucket-compact.conf
@@ -28,6 +28,6 @@ clitest.couchbase_clitest.CouchbaseCliTest:
     # Compact a bucket that doesn't exist
     testBucketCompact,username=Administrator,password=password,bucket-name=bucket-1,expect-error=True,error-msg=Bucket not found
     # Verify invalid username/password returns an error
-    testBucketCompact,init-username=Administrator,init-password=password,init-bucket-type=couchbase,username=Administrator1,password=password1,bucket-name=bucket-1,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p)
+    testBucketCompact,init-bucket-type=couchbase,username=Administrator1,password=password1,bucket-name=bucket-1,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p)
     # Verify running against an uninitialized cluster fails
     testBucketCompact,initialized=False,username=Administrator,password=password,init-bucket-type=couchbase,bucket-name=bucket-1,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster
\ No newline at end of file
diff --git a/conf/couchbase-cli/py-bucket-edit.conf b/conf/couchbase-cli/py-bucket-edit.conf
index 27b0e9341..217bd774e 100644
--- a/conf/couchbase-cli/py-bucket-edit.conf
+++ b/conf/couchbase-cli/py-bucket-edit.conf
@@ -20,7 +20,7 @@ clitest.couchbase_clitest.CouchbaseCliTest:
     testBucketEdit,username=Administrator,password=password,init-bucket-type=couchbase,bucket-name=bucket-3,bucket-password=password2,memory-quota=277,eviction-policy=valueOnly,replica-count=2,priority=low,enable-flush=0,expect-error=False
     testBucketEdit,username=Administrator,password=password,init-bucket-type=couchbase,bucket-name=bucket-4,bucket-password=password3,memory-quota=279,priority=high,enable-flush=1,expect-error=False
     # Make sure that the bucket name is required
-    testBucketEdit,username=Administrator,password=password,init-bucket-type=couchbase,memory-quota=290,expect-error=True,error-msg=Option required, but not specified: --bucket
+    testBucketEdit,username=Administrator,password=password,memory-quota=290,expect-error=True,error-msg=Option required, but not specified: --bucket
     # Verify invalid memory quota returns an error
     testBucketEdit,username=Administrator,password=password,init-bucket-type=couchbase,bucket-name=bucket-1,memory-quota=ddd,expect-error=True,error-msg=option --bucket-ramsize: invalid integer value: 'ddd'
     # Verify invalid replica count returns an error
diff --git a/conf/couchbase-cli/py-cluster-edit.conf b/conf/couchbase-cli/py-cluster-edit.conf
index 6b45e62b3..34c9ff203 100644
--- a/conf/couchbase-cli/py-cluster-edit.conf
+++ b/conf/couchbase-cli/py-cluster-edit.conf
@@ -25,7 +25,7 @@ clitest.couchbase_clitest.CouchbaseCliTest:
     # Verify that a password that is too short fails
     testClusterEdit,username=Administrator,password=password,new-username=new_user,new-password=short,data-ramsize=256,expect-error=True,error-msg=The password must be at least six characters.
     # Verify that invalid username and password fail
-    testClusterEdit,init-username=Administrator,init-password=password,username=myusername,password=mypassword,data-ramsize=256,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p)
+    testClusterEdit,username=myusername,password=mypassword,data-ramsize=256,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p)
     # Verify that an invalid port fails
     testClusterEdit,username=Administrator,password=password,port=99999,expect-error=True,error-msg=The port number must be greater than 1023 and less than 65536.
     testClusterEdit,username=Administrator,password=password,port=100,expect-error=True,error-msg=The port number must be greater than 1023 and less than 65536.
diff --git a/conf/couchbase-cli/py-cluster-init.conf b/conf/couchbase-cli/py-cluster-init.conf
index b4b3b1049..0c4d5f696 100644
--- a/conf/couchbase-cli/py-cluster-init.conf
+++ b/conf/couchbase-cli/py-cluster-init.conf
@@ -2,48 +2,49 @@ clitest.couchbase_clitest.CouchbaseCliTest:
     # Verify cluster initialization with only the data service
     testClusterInit,username=Administrator,password=password,data-ramsize=256,expect-error=False
     testClusterInit,username=Administrator,password=password,data-ramsize=256,services=data,expect-error=False
-    # Verify cluster initialization with different service
-    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=data,index,index-ramsize=256,expect-error=False
-    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=data,fts,fts-ramsize=256,expect-error=False
-    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=data,index,fts,index-ramsize=256,fts-ramsize=256,expect-error=False
+    # Verify cluster initialization with different services
+    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=index,data,index-ramsize=256,expect-error=False
+    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=fts,data,fts-ramsize=256,expect-error=False
+    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=fts,index,data,index-ramsize=256,fts-ramsize=256,expect-error=False
     # Verify cluster initialization with a different port
     testClusterInit,username=Administrator,password=password,data-ramsize=256,port=8091,expect-error=False
     testClusterInit,username=Administrator,password=password,data-ramsize=256,port=5000,expect-error=False
     # Verify cluster initialization with different index storage modes
-    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=data,index,index-ramsize=256,index-storage-mode=default,expect-error=False
-    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=data,index,index-ramsize=256,index-storage-mode=memopt,expect-error=False
+    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=index,data,index-ramsize=256,index-storage-mode=default,expect-error=False
+    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=index,data,index-ramsize=256,index-storage-mode=memopt,expect-error=False
     testClusterInit,username=Administrator,password=password,data-ramsize=256,services=data,index-storage-mode=memopt,expect-error=False
     testClusterInit,username=Administrator,password=password,data-ramsize=256,services=data,expect-error=False
     # Verify cluster initialization with different RAM quotas
-    testClusterInit,username=Administrator,password=password,data-ramsize=257,services=data,index,fts,index-ramsize=258,fts-ramsize=259,expect-error=False
-    testClusterInit,username=Administrator,password=password,data-ramsize=260,services=data,index,index-ramsize=261,expect-error=False
+    testClusterInit,username=Administrator,password=password,data-ramsize=257,services=fts,index,data,index-ramsize=258,fts-ramsize=259,expect-error=False
+    testClusterInit,username=Administrator,password=password,data-ramsize=260,services=index,data,index-ramsize=261,expect-error=False
     # Verify cluster initialization with a cluster name
     testClusterInit,username=Administrator,password=password,data-ramsize=260,name=mycluster,expect-error=False
     testClusterInit,username=Administrator,password=password,data-ramsize=260,name="",expect-error=False
     testClusterInit,username=Administrator,password=password,data-ramsize=260,name="my cluster",expect-error=False
     # Verify that the data service is required
-    testClusterInit,username=Administrator,password=password,services=index,index-ramsize=256,expect-error=True,error-msg=cannot setup first cluster node without kv service
+    testClusterInit,username=Administrator,password=password,services=index,index-ramsize=256,expect-error=True,error-msg=Cannot set up first cluster node without the data service
     # Verify that having the index service requires the index ramsize to be set
-    testClusterInit,username=Administrator,password=password,services=data,index,data-ramsize=256,expect-error=True,error-msg=option cluster-index-ramsize is not specified
+    testClusterInit,username=Administrator,password=password,services=data,index,data-ramsize=256,expect-error=True,error-msg=Option required, but not specified when index service enabled: --cluster-index-ramsize
     # Verify that having the fts service requires the fts ramsize to be set
-    testClusterInit,username=Administrator,password=password,services=data,fts,data-ramsize=256,expect-error=True,error-msg=option fts-index-ramsize is not specified
+    testClusterInit,username=Administrator,password=password,services=data,fts,data-ramsize=256,expect-error=True,error-msg=Option required, but not specified when fts service enabled: --cluster-fts-ramsize
     # Verify that having the data service requires the data ramsize to be set
-    testClusterInit,username=Administrator,password=password,expect-error=True,error-msg=option cluster-ramsize is not specified
+    testClusterInit,username=Administrator,password=password,expect-error=True,error-msg=Option required, but not specified when data service enabled: --cluster-ramsize
+
     # Verify that an invalid service name causes an error
-    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=data,fff,expect-error=True,error-msg=invalid service: fff
+    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=data,fff,expect-error=True,error-msg=`fff` is not a valid service
     # Verify that giving an invalid port fails
     testClusterInit,username=Administrator,password=password,data-ramsize=256,port=99999,expect-error=True,error-msg=The port number must be greater than 1023 and less than 65536.
     testClusterInit,username=Administrator,password=password,data-ramsize=256,port=100,expect-error=True,error-msg=The port number must be greater than 1023 and less than 65536.
-    testClusterInit,username=Administrator,password=password,data-ramsize=256,port=34z,expect-error=True,error-msg=--cluster-port must be an integer
+    testClusterInit,username=Administrator,password=password,data-ramsize=256,port=34z,expect-error=True,error-msg=option --cluster-port: invalid integer value: '34z'
     # Verify invalid index storage setting
-    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=data,index,index-ramsize=256,index-storage-mode=invalid,expect-error=True,error-msg=invalid index storage setting `invalid`. Must be [default, memopt]
+    testClusterInit,username=Administrator,password=password,data-ramsize=256,services=data,index,index-ramsize=256,index-storage-mode=invalid,expect-error=True,error-msg=option --index-storage-setting: invalid choice: 'invalid' (choose from 'default', 'memopt')
     # Verify with invalid ram quotas
-    testClusterInit,username=Administrator,password=password,data-ramsize=25z6,expect-error=True,error-msg=--cluster-ramsize must be an integer
-    testClusterInit,username=Administrator,password=password,data-ramsize=256,index-ramsize=25z6,services=data,index,expect-error=True,error-msg=--cluster-index-ramsize must be an integer
-    testClusterInit,username=Administrator,password=password,data-ramsize=256,fts-ramsize=25z6,services=data,fts,expect-error=True,error-msg=--cluster-fts-ramsize must be an integer
+    testClusterInit,username=Administrator,password=password,data-ramsize=25z6,expect-error=True,error-msg=option --cluster-ramsize: invalid integer value: '25z6'
+    testClusterInit,username=Administrator,password=password,data-ramsize=256,index-ramsize=25z6,services=index,data,expect-error=True,error-msg=option --cluster-index-ramsize: invalid integer value: '25z6'
+    testClusterInit,username=Administrator,password=password,data-ramsize=256,fts-ramsize=25z6,services=fts,data,expect-error=True,error-msg=option --cluster-fts-ramsize: invalid integer value: '25z6'
     # Verify with password longer than 24 characters
     testClusterInit,username=Administrator,password=password_that_is_too_long,data-ramsize=256,expect-error=True,error-msg=Password length 25 exceeds maximum length of 24 characters
     # Verify that specifying a password that is too short
     testClusterInit,username=Administrator,password=short,data-ramsize=256,expect-error=True,error-msg=The password must be at least six characters.
     # Try to initialize a cluster that is already initialized
-    testClusterInit,username=Administrator,password=password,data-ramsize=256,initialized=True,expect-error=True,error-msg=Cluster is already initialized, use cluster-edit to change settings
+    testClusterInit,username=Administrator,password=password,data-ramsize=256,initialized=True,expect-error=True,error-msg=Cluster is already initialized, use setting-cluster to change settings
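The rewritten expected error strings above follow the optparse-style messages of the 5.0-era couchbase-cli argument parser, which is what this change tracks. For orientation, a passing invocation matching the first test case would look roughly like this (host is assumed; the long flag names are the ones the error messages themselves cite):

    import subprocess

    # Rough equivalent of testClusterInit with data-ramsize=256,services=data
    subprocess.check_call([
        "couchbase-cli", "cluster-init",
        "-c", "127.0.0.1:8091",
        "-u", "Administrator", "-p", "password",
        "--cluster-ramsize", "256",
        "--services", "data",
    ])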
diff --git a/conf/couchbase-cli/py-collect-logs-start.conf b/conf/couchbase-cli/py-collect-logs-start.conf
new file mode 100644
index 000000000..915b8ab58
--- /dev/null
+++ b/conf/couchbase-cli/py-collect-logs-start.conf
@@ -0,0 +1,28 @@
+clitest.couchbase_clitest.CouchbaseCliTest:
+    # Test log collection without upload (all nodes)
+    testCollectLogStart,username=Administrator,password=password,all-nodes=True,expect-error=False
+    # Test log collection without upload (1 of 2 nodes)
+    testCollectLogStart,init-num-servers=2,username=Administrator,password=password,nodes=1,expect-error=False
+    # Test log collection without upload (2 of 3 nodes)
+    testCollectLogStart,init-num-servers=3,username=Administrator,password=password,nodes=2,expect-error=False
+    # Test log collection with upload
+    testCollectLogStart,username=Administrator,password=password,all-nodes=True,upload=True,upload-host=s3.amazonaws.com,customer=cb,ticket=12345,expect-error=False
+    # Test log collection with upload (no ticket number)
+    testCollectLogStart,username=Administrator,password=password,all-nodes=True,upload=True,upload-host=s3.amazonaws.com,customer=cb,expect-error=False
+
+    # Test log collection with an invalid node
+    testCollectLogStart,invalid-node=True,username=Administrator,password=password,expect-error=True,error-msg=Servers list contains invalid servers
+    testCollectLogStart,init-num-servers=2,invalid-node=True,username=Administrator,password=password,nodes=1,expect-error=True,error-msg=Servers list contains invalid servers
+    # Test log collection no nodes or all nodes
+    testCollectLogStart,username=Administrator,password=password,upload=True,upload-host=s3.amazonaws.com,customer=cb,expect-error=True,error-msg=Must specify either --all-nodes or --nodes
+    # Test log collection with both nodes and all nodes specified
+    testCollectLogStart,init-num-servers=2,username=Administrator,password=password,all-nodes=True,nodes=1,upload=True,upload-host=s3.amazonaws.com,customer=cb,expect-error=True,error-msg=Cannot specify both --all-nodes and --nodes
+    # Test log collection with upload, but no upload host
+    testCollectLogStart,username=Administrator,password=password,all-nodes=True,upload=True,customer=cb,expect-error=True,error-msg=--upload-host is required when --upload is specified
+    # Test log collection with upload, but no upload customer
+    testCollectLogStart,username=Administrator,password=password,all-nodes=True,upload=True,upload-host=s3.amazonaws.com,expect-error=True,error-msg=--upload-customer is required when --upload is specified
+
+    # Verify that invalid username and password fail
+    testCollectLogStart,username=myusername,password=mypassword,all-nodes=True,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p)
+    # Verify that running against an uninitialized cluster fails
+    testCollectLogStart,username=Administrator,password=password,initialized=False,all-nodes=True,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster
\ No newline at end of file
a/conf/couchbase-cli/py-collect-logs-stop.conf b/conf/couchbase-cli/py-collect-logs-stop.conf new file mode 100644 index 000000000..0a96d2a8c --- /dev/null +++ b/conf/couchbase-cli/py-collect-logs-stop.conf @@ -0,0 +1,8 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Test enable email alerts, some alerts set + testCollectLogStop,username=Administrator,password=password,expect-error=False + + # Verify that invalid username and password fail + testCollectLogStop,username=myusername,password=mypassword,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify that running against an uninitialized cluster fails + testCollectLogStop,username=Administrator,password=password,initialized=False,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-failover.conf b/conf/couchbase-cli/py-failover.conf new file mode 100644 index 000000000..744b2b823 --- /dev/null +++ b/conf/couchbase-cli/py-failover.conf @@ -0,0 +1,16 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Hard failover one node + testFailover,username=Administrator,password=password,force=True,expect-error=False + # Gracefully failover one node + testFailover,username=Administrator,password=password,expect-error=False + # Try to hard failover an invalid node + testFailover,username=Administrator,password=password,force=true,invalid-node=True,expect-error=True,error-msg=Server can't be failed over because it's not part of the cluster + # Try to graceful failover an invalid node + testFailover,username=Administrator,password=password,invalid-node=True,expect-error=True,error-msg=Server can't be failed over because it's not part of the cluster + # Test no failover nodes specified + testFailover,username=Administrator,password=password,no-failover-servers=True,expect-error=True,error-msg=Option required, but not specified: --server-failover + + # Verify invalid username/password returns an error + testFailover,username=Administrator1,password=password1,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify running against an uninitialized cluster fails + testFailover,initialized=False,username=Administrator,password=password,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-group-manage.conf b/conf/couchbase-cli/py-group-manage.conf new file mode 100644 index 000000000..800e7c758 --- /dev/null +++ b/conf/couchbase-cli/py-group-manage.conf @@ -0,0 +1,45 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # No tests for group-manage --list + + # Test create group + testGroupManage,username=Administrator,password=password,create=True,name=new_group,expect-error=False + # Test create group that already exists + testGroupManage,init-group=new_group,username=Administrator,password=password,create=True,name=new_group,expect-error=True,error-msg=name - already exists + # Test create group without a group name + testGroupManage,username=Administrator,password=password,create=True,expect-error=True,error-msg=--group-name is required with --create flag + + # Test group delete + testGroupManage,init-group=new_group,username=Administrator,password=password,delete=True,name=new_group,expect-error=False + # Test group delete when group doesn't exist + 
testGroupManage,username=Administrator,password=password,delete=True,name=new_group,expect-error=True,error-msg=Group `new_group` not found + # Test group delete with no group name + testGroupManage,username=Administrator,password=password,delete=True,expect-error=True,error-msg=--group-name is required with --delete flag + # Test group delete with non-empty group + testGroupManage,username=Administrator,password=password,delete=True,name="Group 1",expect-error=True,error-msg=_ - group is not empty + + # Test group rename + testGroupManage,init-group=new_group,username=Administrator,password=password,rename=new_group,name=newer_group,expect-error=False + # Test group rename when the group doesn't exist + testGroupManage,init-group=new_group,username=Administrator,password=password,rename=bad_group,name=newer_group,expect-error=True,error-msg=Group `bad_group` not found + # Test group rename with no group name + testGroupManage,init-group=new_group,username=Administrator,password=password,rename=new_group,expect-error=True,error-msg=--group-name is required with --rename option + + # Test move server (single and multiple servers) + testGroupManage,init-group=new_group,username=Administrator,password=password,move-servers=1,from-group="Group 1",to-group=new_group,expect-error=False + testGroupManage,init-group=new_group,init-num-servers=2,username=Administrator,password=password,move-servers=2,from-group="Group 1",to-group=new_group,expect-error=False + # Test move servers when the server doesn't exist + testGroupManage,init-group=new_group,username=Administrator,password=password,invalid-move-server=invalid:8091,from-group="Group 1",to-group=new_group,expect-error=True,error-msg=Can't move invalid:8091 because it doesn't exist in 'Group 1' + # Test move servers when the from group doesn't exist + testGroupManage,init-group=new_group,username=Administrator,password=password,move-servers=1,from-group=old_group,to-group=new_group,expect-error=True,error-msg=Group to move servers from `old_group` not found + # Test move servers when the to group doesn't exist + testGroupManage,username=Administrator,password=password,move-servers=1,from-group="Group 1",to-group=new_group,expect-error=True,error-msg=Group to move servers to `new_group` not found + + # Test group manage when more than one of --list, --move-servers, --create, --delete, or --rename is used + testGroupManage,username=Administrator,password=password,create=True,delete=True,name=new_group,expect-error=True,error-msg=Only one of the following may be specified: --create, --delete, --list, --move-servers, or --rename + # Test group manage when none of --list, --move-servers, --create, --delete, or --rename are used + testGroupManage,username=Administrator,password=password,expect-error=True,error-msg=Must specify one of the following: --create, --delete, --list, --move-servers, or --rename + + # Verify that invalid username and password fail + testGroupManage,username=myusername,password=mypassword,create=True,name=new_group,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify that running against an uninitialized cluster fails + testGroupManage,username=Administrator,password=password,create=True,name=new_group,initialized=False,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-import-export.conf b/conf/couchbase-cli/py-import-export.conf new
file mode 100644 index 000000000..84ac83aad --- /dev/null +++ b/conf/couchbase-cli/py-import-export.conf @@ -0,0 +1,72 @@ +clitest.importexporttest.ImportExportTests: + test_export_from_empty_bucket,imex_type=json,format_type=lines + test_export_from_empty_bucket,imex_type=json,format_type=lines,path=local + test_export_from_empty_bucket,imex_type=json,format_type=lines,default_bucket=False,sasl_buckets=1 + test_export_from_empty_bucket,imex_type=json,format_type=lines,default_bucket=False,sasl_buckets=1,path=local + test_export_from_sasl_bucket,imex_type=json,format_type=list + test_export_from_sasl_bucket,imex_type=json,format_type=lines + test_export_and_import_back,imex_type=json,format_type=list,import_back=True,nodes_init=2 + test_export_and_import_back,imex_type=json,format_type=lines,import_back=True,nodes_init=2 + test_import_json_file,imex_type=json,format_type=lines,import_file=json_1000_lines,nodes_init=2 + test_import_json_file,imex_type=json,format_type=lines,import_file=json_1000_lines_invalid,nodes_init=2 + test_import_json_file,imex_type=json,format_type=lines,import_file=json_1000_lines_invalid,nodes_init=2,default_bucket=False,sasl_buckets=1 + test_import_json_file,imex_type=json,format_type=list,import_file=json_list_1000_lines,nodes_init=2 + test_import_json_file,imex_type=json,format_type=list,import_file=json_list_1000_lines_invalid,nodes_init=2 + test_import_json_file,imex_type=json,format_type=list,import_file=json_list_1000_lines_invalid,nodes_init=2,default_bucket=False,sasl_buckets=1 + test_import_json_file,imex_type=json,format_type=lines,import_file=json_1000_lines,nodes_init=2,default_bucket=False,sasl_buckets=1 + test_import_json_file,imex_type=json,format_type=list,import_file=json_list_1000_lines,nodes_init=2,default_bucket=False,sasl_buckets=1 + test_import_json_file,imex_type=json,format_type=lines,import_file=json_1000_lines,nodes_init=2,import_method=url,import_file=https://s3-us-west-1.amazonaws.com/imex-data/json/json_1000_lines + test_import_json_file,imex_type=json,format_type=list,import_file=json_list_1000_lines,nodes_init=2,import_method=url,import_file=https://s3-us-west-1.amazonaws.com/imex-data/json/json_list_1000_lines + test_import_json_file,imex_type=json,format_type=lines,import_file=json_1000_lines,nodes_init=2,default_bucket=False,sasl_buckets=1,import_method=url,import_file=https://s3-us-west-1.amazonaws.com/imex-data/json/json_1000_lines + test_import_json_file,imex_type=json,format_type=list,import_file=json_list_1000_lines,nodes_init=2,default_bucket=False,sasl_buckets=1,import_method=url,import_file=https://s3-us-west-1.amazonaws.com/imex-data/json/json_list_1000_lines + test_import_json_sample,default_bucket=False,imex_type=json,sample_file=beer-sample,nodes_init=2 + test_import_json_sample,default_bucket=False,imex_type=json,sample_file=gamesim-sample,nodes_init=2 + test_import_json_sample,default_bucket=False,imex_type=json,sample_file=travel-sample,nodes_init=2 + test_imex_flags,imex_type="",nodes_init=2 + test_imex_flags,imex_type="",nodes_init=2,path_type=local + test_imex_flags,cluster_flag="",nodes_init=2 + test_imex_flags,cluster_flag="",nodes_init=2,path_type=local + test_imex_flags,user_flag="",nodes_init=2 + test_imex_flags,user_flag="",nodes_init=2,path_type=local + test_imex_flags,password_flag="",nodes_init=2 + test_imex_flags,password_flag="",nodes_init=2,path_type=local + test_imex_flags,bucket_flag="",nodes_init=2 +
test_imex_flags,bucket_flag="",nodes_init=2,path_type=local + test_imex_flags,dataset_flag="",nodes_init=2 + test_imex_flags,dataset_flag="",nodes_init=2,path_type=local + test_imex_flags,format_flag="",nodes_init=2 + test_imex_flags,format_flag="",nodes_init=2,path_type=local + test_imex_flags,generate_flag="",nodes_init=2 + test_imex_flags,generate_flag="",nodes_init=2,path_type=local + test_imex_optional_flags,imex_type=json,format_type=lines,import_file=json_1000_lines,threads_flag=2 + test_imex_optional_flags,imex_type=json,format_type=lines,import_file=json_1000_lines,threads_flag=2,path_type=local + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,threads_flag=2 + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,threads_flag=2,path_type=local + test_imex_optional_flags,imex_type=json,format_type=lines,import_file=json_1000_lines,threads_flag=empty + test_imex_optional_flags,imex_type=json,format_type=lines,import_file=json_1000_lines,threads_flag=empty",path_type=local + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,threads_flag=empty + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,threads_flag=empty,path_type=local + test_imex_optional_flags,imex_type=json,format_type=lines,import_file=json_1000_lines,threads_flag=notnumber + test_imex_optional_flags,imex_type=json,format_type=lines,import_file=json_1000_lines,threads_flag=notnumber,path_type=local + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,threads_flag=notnumber + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,threads_flag=notnumber,path_type=local + test_imex_optional_flags,imex_type=json,format_type=lines,import_file=json_1000_lines,errors_flag=empty + test_imex_optional_flags,imex_type=json,format_type=lines,import_file=json_1000_lines,errors_flag=empty,path_type=local + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,errors_flag=empty + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,errors_flag=empty,path_type=local + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,errors_flag=error + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,errors_flag=error,path_type=local + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,errors_flag=relative_path + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,errors_flag=relative_path_type,path_type=local + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,errors_flag=absolute_path + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,errors_flag=absolute_path,path_type=local + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,logs_flag=empty + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,logs_flag=empty,path_type=local + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,logs_flag=log + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,logs_flag=log,path_type=local + 
test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,logs_flag=relative_path + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,logs_flag=relative_path,path_type=local + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,logs_flag=absolute_path + test_imex_optional_flags,imex_type=json,format_type=list,import_file=json_list_1000_lines,logs_flag=absolute_path,path_type=local + test_imex_non_default_port,imex_type=json,format_type=list,import_file=json_list_1000_lines + test_imex_non_default_port,imex_type=json,format_type=list,import_file=json_list_1000_lines,path_type=local diff --git a/conf/couchbase-cli/py-node-init.conf b/conf/couchbase-cli/py-node-init.conf new file mode 100644 index 000000000..8a86107ab --- /dev/null +++ b/conf/couchbase-cli/py-node-init.conf @@ -0,0 +1,11 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Test node init with data paths and hostname + testNodeInit,username=Administrator,password=password,hostname=couchbase.host.com,expect-error=False + testNodeInit,username=Administrator,password=password,data-path=valid,index-path=valid,expect-error=False + testNodeInit,username=Administrator,password=password,index-path=valid,expect-error=False + + # Test node init with no data paths or hostname + testNodeInit,username=Administrator,password=password,expect-error=True,error-msg=No node initialization parameters specified + # Test node init with invalid data and index paths + testNodeInit,username=Administrator,password=password,data-path=/invalid/data/path,expect-error=True,error-msg=Could not set the storage path. It must be a directory writable by 'couchbase' user. + testNodeInit,username=Administrator,password=password,index-path=/invalid/index/path,expect-error=True,error-msg=Could not set the storage path. It must be a directory writable by 'couchbase' user.
diff --git a/conf/couchbase-cli/py-rebalance-stop.conf b/conf/couchbase-cli/py-rebalance-stop.conf new file mode 100644 index 000000000..17da3d36c --- /dev/null +++ b/conf/couchbase-cli/py-rebalance-stop.conf @@ -0,0 +1,9 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Need to add test cases when rebalance is actually running + + # Stop rebalance on an initialized cluster + testRebalanceStop,username=Administrator,password=password,init-rebalance=False,initialized=True,expect-error=False + # Verify that invalid username and password fail + testRebalanceStop,username=myusername,password=mypassword,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify that running against an uninitialized cluster fails + testRebalanceStop,username=Administrator,password=password,initialized=False,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-rebalance.conf b/conf/couchbase-cli/py-rebalance.conf new file mode 100644 index 000000000..9fcd6598b --- /dev/null +++ b/conf/couchbase-cli/py-rebalance.conf @@ -0,0 +1,12 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + testRebalance,username=Administrator,password=password,expect-error=False + testRebalance,username=Administrator,password=password,num-add-servers=1,expect-error=False + testRebalance,username=Administrator,password=password,num-add-servers=3,expect-error=False + testRebalance,username=Administrator,password=password,num-initial-servers=2,num-remove-servers=1,expect-error=False + testRebalance,username=Administrator,password=password,num-initial-servers=4,num-remove-servers=3,expect-error=False + testRebalance,username=Administrator,password=password,num-initial-servers=2,num-remove-servers=1,num-add-servers=1,expect-error=False + testRebalanceInvalidRemoveServer,error-msg=Some nodes specified to be removed are not part of the cluster + # Verify invalid username/password returns an error + testRebalance,username=Administrator1,password=password1,num-add-servers=1,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify running against an uninitialized cluster fails + testRebalance,initialized=False,username=Administrator,password=password,num-add-servers=1,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-recovery.conf b/conf/couchbase-cli/py-recovery.conf new file mode 100644 index 000000000..3bf856c68 --- /dev/null +++ b/conf/couchbase-cli/py-recovery.conf @@ -0,0 +1,22 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Recover one node with delta recovery + testRecovery,init-num-servers=2,username=Administrator,password=password,servers=1,recovery-type=delta,expect-error=False + # Recover two nodes with delta recovery + testRecovery,init-num-servers=3,username=Administrator,password=password,servers=2,recovery-type=delta,expect-error=False + # Recover one node with full recovery + testRecovery,init-num-servers=2,username=Administrator,password=password,servers=1,recovery-type=full,expect-error=False + # Recover two nodes with full recovery + testRecovery,init-num-servers=3,username=Administrator,password=password,servers=2,recovery-type=full,expect-error=False + + # Test recovery with an invalid node +
testRecovery,skip-failover=True,username=Administrator,password=password,invalid-recover-server=invalid:8091,recovery-type=full,expect-error=True,error-msg=Server not found invalid:8091 + # Test recovery without specifying a node + testRecovery,username=Administrator,password=password,recovery-type=full,expect-error=True,error-msg=Option required, but not specified: --server-recovery + # Test recovery with an incorrect recovery type + testRecovery,username=Administrator,password=password,recovery-type=invalid_type,expect-error=True,error-msg=option --recovery-type: invalid choice: 'invalid_type' (choose from 'delta', 'full') + # Test recovery of node that was not failed over + testRecovery,init-num-servers=2,skip-failover=True,username=Administrator,password=password,servers=1,recovery-type=delta,expect-error=True,error-msg=otpNode - invalid node name or node can't be used for delta recovery + # Verify invalid username/password returns an error + testRecovery,init-num-servers=2,username=Administrator1,password=password1,servers=1,recovery-type=delta,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify running against an uninitialized cluster fails + testRecovery,initialized=False,init-num-servers=2,username=Administrator,password=password,servers=1,recovery-type=delta,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-server-add.conf b/conf/couchbase-cli/py-server-add.conf new file mode 100644 index 000000000..858ffe76c --- /dev/null +++ b/conf/couchbase-cli/py-server-add.conf @@ -0,0 +1,25 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Verify basic server add + testServerAdd,username=Administrator,password=password,num-add-servers=1,server-add-username=Administrator,server-add-password=password,expect-error=False + testServerAdd,username=Administrator,password=password,num-add-servers=1,server-add-username=Administrator,server-add-password=password,services=index,query,expect-error=False + testServerAdd,username=Administrator,password=password,num-add-servers=1,server-add-username=Administrator,server-add-password=password,index-storage-mode=default,expect-error=False + testServerAdd,username=Administrator,password=password,num-add-servers=1,server-add-username=Administrator,server-add-password=password,group-name="Group 1",index-storage-mode=default,services=index,query,expect-error=False + # Test server add with invalid service + testServerAdd,username=Administrator,password=password,num-add-servers=1,server-add-username=Administrator,server-add-password=password,services=index,invalid,expect-error=True,error-msg=`invalid` is not a valid service + # Test server add with invalid group name + testServerAdd,username=Administrator,password=password,num-add-servers=1,server-add-username=Administrator,server-add-password=password,group-name=invalidgroup,expect-error=True,error-msg=Group `invalidgroup` not found + # Test server add with no server-add-username or server-add-password + testServerAdd,username=Administrator,password=password,num-add-servers=1,expect-error=True,error-msg=Option required, but not specified: --server-add-username + testServerAdd,username=Administrator,password=password,num-add-servers=1,expect-error=True,error-msg=Option required, but not specified: --server-add-password + # Test server add with invalid server-add-username or server-add-password + 
testServerAdd,username=Administrator,password=password,num-add-servers=1,server-add-username=invalid,server-add-password=invalid,expect-error=True,error-msg=Prepare join failed. Authentication failed. Verify username and password. + # Test server add with invalid index storage mode + testServerAdd,username=Administrator,password=password,num-add-servers=1,server-add-username=Administrator,server-add-password=password,index-storage-mode=badmode,expect-error=True,error-msg=option --index-storage-setting: invalid choice: 'badmode' (choose from 'default', 'memopt') + # Test server add with index storage mode different than the one on the server + testServerAdd,username=Administrator,password=password,init-services=data,index,init-index-storage-mode=default,num-add-servers=1,server-add-username=Administrator,server-add-password=password,index-storage-mode=memopt,services=index,expect-error=True,error-msg=storageMode - Changing the optimization mode of global indexes is not supported when index service nodes are present in the cluster. Please remove all index service nodes to change this option. + # Test server add with index storage mode the same as the one on the server + testServerAdd,username=Administrator,password=password,init-index-storage-mode=memopt,num-add-servers=1,server-add-username=Administrator,server-add-password=password,index-storage-mode=memopt,expect-error=False + # Verify invalid username/password returns an error + testServerAdd,init-bucket-type=couchbase,username=Administrator1,password=password1,num-add-servers=1,server-add-username=Administrator,server-add-password=password,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify running against an uninitialized cluster fails + testServerAdd,initialized=False,username=Administrator,password=password,num-add-servers=1,server-add-username=Administrator,server-add-password=password,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-server-readd.conf b/conf/couchbase-cli/py-server-readd.conf new file mode 100644 index 000000000..3aa11c63b --- /dev/null +++ b/conf/couchbase-cli/py-server-readd.conf @@ -0,0 +1,16 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Readd a single server + testServerReadd,init-num-servers=2,username=Administrator,password=password,servers=1,expect-error=False + # Readd two servers + testServerReadd,init-num-servers=3,username=Administrator,password=password,servers=2,expect-error=False + + # Test recovery with an invalid node + testServerReadd,skip-failover=True,username=Administrator,password=password,invalid-recover-server=invalid:8091,recovery-type=full,expect-error=True,error-msg=Server not found invalid:8091 + # Test recovery without specifying a node + testServerReadd,username=Administrator,password=password,recovery-type=full,expect-error=True,error-msg=Option required, but not specified: --server-add + # Test recovery of node that was not failed over + testServerReadd,init-num-servers=2,skip-failover=True,username=Administrator,password=password,servers=1,recovery-type=delta,expect-error=True,error-msg=otpNode - invalid node name or node can't be used for delta recovery + # Verify invalid username/password returns an error + testServerReadd,init-num-servers=2,username=Administrator1,password=password1,servers=1,recovery-type=delta,expect-error=True,error-msg=unable to access the REST API - please check your username 
(-u) and password (-p) + # Verify running against an uninitialized cluster fails + testServerReadd,initialized=False,init-num-servers=2,username=Administrator,password=password,servers=1,recovery-type=delta,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-setting-cluster.conf b/conf/couchbase-cli/py-setting-cluster.conf index 85cf2d895..fb9f99123 100644 --- a/conf/couchbase-cli/py-setting-cluster.conf +++ b/conf/couchbase-cli/py-setting-cluster.conf @@ -25,7 +25,7 @@ clitest.couchbase_clitest.CouchbaseCliTest: # Verify that a password that is too short fails testSettingCluster,username=Administrator,password=password,new-username=new_user,new-password=short,data-ramsize=256,expect-error=True,error-msg=The password must be at least six characters. # Verify that invalid username and password fail - testSettingCluster,init-username=Administrator,init-password=password,username=myusername,password=mypassword,data-ramsize=256,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + testSettingCluster,username=myusername,password=mypassword,data-ramsize=256,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) # Verify that an invalid port fails testSettingCluster,username=Administrator,password=password,port=99999,expect-error=True,error-msg=The port number must be greater than 1023 and less than 65536. testSettingCluster,username=Administrator,password=password,port=100,expect-error=True,error-msg=The port number must be greater than 1023 and less than 65536. diff --git a/conf/couchbase-cli/py-setting-compaction.conf b/conf/couchbase-cli/py-setting-compaction.conf new file mode 100644 index 000000000..8fb287272 --- /dev/null +++ b/conf/couchbase-cli/py-setting-compaction.conf @@ -0,0 +1,70 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Test with all parameters set + testSettingCompaction,username=Administrator,password=password,db-frag-perc=50,db-frag-size=2048,view-frag-perc=50,view-frag-size=2048,from-period=1:00,to-period=5:00,abort-outside=1,parallel-compact=1,purge-interval=7,expect-error=False + # Test with only compaction triggers set + testSettingCompaction,username=Administrator,password=password,db-frag-perc=50,db-frag-size=2048,view-frag-perc=50,view-frag-size=2048,expect-error=False + # Test with only compaction time intervals set + testSettingCompaction,username=Administrator,password=password,from-period=1:00,to-period=5:00,abort-outside=1,expect-error=False + # Test update to only the metadata purge interval + testSettingCompaction,username=Administrator,password=password,purge-interval=7,expect-error=False + # Test with no parameters set + testSettingCompaction,username=Administrator,password=password,expect-error=False + + # Ensure that the compaction db percentage must be between 2 and 100 + testSettingCompaction,username=Administrator,password=password,db-frag-perc=1,expect-error=True,error-msg=--compaction-db-percentage must be between 2 and 100 + testSettingCompaction,username=Administrator,password=password,db-frag-perc=2,expect-error=False + testSettingCompaction,username=Administrator,password=password,db-frag-perc=100,expect-error=False + testSettingCompaction,username=Administrator,password=password,db-frag-perc=101,expect-error=True,error-msg=--compaction-db-percentage must be between 2 and 100 + # Ensure that the compaction db percentage
must be an integer + testSettingCompaction,username=Administrator,password=password,db-frag-perc=bad_val,expect-error=True,error-msg=option --compaction-db-percentage: invalid integer value: 'bad_val' + # Ensure that the compaction db size must be larger than or equal to 1 + testSettingCompaction,username=Administrator,password=password,db-frag-size=0,expect-error=True,error-msg=--compaction-db-size must be between greater than 1 or infinity + testSettingCompaction,username=Administrator,password=password,db-frag-size=1,expect-error=False + # Ensure that the compaction db size must be an integer + testSettingCompaction,username=Administrator,password=password,db-frag-size=bad_val,expect-error=True,error-msg=option --compaction-db-size: invalid integer value: 'bad_val' + # Ensure that the compaction view percentage must be between 2 and 100 + testSettingCompaction,username=Administrator,password=password,view-frag-perc=1,expect-error=True,error-msg=--compaction-view-percentage must be between 2 and 100 + testSettingCompaction,username=Administrator,password=password,view-frag-perc=2,expect-error=False + testSettingCompaction,username=Administrator,password=password,view-frag-perc=100,expect-error=False + testSettingCompaction,username=Administrator,password=password,view-frag-perc=101,expect-error=True,error-msg=--compaction-view-percentage must be between 2 and 100 + # Ensure that the compaction view percentage must be an integer + testSettingCompaction,username=Administrator,password=password,view-frag-perc=bad_val,expect-error=True,error-msg=option --compaction-view-percentage: invalid integer value: 'bad_val' + # Ensure that the compaction view size must be larger than 1 + testSettingCompaction,username=Administrator,password=password,view-frag-size=0,expect-error=True,error-msg=--compaction-view-size must be between greater than 1 or infinity + testSettingCompaction,username=Administrator,password=password,view-frag-size=1,expect-error=False + # Ensure that the compaction view size must be an integer + testSettingCompaction,username=Administrator,password=password,view-frag-size=bad_val,expect-error=True,error-msg=option --compaction-view-size: invalid integer value: 'bad_val' + # Ensure that the compaction from period must be a time (HH:MM) + testSettingCompaction,username=Administrator,password=password,from-period=12,to-period=5:00,abort-outside=1,expect-error=True,error-msg=Invalid value for --compaction-period-from, must be in form XX:XX + testSettingCompaction,username=Administrator,password=password,from-period=one,to-period=5:00,abort-outside=1,expect-error=True,error-msg=Invalid value for --compaction-period-from, must be in form XX:XX + testSettingCompaction,username=Administrator,password=password,from-period=12:,to-period=5:00,abort-outside=1,expect-error=True,error-msg=Invalid minute value for --compaction-period-from, must be an integer + testSettingCompaction,username=Administrator,password=password,from-period=:12,to-period=5:00,abort-outside=1,expect-error=True,error-msg=Invalid hour value for --compaction-period-from, must be an integer + testSettingCompaction,username=Administrator,password=password,from-period=:,to-period=5:00,abort-outside=1,expect-error=True,error-msg=Invalid hour value for --compaction-period-from, must be an integer + # Ensure that the compaction from period must be a valid time (Hour 0-23, min 0-59) + testSettingCompaction,username=Administrator,password=password,from-period=0:60,to-period=5:00,abort-outside=1,expect-error=True,error-msg=Invalid
minute value for --compaction-period-from, must be 0-59 + testSettingCompaction,username=Administrator,password=password,from-period=25:0,to-period=5:00,abort-outside=1,expect-error=True,error-msg=Invalid hour value for --compaction-period-from, must be 0-23 + # Ensure that the compaction to period must be a time (HH:MM) + testSettingCompaction,username=Administrator,password=password,from-period=5:00,to-period=12,abort-outside=1,expect-error=True,error-msg=Invalid value for --compaction-period-to, must be in form XX:XX + testSettingCompaction,username=Administrator,password=password,from-period=5:00,to-period=one,abort-outside=1,expect-error=True,error-msg=Invalid value for --compaction-period-to, must be in form XX:XX + testSettingCompaction,username=Administrator,password=password,from-period=5:00,to-period=12:,abort-outside=1,expect-error=True,error-msg=Invalid minute value for --compaction-period-to, must be an integer + testSettingCompaction,username=Administrator,password=password,from-period=5:00,to-period=:12,abort-outside=1,expect-error=True,error-msg=Invalid hour value for --compaction-period-to, must be an integer + testSettingCompaction,username=Administrator,password=password,from-period=5:00,to-period=:,abort-outside=1,expect-error=True,error-msg=Invalid hour value for --compaction-period-to, must be an integer + # Ensure that the compaction to period must be a valid time (Hour 0-23, min 0-59) + testSettingCompaction,username=Administrator,password=password,from-period=5:00,to-period=00:60,abort-outside=1,expect-error=True,error-msg=Invalid minute value for --compaction-period-to, must be 0-59 + testSettingCompaction,username=Administrator,password=password,from-period=5:00,to-period=25:00,abort-outside=1,expect-error=True,error-msg=Invalid hour value for --compaction-period-to, must be 0-23 + # Ensure that the compaction abort outside parameter must be either 0 or 1 + testSettingCompaction,username=Administrator,password=password,from-period=5:00,to-period=6:00,abort-outside=bad_val,expect-error=True,error-msg=option --enable-compaction-abort: invalid choice: 'bad_val' (choose from '0', '1') + # Ensure that the compaction from and to time and the abort outside time must be set together + testSettingCompaction,username=Administrator,password=password,from-period=5:00,abort-outside=1,expect-error=True,error-msg=--compaction-period-to is required when using --compaction-period-from + testSettingCompaction,username=Administrator,password=password,from-period=5:00,to-period=6:00,expect-error=True,error-msg=--enable-compaction-abort is required when using --compaction-period-from + testSettingCompaction,username=Administrator,password=password,abort-outside=1,expect-error=True,error-msg=--compaction-period-from is required when using --enable-compaction-abort + # Ensure that the metadata purge interval must be between 0.04 and 60 + testSettingCompaction,username=Administrator,password=password,purge-interval=0,expect-error=True,error-msg=--metadata-purge-interval must be between 0.04 and 60.0 + testSettingCompaction,username=Administrator,password=password,purge-interval=0.8,expect-error=False + testSettingCompaction,username=Administrator,password=password,purge-interval=61.0,expect-error=True,error-msg=--metadata-purge-interval must be between 0.04 and 60.0 + # Ensure that the metadata purge interval must be a number + testSettingCompaction,username=Administrator,password=password,purge-interval=bad_val,expect-error=True,error-msg=option --metadata-purge-interval: invalid
floating-point value: 'bad_val' + # Verify that invalid username and password fail + testSettingCompaction,username=myusername,password=mypassword,purge-interval=0.8,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify that running against an uninitialized cluster fails + testSettingCompaction,username=Administrator,password=password,purge-interval=0.8,initialized=False,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-settings-alert.conf b/conf/couchbase-cli/py-settings-alert.conf new file mode 100644 index 000000000..cb3e4146a --- /dev/null +++ b/conf/couchbase-cli/py-settings-alert.conf @@ -0,0 +1,26 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Test enable email alerts, some alerts set + testSettingAlert,username=Administrator,password=password,enabled=1,email-recipients=user@couchbase.com,email-sender=root@couchbase.com,email-host=localhost,email-port=25,alert-auto-failover-node=True,alert-ip-changed=True,alert-audit-msg-dropped=True,expect-error=False + # Test enable email alerts, all alerts set + testSettingAlert,username=Administrator,password=password,enabled=1,email-recipients=user@couchbase.com,email-sender=root@couchbase.com,email-host=localhost,email-port=25,alert-auto-failover-node=True,alert-auto-failover-max-reached=True,alert-auto-failover-node-down=True,alert-auto-failover-cluster-small=True,alert-auto-failover-disable=True,alert-ip-changed=True,alert-disk-space=True,alert-meta-overhead=True,alert-meta-oom=True,alert-write-failed=True,alert-audit-msg-dropped=True,expect-error=False + # Test enable email alerts no alerts set + testSettingAlert,username=Administrator,password=password,enabled=1,email-recipients=user@couchbase.com,email-sender=root@couchbase.com,email-host=localhost,email-port=25,email-user=user,email-password=password,expect-error=False + + # Test enable email alerts no email recipients + testSettingAlert,username=Administrator,password=password,enabled=1,email-sender=root@couchbase.com,email-host=localhost,email-port=25,expect-error=True,error-msg=--email-recipient must be set when email alerts are enabled + # Test enable email alerts no email sender + testSettingAlert,username=Administrator,password=password,enabled=1,email-recipients=user@couchbase.com,email-host=localhost,email-port=25,expect-error=True,error-msg=--email-sender must be set when email alerts are enabled + # Test enable email alerts no email host + testSettingAlert,username=Administrator,password=password,enabled=1,email-recipients=user@couchbase.com,email-sender=root@couchbase.com,email-port=25,expect-error=True,error-msg=--email-host must be set when email alerts are enabled + # Test enable email alerts no email port + testSettingAlert,username=Administrator,password=password,enabled=1,email-recipients=user@couchbase.com,email-sender=root@couchbase.com,email-host=localhost,expect-error=True,error-msg=--email-port must be set when email alerts are enabled + + # Test disable email alerts + testSettingAlert,username=Administrator,password=password,enabled=0,expect-error=False + # Test disable email alerts with other parameters set + testSettingAlert,username=Administrator,password=password,enabled=0,email-recipients=user@couchbase.com,email-sender=root@couchbase.com,email-host=localhost,email-port=25,expect-error=False + + # Verify that invalid username and password fail + 
testSettingAlert,username=myusername,password=mypassword,enabled=0,email-recipients=user@couchbase.com,email-sender=root@couchbase.com,email-host=localhost,email-port=25,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify that running against an uninitialized cluster fails + testSettingAlert,username=Administrator,password=password,enabled=0,email-recipients=user@couchbase.com,email-sender=root@couchbase.com,email-host=localhost,email-port=25,initialized=False,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-settings-audit.conf b/conf/couchbase-cli/py-settings-audit.conf new file mode 100644 index 000000000..28e8ef6cc --- /dev/null +++ b/conf/couchbase-cli/py-settings-audit.conf @@ -0,0 +1,15 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Modify audit settings on an initialized cluster + testSettingAudit,username=Administrator,password=password,enabled=1,log-path=valid,initialized=True,expect-error=False + testSettingAudit,username=Administrator,password=password,enabled=1,log-path=valid,rotate-interval=90000,initialized=True,expect-error=False + testSettingAudit,username=Administrator,password=password,enabled=0,initialized=True,expect-error=False + # Modify none of audit settings + testSettingAudit,username=Administrator,password=password,initialized=True,expect-error=True,error-msg=No settings specified to be changed + # Test that setting an invalid audit log path fails + testSettingAudit,username=Administrator,password=password,enabled=1,log-path=/invalid/path,initialized=True,expect-error=True,error-msg=logPath - The value must be a valid directory + # Test that setting an invalid audit log rotate interval fails + testSettingAudit,username=Administrator,password=password,enabled=1,log-path=valid,rotate-interval=bad_value,initialized=True,expect-error=True,error-msg=option --audit-log-rotate-interval: invalid integer value: 'bad_value' + # Verify that invalid username and password fail + testSettingAudit,username=myusername,password=mypassword,enabled=1,log-path=valid,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify that running against an uninitialized cluster fails + testSettingAudit,username=Administrator,password=password,enabled=1,log-path=valid,initialized=False,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-settings-autofailover.conf b/conf/couchbase-cli/py-settings-autofailover.conf new file mode 100644 index 000000000..341c5b95f --- /dev/null +++ b/conf/couchbase-cli/py-settings-autofailover.conf @@ -0,0 +1,16 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Modify auto-failover settings on an initialized cluster + testSettingAutoFailover,username=Administrator,password=password,enabled=1,timeout=60,initialized=True,expect-error=False + testSettingAutoFailover,username=Administrator,password=password,enabled=0,timeout=120,initialized=True,expect-error=False + # Modify none of auto-failover settings + testSettingAutoFailover,username=Administrator,password=password,initialized=True,expect-error=True,error-msg=No settings specified to be changed + # Test that an auto-failover timeout of less than 30 seconds fails +
testSettingAutoFailover,username=Administrator,password=password,timeout=15,initialized=True,expect-error=True,error-msg=Timeout value must be at least 30 seconds + # Ensure invalid value for enable fails + testSettingAutoFailover,username=Administrator,password=password,enabled=badvalue,initialized=True,expect-error=True,error-msg=option --enable-auto-failover: invalid choice: 'badvalue' (choose from '0', '1') + # Ensure invalid value for timeout fails + testSettingAutoFailover,username=Administrator,password=password,timeout=badvalue,initialized=True,expect-error=True,error-msg=option --auto-failover-timeout: invalid integer value: 'badvalue' + # Verify that invalid username and password fail + testSettingAutoFailover,username=myusername,password=mypassword,enabled=1,timeout=60,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify that running against an uninitialized cluster fails + testSettingAutoFailover,username=Administrator,password=password,enabled=1,timeout=60,initialized=False,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-settings-index.conf b/conf/couchbase-cli/py-settings-index.conf new file mode 100644 index 000000000..9710bb543 --- /dev/null +++ b/conf/couchbase-cli/py-settings-index.conf @@ -0,0 +1,22 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Modify index settings on an initialized cluster + testSettingIndex,username=Administrator,password=password,max-rollback-points=3,storage-mode=memopt,stable-snapshot-interval=30,memory-snapshot-interval=40,threads=6,log-level=info,initialized=True,expect-error=False + testSettingIndex,username=Administrator,password=password,max-rollback-points=7,storage-mode=default,stable-snapshot-interval=25,memory-snapshot-interval=35,threads=10,log-level=debug,initialized=True,expect-error=False + # Modify none of index settings + testSettingIndex,username=Administrator,password=password,initialized=True,expect-error=True,error-msg=No settings specified to be changed + # Ensure that an invalid index storage mode fails + testSettingIndex,username=Administrator,password=password,storage-mode=badvalue,initialized=True,expect-error=True,error-msg=option --index-storage-setting: invalid choice: 'badvalue' (choose from 'default', 'memopt') + # Ensure that max rollback points must be an integer + testSettingIndex,username=Administrator,password=password,max-rollback-points=badvalue,initialized=True,expect-error=True,error-msg=option --index-max-rollback-points: invalid integer value: 'badvalue' + # Ensure that stable snapshot interval must be an integer + testSettingIndex,username=Administrator,password=password,stable-snapshot-interval=badvalue,initialized=True,expect-error=True,error-msg=option --index-stable-snapshot-interval: invalid integer value: 'badvalue' + # Ensure that memory snapshot interval must be an integer + testSettingIndex,username=Administrator,password=password,memory-snapshot-interval=badvalue,initialized=True,expect-error=True,error-msg=option --index-memory-snapshot-interval: invalid integer value: 'badvalue' + # Ensure that threads must be an integer + testSettingIndex,username=Administrator,password=password,threads=badvalue,initialized=True,expect-error=True,error-msg=option --index-threads: invalid integer value: 'badvalue' + # Ensure that an invalid log level fails + 
testSettingIndex,username=Administrator,password=password,log-level=badvalue,initialized=True,expect-error=True,error-msg=option --index-log-level: invalid choice: 'badvalue' (choose from 'debug', 'silent', 'fatal', 'error', 'warn', 'info', 'verbose', 'timing', 'trace') + # Verify that invalid username and password fail + testSettingIndex,username=myusername,password=mypassword,max-rollback-points=3,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify that running against an uninitialized cluster fails + testSettingIndex,username=Administrator,password=password,max-rollback-points=3,initialized=False,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-settings-ldap.conf b/conf/couchbase-cli/py-settings-ldap.conf new file mode 100644 index 000000000..1a05a81dd --- /dev/null +++ b/conf/couchbase-cli/py-settings-ldap.conf @@ -0,0 +1,20 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Test enable ldap settings + testSettingLdap,username=Administrator,password=password,enabled=1,admins=mike,john,expect-error=False + testSettingLdap,username=Administrator,password=password,enabled=1,ro-admins=mike,john,expect-error=False + testSettingLdap,username=Administrator,password=password,enabled=1,admins=mike,john,default=roadmins,expect-error=False + testSettingLdap,username=Administrator,password=password,enabled=1,ro-admins=mike,john,default=admins,expect-error=False + testSettingLdap,username=Administrator,password=password,enabled=1,admins=alice,ro-admins=mike,john,default=admins,expect-error=False + testSettingLdap,username=Administrator,password=password,enabled=1,expect-error=False + testSettingLdap,username=Administrator,password=password,enabled=0,expect-error=False + testSettingLdap,username=Administrator,password=password,enabled=0,admins=alice,ro-admins=mike,john,default=admins,expect-error=False + + # Test enabled not specified + testSettingLdap,username=Administrator,password=password,expect-error=True,error-msg=option --ldap-enabled: invalid choice: 'False' (choose from '0', '1') + # Test invalid default + testSettingLdap,username=Administrator,password=password,enabled=1,default=invalid,expect-error=True,error-msg=option --ldap-default: invalid choice: 'invalid' (choose from 'admins', 'roadmins', 'none') + + # Verify that invalid username and password fail + testSettingLdap,username=myusername,password=mypassword,enabled=1,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify that running against an uninitialized cluster fails + testSettingLdap,username=Administrator,password=password,enabled=1,initialized=False,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/couchbase-cli/py-user-manage.conf b/conf/couchbase-cli/py-user-manage.conf new file mode 100644 index 000000000..e5349d1b0 --- /dev/null +++ b/conf/couchbase-cli/py-user-manage.conf @@ -0,0 +1,35 @@ +clitest.couchbase_clitest.CouchbaseCliTest: + # Test create read only user + testUserManage,username=Administrator,password=password,set=True,ro-username=mike,ro-password=password,expect-error=False + # Test create read only user no username + testUserManage,username=Administrator,password=password,set=True,ro-password=password,expect-error=True,error-msg=--ro-username is required with the --set
command + # Test create read only user no password + testUserManage,username=Administrator,password=password,set=True,ro-username=mike,expect-error=True,error-msg=--ro-password is required with the --set command + # Test create read only user password too short + testUserManage,username=Administrator,password=password,set=True,ro-username=mike,ro-password=pass,expect-error=True,error-msg=password - The password must be at least six characters. + # Test create read only user when one already exists + testUserManage,username=Administrator,password=password,set=True,init-ro-username=john,init-ro-password=password,ro-username=mike,ro-password=password,expect-error=True,error-msg=The internal read-only user already exists + # Test read only user password change + testUserManage,username=Administrator,password=password,set=True,init-ro-username=mike,init-ro-password=password,ro-username=mike,ro-password=new_pass,expect-error=False + + # Test delete read only user + testUserManage,username=Administrator,password=password,delete=True,init-ro-username=john,init-ro-password=password,expect-error=False + # Test delete read only user, none exists + testUserManage,username=Administrator,password=password,delete=True,expect-error=True,error-msg="Read-Only admin does not exist" + + # Test list the read only user + testUserManage,username=Administrator,password=password,list=True,init-ro-username=john,init-ro-password=password,expect-error=False + # Test list read only user, none exists + testUserManage,username=Administrator,password=password,list=True,expect-error=True,error-msg=There is no internal read-only user + + # Test that an error occurs when none of --list, --set, or --delete is set + testUserManage,username=Administrator,password=password,expect-error=True,error-msg=Must specify --delete, --list, or --set + # Test that an error occurs when more than one of --list, --set, and --delete is set + testUserManage,username=Administrator,password=password,list=True,set=True,ro-username=mike,ro-password=password,expect-error=True,error-msg=Only one of the following can be specified: --delete, --list, or --set + testUserManage,username=Administrator,password=password,delete=True,set=True,ro-username=mike,ro-password=password,expect-error=True,error-msg=Only one of the following can be specified: --delete, --list, or --set + testUserManage,username=Administrator,password=password,list=True,set=True,delete=True,ro-username=mike,ro-password=password,expect-error=True,error-msg=Only one of the following can be specified: --delete, --list, or --set + + # Verify invalid username/password returns an error + testUserManage,username=Administrator1,password=password1,list=True,expect-error=True,error-msg=unable to access the REST API - please check your username (-u) and password (-p) + # Verify running against an uninitialized cluster fails + testUserManage,initialized=False,username=Administrator,password=password,list=True,expect-error=True,error-msg=Cluster is not initialized, use cluster-init to initialize the cluster \ No newline at end of file diff --git a/conf/failover/py-newfailover.conf b/conf/failover/py-newfailover.conf index 9bce57f63..12ecca55a 100644 --- a/conf/failover/py-newfailover.conf +++ b/conf/failover/py-newfailover.conf @@ -1,60 +1,33 @@ failover.failovertests.FailoverTests: -# Hard Failover Tests with Firewall Fail Node Tests - #Bug MB-11535,
test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,failoverMaster=True,GROUP=P0 - #Bug MB-11535t est_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,numViews=5,withViewsOps=True,createIndexesDuringFailover=True,failoverMaster=True,GROUP=P0 - #Bug MB-11535 test_failover_firewall,replicas=2,graceful=False,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,numViews=5,withViewsOps=True,createIndexesDuringFailover=True,failoverMaster=True,GROUP=P1 - test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,doc_ops=update,withMutationOps=true,withQueries=True,numViews=5,withViewsOps=True,GROUP=P0 - test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,doc_ops=update,compact=True,GROUP=P1 - test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,GROUP=P0 - test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=40000,sasl_buckets=1,GROUP=P1 - test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,load_ratio=10,bidirectional=True,GROUP=P0 - test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,load_ratio=10,GROUP=P1 - test_failover_firewall,replicas=3,graceful=False,num_failed_nodes=3,load_ratio=10,GROUP=P1 -# Hard Failover Tests with Normal Failover Tests - test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,failoverMaster=True,GROUP=P0 - test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,standard_buckets=2,sasl_buckets=2,standard_bucket_priority=low:high,sasl_bucket_priority=low:high,active_resident_threshold=70,dgm_run=True,failoverMaster=True,GROUP=P0 - test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,GROUP=P0 - test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,withQueries=True,numViews=5,withViewsOps=True,GROUP=P0 - test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=40000,standard_buckets=1,GROUP=P0; + test_failover_normal,replicas=0,graceful=True,check_verify_failover_type=True,num_failed_nodes=2,items=100,graceful=True,runRebalanceAfterFailover=False,GROUP=P1;GRACEFUL + test_failover_normal,replicas=1,graceful=True,check_verify_failover_type=True,num_failed_nodes=1,items=100,graceful=True,runRebalanceAfterFailover=False,GROUP=P1;GRACEFUL + test_failover_normal,replicas=2,graceful=True,check_verify_failover_type=True,num_failed_nodes=3,items=100,graceful=True,runRebalanceAfterFailover=False,GROUP=P1;GRACEFUL + test_failover_normal,replicas=3,graceful=True,check_verify_failover_type=True,num_failed_nodes=4,items=100,graceful=True,runRebalanceAfterFailover=False,GROUP=P1;GRACEFUL test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,load_ratio=10,GROUP=P1 - test_failover_normal,replicas=2,graceful=False,num_failed_nodes=2,items=20000,GROUP=P1 - test_failover_normal,replicas=3,graceful=False,num_failed_nodes=3,load_ratio=5,GROUP=P1 - test_failover_normal,items=100000,graceful=False,during_ops=change_password,GROUP=P1;WINDOWS - test_failover_normal,items=100000,graceful=False,during_ops=change_port,GROUP=P1;WINDOWS -# Hard 
Failover Tests with Stop Node Fail Tests - test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,numViews=5,withViewsOps=True,createIndexesDuringFailover=True,items=100000,active_resident_threshold=70,dgm_run=True,failoverMaster=True,GROUP=P0 - test_failover_stop_server,replicas=2,graceful=False,num_failed_nodes=1,numViews=5,withViewsOps=True,createIndexesDuringFailover=True,items=100000,active_resident_threshold=70,dgm_run=True,failoverMaster=True,GROUP=P1 - test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,failoverMaster=True,GROUP=P0 - test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,GROUP=P0 - test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,withQueries=True,numViews=5,withViewsOps=True,max_verify=10000,GROUP=P0 test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,load_ratio=10,GROUP=P1 test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,load_ratio=1,GROUP=P2 - test_failover_stop_server,replicas=2,graceful=False,num_failed_nodes=2,items=20000,GROUP=P0 test_failover_stop_server,replicas=2,graceful=False,num_failed_nodes=2,load_ratio=10,GROUP=P2 - test_failover_stop_server,replicas=3,graceful=False,num_failed_nodes=3,load_ratio=1,GROUP=P1 - test_failover_stop_server,replicas=3,graceful=False,num_failed_nodes=3,items=100000,compact=True,load_ratio=1,GROUP=P1 + test_failover_normal,replicas=3,graceful=False,num_failed_nodes=3,load_ratio=5,GROUP=P1 + test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,load_ratio=10,bidirectional=True,GROUP=P0 + test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,load_ratio=10,GROUP=P1 test_failover_stop_server,replicas=3,graceful=False,num_failed_nodes=3,load_ratio=1,failoverMaster=True,GROUP=P1 -# Graceful Failover and/or add-back tests - 3.0.0 Tests - test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,compact=True,active_resident_threshold=70,dgm_run=True,withMutationOps=True,doc_ops=update,failoverMaster=True,graceful=True,GROUP=P0;GRACEFUL - test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,withMutationOps=True,doc_ops=update,failoverMaster=True,graceful=True,GROUP=P0;GRACEFUL - test_failover_normal,replicas=2,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,withMutationOps=True,doc_ops=update,failoverMaster=True,graceful=True,GROUP=P1;GRACEFUL - test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,failoverMaster=True,graceful=True,GROUP=P0;GRACEFUL - test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,standard_buckets=2,sasl_buckets=2,standard_bucket_priority=low:high,sasl_bucket_priority=low:high,failoverMaster=True,graceful=True,GROUP=P0;GRACEFUL - test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,active_resident_threshold=70,dgm_run=True,graceful=True,GROUP=P0;GRACEFUL + test_failover_normal,replicas=2,graceful=False,num_failed_nodes=2,items=20000,GROUP=P1 + test_failover_stop_server,replicas=3,graceful=False,num_failed_nodes=3,load_ratio=1,GROUP=P1 + test_failover_stop_server,replicas=2,graceful=False,num_failed_nodes=2,items=20000,GROUP=P0 
test_failover_normal,replicas=1,num_failed_nodes=1,load_ratio=20,sasl_buckets=1,graceful=True,GROUP=P1;GRACEFUL - test_failover_then_add_back,replicas=2,num_failed_nodes=2,items=100000,standard_buckets=1,recoveryType=delta:full,graceful=True,GROUP=P2;GRACEFUL + test_failover_firewall,replicas=3,graceful=False,num_failed_nodes=3,load_ratio=10,GROUP=P1 + test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,withMutationOps=True,doc_ops=update,upr_check=False,recoveryType=delta,graceful=True,GROUP=P0;GRACEFUL + test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,withMutationOps=True,doc_ops=update,upr_check=False,recoveryType=full,graceful=True,GROUP=P0;GRACEFUL + test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=40000,standard_buckets=1,GROUP=P0; + test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=40000,sasl_buckets=1,GROUP=P1 + test_failover_stop_server,replicas=3,graceful=False,num_failed_nodes=3,items=100000,compact=True,load_ratio=1,GROUP=P1 + test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,sasl_buckets=1,upr_check=False,recoveryType=full,graceful=True,GROUP=P0;GRACEFUL + test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=200000,vbuckets=1024,total_vbuckets=1024,stopGracefulFailover=True,graceful=True,recoveryType=delta,GROUP=P1;GRACEFUL + test_failover_then_add_back,replicas=2,num_failed_nodes=1,items=100000,standard_buckets=1,recoveryType=delta,deltaRecoveryBuckets=default:standard_buckets0,graceful=True,GROUP=P1;GRACEFUL test_failover_then_add_back,replicas=2,num_failed_nodes=2,items=100000,standard_buckets=1,recoveryType=delta:delta,deltaRecoveryBuckets=default,graceful=False,GROUP=P1;GRACEFUL + test_failover_normal,replicas=1,num_failed_nodes=1,items=200000,vbuckets=1024,total_vbuckets=1024,stopGracefulFailover=True,graceful=True,GROUP=P0;GRACEFUL test_failover_then_add_back,replicas=2,num_failed_nodes=1,items=100000,standard_buckets=1,recoveryType=full,deltaRecoveryBuckets=default,graceful=True,GROUP=P1;GRACEFUL - test_failover_then_add_back,replicas=2,num_failed_nodes=1,items=100000,standard_buckets=1,recoveryType=delta,deltaRecoveryBuckets=default:standard_buckets0,graceful=True,GROUP=P1;GRACEFUL - test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,withMutationOps=True,doc_ops=update,upr_check=False,recoveryType=full,graceful=True,GROUP=P0;GRACEFUL - test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,withMutationOps=True,doc_ops=update,upr_check=False,recoveryType=delta,graceful=True,GROUP=P0;GRACEFUL + test_failover_then_add_back,replicas=2,num_failed_nodes=2,items=100000,standard_buckets=1,recoveryType=delta:full,graceful=True,GROUP=P2;GRACEFUL test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,numViews=5,withViewsOps=True,createIndexesDuringFailover=True,sasl_buckets=1,upr_check=False,recoveryType=full,graceful=True,GROUP=P0;GRACEFUL test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,numViews=5,withViewsOps=True,createIndexesDuringFailover=True,sasl_buckets=1,upr_check=False,recoveryType=delta,graceful=True,GROUP=P0;GRACEFUL test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,numViews=5,compact=True,withViewsOps=True,createIndexesDuringFailover=True,sasl_buckets=1,upr_check=False,recoveryType=delta,graceful=True,GROUP=P1;GRACEFUL - 
test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,sasl_buckets=1,upr_check=False,recoveryType=full,graceful=True,GROUP=P0;GRACEFUL - test_failover_normal,replicas=1,graceful=True,check_verify_failover_type=True,num_failed_nodes=1,items=100,graceful=True,runRebalanceAfterFailover=False,GROUP=P1;GRACEFUL - test_failover_normal,replicas=2,graceful=True,check_verify_failover_type=True,num_failed_nodes=3,items=100,graceful=True,runRebalanceAfterFailover=False,GROUP=P1;GRACEFUL - test_failover_normal,replicas=3,graceful=True,check_verify_failover_type=True,num_failed_nodes=4,items=100,graceful=True,runRebalanceAfterFailover=False,GROUP=P1;GRACEFUL - test_failover_normal,replicas=0,graceful=True,check_verify_failover_type=True,num_failed_nodes=2,items=100,graceful=True,runRebalanceAfterFailover=False,GROUP=P1;GRACEFUL - test_failover_normal,replicas=1,num_failed_nodes=1,items=200000,vbuckets=1024,total_vbuckets=1024,stopGracefulFailover=True,graceful=True,GROUP=P0;GRACEFUL - test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=200000,vbuckets=1024,total_vbuckets=1024,stopGracefulFailover=True,graceful=True,recoveryType=delta,GROUP=P1;GRACEFUL \ No newline at end of file diff --git a/conf/fts/py-fts-custmap-rqg-queries.conf b/conf/fts/py-fts-custmap-rqg-queries.conf index 474cf3cfd..50ee0b0de 100644 --- a/conf/fts/py-fts-custmap-rqg-queries.conf +++ b/conf/fts/py-fts-custmap-rqg-queries.conf @@ -62,25 +62,32 @@ fts.stable_topology_fts.StableTopFTS: #emp-dataset - single type/doc map + single custom analyzer + querying - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=1,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=2,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=3,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=4,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=5,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=6,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=9,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=10,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=11,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=12,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=15,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=16,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=17,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=19,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=21,num_queries=100,compare_es=True,GROUP=P0 - 
index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=22,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=23,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=24,num_queries=100,compare_es=True,GROUP=P0 - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,cm_id=30,num_queries=100,compare_es=True,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=1,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=3,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=4,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=6,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=11,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=12,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=13,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=15,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=19,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=24,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=28,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=33,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=38,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=43,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=48,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=54,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=56,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=65,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=72,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=82,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=89,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=92,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=100,num_queries=100,GROUP=P0 + 
index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=101,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=105,num_queries=100,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=111,num_queries=100,GROUP=P0 #wiki-dataset - single type/doc map + single custom analyzer + querying @@ -125,17 +132,25 @@ fts.stable_topology_fts.StableTopFTS: index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=3,cm_id=2,num_queries=100,compare_es=True,dataset=all,GROUP=P0 #emp-dataset - single type/doc map + single custom analyzer + multiple filters + querying - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,multiple_filters=true,cm_id=34,num_queries=100,compare_es=True,GROUP=P0,cluster=D+F - index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,multiple_filters=true,cm_id=35,num_queries=100,compare_es=True,GROUP=P0,cluster=D+F + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,multiple_filters=true,cm_id=34,num_queries=100,compare_es=True,GROUP=P0 + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,multiple_filters=true,cm_id=35,num_queries=100,compare_es=True,GROUP=P0 #Update/Delete custom analyzer in an index definition - index_edit_and_query_custom_analyzer,items=100,custom_map=True,num_custom_analyzers=1,cm_id=2,num_queries=1,compare_es=True,GROUP=P0,cluster=D+F - index_delete_custom_analyzer,items=100,custom_map=True,num_custom_analyzers=1,cm_id=2,delete_custom_analyzer=True,error_msg=no analyzer with name or type,GROUP=P0,cluster=D+F - index_delete_custom_analyzer,items=100,custom_map=True,num_custom_analyzers=1,cm_id=2,delete_custom_filter=True,error_msg=error building analyzer,GROUP=P0,cluster=D+F + index_edit_and_query_custom_analyzer,items=100,custom_map=True,num_custom_analyzers=1,cm_id=2,num_queries=1,compare_es=True,GROUP=P0 + index_delete_custom_analyzer,items=100,custom_map=True,num_custom_analyzers=1,cm_id=2,delete_custom_analyzer=True,error_msg=no analyzer with name or type,GROUP=P0 + index_delete_custom_analyzer,items=100,custom_map=True,num_custom_analyzers=1,cm_id=12,delete_custom_filter=True,error_msg=error building analyzer,GROUP=P0 #Field Name Alias / Searchable As property of field mapping - test_field_name_alias,items=1000,field_name=name,field_type=text,field_alias=fullname,query="""{"match": "Safiya", "field":"fullname"}""",expected_hits=12,GROUP=P0,cluster=D+F - test_field_name_alias,items=1000,field_name=salary,field_type=number,field_alias=sal,query="""{"min": 50000,"max": 100000,"field":"sal","inclusive_max": true}""",expected_hits=514,GROUP=P0,cluster=D+F - test_field_name_alias,items=1000,field_name=join_date,field_type=datetime,field_alias=doj,query="""{"inclusive_start": true, "field": "doj", "inclusive_end": true, "end": "2016-08-18T14:40:30.938673", "start": "1991-08-24T07:43:00"}""",expected_hits=372,GROUP=P0,cluster=D+F + test_field_name_alias,items=1000,field_name=name,field_type=text,field_alias=fullname,query="""{"match": "Safiya", "field":"fullname"}""",expected_hits=12,GROUP=P0 + test_field_name_alias,items=1000,field_name=salary,field_type=number,field_alias=sal,query="""{"min": 50000,"max": 100000,"field":"sal","inclusive_max": true}""",expected_hits=514,GROUP=P0 + 
test_field_name_alias,items=1000,field_name=join_date,field_type=datetime,field_alias=doj,query="""{"inclusive_start": true, "field": "doj", "inclusive_end": true, "end": "2016-08-18T14:40:30.938673", "start": "1991-08-24T07:43:00"}""",expected_hits=372,GROUP=P0 + + #One field with multiple analyzers + test_one_field_multiple_analyzer,items=1000,dataset=wiki,field_name=revision.text.#text,field_type=text,field_alias=description,query="""{"match": "Republic", "field":"revision.text.description"}""",expected_hits1=0,expected_hits2=77,GROUP=P0 + + #Document Type Configuration + test_doc_config,mode=docid_regexp,query={"match": "orleans", "field":"city"},expected_hits=5,cluster=D+F + test_doc_config,mode=docid_prefix,query={"match": "orleans", "field":"city"},expected_hits=5,cluster=D+F + test_doc_config,mode=type_field,query={"match": "orleans", "field":"city"},expected_hits=5,cluster=D+F diff --git a/conf/fts/py-fts-defmap-rqg-queries.conf b/conf/fts/py-fts-defmap-rqg-queries.conf index c4db53cf8..78d0bb298 100644 --- a/conf/fts/py-fts-defmap-rqg-queries.conf +++ b/conf/fts/py-fts-defmap-rqg-queries.conf @@ -66,4 +66,13 @@ fts.stable_topology_fts.StableTopFTS: test_query_type_on_alias,dataset=wiki,items=30000,num_queries=100,query_types=match,cluster=D:F,compare_es=True,update=True,delete=True,GROUP=P0 # alias on multiple indexes - create_query_alias_on_multiple_indexes,items=1000,num_queries=100,query_types=match,match_phrase,conjunction,bool,disjunction,cluster=D+F,compare_es=True,GROUP=P0 \ No newline at end of file + create_query_alias_on_multiple_indexes,items=1000,num_queries=100,query_types=match,match_phrase,conjunction,bool,disjunction,cluster=D+F,compare_es=True,GROUP=P0 + + # Boosting query + test_boost_query_type,items=100,GROUP=P0,cluster=D+F + + # Doc ID query + test_doc_id_query_type,items=1000,expected_hits=3,query={"ids":["emp10000001","emp10000002","emp10000003"]},GROUP=P0,cluster=D+F + test_doc_id_query_type,items=1000,expected_hits=14,query={"ids":["emp10000001","emp10000002","emp10000003"]},disjuncts_query={"match": "Safiya", "field":"name"},GROUP=P0,cluster=D+F + test_doc_id_query_type,items=1000,expected_hits=3,query={"ids":["emp10000001","emp10000002","emp10000003"]},invalid_doc_id=emp20000001,GROUP=P0,cluster=D+F + test_doc_id_query_type,items=1000,expected_hits=14,query={"ids":["emp10000001","emp10000002","emp10000003"]},invalid_doc_id=emp20000001,disjuncts_query={"match": "Safiya", "field":"name"},GROUP=P0,cluster=D+F \ No newline at end of file diff --git a/conf/fts/py-fts-querytime-tests.conf b/conf/fts/py-fts-querytime-tests.conf new file mode 100644 index 000000000..cf88863c3 --- /dev/null +++ b/conf/fts/py-fts-querytime-tests.conf @@ -0,0 +1,65 @@ +fts.stable_topology_fts.StableTopFTS: + + #Testcases for Facets - + + #Positive testcases with size 5 + test_facets,items=100,query={"match": "emp", "field":"type"},facets=terms,GROUP=P0,cluster=D+F + test_facets,items=100,query={"match": "emp", "field":"type"},facets=numeric_ranges,GROUP=P0,cluster=D+F + test_facets,items=100,query={"match": "emp", "field":"type"},facets=date_ranges,GROUP=P0,cluster=D+F + + #Multiple facets in a single query + test_facets,items=100,query={"match": "emp", "field":"type"},facets=terms,numeric_ranges,date_ranges,GROUP=P0,cluster=D+F + + #Negative testcase - Hits = 0 + test_facets,items=100,query={"match": "wiki", "field":"type"},facets=terms,GROUP=P0,cluster=D+F + test_facets,items=100,query={"match": "wiki", "field":"type"},facets=numeric_ranges,GROUP=P0,cluster=D+F + 
test_facets,items=100,query={"match": "wiki", "field":"type"},facets=date_ranges,GROUP=P0,cluster=D+F + + #Negative testcase - Field not indexed + test_facets,items=100,query={"match": "emp", "field":"type"},facets=terms,field_indexed=False,GROUP=P0,cluster=D+F + test_facets,items=100,query={"match": "emp", "field":"type"},facets=numeric_ranges,field_indexed=False,GROUP=P0,cluster=D+F + test_facets,items=100,query={"match": "emp", "field":"type"},facets=date_ranges,field_indexed=False,GROUP=P0,cluster=D+F + + #Positive testcase - Size less than no. of buckets defined + test_facets,items=100,query={"match": "emp", "field":"type"},facets=terms,facets_size=5,GROUP=P0,cluster=D+F + test_facets,items=100,query={"match": "emp", "field":"type"},facets=numeric_ranges,facets_size=2,GROUP=P0,cluster=D+F + test_facets,items=100,query={"match": "emp", "field":"type"},facets=date_ranges,facets_size=2,GROUP=P0,cluster=D+F + + # --- Sorting testcases --- + # Sorting on fields of different data types, ascending and descending + test_sorting_of_results,items=100,sort_fields=dept,expected=emp10000042,emp10000001,emp10000071,tolerance=emp10000001,emp10000071,cluster=D+F + test_sorting_of_results,items=100,sort_fields=-dept,expected=emp10000071,emp10000001,emp10000042,tolerance=emp10000001,emp10000071,cluster=D+F + test_sorting_of_results,items=100,sort_fields=-salary,expected=emp10000071,emp10000001,emp10000042,cluster=D+F + test_sorting_of_results,items=100,sort_fields=salary,expected=emp10000042,emp10000001,emp10000071,cluster=D+F + test_sorting_of_results,items=100,sort_fields=join_date,expected=emp10000001,emp10000071,emp10000042,cluster=D+F + test_sorting_of_results,items=100,sort_fields=-join_date,expected=emp10000042,emp10000071,emp10000001,cluster=D+F + test_sorting_of_results,items=100,sort_fields=-is_manager,expected=emp10000071,emp10000042,emp10000001,tolerance=emp10000042,emp10000071,cluster=D+F + test_sorting_of_results,items=100,sort_fields=is_manager,expected=emp10000001,emp10000042,emp10000071,tolerance=emp10000042,emp10000071,cluster=D+F + #test_sorting_of_results,items=100,sort_fields=languages_known,expected=emp10000042,emp10000001,emp10000071,cluster=D+F + #test_sorting_of_results,items=100,sort_fields=-languages_known,expected=emp10000042,emp10000071,emp10000001,cluster=D+F + test_sorting_of_results,items=100,sort_fields=_id,expected=emp10000001,emp10000042,emp10000071,cluster=D+F + test_sorting_of_results,items=100,sort_fields=-_id,expected=emp10000071,emp10000042,emp10000001,cluster=D+F + test_sorting_of_results,items=100,sort_fields=_score,expected=emp10000042,emp10000071,emp10000001,tolerance=emp10000042,emp10000071,cluster=D+F + test_sorting_of_results,items=100,sort_fields=-_score,expected=emp10000001,emp10000071,emp10000042,tolerance=emp10000042,emp10000071,cluster=D+F + + # Sorting when no fields are specified - default sort field is -_score + test_sorting_of_results,items=100,sort_fields=,expected=emp10000001,emp10000071,emp10000042,tolerance=emp10000042,emp10000071,cluster=D+F + + # Sorting with multiple fields in different orders + test_sorting_of_results,items=100,sort_fields=is_manager,salary,expected=emp10000001,emp10000042,emp10000071,cluster=D+F + test_sorting_of_results,items=100,sort_fields=-is_manager,salary,expected=emp10000042,emp10000071,emp10000001,cluster=D+F + test_sorting_of_results,items=100,sort_fields=-is_manager,-salary,expected=emp10000071,emp10000042,emp10000001,cluster=D+F + 
test_sorting_of_results,items=100,sort_fields=is_manager,-salary,expected=emp10000001,emp10000071,emp10000042,cluster=D+F + test_sorting_of_results,items=100,sort_fields=-is_manager,salary,dept,languages_known,join_date,_id_score,expected=emp10000042,emp10000071,emp10000001,cluster=D+F + + # Sorting based on an invalid field name + #test_sorting_of_results,items=100,sort_fields=invalid,expected=emp10000071,emp10000001,emp10000042,tolerance=emp10000042,emp10000071,cluster=D+F + + # Sorting when there are no search hits + test_sorting_of_results_custom_map,items=100,sort_fields=-salary,query="""{"match_none": "true", "field":"name"}""",cluster=D+F + + # Sorting when the sort field is not indexed + test_sorting_of_results_custom_map,items=100,sort_fields=-salary,expected=emp10000071,emp10000001,emp10000042,cluster=D+F + + + diff --git a/conf/lww-epengine/py-lww.conf b/conf/lww-epengine/py-lww.conf index 10b81adf0..c12152e69 100755 --- a/conf/lww-epengine/py-lww.conf +++ b/conf/lww-epengine/py-lww.conf @@ -1,18 +1,18 @@ epengine.bucket_config.BucketConfig: test_restart,lww=True,drift=False - test_restart,lww=True,drift=True + #test_restart,lww=True,drift=True test_restart,lww=False test_failover,lww=True,drift=False - test_failover,lww=True,drift=True + #test_failover,lww=True,drift=True test_failover,lww=False test_rebalance_in,lww=True,drift=False,skip_rebalance=True - test_rebalance_in,lww=True,drift=True,skip_rebalance=True + # drift is disabled in 4.6 test_rebalance_in,lww=True,drift=True,skip_rebalance=True test_rebalance_in,lww=False,skip_rebalance=True - test_modify_bucket_params,lww=False - test_modify_bucket_params,lww=True,drift=False + # modify is not implemented in 4.6 test_modify_bucket_params,lww=False + # modify is not implemented in 4.6 test_modify_bucket_params,lww=True,drift=False test_backup_same_cluster,lww=True,drift=False - test_backup_same_cluster,lww=True,drift=True + # drift is disabled in 4.6 test_backup_same_cluster,lww=True,drift=True epengine.opschangecas.OpsChangeCasTests: test_cas_set @@ -28,7 +28,7 @@ epengine.opschangecas.OpsChangeCasTests: test_cas_getMeta_empty_vBucket test_cas_setMeta_lower test_cas_setMeta_higher - test_cas_deleteMeta + test_cas_deleteMeta,lww=True test_cas_skip_conflict_resolution test_revid_conflict_resolution test_cas_conflict_resolution @@ -36,3 +36,12 @@ epengine.opschangecas.OpsChangeCasTests: test_rebalance_revid_conflict_resolution test_failover_revid_conflict_resolution + +epengine.lww_stats.LWWStatsTests: + test_time_sync_threshold_setting,lww=true + test_poisoned_cas,lww=true + test_cas_expiry,lww=true + test_drift_stats,lww=true,check_ahead_threshold=True + test_drift_stats,lww=true,check_ahead_threshold=False + test_logical_clock_ticks,lww=true + test_time_sync_threshold_setting_rest_call,lww=true diff --git a/conf/os-certify/cli.conf b/conf/os-certify/cli.conf index 7b7aec3ec..6d028b4da 100644 --- a/conf/os-certify/cli.conf +++ b/conf/os-certify/cli.conf @@ -65,16 +65,6 @@ clitest.couchbase_clitest.CouchbaseCliTest: testAddRemoveNodesWithRecovery,nodes_add=3,nodes_rem=1,nodes_failover=2,nodes_readd=1,force_failover=True,nodes_recovery=1,GROUP=GRACEFUL testAddRemoveNodesWithRecovery,nodes_add=4,nodes_rem=0,nodes_failover=3,nodes_readd=2,force_failover=True,nodes_recovery=2,GROUP=GRACEFUL testStartStopRebalance - testNodeInit,data_path=|tmp|data,index_path=|tmp|index - testNodeInit,index_path=|tmp|index - #testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-db-percentage=10 - 
#MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-db-size=10 - #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-view-percentage=99 - #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-view-size=100 - #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-period-from=10:10 - #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-period-to=12:12 - #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,enable-compaction-abort=1 - #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,enable-compaction-parallel=0 # XDCR clitest.couchbase_clitest.XdcrCLITest: testXDCRSetup,servers=1,xdcr-cluster-name=remote,xdcr-hostname=1,xdcr-username=Administrator,xdcr-password=password @@ -91,9 +81,3 @@ clitest.couchbase_clitest.XdcrCLITest: testXdcrReplication,servers=2,xdcr-cluster-name=remote,xdcr-hostname=3,xdcr-username=Administrator,xdcr-password=password,demand-encryption=1,xdcr-certificate=cert.pem,xdcr-from-bucket=default,xdcr-to-bucket=default testXdcrReplication,servers=2,xdcr-cluster-name=remote,xdcr-hostname=3,xdcr-username=Administrator,xdcr-password=password,demand-encryption=0,xdcr-certificate=cert.pem,xdcr-from-bucket=default,xdcr-to-bucket=default,pause-resume=1 testXdcrReplication,servers=2,xdcr-cluster-name=remote,xdcr-hostname=3,xdcr-username=Administrator,xdcr-password=password,demand-encryption=0,xdcr-certificate=cert.pem,xdcr-from-bucket=default,xdcr-to-bucket=default,replication_mode=xmem - -# RZA -clitest.couchbase_clitest.CouchbaseCliTest: - testCreateRenameDeleteGroup - testAddMoveServerListGroup - testServerAddRebalancewithGroupManage diff --git a/conf/os-certify/fts-extended-sanity.conf b/conf/os-certify/fts-extended-sanity.conf new file mode 100644 index 000000000..ae038bc79 --- /dev/null +++ b/conf/os-certify/fts-extended-sanity.conf @@ -0,0 +1,67 @@ +fts.stable_topology_fts.StableTopFTS: + + # service start/run + check_fts_service_started,no_buckets=True,GROUP=PS + check_fts_service_started,default_bucket=False,sasl_buckets=1,GROUP=PS + + # simple index creation + create_simple_default_index,items=10,cluster=D,F,GROUP=PS + + # updates, expiry,delete + create_simple_default_index,items=10000,update=True,upd_del_fields=['is_manager','dept','manages.team_size'],GROUP=PS + create_simple_default_index,items=10000,update=True,expires=30,GROUP=PS + create_simple_default_index,items=10000,delete=True,GROUP=PS + + # index creation on sasl bucket and standard_bucket + create_simple_default_index,items=10000,cluster=D,F,D+F,default_bucket=False,sasl_buckets=1,GROUP=PS + create_simple_default_index,items=10000,cluster=D,F,F,default_bucket=False,standard_buckets=1,GROUP=PS + + # parallel index building (more than one bucket) + create_simple_default_index,items=1000,cluster=D,F,standard_buckets=1,sasl_buckets=1,GROUP=PS + + # parallel index building (more than one index per bucket, multiple buckets) + create_simple_default_index,items=1000,cluster=D,F,standard_buckets=1,sasl_buckets=1,index_per_bucket=3,GROUP=PS + + # configure plan params + create_simple_default_index,items=20000,index_replicas=1,cluster=D+F,F,F,GROUP=PS + + # delete bucket + drop_bucket_check_index,items=100,GROUP=PS + + # Simple queries + run_default_index_query,items=100,GROUP=PS + run_default_index_query,items=1000,query="""{"match": "safiya@mcdiabetes.com", "field": "email"}""",expected_hits=1000,GROUP=PS + run_default_index_query,items=1000,query="""{"terms": ["Darcei", "Safiya"], 
"field":"manages.reports"}""",expected_hits=0,GROUP=PS + run_default_index_query,items=1000,query="""{"match_phrase": "Darcei gel", "field": "manages.reports"}""",expected_hits=0,GROUP=PS + run_default_index_query,items=1000,update=True,upd=100,query="""{"min": 1, "max":1, "field": "mutated", "inclusive_min": true, "inclusive_max": true}""",expected_hits=1000,GROUP=PS + run_default_index_query,items=1000,delete=True,del=100,query="""{"min": 0,"max": 1,"field":"mutated","inclusive_max": true}""",expected_hits=0,GROUP=PS + + # Test different types of queries + # ["match", "phrase", "bool", "match_phrase","prefix", "fuzzy", "conjunction", "disjunction" + # "wildcard", "regexp", "query_string", "numeric_range", "date_range", "match_all","match_none"] + test_query_type,items=10000,num_queries=2,query_type=match,GROUP=PS + test_query_type,items=10000,num_queries=2,query_type=phrase,GROUP=PS + test_query_type,items=10000,num_queries=2,query_type=match_phrase,GROUP=PS + test_query_type,items=10000,num_queries=2,query_type=conjunction,GROUP=PS + test_query_type,items=10000,num_queries=2,query_type=disjunction,GROUP=PS + test_query_type,items=10000,num_queries=2,query_type=prefix,GROUP=PS + test_query_type,items=10000,num_queries=2,query_type=bool,GROUP=PS + test_query_type,items=10000,num_queries=2,query_type=date_range,GROUP=PS + test_query_type,items=10000,num_queries=2,query_type=numeric_range,GROUP=PS + test_query_type,items=10000,num_queries=30,query_type=match,prefix,phrase,bool,GROUP=PS + test_match_all,items=10000,GROUP=PS + test_match_none,items=10000,GROUP=PS + + # emp-dataset - single type/doc map + querying + index_query_custom_mapping,items=1000,custom_map=True,cm_id=0,num_queries=100,compare_es=True,GROUP=PS + + # wiki-dataset: single type/doc map + querying + index_query_custom_mapping,items=1000,custom_map=True,cm_id=0,num_queries=100,compare_es=True,dataset=wiki,GROUP=PS + index_query_custom_mapping,items=1000,custom_map=True,cm_id=0,num_queries=100,compare_es=True,dataset=wiki,update=True,delete=True,GROUP=PS + + # wiki + emp dataset, single doc maps +/- default map + index_query_custom_mapping,items=1000,custom_map=True,cm_id=0,num_queries=100,compare_es=True,dataset=all,GROUP=PS + + #emp-dataset - single type/doc map + single custom analyzer + querying + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=1,num_queries=100,GROUP=PS + index_query_custom_mapping,items=1000,custom_map=True,num_custom_analyzers=1,compare_es=True,cm_id=3,num_queries=100,GROUP=PS \ No newline at end of file diff --git a/conf/py-1node-sanity.conf b/conf/py-1node-sanity.conf index b222ad4c6..71271484a 100644 --- a/conf/py-1node-sanity.conf +++ b/conf/py-1node-sanity.conf @@ -24,7 +24,7 @@ view.viewquerytests.ViewQueryTests.test_employee_dataset_all_queries,limit=1000, CCCP.CCCP.test_get_config_client,standard_buckets=1,sasl_buckets=1 CCCP.CCCP.test_not_my_vbucket_config -flush.bucketflush.BucketFlushTests.bucketflush_with_data_ops_moxi,items=1000,data_op=create,use_ascii=False +flush.bucketflush.BucketFlushTests.bucketflush_with_data_ops_moxi,items=5000,data_op=create,use_ascii=False ### Security - Audit + LDAP - LDAP will run separately in sanity tests #### security.audittest.auditTest.test_bucketEvents,default_bucket=false,id=8201,ops=create diff --git a/conf/py-cli.conf b/conf/py-cli.conf index 8494d5e04..640f0f8c7 100644 --- a/conf/py-cli.conf +++ b/conf/py-cli.conf @@ -65,8 +65,6 @@ clitest.couchbase_clitest.CouchbaseCliTest: 
testAddRemoveNodesWithRecovery,nodes_add=3,nodes_rem=1,nodes_failover=2,nodes_readd=1,force_failover=True,nodes_recovery=1,GROUP=GRACEFUL testAddRemoveNodesWithRecovery,nodes_add=4,nodes_rem=0,nodes_failover=3,nodes_readd=2,force_failover=True,nodes_recovery=2,GROUP=GRACEFUL testStartStopRebalance - testNodeInit,data_path=|tmp|data,index_path=|tmp|index - testNodeInit,index_path=|tmp|index #testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-db-percentage=10 #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-db-size=10 #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-view-percentage=99 diff --git a/conf/py-couchbase-cli.conf b/conf/py-couchbase-cli.conf index e71359209..11a72af18 100644 --- a/conf/py-couchbase-cli.conf +++ b/conf/py-couchbase-cli.conf @@ -18,8 +18,6 @@ clitest.couchbase_clitest.CouchbaseCliTest: testAddRemoveNodesWithRecovery,nodes_add=3,nodes_rem=1,nodes_failover=2,nodes_readd=1,force_failover=True,nodes_recovery=1,GROUP=GRACEFUL testAddRemoveNodesWithRecovery,nodes_add=4,nodes_rem=0,nodes_failover=3,nodes_readd=2,force_failover=True,nodes_recovery=2,GROUP=GRACEFUL testStartStopRebalance - testNodeInit,data_path=|tmp|data,index_path=|tmp|index - testNodeInit,index_path=|tmp|index #testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-db-percentage=10 #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-db-size=10 #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-view-percentage=99 @@ -28,6 +26,27 @@ clitest.couchbase_clitest.CouchbaseCliTest: #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,compaction-period-to=12:12 #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,enable-compaction-abort=1 #MB-8567testSettingCompaction,bucket=AAAA,bucket_port=1223,enable-compaction-parallel=0 + +## test backup directory structure before spock +clitest.couchbase_clitest.CouchbaseCliTest: + test_directory_backup_stuctrue,backup_cmd=cbbackup,num_sasl_buckets=1 + test_directory_backup_stuctrue,backup_cmd=cbbackup,num_sasl_buckets=2 + test_directory_backup_stuctrue,backup_cmd=cbbackup,num_sasl_buckets=1,load_all=true + test_directory_backup_stuctrue,backup_cmd=cbbackup,num_sasl_buckets=2,load_all=true + test_directory_backup_stuctrue,backup_cmd=cbbackup,num_sasl_buckets=3,num_backup_bucket=1 + test_directory_backup_stuctrue,backup_cmd=cbbackup,num_sasl_buckets=3,num_backup_bucket=2 + test_directory_backup_stuctrue,backup_cmd=cbbackup,num_sasl_buckets=3,num_backup_bucket=3 + test_directory_backup_stuctrue,backup_cmd=cbbackup,num_sasl_buckets=3,num_backup_bucket=1,load_all=true + test_directory_backup_stuctrue,backup_cmd=cbbackup,num_sasl_buckets=3,num_backup_bucket=2,load_all=true + test_directory_backup_stuctrue,backup_cmd=cbbackup,num_sasl_buckets=3,num_backup_bucket=3,load_all=true + +## test backup directory structure from spock +clitest.couchbase_clitest.CouchbaseCliTest: + test_directory_backup_stuctrue,backup_cmd=cbbackupmgr,num_sasl_buckets=1 + test_directory_backup_stuctrue,backup_cmd=cbbackupmgr,num_sasl_buckets=2 + test_directory_backup_stuctrue,backup_cmd=cbbackupmgr,num_sasl_buckets=1,load_all=true + test_directory_backup_stuctrue,backup_cmd=cbbackupmgr,num_sasl_buckets=2,load_all=true + clitest.couchbase_clitest.XdcrCLITest: testXDCRSetup,servers=1,xdcr-cluster-name=remote,xdcr-hostname=1,xdcr-username=Administrator,xdcr-password=password testXDCRSetup,servers=1,xdcr-cluster-name=remote,xdcr-hostname=1,xdcr-username=Administrator,xdcr-password=password diff --git 
a/conf/py-epenginesanity.conf b/conf/py-epenginesanity.conf index 81233c8ec..2b183a419 100644 --- a/conf/py-epenginesanity.conf +++ b/conf/py-epenginesanity.conf @@ -1,10 +1,10 @@ -# warmup.warmuptest.WarmUpTests.warmup_test,stats_monitor=vb_replica_perc_mem_resident;vb_active_perc_mem_resident,items=10000,active_resident_threshold=70,dgm_run=True,is_partial=True,GROUP=P0 -# warmup.warmuptest.WarmUpTests.warmup_test,stats_monitor=vb_replica_perc_mem_resident;vb_active_perc_mem_resident,items=10000,active_resident_threshold=90,is_partial=False,replica=1,doc_ops=update;delete,GROUP=P0 -# warmup.warmuptest.WarmUpTests.warmup_test,stats_monitor=vb_replica_perc_mem_resident;vb_active_perc_mem_resident,items=10000,sasl_buckets=1,active_resident_threshold=90,doc_ops=update,without_access_log=True,GROUP=P0 -# warmup.warmuptest.WarmUpTests.warmup_test,stats_monitor=vb_replica_perc_mem_resident;vb_active_perc_mem_resident,items=10000,replicas=2,active_resident_threshold=80,dgm_run=True,is_partial=True,doc_ops=expire,without_access_log=True,GROUP=P0 -# warmup.warmuptest.WarmUpTests.warmup_test,stats_monitor=vb_replica_perc_mem_resident;vb_active_perc_mem_resident,items=10000,replicas=2,active_resident_threshold=80,dgm_run=True,eviction_policy=fullEviction,is_partial=True,doc_ops=expire,without_access_log=True,reboot=True,GROUP=P0 -# warmup.warmuptest.WarmUpTests.test_warm_up_progress,standard_buckets=1,items=100000,active_resident_threshold=70,dgm_run=True,is_partial=True,GROUP=P0 -# warmup.warmuptest.WarmUpTests.test_warm_up_progress,sasl_buckets=1,items=100000,active_resident_threshold=70,dgm_run=True,is_partial=True,GROUP=P0 +warmup.warmuptest.WarmUpTests.warmup_test,stats_monitor=vb_replica_perc_mem_resident;vb_active_perc_mem_resident,items=10000,active_resident_threshold=70,dgm_run=True,is_partial=True,GROUP=P0 +warmup.warmuptest.WarmUpTests.warmup_test,stats_monitor=vb_replica_perc_mem_resident;vb_active_perc_mem_resident,items=10000,active_resident_threshold=90,is_partial=False,replica=1,doc_ops=update;delete,GROUP=P0 +warmup.warmuptest.WarmUpTests.warmup_test,stats_monitor=vb_replica_perc_mem_resident;vb_active_perc_mem_resident,items=10000,sasl_buckets=1,active_resident_threshold=90,doc_ops=update,without_access_log=True,GROUP=P0 +warmup.warmuptest.WarmUpTests.warmup_test,stats_monitor=vb_replica_perc_mem_resident;vb_active_perc_mem_resident,items=10000,replicas=2,active_resident_threshold=80,dgm_run=True,is_partial=True,doc_ops=expire,without_access_log=True,GROUP=P0 +warmup.warmuptest.WarmUpTests.warmup_test,stats_monitor=vb_replica_perc_mem_resident;vb_active_perc_mem_resident,items=10000,replicas=2,active_resident_threshold=80,dgm_run=True,eviction_policy=fullEviction,is_partial=True,doc_ops=expire,without_access_log=True,reboot=True,GROUP=P0 +warmup.warmuptest.WarmUpTests.test_warm_up_progress,standard_buckets=1,items=100000,active_resident_threshold=70,dgm_run=True,is_partial=True,GROUP=P0 +warmup.warmuptest.WarmUpTests.test_warm_up_progress,sasl_buckets=1,items=100000,active_resident_threshold=70,dgm_run=True,is_partial=True,GROUP=P0 autocompaction.AutoCompactionTests.test_database_fragmentation,autocompaction_value=0 autocompaction.AutoCompactionTests.test_database_fragmentation,autocompaction_value=70 autocompaction.AutoCompactionTests.rebalance_in_with_auto_DB_compaction,items=20000,autocompaction_value=70 diff --git a/conf/py-rza-cli.conf b/conf/py-rza-cli.conf deleted file mode 100644 index cf28d3e64..000000000 --- a/conf/py-rza-cli.conf +++ /dev/null @@ -1,4 +0,0 @@ 
-clitest.couchbase_clitest.CouchbaseCliTest: - testCreateRenameDeleteGroup - testAddMoveServerListGroup - testServerAddRebalancewithGroupManage diff --git a/conf/py-tools-P0.conf b/conf/py-tools-P0.conf index 791b8edff..bc4fe80a8 100644 --- a/conf/py-tools-P0.conf +++ b/conf/py-tools-P0.conf @@ -98,8 +98,6 @@ clitest.couchbase_clitest.CouchbaseCliTest: testAddRemoveNodesWithRecovery,nodes_add=3,nodes_rem=1,nodes_failover=2,nodes_readd=1,force_failover=True,nodes_recovery=1,GROUP=GRACEFUL testAddRemoveNodesWithRecovery,nodes_add=4,nodes_rem=0,nodes_failover=3,nodes_readd=2,force_failover=True,nodes_recovery=2,GROUP=GRACEFUL testStartStopRebalance - testNodeInit,data_path=|tmp|data,index_path=|tmp|index - testNodeInit,index_path=|tmp|index test_change_admin_password_with_read_only_account,default_bucket=False test_change_admin_password_with_read_only_account,default_bucket=True,dgm_run=True test_change_admin_password_with_read_only_account,nodes_init=2,default_bucket=False diff --git a/conf/py-uitests.conf b/conf/py-uitests.conf index 878d3b8f5..12243d88f 100644 --- a/conf/py-uitests.conf +++ b/conf/py-uitests.conf @@ -6,10 +6,8 @@ ui.uisampletests.InitializeTest: test_initialize,agree_terms=True,indeces_path=c:\tmp,ram_quota_node=300,sample=beer-sample,ram_quota=100,num_replica=0, ui.uisampletests.BucketTests: test_add_bucket,name=bucket0,ram_quota=100,type=Couchbase,port=9000,index_replica=true,num_replica=2,GROUP=LINUX;WINDOWS -#per Alk:This is expected. Temporary files used during index building/updating are not accounted for per-ddoc. -# test_bucket_stats_mb_8538,GROUP=LINUX;WINDOWS -# disabled as per discussion in MB-20129 -# test_bucket_stats_connections,GROUP=LINUX;WINDOWS + test_bucket_stats_mb_8538,GROUP=LINUX;WINDOWS,descr=per Alk:This is expected. Temporary files used during index building/updating are not accounted for per-ddoc. 
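The two re-enabled entries above carry their old skip reasons as a descr= parameter instead of a comment, so the context travels with the test rather than being lost when the line is uncommented. Since descr is just another key=value pair, it rides through parameter parsing untouched; below is a hedged sketch (select_for_run is a hypothetical helper, not testrunner API) of how a runner could filter entries by GROUP and surface descr when reporting:

def select_for_run(entries, group):
    # Yield (name, params) for entries whose GROUP tag list contains
    # `group`; GROUP values use ';' as the separator, e.g. LINUX;WINDOWS.
    for name, params in entries:
        if group in params.get('GROUP', '').split(';'):
            yield name, params

entries = [
    ('test_bucket_stats_mb_8538',
     {'GROUP': 'LINUX;WINDOWS',
      'descr': 'temporary files used during index building/updating '
               'are not accounted for per-ddoc'}),
    ('test_delta_recovery_failover', {'GROUP': 'LINUX'}),
]
for name, params in select_for_run(entries, 'WINDOWS'):
    print('%s  %s' % (name, params.get('descr', '')))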
+ test_bucket_stats_connections,GROUP=LINUX;WINDOWS,descr=disabled as per discussion in MB-20129 test_add_bucket_with_ops,meta_data=valueOnly,GROUP=LINUX;WINDOWS test_add_bucket_with_ops,meta_data=fullEviction,GROUP=LINUX;WINDOWS test_add_bucket_with_ops,io_priority=low,GROUP=LINUX;WINDOWS @@ -43,8 +41,7 @@ ui.uisampletests.SettingsTests: ui.uisampletests.ROuserTests: test_read_only_user,GROUP=LINUX;WINDOWS test_delete_read_only_user,skip_preparation=True,GROUP=LINUX;WINDOWS -#MB-8790 -# test_negative_read_only_user,skip_preparation=True,username=Administrator,password=password,error=,GROUP=LINUX;WINDOWS + test_negative_read_only_user,skip_preparation=True,username=Administrator,password=password,error=Read-only user cannot be same user as administrator,GROUP=LINUX;WINDOWS,descr=MB-8790 test_negative_read_only_user,skip_preparation=True,username='!@#@$#%',error='The username must not contain spaces, control or any of ()<>@,;:\\"/[]?={} characters and must be valid utf8',GROUP=LINUX;WINDOWS test_negative_read_only_user,skip_preparation=True,username='my_ro_user',password='123',error='The password must be at least six characters.',GROUP=LINUX;WINDOWS test_negative_read_only_user,skip_preparation=True,username='my_ro_user',password='password',verify_password='massword',error=[u"Password doesn't match"],GROUP=LINUX;WINDOWS @@ -70,8 +67,8 @@ ui.uisampletests.GracefullFailoverTests: test_delta_recovery_failover,nodes_init=2,GROUP=LINUX;WINDOWS test_delta_recovery_failover,nodes_init=2,confirm_recovery=false,GROUP=LINUX;WINDOWS test_delta_recovery_failover,nodes_init=2,option=full,GROUP=LINUX;WINDOWS -# ui.uisampletests.RebalanceProgressTests: - # test_rebalance_in,GROUP=LINUX;WINDOWS +ui.uisampletests.RebalanceProgressTests: + test_rebalance_in,GROUP=LINUX;WINDOWS ui.uixdcrtests.XDCRTests: test_create_replication,GROUP=LINUX;WINDOWS test_cancel_create_replication,GROUP=LINUX;WINDOWS @@ -80,7 +77,7 @@ ui.uixdcrtests.XDCRTests: test_create_reference,ip_to_replicate=,error=Attention - hostname (ip) is missing,GROUP=LINUX;WINDOWS test_create_reference,user=' ',error=Authentication failed,GROUP=LINUX;WINDOWS test_create_reference,passwd=,error=Attention - password is missing,GROUP=LINUX;WINDOWS - test_create_reference,ip_to_replicate=abd,error=Reference is not created: Attention - Could not connect to "abd" on port 8091,GROUP=LINUX;WINDOWS + test_create_reference,ip_to_replicate=abd,error=Attention - Could not connect to "abd" on port 8091. 
This could be due to an incorrect host/port combination or a firewall in place between the servers.,GROUP=LINUX;WINDOWS test_create_reference,user=user,error=Authentication failed,GROUP=LINUX;WINDOWS test_create_reference,passwd=pwd,error=Authentication failed,GROUP=LINUX;WINDOWS test_create_reference,GROUP=LINUX;WINDOWS @@ -112,7 +109,6 @@ ui.uixdcrtests.XDCRTests: test_create_replication,advanced_settings=batch_count:1000,GROUP=LINUX;WINDOWS,name=auto26 test_create_replication,advanced_settings=batch_size:5000,GROUP=LINUX;WINDOWS,name=auto27 test_create_replication,advanced_settings=retry_interval:150,GROUP=LINUX;WINDOWS,name=auto28 - ui.uisampletests.ViewsTests: test_add_dev_view,GROUP=LINUX;WINDOWS test_add_prod_view,GROUP=LINUX;WINDOWS @@ -132,3 +128,26 @@ ui.uirebalancetests.RebalanceTests: test_incrimental_rebalance ui.uiftstests.FTSTests: test_create_simple_fts_index,GROUP=LINUX;WINDOWS +ui.uiqueryworkbench.QueryWorkbenchTests: + test_create_indexes,mode=JSON,GROUP=LINUX;WINDOWS,check_query=CREATE PRIMARY INDEX beer_index_pr ON `beer` USING GSI,expected_result={\n "requestID": "(\S+)",\n "clientContextID": "(\S+)",\n "signature": null,\n "results": \[\n \],\n "status": "success",\n "metrics": {\n "elapsedTime": "\d+\.\d+s",\n "executionTime": "\d+\.\d+s",\n "resultCount": 0,\n "resultSize": 0\n }\n}|{\n "results": \[\],\n "metrics": {\n "elapsedTime": "\d+\.\d+s",\n "executionTime": "\d+\.\d+s",\n "resultCount": 0,\n "resultSize": 0\n }\n} + test_create_indexes,mode=JSON,GROUP=LINUX;WINDOWS,check_query=CREATE INDEX beer_index_sec on `beer`(meta().id),expected_result={\n "requestID": "(\S+)",\n "clientContextID": "(\S+)",\n "signature": null,\n "results": \[\n \],\n "status": "success",\n "metrics": {\n "elapsedTime": "\d+.\d+s",\n "executionTime": "\d+.\d+s",\n "resultCount": 0,\n "resultSize": 0\n }\n|{\n "results": \[\],\n "metrics": {\n "elapsedTime": "\d+\.\d+s",\n "executionTime": "\d+\.\d+s",\n "resultCount": 0,\n "resultSize": 0\n }\n} + #MB-20833 test_create_indexes,mode=JSON,GROUP=LINUX;WINDOWS,check_query=,expected_result=\[\n {\n "code": 1050,\n "msg": "No statement or prepared value"\n }\n\] + test_create_indexes,mode=JSON,GROUP=LINUX;WINDOWS,check_query=CREATE INDEX beer_index_sec on `beer`(meta().id) _SEM_ SELECT _STAR_ FROM system:indexes,expected_result={"error":"Error, you cannot issue more than one query at once. Please remove all text after \n the semicolon closing the first query."}|{"error": "you cannot issue more than one query at once."} + test_create_indexes,mode=JSON,GROUP=LINUX;WINDOWS,init_query=CREATE INDEX beer_index_sec on `beer`(meta().id),check_query=SELECT _STAR_ FROM system:indexes,expected_result={\n "indexes": {\n "datastore_id": "http://127.0.0.1:8091",\n "id": "(\S+)",\n "index_key": \[\n "\(meta\(\).`id`\)"\n \],\n "keyspace_id": "beer",\n "name": "beer_index_sec",\n "namespace_id": "default",\n "state": "online",\n "using": "gsi"\n }\n }|\[\n {\n "indexes": {\n "datastore_id": "http://127.0.0.1:8091",\n "id": "(\S+)",\n "index_key": \[\n "\(meta\(\).`id`\)"\n \],\n "keyspace_id": "beer",\n "name": "beer_index_sec",\n "namespace_id": "default",\n "state": "online",\n "using": "gsi"\n }\n }\n\] + test_create_indexes,mode=JSON,GROUP=LINUX;WINDOWS,init_query=CREATE INDEX beer_index_sec on `beer`(meta().id),check_query=SELECT b.name, meta(b).id \nFROM `beer` b \nWHERE meta(b).id > "g" limit 1,expected_result=\[\n {\n "id": "g_heileman_brewing",\n "name": "G. Heileman Brewing"\n }\n \]|\[\n {\n "id": "g_heileman_brewing",\n "name": "G. 
Heileman Brewing"\n }\n\] + test_create_indexes,mode=JSON,GROUP=LINUX;WINDOWS,check_query=FAKE _SEM_,expected_result=\[\n {\n "code": 3000,\n "msg": "syntax error - at ;",\n "query_from_user": "FAKE ;"\n }\n] + #MB-20833 test_create_indexes,mode=JSON,GROUP=LINUX;WINDOWS,init_query=CREATE INDEX beer_index_sec on `beer`(meta().id),check_query=,expected_result=\[\n {\n "code": 1050,\n "msg": "No statement or prepared value"\n }\n\] + test_create_indexes,mode=JSON,GROUP=LINUX;WINDOWS,check_query=FAKE,expected_result=\[\n {\n "code": 3000,\n "msg": "Input was not a statement.",\n "query_from_user": "fake"\n }\n\] + test_create_indexes,mode=Tree,GROUP=LINUX;WINDOWS,init_query=CREATE INDEX beer_index_sec on `beer`(meta().id),check_query=SELECT b.name, meta(b).id FROM `beer` b WHERE meta(b).id > "g" limit 1,expected_result=id g_heileman_brewing\nname G. Heileman Brewing + test_create_indexes,mode=Plan,GROUP=LINUX;WINDOWS,init_query=CREATE INDEX beer_index_sec on `beer`(meta().id),check_query=SELECT b.name, meta(b).id FROM `beer` b WHERE meta(b).id > "g" limit 1,expected_result=Query Plan Summary:\nIndexes\nbeer.beer_index_sec\nBuckets\nbeer\nFields\nbeer.name\n\nQuery Operator Data Flows \(bottom to top\):\n\nLimit 1 \nFinalProject \nInitialProject 2 terms \nFilter \nFetch beer as b \nIndexScan beer.beer_index_sec \n\n\nVisual Plan\n\nLimit\n1\nFinalProject\nInitialProject\n2 terms\nFilter\nFetch\nbeer as b\nIndexScan\nbeer.beer_index_sec + test_create_indexes,mode=Plan Text,GROUP=LINUX;WINDOWS,init_query=CREATE INDEX beer_index_sec on `beer`(meta().id),check_query=SELECT b.name, meta(b).id FROM `beer` b WHERE meta(b).id > "g" limit 1,expected_result={\n "plan": {\n "#operator": "Sequence",\n "~children": \[\n {\n "#operator": "Sequence",\n "~children": \[\n {\n "#operator": "IndexScan",\n "index": "beer_index_sec",\n "index_id": "(\S+)",\n "keyspace": "beer",\n "limit": "1",\n "namespace": "default" + test_create_indexes,mode=Plan Text,GROUP=LINUX;WINDOWS,init_query=CREATE INDEX beer_index_sec on `beer`(meta().id),check_query=SELECT b.name, meta(b).id FROM `beer` b WHERE meta(b).id > "g" limit 1,summary_result=Status: success Elapsed: \d+.\d+ms Execution: \d+.\d+ms Result Count: 1 Result Size: 93 + test_bucket_analysis,GROUP=LINUX;WINDOWS,check_query=CREATE INDEX beer_index_sec on `beer`(meta().id),init_analysis=Fully Queryable Buckets\nQueryable on Indexed Fields\nNon-Indexed Buckets\n beer|Fully Queryable Buckets\nQueryable on Indexed Fields\nNon-Indexed Buckets\nbeer,expected_analysis=Fully Queryable Buckets\nQueryable on Indexed Fields\nbeer\nNon-Indexed Buckets + #test_save_query,GROUP=LINUX;WINDOWS + #test_save_query,GROUP=LINUX;WINDOWS,check_query=CREATE INDEX beer_index_sec on `beer`(meta().id) + #test_save_query,path=,GROUP=LINUX;WINDOWS,check_query=CREATE INDEX beer_index_sec on `beer`(meta().id) + #test_save_query,path=blablabla.json,GROUP=LINUX;WINDOWS,check_query=blaaaaaaaaa + #test_save_result,GROUP=LINUX;WINDOWS,expected_result= + #test_save_result,GROUP=LINUX;WINDOWS,check_query=CREATE INDEX beer_index_sec on `beer`(meta().id),expected_result={\n "results": \[\],\n "metrics": {\n "elapsedTime": "\d+.\d+s",\n "executionTime": "\d+\.\d+s",\n "resultCount": 0,\n "resultSize": 0\n }\n} + #test_save_result,path=,GROUP=LINUX;WINDOWS,check_query=CREATE INDEX beer_index_sec on `beer`(meta().id),expected_result={\n "results": \[\],\n "metrics": {\n "elapsedTime": "\d+.\d+s",\n "executionTime": "\d+\.\d+s",\n "resultCount": 0,\n "resultSize": 0\n }\n} + 
#test_save_result,path=blablabla.json,GROUP=LINUX;WINDOWS,check_query=blaaaaaaaaa,expected_result=\[\n {\n "code": 3000,\n "msg": "Input was not a statement.",\n "query_from_user": "blaaaaaaaaa"\n }\n\] \ No newline at end of file diff --git a/conf/py-xdcr-bidirectional.conf b/conf/py-xdcr-bidirectional.conf index c3b9e65b4..fb2ed4038 100644 --- a/conf/py-xdcr-bidirectional.conf +++ b/conf/py-xdcr-bidirectional.conf @@ -7,23 +7,23 @@ xdcr.biXDCR.bidirectional: #load_with_async_ops_and_joint_sets,items=5000,ctopology=chain,rdirection=bidirection,standard_buckets=2,default_bucket=False,update=C1,update=C2,upd=30,GROUP=P1 load_with_async_ops_and_joint_sets,items=10000,ctopology=chain,rdirection=bidirection,delete=C1-C2,update=C2,upd=30,del=30,GROUP=P1 - load_with_async_ops_and_joint_sets,items=10000,ctopology=chain,rdirection=bidirection,standard_buckets=1,expires=30,update=C2,delete=C1-C2,upd=30,del=30,GROUP=P1 + load_with_async_ops_and_joint_sets,items=10000,ctopology=chain,rdirection=bidirection,standard_buckets=1,expires=300,update=C2,delete=C1-C2,upd=30,del=30,GROUP=P1 #Incremental ops on joint sets with warmup load_with_async_ops_and_joint_sets_with_warmup,items=20000,ctopology=chain,rdirection=bidirection,warm=C1,GROUP=P1 - load_with_async_ops_and_joint_sets_with_warmup,items=10000,ctopology=chain,rdirection=bidirection,standard_buckets=1,expires=60,update=C1-C2,warm=C2,upd=30,replication_type=xmem,GROUP=P0;xmem + load_with_async_ops_and_joint_sets_with_warmup,items=10000,ctopology=chain,rdirection=bidirection,standard_buckets=1,expires=300,update=C1-C2,warm=C2,upd=30,replication_type=xmem,GROUP=P0;xmem load_with_async_ops_and_joint_sets_with_warmup,items=10000,ctopology=chain,rdirection=bidirection,delete=C1-C2, update=C2,upd=30,del=30,warm=C1-C2,GROUP=P1 #Incremental ops on joint sets with warmup master load_with_async_ops_and_joint_sets_with_warmup_master,items=10000,ctopology=chain,rdirection=bidirection,standard_buckets=1,delete=C1-C2,update=C2,upd=30,del=30,replication_type=xmem,warm=C1-C2,GROUP=P0;xmem #Failover during load - load_with_failover,replicas=1,items=10000,ctopology=chain,rdirection=bidirection,standard_buckets=1,expires=60,update=C1-C2,delete=C1,failover=C2,replication_type=xmem,GROUP=P0;xmem + load_with_failover,replicas=1,items=10000,ctopology=chain,rdirection=bidirection,standard_buckets=1,expires=300,update=C1-C2,delete=C1,failover=C2,replication_type=xmem,GROUP=P0;xmem load_with_failover,replicas=1,items=10000,ctopology=chain,rdirection=bidirection,sasl_buckets=2,default_bucket=False,update=C1-C2,delete=C1,failover=C2,timeout=180,GROUP=P1 load_with_failover,replicas=1,items=20000,ctopology=chain,rdirection=bidirection,update=C1-C2,delete=C1,failover=C2-C1,GROUP=P1 #failover and add back - load_with_failover_then_add_back,items=10000,ctopology=chain,rdirection=bidirection,standard_buckets=2,expires=60,update=C1-C2,delete=C1-C2,failover=C1,timeout=150,GROUP=P1 + load_with_failover_then_add_back,items=10000,ctopology=chain,rdirection=bidirection,standard_buckets=2,expires=300,update=C1-C2,delete=C1-C2,failover=C1,timeout=150,GROUP=P1 load_with_failover_then_add_back,items=10000,ctopology=chain,rdirection=bidirection,sasl_buckets=1,standard_buckets=1,update=C1-C2,delete=C1-C2,failover=C2,timeout=150,GROUP=P1 #LOAD WITH OPS WHILE REBOOTING NODES @@ -33,7 +33,7 @@ xdcr.biXDCR.bidirectional: #Compaction and view queries 
replication_with_view_queries_and_ops,items=10000,update=C1-C2,delete=C1-C2,is_dev_ddoc=false,rdirection=bidirection,replication_type=xmem,poll_timeout=240,GROUP=P0;xmem replication_with_ddoc_compaction,items=20000,rdirection=bidirection,GROUP=P0 - replication_with_disabled_ddoc_compaction,items=10000,standard_buckets=1,expires=30,update=C1-C2,delete=C1-C2,rdirection=bidirection,GROUP=P1 + replication_with_disabled_ddoc_compaction,items=10000,standard_buckets=1,expires=300,update=C1-C2,delete=C1-C2,rdirection=bidirection,GROUP=P1 replication_with_disabled_ddoc_compaction,items=20000,sasl_buckets=1,update=C1-C2,delete=C1-C2,disable_src_comp=False,rdirection=bidirection,replication_type=xmem,GROUP=P0;xmem xdcr.xdcrMiscTests.XdcrMiscTests: test_verify_mb8825,items=10000,doc-ops=create-delete,upd=80,del=20,replication_type=xmem,GROUP=P1 \ No newline at end of file diff --git a/conf/py-xdcr-lww-2.conf b/conf/py-xdcr-lww-2.conf index 603158716..60132d39f 100644 --- a/conf/py-xdcr-lww-2.conf +++ b/conf/py-xdcr-lww-2.conf @@ -35,17 +35,22 @@ xdcr.lww.Lww: test_seq_upd_on_bi_with_src_clock_faster,rdirection=bidirection,ctopology=chain,skip_ntp=True,GROUP=P0 test_seq_add_del_on_bi_with_target_clock_faster,rdirection=bidirection,ctopology=chain,skip_ntp=True,GROUP=P0 test_seq_del_add_on_bi_with_target_clock_faster,rdirection=bidirection,ctopology=chain,skip_ntp=True,GROUP=P0 - test_lww_with_rebalance_in_and_simult_upd_del,items=100000,rdirection=bidirection,ctopology=chain,update=C1-C2,delete=C1-C2,GROUP=P1 - test_lww_with_rebalance_out_and_simult_upd_del,items=100000,rdirection=bidirection,ctopology=chain,update=C1-C2,delete=C1-C2,GROUP=P1 - test_lww_with_failover_and_simult_upd_del,items=100000,rdirection=bidirection,ctopology=chain,update=C1-C2,delete=C1-C2,graceful=True,recoveryType=full,GROUP=P1 - test_lww_disabled_extended_metadata,items=1000,GROUP=P1 - test_lww_src_disabled_dst_enabled_extended_metadata,items=1000,GROUP=P1 + test_hlc_active_and_replica,GROUP=P0 + test_hlc,rdirection=unidirection,items=10000,ctopology=chain,GROUP=P0 + test_hlc_target_faster,rdirection=unidirection,ctopology=chain,skip_ntp=True,GROUP=P0 + test_hlc_source_faster,rdirection=unidirection,ctopology=chain,skip_ntp=True,GROUP=P0 + test_hlc_within_cluster_target_faster,rdirection=unidirection,ctopology=chain,skip_ntp=True,GROUP=P0 + test_hlc_within_cluster_source_faster,rdirection=unidirection,ctopology=chain,skip_ntp=True,GROUP=P0 + test_hlc_ordering_with_delay_source_faster,rdirection=unidirection,ctopology=chain,skip_ntp=True,GROUP=P0 + test_lww_with_rebalance_in_and_simult_upd_del,items=100000,rdirection=bidirection,ctopology=chain,update=C1-C2,delete=C1-C2,skip_key_validation=True,GROUP=P1 + test_lww_with_rebalance_out_and_simult_upd_del,items=100000,rdirection=bidirection,ctopology=chain,update=C1-C2,delete=C1-C2,skip_key_validation=True,GROUP=P1 + test_lww_with_failover_and_simult_upd_del,items=100000,rdirection=bidirection,ctopology=chain,update=C1-C2,delete=C1-C2,graceful=True,recoveryType=full,skip_key_validation=True,GROUP=P1 + test_lww_disabled_extended_metadata,items=1000,rdirection=bidirection,GROUP=P1 + test_lww_src_enabled_dst_disabled_extended_metadata,items=1000,rdirection=bidirection,GROUP=P1 test_lww_with_nodes_reshuffle,items=1000,GROUP=P1 - test_lww_with_dst_failover_and_rebalance,items=100000,rdirection=unidirection,ctopology=chain,graceful=True,recoveryType=full,GROUP=P1 - test_lww_with_dst_bucket_flush,items=100000,rdirection=unidirection,ctopology=chain,GROUP=P1 - 
test_lww_with_dst_bucket_delete,items=100000,rdirection=unidirection,ctopology=chain,GROUP=P1 + test_lww_with_dst_failover_and_rebalance,items=100000,rdirection=unidirection,ctopology=chain,graceful=True,recoveryType=full,skip_key_validation=True,GROUP=P1 test_lww_with_rebooting_non_master_node,items=100000,rdirection=unidirection,ctopology=chain,GROUP=P1 - test_lww_with_firewall,items=100000,rdirection=unidirection,ctopology=chain,GROUP=P1 + test_lww_with_firewall,items=100000,rdirection=unidirection,ctopology=chain,skip_key_validation=True,GROUP=P1 test_replication_with_lww_default,items=1000,default@C1=filter_expression:lww,GROUP=P1 test_replication_with_lww_default,items=1000,demand_encryption=True,GROUP=P1 test_lww_with_node_crash_cluster,items=100000,rdirection=unidirection,ctopology=chain,update=C1,delete=C1,crash=C1-C2,GROUP=P1 @@ -53,3 +58,19 @@ xdcr.lww.Lww: test_lww_with_auto_failover,items=1000,GROUP=P1 test_lww_with_mixed_buckets,items=1000,GROUP=P1 test_lww_with_diff_time_zones,items=1000,chain_length=3,GROUP=P1 + test_lww_with_dest_shutdown,items=100000,rdirection=unidirection,ctopology=chain,update=C1,delete=C1,GROUP=P1 + test_replication_with_lww_default,dgm_run=1,active_resident_threshold=30,rdirection=unidirection,ctopology=chain,GROUP=P1 + test_lww_with_checkpoint_validation,items=10000,rdirection=unidirection,ctopology=chain,GROUP=P1 + test_lww_with_backup_and_restore,items=1000,rdirection=unidirection,ctopology=chain,GROUP=P1 + test_lww_with_time_diff_in_src_nodes,items=1000,rdirection=unidirection,ctopology=chain,skip_ntp=True,GROUP=P1 + test_lww_with_nfs,items=1000,rdirection=unidirection,ctopology=chain,GROUP=P1 + test_lww_enabled_with_diff_topology_and_clocks_out_of_sync,ctopology=chain,chain_length=3,rdirection=bidirection,skip_ntp=True,GROUP=P1 + test_lww_mixed_with_diff_topology_and_clocks_out_of_sync,ctopology=chain,chain_length=3,rdirection=bidirection,skip_ntp=True,GROUP=P1 + test_lww_enabled_with_diff_topology_and_clocks_out_of_sync,ctopology=ring,chain_length=3,rdirection=bidirection,skip_ntp=True,GROUP=P1 + test_lww_mixed_with_diff_topology_and_clocks_out_of_sync,ctopology=ring,chain_length=3,rdirection=bidirection,skip_ntp=True,GROUP=P1 + test_v_topology_with_clocks_out_of_sync,ctopology=C1>C2<C3,skip_ntp=True,GROUP=P1 + if len(self.full_docs_list) > 0: + self.gen_results = TuqGenerators(self.log, self.full_docs_list) + + + def killall_tuq_process(self): + self.shell.execute_command("killall cbq-engine") + self.shell.execute_command("killall tuqtng") + self.shell.execute_command("killall indexer") + + def run_query_from_template(self, query_template): + self.query = self.gen_results.generate_query(query_template) + expected_result = self.gen_results.generate_expected_result() + actual_result = self.run_analytics_query() + return actual_result, expected_result + + def run_analytics_query(self, query=None, min_output_size=10, server=None, query_params = {}, is_prepared=False, scan_consistency = None, scan_vector = None, verbose= True): + if query is None: + query = self.query + if server is None: + server = self.master + if server.ip == "127.0.0.1": + self.analytics_port = server.analytics_port + else: + if server.ip == "127.0.0.1": + self.analytics_port = server.analytics_port + if self.input.tuq_client and "client" in self.input.tuq_client: + server = self.tuq_client + if self.analytics_port == None or self.analytics_port == '': + self.analytics_port = self.input.param("analytics_port", 8095) + if not self.analytics_port: + self.log.info(" analytics_port is not defined, processing will not proceed further") + 
raise Exception("analytics_port is not defined, processing will not proceed further") + cred_params = {'creds': []} + for bucket in self.buckets: + if bucket.saslPassword: + cred_params['creds'].append({'user': 'local:%s' % bucket.name, 'pass': bucket.saslPassword}) + query_params.update(cred_params) + if self.use_rest: + query_params = {} + if scan_consistency: + query_params['scan_consistency']= scan_consistency + if scan_vector: + query_params['scan_vector']= str(scan_vector).replace("'", '"') + if verbose: + self.log.info('RUN QUERY %s' % query) + query = query + ";" + if "USE INDEX" in query: + query = query.replace("USE INDEX(`#primary` USING GSI)"," ") + for bucket in self.buckets: + query = query.replace(bucket.name,bucket.name+"_shadow") + + + print query + result = RestConnection(server).analytics_tool(query, self.analytics_port, query_params=query_params, verbose = verbose) + + if isinstance(result, str) or 'errors' in result: + error_result = str(result) + length_display = len(error_result) + if length_display > 500: + error_result = error_result[:500] + raise CBQError(error_result, server.ip) + self.log.info("TOTAL ELAPSED TIME: %s" % result["metrics"]["elapsedTime"]) + return result + + + + def _verify_results(self, actual_result, expected_result, missing_count = 1, extra_count = 1): + self.log.info(" Analyzing Actual Result") + actual_result = self._gen_dict(actual_result) + self.log.info(" Analyzing Expected Result") + expected_result = self._gen_dict(expected_result) + if len(actual_result) != len(expected_result): + raise Exception("Results are incorrect.Actual num %s. Expected num: %s.\n" % ( + len(actual_result), len(expected_result))) + msg = "The number of rows match but the results mismatch, please check" + if actual_result != expected_result: + raise Exception(msg) + + def _verify_results_rqg(self, n1ql_result = [], sql_result = [], hints = ["a1"]): + new_n1ql_result = [] + for result in n1ql_result: + if result != {}: + new_n1ql_result.append(result) + n1ql_result = new_n1ql_result + if self._is_function_in_result(hints): + return self._verify_results_rqg_for_function(n1ql_result, sql_result) + check = self._check_sample(n1ql_result, hints) + actual_result = n1ql_result + if actual_result == [{}]: + actual_result = [] + if check: + actual_result = self._gen_dict(n1ql_result) + actual_result = sorted(actual_result) + expected_result = sorted(sql_result) + if len(actual_result) != len(expected_result): + extra_msg = self._get_failure_message(expected_result, actual_result) + raise Exception("Results are incorrect.Actual num %s. 
Expected num: %s.:: %s \n" % ( + len(actual_result), len(expected_result), extra_msg)) + msg = "The number of rows match but the results mismatch, please check" + if self._sort_data(actual_result) != self._sort_data(expected_result): + extra_msg = self._get_failure_message(expected_result, actual_result) + raise Exception(msg+"\n "+extra_msg) + + def _sort_data(self, result): + new_data =[] + for data in result: + new_data.append(sorted(data)) + return new_data + + def _verify_results_crud_rqg(self, n1ql_result = [], sql_result = [], hints = ["primary_key_id"]): + new_n1ql_result = [] + for result in n1ql_result: + if result != {}: + new_n1ql_result.append(result) + n1ql_result = new_n1ql_result + if self._is_function_in_result(hints): + return self._verify_results_rqg_for_function(n1ql_result, sql_result) + check = self._check_sample(n1ql_result, hints) + actual_result = n1ql_result + if actual_result == [{}]: + actual_result = [] + if check: + actual_result = self._gen_dict(n1ql_result) + actual_result = sorted(actual_result) + expected_result = sorted(sql_result) + if len(actual_result) != len(expected_result): + extra_msg = self._get_failure_message(expected_result, actual_result) + raise Exception("Results are incorrect.Actual num %s. Expected num: %s.:: %s \n" % ( + len(actual_result), len(expected_result), extra_msg)) + if not self._result_comparison_analysis(actual_result,expected_result) : + msg = "The number of rows match but the results mismatch, please check" + extra_msg = self._get_failure_message(expected_result, actual_result) + raise Exception(msg+"\n "+extra_msg) + + def _get_failure_message(self, expected_result, actual_result): + if expected_result == None: + expected_result = [] + if actual_result == None: + actual_result = [] + len_expected_result = len(expected_result) + len_actual_result = len(actual_result) + len_expected_result = min(5,len_expected_result) + len_actual_result = min(5,len_actual_result) + extra_msg = "mismatch in results :: expected :: {0}, actual :: {1} ".format(expected_result[0:len_expected_result], actual_result[0:len_actual_result]) + return extra_msg + + def _result_comparison_analysis(self, expected_result, actual_result): + expected_map ={} + actual_map ={} + for data in expected_result: + primary=None + for key in data.keys(): + keys = key + if keys.encode('ascii') == "primary_key_id": + primary = keys + expected_map[data[primary]] = data + for data in actual_result: + primary=None + for key in data.keys(): + keys = key + if keys.encode('ascii') == "primary_key_id": + primary = keys + actual_map[data[primary]] = data + check = True + for key in expected_map.keys(): + if sorted(actual_map[key]) != sorted(expected_map[key]): + check= False + return check + + def _analyze_for_special_case_using_func(self, expected_result, actual_result): + if expected_result == None: + expected_result = [] + if actual_result == None: + actual_result = [] + if len(expected_result) == 1: + value = expected_result[0].values()[0] + if value == None or value == 0: + expected_result = [] + if len(actual_result) == 1: + value = actual_result[0].values()[0] + if value == None or value == 0: + actual_result = [] + return expected_result, actual_result + + def _is_function_in_result(self, result): + if result == "FUN": + return True + return False + + def _verify_results_rqg_for_function(self, n1ql_result = [], sql_result = [], hints = ["a1"]): + actual_count = -1 + expected_count = -1 + actual_result = n1ql_result + sql_result, actual_result= 
self._analyze_for_special_case_using_func(sql_result, actual_result)
+        if len(sql_result) != len(actual_result):
+            msg = "the number of results do not match :: expected = {0}, actual = {1}".format(len(sql_result), len(actual_result))
+            extra_msg = self._get_failure_message(sql_result, actual_result)
+            raise Exception(msg+"\n"+extra_msg)
+        n1ql_result = self._gen_dict_n1ql_func_result(n1ql_result)
+        n1ql_result = sorted(n1ql_result)
+        sql_result = self._gen_dict_n1ql_func_result(sql_result)
+        sql_result = sorted(sql_result)
+        if len(sql_result) == 0 and len(actual_result) == 0:
+            return
+        if sql_result != n1ql_result:
+            max = 2
+            if len(sql_result) < 5:
+                max = len(sql_result)
+            msg = "mismatch in results :: expected [0:{0}]:: {1}, actual [0:{0}]:: {2} ".format(max, sql_result[0:max], n1ql_result[0:max])
+            raise Exception(msg)
+
+    def _convert_to_number(self, val):
+        if not isinstance(val, str):
+            return val
+        value = -1
+        try:
+            if val == '':
+                return 0
+            value = int(val.split("(")[1].split(")")[0])
+        except Exception, ex:
+            self.log.info(ex)
+        finally:
+            return value
+
+    def analyze_failure(self, actual, expected):
+        missing_keys = []
+        different_values = []
+        for key in expected.keys():
+            if key not in actual.keys():
+                missing_keys.append(key)
+            if expected[key] != actual[key]:
+                different_values.append("for key {0}, expected {1} \n actual {2}".format(key, expected[key], actual[key]))
+        self.log.info(missing_keys)
+        if(len(different_values) > 0):
+            self.log.info(" number of such cases {0}".format(len(different_values)))
+            self.log.info(" example key {0}".format(different_values[0]))
+
+    def check_missing_and_extra(self, actual, expected):
+        missing = []
+        extra = []
+        for item in actual:
+            if not (item in expected):
+                extra.append(item)
+        for item in expected:
+            if not (item in actual):
+                missing.append(item)
+        return missing, extra
+
+    def build_url(self, version):
+        info = self.shell.extract_remote_info()
+        type = info.distribution_type.lower()
+        if type in ["ubuntu", "centos", "red hat"]:
+            url = "https://s3.amazonaws.com/packages.couchbase.com/releases/couchbase-query/dp1/"
+            url += "couchbase-query_%s_%s_linux.tar.gz" %(
+                version, info.architecture_type)
+        #TODO for windows
+        return url
+
+    def _restart_indexer(self):
+        couchbase_path = "/opt/couchbase/var/lib/couchbase"
+        cmd = "rm -f {0}/meta;rm -f /tmp/log_upr_client.sock".format(couchbase_path)
+        self.shell.execute_command(cmd)
+
+    def _start_command_line_query(self, server):
+        self.shell = RemoteMachineShellConnection(server)
+        self._set_env_variable(server)
+        if self.version == "git_repo":
+            os = self.shell.extract_remote_info().type.lower()
+            if os != 'windows':
+                gopath = testconstants.LINUX_GOPATH
+            else:
+                gopath = testconstants.WINDOWS_GOPATH
+            if self.input.tuq_client and "gopath" in self.input.tuq_client:
+                gopath = self.input.tuq_client["gopath"]
+            if os == 'windows':
+                cmd = "cd %s/src/github.com/couchbase/query/server/main; " % (gopath) +\
+                      "./cbq-engine.exe -datastore http://%s:%s/ >/dev/null 2>&1 &" %(
+                          server.ip, server.port)
+            else:
+                cmd = "cd %s/src/github.com/couchbase/query/server/main; " % (gopath) +\
+                      "./cbq-engine -datastore http://%s:%s/ >n1ql.log 2>&1 &" %(
+                          server.ip, server.port)
+            self.shell.execute_command(cmd)
+        elif self.version == "sherlock":
+            os = self.shell.extract_remote_info().type.lower()
+            if os != 'windows':
+                couchbase_path = testconstants.LINUX_COUCHBASE_BIN_PATH
+            else:
+                couchbase_path = testconstants.WIN_COUCHBASE_BIN_PATH
+            if self.input.tuq_client and "sherlock_path" in
self.input.tuq_client: + couchbase_path = "%s/bin" % self.input.tuq_client["sherlock_path"] + print "PATH TO SHERLOCK: %s" % couchbase_path + if os == 'windows': + cmd = "cd %s; " % (couchbase_path) +\ + "./cbq-engine.exe -datastore http://%s:%s/ >/dev/null 2>&1 &" %( + server.ip, server.port) + else: + cmd = "cd %s; " % (couchbase_path) +\ + "./cbq-engine -datastore http://%s:%s/ >n1ql.log 2>&1 &" %( + server.ip, server.port) + n1ql_port = self.input.param("n1ql_port", None) + if server.ip == "127.0.0.1" and server.n1ql_port: + n1ql_port = server.n1ql_port + if n1ql_port: + cmd = "cd %s; " % (couchbase_path) +\ + './cbq-engine -datastore http://%s:%s/ -http=":%s">n1ql.log 2>&1 &' %( + server.ip, server.port, n1ql_port) + self.shell.execute_command(cmd) + else: + os = self.shell.extract_remote_info().type.lower() + if os != 'windows': + cmd = "cd /tmp/tuq;./cbq-engine -couchbase http://%s:%s/ >/dev/null 2>&1 &" %( + server.ip, server.port) + else: + cmd = "cd /cygdrive/c/tuq;./cbq-engine.exe -couchbase http://%s:%s/ >/dev/null 2>&1 &" %( + server.ip, server.port) + self.shell.execute_command(cmd) + def _parse_query_output(self, output): + if output.find("cbq>") == 0: + output = output[output.find("cbq>") + 4:].strip() + if output.find("tuq_client>") == 0: + output = output[output.find("tuq_client>") + 11:].strip() + if output.find("cbq>") != -1: + output = output[:output.find("cbq>")].strip() + if output.find("tuq_client>") != -1: + output = output[:output.find("tuq_client>")].strip() + return json.loads(output) + + def sort_nested_list(self, result): + actual_result = [] + for item in result: + curr_item = {} + for key, value in item.iteritems(): + if isinstance(value, list) or isinstance(value, set): + curr_item[key] = sorted(value) + else: + curr_item[key] = value + actual_result.append(curr_item) + return actual_result + + def configure_gomaxprocs(self): + max_proc = self.input.param("gomaxprocs", None) + cmd = "export GOMAXPROCS=%s" % max_proc + for server in self.servers: + shell_connection = RemoteMachineShellConnection(self.master) + shell_connection.execute_command(cmd) + + def drop_primary_index(self, using_gsi = True, server = None): + if server == None: + server = self.master + self.log.info("CHECK FOR PRIMARY INDEXES") + for bucket in self.buckets: + self.query = "DROP PRIMARY INDEX ON {0}".format(bucket.name) + if using_gsi: + self.query += " USING GSI" + if not using_gsi: + self.query += " USING VIEW " + self.log.info(self.query) + try: + check = self._is_index_in_list(bucket.name, "#primary", server = server) + if check: + self.run_analytics_query(server = server) + except Exception, ex: + self.log.error('ERROR during index creation %s' % str(ex)) + + def create_primary_index(self, using_gsi = True, server = None): + if server == None: + server = self.master + for bucket in self.buckets: + self.query = "CREATE PRIMARY INDEX ON %s " % (bucket.name) + if using_gsi: + self.query += " USING GSI" + # if gsi_type == "memdb": + # self.query += " WITH {'index_type': 'memdb'}" + if not using_gsi: + self.query += " USING VIEW " + try: + check = self._is_index_in_list(bucket.name, "#primary", server = server) + if not check: + self.run_analytics_query(server = server) + check = self.is_index_online_and_in_list(bucket.name, "#primary", server = server) + if not check: + raise Exception(" Timed-out Exception while building primary index for bucket {0} !!!".format(bucket.name)) + else: + raise Exception(" Primary Index Already present, This looks like a bug !!!") + except Exception, ex: 
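+                # The handler below logs and re-raises: a pre-existing #primary index
+                # is treated as a test-setup bug rather than silently reused, and a
+                # build timeout must fail the calling test instead of being swallowed.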
+ self.log.error('ERROR during index creation %s' % str(ex)) + raise ex + + def verify_index_with_explain(self, actual_result, index_name, check_covering_index= False): + check = True + if check_covering_index: + if "covering" in str(actual_result): + check = True + else: + check = False + if index_name in str(actual_result): + return True and check + return False + + def run_query_and_verify_result(self, server = None, query = None, timeout = 120.0, max_try = 1, + expected_result = None, scan_consistency = None, scan_vector = None, verify_results = True): + check = False + init_time = time.time() + try_count = 0 + while not check: + next_time = time.time() + try: + actual_result = self.run_analytics_query(query = query, server = server, + scan_consistency = scan_consistency, scan_vector = scan_vector) + if verify_results: + self._verify_results(sorted(actual_result['results']), sorted(expected_result)) + else: + return "ran query with success and validated results" , True + check = True + except Exception, ex: + if (next_time - init_time > timeout or try_count >= max_try): + return ex, False + finally: + try_count += 1 + return "ran query with success and validated results" , check + + + def run_cbq_query(self, query=None, min_output_size=10, server=None, query_params = {}, is_prepared=False, scan_consistency = None, scan_vector = None, verbose= True): + if query is None: + query = self.query + if server is None: + server = self.master + if server.ip == "127.0.0.1": + self.n1ql_port = server.n1ql_port + else: + if server.ip == "127.0.0.1": + self.n1ql_port = server.n1ql_port + if self.input.tuq_client and "client" in self.input.tuq_client: + server = self.tuq_client + if self.n1ql_port == None or self.n1ql_port == '': + self.n1ql_port = self.input.param("n1ql_port", 90) + if not self.n1ql_port: + self.log.info(" n1ql_port is not defined, processing will not proceed further") + raise Exception("n1ql_port is not defined, processing will not proceed further") + cred_params = {'creds': []} + for bucket in self.buckets: + if bucket.saslPassword: + cred_params['creds'].append({'user': 'local:%s' % bucket.name, 'pass': bucket.saslPassword}) + query_params.update(cred_params) + if self.use_rest: + query_params = {} + if scan_consistency: + query_params['scan_consistency']= scan_consistency + if scan_vector: + query_params['scan_vector']= str(scan_vector).replace("'", '"') + if verbose: + self.log.info('RUN QUERY %s' % query) + result = RestConnection(server).query_tool(query, self.n1ql_port, query_params=query_params, is_prepared = is_prepared, verbose = verbose) + else: + # if self.version == "git_repo": + # output = self.shell.execute_commands_inside("$GOPATH/src/github.com/couchbaselabs/tuqtng/" +\ + # "tuq_client/tuq_client " +\ + # "-engine=http://%s:8093/" % server.ip, + # subcommands=[query,], + # min_output_size=20, + # end_msg='tuq_client>') + # else: + #os = self.shell.extract_remote_info().type.lower() + shell = RemoteMachineShellConnection(server) + #query = query.replace('"', '\\"') + #query = query.replace('`', '\\`') + #if os == "linux": + cmd = "%s/cbq -engine=http://%s:8093/" % (testconstants.LINUX_COUCHBASE_BIN_PATH,server.ip) + output = shell.execute_commands_inside(cmd,query,"","","","","") + print "--------------------------------------------------------------------------------------------------------------------------------" + print output + result = json.loads(output) + print result + result = self._parse_query_output(output) + if isinstance(result, str) or 'errors' in 
result: + error_result = str(result) + length_display = len(error_result) + if length_display > 500: + error_result = error_result[:500] + raise CBQError(error_result, server.ip) + self.log.info("TOTAL ELAPSED TIME: %s" % result["metrics"]["elapsedTime"]) + return result + + # def is_index_online_and_in_list(self, bucket, index_name, server=None, timeout=600.0): + # check = self._is_index_in_list(bucket, index_name, server = server) + # init_time = time.time() + # while not check: + # time.sleep(1) + # check = self._is_index_in_list(bucket, index_name, server = server) + # next_time = time.time() + # if check or (next_time - init_time > timeout): + # return check + # return check + # + # def is_index_ready_and_in_list(self, bucket, index_name, server=None, timeout=600.0): + # query = "SELECT * FROM system:indexes where name = \'{0}\'".format(index_name) + # if server == None: + # server = self.master + # init_time = time.time() + # check = False + # while not check: + # res = self.run_analytics_query(query=query, server=server) + # for item in res['results']: + # if 'keyspace_id' not in item['indexes']: + # check = False + # elif item['indexes']['keyspace_id'] == str(bucket) \ + # and item['indexes']['name'] == index_name \ + # and item['indexes']['state'] == "online": + # check = True + # time.sleep(1) + # next_time = time.time() + # check = check or (next_time - init_time > timeout) + # return check + + # def is_index_online_and_in_list_bulk(self, bucket, index_names = [], server = None, index_state = "online", timeout = 600.0): + # check, index_names = self._is_index_in_list_bulk(bucket, index_names, server = server, index_state = index_state) + # init_time = time.time() + # while not check: + # check, index_names = self._is_index_in_list_bulk(bucket, index_names, server = server, index_state = index_state) + # next_time = time.time() + # if check or (next_time - init_time > timeout): + # return check + # return check + # + # def gen_build_index_query(self, bucket = "default", index_list = []): + # return "BUILD INDEX on {0}({1}) USING GSI".format(bucket,",".join(index_list)) + # + # def gen_query_parameter(self, scan_vector = None, scan_consistency = None): + # query_params = {} + # if scan_vector: + # query_params.update("scan_vector", scan_vector) + # if scan_consistency: + # query_params.update("scan_consistency", scan_consistency) + # return query_params + + # def _is_index_in_list(self, bucket, index_name, server = None, index_state = ["pending", "building", "deferred"]): + # query = "SELECT * FROM system:indexes where name = \'{0}\'".format(index_name) + # if server == None: + # server = self.master + # res = self.run_cbq_query(query = query, server = server) + # for item in res['results']: + # if 'keyspace_id' not in item['indexes']: + # return False + # if item['indexes']['keyspace_id'] == str(bucket) and item['indexes']['name'] == index_name and item['indexes']['state'] not in index_state: + # return True + # return False + # + # def _is_index_in_list_bulk(self, bucket, index_names = [], server = None, index_state = ["pending","building"]): + # query = "SELECT * FROM system:indexes" + # if server == None: + # server = self.master + # res = self.run_cbq_query(query = query, server = server) + # index_count=0 + # found_index_list = [] + # for item in res['results']: + # if 'keyspace_id' not in item['indexes']: + # return False + # for index_name in index_names: + # if item['indexes']['keyspace_id'] == str(bucket) and item['indexes']['name'] == index_name and 
item['indexes']['state'] not in index_state: + # found_index_list.append(index_name) + # if len(found_index_list) == len(index_names): + # return True, [] + # return False, list(set(index_names) - set(found_index_list)) + # + # def gen_index_map(self, server = None): + # query = "SELECT * FROM system:indexes" + # if server == None: + # server = self.master + # res = self.run_cbq_query(query = query, server = server) + # index_map = {} + # for item in res['results']: + # bucket_name = item['indexes']['keyspace_id'].encode('ascii','ignore') + # if bucket_name not in index_map.keys(): + # index_map[bucket_name] = {} + # index_name = str(item['indexes']['name']) + # index_map[bucket_name][index_name] = {} + # index_map[bucket_name][index_name]['state'] = item['indexes']['state'] + # return index_map + # + # def get_index_count_using_primary_index(self, buckets, server = None): + # query = "SELECT COUNT(*) FROM {0}" + # map= {} + # if server == None: + # server = self.master + # for bucket in buckets: + # res = self.run_cbq_query(query = query.format(bucket.name), server = server) + # map[bucket.name] = int(res["results"][0]["$1"]) + # return map + # + # def get_index_count_using_index(self, bucket, index_name,server=None): + # query = 'SELECT COUNT(*) FROM {0} USE INDEX ({1})'.format(bucket.name, index_name) + # if not server: + # server = self.master + # res = self.run_cbq_query(query=query, server=server) + # return int(res['results'][0]['$1']) + + def _gen_dict(self, result): + result_set = [] + if result != None and len(result) > 0: + for val in result: + for key in val.keys(): + result_set.append(val[key]) + return result_set + + def _gen_dict_n1ql_func_result(self, result): + result_set = [val[key] for val in result for key in val.keys()] + new_result_set = [] + if len(result_set) > 0: + for value in result_set: + if isinstance(value, float): + new_result_set.append(round(value, 0)) + else: + new_result_set.append(value) + else: + new_result_set = result_set + return new_result_set + + def _check_sample(self, result, expected_in_key = None): + if expected_in_key == "FUN": + return False + if expected_in_key == None or len(expected_in_key) == 0: + return False + if result != None and len(result) > 0: + sample=result[0] + for key in sample.keys(): + for sample in expected_in_key: + if key in sample: + return True + return False + + def old_gen_dict(self, result): + result_set = [] + map = {} + duplicate_keys = [] + try: + if result != None and len(result) > 0: + for val in result: + for key in val.keys(): + result_set.append(val[key]) + for val in result_set: + if val["_id"] in map.keys(): + duplicate_keys.append(val["_id"]) + map[val["_id"]] = val + keys = map.keys() + keys.sort() + except Exception, ex: + self.log.info(ex) + raise + if len(duplicate_keys) > 0: + raise Exception(" duplicate_keys {0}".format(duplicate_keys)) + return map + diff --git a/lib/couchbase_helper/cluster.py b/lib/couchbase_helper/cluster.py index bde9d7785..d2aaf8707 100644 --- a/lib/couchbase_helper/cluster.py +++ b/lib/couchbase_helper/cluster.py @@ -17,7 +17,8 @@ def __init__(self): self.task_manager = TaskManager("Cluster_Thread") self.task_manager.start() - def async_create_default_bucket(self, server, size, replicas=1, enable_replica_index=1, eviction_policy='valueOnly', bucket_priority = None): + def async_create_default_bucket(self, server, size, replicas=1, enable_replica_index=1, eviction_policy='valueOnly', + bucket_priority = None,lww=False): """Asynchronously creates the default bucket Parameters: @@ 
-29,7 +30,8 @@ def async_create_default_bucket(self, server, size, replicas=1, enable_replica_i BucketCreateTask - A task future that is a handle to the scheduled task.""" _task = BucketCreateTask(server, 'default', replicas, size, - enable_replica_index=enable_replica_index, eviction_policy=eviction_policy,bucket_priority=bucket_priority) + enable_replica_index=enable_replica_index, eviction_policy=eviction_policy, + bucket_priority=bucket_priority,lww=lww) self.task_manager.schedule(_task) return _task @@ -263,7 +265,8 @@ def async_wait_for_xdcr_stat(self, servers, bucket, param, stat, comparison, val return _task def create_default_bucket(self, server, size, replicas=1, timeout=600, - enable_replica_index=1, eviction_policy='valueOnly', bucket_priority = None): + enable_replica_index=1, eviction_policy='valueOnly', + bucket_priority = None,lww=False): """Synchronously creates the default bucket Parameters: @@ -276,7 +279,9 @@ def create_default_bucket(self, server, size, replicas=1, timeout=600, _task = self.async_create_default_bucket(server, size, replicas, enable_replica_index=enable_replica_index, - eviction_policy=eviction_policy, bucket_priority = bucket_priority) + eviction_policy=eviction_policy, + bucket_priority = bucket_priority, + lww=lww) return _task.result(timeout) def create_sasl_bucket(self, server, name, password, size, replicas, timeout=None, bucket_priority=None): diff --git a/lib/couchbase_helper/data.py b/lib/couchbase_helper/data.py index 20714db04..ca37dc761 100644 --- a/lib/couchbase_helper/data.py +++ b/lib/couchbase_helper/data.py @@ -35,4 +35,16 @@ 'Sinhalese', 'Portuguese', 'Romanian'] DEPT = ['Engineering', 'Sales', 'Support', 'Marketing', 'Info-tech', 'Finance', - 'HR', 'Pre-sales', 'Accounts', 'Dev-ops', 'Training'] \ No newline at end of file + 'HR', 'Pre-sales', 'Accounts', 'Dev-ops', 'Training'] + +COUNTRIES = ["India", "US", "UK", "Japan", "France", "Germany", "China", "Korea", "Canada", "Cuba", + "West Indies", "Australia", "New Zealand", "Nepal", "Sri Lanka", "Pakistan", "Mexico", + "belgium", "Netherlands", "Brazil", "Costa Rica", "Cambodia", "Fiji", "Finland", "haiti", + "Hong Kong", "Iceland", "Iran", "Iraq", "Italy", "Greece", "Jamaica", "Kenya", "Kuwait", "Macau", + "Spain","Morocco", "Maldives", "Norway"] + +COUNTRY_CODE = ["Ind123", "US123", "UK123", "Jap123", "Fra123", "Ger123", "Chi123", "Kor123", "Can123", + "Cub123", "Wes123", "Aus123", "New123", "Nep123", "Sri123", "Pak123", "Mex123", "bel123", + "Net123", "Bra123", "Cos123", "Cam123", "Fij123", "Fin123", "hai123", "Hon123", "Ice123", + "Ira123", "Ira123", "Ita123", "Gre123", "Jam123", "Ken123", "Kuw123", "Mac123", "Spa123", + "Mor123", "Mal123", "Nor123"] diff --git a/lib/couchbase_helper/documentgenerator.py b/lib/couchbase_helper/documentgenerator.py index 49bf0da0f..9b6b9415c 100644 --- a/lib/couchbase_helper/documentgenerator.py +++ b/lib/couchbase_helper/documentgenerator.py @@ -6,7 +6,6 @@ from data import FIRST_NAMES, LAST_NAMES, DEPT, LANGUAGES import itertools - class KVGenerator(object): def __init__(self, name, start, end): self.name = name @@ -14,14 +13,6 @@ def __init__(self, name, start, end): self.end = end self.current = start self.itr = start - - def setrange(self, args): - self.itr = args['start'] - self.end = args['end'] - - def getrange(self): - return self.start, self.end - def has_next(self): return self.itr < self.end @@ -89,8 +80,8 @@ def next(self): value = arg[seed % len(arg)] doc_args.append(value) seed /= len(arg) - - json_doc = 
json.loads(self.template.format(*doc_args).replace('\'', '"').replace('True', 'true').replace('False', 'false')) + doc = self.template.format(*doc_args).replace('\'', '"').replace('True', 'true').replace('False', 'false').replace('\\', '\\\\') + json_doc = json.loads(doc) json_doc['_id'] = self.name + '-' + str(self.itr) json_doc['mutated'] = 0 self.itr += 1 diff --git a/lib/couchbase_helper/query_definitions.py b/lib/couchbase_helper/query_definitions.py index f160d6f6e..1100abed2 100644 --- a/lib/couchbase_helper/query_definitions.py +++ b/lib/couchbase_helper/query_definitions.py @@ -1,4 +1,6 @@ import uuid +import random + FULL_SCAN_TEMPLATE = "SELECT {0} FROM %s" RANGE_SCAN_TEMPLATE = "SELECT {0} FROM %s WHERE {1}" FULL_SCAN_GROUP_BY_TEMPLATE = "SELECT {0} FROM %s GROUP by {2}" @@ -11,6 +13,10 @@ INDEX_CREATION_TEMPLATE = "CREATE INDEX %s ON %s(%s)" INDEX_DROP_TEMPLATE = "DROP INDEX %s.%s" SIMPLE_INDEX="simple" +SIMPLE_ARRAY="simple_array" +ARRAY="array" +DUPLICATE_ARRAY="duplicate_array" +DISTINCT_ARRAY="distinct_array" COMPOSITE_INDEX="composite" GROUP_BY="groupby" ORDER_BY="orderby" @@ -24,6 +30,7 @@ LESS_THAN="less_than" AND = "and" OR = "or" + class QueryDefinition(object): def __init__(self, name = "default", index_name = "Random", index_fields = [], index_creation_template = INDEX_CREATION_TEMPLATE, index_drop_template = INDEX_DROP_TEMPLATE, query_template = "", groups = [], index_where_clause = None, gsi_type = None): @@ -56,6 +63,24 @@ def generate_index_create_query(self, bucket = "default", use_gsi_for_secondary query += " WITH " + str(deployment_plan) return query + def generate_gsi_index_create_query_using_rest(self, bucket="default", deploy_node_info=None, defer_build=None, + index_where_clause=None, gsi_type="forestdb", expr_type="N1QL"): + deployment_plan = {} + ind_content = {} + ind_content["name"] = self.index_name + ind_content["bucket"] = "{0}".format(bucket) + ind_content["secExprs"] = self.index_fields + ind_content["using"] = gsi_type + ind_content["exprType"] = "{0}".format(expr_type) + if index_where_clause: + ind_content["whereExpr"] = index_where_clause + if deploy_node_info != None: + deployment_plan["nodes"] = deploy_node_info + if defer_build != None: + deployment_plan["defer_build"] = defer_build + ind_content["with"] = str(deployment_plan) + return ind_content + def generate_index_drop_query(self, bucket = "default", use_gsi_for_secondary = True, use_gsi_for_primary = True): if "primary" in self.index_name: query = "DROP PRIMARY INDEX ON {0}".format(bucket) @@ -65,7 +90,7 @@ def generate_index_drop_query(self, bucket = "default", use_gsi_for_secondary = query += " USING GSI" elif use_gsi_for_primary and "primary" in self.index_name: query += " USING GSI" - if not use_gsi_for_secondary: + if not use_gsi_for_secondary: query += " USING VIEW " return query @@ -309,7 +334,197 @@ def generate_employee_data_query_definitions_for_index_expressions(self): groups = [SIMPLE_INDEX,RANGE_SCAN, NO_ORDERBY_GROUPBY, LESS_THAN,"employee"], index_where_clause = " join_yr < 2014 ")) return definitions_list - def filter_by_group(self, groups = [], query_definitions = []): + def generate_airlines_data_query_definitions(self): + definitions_list = [] + + #emit_fields = "name, job_title, join_yr, join_mo, join_day" + emit_fields = "*" + and_conditions = ["job_title = \"Sales\"","job_title != \"Sales\""] + #Primary Index + index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + 
"_primary_index", + index_fields=[], query_template="SELECT * FROM %s", + groups=["full_data_set","primary"], + index_where_clause="")) + #simple index on string + index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + "_name", + index_fields=["name"], + query_template=RANGE_SCAN_ORDER_BY_TEMPLATE.format( + emit_fields, "name IS NOT NULL","name,_id"), + groups=["all", SIMPLE_INDEX, FULL_SCAN, ORDER_BY, "airlines", "isnotnull"], + index_where_clause=" name IS NOT NULL ")) + #simple index on int + index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + "_age", + index_fields=["age"], + query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % "age = 40"), + groups=["all", SIMPLE_INDEX, NO_ORDERBY_GROUPBY, EQUALS], + index_where_clause=" age IS NOT NULL ")) + #simple index on boolean + index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + "_premium_customer", + index_fields=["premium_customer"], + query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % "premium_customer = True"), + groups=["all", SIMPLE_INDEX, NO_ORDERBY_GROUPBY, EQUALS], + index_where_clause=" premium_customer IS NOT NULL ")) + #array duplicate index on strings + index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + "_travel_history_duplicate", + index_fields=["ALL ARRAY t FOR t in `travel_history` END"], + query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % + "ANY t IN travel_history SATISFIES t = \"India\" END ORDER BY _id"), + groups=["all", ARRAY, SIMPLE_ARRAY, DUPLICATE_ARRAY, ORDER_BY, EQUALS,"airlines"], + index_where_clause=" travel_history IS NOT NULL ")) + #array distinct index on strings + index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + "_travel_history_distinct", + index_fields=["DISTINCT ARRAY t FOR t in `travel_history` END"], + query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % + "ANY t IN travel_history SATISFIES t = \"India\" END ORDER BY _id"), + groups=["all", ARRAY, SIMPLE_INDEX, SIMPLE_ARRAY, DISTINCT_ARRAY, + ORDER_BY, EQUALS, "airlines"], + index_where_clause=" travel_history IS NOT NULL ")) + #array duplicate index on alphanumeric + index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + "_travel_history_code_duplicate", + index_fields=["ALL ARRAY t FOR t in `travel_history_code` END"], + query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % + "ANY t IN travel_history_code SATISFIES t = \"Ind123\" END ORDER BY _id"), + groups=["all", ARRAY, SIMPLE_ARRAY, DUPLICATE_ARRAY, ORDER_BY, EQUALS,"airlines"], + index_where_clause=" travel_history_code IS NOT NULL ")) + #array distinct index on alphanumeric + index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + "_travel_history_code_distinct", + index_fields=["DISTINCT ARRAY t FOR t in `travel_history_code` END"], + query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % + "ANY t IN travel_history_code SATISFIES t = \"Ind123\" END ORDER BY _id"), + groups=["all", ARRAY, 
SIMPLE_INDEX, SIMPLE_ARRAY, DISTINCT_ARRAY,
+                                    ORDER_BY, EQUALS, "airlines"],
+                            index_where_clause=" travel_history_code IS NOT NULL "))
+        #array duplicate index on numbers
+        index_name_prefix = "airlines_" + str(random.randint(100000, 999999))
+        definitions_list.append(
+            QueryDefinition(index_name=index_name_prefix + "_credit_cards_duplicate",
+                            index_fields=["ALL ARRAY t FOR t in `credit_cards` END"],
+                            query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " %
+                                "ANY t IN credit_cards SATISFIES t > 5000000 END ORDER BY _id"),
+                            groups=["all", ARRAY, SIMPLE_ARRAY, DUPLICATE_ARRAY, RANGE_SCAN, ORDER_BY,"airlines"],
+                            index_where_clause=" credit_cards IS NOT NULL "))
+        #array distinct index on numbers
+        index_name_prefix = "airlines_" + str(random.randint(100000, 999999))
+        definitions_list.append(
+            QueryDefinition(index_name=index_name_prefix + "_credit_cards_distinct",
+                            index_fields=["DISTINCT ARRAY t FOR t in `credit_cards` END"],
+                            query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " %
+                                "ANY t IN credit_cards SATISFIES t > 5000000 END ORDER BY _id"),
+                            groups=["all", ARRAY, SIMPLE_INDEX, SIMPLE_ARRAY, DISTINCT_ARRAY,
+                                    RANGE_SCAN, ORDER_BY, "airlines"],
+                            index_where_clause=" credit_cards IS NOT NULL "))
+        #Duplicate array on boolean array
+        index_name_prefix = "airlines_" + str(random.randint(100000, 999999))
+        definitions_list.append(
+            QueryDefinition(index_name=index_name_prefix + "_question_values_duplicate",
+                            index_fields=["ALL ARRAY t FOR t in `question_values` END"],
+                            query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s "
+                                % "ANY t IN question_values SATISFIES t = True END ORDER BY _id"),
+                            groups=["all", ARRAY, SIMPLE_ARRAY, DUPLICATE_ARRAY, RANGE_SCAN, ORDER_BY, "airlines"],
+                            index_where_clause=" question_values IS NOT NULL "))
+        #Distinct array on boolean array
+        index_name_prefix = "airlines_" + str(random.randint(100000, 999999))
+        definitions_list.append(
+            QueryDefinition(index_name=index_name_prefix + "_question_values_distinct",
+                            index_fields=["DISTINCT ARRAY t FOR t in `question_values` END"],
+                            query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " %
+                                "ANY t IN question_values SATISFIES t = True END ORDER BY _id"),
+                            groups=["all", ARRAY, SIMPLE_INDEX, SIMPLE_ARRAY, DISTINCT_ARRAY,
+                                    RANGE_SCAN, ORDER_BY, "airlines"],
+                            index_where_clause=" question_values IS NOT NULL "))
+        #array distinct index on mixed data type
+        index_name_prefix = "airlines_" + str(random.randint(100000, 999999))
+        definitions_list.append(
+            QueryDefinition(index_name=index_name_prefix + "_secret_combination",
+                            index_fields=["DISTINCT ARRAY t FOR t in `secret_combination` END"],
+                            query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " %
+                                "ANY t IN secret_combination SATISFIES t > \"a\" OR t > 1 END ORDER BY _id"),
+                            groups=["all", ARRAY, SIMPLE_INDEX, SIMPLE_ARRAY, DISTINCT_ARRAY,
+                                    RANGE_SCAN, ORDER_BY, OR, "airlines"],
+                            index_where_clause=" secret_combination IS NOT NULL "))
+        #array index on items of object
+        index_name_prefix = "airlines_" + str(random.randint(100000, 999999))
+        definitions_list.append(
+            QueryDefinition(index_name=index_name_prefix + "_booking_duplicate",
+                            index_fields=["ALL ARRAY t FOR t in TO_ARRAY(`booking`) END"],
+                            query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " %
+                                "ANY t IN booking SATISFIES t.source = \"India\" END ORDER BY _id"),
+                            groups=["all", ARRAY, SIMPLE_ARRAY, DUPLICATE_ARRAY, RANGE_SCAN,
+                                    ORDER_BY, EQUALS, "airlines"],
+                            index_where_clause=" booking IS NOT NULL "))
+        #array index on items of object
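+        # A rough sketch of what one generated duplicate/distinct pair renders to
+        # (index names are illustrative; real ones carry a random airlines_ prefix):
+        #   CREATE INDEX i_all ON default(ALL ARRAY t FOR t IN `travel_history` END)
+        #   CREATE INDEX i_distinct ON default(DISTINCT ARRAY t FOR t IN `travel_history` END)
+        #   SELECT * FROM default WHERE ANY t IN travel_history SATISFIES t = "India" END
+        # ALL keeps one index entry per array element, repeats included, while DISTINCT
+        # de-duplicates entries; both can serve the ANY ... SATISFIES predicate above.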
index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + "_booking_distinct", + index_fields=["DISTINCT ARRAY t FOR t in TO_ARRAY(`booking`) END"], + query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % + "ANY t IN booking SATISFIES t.source = \"India\" END ORDER BY _id"), + groups=["all", ARRAY, SIMPLE_ARRAY, DISTINCT_ARRAY, RANGE_SCAN, + ORDER_BY, EQUALS, "airlines"], + index_where_clause=" booking IS NOT NULL ")) + #Composite array distinct index + index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + "_travel_history_name_age", + index_fields=["DISTINCT ARRAY t FOR t in `travel_history` END", "name", "age"], + query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % + "ANY t IN travel_history SATISFIES t = \"India\" END AND name IS NOT NULL ORDER BY _id"), + groups=["all", ARRAY, COMPOSITE_INDEX, DISTINCT_ARRAY, RANGE_SCAN, + ORDER_BY, AND, EQUALS, "airlines"], + index_where_clause=" travel_history IS NOT NULL ")) + #Simple array on scalar + index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + "_name_array", + index_fields=["DISTINCT ARRAY t FOR t in TO_ARRAY(`name`) END"], + query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % + "ANY t IN name SATISFIES t = \"Ciara\" END ORDER BY _id"), + groups=["all", ARRAY, SIMPLE_ARRAY, DISTINCT_ARRAY, RANGE_SCAN, + ORDER_BY, EQUALS, "airlines"], + index_where_clause=" name IS NOT NULL ")) + #Duplicate array Index on Array of objects + index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + "_travel_details_duplicate", + index_fields=["ALL ARRAY t FOR t in `travel_details` END"], + query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % + "ANY t IN travel_details SATISFIES t.country = \"India\" END ORDER BY _id"), + groups=["all", ARRAY, SIMPLE_ARRAY, DUPLICATE_ARRAY, RANGE_SCAN, + ORDER_BY, EQUALS, "airlines"], + index_where_clause=" travel_details IS NOT NULL ")) + #Distinct array Index on Array of objects + index_name_prefix = "airlines_" + str(random.randint(100000, 999999)) + definitions_list.append( + QueryDefinition(index_name=index_name_prefix + "_travel_details_distinct", + index_fields=["DISTINCT ARRAY t FOR t in `travel_details` END"], + query_template=RANGE_SCAN_TEMPLATE.format(emit_fields," %s " % + "ANY t IN travel_details SATISFIES t.country = \"India\" END ORDER BY _id"), + groups=["all", ARRAY, SIMPLE_ARRAY, SIMPLE_INDEX, DISTINCT_ARRAY, + RANGE_SCAN, ORDER_BY, EQUALS, "airlines"], + index_where_clause=" travel_details IS NOT NULL ")) + return definitions_list + + def filter_by_group(self, groups = None, query_definitions = None): + if not groups: + groups = [] + if not query_definitions: + query_definitions = [] new_query_definitions = {} for query_definition in query_definitions: count = 0 diff --git a/lib/couchbase_helper/query_helper.py b/lib/couchbase_helper/query_helper.py index 612d70fce..1bf44adb4 100644 --- a/lib/couchbase_helper/query_helper.py +++ b/lib/couchbase_helper/query_helper.py @@ -233,6 +233,9 @@ def _gen_query_with_subquery(self, sql = "", table_map = {},count1 = 0): new_n1ql = new_n1ql.replace("EQUALS"," IN ") new_sql = new_sql.replace("RAW","") new_n1ql = 
new_n1ql.replace("AND_OUTER_INNER_TABLE_PRIMARY_KEY_COMPARISON","") + #print "new n1ql is %s"%(new_n1ql) + #print "new sql is %s"%(new_sql) + return {"sql":new_sql, "n1ql":new_n1ql},outer_table_map @@ -486,7 +489,7 @@ def _find_string_type(self, n1ql_query, hints = []): return hint def _gen_json_from_results_with_primary_key(self, columns, rows, primary_key = ""): - print "generate json from results with primary key" + #print "generate json from results with primary key" primary_key_index = 0 count = 0 dict = {} @@ -757,6 +760,7 @@ def _convert_sql_template_to_value_with_subqueries(self, n1ql_template ="", tabl "expected_result":None, "indexes":{} } + #print "map is %s" %(map) return map def _convert_sql_template_to_value_for_secondary_indexes(self, n1ql_template ="", table_map = {}, table_name= "simple_table", define_gsi_index=False): diff --git a/lib/couchbase_helper/tuq_generators.py b/lib/couchbase_helper/tuq_generators.py index aad0e815e..9d2ca4f6e 100644 --- a/lib/couchbase_helper/tuq_generators.py +++ b/lib/couchbase_helper/tuq_generators.py @@ -2,11 +2,13 @@ from documentgenerator import DocumentGenerator import re import datetime -import logger import json -import random +import random, string import os import logger + +from data import COUNTRIES, COUNTRY_CODE, FIRST_NAMES, LAST_NAMES + log = logger.Logger.get_logger() class TuqGenerators(object): @@ -27,6 +29,8 @@ def __init__(self, log, full_set): if isinstance(attr[1], bool)] self.type_args['list_str'] = [attr[0] for attr in full_set[0].iteritems() if isinstance(attr[1], list) and isinstance(attr[1][0], unicode)] + self.type_args['list_int'] = [attr[0] for attr in full_set[0].iteritems() + if isinstance(attr[1], list) and isinstance(attr[1][0], int)] self.type_args['list_obj'] = [attr[0] for attr in full_set[0].iteritems() if isinstance(attr[1], list) and isinstance(attr[1][0], dict)] self.type_args['obj'] = [attr[0] for attr in full_set[0].iteritems() @@ -105,7 +109,7 @@ def _format_where_clause(self, from_clause=None): conditions = conditions.replace('IS NOT NULL', 'is not None') satisfy_expr = self.format_satisfy_clause() if satisfy_expr: - conditions = re.sub(r'ANY.*END', '', clause) + conditions = re.sub(r'ANY.*END', '', conditions).strip() regex = re.compile("[\w']+\.[\w']+") atts = regex.findall(conditions) for att in atts: @@ -126,7 +130,19 @@ def _format_where_clause(self, from_clause=None): for attr in attributes: conditions = conditions.replace(' %s ' % attr, ' doc["%s"] ' % attr) if satisfy_expr: - conditions += '' + satisfy_expr + if conditions: + for join in ["AND", "OR"]: + present = conditions.find(join) + if present > -1: + conditions = conditions.replace(join, join.lower()) + if present > 0: + conditions += '' + satisfy_expr + break + else: + conditions = satisfy_expr + ' ' + conditions + break + else: + conditions += '' + satisfy_expr if from_clause and from_clause.find('.') != -1: sub_attrs = [att for name, group in self.type_args.iteritems() for att in group if att not in attributes] @@ -216,7 +232,7 @@ def _format_select_clause(self, from_clause=None): self.aggr_fns['COUNT'] = {} if attr[0].upper() == 'DISTINCT': attr = attr[1:] - self.distict= True + self.distinct= True if attr[0].find('.') != -1: parent, child = attr[0].split('.') attr[0] = child @@ -242,7 +258,7 @@ def _format_select_clause(self, from_clause=None): continue elif attr[0].upper() == 'DISTINCT': attr = attr[1:] - self.distict= True + self.distinct= True if attr[0] == '*': condition += '"*" : doc,' elif len(attr) == 1: @@ -312,7 +328,7 @@ 
def _filter_full_set(self, select_clause, where_clause, unnest_clause): result = [eval(select_clause) for doc in self.full_set if eval(where_clause)] else: result = [eval(select_clause) for doc in self.full_set] - if self.distict: + if self.distinct: result = [dict(y) for y in set(tuple(x.items()) for x in result)] if unnest_clause: unnest_attr = unnest_clause[5:-2] @@ -532,12 +548,14 @@ def format_satisfy_clause(self): main_attr = 'doc["%s"]["%s"]' % (self.aliases[parent], child) else: main_attr = 'doc["%s"]' % (child) + var = "att" if self.query.find('ANY') != -1: - result_clause = 'len([att for att in %s if ' % main_attr + var = re.sub(r'.*ANY', '', re.sub(r'IN.*', '', self.query)).strip() + result_clause = 'len([{0} for {1} in {2} if '.format(var, var, main_attr) satisfy_expr = re.sub(r'.*SATISFIES', '', re.sub(r'END.*', '', satisfy_clause)).strip() for expr in satisfy_expr.split(): if expr.find('.') != -1: - result_clause += ' att["%s"] ' % expr.split('.')[1] + result_clause += ' {0}["{1}"] '.format(var, expr.split('.')[1]) elif expr.find('=') != -1: result_clause += ' == ' elif expr.upper() in ['AND', 'OR', 'NOT']: @@ -548,7 +566,7 @@ def format_satisfy_clause(self): return result_clause def _clear_current_query(self): - self.distict = False + self.distinct = False self.aggr_fns = {} self.aliases = {} self.attr_order_clause_greater_than_select = [] @@ -813,24 +831,6 @@ def generate_docs_sales(self, key_prefix = "sales_dataset", test_data_type = Tru sales, [delivery], is_support, is_priority, [contact], [name], rate, - start=start, docs_per_day=end)) - else: - template = '{{ "join_yr" : {0}, "join_mo" : {1}, "join_day" : {2},' - if self.template_items_num: - for num in xrange(self.template_items_num - 2): - template += '"item_%s" : "value_%s",' % (num, num) - template += ' "sales" : {3} }}' - sales = self._shuffle([200000, 400000, 600000, 800000],isShuffle) - for year in join_yr: - for month in join_mo: - for day in join_day: - random.seed(count) - count += 1 - prefix = str(random.random()*100000) - generators.append(DocumentGenerator(key_prefix + prefix, - template, - [year], [month], [day], - sales, start=start, end=end)) return generators @@ -885,6 +885,74 @@ def generate_docs_array(self, key_prefix="array_dataset", start=0, docs_per_day= name, email, countries, codes, start=start, end=end)) return generators + def generate_all_type_documents_for_gsi(self, start=0, docs_per_day=10): + """ + Document fields: + name: String + age: Number + email: Alphanumeric + Special Character + premium_customer: Boolean or + Address: Object + {Line 1: Alphanumeric + Special Character + Line 2: Alphanumeric + Special Character or + City: String + Country: String + postal_code: Number + } + travel_history: Array of string - Duplicate elements ["India", "US", "UK", "India"] + travel_history_code: Array of alphanumerics - Duplicate elements + booking_history: Array of objects + {source: + destination: + } + credit_cards: Array of numbers + secret_combination: Array of mixed data types + countries_visited: Array of strings - non-duplicate elements + + :param start: + :param docs_per_day: + :param isShuffle: + :return: + """ + generators = [] + bool_vals = [True, False] + template = r'{{ "name":"{0}", "email":"{1}", "age":{2}, "premium_customer":{3}, ' \ + '"address":{4}, "travel_history":{5}, "travel_history_code":{6}, "travel_details":{7},' \ + '"booking":{8}, "credit_cards":{9}, "secret_combination":{10}, "countries_visited":{11}, ' \ + '"question_values":{12}}}' + name = random.choice(FIRST_NAMES) + 
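+        # Note: each field value below is drawn once per call, so every document
+        # produced by the DocumentGenerator built at the end of this method shares
+        # the same name/email/age/address/etc.; only the key (prefix + counter) and
+        # the injected _id/mutated fields vary between documents.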
age = random.randint(25, 70) + last_name = random.choice(LAST_NAMES) + dob = "{0}-{1}-{2}".format(random.randint(1970, 1999), + random.randint(1, 28), random.randint(1, 12)) + email = "{0}.{1}.{2}@abc.com".format(name, last_name, dob.split("-")[1]) + premium_customer = random.choice(bool_vals) + address = {} + address["line_1"] = "Street No. {0}".format(random.randint(100, 200)) + address["line_2"] = "null" + if not random.choice(bool_vals): + address["address2"] = "Building {0}".format(random.randint(1, 6)) + address["city"] = "Bangalore" + address["contact"] = "{0} {1}".format(name, last_name) + address["country"] = "India" + address["postal_code"] = "{0}".format(random.randint(560071, 560090)) + credit_cards = [random.randint(-1000000, 9999999) for i in range(random.randint(3, 7))] + secret_combo = [''.join(random.choice(string.lowercase) for i in range(7)), + random.randint(1000000, 9999999)] + travel_history = [random.choice(COUNTRIES[:9]) for i in range(1, 11)] + travel_history_code = [COUNTRY_CODE[COUNTRIES.index(i)] for i in travel_history] + travel_details = [{"country": travel_history[i], "code": travel_history_code[i]} + for i in range(len(travel_history))] + countries_visited = list(set(travel_history)) + booking = {"source": random.choice(COUNTRIES), "destination": random.choice(COUNTRIES)} + confirm_question_values = [random.choice(bool_vals) for i in range(5)] + prefix = "airline_record_" + str(random.random()*100000) + generators.append(DocumentGenerator(prefix, template, [name], [email], [age], [premium_customer], + [address], [travel_history], [travel_history_code], [travel_details], + [booking], [credit_cards], [secret_combo], [countries_visited], + [confirm_question_values], start=start, end=docs_per_day)) + return generators + def generate_docs_employee_data(self, key_prefix ="employee_dataset", start=0, docs_per_day = 1, isShuffle = False): generators = [] count = 1 @@ -912,7 +980,6 @@ def generate_docs_employee_data(self, key_prefix ="employee_dataset", start=0, d for month in join_mo: for day in join_day: random.seed(count) - count += 1 prefix = str(random.random()*100000) generators.append(DocumentGenerator(key_prefix + prefix, template, @@ -963,4 +1030,3 @@ def _shuffle(self, data, isShuffle): random.shuffle(data) return data return data - diff --git a/lib/couchbase_helper/tuq_helper.py b/lib/couchbase_helper/tuq_helper.py index 90e9f8211..e28e32af7 100644 --- a/lib/couchbase_helper/tuq_helper.py +++ b/lib/couchbase_helper/tuq_helper.py @@ -419,7 +419,7 @@ def create_primary_index(self, using_gsi = True, server = None): try: check = self._is_index_in_list(bucket.name, "#primary", server = server) if not check: - self.run_cbq_query(server = server) + self.run_cbq_query(server = server,query_params={'timeout' : '900s'}) check = self.is_index_online_and_in_list(bucket.name, "#primary", server = server) if not check: raise Exception(" Timed-out Exception while building primary index for bucket {0} !!!".format(bucket.name)) diff --git a/lib/mc_bin_client.py b/lib/mc_bin_client.py index 1aebfdf5d..33b844284 100644 --- a/lib/mc_bin_client.py +++ b/lib/mc_bin_client.py @@ -252,24 +252,67 @@ def setWithMeta(self, key, value, exp, flags, seqno, remote_cas): return self._doMetaCmd(memcacheConstants.CMD_SET_WITH_META, key, value, 0, exp, flags, seqno, remote_cas) + # set with meta using the LWW conflict resolution CAS + def setWithMetaLWW(self, key, value, exp, flags,cas): + """Set a value and its meta data in the memcached server. 
The format is described here https://github.com/couchbase/ep-engine/blob/master/docs/protocol/set_with_meta.md;
+        the first CAS will be 0 because that is the traditional CAS, and the CAS in the "extras" will be populated.
+        The sequence number will be 0 because, to the best of my understanding, it is not used with LWW.
+
+        """
+        #
+        SET_META_EXTRA_FMT = '>IIQQH'  # flags (4), expiration (4), seqno (8), CAS (8), metalen (2); unused here, the pack below uses memcacheConstants.META_EXTRA_FMT
+        META_LEN = 0
+        SEQNO = 0
+
+        self._set_vbucket(key, -1)
+
+        return self._doCmd(memcacheConstants.CMD_SET_WITH_META, key, value,
+                           struct.pack(memcacheConstants.META_EXTRA_FMT, flags, exp, SEQNO, cas, META_LEN))
+
+
+    # delete with meta using the LWW conflict resolution CAS
+    def delWithMetaLWW(self, key, exp, flags, cas):
+        """Delete a value and set its meta data in the memcached server.
+        The format is described here https://github.com/couchbase/ep-engine/blob/master/docs/protocol/del_with_meta.md;
+        the first CAS will be 0 because that is the traditional CAS, and the CAS in the "extras" will be populated.
+        The sequence number will be 0 because, to the best of my understanding, it is not used with LWW.
+
+        """
+
+        META_LEN = 0
+        SEQNO = 0
+
+
+        self._set_vbucket(key, -1)
+
+        return self._doCmd(memcacheConstants.CMD_DEL_WITH_META, key, '',
+                           struct.pack(memcacheConstants.META_EXTRA_FMT, flags, exp, SEQNO, cas, META_LEN))
+
+
+    # hope to remove this and migrate existing calls to the above
     def set_with_meta(self, key, exp, flags, seqno, cas, val, vbucket= -1, add_extended_meta_data=False,
-                      adjusted_time=0, conflict_resolution_mode=0, skipCR=False):
+                      adjusted_time=0, conflict_resolution_mode=0):
         """Set a value in the memcached server."""
         self._set_vbucket(key, vbucket)
-        if skipCR:
-            return self._doCmd(memcacheConstants.CMD_SET_WITH_META,
-                               key,
-                               val,
-                               struct.pack(memcacheConstants.SKIP_META_CMD_FMT,
-                                           flags,
-                                           exp,
-                                           seqno,
-                                           cas,
-                                           memcacheConstants.CR
-                                           ))
+        return self._doCmd(memcacheConstants.CMD_SET_WITH_META,
+                           key,
+                           val,
+                           struct.pack(memcacheConstants.SKIP_META_CMD_FMT,
+                                       flags,
+                                       exp,
+                                       seqno,
+                                       cas,
+                                       memcacheConstants.CR
+                                       ))
+
+
+    # Extended meta data was a 4.0 and 4.5 era construct, and is not supported in 4.6.
+    # Not sure if it will ever be needed, but leaving the code visible in case it is.
+    """
     if add_extended_meta_data:
         extended_meta_data = self.pack_the_extended_meta_data( adjusted_time, conflict_resolution_mode)
         return self._doCmd(memcacheConstants.CMD_SET_WITH_META, key, val,
@@ -278,6 +321,7 @@ def set_with_meta(self, key, exp, flags, seqno, cas, val, vbucket= -1, add_exten
     else:
         return self._doCmd(memcacheConstants.CMD_SET_WITH_META, key, val, struct.pack(META_CMD_FMT, flags, exp, seqno, cas) )
+    """
diff --git a/lib/membase/api/rest_client.py b/lib/membase/api/rest_client.py
index 5dc789b04..5330b5687 100644
--- a/lib/membase/api/rest_client.py
+++ b/lib/membase/api/rest_client.py
@@ -14,6 +14,7 @@
 import httplib2
 import logger
+import traceback
 try:
     from couchbase_helper.document import DesignDocument, View
@@ -362,7 +363,14 @@ def is_cluster_mixed(self):
         http_res, success = self.init_http_request(self.baseUrl + 'pools/default')
         if http_res == u'unknown pool':
             return False
-        versions = list(set([node["version"][:1] for node in http_res["nodes"]]))
+        try:
+            versions = list(set([node["version"][:1] for node in http_res["nodes"]]))
+        except:
+            log.error('Error while processing cluster info {0}'.format(http_res))
+            # not really clear what to return, but False seems to be a good start until we figure out what is happening
+            return False
+
+        if '1' in versions and '2'
in versions: return True return False @@ -1658,6 +1666,8 @@ def fetch_bucket_stats(self, bucket='default', zoom='minute'): status, content, header = self._http_request(api) return json.loads(content) + + def fetch_bucket_xdcr_stats(self, bucket='default', zoom='minute'): """Return deserialized bucket xdcr stats. Keyword argument: @@ -1814,7 +1824,7 @@ def get_bucket_json(self, bucket='default'): def is_lww_enabled(self, bucket='default'): bucket_info = self.get_bucket_json(bucket=bucket) try: - if bucket_info['timeSynchronization'] == 'enabledWithoutDrift': + if bucket_info['conflictResolutionType'] == 'lww': return True except KeyError: return False @@ -1865,8 +1875,7 @@ def create_bucket(self, bucket='', threadsNumber=3, flushEnabled=1, evictionPolicy='valueOnly', - lww=False, - drift=False): + lww=False): api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets') params = urllib.urlencode({}) @@ -1909,10 +1918,7 @@ def create_bucket(self, bucket='', 'flushEnabled': flushEnabled, 'evictionPolicy': evictionPolicy} if lww: - if drift: - init_params['timeSynchronization'] = 'enabledWithDrift' - else: - init_params['timeSynchronization'] = 'enabledWithoutDrift' + init_params['conflictResolutionType'] = 'lww' params = urllib.urlencode(init_params) log.info("{0} with param: {1}".format(api, params)) create_start_time = time.time() @@ -2049,6 +2055,23 @@ def disable_alerts(self): status, content, header = self._http_request(api, 'POST', params) return status + + + def set_cas_drift_threshold(self, bucket, ahead_threshold_in_millisecond, behind_threshold_in_millisecond): + + api = self.baseUrl + 'pools/default/buckets/{0}'. format( bucket ) + params_dict ={'driftAheadThresholdMs': ahead_threshold_in_millisecond, + 'driftBehindThresholdMs': behind_threshold_in_millisecond} + params = urllib.urlencode(params_dict) + log.info("%s with param: %s" % (api, params)) + status, content, header = self._http_request(api, 'POST', params) + return status + + + + + + def stop_rebalance(self, wait_timeout=10): api = self.baseUrl + '/controller/stopRebalance' status, content, header = self._http_request(api, 'POST') @@ -2372,6 +2395,24 @@ def run_fts_query(self, index_name, query_json): return content['total_hits'], content['hits'], content['took'], \ content['status'] + def run_fts_query_with_facets(self, index_name, query_json): + """Method run an FTS query through rest api""" + api = self.fts_baseUrl + "api/index/{0}/query".format(index_name) + headers = self._create_capi_headers_with_auth( + self.username, + self.password) + status, content, header = self._http_request( + api, + "POST", + json.dumps(query_json, ensure_ascii=False).encode('utf8'), + headers, + timeout=70) + + if status: + content = json.loads(content) + return content['total_hits'], content['hits'], content['took'], \ + content['status'], content['facets'] + """ End of FTS rest APIs """ @@ -2441,6 +2482,11 @@ def set_mc_threads(self, mc_threads=4): return self.diag_eval(cmd) + def get_auto_compaction_settings(self): + api = self.baseUrl + "settings/autoCompaction" + status, content, header = self._http_request(api) + return json.loads(content) + def set_auto_compaction(self, parallelDBAndVC="false", dbFragmentThreshold=None, viewFragmntThreshold=None, @@ -2555,6 +2601,13 @@ def set_indexer_params(self, parameter, val): log.info('Indexer {0} set to {1}'.format(parameter, val)) return status + def get_global_index_settings(self): + api = self.baseUrl + "settings/indexes" + status, content, header = self._http_request(api) + if status: + 
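+        # Returns the parsed settings/indexes JSON on success and None otherwise;
+        # callers are expected to handle the None case themselves.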
return json.loads(content) + return None + def set_couchdb_option(self, section, option, value): """Dynamic settings changes""" @@ -2617,6 +2670,11 @@ def print_UI_logs(self, last_n=10, contains_text=None): log.info("Latest logs from UI on {0}:".format(self.ip)) for lg in logs: log.error(lg) + def get_ro_user(self): + api = self.baseUrl + 'settings/readOnlyAdminName' + status, content, header = self._http_request(api, 'GET', '') + return content, status + def delete_ro_user(self): api = self.baseUrl + 'settings/readOnlyUser' status, content, header = self._http_request(api, 'DELETE', '') @@ -2687,6 +2745,56 @@ def query_tool(self, query, port=8093, timeout=650, query_params={}, is_prepared except ValueError: return content + def analytics_tool(self, query, port=8095, timeout=650, query_params={}, is_prepared=False, named_prepare=None, + verbose = True, encoded_plan=None, servers=None): + key = 'prepared' if is_prepared else 'statement' + headers = None + content="" + prepared = json.dumps(query) + if is_prepared: + if named_prepare and encoded_plan: + http = httplib2.Http() + if len(servers)>1: + url = "http://%s:%s/query/service" % (servers[1].ip, port) + else: + url = "http://%s:%s/query/service" % (self.ip, port) + + headers = {'Content-type': 'application/json'} + body = {'prepared': named_prepare, 'encoded_plan':encoded_plan} + + response, content = http.request(url, 'POST', headers=headers, body=json.dumps(body)) + + return eval(content) + + elif named_prepare and not encoded_plan: + params = 'prepared=' + urllib.quote(prepared, '~()') + params = 'prepared="%s"'% named_prepare + else: + prepared = json.dumps(query) + prepared = str(prepared.encode('utf-8')) + params = 'prepared=' + urllib.quote(prepared, '~()') + if 'creds' in query_params and query_params['creds']: + headers = self._create_headers_with_auth(query_params['creds'][0]['user'].encode('utf-8'), + query_params['creds'][0]['pass'].encode('utf-8')) + api = "http://%s:%s/analytics/service?%s" % (self.ip, port, params) + log.info("%s"%api) + else: + params = {key : query} + if 'creds' in query_params and query_params['creds']: + headers = self._create_headers_with_auth(query_params['creds'][0]['user'].encode('utf-8'), + query_params['creds'][0]['pass'].encode('utf-8')) + del query_params['creds'] + params.update(query_params) + params = urllib.urlencode(params) + if verbose: + log.info('query params : {0}'.format(params)) + api = "http://%s:%s/analytics/service?%s" % (self.ip, port, params) + status, content, header = self._http_request(api, 'POST', timeout=timeout, headers=headers) + try: + return json.loads(content) + except ValueError: + return content + def query_tool_stats(self): log.info('query n1ql stats') api = "http://%s:8093/admin/stats" % (self.ip) diff --git a/lib/memcacheConstants.py b/lib/memcacheConstants.py index 039f8e1b4..89f1b7aaa 100644 --- a/lib/memcacheConstants.py +++ b/lib/memcacheConstants.py @@ -197,7 +197,7 @@ # Flags, expiration SET_PKT_FMT = ">II" -META_CMD_FMT = '>IIQQ' +META_CMD_FMT = '>IIQQ' # flags (4 bytes), expiration (4), seqno (8), CAS (8), metalen (2) META_CMD_FMT = '>IIQQ' EXTENDED_META_CMD_FMT = '>IIQQH' @@ -240,6 +240,10 @@ # Time bomb FLUSH_PKT_FMT = ">I" + +# Meta LWW extras +META_EXTRA_FMT = '>IIQQH' + # Touch commands # expiration TOUCH_PKT_FMT = ">I" diff --git a/lib/memcached/helper/kvstore.py b/lib/memcached/helper/kvstore.py index 1fc8ec9ba..eb70824d1 100644 --- a/lib/memcached/helper/kvstore.py +++ b/lib/memcached/helper/kvstore.py @@ -10,6 +10,7 @@ class KVStore(object): def 
__init__(self, num_locks=16): self.num_locks = num_locks self.reset() + self.acquire_lock = threading.Lock() # needed to avoid deadlocks when different threads grab some of the partitions def reset(self): self.cache = {} @@ -22,14 +23,16 @@ def acquire_partition(self, key): return partition def acquire_partitions(self, keys): - part_obj_keys = collections.defaultdict(list) + self.acquire_lock.acquire() + part_obj_keys = {} for key in keys: partition = self.cache[self._hash(key)] - ''' - frozenset because dict is mutable , cant be hashed - frozenset converts a dict to immutable object - ''' - part_obj_keys[frozenset(partition.items())].append(key) + partition_obj = partition["partition"] + if partition_obj not in part_obj_keys: + partition["lock"].acquire() + part_obj_keys[partition_obj] = [] + part_obj_keys[partition_obj].append(key) + self.acquire_lock.release() return part_obj_keys def acquire_random_partition(self, has_valid=True): @@ -188,4 +191,4 @@ def __enter__(self): return self.partition["partition"] def __exit__(self, exc_type, exc_value, traceback): - self.partition["lock"].release() \ No newline at end of file + self.partition["lock"].release() diff --git a/lib/remote/remote_util.py b/lib/remote/remote_util.py index fdb15a0b7..b04b211e0 100644 --- a/lib/remote/remote_util.py +++ b/lib/remote/remote_util.py @@ -1734,7 +1734,7 @@ def wait_till_compaction_end(self, rest, bucket, timeout_in_seconds=60): log.error("auto compaction has not ended in {0} sec.".format(str(timeout_in_seconds))) return False - def wait_till_process_ended(self, process_name, timeout_in_seconds=360): + def wait_till_process_ended(self, process_name, timeout_in_seconds=600): if process_name[-1:] == "-": process_name = process_name[:-1] end_time = time.time() + float(timeout_in_seconds) @@ -1756,8 +1756,9 @@ def wait_till_process_ended(self, process_name, timeout_in_seconds=360): log.error("{1}: process {0} may not run" \ .format(process_name, self.ip)) if time.time() >= end_time and not process_ended: - log.info("Process {0} on node {1} is still running even" - " after 9 minutes".format(process_name, self.ip)) + log.info("Process {0} on node {1} is still running even" + " after 10 minutes; VERSION.txt file was removed" + .format(process_name, self.ip)) return process_ended def terminate_processes(self, info, list): @@ -2241,7 +2242,6 @@ def execute_commands_inside(self, main_command,query, queries,bucket1,password,b filedata = "" if not(query==""): main_command = main_command + " -s=\"" + query+ '"' - print "main_command is %s" %main_command elif (self.remote and not(queries=="")): sftp = self._ssh_client.open_sftp() filein = sftp.open(filename, 'w') @@ -3020,7 +3020,10 @@ def execute_cluster_backup(self, login_info="Administrator:password", backup_loc command_options_string = "" if command_options is not '': - command_options_string = ' '.join(command_options) + if "-b" not in command_options: + command_options_string = ' '.join(command_options) + else: + command_options_string = command_options cluster_ip = cluster_ip or self.ip cluster_port = cluster_port or self.port @@ -3339,7 +3342,9 @@ def execute_vbuckettool(self, keys, prefix=None): return output, error def execute_batch_command(self, command): - remote_command = "echo \"{0}\" > /tmp/cmd.bat; /tmp/cmd.bat".format(command) + remote_command = \ + "echo \"{0}\" > /tmp/cmd.bat; chmod u=rwx /tmp/cmd.bat; /tmp/cmd.bat"\ + .format(command) o, r = self.execute_command_raw(remote_command) if r and r!=['']: log.error("Command didn't run successfully.
Error: {0}".format(r)) @@ -3425,11 +3430,12 @@ def check_openssl_version(self, deliverable_type, openssl, version): o, r = self.execute_command("dpkg --get-selections | grep libssl") self.log_command_output(o, r) if not o: - o, r = self.execute_command("apt-get install -y libssl0.9.8") - self.log_command_output(o, r) - o, r = self.execute_command("dpkg --get-selections | grep libssl") - log.info("package {0} should not appear below".format(o[:11])) - self.log_command_output(o, r) + pass # CBQE-36124 - some SSL stuff which is not needed anymore anyway + #o, r = self.execute_command("apt-get install -y libssl0.9.8") + #self.log_command_output(o, r) + #o, r = self.execute_command("dpkg --get-selections | grep libssl") + #log.info("package {0} should not appear below".format(o[:11])) + #self.log_command_output(o, r) elif o: for s in o: if "libssl0.9.8" not in s: @@ -3438,47 +3444,7 @@ def check_openssl_version(self, deliverable_type, openssl, version): o, r = self.execute_command("dpkg --get-selections | grep libssl") log.info("package {0} should not appear below".format(s[:11])) self.log_command_output(o, r) - if self.info.deliverable_type == "rpm" and \ - "SUSE" not in self.info.distribution_type: - centos_version = ["6.4"] - o, r = self.execute_command("cat /etc/redhat-release") - self.log_command_output(o, r) - if o[0] != "": - o = o[0].split(" ") - if o[2] in centos_version and "1" in openssl: - o, r = self.execute_command("rpm -qa | grep openssl") - self.log_command_output(o, r) - for s in o: - if "openssl098e" in s: - o, r = self.execute_command("yum remove -y {0}".format(s)) - self.log_command_output(o, r) - o, r = self.execute_command("rpm -qa | grep openssl") - log.info("package {0} should not appear below".format(s)) - self.log_command_output(o, r) - elif "openssl-1.0.0" not in s: - o, r = self.execute_command("yum install -y openssl") - self.log_command_output(o, r) - o, r = self.execute_command("rpm -qa | grep openssl") - log.info("openssl-1.0.0 should appear below".format(s)) - self.log_command_output(o, r) - elif openssl == "": - o, r = self.execute_command("rpm -qa | grep openssl") - self.log_command_output(o, r) - if not o: - o, r = self.execute_command("yum install -y openssl098e") - self.log_command_output(o, r) - o, r = self.execute_command("rpm -qa | grep openssl") - log.info("package openssl098e should appear below") - self.log_command_output(o, r) - elif o: - for s in o: - if "openssl098e" not in s: - o, r = self.execute_command("yum install -y openssl098e") - self.log_command_output(o, r) - o, r = self.execute_command("rpm -qa | grep openssl") - log.info("package openssl098e should appear below") - self.log_command_output(o, r) - + def check_pkgconfig(self, deliverable_type, openssl): if "SUSE" in self.info.distribution_type: o, r = self.execute_command("zypper -n if pkg-config 2>/dev/null| grep -i \"Installed: Yes\"") @@ -3617,6 +3583,28 @@ def set_cbauth_env(self, server): '"http://{0}:{1}@{2}:8091/query"'\ .format(rest_username, rest_password,server.ip)) + def change_system_time(self, time_change_in_seconds): + + # note that time change may be positive or negative + + + # need to support Windows too + output, error = self.execute_command("date +%s") + if len(error) > 0: + return False + curr_time = int(output[-1]) + new_time = curr_time + time_change_in_seconds + + output, error = self.execute_command("date --date @" + str(new_time)) + if len(error) > 0: + return False + + output, error = self.execute_command("date --set='" + output[-1] + "'") + if len(error) > 0: + 
return False + else: + return True + class RemoteUtilHelper(object): diff --git a/lib/sdk_client.py b/lib/sdk_client.py index 6ad6cd719..18df17097 100644 --- a/lib/sdk_client.py +++ b/lib/sdk_client.py @@ -13,6 +13,10 @@ from couchbase.n1ql import N1QLQuery, N1QLRequest import threading +import couchbase +import json + + class SDKClient(object): """Python SDK Client Implementation for testrunner - master branch Implementation""" @@ -27,6 +31,7 @@ def __init__(self, bucket, hosts = ["localhost"] , scheme = "couchbase", self.transcoder = transcoder self.default_timeout = 0 self._createConn() + couchbase.set_json_converters(json.dumps, json.loads) def _createString(self, scheme ="couchbase", bucket = None, hosts = ["localhost"], certpath = None, uhm_options = ""): connection_string = "{0}://{1}".format(scheme, ", ".join(hosts).replace(" ","")) @@ -230,11 +235,11 @@ def set(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to= def upsert(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0): try: - self.cb.upsert(key, value, cas, ttl, format, persist_to, replicate_to) + return self.cb.upsert(key, value, cas, ttl, format, persist_to, replicate_to) except CouchbaseError as e: try: time.sleep(10) - self.cb.upsert(key, value, cas, ttl, format, persist_to, replicate_to) + return self.cb.upsert(key, value, cas, ttl, format, persist_to, replicate_to) except CouchbaseError as e: raise @@ -558,7 +563,7 @@ def memcached(self, key): return self.client def set(self, key, exp, flags, value, format = FMT_AUTO): - return self.client.set(key, value, ttl = exp, format = format) + rc = self.client.set(key, value, ttl = exp, format = format) + return rc def append(self, key, value, format = FMT_AUTO): return self.client.set(key, value, format = format) @@ -732,4 +737,4 @@ def kv_mc_sync_get(self, key, status): mc_item = self.mc_get(key) self._rlock.release() - return kv_item, mc_item \ No newline at end of file + return kv_item, mc_item diff --git a/lib/tasks/task.py b/lib/tasks/task.py index 2f1f348a1..4a5d72c77 100644 --- a/lib/tasks/task.py +++ b/lib/tasks/task.py @@ -273,7 +273,7 @@ class BucketCreateTask(Task): def __init__(self, server, bucket='default', replicas=1, size=0, port=11211, password=None, bucket_type='membase', - enable_replica_index=1, eviction_policy='valueOnly', bucket_priority=None): + enable_replica_index=1, eviction_policy='valueOnly', bucket_priority=None, lww=False): Task.__init__(self, "bucket_create_task") self.server = server self.bucket = bucket @@ -285,6 +285,7 @@ def __init__(self, server, bucket='default', replicas=1, size=0, port=11211, pas self.enable_replica_index = enable_replica_index self.eviction_policy = eviction_policy self.bucket_priority = None + self.lww = lww if bucket_priority is not None: self.bucket_priority = 8 @@ -325,7 +326,9 @@ def execute(self, task_manager): bucketType=self.bucket_type, replica_index=self.enable_replica_index, evictionPolicy=self.eviction_policy, - threadsNumber=self.bucket_priority) + threadsNumber=self.bucket_priority, + lww=self.lww + ) else: rest.create_bucket(bucket=self.bucket, ramQuotaMB=self.size, @@ -335,7 +338,8 @@ ... saslPassword=self.password, bucketType=self.bucket_type, replica_index=self.enable_replica_index, - evictionPolicy=self.eviction_policy) + evictionPolicy=self.eviction_policy, + lww=self.lww) self.state = CHECKING task_manager.schedule(self) diff --git a/lib/testconstants.py b/lib/testconstants.py index a0556bf79..9ecfd6343 100644
--- a/lib/testconstants.py +++ b/lib/testconstants.py @@ -10,13 +10,17 @@ "3.0.1", "3.0.2", "3.0.3", "3.1.0", "3.1.1", "3.1.2", "3.1.3", "3.1.4", "3.1.5", "3.1.6", "3.5.0", "4.0.0", "4.0.1", "4.1.0", "4.1.1", "4.1.2", "4.5.0", "4.5.1", "4.6.0", "4.7.0"] +CB_RELEASE_BUILDS = {"2.1.1":"764", "2.2.0":"821", "2.5.2":"1154", + "3.0.3":"1716", "3.1.5":"1859","3.1.6":"1904", + "4.0.0":"4051", "4.1.0":"5005", "4.1.1":"5914", "4.1.2":"6088", + "4.5.0":"2061", "4.5.1":"0000", "4.6.0":"0000", "4.7.0":"0000"} COUCHBASE_FROM_VERSION_3 = ["3.0.0", "3.0.1", "3.0.2", "3.0.3", "3.1.0", "3.1.1", "3.1.2", "3.1.3", "3.1.4", "3.1.5", "3.1.6", "3.5.0", "4.0.0", "4.0.1", "4.1.0", "4.1.1", "4.1.2", "4.5.0", "4.5.1", "4.6.0", "4.7.0"] COUCHBASE_RELEASE_FROM_VERSION_3 = ["3.0.0", "3.0.1", "3.0.2", "3.0.3", "3.1.0", "3.1.1", "3.1.2", "3.1.3", "3.1.5", "4.0.0", - "4.1.0", "4.1.1"] + "4.1.0", "4.1.1", "4.1.2", "4.5.0"] COUCHBASE_FROM_VERSION_4 = ["4.0.0", "4.0.1", "4.1.0", "4.1.1", "4.1.2", "4.5.0", "4.5.1", "4.6.0", "4.7.0"] COUCHBASE_FROM_SHERLOCK = ["4.0.0", "4.0.1", "4.1.0", "4.1.1", "4.1.2", "4.5.0", @@ -103,12 +107,16 @@ INDEX_QUOTA = 512 FTS_QUOTA = 256 LINUX_COUCHBASE_BIN_PATH = "/opt/couchbase/bin/" +LINUX_COUCHBASE_PORT_CONFIG_PATH = "/opt/couchbase/etc/couchbase" +LINUX_COUCHBASE_OLD_CONFIG_PATH = "/opt/couchbase/var/lib/couchbase/config/" LINUX_COUCHBASE_SAMPLE_PATH = "/opt/couchbase/samples/" LINUX_BACKUP_PATH = "/tmp/backup/" LINUX_ROOT_PATH = "/root/" WIN_COUCHBASE_BIN_PATH = "/cygdrive/c/Program\ Files/Couchbase/Server/bin/" WIN_COUCHBASE_SAMPLE_PATH = "/cygdrive/c/Program\ Files/Couchbase/Server/samples/" WIN_COUCHBASE_BIN_PATH_RAW = 'C:/Program\ Files/Couchbase/Server/bin/' +WIN_COUCHBASE_PORT_CONFIG_PATH = "/cygdrive/c/Program\ Files/couchbase/Server/etc/couchbase" +WIN_COUCHBASE_OLD_CONFIG_PATH = "/cygdrive/c/Program\ Files/couchbase/Server/var/lib/couchbase/config" WIN_TMP_PATH = '/cygdrive/c/tmp/' WIN_TMP_PATH_RAW = 'C:/tmp/' WIN_BACKUP_C_PATH = "c:/tmp/backup/" diff --git a/pytests/2i/array_index_2i.py b/pytests/2i/array_index_2i.py index 6226ed4cd..b3a22c22a 100644 --- a/pytests/2i/array_index_2i.py +++ b/pytests/2i/array_index_2i.py @@ -1,3 +1,4 @@ +import json import logging import random from couchbase.bucket import Bucket @@ -191,4 +192,4 @@ def _get_documets(self, bucket_name, field): log.info("Updating {0} in all documents in bucket {1}...".format(field, bucket_name)) query = "SELECT * FROM {0}".format(bucket_name) for row in bucket.n1ql_query(query): - yield row[bucket.bucket]['_id'], bucket.get(key=row[bucket.bucket]['_id']).value \ No newline at end of file + yield row[bucket.bucket]['_id'], json.loads(bucket.get(key=row[bucket.bucket]['_id']).value) \ No newline at end of file diff --git a/pytests/2i/base_2i.py b/pytests/2i/base_2i.py index 7ae5bd015..1c71b7a8d 100644 --- a/pytests/2i/base_2i.py +++ b/pytests/2i/base_2i.py @@ -22,9 +22,6 @@ def setUp(self): self.scan_consistency= self.input.param("scan_consistency","request_plus") self.scan_vector_per_values= self.input.param("scan_vector_per_values",None) self.timeout_for_index_online= self.input.param("timeout_for_index_online",600) - self.max_attempts_check_index= self.input.param("max_attempts_check_index",10) - self.max_attempts_query_and_validate= self.input.param("max_attempts_query_and_validate",10) - self.index_present= self.input.param("index_present",True) self.run_create_index= self.input.param("run_create_index",True) self.verify_query_result= self.input.param("verify_query_result",True) self.verify_explain_result= 
self.input.param("verify_explain_result",True) @@ -46,9 +43,10 @@ def setUp(self): self.query_definitions = query_definition_generator.generate_sabre_data_query_definitions() if self.dataset == "bigdata": self.query_definitions = query_definition_generator.generate_big_data_query_definitions() + if self.dataset == "array": + self.query_definitions = query_definition_generator.generate_airlines_data_query_definitions() self.query_definitions = query_definition_generator.filter_by_group(self.groups, self.query_definitions) self.ops_map = self._create_operation_map() - #self.log.info(self.ops_map) self.find_nodes_in_list() self.generate_map_nodes_out_dist() self.memory_create_list = [] @@ -92,6 +90,15 @@ def async_create_index(self, bucket, query_definition, deploy_node_info = None): defer_build=self.defer_build) return create_index_task + def create_index_using_rest(self, bucket, query_definition, exprType='N1QL', deploy_node_info=None): + ind_content = query_definition.generate_gsi_index_create_query_using_rest(bucket=bucket, deploy_node_info=None, + defer_build=None, + index_where_clause=None, + gsi_type=self.gsi_type) + + log.info("Creating index {0}...".format(query_definition.index_name)) + return self.rest.create_index_with_rest(ind_content) + def async_build_index(self, bucket = "default", index_list = []): self.query = self.n1ql_helper.gen_build_index_query(bucket = bucket, index_list = index_list) self.log.info(self.query) @@ -116,6 +123,20 @@ def multi_create_index(self, buckets=[], query_definitions=[], deploy_node_info= self.memory_create_list.append(index_info) self.create_index(bucket.name, query_definition, deploy_node_info) + def multi_create_index_using_rest(self, buckets=None, query_definitions=None, deploy_node_info=None): + self.index_id_map = {} + if not buckets: + buckets = self.buckets + if not query_definitions: + query_definitions = self.query_definitions + for bucket in buckets: + if bucket not in self.index_id_map.keys(): + self.index_id_map[bucket] = {} + for query_definition in query_definitions: + id_map = self.create_index_using_rest(bucket=bucket, query_definition=query_definition, + deploy_node_info=deploy_node_info) + self.index_id_map[bucket][query_definition] = id_map["id"] + def async_multi_create_index(self, buckets=[], query_definitions=[]): create_index_tasks = [] self.index_lost_during_move_out =[] @@ -155,6 +176,15 @@ def async_multi_create_index(self, buckets=[], query_definitions=[]): else: return create_index_tasks + def multi_drop_index_using_rest(self, buckets=None, query_definitions=None): + if not buckets: + buckets = self.buckets + if not query_definitions: + query_definitions = self.query_definitions + for bucket in buckets: + for query_definition in query_definitions: + self.drop_index_using_rest(bucket, query_definition) + def multi_drop_index(self, buckets = [], query_definitions =[]): for bucket in buckets: for query_definition in query_definitions: @@ -173,13 +203,6 @@ def async_multi_drop_index(self, buckets = [], query_definitions =[]): drop_index_tasks.append(self.async_drop_index(bucket.name, query_definition)) return drop_index_tasks - def sync_multi_drop_index(self, buckets = [], query_definitions =[]): - for bucket in buckets: - for query_definition in query_definitions: - index_info = query_definition.generate_index_drop_query(bucket = bucket.name) - if index_info not in self.memory_drop_list: - self.memory_drop_list.append(index_info) - self.sync_drop_index(bucket.name, query_definition) def drop_index(self, bucket, 
query_definition, verifydrop = True): try: @@ -195,6 +218,13 @@ def drop_index(self, bucket, query_definition, verifydrop = True): actual_result = self.n1ql_helper.run_cbq_query(query = query, server = self.n1ql_node) self.log.info(actual_result) + def drop_index_using_rest(self, bucket, query_definition, verifydrop=True): + self.log.info("Dropping index: {0}...".format(query_definition.index_name)) + self.rest.drop_index_with_rest(self.index_id_map[bucket][query_definition]) + if verifydrop: + check = self.n1ql_helper._is_index_in_list(bucket, query_definition.index_name, server=self.n1ql_node) + self.assertFalse(check, "Index {0} failed to be deleted".format(query_definition.index_name)) + del(self.index_id_map[bucket][query_definition]) def async_drop_index(self, bucket, query_definition): self.query = query_definition.generate_index_drop_query(bucket = bucket, @@ -205,15 +235,6 @@ def async_drop_index(self, bucket, query_definition): index_name = query_definition.index_name) return drop_index_task - def sync_drop_index(self, bucket, query_definition): - self.query = query_definition.generate_index_drop_query(bucket = bucket, - use_gsi_for_secondary = self.use_gsi_for_secondary, use_gsi_for_primary = self.use_gsi_for_primary) - self.gsi_thread.drop_index(self, - server = self.n1ql_node, bucket = bucket, - query = self.query , n1ql_helper = self.n1ql_helper, - index_name = query_definition.index_name) - return drop_index_task - def query_using_index_with_explain(self, bucket, query_definition): self.query = query_definition.generate_query_with_explain(bucket = bucket) actual_result = self.n1ql_helper.run_cbq_query(query = self.query, server = self.n1ql_node) @@ -260,6 +281,8 @@ def sync_multi_query_using_index_with_explain(self, buckets =[], query_definitio def query_using_index(self, bucket, query_definition, expected_result=None, scan_consistency=None, scan_vector=None, verify_results=True): + if not scan_consistency: + scan_consistency = self.scan_consistency self.gen_results.query = query_definition.generate_query(bucket=bucket) if expected_result == None: expected_result = self.gen_results.generate_expected_result(print_expected_result=False) @@ -307,21 +330,25 @@ def multi_query_using_index_with_emptyresult(self, buckets =[], query_definition for query_definition in query_definitions: self.query_using_index_with_emptyset(bucket.name, query_definition) - def multi_query_using_index(self, buckets=[], query_definitions=[], - expected_results={}, scan_consistency=None, + def multi_query_using_index(self, buckets=None, query_definitions=None, + expected_results=None, scan_consistency=None, scan_vectors=None, verify_results=True): + if not buckets: + buckets = self.buckets + if not query_definitions: + query_definitions = self.query_definitions for bucket in buckets: scan_vector = None if scan_vectors != None: scan_vector = scan_vectors[bucket.name] for query_definition in query_definitions: if expected_results: - self.query_using_index(bucket.name, query_definition, expected_results[query_definition.index_name], - scan_consistency=scan_consistency, scan_vector=scan_vector, - verify_results=verify_results) + expected_result = expected_results[query_definition.index_name] else: - self.query_using_index(bucket.name,query_definition, None, scan_consistency=scan_consistency, - scan_vector=scan_vector, verify_results=verify_results) + expected_result = None + self.query_using_index(bucket=bucket.name, query_definition=query_definition, + expected_result=expected_result, 
scan_consistency=scan_consistency, + scan_vector=scan_vector, verify_results=verify_results) def async_multi_query_using_index(self, buckets =[], query_definitions = [], expected_results = {}, scan_consistency = None, scan_vectors = None): multi_query_tasks = [] @@ -438,6 +465,85 @@ def async_run_multi_operations(self, buckets=None, query_definitions=None, expec raise return tasks + def async_run_operations(self, phase, buckets=None, query_definitions=None, expected_results=None, + scan_consistency=None, scan_vectors=None): + if not buckets: + buckets = self.buckets + if not query_definitions: + query_definitions = self.query_definitions + if not scan_consistency: + scan_consistency = self.scan_consistency + tasks = [] + operation_map = self.generate_operation_map(phase) + self.log.info("=== {0}: {1} ===".format(phase.upper(), operation_map)) + nodes_out = [] + if isinstance(self.nodes_out_dist, str): + for service in self.nodes_out_dist.split("-"): + nodes_out.append(service.split(":")[0]) + if operation_map: + try: + if "create_index" in operation_map: + if ("index" in nodes_out or "n1ql" in nodes_out) and phase == "in_between": + tasks = [] + else: + tasks += self.async_multi_create_index(buckets, query_definitions) + if "query_with_explain" in operation_map: + if "n1ql" in nodes_out and phase == "in_between": + tasks = [] + else: + tasks += self.async_multi_query_using_index_with_explain(buckets, query_definitions) + if "query" in operation_map: + if "n1ql" in nodes_out and phase == "in_between": + tasks = [] + else: + tasks += self.async_multi_query_using_index(buckets, query_definitions, expected_results, + scan_consistency=scan_consistency, + scan_vectors=scan_vectors) + if "drop_index" in operation_map: + if "index" in nodes_out or "n1ql" in nodes_out: + if phase == "in_between": + tasks = [] + else: + tasks += self.async_multi_drop_index(self.buckets, query_definitions) + except Exception, ex: + log.info(ex) + raise + return tasks + + def run_full_table_scan_using_rest(self, bucket, query_definition, verify_result=False): + expected_result = [] + actual_result = [] + full_scan_query = "SELECT * FROM {0} WHERE {1}".format(bucket.name, query_definition.index_where_clause) + self.gen_results.query = full_scan_query + temp = self.gen_results.generate_expected_result(print_expected_result=False) + for item in temp: + expected_result.append(item.values()) + if self.scan_consistency == "request_plus": + body = {"stale": "False"} + else: + body = {"stale": "ok"} + content = self.rest.full_table_scan_gsi_index_with_rest(self.index_id_map[bucket][query_definition], body) + if verify_result: + doc_id_list = [] + for item in content: + if item["docid"] not in doc_id_list: + for doc in self.full_docs_list: + if doc["_id"] == item["docid"]: + actual_result.append([doc]) + doc_id_list.append(item["docid"]) + self.assertEqual(len(sorted(actual_result)), len(sorted(expected_result)), + "Actual Items {0} are not equal to expected Items {1}". 
+ format(len(sorted(actual_result)), len(sorted(expected_result)))) + msg = "The number of rows matches but the results mismatch, please check" + if sorted(actual_result) != sorted(expected_result): + raise Exception(msg) + + def run_lookup_gsi_index_with_rest(self, bucket, query_definition): + pass + + def run_range_scan_with_rest(self, bucket, query_definition): + pass + def gen_scan_vector(self, use_percentage = 1.0, use_random = False): servers = self.get_kv_nodes(servers= self.servers[:self.nodes_init]) sequence_bucket_map = self.get_vbucket_seqnos(servers,self.buckets) @@ -556,25 +662,28 @@ def _verify_items_count(self): """ index_map = self.get_index_stats() for bucket_name in index_map.keys(): + self.log.info("Bucket: {0}".format(bucket_name)) for index_name, index_val in index_map[bucket_name].iteritems(): - if index_val["num_docs_pending"] and index_val["num_docs_queued"]: + self.log.info("number of docs pending: {0}".format(index_val["num_docs_pending"])) + self.log.info("number of docs queued: {0}".format(index_val["num_docs_queued"])) + if index_val["num_docs_pending"] and index_val["num_docs_queued"]: return False return True - def _verify_bucket_count_with_index_count(self, query_definitions=None, buckets=[]): + def _verify_bucket_count_with_index_count(self, query_definitions=None, buckets=None): """ :param bucket: :param index: :return: """ + count = 0 if not query_definitions: query_definitions = self.query_definitions if not buckets: buckets = self.buckets - count = 0 while not self._verify_items_count() and count < 15: self.log.info("All Items Yet to be Indexed...") - self.sleep(5) + self.sleep(10) count += 1 if not self._verify_items_count(): self.log.info("All Items didn't get Indexed...") @@ -613,6 +722,23 @@ def _create_operation_map(self): map_after[op_type] = True return {"initial":map_initial, "before":map_before, "in_between": map_in_between, "after": map_after} + def generate_operation_map(self, phase): + operation_map = [] + self.verify_query_result = False + self.verify_explain_result = False + ops = self.input.param(phase, "") + for op_group in ops.split("-"): + for op_type in op_group.split(":"): + if "verify_query_result" in op_type: + self.verify_query_result = True + continue + if "verify_explain_result" in op_type: + self.verify_explain_result = True + continue + if op_type != '': + operation_map.append(op_type) + return operation_map + def _query_explain_in_async(self): tasks = self.async_run_multi_operations(buckets = self.buckets, query_definitions = self.query_definitions, @@ -640,23 +766,6 @@ def _set_query_explain_flags(self, phase): self.ops_map[phase]["query_explain_ops"] = False self.log.info(self.ops_map) - def async_index_operations(self, index_op=None, buckets=[], query_definitions=[]): - func_map = { - "create": self.async_multi_create_index, - "query": self.async_multi_query_using_index, - "query_with_explain": self.async_multi_query_using_index_with_explain, - "drop": self.async_multi_drop_index - } - if not buckets: - buckets = self.buckets - if not query_definitions: - query_definitions = self.query_definitions - if not index_op: - index_op = self.index_op - if index_op != None: - return func_map[index_op](buckets, query_definitions) - return None - def set_indexer_logLevel(self, loglevel="info"): """ :param loglevel: @@ -674,4 +783,4 @@ def set_indexer_logLevel(self, loglevel="info"): self.log.info("Setting indexer log level to {0}".format(loglevel)) server = self.get_nodes_from_services_map(service_type="index") rest = RestConnection(server)
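+ # (reviewer sketch, not part of the patch) generate_operation_map above turns conf params like + # in_between='create_index:query-verify_query_result' into ['create_index', 'query'] + # while switching self.verify_query_result on, so each test phase is driven from the .conf files.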
- status = rest.set_indexer_params("logLevel", loglevel) \ No newline at end of file + status = rest.set_indexer_params("logLevel", loglevel) diff --git a/pytests/2i/compaction_settings_2i.py b/pytests/2i/compaction_settings_2i.py index f0a414a24..c3516f887 100644 --- a/pytests/2i/compaction_settings_2i.py +++ b/pytests/2i/compaction_settings_2i.py @@ -108,21 +108,21 @@ def test_set_compaction_during_compaction(self): self._run_tasks(kv_ops) def test_set_compaction_end_time_abort(self): - kv_ops = self._run_kvops_tasks() - date = datetime.now() servers = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True) rest = RestConnection(servers[0]) initial_index_map = rest.get_index_stats() + kv_ops = self._run_kvops_tasks() + date = datetime.now() #Trust Me this works dayOfWeek = (date.weekday() + (date.hour+((date.minute+5)/60))/24)%7 status, content, header = rest.set_indexer_compaction(indexDayOfWeek=DAYS[dayOfWeek], - indexFromHour=date.hour+((date.minute+1)/60), - indexFromMinute=(date.minute+1)%60, - indexToHour=date.hour+((date.minute+2)/60), - indexToMinute=(date.minute+2)%60, + indexFromHour=date.hour+((date.minute+2)/60), + indexFromMinute=(date.minute+2)%60, + indexToHour=date.hour+((date.minute+3)/60), + indexToMinute=(date.minute+3)%60, abortOutside=True) self.assertTrue(status, "Error in setting Circular Compaction... {0}".format(content)) - self.sleep(180) + self.sleep(300) final_index_map = rest.get_index_stats() self.check_compaction_number(initial_index_map, final_index_map) self._validate_compaction_for_abort_or_complete() diff --git a/pytests/2i/memdb_oom_2i.py b/pytests/2i/memdb_oom_2i.py index e8e038156..4c6a61209 100644 --- a/pytests/2i/memdb_oom_2i.py +++ b/pytests/2i/memdb_oom_2i.py @@ -1,5 +1,6 @@ import logging +from couchbase_helper.tuq_generators import TuqGenerators from couchbase_helper.query_definitions import QueryDefinition from membase.api.rest_client import RestConnection from membase.helper.cluster_helper import ClusterOperationHelper @@ -35,8 +36,8 @@ def setUp(self): self.load_query_definitions.append(query_definition) self.multi_create_index(buckets=self.buckets, query_definitions=self.load_query_definitions, deploy_node_info=self.deploy_node_info) - log.info("Setting indexer memory quota to 300 MB...") - rest.set_indexer_memoryQuota(indexMemoryQuota=300) + log.info("Setting indexer memory quota to 256 MB...") + rest.set_indexer_memoryQuota(indexMemoryQuota=256) self.sleep(30) def tearDown(self): @@ -71,10 +72,10 @@ def test_oom_increase_mem_quota(self): self.sleep(120) indexer_oom = self._validate_indexer_status_oom() if not indexer_oom: - log.info("Indexer out of OOM...") break self.assertFalse(self._validate_indexer_status_oom(), "Indexer still in OOM") - self.sleep(180) + self.sleep(60) + log.info("=== Indexer out of OOM ===") self._verify_bucket_count_with_index_count(self.load_query_definitions) self.multi_query_using_index(buckets=self.buckets, query_definitions=self.load_query_definitions) @@ -97,7 +98,7 @@ def test_oom_drop_indexes(self): log.info("Indexer out of OOM...") self.load_query_definitions = self.load_query_definitions[i+1:] break - self.sleep(180) + self.sleep(60) self.assertFalse(self._validate_indexer_status_oom(), "Indexer still in OOM") self._verify_bucket_count_with_index_count(self.load_query_definitions) self.multi_query_using_index(buckets=self.buckets, @@ -119,7 +120,7 @@ def test_oom_flush_bucket(self): if not self._validate_indexer_status_oom(): log.info("Indexer out of OOM...") break - self.sleep(180) + 
self.sleep(60) self.assertFalse(self._validate_indexer_status_oom(), "Indexer still in OOM") self._verify_bucket_count_with_index_count(self.load_query_definitions) self.multi_query_using_index(buckets=self.buckets, @@ -136,7 +137,7 @@ def test_oom_delete_bucket(self): for i in range(len(self.buckets)): log.info("Deleting bucket {0}...".format(self.buckets[i].name)) BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.oomServer, bucket=self.buckets[i].name) - self.sleep(120) + self.sleep(60) check = self._validate_indexer_status_oom() if not check: if i < len(self.buckets): @@ -146,7 +147,7 @@ def test_oom_delete_bucket(self): self.buckets = [] break log.info("Indexer Still in OOM...") - self.sleep(180) + self.sleep(60) self.assertFalse(self._validate_indexer_status_oom(), "Indexer still in OOM") self._verify_bucket_count_with_index_count(self.load_query_definitions) self.multi_query_using_index(buckets=self.buckets, @@ -166,7 +167,7 @@ def test_oom_kv_rebalance_in(self): self.nodes_in_list, [], services = ["kv"]) rebalance.result() self._bring_indexer_back_to_life() - self.sleep(180) + self.sleep(60) self._verify_bucket_count_with_index_count(self.load_query_definitions) self.multi_query_using_index(buckets=self.buckets, query_definitions=self.load_query_definitions) @@ -186,15 +187,14 @@ def test_oom_kv_rebalance_out(self): rebalance = self.cluster.async_rebalance(self.servers, [], [kv_node]) rebalance.result() self._bring_indexer_back_to_life() - self.sleep(180) + self.sleep(60) self._verify_bucket_count_with_index_count(self.load_query_definitions) - self.multi_query_using_index(buckets=self.buckets, - query_definitions=self.load_query_definitions) + self.multi_query_using_index(buckets=self.buckets, query_definitions=self.load_query_definitions) def test_oom_kv_restart(self): """ 1. Get indexer to OOM. - 2. Stop COuchbase on one of the KV nodes. + 2. Stop Couchbase on one of the KV nodes. 3. Get indexer out of OOM. 4. Query - Should Fail 5. Start Couchbase on that KV node. 
@@ -216,7 +216,7 @@ def test_oom_kv_restart(self): log.info("Indexer out of OOM...") self.load_query_definitions = self.load_query_definitions[i+1:] break - self.sleep(180) + self.sleep(60) try: self._verify_bucket_count_with_index_count(self.load_query_definitions) self.multi_query_using_index(buckets=self.buckets, @@ -226,10 +226,9 @@ def test_oom_kv_restart(self): finally: log.info("Starting Couchbase on {0}".format(kv_node.ip)) remote.start_server() - self.sleep(180) + self.sleep(60) self._verify_bucket_count_with_index_count(self.load_query_definitions) - self.multi_query_using_index(buckets=self.buckets, - query_definitions=self.load_query_definitions) + self.multi_query_using_index(buckets=self.buckets, query_definitions=self.load_query_definitions) def test_oom_indexer_reboot(self): """ @@ -239,16 +238,14 @@ def test_oom_indexer_reboot(self): """ self.assertTrue(self._push_indexer_off_the_cliff(), "OOM Can't be achieved") log.info("Rebooting {0}".format(self.oomServer.ip)) - #rest = RemoteMachineShellConnection(self.oomServer) self._reboot_node(self.oomServer) check = self._validate_indexer_status_oom() if check: log.info("Indexer in OOM after reboot...") self._bring_indexer_back_to_life() - self.sleep(180) + self.sleep(60) self._verify_bucket_count_with_index_count(self.load_query_definitions) - self.multi_query_using_index(buckets=self.buckets, - query_definitions=self.load_query_definitions) + self.multi_query_using_index(buckets=self.buckets, query_definitions=self.load_query_definitions) def test_oom_reduce_mem_quota(self): """ @@ -259,11 +256,20 @@ def test_oom_reduce_mem_quota(self): """ indexer_memQuota = self.get_indexer_mem_quota() log.info("Current Indexer Memory Quota is {}".format(indexer_memQuota)) - used_memory = self.get_indexer_mem_quota() rest = RestConnection(self.oomServer) - rest.set_indexer_memoryQuota(indexMemoryQuota=used_memory) - self.sleep(180) - self.assertTrue(self._validate_indexer_status_oom(), "Indexer is still Online") + count = 0 + while count < 5: + used_memory = self.get_indexer_mem_quota() + #Setting memory to 90 % of used memory. 
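+ # (reviewer note, not part of the patch) e.g. with 512 MB in use the quota drops to 460 MB, + # and the loop retries up to 5 times until _validate_indexer_status_oom() reports the indexer as Paused.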
+ set_memory = int(used_memory) * 90/100 + rest.set_indexer_memoryQuota(indexMemoryQuota=set_memory) + self.sleep(120) + check = self._validate_indexer_status_oom() + if check: + log.info("Indexer is Paused after setting memory quota to {0}".format(set_memory)) + break + count += 1 + if count == 5: + self.assertFalse(self._validate_indexer_status_oom(), "Indexer still in OOM") def test_change_mem_quota_when_index_building(self): rest = RestConnection(self.oomServer) @@ -273,8 +279,8 @@ def test_change_mem_quota_when_index_building(self): query_definitions = [] for x in range(3): index_name = "index_"+str(x) - query_definition = QueryDefinition(index_name=index_name, index_fields = ["job_title"], - query_template = self.query_template, groups = ["simple"]) + query_definition = QueryDefinition(index_name=index_name, index_fields=["job_title"], + query_template=self.query_template, groups=["simple"]) query_definitions.append(query_definition) create_tasks = [] build_tasks = [] @@ -325,8 +331,8 @@ def test_oom_create_build_index(self): """ self.assertTrue(self._push_indexer_off_the_cliff(), "OOM Can't be achieved") index_name = "oom_index" - query_definition = QueryDefinition(index_name=index_name, index_fields = ["join_mo"], \ - query_template = "", groups = ["simple"]) + query_definition = QueryDefinition(index_name=index_name, index_fields=["join_mo"], + query_template="", groups=["simple"]) try: self.create_index(self.buckets[0].name, query_definition, self.deploy_node_info) except Exception, ex: @@ -340,12 +346,13 @@ def test_oom_create_index(self): self.defer_build = False self.assertTrue(self._push_indexer_off_the_cliff(), "OOM Can't be achieved") index_name = "oom_index" - query_definition = QueryDefinition(index_name=index_name, index_fields = ["join_mo"], \ - query_template = "", groups = ["simple"]) + query_definition = QueryDefinition(index_name=index_name, index_fields=["join_mo"], + query_template="", groups=["simple"]) try: - task = self.async_create_index(bucket[0].name, query_definition) + task = self.async_create_index(self.buckets[0].name, query_definition) task.result() except Exception, ex: + log.info("Cannot Create Index on Paused Indexer as expected") log.info("{0}".format(str(ex))) def _push_indexer_off_the_cliff(self): @@ -355,17 +362,15 @@ """ cnt = 0 docs = 50 - indexer_oom = self._validate_indexer_status_oom() - while not indexer_oom and cnt < 10: - for task in self.kv_mutations(docs, cnt%2): + while cnt < 10: + if self._validate_indexer_status_oom(): + log.info("OOM on {0} is achieved".format(self.oomServer.ip)) + return True + for task in self.kv_mutations(docs): task.result() self.sleep(30) - indexer_oom = self._validate_indexer_status_oom() cnt += 1 docs += 50 - if indexer_oom: - log.info("OOM on {0} is achieved".format(self.oomServer.ip)) - return True return False def _bring_indexer_back_to_life(self): @@ -418,12 +423,13 @@ def _validate_indexer_status_oom(self): else: return False - def kv_mutations(self, docs=1, count=0): + def kv_mutations(self, docs=1): if not docs: docs = self.docs_per_day - ops = {0: "create", 1: "update"} gens_load = self.generate_docs(docs) + self.full_docs_list = self.generate_full_docs_list(gens_load) + self.gen_results = TuqGenerators(self.log, self.full_docs_list) tasks = self.async_load(generators_load=gens_load, op_type="create", batch_size=self.batch_size) return tasks @@ -451,4 +457,4 @@ def _reboot_node(self, node): # disable
firewall on these nodes self.stop_firewall_on_node(node) # wait till node is ready after warmup - ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self, wait_if_warmup=True) \ No newline at end of file + ClusterOperationHelper.wait_for_ns_servers_or_assert([node], self, wait_if_warmup=True) diff --git a/pytests/2i/recovery_2i.py b/pytests/2i/recovery_2i.py index eab34536c..3bcd87f0a 100644 --- a/pytests/2i/recovery_2i.py +++ b/pytests/2i/recovery_2i.py @@ -167,7 +167,11 @@ def test_server_crash(self): before_index_ops = self._run_before_index_tasks() for node in self.nodes_out_list: remote = RemoteMachineShellConnection(node) - remote.terminate_process(process_name=self.targetProcess) + if self.targetProcess == "memcached": + remote.kill_memcached() + else: + remote.terminate_process(process_name=self.targetProcess) + self.sleep(20) in_between_index_ops = self._run_in_between_tasks() self._run_tasks([kvOps_tasks, before_index_ops, in_between_index_ops]) self._run_after_index_tasks() @@ -201,10 +205,11 @@ def test_server_restart(self): for node in self.nodes_out_list: remote = RemoteMachineShellConnection(node) remote.stop_server() - self.sleep(20) + self.sleep(30) for node in self.nodes_out_list: remote = RemoteMachineShellConnection(node) remote.start_server() + self.sleep(30) in_between_index_ops = self._run_in_between_tasks() self._run_tasks([kvOps_tasks, before_index_ops, in_between_index_ops]) self._run_after_index_tasks() @@ -228,7 +233,7 @@ def test_failover(self): rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], servr_out) rebalance.result() - self.sleep(100) + self.sleep(120) self._run_tasks([kvOps_tasks, before_index_ops, in_between_index_ops]) self._run_after_index_tasks() except Exception, ex: @@ -262,7 +267,7 @@ def test_failover_add_back(self): rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], []) in_between_index_ops = self._run_in_between_tasks() rebalance.result() - self.sleep(100) + self.sleep(120) self._run_tasks([kvOps_tasks, before_index_ops, in_between_index_ops]) self._run_after_index_tasks() except Exception, ex: @@ -377,8 +382,8 @@ def test_autofailover(self): [], [servr_out[0]]) in_between_index_ops = self._run_in_between_tasks() rebalance.result() + self.sleep(120) self._run_tasks([kvOps_tasks, before_index_ops, in_between_index_ops]) - self.sleep(100) self._run_after_index_tasks() except Exception, ex: raise @@ -397,7 +402,7 @@ def test_network_partitioning(self): self._run_tasks([before_index_ops]) for node in self.nodes_out_list: self.start_firewall_on_node(node) - self.sleep(10) + self.sleep(20) in_between_index_ops = self._run_in_between_tasks() self._run_tasks([kvOps_tasks, in_between_index_ops]) except Exception, ex: @@ -406,7 +411,7 @@ def test_network_partitioning(self): finally: for node in self.nodes_out_list: self.stop_firewall_on_node(node) - self.sleep(10) + self.sleep(30) self._run_after_index_tasks() def test_couchbase_bucket_compaction(self): @@ -417,7 +422,7 @@ def test_couchbase_bucket_compaction(self): kvOps_tasks = self._run_kvops_tasks() before_index_ops = self._run_before_index_tasks() for bucket in self.buckets: - compact_tasks.append(self.cluster.async_compact_bucket(self.master,bucket)) + compact_tasks.append(self.cluster.async_compact_bucket(self.master, bucket)) in_between_index_ops = self._run_in_between_tasks() self._run_tasks([kvOps_tasks, before_index_ops, in_between_index_ops]) for task in compact_tasks: diff --git a/pytests/2i/upgrade_2i.py 
b/pytests/2i/upgrade_2i.py index fa2d8f4d2..aab79500c 100644 --- a/pytests/2i/upgrade_2i.py +++ b/pytests/2i/upgrade_2i.py @@ -1,4 +1,5 @@ import logging +from datetime import datetime from base_2i import BaseSecondaryIndexingTests from couchbase_helper.query_definitions import QueryDefinition, SQLDefinitionGenerator @@ -21,8 +22,8 @@ def setUp(self): self.whereCondition= self.input.param("whereCondition"," job_title != \"Sales\" ") query_template += " WHERE {0}".format(self.whereCondition) self.load_query_definitions = [] - self.initial_index_number = self.input.param("initial_index_number", 2) - for x in range(1,self.initial_index_number): + self.initial_index_number = self.input.param("initial_index_number", 3) + for x in range(self.initial_index_number): index_name = "index_name_"+str(x) query_definition = QueryDefinition(index_name=index_name, index_fields=["job_title"], query_template=query_template, groups=["simple"]) @@ -60,14 +61,11 @@ def test_offline_upgrade(self): query_definitions=self.load_query_definitions) def test_online_upgrade(self): - index_task = [] services_in = [] - if self.index_op != "create": - create_task = self.async_index_operations("create") - self._run_tasks([create_task]) + before_tasks = self.async_run_operations(buckets=self.buckets, phase="before") server_out = self.nodes_out_list - if not self.all_index_nodes_lost: - index_task = self.async_index_operations() + self._run_tasks([before_tasks]) + in_between_tasks = self.async_run_operations(buckets=self.buckets, phase="in_between") kv_ops = self.kv_mutations() log.info("Upgrading servers to {0}...".format(self.upgrade_to)) rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],[],self.nodes_out_list) @@ -87,29 +85,29 @@ def test_online_upgrade(self): self.nodes_out_list, [], services=services_in) rebalance.result() - self._run_tasks([kv_ops, index_task]) + self._run_tasks([kv_ops, in_between_tasks]) self.sleep(60) log.info("Upgraded to: {0}".format(node_version)) - if self.index_op == "drop" or self.all_index_nodes_lost: - create_task = self.async_index_operations("create") - self._run_tasks([create_task]) - self._verify_bucket_count_with_index_count() - index_task = self.async_index_operations("query") - self._run_tasks([index_task]) + nodes_out = [] + for service in self.nodes_out_dist.split("-"): + nodes_out.append(service.split(":")[0]) + if "index" in nodes_out or "n1ql" in nodes_out: + self._verify_bucket_count_with_index_count(query_definitions=self.load_query_definitions) + else: + self._verify_bucket_count_with_index_count() + after_tasks = self.async_run_operations(buckets=self.buckets, phase="after") + self.sleep(180) + self._run_tasks([after_tasks]) def test_online_upgrade_swap_rebalance(self): """ - :return: """ - index_task = [] + before_tasks = self.async_run_operations(buckets=self.buckets, phase="before") + self._run_tasks([before_tasks]) self._install(self.nodes_in_list,version=self.upgrade_to) - if self.index_op != "create": - create_task = self.async_index_operations("create") - self._run_tasks([create_task]) + in_between_tasks = self.async_run_operations(buckets=self.buckets, phase="in_between") kv_ops = self.kv_mutations() - if not self.all_index_nodes_lost: - index_task = self.async_index_operations() log.info("Swapping servers...") rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], self.nodes_in_list, @@ -117,18 +115,25 @@ def test_online_upgrade_swap_rebalance(self): rebalance.result() log.info("===== Nodes Swapped with Upgraded versions 
=====") self.upgrade_servers = self.nodes_in_list - self._run_tasks([kv_ops, index_task]) + self._run_tasks([kv_ops, in_between_tasks]) self.sleep(60) - if self.index_op != "drop": + nodes_out = [] + for service in self.nodes_out_dist.split("-"): + nodes_out.append(service.split(":")[0]) + if "index" in nodes_out or "n1ql" in nodes_out: + self._verify_bucket_count_with_index_count(query_definitions=self.load_query_definitions) + else: self._verify_bucket_count_with_index_count() - index_task = self.async_index_operations("query") - self._run_tasks([index_task]) + after_tasks = self.async_run_operations(buckets=self.buckets, phase="after") + self.sleep(180) + self._run_tasks([after_tasks]) def test_upgrade_with_memdb(self): """ Keep N1ql node on one of the kv nodes :return: """ + self.set_circular_compaction = self.input.param("set_circular_compaction", False) kv_nodes = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True) log.info("Upgrading all kv nodes...") for node in kv_nodes: @@ -190,11 +195,99 @@ def test_upgrade_with_memdb(self): services=services_in) rebalance.result() self.sleep(60) - create_task = self.async_index_operations("create") - self._run_tasks([create_task]) + if self.set_circular_compaction: + DAYS = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"] + servers = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True) + rest = RestConnection(servers[0]) + date = datetime.now() + dayOfWeek = (date.weekday() + (date.hour+((date.minute+5)/60))/24)%7 + status, content, header = rest.set_indexer_compaction(indexDayOfWeek=DAYS[dayOfWeek], + indexFromHour=date.hour+((date.minute+1)/60), + indexFromMinute=(date.minute+1)%60) + self.assertTrue(status, "Error in setting Circular Compaction... 
{0}".format(content)) + self.multi_create_index(self.buckets, self.query_definitions) self._verify_bucket_count_with_index_count() - index_task = self.async_index_operations("query") - self._run_tasks([index_task]) + self.multi_query_using_index(self.buckets, self.query_definitions) + + def test_online_upgrade_with_two_query_nodes(self): + query_nodes = self.get_nodes_from_services_map(service_type="n1ql", get_all_nodes=True) + upgrade_node = query_nodes[0] + self.assertGreater(len(query_nodes), 1, "Test requires more than 1 Query Node") + before_tasks = self.async_run_operations(buckets=self.buckets, phase="before") + self._run_tasks([before_tasks]) + in_between_tasks = self.async_run_operations(buckets=self.buckets, phase="in_between") + kv_ops = self.kv_mutations() + log.info("Upgrading servers to {0}...".format(self.upgrade_to)) + self.n1ql_node = query_nodes[1] + rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],[], [upgrade_node]) + rebalance.result() + self.upgrade_servers = self.nodes_out_list + upgrade_th = self._async_update(self.upgrade_to, [upgrade_node]) + for th in upgrade_th: + th.join() + log.info("==== Upgrade Complete ====") + node_version = RestConnection(upgrade_node).get_nodes_versions() + services_in = ["n1ql"] + rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], + [upgrade_node], [], + services=services_in) + rebalance.result() + self._run_tasks([kv_ops, in_between_tasks]) + self.sleep(60) + log.info("Upgraded to: {0}".format(node_version)) + for node in query_nodes: + if node == upgrade_node: + self.n1ql_node = node + try: + self._verify_bucket_count_with_index_count() + after_tasks = self.async_run_operations(buckets=self.buckets, phase="after") + self._run_tasks([after_tasks]) + except Exception, ex: + log.info(str(ex)) + else: + self._verify_bucket_count_with_index_count() + after_tasks = self.async_run_operations(buckets=self.buckets, phase="after") + self._run_tasks([after_tasks]) + + def test_online_upgrade_with_mixed_mode_cluster(self): + kv_nodes = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True) + upgrade_node = kv_nodes[0] + self.assertGreater(len(kv_nodes), 1, "Test requires more than 1 kv Node") + before_tasks = self.async_run_operations(buckets=self.buckets, phase="before") + self._run_tasks([before_tasks]) + self.n1ql_node = kv_nodes[1] + in_between_tasks = self.async_run_operations(buckets=self.buckets, phase="in_between") + kv_ops = self.kv_mutations() + log.info("Upgrading servers to {0}...".format(self.upgrade_to)) + rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],[], [upgrade_node]) + rebalance.result() + self.upgrade_servers = self.nodes_out_list + upgrade_th = self._async_update(self.upgrade_to, [upgrade_node]) + for th in upgrade_th: + th.join() + log.info("==== Upgrade Complete ====") + node_version = RestConnection(upgrade_node).get_nodes_versions() + services_in = self.services_in + rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], + [upgrade_node], [], + services=services_in) + rebalance.result() + self._run_tasks([kv_ops, in_between_tasks]) + self.sleep(60) + log.info("Upgraded to: {0}".format(node_version)) + for node in kv_nodes: + if node == upgrade_node: + self.n1ql_node = node + try: + self._verify_bucket_count_with_index_count() + after_tasks = self.async_run_operations(buckets=self.buckets, phase="after") + self._run_tasks([after_tasks]) + except Exception, ex: + log.info(str(ex)) + else: + 
self._verify_bucket_count_with_index_count() + after_tasks = self.async_run_operations(buckets=self.buckets, phase="after") + self._run_tasks([after_tasks]) def kv_mutations(self, docs=None): if not docs: @@ -205,6 +298,5 @@ def kv_mutations(self, docs=None): def _run_tasks(self, tasks_list): for tasks in tasks_list: - if tasks: - for th in tasks: - th.result() + for task in tasks: + task.result() diff --git a/pytests/CCCP.py b/pytests/CCCP.py index 2b089273d..939cccbcb 100644 --- a/pytests/CCCP.py +++ b/pytests/CCCP.py @@ -5,6 +5,7 @@ from couchbase_helper.document import View from couchbase_helper.documentgenerator import BlobGenerator from remote.remote_util import RemoteMachineShellConnection +from testconstants import COUCHBASE_FROM_VERSION_4 class CCCP(BaseTestCase): @@ -40,7 +41,8 @@ def test_get_config_rest(self): tasks = self.run_ops() for task in tasks: if not task: - self.fail("failed to failover") + self.fail("no task to run") + task.result() for bucket in self.buckets: config = RestConnection(self.master).get_bucket_CCCP(bucket) self.verify_config(config, bucket) @@ -86,7 +88,12 @@ def verify_config(self, config_json, bucket): self.assertTrue(param in config_json, "No %s in config" % param) self.assertTrue("name" in config_json and config_json["name"] == bucket.name, "No bucket name in config") - self.assertTrue(len(config_json["nodes"]) == self.nodes_init, + if self.cb_version[:5] in COUCHBASE_FROM_VERSION_4: + self.assertTrue(len(config_json["nodesExt"]) == self.nodes_init, + "Number of nodes expected %s, actual %s" % ( + self.nodes_init, len(config_json["nodesExt"]))) + else: + self.assertTrue(len(config_json["nodes"]) == self.nodes_init, "Number of nodes expected %s, actual %s" % ( self.nodes_init, len(config_json["nodes"]))) for node in config_json["nodes"]: diff --git a/pytests/autocompaction.py b/pytests/autocompaction.py index ab981718e..d3553a1e5 100644 --- a/pytests/autocompaction.py +++ b/pytests/autocompaction.py @@ -130,7 +130,7 @@ def test_database_fragmentation(self): insert_thread.start() compact_run = remote_client.wait_till_compaction_end(rest, bucket_name, - timeout_in_seconds=(self.wait_timeout * 5)) + timeout_in_seconds=(self.wait_timeout * 10)) if not compact_run: self.fail("auto compaction does not run") diff --git a/pytests/backup/ibr.py b/pytests/backup/ibr.py index d7bf72918..3557c237e 100644 --- a/pytests/backup/ibr.py +++ b/pytests/backup/ibr.py @@ -93,7 +93,7 @@ def verify_dir_structure(self, total_backups, buckets, nodes): if 'failover.json' in line: if re.search(pattern_backup_files, line): failover += 1 - if 'meta.json' in line: + if self.cb_version[:5] != "4.5.1" and 'meta.json' in line: if re.search(pattern_backup_files, line): meta_json += 1 if 'design.json' in line: @@ -104,16 +104,21 @@ def verify_dir_structure(self, total_backups, buckets, nodes): .format(expected_data_cbb, data_cbb)) self.log.info("expected_failover_json {0} failover {1}" .format(expected_failover_json, failover)) - self.log.info("expected_meta_json {0} meta_json {1}" + if self.cb_version[:5] != "4.5.1": + self.log.info("expected_meta_json {0} meta_json {1}" .format(expected_meta_json, meta_json)) """ add json support later in this test self.log.info("expected_design_json {0} design_json {1}" .format(expected_design_json, design_json)) """ - if data_cbb == expected_data_cbb and failover == expected_failover_json and \ - meta_json == expected_meta_json: - # add support later in and design_json == expected_design_json: - return True + if self.cb_version[:5] != "4.5.1": + if 
data_cbb == expected_data_cbb and failover == expected_failover_json and \ + meta_json == expected_meta_json: + # add support later in and design_json == expected_design_json: + return True + else: + if data_cbb == expected_data_cbb and failover == expected_failover_json: + return True return False diff --git a/pytests/basetestcase.py b/pytests/basetestcase.py index 1affe0bec..be9d0880e 100644 --- a/pytests/basetestcase.py +++ b/pytests/basetestcase.py @@ -30,6 +30,8 @@ from testconstants import MIN_COMPACTION_THRESHOLD from testconstants import MAX_COMPACTION_THRESHOLD from membase.helper.cluster_helper import ClusterOperationHelper + +from couchbase_cli import CouchbaseCLI import testconstants @@ -39,6 +41,7 @@ def setUp(self): self.input = TestInputSingleton.input self.primary_index_created = False self.use_sdk_client = self.input.param("use_sdk_client",False) + self.analytics = self.input.param("analytics",False) if self.input.param("log_level", None): self.log.setLevel(level=0) for hd in self.log.handlers: @@ -133,7 +136,7 @@ def setUp(self): self.enable_time_sync = self.input.param("enable_time_sync", False) self.gsi_type = self.input.param("gsi_type", 'forestdb') self.bucket_size = self.input.param("bucket_size", None) - self.kv_store_required = self.input.param("kv_store_required", 1) + self.lww = self.input.param("lww", False) # only applies to LWW but is here because the bucket is created here if self.skip_setup_cleanup: self.buckets = RestConnection(self.master).get_buckets() return @@ -422,10 +425,11 @@ def _initialize_nodes(self, cluster, servers, disabled_consistent_view=None, reb return quota def _bucket_creation(self): - if (self.default_bucket==True): + if self.default_bucket: self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas, enable_replica_index=self.enable_replica_index, - eviction_policy=self.eviction_policy) + eviction_policy=self.eviction_policy, + lww=self.lww) self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size, eviction_policy=self.eviction_policy)) @@ -879,7 +883,7 @@ def _load_doc_data_all_buckets(self, data_op="create", batch_size=1000, gen_load def verify_cluster_stats(self, servers=None, master=None, max_verify=None, timeout=None, check_items=True, only_store_hash=True, replica_to_read=None, batch_size=1000, check_bucket_stats=True, - check_ep_items_remaining=False): + check_ep_items_remaining=False, verify_total_items=True): servers = self.get_kv_nodes(servers) if servers is None: servers = self.servers @@ -903,11 +907,12 @@ def verify_cluster_stats(self, servers=None, master=None, max_verify=None, if check_bucket_stats: self._verify_stats_all_buckets(servers, timeout=(timeout or 120)) # verify that curr_items_tot corresponds to sum of curr_items from all nodes - verified = True - for bucket in self.buckets: - verified &= RebalanceHelper.wait_till_total_numbers_match(master, bucket) - self.assertTrue(verified, "Lost items!!! Replication was completed but " - " sum(curr_items) don't match the curr_items_total") + if verify_total_items: + verified = True + for bucket in self.buckets: + verified &= RebalanceHelper.wait_till_total_numbers_match(master, bucket) + self.assertTrue(verified, "Lost items!!! 
Replication was completed but " + " sum(curr_items) don't match the curr_items_total") else: self.log.warn("verification of items was omitted") @@ -1118,17 +1123,16 @@ def change_checkpoint_params(self): def change_password(self, new_password="new_password"): nodes = RestConnection(self.master).node_statuses() - remote_client = RemoteMachineShellConnection(self.master) - options = "--cluster-init-password=%s" % new_password - cli_command = "cluster-edit" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, - cluster_host="localhost:8091", - user=self.master.rest_username, - password=self.master.rest_password) + + + cli = CouchbaseCLI(self.master, self.master.rest_username, self.master.rest_password ) + output, err, result = cli.setting_cluster(data_ramsize=False, index_ramsize=False, fts_ramsize=False, cluster_name=None, + cluster_username=None, cluster_password=new_password, cluster_port=False) + self.log.info(output) # MB-10136 & MB-9991 - if error: - raise Exception("Password didn't change! %s" % error) + if not result: + raise Exception("Password didn't change!") self.log.info("new password '%s' on nodes: %s" % (new_password, [node.ip for node in nodes])) for node in nodes: for server in self.servers: diff --git a/pytests/cbas/__init__.py b/pytests/cbas/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pytests/cbas/cbas_base.py b/pytests/cbas/cbas_base.py new file mode 100644 index 000000000..6a5b999d0 --- /dev/null +++ b/pytests/cbas/cbas_base.py @@ -0,0 +1,321 @@ +import json +from TestInput import TestInputSingleton +from remote.remote_util import RemoteMachineShellConnection +from basetestcase import BaseTestCase +from lib.couchbase_helper.analytics_helper import * +from couchbase_helper.documentgenerator import DocumentGenerator + + +class CBASBaseTest(BaseTestCase): + def setUp(self): + super(CBASBaseTest, self).setUp() + self.cbas_node = self.input.cbas + self.analytics_helper = AnalyticsHelper() + invalid_ip = '10.111.151.109' + self.cb_bucket_name = TestInputSingleton.input.param('cb_bucket_name', + 'travel-sample') + self.cbas_bucket_name = TestInputSingleton.input.param( + 'cbas_bucket_name', 'travel') + self.cb_bucket_password = TestInputSingleton.input.param( + 'cb_bucket_password', '') + self.expected_error = TestInputSingleton.input.param("error", None) + if self.expected_error: + self.expected_error = self.expected_error.replace("INVALID_IP",invalid_ip) + self.expected_error = self.expected_error.replace("PORT",self.master.port) + self.cb_server_ip = TestInputSingleton.input.param("cb_server_ip", + self.master.ip) + self.cb_server_ip = self.cb_server_ip.replace('INVALID_IP',invalid_ip) + self.cbas_dataset_name = TestInputSingleton.input.param( + "cbas_dataset_name", 'travel_ds') + self.cbas_bucket_name_invalid = self.input.param( + 'cbas_bucket_name_invalid', self.cbas_bucket_name) + self.cbas_dataset2_name = self.input.param('cbas_dataset2_name', None) + self.skip_create_dataset = self.input.param('skip_create_dataset', + False) + self.disconnect_if_connected = self.input.param( + 'disconnect_if_connected', False) + self.cbas_dataset_name_invalid = self.input.param( + 'cbas_dataset_name_invalid', self.cbas_dataset_name) + self.skip_drop_connection = self.input.param('skip_drop_connection', + False) + self.skip_drop_dataset = self.input.param('skip_drop_dataset', False) + + self.query_id = self.input.param('query_id',None) + + # Drop any existing buckets and datasets + self.cleanup_cbas() + + def 
tearDown(self): + super(CBASBaseTest, self).tearDown() + + def load_sample_buckets(self, server, bucketName): + """ + Load the specified sample bucket in Couchbase + """ + shell = RemoteMachineShellConnection(server) + shell.execute_command("""curl -v -u Administrator:password -X POST http://{0}:8091/sampleBuckets/install -d '["{1}"]'""".format(server.ip, bucketName)) + shell.disconnect() + self.sleep(5) + + def create_bucket_on_cbas(self, cbas_bucket_name, cb_bucket_name, + cb_server_ip, + validate_error_msg=False): + """ + Creates a bucket on CBAS + """ + cmd_create_bucket = "create bucket " + cbas_bucket_name + " with {\"name\":\"" + cb_bucket_name + "\",\"nodes\":\"" + cb_server_ip + "\"};" + status, metrics, errors, results = self.execute_statement_on_cbas( + cmd_create_bucket, self.master) + if validate_error_msg: + return self.validate_error_in_response(status, errors) + else: + if status != "success": + return False + else: + return True + + def create_dataset_on_bucket(self, cbas_bucket_name, cbas_dataset_name, + where_field=None, where_value = None, + validate_error_msg=False): + """ + Creates a shadow dataset on a CBAS bucket + """ + cmd_create_dataset = "create shadow dataset {0} on {1};".format( + cbas_dataset_name, cbas_bucket_name) + if where_field and where_value: + cmd_create_dataset = "create shadow dataset {0} on {1} WHERE `{2}`=\"{3}\";".format( + cbas_dataset_name, cbas_bucket_name, where_field, where_value) + status, metrics, errors, results = self.execute_statement_on_cbas( + cmd_create_dataset, self.master) + if validate_error_msg: + return self.validate_error_in_response(status, errors) + else: + if status != "success": + return False + else: + return True + + def connect_to_bucket(self, cbas_bucket_name, cb_bucket_password="", + validate_error_msg=False): + """ + Connects to a CBAS bucket + """ + cmd_connect_bucket = "connect bucket " + cbas_bucket_name + " with {\"password\":\"" + cb_bucket_password + "\"};" + status, metrics, errors, results = self.execute_statement_on_cbas( + cmd_connect_bucket, self.master) + if validate_error_msg: + return self.validate_error_in_response(status, errors) + else: + if status != "success": + return False + else: + return True + + def disconnect_from_bucket(self, cbas_bucket_name, + disconnect_if_connected=False, + validate_error_msg=False): + """ + Disconnects from a CBAS bucket + """ + if disconnect_if_connected: + cmd_disconnect_bucket = "disconnect bucket {0} if connected;".format( + cbas_bucket_name) + else: + cmd_disconnect_bucket = "disconnect bucket {0};".format( + cbas_bucket_name) + + status, metrics, errors, results = self.execute_statement_on_cbas( + cmd_disconnect_bucket, self.master) + if validate_error_msg: + return self.validate_error_in_response(status, errors) + else: + if status != "success": + return False + else: + return True + + def drop_dataset(self, cbas_dataset_name, validate_error_msg=False): + """ + Drop dataset from CBAS + """ + cmd_drop_dataset = "drop dataset {0};".format(cbas_dataset_name) + status, metrics, errors, results = self.execute_statement_on_cbas( + cmd_drop_dataset, self.master) + if validate_error_msg: + return self.validate_error_in_response(status, errors) + else: + if status != "success": + return False + else: + return True + + def drop_cbas_bucket(self, cbas_bucket_name, validate_error_msg=False): + """ + Drop a CBAS bucket + """ + cmd_drop_bucket = "drop bucket {0};".format(cbas_bucket_name) + status, metrics, errors, results = self.execute_statement_on_cbas( + cmd_drop_bucket, 
self.master) + if validate_error_msg: + return self.validate_error_in_response(status, errors) + else: + if status != "success": + return False + else: + return True + + def execute_statement_on_cbas(self, statement, server): + """ + Executes a statement on CBAS using the REST API + """ + shell = RemoteMachineShellConnection(server) + output, error = shell.execute_command( + """curl -s --data pretty=true --data-urlencode 'statement={1}' http://{0}:8095/analytics/service -v""".format( + self.cbas_node.ip, statement)) + response = "" + for line in output: + response = response + line + response = json.loads(response) + self.log.info(response) + shell.disconnect() + + if "errors" in response: + errors = response["errors"] + else: + errors = None + + if "results" in response: + results = response["results"] + else: + results = None + + return response["status"], response["metrics"], errors, results + + def validate_error_in_response(self, status, errors): + """ + Validates if the error message in the response is the same as the expected one. + """ + if status != "success": + actual_error = errors[0]["msg"] + if self.expected_error != actual_error: + return False + else: + return True + return False + + def cleanup_cbas(self): + """ + Drops all connections, datasets and buckets from CBAS + """ + try: + # Disconnect from all connected buckets + #cmd_get_buckets = "select BucketName from Metadata.`Bucket`;" + cmd_get_buckets = "select Name from Metadata.`Bucket`;" + status, metrics, errors, results = self.execute_statement_on_cbas( + cmd_get_buckets, self.master) + if results is not None and len(results) > 0: + for row in results: + self.disconnect_from_bucket(row['Name'], + disconnect_if_connected=True) + self.log.info( + "********* Disconnected all buckets *********") + else: + self.log.info("********* No buckets to disconnect *********") + + # Drop all datasets + cmd_get_datasets = "select DatasetName from Metadata.`Dataset` where DataverseName != \"Metadata\";" + status, metrics, errors, results = self.execute_statement_on_cbas( + cmd_get_datasets, self.master) + if results is not None and len(results) > 0: + for row in results: + self.drop_dataset(row['DatasetName']) + self.log.info("********* Dropped all datasets *********") + else: + self.log.info("********* No datasets to drop *********") + + # Drop all buckets + status, metrics, errors, results = self.execute_statement_on_cbas( + cmd_get_buckets, self.master) + if results is not None and len(results) > 0: + for row in results: + self.drop_cbas_bucket(row['Name']) + self.log.info("********* Dropped all buckets *********") + else: + self.log.info("********* No buckets to drop *********") + except Exception as e: + self.log.info(e.message) + + def perform_doc_ops_in_all_cb_buckets(self, num_items, operation, start_key=0, end_key=1000): + """ + Create/Update/Delete docs in all cb buckets + :param num_items: No. of items to be created/deleted/updated + :param operation: String - "create","update","delete" + :param start_key: Doc Key to start the operation with + :param end_key: Doc Key to end the operation with + :return: + """ + number = 100 + first = ['james', 'sharon'] + template = '{{ "number": {0}, "first_name": "{1}" , "mutated":0}}' + gen_load = DocumentGenerator('test_docs', template, [number, ], first, + start=start_key, end=end_key) + self.log.info("%s %s documents..." 
% (operation, num_items)) + try: + self._load_all_buckets(self.master, gen_load, operation, 0) + self._verify_stats_all_buckets([self.master]) + except Exception as e: + self.log.info(e.message) + + def get_num_items_in_cbas_dataset(self, dataset_name): + """ + Gets the count of docs in the cbas dataset + """ + total_items = -1 + mutated_items = -1 + cmd_get_num_items = "select count(*) from %s;" % dataset_name + cmd_get_num_mutated_items = "select count(*) from %s where mutated>0;" % dataset_name + + status, metrics, errors, results = self.execute_statement_on_cbas( + cmd_get_num_items, self.master) + if status != "success": + self.log.error("Query failed") + else: + self.log.info("No. of items in CBAS dataset {0} : {1}".format(dataset_name,results[0]['$1'])) + total_items = results[0]['$1'] + + status, metrics, errors, results = self.execute_statement_on_cbas( + cmd_get_num_mutated_items, self.master) + if status != "success": + self.log.error("Query failed") + else: + self.log.info("No. of items mutated in CBAS dataset {0} : {1}".format(dataset_name, results[0]['$1'])) + mutated_items = results[0]['$1'] + + return total_items, mutated_items + + def validate_cbas_dataset_items_count(self, dataset_name, expected_count, expected_mutated_count=0): + """ + Compares the count of CBAS dataset total and mutated items with the expected values. + """ + count, mutated_count = self.get_num_items_in_cbas_dataset(dataset_name) + tries = 12 + if expected_mutated_count: + while (count < expected_count or mutated_count < expected_mutated_count) and tries > 0: + self.sleep(10) + count, mutated_count = self.get_num_items_in_cbas_dataset(dataset_name) + tries -= 1 + else : + while count < expected_count and tries > 0: + self.sleep(10) + count, mutated_count = self.get_num_items_in_cbas_dataset( + dataset_name) + tries -= 1 + + self.log.info("Expected Count: %s, Actual Count: %s" % (expected_count, count)) + self.log.info("Expected Mutated Count: %s, Actual Mutated Count: %s" % (expected_mutated_count, mutated_count)) + + if count != expected_count: + return False + elif mutated_count == expected_mutated_count: + return True + else: + return False \ No newline at end of file diff --git a/pytests/cbas/cbas_bucket_operations.py b/pytests/cbas/cbas_bucket_operations.py new file mode 100644 index 000000000..db571bc1d --- /dev/null +++ b/pytests/cbas/cbas_bucket_operations.py @@ -0,0 +1,263 @@ +from cbas_base import * + + +class CBASBucketOperations(CBASBaseTest): + def setUp(self): + super(CBASBucketOperations, self).setUp() + + def tearDown(self): + super(CBASBucketOperations, self).tearDown() + + def setup_for_test(self, skip_data_loading=False): + if not skip_data_loading: + # Load Couchbase bucket first. + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", 0, + self.num_items) + + # Create bucket on CBAS + self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_name=self.cb_bucket_name, + cb_server_ip=self.cb_server_ip) + + # Create dataset on the CBAS bucket + self.create_dataset_on_bucket(cbas_bucket_name=self.cbas_bucket_name, + cbas_dataset_name=self.cbas_dataset_name) + + # Connect to Bucket + self.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_password=self.cb_bucket_password) + + if not skip_data_loading: + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + self.num_items): + self.fail( + "No. 
of items in CBAS dataset do not match that in the CB bucket") + + def load_docs_in_cb_bucket_before_cbas_connect(self): + self.setup_for_test() + + def load_docs_in_cb_bucket_before_and_after_cbas_connect(self): + self.setup_for_test() + + # Load more docs in Couchbase bucket. + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", + self.num_items, + self.num_items * 2) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + self.num_items * 2): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") + + def load_docs_in_cb_bucket_after_cbas_connect(self): + self.setup_for_test(skip_data_loading=True) + + # Load Couchbase bucket first. + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", 0, + self.num_items) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + self.num_items): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") + + def delete_some_docs_in_cb_bucket(self): + self.setup_for_test() + + # Delete some docs in Couchbase bucket. + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "delete", 0, + self.num_items / 2) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + self.num_items / 2): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") + + def delete_all_docs_in_cb_bucket(self): + self.setup_for_test() + + # Delete all docs in Couchbase bucket. + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "delete", 0, + self.num_items) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + 0): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") + + def update_some_docs_in_cb_bucket(self): + self.setup_for_test() + + # Update some docs in Couchbase bucket + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "update", 0, + self.num_items / 10) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + self.num_items, + self.num_items / 10): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") + + def update_all_docs_in_cb_bucket(self): + self.setup_for_test() + + # Update all docs in Couchbase bucket + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "update", 0, + self.num_items) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + self.num_items, + self.num_items): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") + + def create_update_delete_cb_bucket_then_cbas_connect(self): + self.setup_for_test() + + # Disconnect from bucket + self.disconnect_from_bucket(self.cbas_bucket_name) + + # Perform Create, Update, Delete ops in the CB bucket + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", + self.num_items, + self.num_items * 2) + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "update", 0, + self.num_items) + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "delete", 0, + self.num_items / 2) + + # Connect to Bucket + self.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_password=self.cb_bucket_password) + + # Validate no. 
of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + self.num_items * 3 / 2, + self.num_items / 2): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") + + def create_update_delete_cb_bucket_with_cbas_connected(self): + self.setup_for_test() + + # Perform Create, Update, Delete ops in the CB bucket + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "create", + self.num_items, + self.num_items * 2) + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "update", 0, + self.num_items) + self.perform_doc_ops_in_all_cb_buckets(self.num_items, "delete", 0, + self.num_items / 2) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + self.num_items * 3 / 2, + self.num_items / 2): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") + + def flush_cb_bucket_with_cbas_connected(self): + self.setup_for_test() + + # Flush the CB bucket + self.cluster.bucket_flush(server=self.master, + bucket=self.cb_bucket_name) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + 0): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") + + def flush_cb_bucket_then_cbas_connect(self): + self.setup_for_test() + + # Disconnect from bucket + self.disconnect_from_bucket(self.cbas_bucket_name) + + # Flush the CB bucket + self.cluster.bucket_flush(server=self.master, + bucket=self.cb_bucket_name) + + # Connect to Bucket + self.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_password=self.cb_bucket_password) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + 0): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") + + def delete_cb_bucket_with_cbas_connected(self): + self.setup_for_test() + + # Delete the CB bucket + self.cluster.bucket_delete(server=self.master, + bucket=self.cb_bucket_name) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + 0): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") + + def delete_cb_bucket_then_cbas_connect(self): + self.setup_for_test() + + # Disconnect from bucket + self.disconnect_from_bucket(self.cbas_bucket_name) + + # Delete the CB bucket + self.cluster.bucket_delete(server=self.master, + bucket=self.cb_bucket_name) + + # Connect to Bucket + self.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_password=self.cb_bucket_password) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + 0): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") + + def compact_cb_bucket_with_cbas_connected(self): + self.setup_for_test() + + # Compact the CB bucket + self.cluster.compact_bucket(server=self.master, + bucket=self.cb_bucket_name) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + self.num_items): + self.fail( + "No. 
of items in CBAS dataset do not match that in the CB bucket") + + def compact_cb_bucket_then_cbas_connect(self): + self.setup_for_test() + + # Disconnect from bucket + self.disconnect_from_bucket(self.cbas_bucket_name) + + # Compact the CB bucket + self.cluster.compact_bucket(server=self.master, + bucket=self.cb_bucket_name) + + # Connect to Bucket + self.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_password=self.cb_bucket_password) + + # Validate no. of items in CBAS dataset + if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name, + self.num_items): + self.fail( + "No. of items in CBAS dataset do not match that in the CB bucket") diff --git a/pytests/cbas/cbas_demo_queries.py b/pytests/cbas/cbas_demo_queries.py new file mode 100644 index 000000000..02b15b713 --- /dev/null +++ b/pytests/cbas/cbas_demo_queries.py @@ -0,0 +1,169 @@ +from cbas_base import * + + +class QueryDetails: + query_details = [ + { + "id": "extential_quantification", + "dataset_id": "beer-sample", + "query": "WITH nested_breweries AS (SELECT bw.name AS brewer, bw.phone, (SELECT br.name, br.abv FROM beers br WHERE br.brewery_id = meta(bw).id) AS beers FROM breweries bw) SELECT VALUE nb FROM nested_breweries nb WHERE (SOME b IN nb.beers SATISFIES b.name LIKE \"%IPA%\") LIMIT 5;", + "expected_status": "success", + "expected_hits": "5" + }, + { + "id": "universal_quantification", + "dataset_id": "beer-sample", + "query": "WITH nested_breweries AS (SELECT bw.name AS brewer, bw.phone, (SELECT br.name, br.abv FROM beers br WHERE br.brewery_id = meta(bw).id) AS beers FROM breweries bw) SELECT VALUE nb FROM nested_breweries nb WHERE (EVERY b IN nb.beers SATISFIES b.name LIKE \"%IPA%\") LIMIT 5;", + "expected_status": "success", + "expected_hits": "5" + }, + { + "id": "lookup_table_metadata", + "dataset_id": "beer-sample", + "query": "select DataverseName from Metadata.`Dataverse`;", + "expected_status": "success", + "expected_hits": "2" + }, + { + "id": "simple_aggregation", + "dataset_id": "beer-sample", + "query": "SELECT COUNT(*) AS num_beers FROM beers;", + "expected_status": "success", + "expected_hits": "1" + }, + { + "id": "simple_aggregation_unwrapped", + "dataset_id": "beer-sample", + "query": "SELECT VALUE COUNT(b) FROM beers b;", + "expected_status": "success", + "expected_hits": "1" + }, + { + "id": "aggregation_array_count", + "dataset_id": "beer-sample", + "query": "SELECT VALUE ARRAY_COUNT((SELECT b FROM beers b));", + "expected_status": "success", + "expected_hits": "1" + }, + { + "id": "grouping_aggregation", + "dataset_id": "beer-sample", + "query": "SELECT br.brewery_id, COUNT(*) AS num_beers FROM beers br GROUP BY br.brewery_id HAVING num_beers > 30 and br.brewery_id != \"\";", + "expected_status": "success", + "expected_hits": "11" + }, + { + "id": "hash_based_grouping_aggregation", + "dataset_id": "beer-sample", + "query": "SELECT br.brewery_id, COUNT(*) AS num_beers FROM beers br /*+ hash */ GROUP BY br.brewery_id HAVING num_beers > 30 and br.brewery_id != \"\";", + "expected_status": "success", + "expected_hits": "11" + }, + { + "id": "grouping_limits", + "dataset_id": "beer-sample", + "query": "SELECT bw.name, COUNT(*) AS num_beers, AVG(br.abv) AS abv_avg, MIN(br.abv) AS abv_min, MAX(br.abv) AS abv_max FROM breweries bw, beers br WHERE br.brewery_id = meta(bw).id GROUP BY bw.name ORDER BY num_beers DESC LIMIT 3;", + "expected_status": "success", + "expected_hits": "3" + }, + { + "id": "equijoin_limits", + "dataset_id": "beer-sample", + "query": "SELECT * FROM 
beers b1, beers b2 WHERE b1.name = b2.name AND b1.brewery_id != b2.brewery_id LIMIT 20;", + "expected_status": "success", + "expected_hits": "20" + } + ] + + dataset_details = [ + { + "id": "beer-sample", + "cb_bucket_name": "beer-sample", + "cbas_bucket_name": "beerBucket", + "ds_details": [ + { + "ds_name": "beers", + "where_field": "type", + "where_value": "beer" + }, + { + "ds_name": "breweries", + "where_field": "type", + "where_value": "brewery" + } + ] + } + ] + + def __init__(self, query_id): + self.query_id = query_id + + def get_query_and_dataset_details(self): + query_record = None + dataset_record = None + + for record in self.query_details: + if record['id'] == self.query_id: + query_record = record + + if query_record: + for record in self.dataset_details: + if record['id'] == query_record['dataset_id']: + dataset_record = record + + return query_record, dataset_record + + +class CBASDemoQueries(CBASBaseTest): + def setUp(self): + super(CBASDemoQueries, self).setUp() + + def tearDown(self): + super(CBASDemoQueries, self).tearDown() + + def test_demo_query(self): + query_details = QueryDetails(self.query_id) + query_record, dataset_record = query_details.get_query_and_dataset_details() + + # Delete Default bucket and load travel-sample bucket + self.cluster.bucket_delete(server=self.master, bucket="default") + self.load_sample_buckets(server=self.master, + bucketName=dataset_record['cb_bucket_name']) + + # Create bucket on CBAS + self.create_bucket_on_cbas( + cbas_bucket_name=dataset_record['cbas_bucket_name'], + cb_bucket_name=dataset_record['cb_bucket_name'], + cb_server_ip=self.cb_server_ip) + + # Create datasets on the CBAS bucket + for dataset in dataset_record['ds_details']: + self.create_dataset_on_bucket( + cbas_bucket_name=dataset_record['cbas_bucket_name'], + cbas_dataset_name=dataset['ds_name'], + where_field=dataset['where_field'], + where_value=dataset['where_value']) + + # Connect to Bucket + self.connect_to_bucket( + cbas_bucket_name=dataset_record['cbas_bucket_name'], + cb_bucket_password=self.cb_bucket_password) + + self.sleep(5) + + # Execute Query + status, metrics, errors, results = self.execute_statement_on_cbas( + query_record['query'], self.master) + self.log.info('Actual Status : ' + status) + self.log.info('Expected Status : ' + query_record['expected_status']) + self.log.info('Actual # Hits : ' + metrics['resultCount']) + self.log.info('Expected # Hits : ' + query_record['expected_hits']) + + # Validate Query output + result = False + if (status == query_record['expected_status']) and ( + metrics['resultCount'] == query_record['expected_hits']): + result = True + + if not result: + self.fail("FAIL : Status and/or # Hits not as expected") diff --git a/pytests/cbas/cbas_functional_tests.py b/pytests/cbas/cbas_functional_tests.py new file mode 100644 index 000000000..934b75478 --- /dev/null +++ b/pytests/cbas/cbas_functional_tests.py @@ -0,0 +1,264 @@ +from cbas_base import * + + +class CBASFunctionalTests(CBASBaseTest): + def setUp(self): + super(CBASFunctionalTests, self).setUp() + self.validate_error = False + if self.expected_error: + self.validate_error = True + + def tearDown(self): + super(CBASFunctionalTests, self).tearDown() + + def test_create_bucket_on_cbas(self): + # Delete Default bucket and load travel-sample bucket + self.cluster.bucket_delete(server=self.master, bucket="default") + self.load_sample_buckets(server=self.master, bucketName="travel-sample") + + # Create bucket on CBAS + result = self.create_bucket_on_cbas( + 
cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_name=self.cb_bucket_name, + cb_server_ip=self.cb_server_ip, + validate_error_msg=self.validate_error) + if not result: + self.fail("FAIL : Actual error msg does not match the expected") + + def test_create_another_bucket_on_cbas(self): + # Delete Default bucket and load travel-sample bucket + self.cluster.bucket_delete(server=self.master, bucket="default") + self.load_sample_buckets(server=self.master, bucketName="travel-sample") + + # Create first bucket on CBAS + self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_name=self.cb_bucket_name, + cb_server_ip=self.cb_server_ip) + + # Create another bucket on CBAS + result = self.create_bucket_on_cbas( + cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_name=self.cb_bucket_name, + cb_server_ip=self.cb_server_ip, + validate_error_msg=self.validate_error) + if not result: + self.fail("Test failed") + + def test_create_dataset_on_bucket(self): + # Delete Default bucket and load travel-sample bucket + self.cluster.bucket_delete(server=self.master, bucket="default") + self.load_sample_buckets(server=self.master, bucketName="travel-sample") + + # Create bucket on CBAS + self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_name=self.cb_bucket_name, + cb_server_ip=self.cb_server_ip) + + # Create dataset on the CBAS bucket + result = self.create_dataset_on_bucket( + cbas_bucket_name=self.cbas_bucket_name_invalid, + cbas_dataset_name=self.cbas_dataset_name, + validate_error_msg=self.validate_error) + + if not result: + self.fail("FAIL : Actual error msg does not match the expected") + + def test_create_another_dataset_on_bucket(self): + # Delete Default bucket and load travel-sample bucket + self.cluster.bucket_delete(server=self.master, bucket="default") + self.load_sample_buckets(server=self.master, bucketName="travel-sample") + + # Create bucket on CBAS + self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_name=self.cb_bucket_name, + cb_server_ip=self.cb_server_ip) + + # Create first dataset on the CBAS bucket + self.create_dataset_on_bucket(cbas_bucket_name=self.cbas_bucket_name, + cbas_dataset_name=self.cbas_dataset_name) + + # Create another dataset on the CBAS bucket + result = self.create_dataset_on_bucket( + cbas_bucket_name=self.cbas_bucket_name, + cbas_dataset_name=self.cbas_dataset2_name, + validate_error_msg=self.validate_error) + if not result: + self.fail("FAIL : Actual error msg does not match the expected") + + def test_connect_bucket(self): + # Delete Default bucket and load travel-sample bucket + self.cluster.bucket_delete(server=self.master, bucket="default") + self.load_sample_buckets(server=self.master, + bucketName="travel-sample") + + # Create bucket on CBAS + self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_name=self.cb_bucket_name, + cb_server_ip=self.cb_server_ip) + + # Create dataset on the CBAS bucket + if not self.skip_create_dataset: + self.create_dataset_on_bucket( + cbas_bucket_name=self.cbas_bucket_name, + cbas_dataset_name=self.cbas_dataset_name) + + # Connect to Bucket + result = self.connect_to_bucket(cbas_bucket_name= + self.cbas_bucket_name_invalid, + cb_bucket_password=self.cb_bucket_password, + validate_error_msg=self.validate_error) + + if not result: + self.fail("FAIL : Actual error msg does not match the expected") + + def test_connect_bucket_on_a_connected_bucket(self): + # Delete Default bucket and load travel-sample bucket + 
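+ # The second connect_to_bucket call at the end of this test is the scenario under test: when an expected_error param is supplied it validates the error message returned for the duplicate connect, otherwise the repeated connect is expected to succeed.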
self.cluster.bucket_delete(server=self.master, bucket="default") + self.load_sample_buckets(server=self.master, + bucketName="travel-sample") + + # Create bucket on CBAS + self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_name=self.cb_bucket_name, + cb_server_ip=self.cb_server_ip) + + # Create dataset on the CBAS bucket + self.create_dataset_on_bucket( + cbas_bucket_name=self.cbas_bucket_name, + cbas_dataset_name=self.cbas_dataset_name) + + # Connect to Bucket + self.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_password=self.cb_bucket_password) + + # Create another connection to bucket + result = self.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_password=self.cb_bucket_password, + validate_error_msg=self.validate_error) + + if not result: + self.fail("FAIL : Actual error msg does not match the expected") + + def test_disconnect_bucket(self): + # Delete Default bucket and load travel-sample bucket + self.cluster.bucket_delete(server=self.master, bucket="default") + self.load_sample_buckets(server=self.master, + bucketName="travel-sample") + + # Create bucket on CBAS + self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_name=self.cb_bucket_name, + cb_server_ip=self.cb_server_ip) + + # Create dataset on the CBAS bucket + self.create_dataset_on_bucket(cbas_bucket_name=self.cbas_bucket_name, + cbas_dataset_name=self.cbas_dataset_name) + + # Connect to Bucket + self.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_password=self.cb_bucket_password) + + # Disconnect from bucket + result = self.disconnect_from_bucket(cbas_bucket_name= + self.cbas_bucket_name_invalid, + disconnect_if_connected= + self.disconnect_if_connected, + validate_error_msg=self.validate_error) + + if not result: + self.fail("FAIL : Actual error msg does not match the expected") + + def test_disconnect_bucket_already_disconnected(self): + # Delete Default bucket and load travel-sample bucket + self.cluster.bucket_delete(server=self.master, bucket="default") + self.load_sample_buckets(server=self.master, bucketName="travel-sample") + + # Create bucket on CBAS + self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_name=self.cb_bucket_name, + cb_server_ip=self.cb_server_ip) + + # Create dataset on the CBAS bucket + self.create_dataset_on_bucket( + cbas_bucket_name=self.cbas_bucket_name, + cbas_dataset_name=self.cbas_dataset_name) + + # Connect to Bucket + self.connect_to_bucket( + cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_password=self.cb_bucket_password) + + # Disconnect from Bucket + self.disconnect_from_bucket(cbas_bucket_name=self.cbas_bucket_name) + + # Disconnect again from the same bucket + result = self.disconnect_from_bucket(cbas_bucket_name= + self.cbas_bucket_name, + disconnect_if_connected= + self.disconnect_if_connected, + validate_error_msg=self.validate_error) + + if not result: + self.fail("FAIL : Actual error msg does not match the expected") + + def test_drop_dataset_on_bucket(self): + # Delete Default bucket and load travel-sample bucket + self.cluster.bucket_delete(server=self.master, bucket="default") + self.load_sample_buckets(server=self.master, bucketName="travel-sample") + + # Create bucket on CBAS + self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_name=self.cb_bucket_name, + cb_server_ip=self.cb_server_ip) + + # Create dataset on the CBAS bucket + 
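+ # Note: the drop_dataset call below targets cbas_dataset_name_invalid, which defaults to the valid dataset name, so both the success path and error scenarios can be driven from the test configuration.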
self.create_dataset_on_bucket(cbas_bucket_name=self.cbas_bucket_name, + cbas_dataset_name=self.cbas_dataset_name) + + # Connect to Bucket + self.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_password=self.cb_bucket_password) + + # Drop Connection + if not self.skip_drop_connection: + self.disconnect_from_bucket(self.cbas_bucket_name) + + # Drop dataset + result = self.drop_dataset( + cbas_dataset_name=self.cbas_dataset_name_invalid, + validate_error_msg=self.validate_error) + + if not result: + self.fail("FAIL : Actual error msg does not match the expected") + + def test_drop_cbas_bucket(self): + # Delete Default bucket and load travel-sample bucket + self.cluster.bucket_delete(server=self.master, bucket="default") + self.load_sample_buckets(server=self.master, bucketName="travel-sample") + + # Create bucket on CBAS + self.create_bucket_on_cbas(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_name=self.cb_bucket_name, + cb_server_ip=self.cb_server_ip) + + # Create dataset on the CBAS bucket + self.create_dataset_on_bucket(cbas_bucket_name=self.cbas_bucket_name, + cbas_dataset_name=self.cbas_dataset_name) + + # Connect to Bucket + self.connect_to_bucket(cbas_bucket_name=self.cbas_bucket_name, + cb_bucket_password=self.cb_bucket_password) + + # Drop connection and dataset + if not self.skip_drop_connection: + self.disconnect_from_bucket(self.cbas_bucket_name) + + if not self.skip_drop_dataset: + self.drop_dataset(self.cbas_dataset_name) + + result = self.drop_cbas_bucket( + cbas_bucket_name=self.cbas_bucket_name_invalid, + validate_error_msg=self.validate_error) + + if not result: + self.fail("FAIL : Actual error msg does not match the expected") diff --git a/pytests/clitest/cli_base.py b/pytests/clitest/cli_base.py index 357e59adf..f08f32d05 100644 --- a/pytests/clitest/cli_base.py +++ b/pytests/clitest/cli_base.py @@ -4,7 +4,9 @@ from testconstants import LINUX_COUCHBASE_BIN_PATH, LINUX_ROOT_PATH from testconstants import WIN_COUCHBASE_BIN_PATH, WIN_ROOT_PATH from testconstants import MAC_COUCHBASE_BIN_PATH -from testconstants import LINUX_COUCHBASE_SAMPLE_PATH, WIN_COUCHBASE_SAMPLE_PATH +from testconstants import LINUX_COUCHBASE_SAMPLE_PATH, WIN_COUCHBASE_SAMPLE_PATH,\ + WIN_BACKUP_C_PATH, WIN_BACKUP_PATH, LINUX_BACKUP_PATH, LINUX_COUCHBASE_LOGS_PATH, \ + WIN_COUCHBASE_LOGS_PATH import logger import random import time @@ -21,6 +23,14 @@ def setUp(self): self.vbucket_count = 1024 self.shell = RemoteMachineShellConnection(self.master) self.rest = RestConnection(self.master) + self.import_back = self.input.param("import_back", False) + if self.import_back: + if len(self.servers) < 3: + self.fail("This test needs a minimum of 3 VMs to run") + self.test_type = self.input.param("test_type", "import") + self.import_file = self.input.param("import_file", None) + self.imex_type = self.input.param("imex_type", "json") + self.format_type = self.input.param("format_type", None) self.node_version = self.rest.get_nodes_version() self.force_failover = self.input.param("force_failover", False) info = self.shell.extract_remote_info() @@ -33,26 +43,45 @@ def setUp(self): self.cli_command_path = LINUX_COUCHBASE_BIN_PATH self.root_path = LINUX_ROOT_PATH self.tmp_path = "/tmp/" + self.cmd_backup_path = LINUX_BACKUP_PATH + self.backup_path = LINUX_BACKUP_PATH + self.cmd_ext = "" + self.src_file = "" + self.des_file = "" self.sample_files_path = LINUX_COUCHBASE_SAMPLE_PATH + self.log_path = LINUX_COUCHBASE_LOGS_PATH if type == 'windows': self.os = 'windows' + self.cmd_ext = ".exe" self.root_path = 
WIN_ROOT_PATH self.tmp_path = WIN_TMP_PATH + self.cmd_backup_path = WIN_BACKUP_C_PATH + self.backup_path = WIN_BACKUP_PATH self.cli_command_path = WIN_COUCHBASE_BIN_PATH self.sample_files_path = WIN_COUCHBASE_SAMPLE_PATH + self.log_path = WIN_COUCHBASE_LOGS_PATH if info.distribution_type.lower() == 'mac': self.os = 'mac' self.cli_command_path = MAC_COUCHBASE_BIN_PATH self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(type) self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username) self.couchbase_password = "%s" % (self.input.membase_settings.rest_password) + self.cb_login_info = "%s:%s" % (self.couchbase_usrname, + self.couchbase_password) + self.path_type = self.input.param("path_type", None) + if self.path_type is None: + self.log.info("Test command with absolute path ") + elif self.path_type == "local": + self.log.info("Test command at %s dir " % self.cli_command_path) + self.cli_command_path = "cd %s; ./" % self.cli_command_path self.cli_command = self.input.param("cli_command", None) self.command_options = self.input.param("command_options", None) if self.command_options is not None: self.command_options = self.command_options.split(";") if str(self.__class__).find('couchbase_clitest.CouchbaseCliTest') == -1: - servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)] - self.cluster.rebalance(self.servers[:1], servers_in, []) + if len(self.servers) > 1 and int(self.nodes_init) == 1: + servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)] + self.cluster.rebalance(self.servers[:1], servers_in, []) def tearDown(self): if not self.input.param("skip_cleanup", True): @@ -118,6 +147,7 @@ def verifyServices(self, server, expected_services): rest = RestConnection(server) hostname = "%s:%s" % (server.ip, server.port) expected_services = expected_services.replace("data", "kv") + expected_services = expected_services.replace("query", "n1ql") expected_services = expected_services.split(",") nodes_services = rest.get_nodes_services() @@ -137,38 +167,6 @@ def verifyServices(self, server, expected_services): log.info("Services on %s not found, the server may not exist", hostname) return False - - def verifyIndexStorageMode(self, server, storage_mode): - """Verifies that the index storage mode was set properly - - Options: - server - A TestInputServer object of the server to connect to - storage_mode - A string containing the expected storage mode - - Returns a boolean corresponding to whether or not the expected index storage mode was set. 
- """ - # TODO - The ports for services should be inferred by the rest client - server.index_port = 9102 - rest = RestConnection(server) - settings = rest.get_index_settings() - - if storage_mode == "default": - storage_mode = "forestdb" - elif storage_mode == "memopt": - storage_mode = "memory_optimized" - - if "indexer.settings.storage_mode" in settings: - if settings["indexer.settings.storage_mode"] == storage_mode: - return True - else: - log.info("Index storage mode does not match expected (`%s` vs `%s`)", - settings["indexer.settings.storage_mode"], storage_mode) - else: - log.info("Index storage mode not found in settings") - - return False - - def verifyRamQuotas(self, server, data, index, fts): """Verifies that the RAM quotas for each service are set properly @@ -315,6 +313,406 @@ def verifyNotificationsEnabled(self, server): return True return False + def verifyIndexSettings(self, server, max_rollbacks, stable_snap_interval, mem_snap_interval, + storage_mode, threads, log_level): + rest = RestConnection(server) + settings = rest.get_global_index_settings() + + if storage_mode == "default": + storage_mode = "forestdb" + elif storage_mode == "memopt": + storage_mode = "memory_optimized" + + if max_rollbacks and str(settings["maxRollbackPoints"]) != str(max_rollbacks): + log.info("Max rollbacks does not match (%s vs. %s)", str(settings["maxRollbackPoints"]), str(max_rollbacks)) + return False + if stable_snap_interval and str(settings["stableSnapshotInterval"]) != str(stable_snap_interval): + log.info("Stable snapshot interval does not match (%s vs. %s)", str(settings["stableSnapshotInterval"]), + str(stable_snap_interval)) + return False + if mem_snap_interval and str(settings["memorySnapshotInterval"]) != str(mem_snap_interval): + log.info("Memory snapshot interval does not match (%s vs. %s)", str(settings["memorySnapshotInterval"]), + str(mem_snap_interval)) + return False + if storage_mode and str(settings["storageMode"]) != str(storage_mode): + log.info("Storage mode does not match (%s vs. %s)", str(settings["storageMode"]), str(storage_mode)) + return False + if threads and str(settings["indexerThreads"]) != str(threads): + log.info("Threads does not match (%s vs. %s)", str(settings["indexerThreads"]), str(threads)) + return False + if log_level and str(settings["logLevel"]) != str(log_level): + log.info("Log level does not match (%s vs. %s)", str(settings["logLevel"]), str(log_level)) + return False + + return True + + def verifyAutofailoverSettings(self, server, enabled, timeout): + rest = RestConnection(server) + settings = rest.get_autofailover_settings() + + if enabled and not ((str(enabled) == "1" and settings.enabled) or (str(enabled) == "0" and not settings.enabled)): + log.info("Enabled does not match (%s vs. %s)", str(enabled), str(settings.enabled)) + return False + if timeout and str(settings.timeout) != str(timeout): + log.info("Timeout does not match (%s vs. %s)", str(timeout), str(settings.timeout)) + return False + + return True + + def verifyAuditSettings(self, server, enabled, log_path, rotate_interval): + rest = RestConnection(server) + settings = rest.getAuditSettings() + + if enabled and not ((str(enabled) == "1" and settings["auditdEnabled"]) or (str(enabled) == "0" and not settings["auditdEnabled"])): + log.info("Enabled does not match (%s vs. %s)", str(enabled), str(settings["auditdEnabled"])) + return False + if log_path and str(str(settings["logPath"])) != str(log_path): + log.info("Log path does not match (%s vs. 
%s)", str(log_path), str(settings["logPath"])) + return False + + if rotate_interval and str(str(settings["rotateInterval"])) != str(rotate_interval): + log.info("Rotate interval does not match (%s vs. %s)", str(rotate_interval), str(settings["rotateInterval"])) + return False + + return True + + def verifyPendingServer(self, server, server_to_add, group_name, services): + rest = RestConnection(server) + settings = rest.get_all_zones_info() + if not settings or "groups" not in settings: + log.info("Group settings payload appears to be invalid") + return False + + expected_services = services.replace("data", "kv") + expected_services = expected_services.replace("query", "n1ql") + expected_services = expected_services.split(",") + + for group in settings["groups"]: + for node in group["nodes"]: + if node["hostname"] == server_to_add: + if node["clusterMembership"] != "inactiveAdded": + log.info("Node `%s` not in pending status", server_to_add) + return False + + if group["name"] != group_name: + log.info("Node `%s` not in correct group (%s vs %s)", node["hostname"], group_name, + group["name"]) + return False + + if len(node["services"]) != len(expected_services): + log.info("Services do not match on %s (%s vs %s) ", node["hostname"], services, + ",".join(node["services"])) + return False + + for service in node["services"]: + if service not in expected_services: + log.info("Services do not match on %s (%s vs %s) ", node["hostname"], services, + ",".join(node["services"])) + return False + return True + + log.info("Node `%s` not found in nodes list", server_to_add) + return False + + def verifyPendingServerDoesNotExist(self, server, server_to_add): + rest = RestConnection(server) + settings = rest.get_all_zones_info() + if not settings or "groups" not in settings: + log.info("Group settings payload appears to be invalid") + return False + + for group in settings["groups"]: + for node in group["nodes"]: + if node["hostname"] == server_to_add: + return False + + log.info("Node `%s` not found in nodes list", server_to_add) + return True + + def verifyActiveServers(self, server, expected_num_servers): + return self._verifyServersByStatus(server, expected_num_servers, "active") + + def verifyFailedServers(self, server, expected_num_servers): + return self._verifyServersByStatus(server, expected_num_servers, "inactiveFailed") + + def _verifyServersByStatus(self, server, expected_num_servers, status): + rest = RestConnection(server) + settings = rest.get_pools_default() + + count = 0 + for node in settings["nodes"]: + if node["clusterMembership"] == status: + count += 1 + + return count == expected_num_servers + + def verifyRecoveryType(self, server, recovery_servers, recovery_type): + rest = RestConnection(server) + settings = rest.get_all_zones_info() + if not settings or "groups" not in settings: + log.info("Group settings payload appears to be invalid") + return False + + if not recovery_servers: + return True + + num_found = 0 + recovery_servers = recovery_servers.split(",") + for group in settings["groups"]: + for node in group["nodes"]: + for rs in recovery_servers: + if node["hostname"] == rs: + if node["recoveryType"] != recovery_type: + log.info("Node %s doesn't contain recovery type %s ", rs, recovery_type) + return False + else: + num_found = num_found + 1 + + if num_found == len(recovery_servers): + return True + + log.info("Node `%s` not found in nodes list", ",".join(recovery_servers)) + return False + + def verifyReadOnlyUser(self, server, username): + rest = RestConnection(server) 
+ ro_user, status = rest.get_ro_user() + if not status: + log.info("Getting the read only user failed") + return False + + if ro_user.startswith('"') and ro_user.endswith('"'): + ro_user = ro_user[1:-1] + + if ro_user != username: + log.info("Read only user name does not match (%s vs %s)", ro_user, username) + return False + return True + + def verifyLdapSettings(self, server, admins, ro_admins, default, enabled): + rest = RestConnection(server) + settings = rest.ldapRestOperationGetResponse() + + if admins is None: + admins = [] + else: + admins = admins.split(",") + + if ro_admins is None: + ro_admins = [] + else: + ro_admins = ro_admins.split(",") + + if str(enabled) == "0": + admins = [] + ro_admins = [] + + if default == "admins" and str(enabled) == "1": + if settings["admins"] != "asterisk": + log.info("Admins don't match (%s vs asterisk)", settings["admins"]) + return False + elif not self._list_compare(settings["admins"], admins): + log.info("Admins don't match (%s vs %s)", settings["admins"], admins) + return False + + if default == "roadmins" and str(enabled) == "1": + if settings["roAdmins"] != "asterisk": + log.info("Read only admins don't match (%s vs asterisk)", settings["roAdmins"]) + return False + elif not self._list_compare(settings["roAdmins"], ro_admins): + log.info("Read only admins don't match (%s vs %s)", settings["roAdmins"], ro_admins) + return False + + return True + + def verifyAlertSettings(self, server, enabled, email_recipients, email_sender, email_username, email_password, email_host, + email_port, encrypted, alert_af_node, alert_af_max_reached, alert_af_node_down, alert_af_small, + alert_af_disable, alert_ip_changed, alert_disk_space, alert_meta_overhead, alert_meta_oom, + alert_write_failed, alert_audit_dropped): + rest = RestConnection(server) + settings = rest.get_alerts_settings() + print settings + + if not enabled: + if not settings["enabled"]: + return True + else: + log.info("Alerts should be disabled") + return False + + if encrypted is None or encrypted == "0": + encrypted = False + else: + encrypted = True + + if email_recipients is not None and not self._list_compare(email_recipients.split(","), settings["recipients"]): + log.info("Email recipients don't match (%s vs %s)", email_recipients.split(","), settings["recipients"]) + return False + + if email_sender is not None and email_sender != settings["sender"]: + log.info("Email sender does not match (%s vs %s)", email_sender, settings["sender"]) + return False + + if email_username is not None and email_username != settings["emailServer"]["user"]: + log.info("Email username does not match (%s vs %s)", email_username, settings["emailServer"]["user"]) + return False + + if email_host is not None and email_host != settings["emailServer"]["host"]: + log.info("Email host does not match (%s vs %s)", email_host, settings["emailServer"]["host"]) + return False + + if email_port is not None and email_port != settings["emailServer"]["port"]: + log.info("Email port does not match (%s vs %s)", email_port, settings["emailServer"]["port"]) + return False + + if encrypted is not None and encrypted != settings["emailServer"]["encrypt"]: + log.info("Email encryption does not match (%s vs %s)", encrypted, settings["emailServer"]["encrypt"]) + return False + + alerts = list() + if alert_af_node: + alerts.append('auto_failover_node') + if alert_af_max_reached: + alerts.append('auto_failover_maximum_reached') + if alert_af_node_down: + alerts.append('auto_failover_other_nodes_down') + if alert_af_small: + 
alerts.append('auto_failover_cluster_too_small') + if alert_af_disable: + alerts.append('auto_failover_disabled') + if alert_ip_changed: + alerts.append('ip') + if alert_disk_space: + alerts.append('disk') + if alert_meta_overhead: + alerts.append('overhead') + if alert_meta_oom: + alerts.append('ep_oom_errors') + if alert_write_failed: + alerts.append('ep_item_commit_failed') + if alert_audit_dropped: + alerts.append('audit_dropped_events') + + if not self._list_compare(alerts, settings["alerts"]): + log.info("Alerts don't match (%s vs %s)", alerts, settings["alerts"]) + return False + + return True + + def verify_node_settings(self, server, data_path, index_path, hostname): + rest = RestConnection(server) + node_settings = rest.get_nodes_self() + + if data_path != node_settings.storage[0].path: + log.info("Data path does not match (%s vs %s)", data_path, node_settings.storage[0].path) + return False + if index_path != node_settings.storage[0].index_path: + log.info("Index path does not match (%s vs %s)", index_path, node_settings.storage[0].index_path) + return False + if hostname is not None: + if hostname != node_settings.hostname: + log.info("Hostname does not match (%s vs %s)", hostname, node_settings.hostname) + return False + return True + + def verifyCompactionSettings(self, server, db_frag_perc, db_frag_size, view_frag_perc, view_frag_size, from_period, + to_period, abort_outside, parallel_compact, purgeInt): + rest = RestConnection(server) + settings = rest.get_auto_compaction_settings() + ac = settings["autoCompactionSettings"] + + if db_frag_perc is not None and str(db_frag_perc) != str(ac["databaseFragmentationThreshold"]["percentage"]): + log.info("DB frag perc does not match (%s vs %s)", str(db_frag_perc), + str(ac["databaseFragmentationThreshold"]["percentage"])) + return False + + if db_frag_size is not None and str(db_frag_size*1024**2) != str(ac["databaseFragmentationThreshold"]["size"]): + log.info("DB frag size does not match (%s vs %s)", str(db_frag_size*1024**2), + str(ac["databaseFragmentationThreshold"]["size"])) + return False + + if view_frag_perc is not None and str(view_frag_perc) != str(ac["viewFragmentationThreshold"]["percentage"]): + log.info("View frag perc does not match (%s vs %s)", str(view_frag_perc), + str(ac["viewFragmentationThreshold"]["percentage"])) + return False + + if view_frag_size is not None and str(view_frag_size*1024**2) != str(ac["viewFragmentationThreshold"]["size"]): + log.info("View frag size does not match (%s vs %s)", str(view_frag_size*1024**2), + str(ac["viewFragmentationThreshold"]["size"])) + return False + + print from_period, to_period + if from_period is not None: + fromHour, fromMin = from_period.split(":", 1) + if int(fromHour) != int(ac["allowedTimePeriod"]["fromHour"]): + log.info("From hour does not match (%s vs %s)", str(fromHour), + str(ac["allowedTimePeriod"]["fromHour"])) + return False + if int(fromMin) != int(ac["allowedTimePeriod"]["fromMinute"]): + log.info("From minute does not match (%s vs %s)", str(fromMin), + str(ac["allowedTimePeriod"]["fromMinute"])) + return False + + if to_period is not None: + toHour, toMin = to_period.split(":", 1) + if int(toHour) != int(ac["allowedTimePeriod"]["toHour"]): + log.info("To hour does not match (%s vs %s)", str(toHour), + str(ac["allowedTimePeriod"]["toHour"])) + return False + if int(toMin) != int(ac["allowedTimePeriod"]["toMinute"]): + log.info("To minute does not match (%s vs %s)", str(toMin), + str(ac["allowedTimePeriod"]["toMinute"])) + return False + + if 
str(abort_outside) == "1": + abort_outside = True + elif str(abort_outside) == "0": + abort_outside = False + + if abort_outside is not None and abort_outside != ac["allowedTimePeriod"]["abortOutside"]: + log.info("Abort outside does not match (%s vs %s)", abort_outside, ac["allowedTimePeriod"]["abortOutside"]) + return False + + if str(parallel_compact) == "1": + parallel_compact = True + elif str(parallel_compact) == "0": + parallel_compact = False + + if parallel_compact is not None and parallel_compact != ac["parallelDBAndViewCompaction"]: + log.info("Parallel compact does not match (%s vs %s)", str(parallel_compact), + str(ac["parallelDBAndViewCompaction"])) + return False + + if purgeInt is not None and str(purgeInt) != str(settings["purgeInterval"]): + log.info("Purge interval does not match (%s vs %s)", str(purgeInt), str(settings["purgeInterval"])) + return False + + return True + + def verifyGroupExists(self, server, name): + rest = RestConnection(server) + groups = rest.get_zone_names() + print groups + + for gname, _ in groups.iteritems(): + if name == gname: + return True + + return False + + def _list_compare(self, list1, list2): + if len(list1) != len(list2): + return False + for elem1 in list1: + found = False + for elem2 in list2: + if elem1 == elem2: + found = True + break + if not found: + return False + return True + def waitForItemCount(self, server, bucket_name, count, timeout=30): rest = RestConnection(server) for sec in range(timeout): diff --git a/pytests/clitest/couchbase_clitest.py b/pytests/clitest/couchbase_clitest.py index 2648406c0..2f71209ca 100644 --- a/pytests/clitest/couchbase_clitest.py +++ b/pytests/clitest/couchbase_clitest.py @@ -2,12 +2,14 @@ import json import os from threading import Thread +import time from membase.api.rest_client import RestConnection from memcached.helper.data_helper import MemcachedClientHelper from TestInput import TestInputSingleton from clitest.cli_base import CliBaseTest from remote.remote_util import RemoteMachineShellConnection +from couchbase_cli import CouchbaseCLI from pprint import pprint from testconstants import CLI_COMMANDS, COUCHBASE_FROM_WATSON,\ COUCHBASE_FROM_SPOCK, LINUX_COUCHBASE_BIN_PATH,\ @@ -296,10 +298,10 @@ def _get_cluster_info(self, remote_client, cluster_host="localhost", cluster_por else: self.fail("server-info return error output") - def _create_bucket(self, remote_client, bucket="default", bucket_type="couchbase", bucket_port=11211, bucket_password=None, \ + def _create_bucket(self, remote_client, bucket="default", bucket_type="couchbase", bucket_password=None, \ bucket_ramsize=200, bucket_replica=1, wait=False, enable_flush=None, enable_index_replica=None): - options = "--bucket={0} --bucket-type={1} --bucket-port={2} --bucket-ramsize={3} --bucket-replica={4}".\ - format(bucket, bucket_type, bucket_port, bucket_ramsize, bucket_replica) + options = "--bucket={0} --bucket-type={1} --bucket-ramsize={2} --bucket-replica={3}".\ + format(bucket, bucket_type, bucket_ramsize, bucket_replica) options += (" --enable-flush={0}".format(enable_flush), "")[enable_flush is None] options += (" --enable-index-replica={0}".format(enable_index_replica), "")[enable_index_replica is None] options += (" --enable-flush={0}".format(enable_flush), "")[enable_flush is None] @@ -311,7 +313,7 @@ def _create_bucket(self, remote_client, bucket="default", bucket_type="couchbase if "TIMED OUT" in output[0]: raise Exception("Timed out. 
Could not create bucket") else: - self.assertTrue("SUCCESS: bucket-create" in output[0], "Fail to create bucket") + self.assertTrue("SUCCESS: Bucket created" in output[0], "Fail to create bucket") def testHelp(self): command_with_error = {} @@ -387,13 +389,14 @@ def testAddRemoveNodes(self): --server-add-password=password" \ .format(self.servers[num + 1].ip) output, error = \ - remote_client.execute_couchbase_cli(cli_command=cli_command,\ - options=options, cluster_host="localhost", \ - user="Administrator", password="password") + remote_client.execute_couchbase_cli(cli_command=cli_command, + options=options, cluster_host="localhost", + cluster_port=8091, user="Administrator", + password="password") server_added = False if len(output) >= 1: for x in output: - if "Server %s:8091 added" % (self.servers[num + 1].ip) in x: + if "SUCCESS: Server added" in x: server_added = True break if not server_added: @@ -407,22 +410,19 @@ def testAddRemoveNodes(self): for num in xrange(nodes_rem): options = "--server-remove={0}:8091".format(self.servers[nodes_add - num].ip) output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", \ + options=options, cluster_host="localhost", cluster_port=8091,\ user="Administrator", password="password") - self.assertTrue("INFO: rebalancing" in output[0]) - if len(output) == 4: - self.assertEqual(output[2], "SUCCESS: rebalanced cluster") - else: - self.assertEqual(output[1], "SUCCESS: rebalanced cluster") + self.assertTrue("SUCCESS: Rebalance complete" in output) if nodes_rem == 0 and nodes_add > 0: cli_command = "rebalance" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - cluster_host="localhost", user="Administrator", password="password") + output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, cluster_host="localhost", + cluster_port=8091, user="Administrator", + password="password") if len(output) == 4: self.assertEqual(output, ["INFO: rebalancing ", "", "SUCCESS: rebalanced cluster", ""]) else: - self.assertEqual(output, ["INFO: rebalancing . ", "SUCCESS: rebalanced cluster"]) + self.assertTrue("SUCCESS: Rebalance complete" in output) """ when no bucket, have to add option --force to failover since no data => no graceful failover. 
Need to add test @@ -438,55 +438,42 @@ def testAddRemoveNodes(self): options += " --force" output, error = remote_client.execute_couchbase_cli(\ cli_command=cli_command, options=options, \ - cluster_host="localhost", user="Administrator", \ - password="password") + cluster_host="localhost", cluster_port=8091, \ + user="Administrator", password="password") if len(output) == 2: self.assertEqual(output, ["SUCCESS: failover ns_1@{0}" \ .format(self.servers[nodes_add - nodes_rem - num].ip), ""]) else: - self.assertEqual(output, ["SUCCESS: failover ns_1@{0}" \ - .format(self.servers[nodes_add - nodes_rem - num].ip)]) + self.assertTrue("SUCCESS: Server failed over" in output) else: output, error = remote_client.execute_couchbase_cli(\ cli_command=cli_command, options=options, \ - cluster_host="localhost", user="Administrator", \ - password="password") - output[0] = output[0].rstrip(" .") - if len(output) == 3: - self.assertEqual(output, ["INFO: graceful failover", \ - "SUCCESS: failover ns_1@{0}" \ - .format(self.servers[nodes_add - nodes_rem - num].ip), ""]) - else: - self.assertEqual(output, ["INFO: graceful failover", \ - "SUCCESS: failover ns_1@{0}" \ - .format(self.servers[nodes_add - nodes_rem - num].ip)]) + cluster_host="localhost", cluster_port=8091, \ + user="Administrator", password="password") + self.assertTrue("SUCCESS: Server failed over" in output) cli_command = "server-readd" for num in xrange(nodes_readd): self.log.info("add back node {0} to cluster" \ .format(self.servers[nodes_add - nodes_rem - num ].ip)) - options = "--server-add={0}:8091 --server-add-username=Administrator \ - --server-add-password=password" \ - .format(self.servers[nodes_add - nodes_rem - num ].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", \ - user="Administrator", password="password") + options = "--server-add={0}:8091".format(self.servers[nodes_add - nodes_rem - num].ip) + output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, + options=options, cluster_host="localhost", + cluster_port=8091, user="Administrator", + password="password") if len(output) == 2: - self.assertEqual(output, ["SUCCESS: re-add ns_1@{0}" \ - .format(self.servers[nodes_add - nodes_rem - num ].ip), ""]) + self.assertEqual(output, ["DEPRECATED: This command is deprecated and has been replaced " + "by the recovery command", "SUCCESS: Servers recovered"]) else: - self.assertEqual(output, ["SUCCESS: re-add ns_1@{0}" \ - .format(self.servers[nodes_add - nodes_rem - num ].ip)]) + self.assertEqual(output, ["DEPRECATED: This command is deprecated and has been replaced " + "by the recovery command", "SUCCESS: Servers recovered"]) cli_command = "rebalance" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - cluster_host="localhost", user="Administrator", password="password") - output[0] = output[0].rstrip(" .") - if len(output) == 4: - self.assertEqual(output, ["INFO: rebalancing", "", "SUCCESS: rebalanced cluster", ""]) - else: - self.assertEqual(output, ["INFO: rebalancing", "SUCCESS: rebalanced cluster"]) + output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, + cluster_host="localhost", cluster_port=8091, + user="Administrator", password="password") + self.assertTrue("SUCCESS: Rebalance complete" in output) remote_client.disconnect() @@ -502,14 +489,10 @@ def testAddRemoveNodesWithRecovery(self): for num in xrange(nodes_add): self.log.info("add node {0} to 
cluster".format(self.servers[num + 1].ip)) options = "--server-add={0}:8091 --server-add-username=Administrator --server-add-password=password".format(self.servers[num + 1].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user="Administrator", password="password") - if len(output) == 2: - self.assertEqual(output, ["Warning: Adding server from group-manage is deprecated", - "Server {0}:8091 added".format(self.servers[num + 1].ip)]) - else: - self.assertEqual(output, ["Warning: Adding server from group-manage is deprecated", - "Server {0}:8091 added".format(self.servers[num + 1].ip)], - "") + output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, + cluster_host="localhost", cluster_port=8091, + user="Administrator", password="password") + self.assertTrue("SUCCESS: Server added" in output) else: raise Exception("Node add should be smaller total number vms in ini file") @@ -517,16 +500,12 @@ def testAddRemoveNodesWithRecovery(self): for num in xrange(nodes_rem): options = "--server-remove={0}:8091".format(self.servers[nodes_add - num].ip) output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertTrue("INFO: rebalancing" in output[0]) - if len(output) == 4: - self.assertEqual(output[2], "SUCCESS: rebalanced cluster") - else: - self.assertEqual(output[1], "SUCCESS: rebalanced cluster") + self.assertTrue("SUCCESS: Rebalance complete" in output) if nodes_rem == 0 and nodes_add > 0: cli_command = "rebalance" output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, cluster_host="localhost", user="Administrator", password="password") - self.assertTrue(output, ["INFO: rebalancing . 
", "SUCCESS: rebalanced cluster"]) + self.assertTrue("SUCCESS: Rebalance complete" in output) self._create_bucket(remote_client) @@ -537,49 +516,49 @@ def testAddRemoveNodesWithRecovery(self): if self.force_failover or num == nodes_failover - 1: options += " --force" output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertTrue('SUCCESS: failover ns_1@{0}'.format(self.servers[nodes_add - nodes_rem - num].ip) in output, error) + self.assertTrue("SUCCESS: Server failed over" in output) cli_command = "recovery" for num in xrange(nodes_failover): # try to set recovery when nodes failovered (MB-11230) options = "--server-recovery={0}:8091 --recovery-type=delta".format(self.servers[nodes_add - nodes_rem - num].ip) output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual("SUCCESS: setRecoveryType for node ns_1@{0}".format(self.servers[nodes_add - nodes_rem - num].ip), output[0]) + self.assertEqual("SUCCESS: Servers recovered", output[0]) for num in xrange(nodes_recovery): cli_command = "server-readd" self.log.info("add node {0} back to cluster".format(self.servers[nodes_add - nodes_rem - num].ip)) - options = "--server-add={0}:8091 --server-add-username=Administrator --server-add-password=password".format(self.servers[nodes_add - nodes_rem - num].ip) + options = "--server-add={0}:8091".format(self.servers[nodes_add - nodes_rem - num].ip) output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user="Administrator", password="password") if (len(output) == 2): - self.assertEqual(output, ["SUCCESS: re-add ns_1@{0}".format(self.servers[nodes_add - nodes_rem - num].ip), ""]) + self.assertEqual(output, ["DEPRECATED: This command is deprecated and has been replaced " + "by the recovery command", "SUCCESS: Servers recovered"]) else: - self.assertEqual(output, ["SUCCESS: re-add ns_1@{0}".format(self.servers[nodes_add - nodes_rem - num].ip)]) + self.assertEqual(output, ["DEPRECATED: This command is deprecated and has been replaced " + "by the recovery command", "SUCCESS: Servers recovered"]) cli_command = "recovery" options = "--server-recovery={0}:8091 --recovery-type=delta".format(self.servers[nodes_add - nodes_rem - num].ip) output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user="Administrator", password="password") if (len(output) == 2): - self.assertEqual(output, ["SUCCESS: setRecoveryType for node ns_1@{0}".format(self.servers[nodes_add - nodes_rem - num].ip), ""]) + self.assertEqual(output, ["SUCCESS: Servers recovered", ""]) else: - self.assertEqual(output, ["SUCCESS: setRecoveryType for node ns_1@{0}".format(self.servers[nodes_add - nodes_rem - num].ip)]) + self.assertEqual(output, ["SUCCESS: Servers recovered"]) cli_command = "server-readd" for num in xrange(nodes_readd): self.log.info("add back node {0} to cluster".format(self.servers[nodes_add - nodes_rem - num ].ip)) - options = "--server-add={0}:8091 --server-add-username=Administrator --server-add-password=password".format(self.servers[nodes_add - nodes_rem - num ].ip) + options = "--server-add={0}:8091".format(self.servers[nodes_add - nodes_rem - num ].ip) output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, 
cluster_host="localhost", user="Administrator", password="password") if (len(output) == 2): - self.assertEqual(output, ["SUCCESS: re-add ns_1@{0}".format(self.servers[nodes_add - nodes_rem - num ].ip), ""]) + self.assertEqual(output, ["DEPRECATED: This command is deprecated and has been replaced " + "by the recovery command", "SUCCESS: Servers recovered"]) else: - self.assertEqual(output, ["SUCCESS: re-add ns_1@{0}".format(self.servers[nodes_add - nodes_rem - num ].ip)]) + self.assertEqual(output, ["DEPRECATED: This command is deprecated and has been replaced " + "by the recovery command", "SUCCESS: Servers recovered"]) cli_command = "rebalance" output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, cluster_host="localhost", user="Administrator", password="password") - self.assertTrue("INFO: rebalancing . " in output[0]) - if (len(output) == 4): - self.assertEqual("SUCCESS: rebalanced cluster", output[2]) - else: - self.assertEqual("SUCCESS: rebalanced cluster", output[1]) + self.assertTrue("SUCCESS: Rebalance complete" in output) remote_client.disconnect() @@ -655,54 +634,6 @@ def testStartStopRebalance(self): self.assertEqual(output, ["(u'notRunning', None)"]) remote_client.disconnect() - def testNodeInit(self): - server = self.servers[-1] - remote_client = RemoteMachineShellConnection(server) - prefix = '' - type = remote_client.extract_remote_info().distribution_type - if type.lower() == 'windows': - prefix_path = "C:" - - data_path = self.input.param("data_path", None) - index_path = self.input.param("index_path", None) - - if data_path is not None: - data_path = prefix + data_path.replace('|', "/") - if index_path is not None: - index_path = prefix + index_path.replace('|', "/") - - server_info = self._get_cluster_info(remote_client, cluster_port=server.port, \ - user=server.rest_username, password=server.rest_password) - data_path_before = server_info["storage"]["hdd"][0]["path"] - index_path_before = server_info["storage"]["hdd"][0]["index_path"] - - try: - rest = RestConnection(server) - rest.force_eject_node() - cli_command = "node-init" - options = "" - options += ("--node-init-data-path={0} ".format(data_path), "")[data_path is None] - options += ("--node-init-index-path={0} ".format(index_path), "")[index_path is None] - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", \ - user="Administrator", password="password") - self.sleep(7) # time needed to reload couchbase - """ no output print out. 
Bug https://issues.couchbase.com/browse/MB-13704 - output [] - error [] - It need to check when this bug is fixed """ - #self.assertEqual(output[0], "SUCCESS: init localhost") - rest.init_cluster() - server_info = self._get_cluster_info(remote_client, cluster_port=server.port, user=server.rest_username, password=server.rest_password) - data_path_after = server_info["storage"]["hdd"][0]["path"] - index_path_after = server_info["storage"]["hdd"][0]["index_path"] - self.assertEqual((data_path, data_path_before)[data_path is None], data_path_after) - self.assertEqual((index_path, index_path_before)[index_path is None], index_path_after) - finally: - rest = RestConnection(server) - rest.force_eject_node() - rest.init_cluster() - def testClusterInit(self): username = self.input.param("username", None) password = self.input.param("password", None) @@ -717,52 +648,35 @@ def testClusterInit(self): expect_error = self.input.param("expect-error") error_msg = self.input.param("error-msg", "") - initial_server = self.servers[-1] + initial_server = self.servers[0] server = copy.deepcopy(initial_server) - hostname = "%s:%s" % (server.ip, server.port) if not initialized: rest = RestConnection(server) rest.force_eject_node() - options = "" + cli = CouchbaseCLI(server, username, password) + stdout, _, _ = cli.cluster_init(data_ramsize, index_ramsize, fts_ramsize, services, index_storage_mode, name, + username, password, port) + if username: - options += " --cluster-username " + str(username) server.rest_username = username if password: - options += " --cluster-password " + str(password) server.rest_password = password - if data_ramsize: - options += " --cluster-ramsize " + str(data_ramsize) - if index_ramsize: - options += " --cluster-index-ramsize " + str(index_ramsize) - if fts_ramsize: - options += " --cluster-fts-ramsize " + str(fts_ramsize) - if name: - options += " --cluster-name " + str(name) - # strip quotes if the cluster name contains spaces - if (name[0] == name[-1]) and name.startswith(("'", '"')): - name = name[1:-1] - if index_storage_mode: - options += " --index-storage-setting " + str(index_storage_mode) - elif services and "index" in services: + # strip quotes if the cluster name contains spaces + if name and (name[0] == name[-1]) and name.startswith(("'", '"')): + name = name[1:-1] + if not index_storage_mode and services and "index" in services: index_storage_mode = "forestdb" - if port: - options += " --cluster-port " + str(port) - if services: - options += " --services " + str(services) - else: + if not services: services = "data" - remote_client = RemoteMachineShellConnection(server) - output, error = remote_client.couchbase_cli("cluster-init", hostname, options) - if not expect_error: # Update the cluster manager port if it was specified to be changed if port: server.port = port - self.assertTrue(self.verifyCommandOutput(output, expect_error, "Cluster initialized"), + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Cluster initialized"), "Expected command to succeed") self.assertTrue(self.isClusterInitialized(server), "Cluster was not initialized") self.assertTrue(self.verifyServices(server, services), "Services do not match") @@ -770,13 +684,13 @@ def testClusterInit(self): self.assertTrue(self.verifyClusterName(server, name), "Cluster name does not match") if "index" in services: - self.assertTrue(self.verifyIndexStorageMode(server, index_storage_mode), + self.assertTrue(self.verifyIndexSettings(server, None, None, None, index_storage_mode, None, None), "Index 
storage mode not properly set") self.assertTrue(self.verifyRamQuotas(server, data_ramsize, index_ramsize, fts_ramsize), "Ram quotas not set properly") else: - self.assertTrue(self.verifyCommandOutput(output, expect_error, error_msg), "Expected error message not found") + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), "Expected error message not found") if not initialized: self.assertTrue(not self.isClusterInitialized(server), "Cluster was initialized, but error was received") @@ -784,6 +698,164 @@ def testClusterInit(self): rest = RestConnection(server) rest.init_cluster(initial_server.rest_username, initial_server.rest_password, initial_server.port) + def testRebalanceStop(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + init_start_rebalance = self.input.param("init-rebalance", False) + + server = copy.deepcopy(self.servers[0]) + add_server = self.servers[1] + + rest = RestConnection(server) + rest.force_eject_node() + + cli = CouchbaseCLI(server, username, password) + if initialized: + _, _, success = cli.cluster_init(256, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + if init_start_rebalance: + self.assertTrue(rest.rebalance(otpNodes=["%s:%s" % (add_server.ip, add_server.port)]), + "Rebalance failed to start") + + stdout, _, _ = cli.rebalance_stop() + + if not expect_error: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Rebalance stopped"), + "Expected command to succeed") + if init_start_rebalance: + self.assertTrue(rest.isRebalanced(), "Rebalance does not appear to be stopped") + + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") + if not initialized: + self.assertTrue(not self.isClusterInitialized(server), + "Cluster was initialized, but error was received") + + def testSettingAudit(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + enabled = self.input.param("enabled", None) + log_path = self.input.param("log-path", None) + rotate_interval = self.input.param("rotate-interval", None) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + + server = copy.deepcopy(self.servers[0]) + + rest = RestConnection(server) + rest.force_eject_node() + + if log_path is not None and log_path == "valid": + log_path = self.log_path + + cli = CouchbaseCLI(server, username, password) + if initialized: + _, _, success = cli.cluster_init(256, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + + stdout, _, _ = cli.setting_audit(enabled, log_path, rotate_interval) + + if not expect_error: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Audit settings modified"), + "Expected command to succeed") + self.assertTrue(self.verifyAuditSettings(server, enabled, log_path, rotate_interval), + "Audit settings were not set properly") + + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") + if not initialized: + self.assertTrue(not 
self.isClusterInitialized(server),
+                                "Cluster was initialized, but error was received")
+
+    def testSettingCompaction(self):
+        username = self.input.param("username", None)
+        password = self.input.param("password", None)
+
+        db_frag_perc = self.input.param("db-frag-perc", None)
+        db_frag_size = self.input.param("db-frag-size", None)
+        view_frag_perc = self.input.param("view-frag-perc", None)
+        view_frag_size = self.input.param("view-frag-size", None)
+        from_period = self.input.param("from-period", None)
+        to_period = self.input.param("to-period", None)
+        abort_outside = self.input.param("abort-outside", None)
+        parallel_compact = self.input.param("parallel-compact", None)
+        purgeInt = self.input.param("purge-interval", None)
+
+        initialized = self.input.param("initialized", True)
+        expect_error = self.input.param("expect-error")
+        error_msg = self.input.param("error-msg", "")
+
+        server = copy.deepcopy(self.servers[0])
+
+        rest = RestConnection(server)
+        rest.force_eject_node()
+
+        cli = CouchbaseCLI(server, username, password)
+        if initialized:
+            _, _, success = cli.cluster_init(256, None, None, None, None, None, server.rest_username,
+                                             server.rest_password, None)
+            self.assertTrue(success, "Cluster initialization failed during test setup")
+
+        stdout, _, success = cli.setting_compaction(db_frag_perc, db_frag_size, view_frag_perc, view_frag_size,
+                                                    from_period, to_period, abort_outside, parallel_compact, purgeInt)
+
+        if not expect_error:
+            self.assertTrue(success, "Expected command to succeed")
+            self.assertTrue(self.verifyCompactionSettings(server, db_frag_perc, db_frag_size, view_frag_perc,
+                                                          view_frag_size, from_period, to_period, abort_outside,
+                                                          parallel_compact, purgeInt),
+                            "Settings don't match")
+
+        else:
+            self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg),
+                            "Expected error message not found")
+            if not initialized:
+                self.assertTrue(not self.isClusterInitialized(server),
+                                "Cluster was initialized, but error was received")
+
+    def testSettingAutoFailover(self):
+        username = self.input.param("username", None)
+        password = self.input.param("password", None)
+        enabled = self.input.param("enabled", None)
+        timeout = self.input.param("timeout", None)
+        initialized = self.input.param("initialized", True)
+        expect_error = self.input.param("expect-error")
+        error_msg = self.input.param("error-msg", "")
+
+        server = copy.deepcopy(self.servers[0])
+
+        rest = RestConnection(server)
+        rest.force_eject_node()
+
+        cli = CouchbaseCLI(server, username, password)
+        if initialized:
+            _, _, success = cli.cluster_init(256, None, None, None, None, None, server.rest_username,
+                                             server.rest_password, None)
+            self.assertTrue(success, "Cluster initialization failed during test setup")
+
+        stdout, _, _ = cli.setting_autofailover(enabled, timeout)
+
+        if not expect_error:
+            self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Auto-failover settings modified"),
+                            "Expected command to succeed")
+            self.assertTrue(self.verifyAutofailoverSettings(server, enabled, timeout),
+                            "Auto-failover settings were not set properly")
+
+        else:
+            self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg),
+                            "Expected error message not found")
+            if not initialized:
+                self.assertTrue(not self.isClusterInitialized(server),
+                                "Cluster was initialized, but error was received")
+
     def testSettingNotification(self):
         enable = self.input.param("enable", None)
         username = self.input.param("username", None)
@@ -791,39 +863,31 @@ def testSettingNotification(self):
         initialized = 
self.input.param("initialized", False) expect_error = self.input.param("expect-error") error_msg = self.input.param("error-msg", "") + server = copy.deepcopy(self.servers[0]) - server = copy.deepcopy(self.servers[-1]) - hostname = "%s:%s" % (server.ip, server.port) - - if not initialized: - rest = RestConnection(server) - rest.force_eject_node() + rest = RestConnection(server) + rest.force_eject_node() - options = "" - if username is not None: - options += " -u " + str(username) - server.rest_username = username - if password is not None: - options += " -p " + str(password) - server.rest_password = password - if enable is not None: - options += " --enable-notification " + str(enable) + cli = CouchbaseCLI(server, username, password) + if initialized: + _, _, success = cli.cluster_init(256, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") initialy_enabled = self.verifyNotificationsEnabled(server) - remote_client = RemoteMachineShellConnection(server) - output, error = remote_client.couchbase_cli("setting-notification", hostname, options) - remote_client.disconnect() + cli = CouchbaseCLI(server, username, password) + stdout, _, _ = cli.setting_notification(enable) if not expect_error: - self.assertTrue(self.verifyCommandOutput(output, expect_error, "Notification settings updated"), + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Notification settings updated"), "Expected command to succeed") if enable == 1: self.assertTrue(self.verifyNotificationsEnabled(server), "Notification not enabled") else: self.assertTrue(not self.verifyNotificationsEnabled(server), "Notification are enabled") else: - self.assertTrue(self.verifyCommandOutput(output, expect_error, error_msg), "Expected error message not found") + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), "Expected error message not found") self.assertTrue(self.verifyNotificationsEnabled(server) == initialy_enabled, "Notifications changed after error") def testSettingCluster(self): @@ -846,69 +910,36 @@ def clusterSettings(self, cmd): expect_error = self.input.param("expect-error") error_msg = self.input.param("error-msg", "") - init_username = self.input.param("init-username", username) - init_password = self.input.param("init-password", password) init_data_memory = 256 init_index_memory = 256 init_fts_memory = 256 init_name = "testrunner" - initial_server = self.servers[-1] + initial_server = self.servers[0] server = copy.deepcopy(initial_server) - hostname = "%s:%s" % (server.ip, server.port) rest = RestConnection(server) rest.force_eject_node() + cli = CouchbaseCLI(server, username, password) if initialized: - if init_username is None: - init_username = "Administrator" - if init_password is None: - init_password = "password" - server.rest_username = init_username - server.rest_password = init_password - rest = RestConnection(server) - self.assertTrue(rest.init_cluster(init_username, init_password), - "Cluster initialization failed during test setup") - self.assertTrue(rest.init_cluster_memoryQuota(init_username, init_password, init_data_memory), - "Setting data service RAM quota failed during test setup") - self.assertTrue(rest.set_indexer_memoryQuota(init_username, init_password, init_index_memory), - "Setting index service RAM quota failed during test setup") - self.assertTrue(rest.set_fts_memoryQuota(init_username, init_password, init_fts_memory), - "Setting full-text service RAM quota 
failed during test setup") - self.assertTrue(rest.set_cluster_name(init_name), "Setting cluster name failed during test setup") - - - options = "" - if username is not None: - options += " -u " + str(username) - if password is not None: - options += " -p " + str(password) - if new_username is not None: - options += " --cluster-username " + str(new_username) - if not expect_error: + _, _, success = cli.cluster_init(init_data_memory, init_index_memory, init_fts_memory, None, None, + init_name, server.rest_username, server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + + if cmd == "cluster-edit": + stdout, _, _ = cli.cluster_edit(data_ramsize, index_ramsize, fts_ramsize, name, new_username, + new_password, port) + else: + stdout, _, _ = cli.setting_cluster(data_ramsize, index_ramsize, fts_ramsize, name, new_username, + new_password, port) + + if new_username and not expect_error: server.rest_username = new_username - if new_password is not None: - options += " --cluster-password " + str(new_password) - if not expect_error: - server.rest_password = new_password - if data_ramsize: - options += " --cluster-ramsize " + str(data_ramsize) - if index_ramsize: - options += " --cluster-index-ramsize " + str(index_ramsize) - if fts_ramsize: - options += " --cluster-fts-ramsize " + str(fts_ramsize) - if name: - options += " --cluster-name " + str(name) - # strip quotes if the cluster name contains spaces - if (name[0] == name[-1]) and name.startswith(("'", '"')): - name = name[1:-1] - if port: - options += " --cluster-port " + str(port) - - remote_client = RemoteMachineShellConnection(server) - output, error = remote_client.couchbase_cli(cmd, hostname, options) - remote_client.disconnect() + if new_password and not expect_error: + server.rest_password = new_password + if name and (name[0] == name[-1]) and name.startswith(("'", '"')): + name = name[1:-1] if not expect_error: # Update the cluster manager port if it was specified to be changed @@ -924,14 +955,14 @@ def clusterSettings(self, cmd): name = init_name if cmd == "cluster-edit": - self.verifyWarningOutput(output, "The cluster-edit command is depercated, use setting-cluster instead") - self.assertTrue(self.verifyCommandOutput(output, expect_error, "Cluster settings modified"), + self.verifyWarningOutput(stdout, "The cluster-edit command is depercated, use setting-cluster instead") + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Cluster settings modified"), "Expected command to succeed") self.assertTrue(self.verifyRamQuotas(server, data_ramsize, index_ramsize, fts_ramsize), "Ram quotas not set properly") self.assertTrue(self.verifyClusterName(server, name), "Cluster name does not match") else: - self.assertTrue(self.verifyCommandOutput(output, expect_error, error_msg), "Expected error message not found") + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), "Expected error message not found") if not initialized: self.assertTrue(not self.isClusterInitialized(server), "Cluster was initialized, but error was received") @@ -940,6 +971,140 @@ def clusterSettings(self, cmd): self.assertTrue(rest.init_cluster(initial_server.rest_username, initial_server.rest_password, initial_server.port), "Cluster was not re-initialized at the end of the test") + def testSettingIndex(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + max_rollbacks = self.input.param("max-rollback-points", None) + stable_snap_interval = 
self.input.param("stable-snapshot-interval", None) + mem_snap_interval = self.input.param("memory-snapshot-interval", None) + storage_mode = self.input.param("storage-mode", None) + threads = self.input.param("threads", None) + log_level = self.input.param("log-level", None) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + + server = copy.deepcopy(self.servers[0]) + + rest = RestConnection(server) + rest.force_eject_node() + + cli = CouchbaseCLI(server, username, password) + if initialized: + _, _, success = cli.cluster_init(256, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + + stdout, _, _ = cli.setting_index(max_rollbacks, stable_snap_interval, mem_snap_interval, storage_mode, threads, + log_level) + + if not expect_error: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Indexer settings modified"), + "Expected command to succeed") + self.assertTrue(self.verifyIndexSettings(server, max_rollbacks, stable_snap_interval, mem_snap_interval, + storage_mode, threads, log_level), + "Index settings were not set properly") + + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") + if not initialized: + self.assertTrue(not self.isClusterInitialized(server), + "Cluster was initialized, but error was received") + + def testSettingLdap(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + admins = self.input.param("admins", None) + ro_admins = self.input.param("ro-admins", None) + enabled = self.input.param("enabled", False) + default = self.input.param("default", None) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + + server = copy.deepcopy(self.servers[0]) + + rest = RestConnection(server) + rest.force_eject_node() + + cli = CouchbaseCLI(server, username, password) + if initialized: + _, _, success = cli.cluster_init(256, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + + stdout, _, _ = cli.setting_ldap(admins, ro_admins, default, enabled) + + if not expect_error: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "LDAP settings modified"), + "Expected command to succeed") + self.assertTrue(self.verifyLdapSettings(server, admins, ro_admins, default, enabled), "LDAP settings not set") + + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") + if not initialized: + self.assertTrue(self.verifyLdapSettings(server, None, None, None, 0), "LDAP setting changed") + + def testSettingAlert(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + enabled = self.input.param("enabled", None) + email_recipients = self.input.param("email-recipients", None) + email_sender = self.input.param("email-sender", None) + email_username = self.input.param("email-user", None) + email_password = self.input.param("email-password", None) + email_host = self.input.param("email-host", None) + email_port = self.input.param("email-port", None) + encrypted = self.input.param("encrypted", None) + alert_af_node = 
self.input.param("alert-auto-failover-node", False) + alert_af_max_reached = self.input.param("alert-auto-failover-max-reached", False) + alert_af_node_down = self.input.param("alert-auto-failover-node-down", False) + alert_af_small = self.input.param("alert-auto-failover-cluster-small", False) + alert_af_disable = self.input.param("alert-auto-failover-disable", False) + alert_ip_changed = self.input.param("alert-ip-changed", False) + alert_disk_space = self.input.param("alert-disk-space", False) + alert_meta_overhead = self.input.param("alert-meta-overhead", False) + alert_meta_oom = self.input.param("alert-meta-oom", False) + alert_write_failed = self.input.param("alert-write-failed", False) + alert_audit_dropped = self.input.param("alert-audit-msg-dropped", False) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + + server = copy.deepcopy(self.servers[0]) + + rest = RestConnection(server) + rest.force_eject_node() + + cli = CouchbaseCLI(server, username, password) + if initialized: + _, _, success = cli.cluster_init(256, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + + stdout, _, _ = cli.setting_alert(enabled, email_recipients, email_sender, email_username, email_password, + email_host, email_port, encrypted, alert_af_node, alert_af_max_reached, + alert_af_node_down, alert_af_small, alert_af_disable, alert_ip_changed, + alert_disk_space, alert_meta_overhead, alert_meta_oom, alert_write_failed, + alert_audit_dropped) + + if not expect_error: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Email alert settings modified"), + "Expected command to succeed") + self.assertTrue(self.verifyAlertSettings(server, enabled, email_recipients, email_sender, email_username, + email_password, email_host, email_port, encrypted, alert_af_node, + alert_af_max_reached, alert_af_node_down, alert_af_small, + alert_af_disable, alert_ip_changed, alert_disk_space, + alert_meta_overhead, alert_meta_oom, alert_write_failed, + alert_audit_dropped), + "Alerts settings not set") + + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") + def testBucketCompact(self): username = self.input.param("username", None) password = self.input.param("password", None) @@ -949,65 +1114,33 @@ def testBucketCompact(self): initialized = self.input.param("initialized", True) expect_error = self.input.param("expect-error") error_msg = self.input.param("error-msg", "") - - init_username = self.input.param("init-username", username) - init_password = self.input.param("init-password", password) init_bucket_type = self.input.param("init-bucket-type", None) - initial_server = self.servers[-1] - server = copy.deepcopy(initial_server) - hostname = "%s:%s" % (server.ip, server.port) + server = copy.deepcopy(self.servers[0]) rest = RestConnection(server) rest.force_eject_node() - if init_bucket_type == "couchbase": - init_bucket_type = "membase" - if initialized: - if init_username is None: - init_username = "Administrator" - if init_password is None: - init_password = "password" - server.rest_username = init_username - server.rest_password = init_password - rest = RestConnection(server) - self.assertTrue(rest.init_cluster(init_username, init_password), - "Cluster initialization failed during test setup") + cli = CouchbaseCLI(server, 
server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(256, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") if init_bucket_type is not None: - self.assertTrue(rest.create_bucket(bucket_name, 256, "sasl", "", 1, 0, init_bucket_type, 0, 3, - 0, "valueOnly"), - "Bucket not created during test setup") - - options = "" - if username is not None: - options += " -u " + str(username) - if password is not None: - options += " -p " + str(password) - if bucket_name is not None: - options += " --bucket " + bucket_name - if data_only: - options += " --data-only" - if views_only: - options += " --view-only" - - remote_client = RemoteMachineShellConnection(server) - output, error = remote_client.couchbase_cli("bucket-compact", hostname, options) - remote_client.disconnect() + _, _, success = cli.bucket_create(bucket_name, '""', init_bucket_type, 256, None, None, None, None, + None, None) + self.assertTrue(success, "Bucket not created during test setup") + + cli = CouchbaseCLI(server, username, password) + stdout, _, _ = cli.bucket_compact(bucket_name, data_only, views_only) if not expect_error: - self.assertTrue(self.verifyCommandOutput(output, expect_error, "Bucket compaction started"), + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Bucket compaction started"), "Expected command to succeed") else: - self.assertTrue(self.verifyCommandOutput(output, expect_error, error_msg), + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), "Expected error message not found") - # Reset the cluster (This is important for when we change the port number or username/password) - rest = RestConnection(server) - self.assertTrue( - rest.init_cluster(initial_server.rest_username, initial_server.rest_password, initial_server.port), - "Cluster was not re-initialized at the end of the test") - def testBucketCreate(self): username = self.input.param("username", None) password = self.input.param("password", None) @@ -1025,59 +1158,22 @@ def testBucketCreate(self): expect_error = self.input.param("expect-error") error_msg = self.input.param("error-msg", "") - init_username = self.input.param("init-username", username) - init_password = self.input.param("init-password", password) - - initial_server = self.servers[-1] - server = copy.deepcopy(initial_server) - hostname = "%s:%s" % (server.ip, server.port) + server = copy.deepcopy(self.servers[0]) rest = RestConnection(server) rest.force_eject_node() + cli = CouchbaseCLI(server, username, password) if initialized: - if init_username is None: - init_username = "Administrator" - if init_password is None: - init_password = "password" - server.rest_username = init_username - server.rest_password = init_password - rest = RestConnection(server) - self.assertTrue(rest.init_cluster(init_username, init_password), - "Cluster initialization failed during test setup") - - options = "" - if username is not None: - options += " -u " + str(username) - if password is not None: - options += " -p " + str(password) - if bucket_name is not None: - options += " --bucket " + bucket_name - if bucket_password is not None: - options += " --bucket-password " + bucket_password - if bucket_type is not None: - options += " --bucket-type " + bucket_type - if memory_quota is not None: - options += " --bucket-ramsize " + str(memory_quota) - if eviction_policy is not None: - options += " --bucket-eviction-policy " + eviction_policy - if 
replica_count is not None: - options += " --bucket-replica " + str(replica_count) - if enable_index_replica is not None: - options += " --enable-index-replica " + str(enable_index_replica) - if priority is not None: - options += " --bucket-priority " + priority - if enable_flush is not None: - options += " --enable-flush " + str(enable_flush) - if wait: - options += " --wait" - - remote_client = RemoteMachineShellConnection(server) - output, error = remote_client.couchbase_cli("bucket-create", hostname, options) - remote_client.disconnect() + _, _, success = cli.cluster_init(512, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + + stdout, _, _ = cli.bucket_create(bucket_name, bucket_password, bucket_type, memory_quota, eviction_policy, + replica_count, enable_index_replica, priority, enable_flush, wait) if not expect_error: - self.assertTrue(self.verifyCommandOutput(output, expect_error, "Bucket created"), + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Bucket created"), "Expected command to succeed") self.assertTrue(self.verifyBucketSettings(server, bucket_name, bucket_password, bucket_type, memory_quota, eviction_policy, replica_count, enable_index_replica, priority, @@ -1086,14 +1182,9 @@ def testBucketCreate(self): else: self.assertTrue(not self.verifyContainsBucket(server, bucket_name), "Bucket was created even though an error occurred") - self.assertTrue(self.verifyCommandOutput(output, expect_error, error_msg), + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), "Expected error message not found") - # Reset the cluster (This is important for when we change the port number or username/password) - rest = RestConnection(server) - self.assertTrue(rest.init_cluster(initial_server.rest_username, initial_server.rest_password, initial_server.port), - "Cluster was not re-initialized at the end of the test") - def testBucketEdit(self): username = self.input.param("username", None) password = self.input.param("password", None) @@ -1107,75 +1198,38 @@ def testBucketEdit(self): initialized = self.input.param("initialized", True) expect_error = self.input.param("expect-error") error_msg = self.input.param("error-msg", "") + init_bucket_type = self.input.param("init-bucket-type", None) - init_username = self.input.param("init-username", username) - init_password = self.input.param("init-password", password) - init_bucket_type = self.input.param("init-bucket-type") - - initial_server = self.servers[-1] - server = copy.deepcopy(initial_server) - hostname = "%s:%s" % (server.ip, server.port) + server = copy.deepcopy(self.servers[0]) rest = RestConnection(server) rest.force_eject_node() - if init_bucket_type == "couchbase": - init_bucket_type = "membase" - if initialized: - if init_username is None: - init_username = "Administrator" - if init_password is None: - init_password = "password" - server.rest_username = init_username - server.rest_password = init_password - rest = RestConnection(server) - self.assertTrue(rest.init_cluster(init_username, init_password), - "Cluster initialization failed during test setup") - self.assertTrue(rest.create_bucket(bucket_name, 256, "sasl", "", 1, 0, init_bucket_type, 0, 3, 0, "valueOnly"), - "Bucket not created during test setup") - - options = "" - if username is not None: - options += " -u " + str(username) - if password is not None: - options += " -p " + str(password) - if bucket_name is not None: - options += 
" --bucket " + bucket_name - if bucket_password is not None: - options += " --bucket-password " + bucket_password - if memory_quota is not None: - options += " --bucket-ramsize " + str(memory_quota) - if eviction_policy is not None: - options += " --bucket-eviction-policy " + eviction_policy - if replica_count is not None: - options += " --bucket-replica " + str(replica_count) - if priority is not None: - options += " --bucket-priority " + priority - if enable_flush is not None: - options += " --enable-flush " + str(enable_flush) - - remote_client = RemoteMachineShellConnection(server) - output, error = remote_client.couchbase_cli("bucket-edit", hostname, options) - remote_client.disconnect() + cli = CouchbaseCLI(server, server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(512, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + if init_bucket_type is not None: + _, _, success = cli.bucket_create(bucket_name, '""', init_bucket_type, 256, None, None, None, + None, 0, None) + self.assertTrue(success, "Bucket not created during test setup") + + cli = CouchbaseCLI(server, username, password) + stdout, _, _ = cli.bucket_edit(bucket_name, bucket_password, memory_quota, eviction_policy, replica_count, + priority, enable_flush) if not expect_error: - self.assertTrue(self.verifyCommandOutput(output, expect_error, "Bucket edited"), + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Bucket edited"), "Expected command to succeed") self.assertTrue(self.verifyBucketSettings(server, bucket_name, bucket_password, None, memory_quota, eviction_policy, replica_count, None, priority, enable_flush), "Bucket settings not set properly") else: # List buckets - self.assertTrue(self.verifyCommandOutput(output, expect_error, error_msg), + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), "Expected error message not found") - # Reset the cluster (This is important for when we change the port number or username/password) - rest = RestConnection(server) - self.assertTrue( - rest.init_cluster(initial_server.rest_username, initial_server.rest_password, initial_server.port), - "Cluster was not re-initialized at the end of the test") - def testBucketDelete(self): username = self.input.param("username", None) password = self.input.param("password", None) @@ -1183,9 +1237,6 @@ def testBucketDelete(self): initialized = self.input.param("initialized", True) expect_error = self.input.param("expect-error") error_msg = self.input.param("error-msg", "") - - init_username = self.input.param("init-username", username) - init_password = self.input.param("init-password", password) init_bucket_type = self.input.param("init-bucket-type", None) server = copy.deepcopy(self.servers[-1]) @@ -1194,41 +1245,25 @@ def testBucketDelete(self): rest = RestConnection(server) rest.force_eject_node() - if init_bucket_type == "couchbase": - init_bucket_type = "membase" - if initialized: - if init_username is None: - init_username = "Administrator" - if init_password is None: - init_password = "password" - server.rest_username = init_username - server.rest_password = init_password - self.assertTrue(rest.init_cluster(init_username, init_password), - "Cluster initialization failed during test setup") - if init_bucket_type != None: - self.assertTrue(rest.create_bucket(bucket_name, 256, "sasl", "", 1, 0, init_bucket_type, 0, 3, - 0, "valueOnly"), - "Bucket not created during test 
setup") - - options = "" - if username is not None: - options += " -u " + str(username) - if password is not None: - options += " -p " + str(password) - if bucket_name is not None: - options += " --bucket " + bucket_name - - remote_client = RemoteMachineShellConnection(server) - output, error = remote_client.couchbase_cli("bucket-delete", hostname, options) - remote_client.disconnect() + cli = CouchbaseCLI(server, server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(512, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + if init_bucket_type is not None: + _, _, success = cli.bucket_create(bucket_name, '""', init_bucket_type, 256, None, None, None, + None, 0, None) + self.assertTrue(success, "Bucket not created during test setup") + + cli = CouchbaseCLI(server, username, password) + stdout, _, _ = cli.bucket_delete(bucket_name) if not expect_error: - self.assertTrue(self.verifyCommandOutput(output, expect_error, "Bucket deleted"), + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Bucket deleted"), "Expected command to succeed") self.assertTrue(not self.verifyContainsBucket(server, bucket_name), "Bucket was not deleted") else: - self.assertTrue(self.verifyCommandOutput(output, expect_error, error_msg), + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), "Expected error message not found") if initialized and init_bucket_type is not None: self.assertTrue(self.verifyContainsBucket(server, bucket_name), "Bucket should not have been deleted") @@ -1242,420 +1277,629 @@ def testBucketFlush(self): expect_error = self.input.param("expect-error") error_msg = self.input.param("error-msg", "") - init_username = self.input.param("init-username", username) - init_password = self.input.param("init-password", password) init_bucket_type = self.input.param("init-bucket-type", None) init_enable_flush = int(self.input.param("init-enable-flush", 0)) insert_keys = 12 - initial_server = self.servers[-1] - server = copy.deepcopy(initial_server) - hostname = "%s:%s" % (server.ip, server.port) + server = copy.deepcopy(self.servers[0]) rest = RestConnection(server) rest.force_eject_node() - if init_bucket_type == "couchbase": - init_bucket_type = "membase" - if initialized: - if init_username is None: - init_username = "Administrator" - if init_password is None: - init_password = "password" - - server.rest_username = init_username - server.rest_password = init_password - rest = RestConnection(server) - self.assertTrue(rest.init_cluster(init_username, init_password), - "Cluster initialization failed during test setup") + cli = CouchbaseCLI(server, server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(512, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") if init_bucket_type is not None: - self.assertTrue(rest.create_bucket(bucket_name, 256, "sasl", "", 1, 0, init_bucket_type, 0, 3, - init_enable_flush, "valueOnly"), - "Bucket not created during test setup") + _, _, success = cli.bucket_create(bucket_name, '""', init_bucket_type, 256, None, None, None, + None, init_enable_flush, None) + self.assertTrue(success, "Bucket not created during test setup") + MemcachedClientHelper.load_bucket_and_return_the_keys([server], name=bucket_name, number_of_threads=1, write_only=True, number_of_items=insert_keys, moxi=False) 
inserted = int(rest.get_bucket_json(bucket_name)["basicStats"]["itemCount"]) self.assertTrue(self.waitForItemCount(server, bucket_name, insert_keys)) - options = "" - if username is not None: - options += " -u " + str(username) - if password is not None: - options += " -p " + str(password) - if bucket_name is not None: - options += " --bucket " + bucket_name - if force: - options += " --force" - - remote_client = RemoteMachineShellConnection(server) - output, error = remote_client.couchbase_cli("bucket-flush", hostname, options) - remote_client.disconnect() + cli = CouchbaseCLI(server, username, password) + stdout, _, _ = cli.bucket_flush(bucket_name, force) if not expect_error: - self.assertTrue(self.verifyCommandOutput(output, expect_error, "Bucket flushed"), + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Bucket flushed"), "Expected command to succeed") self.assertTrue(self.waitForItemCount(server, bucket_name, 0), "Expected 0 keys to be in bucket after the flush") else: - self.assertTrue(self.verifyCommandOutput(output, expect_error, error_msg), + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), "Expected error message not found") if initialized and init_bucket_type is not None: self.assertTrue(self.waitForItemCount(server, bucket_name, insert_keys), "Expected keys to exist after the flush failed") - # Reset the cluster (This is important for when we change the port number or username/password) + def testServerAdd(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + num_servers = self.input.param("num-add-servers", None) + server_username = self.input.param("server-add-username", None) + server_password = self.input.param("server-add-password", None) + group = self.input.param("group-name", None) + services = self.input.param("services", None) + index_storage_mode = self.input.param("index-storage-mode", None) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + init_index_storage_mode = self.input.param("init-index-storage-mode", None) + init_services = self.input.param("init-services", None) + + server = copy.deepcopy(self.servers[0]) + + servers_list = list() + for i in range(0, num_servers): + servers_list.append("%s:%s" % (self.servers[i+1].ip, self.servers[i+1].port)) + server_to_add = ",".join(servers_list) + rest = RestConnection(server) - self.assertTrue( - rest.init_cluster(initial_server.rest_username, initial_server.rest_password, initial_server.port), - "Cluster was not re-initialized at the end of the test") + rest.force_eject_node() - def testIndexerSettings(self): - cli_command = "setting-index" - rest = RestConnection(self.master) - remote_client = RemoteMachineShellConnection(self.master) + if initialized: + cli = CouchbaseCLI(server, server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(256, 256, None, init_services, init_index_storage_mode, None, + server.rest_username, server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + + time.sleep(5) + cli = CouchbaseCLI(server, username, password) + stdout, _, _ = cli.server_add(server_to_add, server_username, server_password, group, services, + index_storage_mode) + + if not services: + services = "kv" + if group: + if (group[0] == group[-1]) and group.startswith(("'", '"')): + group = group[1:-1] + else: + group = "Group 1" - options = (" 
--index-threads=3") - options += (" --index-max-rollback-points=6") - options += (" --index-stable-snapshot-interval=4900") - options += (" --index-memory-snapshot-interval=220") - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", \ - user="Administrator", password="password") - self.assertEqual(output, ['SUCCESS: set index settings']) - remote_client.disconnect() + if not index_storage_mode and "index" in services.split(","): + index_storage_mode = "default" - # MB-8566 - def testSettingCompacttion(self): - '''setting-compacttion OPTIONS: - --compaction-db-percentage=PERCENTAGE at which point database compaction is triggered - --compaction-db-size=SIZE[MB] at which point database compaction is triggered - --compaction-view-percentage=PERCENTAGE at which point view compaction is triggered - --compaction-view-size=SIZE[MB] at which point view compaction is triggered - --compaction-period-from=HH:MM allow compaction time period from - --compaction-period-to=HH:MM allow compaction time period to - --enable-compaction-abort=[0|1] allow compaction abort when time expires - --enable-compaction-parallel=[0|1] allow parallel compaction for database and view''' - compaction_db_percentage = self.input.param("compaction-db-percentage", None) - compaction_db_size = self.input.param("compaction-db-size", None) - compaction_view_percentage = self.input.param("compaction-view-percentage", None) - compaction_view_size = self.input.param("compaction-view-size", None) - compaction_period_from = self.input.param("compaction-period-from", None) - compaction_period_to = self.input.param("compaction-period-to", None) - enable_compaction_abort = self.input.param("enable-compaction-abort", None) - enable_compaction_parallel = self.input.param("enable-compaction-parallel", None) - bucket = self.input.param("bucket", "default") - output = self.input.param("output", '') - rest = RestConnection(self.master) - remote_client = RemoteMachineShellConnection(self.master) - self.testBucketCreation() - cli_command = "setting-compacttion" - options = "--bucket={0}".format(bucket) - options += (" --compaction-db-percentage={0}".format(compaction_db_percentage), "")[compaction_db_percentage is None] - options += (" --compaction-db-size={0}".format(compaction_db_size), "")[compaction_db_size is None] - options += (" --compaction-view-percentage={0}".format(compaction_view_percentage), "")[compaction_view_percentage is None] - options += (" --compaction-view-size={0}".format(compaction_view_size), "")[compaction_view_size is None] - options += (" --compaction-period-from={0}".format(compaction_period_from), "")[compaction_period_from is None] - options += (" --compaction-period-to={0}".format(compaction_period_to), "")[compaction_period_to is None] - options += (" --enable-compaction-abort={0}".format(enable_compaction_abort), "")[enable_compaction_abort is None] - options += (" --enable-compaction-parallel={0}".format(enable_compaction_parallel), "")[enable_compaction_parallel is None] + if not expect_error: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Server added"), + "Expected command to succeed") + self.assertTrue(self.verifyPendingServer(server, server_to_add, group, services), + "Pending server has incorrect settings") + self.assertTrue(self.verifyIndexSettings(server, None, None, None, index_storage_mode, None, None), + "Invalid index storage setting") + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, 
error_msg), + "Expected error message not found") + if initialized: + self.assertTrue(self.verifyPendingServerDoesNotExist(server, server_to_add), + "Pending server exists") - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output, ['SUCCESS: bucket-edit']) - cluster_status = rest.cluster_status() - remote_client.disconnect() + def testRebalance(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + num_add_servers = self.input.param("num-add-servers", 0) + num_remove_servers = self.input.param("num-remove-servers", 0) + num_initial_servers = self.input.param("num-initial-servers", 1) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") - """ tests for the group-manage option. group creation, renaming and deletion are tested . - These tests require a cluster of four or more nodes. """ - def testCreateRenameDeleteGroup(self): - remote_client = RemoteMachineShellConnection(self.master) - cli_command = "group-manage" + self.assertTrue(num_initial_servers > num_remove_servers, "Specified more remove servers than initial servers") - if self.os == "linux": - # create group - options = " --create --group-name=group2" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - self.assertEqual(output, ["SUCCESS: group created group2"]) - # create existing group. operation should fail - options = " --create --group-name=group2" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - self.assertEqual(output[0], "ERROR: unable to create group group2 (400) Bad Request") - self.assertEqual(output[1], '{"name":"already exists"}') - # rename group test - options = " --rename=group3 --group-name=group2" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - self.assertEqual(output, ["SUCCESS: group renamed group2"]) - # delete group test - options = " --delete --group-name=group3" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - self.assertEqual(output, ["SUCCESS: group deleted group3"]) - # delete non-empty group test - options = " --delete --group-name=\"Group 1\"" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - self.assertEqual(output[0], "ERROR: unable to delete group Group 1 (400) Bad Request") - self.assertEqual(output[1], '{"_":"group is not empty"}') - # delete non-existing group - options = " --delete --group-name=groupn" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - 
self.assertEqual(output, ["ERROR: invalid group name:groupn"]) - remote_client.disconnect() - - if self.os == "windows": - # create group - options = " --create --group-name=group2" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[0], "SUCCESS: group created group2") - # create existing group. operation should fail - options = " --create --group-name=group2" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[0], "ERROR: unable to create group group2 (400) Bad Request") - self.assertEqual(output[2], '{"name":"already exists"}') - # rename group test - options = " --rename=group3 --group-name=group2" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[0], "SUCCESS: group renamed group2") - # delete group test - options = " --delete --group-name=group3" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[0], "SUCCESS: group deleted group3") - # delete non-empty group test - options = " --delete --group-name=\"Group 1\"" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[0], "ERROR: unable to delete group Group 1 (400) Bad Request") - self.assertEqual(output[2], '{"_":"group is not empty"}') - # delete non-existing group - options = " --delete --group-name=groupn" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[0], "ERROR: invalid group name:groupn") - remote_client.disconnect() + srv_idx = 0 + server = copy.deepcopy(self.servers[srv_idx]) + srv_idx += 1 - """ tests for the group-manage option. adding and moving servers between groups are tested. - These tests require a cluster of four or more nodes. 
""" - def testAddMoveServerListGroup(self): - nodes_add = self.input.param("nodes_add", 1) - remote_client = RemoteMachineShellConnection(self.master) - cli_command = "group-manage" - if self.os == "linux": - # create a group to use in this testcase - options = " --create --group-name=group2" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - self.assertEqual(output, ["SUCCESS: group created group2"]) - # add multiple servers to group - for num in xrange(nodes_add): - options = "--add-servers=\"{0}:8091;{1}:8091\" --group-name=group2 \ - --server-add-username=Administrator --server-add-password=password" \ - .format(self.servers[num + 1].ip, self.servers[num + 2].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="{0}:8091".format(self.servers[num].ip), \ - user="Administrator", password="password") - output = self.del_runCmd_value(output) - # This one is before Watson - #self.assertEqual(output[0], "SUCCESS: add server {0}:8091' to group 'group2'" \ - # .format(self.servers[num + 1].ip)) - self.assertEqual(output[0], "Server {0}:8091 added to group group2" \ - .format(self.servers[num + 1].ip)) - """Server 172.23.105.114:8091 added to group group2""" - # This one is before Watson - #self.assertEqual(output[1], "SUCCESS: add server '{0}:8091' to group 'group2'" \ - # .format(self.servers[num + 2].ip)) - self.assertEqual(output[1], "Server {0}:8091 added to group group2" \ - .format(self.servers[num + 2].ip)) - # add single server to group - for num in xrange(nodes_add): - options = "--add-servers={0}:8091 --group-name=group2 \ - --server-add-username=Administrator --server-add-password=password" \ - .format(self.servers[num + 3].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - # This one is before Watson - #self.assertEqual(output, ["SUCCESS: add server '{0}:8091' to group 'group2'" \ - # .format(self.servers[num + 3].ip)]) - self.assertEqual(output, ["Server {0}:8091 added to group group2" \ - .format(self.servers[num + 3].ip)]) - # list servers in group - for num in xrange(nodes_add): - options = " --list --group-name=group2" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - self.assertEqual(output[0], "group2") - self.assertEqual(output[1], " server: {0}:8091".format(self.servers[num + 1].ip)) - self.assertEqual(output[2], " server: {0}:8091".format(self.servers[num + 2].ip)) - self.assertEqual(output[3], " server: {0}:8091".format(self.servers[num + 3].ip)) - # test move multiple servers - for num in xrange(nodes_add): - options = "--from-group=group2 --to-group=\"Group 1\" \ - --move-servers=\"{0}:8091;{1}:8091;{2}:8091\"".format(self.servers[num + 1].ip, \ - self.servers[num + 2].ip, self.servers[num + 3].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - self.assertEqual(output, ["SUCCESS: move servers from group 'group2' to group 'Group 1'"]) - # clean up by deleting group - 
options = " --delete --group-name=group2" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - self.assertEqual(output, ["SUCCESS: group deleted group2"]) + initial_servers_list = list() + for _ in range(0, num_initial_servers-1): + initial_servers_list.append("%s:%s" % (self.servers[srv_idx].ip, self.servers[srv_idx].port)) + srv_idx += 1 + initial_servers = ",".join(initial_servers_list) - if self.os == "windows": - # create a group to use in this testcase - options = " --create --group-name=group2" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[0], "SUCCESS: group created group2") - # add multiple servers to group - for num in xrange(nodes_add): - options = "--add-servers=\"{0}:8091;{1}:8091\" --group-name=group2 \ - --server-add-username=Administrator --server-add-password=password" \ - .format(self.servers[num + 1].ip, self.servers[num + 2].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="{0}:8091".format(self.servers[num].ip), \ - user="Administrator", password="password") - self.assertEqual(output[0], "SUCCESS: add server '{0}:8091' to group 'group2'" \ - .format(self.servers[num + 1].ip)) - self.assertEqual(output[2], "SUCCESS: add server '{0}:8091' to group 'group2'" \ - .format(self.servers[num + 2].ip)) - # add single server to group - for num in xrange(nodes_add): - options = "--add-servers={0}:8091 --group-name=group2 --server-add-username=Administrator \ - --server-add-password=password".format(self.servers[num + 3].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[0], "SUCCESS: add server '{0}:8091' to group 'group2'" \ - .format(self.servers[num + 3].ip)) - # list servers in group - for num in xrange(nodes_add): - options = " --list --group-name=group2" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[0], "group2") - self.assertEqual(output[2], " server: {0}:8091".format(self.servers[num + 1].ip)) - self.assertEqual(output[4], " server: {0}:8091".format(self.servers[num + 2].ip)) - self.assertEqual(output[6], " server: {0}:8091".format(self.servers[num + 3].ip)) - # test move multiple servers - for num in xrange(nodes_add): - options = "--from-group=group2 --to-group=\"Group 1\" \ - --move-servers=\"{0}:8091;{1}:8091;{2}:8091\"".format(self.servers[num + 1].ip, \ - self.servers[num + 2].ip, self.servers[num + 3].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[0], "SUCCESS: move servers from group 'group2' to group 'Group 1'") - # clean up by deleting group - options = " --delete --group-name=group2" - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[0], "SUCCESS: group deleted group2") + add_servers_list = list() + for _ in 
range(0, num_add_servers): + add_servers_list.append("%s:%s" % (self.servers[srv_idx].ip, self.servers[srv_idx].port)) + srv_idx += 1 + servers_to_add = ",".join(add_servers_list) - """ tests for the server-add option with group manage rider. - These tests require a cluster of four or more nodes. """ - def testServerAddRebalancewithGroupManage(self): - nodes_add = self.input.param("nodes_add", 1) - remote_client = RemoteMachineShellConnection(self.master) + remove_servers_list = list() + for i in range(0, num_remove_servers): + remove_servers_list.append("%s:%s" % (self.servers[i+1].ip, self.servers[i+1].port)) + servers_to_remove = ",".join(remove_servers_list) - if self.os == "linux": - # test server-add command with group manage option - cli_command = "server-add" - for num in xrange(nodes_add): - options = "--server-add={0}:8091 --server-add-username=Administrator \ - --server-add-password=password --group-name=\"Group 1\"" \ - .format(self.servers[num + 1].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - # This one is before Watson - #self.assertEqual(output, ["SUCCESS: add server '{0}:8091' to group 'Group 1'" \ - # .format(self.servers[num + 1].ip)]) - self.assertEqual(output[1], "Server {0}:8091 added to group Group 1" \ - .format(self.servers[num + 1].ip)) - - # test rebalance command with add server and group manage option - cli_command = "rebalance" - for num in xrange(nodes_add): - options = "--server-add={0}:8091 --server-add-username=Administrator \ - --server-add-password=password --group-name=\"Group 1\"" \ - .format(self.servers[num + 2].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - # This one before watson - #self.assertEqual(output[0], "SUCCESS: add server '{0}:8091' to group 'Group 1'" \ - # .format(self.servers[num + 2].ip)) - self.assertEqual(output[1], "Server {0}:8091 added to group Group 1" \ - .format(self.servers[num + 2].ip)) - - self.assertTrue(self._check_output("SUCCESS: rebalanced cluster", output)) + rest = RestConnection(server) + rest.force_eject_node() - for num in xrange(nodes_add): - options = "--server-remove={0}:8091 --server-add={1}:8091 \ - --server-add-username=Administrator --server-add-password=password \ - --group-name=\"Group 1\"".format(self.servers[num + 2].ip, self.servers[num + 3].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - output = self.del_runCmd_value(output) - # This one before watson - #self.assertEqual(output[0], "SUCCESS: add server '{0}:8091' to group 'Group 1'" \ - self.assertTrue(self._check_output("Server %s:8091 added" %self.servers[num + 3].ip, output)) - self.assertTrue(self._check_output("SUCCESS: rebalanced cluster", output)) - - - if self.os == "windows": - # test server-add command with group manage option - cli_command = "server-add" - for num in xrange(nodes_add): - options = "--server-add={0}:8091 --server-add-username=Administrator \ - --server-add-password=password --group-name=\"Group 1\"" \ - .format(self.servers[num + 1].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", 
user="Administrator", password="password") - self.assertEqual(output[0], "SUCCESS: add server '{0}:8091' to group 'Group 1'" \ - .format(self.servers[num + 1].ip)) - # test rebalance command with add server and group manage option - cli_command = "rebalance" - for num in xrange(nodes_add): - options = "--server-add={0}:8091 --server-add-username=Administrator \ - --server-add-password=password --group-name=\"Group 1\"" \ - .format(self.servers[num + 2].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[4], "SUCCESS: add server '{0}:8091' to group 'Group 1'" \ - .format(self.servers[num + 2].ip)) - # old before watson - #self.assertEqual(output[2], "SUCCESS: rebalanced cluster") - self.assertTrue(self._check_output("SUCCESS: rebalanced cluster", output)) + if initialized: + cli = CouchbaseCLI(server, server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(256, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + time.sleep(5) + if initial_servers != "": + _, _, errored = cli.server_add(initial_servers, server.rest_username, server.rest_password, None, None, None) + self.assertTrue(errored, "Unable to add initial servers") + _, _, errored = cli.rebalance(None) + self.assertTrue(errored, "Unable to complete initial rebalance") + if servers_to_add != "": + _, _, errored = cli.server_add(servers_to_add, server.rest_username, server.rest_password, None, None, None) + self.assertTrue(errored, "Unable to add initial servers") + + cli = CouchbaseCLI(server, username, password) + stdout, _, _ = cli.rebalance(servers_to_remove) - for num in xrange(nodes_add): - options = "--server-remove={0}:8091 --server-add={1}:8091 \ - --server-add-username=Administrator --server-add-password=password \ - --group-name=\"Group 1\"".format(self.servers[num + 2].ip, self.servers[num + 3].ip) - output, error = remote_client.execute_couchbase_cli(cli_command=cli_command, \ - options=options, cluster_host="localhost", user="Administrator", password="password") - self.assertEqual(output[0], "SUCCESS: add server '{0}:8091' to group 'Group 1'" \ - .format(self.servers[num + 3].ip)) - self.assertTrue("INFO: rebalancing" in output[1]) - self.assertEqual(output[2], "SUCCESS: rebalanced cluster") + if not expect_error: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Rebalance complete"), + "Expected command to succeed") + self.assertTrue(self.verifyActiveServers(server, num_initial_servers + num_add_servers - num_remove_servers), + "No new servers were added to the cluster") + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") + if initialized: + self.assertTrue(self.verifyActiveServers(server, num_initial_servers), + "Expected no new servers to be in the cluster") + + def testRebalanceInvalidRemoveServer(self): + error_msg = self.input.param("error-msg", "") + + server = copy.deepcopy(self.servers[0]) + + rest = RestConnection(server) + rest.force_eject_node() + + cli = CouchbaseCLI(server, server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(256, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + time.sleep(5) + + stdout, _, _ = 
cli.rebalance("invalid.server:8091") + + self.assertTrue(self.verifyCommandOutput(stdout, True, error_msg), + "Expected error message not found") + self.assertTrue(self.verifyActiveServers(server, 1), + "Expected no new servers to be in the cluster") + + def testFailover(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + num_initial_servers = self.input.param("num-initial-servers", 2) + invalid_node = self.input.param("invalid-node", False) + no_failover_servers = self.input.param("no-failover-servers", False) + force = self.input.param("force", False) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + + server = copy.deepcopy(self.servers[0]) + + initial_servers_list = list() + for i in range(0, num_initial_servers - 1): + initial_servers_list.append("%s:%s" % (self.servers[i + 1].ip, self.servers[i + 1].port)) + initial_servers = ",".join(initial_servers_list) + + server_to_failover = "%s:%s" % (self.servers[1].ip, self.servers[1].port) + if invalid_node: + server_to_failover = "invalid.server:8091" + if no_failover_servers: + server_to_failover = None + + rest = RestConnection(server) + rest.force_eject_node() + + if initialized: + cli = CouchbaseCLI(server, server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(256, None, None, None, None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + if initial_servers != "": + time.sleep(5) + _, _, errored = cli.server_add(initial_servers, server.rest_username, server.rest_password, None, None, + None) + self.assertTrue(errored, "Unable to add initial servers") + _, _, errored = cli.rebalance(None) + self.assertTrue(errored, "Unable to complete initial rebalance") + _, _, success = cli.bucket_create("bucket", '""', "couchbase", 256, None, None, None, None, + None, None) + self.assertTrue(success, "Bucket not created during test setup") + time.sleep(10) + + cli = CouchbaseCLI(server, username, password) + stdout, _, _ = cli.failover(server_to_failover, force) + + if not expect_error: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, "Server failed over"), + "Expected command to succeed") + self.assertTrue(self.verifyActiveServers(server, num_initial_servers - 1), + "Servers not failed over") + self.assertTrue(self.verifyFailedServers(server, 1), + "Not all servers failed over have `inactiveFailed` status") + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") + if initialized: + self.assertTrue(self.verifyActiveServers(server, num_initial_servers), + "Servers should not have been failed over") + + def testUserManage(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + list = self.input.param("list", False) + delete = self.input.param("delete", False) + set = self.input.param("set", False) + ro_username = self.input.param("ro-username", None) + ro_password = self.input.param("ro-password", None) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + + init_ro_username = self.input.param("init-ro-username", None) + init_ro_password = self.input.param("init-ro-password", None) + + server = copy.deepcopy(self.servers[0]) + + rest = RestConnection(server) + 
rest.force_eject_node() + + if initialized: + cli = CouchbaseCLI(server, server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(256, 256, None, "data", None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + if init_ro_username and init_ro_password: + self.assertTrue(rest.create_ro_user(init_ro_username, init_ro_password), "Setting initial ro user failed") + + time.sleep(5) + cli = CouchbaseCLI(server, username, password) + stdout, _, errored = cli.user_manage(delete, list, set, ro_username, ro_password) + + if not expect_error: + if list: + self.assertTrue(stdout[0] == init_ro_username, "Listed ro user is not correct") + else: + self.assertTrue(errored, "Expected command to succeed") + if set: + self.assertTrue(self.verifyReadOnlyUser(server, ro_username), "Read only user was not set") + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") + if initialized and init_ro_username: + self.assertTrue(self.verifyReadOnlyUser(server, init_ro_username), "Read only user was changed") + + def testCollectLogStart(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + all_nodes = self.input.param("all-nodes", False) + nodes = self.input.param("nodes", 0) + upload = self.input.param("upload", None) + upload_host = self.input.param("upload-host", None) + upload_customer = self.input.param("customer", None) + upload_ticket = self.input.param("ticket", None) + invalid_node = self.input.param("invalid-node", None) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + + init_num_servers = self.input.param("init-num-servers", 1) + + server = copy.deepcopy(self.servers[0]) + + rest = RestConnection(server) + rest.force_eject_node() + + servers_to_add = list() + for idx in range(init_num_servers-1): + servers_to_add.append("%s:%s" % (self.servers[idx + 1].ip, self.servers[idx + 1].port)) + servers_to_add = ",".join(servers_to_add) + + log_nodes = None + if nodes > 0 or invalid_node is not None: + log_nodes = list() + for idx in range(nodes): + log_nodes.append("%s:%s" % (self.servers[idx + 1].ip, self.servers[idx + 1].port)) + if invalid_node is not None: + log_nodes.append("invalid:8091") + log_nodes = ",".join(log_nodes) + + if initialized: + cli = CouchbaseCLI(server, server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(256, 256, None, "data", None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + + if init_num_servers > 1: + time.sleep(5) + _, _, errored = cli.server_add(servers_to_add, server.rest_username, server.rest_password, None, None, None) + self.assertTrue(errored, "Could not add initial servers") + _, _, errored = cli.rebalance(None) + self.assertTrue(errored, "Unable to complete initial rebalance") + + time.sleep(5) + cli = CouchbaseCLI(server, username, password) + stdout, _, errored = cli.collect_logs_start(all_nodes, log_nodes, upload, upload_host, upload_customer, upload_ticket) + + if not expect_error: + self.assertTrue(errored, "Expected command to succeed") + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") + + def testCollectLogStop(self): + username = 
self.input.param("username", None) + password = self.input.param("password", None) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + + server = copy.deepcopy(self.servers[0]) + + rest = RestConnection(server) + rest.force_eject_node() + + if initialized: + cli = CouchbaseCLI(server, server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(256, 256, None, "data", None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + time.sleep(5) + cli = CouchbaseCLI(server, username, password) + stdout, _, errored = cli.collect_logs_stop() + + if not expect_error: + self.assertTrue(errored, "Expected command to succeed") + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") + + def testNodeInit(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + data_path = self.input.param("data-path", None) + index_path = self.input.param("index-path", None) + hostname = self.input.param("hostname", None) + initialized = self.input.param("initialized", False) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + + server = copy.deepcopy(self.servers[0]) + + rest = RestConnection(server) + rest.force_eject_node() + + node_settings = rest.get_nodes_self() + + if data_path is not None and data_path == "valid": + data_path = self.log_path + + if index_path is not None and index_path == "valid": + index_path = self.log_path + + if initialized: + cli = CouchbaseCLI(server, server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(256, 256, None, "data", None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + time.sleep(5) + cli = CouchbaseCLI(server, username, password) + stdout, _, errored = cli.node_init(data_path, index_path, hostname) + + if not expect_error: + self.assertTrue(errored, "Expected command to succeed") + if data_path is None: + data_path = node_settings.storage[0].path + if index_path is None: + index_path = node_settings.storage[0].index_path + self.assertTrue(self.verify_node_settings(server, data_path, index_path, hostname), + "Node settings not changed") + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") + + def testGroupManage(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + create = self.input.param("create", None) + delete = self.input.param("delete", None) + list = self.input.param("list", None) + move = self.input.param("move-servers", 0) + rename = self.input.param("rename", None) + name = self.input.param("name", None) + from_group = self.input.param("from-group", None) + to_group = self.input.param("to-group", None) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + + init_group = self.input.param("init-group", None) + init_num_servers = self.input.param("init-num-servers", 1) + invalid_move_server = self.input.param("invalid-move-server", None) + + server = copy.deepcopy(self.servers[0]) + + rest = RestConnection(server) + rest.force_eject_node() + + to_move = None + if move > 0: + 
to_move = []
+            for idx in range(move):
+                to_move.append("%s:%s" % (self.servers[idx].ip, self.servers[idx].port))
+            to_move = ",".join(to_move)
+
+        if invalid_move_server:
+            to_move = invalid_move_server
+
+        servers_to_add = []
+        for idx in range(init_num_servers-1):
+            servers_to_add.append("%s:%s" % (self.servers[idx + 1].ip, self.servers[idx + 1].port))
+        servers_to_add = ",".join(servers_to_add)
+
+        if initialized:
+            cli = CouchbaseCLI(server, server.rest_username, server.rest_password)
+            _, _, success = cli.cluster_init(256, 256, None, "data", None, None, server.rest_username,
+                                             server.rest_password, None)
+            self.assertTrue(success, "Cluster initialization failed during test setup")
+
+            if init_num_servers > 1:
+                time.sleep(5)
+                _, _, errored = cli.server_add(servers_to_add, server.rest_username, server.rest_password, None, None, None)
+                self.assertTrue(errored, "Could not add initial servers")
+                _, _, errored = cli.rebalance(None)
+                self.assertTrue(errored, "Unable to complete initial rebalance")
+
+            if init_group is not None:
+                time.sleep(5)
+                _, _, errored = cli.group_manage(True, False, False, None, None, init_group, None, None)
+
+        time.sleep(5)
+        cli = CouchbaseCLI(server, username, password)
+        stdout, _, errored = cli.group_manage(create, delete, list, to_move, rename, name, to_group, from_group)
+
+        if not expect_error:
+            self.assertTrue(errored, "Expected command to succeed")
+            if create:
+                self.assertTrue(self.verifyGroupExists(server, name), "Group doesn't exist")
+            elif delete:
+                self.assertTrue(not self.verifyGroupExists(server, name), "Group was not deleted")
+            elif rename:
+                self.assertTrue(self.verifyGroupExists(server, name), "Group not renamed")
+            elif move > 0:
+                _, _, errored = cli.group_manage(False, False, False, to_move, None, None, from_group, to_group)
+                self.assertTrue(errored, "Group reset failed")
+
+        else:
+            self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg),
+                            "Expected error message not found")
+
+    def testRecovery(self):
+        username = self.input.param("username", None)
+        password = self.input.param("password", None)
+        servers = self.input.param("servers", 0)
+        recovery_type = self.input.param("recovery-type", None)
+        initialized = self.input.param("initialized", True)
+        expect_error = self.input.param("expect-error")
+        error_msg = self.input.param("error-msg", "")
+
+        skip_failover = self.input.param("skip-failover", False)
+        init_num_servers = self.input.param("init-num-servers", 1)
+        invalid_recover_server = self.input.param("invalid-recover-server", None)
+
+        server = copy.deepcopy(self.servers[0])
+
+        rest = RestConnection(server)
+        rest.force_eject_node()
+
+        servers_to_recover = None
+        if servers > 0:
+            servers_to_recover = []
+            for idx in range(servers):
+                servers_to_recover.append("%s:%s" % (self.servers[idx+1].ip, self.servers[idx+1].port))
+            servers_to_recover = ",".join(servers_to_recover)
+
+        if invalid_recover_server:
+            servers_to_recover = invalid_recover_server
+
+        servers_to_add = []
+        for idx in range(init_num_servers - 1):
+            servers_to_add.append("%s:%s" % (self.servers[idx + 1].ip, self.servers[idx + 1].port))
+        servers_to_add = ",".join(servers_to_add)
+
+        if initialized:
+            cli = CouchbaseCLI(server, server.rest_username, server.rest_password)
+            _, _, success = cli.cluster_init(256, 256, None, "data", None, None, server.rest_username,
+                                             server.rest_password, None)
+            self.assertTrue(success, "Cluster initialization failed during test setup")
+
+            if init_num_servers > 1:
+                time.sleep(5)
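+                # server-add only stages a node; the rebalance that follows is
+                # what actually folds it into the cluster, and recovery always
+                # requires a prior failover. The whole flow this test drives,
+                # reduced to its core (illustrative host, same CouchbaseCLI
+                # wrapper as above):
+                #
+                #     cli.server_add("10.1.1.2:8091", user, password, None, None, None)
+                #     cli.rebalance(None)                     # materialize the add
+                #     cli.failover("10.1.1.2:8091", True)     # hard failover
+                #     cli.recovery("10.1.1.2:8091", "delta")  # mark for recovery
+                #     cli.rebalance(None)                     # fold the node back in
+                _, _, errored =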
cli.server_add(servers_to_add, server.rest_username, server.rest_password, None, None, + None) + self.assertTrue(errored, "Could not add initial servers") + _, _, errored = cli.rebalance(None) + self.assertTrue(errored, "Unable to complete initial rebalance") + + if servers_to_recover and not skip_failover: + for restore_server in servers_to_recover.split(","): + _, _, errored = cli.failover(restore_server, True) + self.assertTrue(errored, "Unable to failover servers") + + time.sleep(5) + cli = CouchbaseCLI(server, username, password) + stdout, _, errored = cli.recovery(servers_to_recover, recovery_type) + + if not expect_error: + self.assertTrue(errored, "Expected command to succeed") + self.assertTrue(self.verifyRecoveryType(server, servers_to_recover, recovery_type), "Servers not recovered") + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") + + def testServerReadd(self): + username = self.input.param("username", None) + password = self.input.param("password", None) + servers = self.input.param("servers", 0) + initialized = self.input.param("initialized", True) + expect_error = self.input.param("expect-error") + error_msg = self.input.param("error-msg", "") + + skip_failover = self.input.param("skip-failover", False) + init_num_servers = self.input.param("init-num-servers", 1) + invalid_recover_server = self.input.param("invalid-recover-server", None) + + server = copy.deepcopy(self.servers[0]) + + rest = RestConnection(server) + rest.force_eject_node() + + servers_to_recover = None + if servers > 0: + servers_to_recover = [] + for idx in range(servers): + servers_to_recover.append("%s:%s" % (self.servers[idx + 1].ip, self.servers[idx + 1].port)) + servers_to_recover = ",".join(servers_to_recover) + + if invalid_recover_server: + servers_to_recover = invalid_recover_server + + servers_to_add = [] + for idx in range(init_num_servers - 1): + servers_to_add.append("%s:%s" % (self.servers[idx + 1].ip, self.servers[idx + 1].port)) + servers_to_add = ",".join(servers_to_add) + + if initialized: + cli = CouchbaseCLI(server, server.rest_username, server.rest_password) + _, _, success = cli.cluster_init(256, 256, None, "data", None, None, server.rest_username, + server.rest_password, None) + self.assertTrue(success, "Cluster initialization failed during test setup") + + if init_num_servers > 1: + time.sleep(5) + _, _, errored = cli.server_add(servers_to_add, server.rest_username, server.rest_password, None, None, + None) + self.assertTrue(errored, "Could not add initial servers") + _, _, errored = cli.rebalance(None) + self.assertTrue(errored, "Unable to complete initial rebalance") + + if servers_to_recover and not skip_failover: + for restore_server in servers_to_recover.split(","): + _, _, errored = cli.failover(restore_server, True) + self.assertTrue(errored, "Unable to failover servers") + + time.sleep(5) + cli = CouchbaseCLI(server, username, password) + stdout, _, errored = cli.server_readd(servers_to_recover) + + if not expect_error: + self.assertTrue(errored, "Expected command to succeed") + self.assertTrue(self.verifyRecoveryType(server, servers_to_recover, "full"), "Servers not recovered") + else: + self.assertTrue(self.verifyCommandOutput(stdout, expect_error, error_msg), + "Expected error message not found") def test_change_admin_password_with_read_only_account(self): """ this test automation for bug MB-20170. 
@@ -1727,6 +1971,191 @@ def test_change_admin_password_with_read_only_account(self):
         else:
             self.log.info("readonly account does not support on this version")
 
+    def test_directory_backup_stuctrue(self):
+        """ directory of backup structure should be like
+            /backup_path/date/date-mode/ as in
+            /tmp/backup/2016-08-19T185902Z/2016-08-19T185902Z-full/
+            automation test for bug in ticket MB-20021
+
+            default params to run:
+                backup_cmd=cbbackup,load_all=false,num_sasl_buckets=1 """
+        backup_cmd = self.input.param("backup_cmd", None)
+        load_all = self.input.param("load_all", None)
+        num_backup_bucket = self.input.param("num_backup_bucket", "all")
+        num_sasl_buckets = self.input.param("num_sasl_buckets", 1)
+        self.bucket_size = 200
+        self.cluster.create_default_bucket(self.master, self.bucket_size,
+                                           self.num_replicas,
+                                           enable_replica_index=self.enable_replica_index,
+                                           eviction_policy=self.eviction_policy)
+        self._create_sasl_buckets(self.master, num_sasl_buckets)
+        self.buckets = RestConnection(self.master).get_buckets()
+        if load_all is None:
+            self.shell.execute_cbworkloadgen("Administrator", "password", 10000,
+                                             0.95, "default", 125, " -j")
+        elif str(load_all).lower() == "true":
+            for bucket in self.buckets:
+                self.log.info("load data to bucket %s " % bucket.name)
+                self.shell.execute_cbworkloadgen("Administrator", "password", 10000,
+                                                 0.95, bucket.name, 125, " -j")
+        self.log.info("remove any backup data on node ")
+        self.shell.execute_command("rm -rf %sbackup" % self.tmp_path)
+        self.log.info("create backup dir on node ")
+        self.shell.execute_command("mkdir %sbackup " % self.tmp_path)
+
+        """ start backup data and check directory structure """
+        output, error = self.shell.execute_command("date | grep -o '....$'")
+        dir_start_with = output[0] + "-"
+        backup_all_buckets = True
+        partial_backup_buckets = []
+        if self.cb_version[:5] not in COUCHBASE_FROM_SPOCK and \
+                                      backup_cmd == "cbbackup":
+            if num_backup_bucket == "all":
+                self.shell.execute_cluster_backup()
+            else:
+                if str(num_backup_bucket).isdigit() and \
+                            int(num_sasl_buckets) >= int(num_backup_bucket):
+                    backup_all_buckets = False
+                    for i in range(int(num_backup_bucket)):
+                        partial_backup_buckets.append("bucket" + str(i))
+                        option = "-b " + "bucket%s " % str(i)
+                        self.shell.execute_cluster_backup(command_options=option,
+                                                          delete_backup=False)
+            output, error = self.shell.execute_command("ls %sbackup " % self.tmp_path)
+            if output and len(output) == 1:
+                if output[0].startswith(dir_start_with):
+                    self.log.info("first level of backup dir is correct %s" % output[0])
+                else:
+                    self.fail("Incorrect directory name %s. It should start with %s"
+                              % (output[0], dir_start_with))
+            elif output and len(output) > 1:
+                self.fail("first backup dir level should only have one directory")
+            elif not output:
+                self.fail("backup command did not run. Empty directory %s" % output)
+            output, error = self.shell.execute_command("ls %sbackup/* " % self.tmp_path)
+            if output and len(output) == 1:
+                if output[0].startswith(dir_start_with):
+                    self.log.info("second level of backup dir is correct %s" % output[0])
+                else:
+                    self.fail("Incorrect directory name %s. It should start with %s"
+                              % (output[0], dir_start_with))
+            elif output and len(output) > 1:
+                self.fail("second backup level should only have 1 directory at this run")
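+            # Taken together, the two listings above pin the expected layout
+            # down to exactly one directory per level:
+            #
+            #     /tmp/backup/2016-08-19T185902Z/
+            #     /tmp/backup/2016-08-19T185902Z/2016-08-19T185902Z-full/
+            #
+            # A rough local equivalent of the same check (illustrative only;
+            # os.listdir in place of the remote shell):
+            #
+            #     import os
+            #     level1 = os.listdir("/tmp/backup")
+            #     assert len(level1) == 1 and level1[0].startswith(dir_start_with)
+            elif not output: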
+                self.fail("backup command did not run. Empty directory %s" % output)
+            output, error = self.shell.execute_command("ls %sbackup/*/* " % self.tmp_path)
+            self.log.info("backed-up buckets: %s" % output)
+
+            """ check total buckets backup """
+            if backup_all_buckets:
+                for bucket in self.buckets:
+                    if "bucket-" + bucket.name in output:
+                        self.log.info("bucket %s was backed up " % bucket.name)
+                    else:
+                        self.fail("failed to back up bucket %s " % bucket.name)
+            else:
+                for bucket in partial_backup_buckets:
+                    if "bucket-%s" % bucket in output:
+                        self.log.info("bucket %s was backed up " % bucket)
+                    else:
+                        self.fail("failed to back up bucket %s " % bucket)
+        elif self.cb_version[:5] in COUCHBASE_FROM_SPOCK:
+            if backup_cmd == "cbbackupmgr":
+                backup_repo = "backup-test"
+                node_credential = "--username Administrator --password password "
+                self.log.info("Create backup repo : %s" % backup_repo)
+                """ format of setting backup repo
+                    ./cbbackupmgr config --archive /tmp/backup --repo backup-test """
+                self.shell.execute_command("%s%s%s config --archive %s --repo %s"
+                                           % (self.cli_command_path, backup_cmd,
+                                              self.cmd_ext, self.cmd_backup_path,
+                                              backup_repo))
+                output, error = self.shell.execute_command("ls %s" % self.backup_path)
+                result = output
+                if result and backup_repo in result:
+                    self.log.info("repo %s successfully created " % backup_repo)
+                    output, error = self.shell.execute_command("ls %s%s"
+                                                               % (self.backup_path, backup_repo))
+                    if output and "backup-meta.json" in output:
+                        self.log.info("backup-meta.json successfully created ")
+                    elif output:
+                        self.fail("failed to create backup-meta.json file")
+                if result and "logs" in result:
+                    self.log.info("logs dir successfully created ")
+                    output, error = self.shell.execute_command("ls %slogs"
+                                                               % self.backup_path)
+                    if output and "backup.log" in output:
+                        self.log.info("backup.log file successfully created ")
+                    else:
+                        self.fail("failed to create backup.log file")
+                """ start backup bucket
+                    command format:
+                    cbbackupmgr backup --archive /tmp/backup --repo backup-test
+                                       --host ip_addr
+                                       --username Administrator
+                                       --password password """
+                if num_backup_bucket == "all":
+                    self.shell.execute_command("%s%s%s backup --archive %s "
+                                               " --repo %s --host %s:8091 %s"
+                                               % (self.cli_command_path, backup_cmd,
+                                                  self.cmd_ext, self.cmd_backup_path,
+                                                  backup_repo, self.shell.ip,
+                                                  node_credential))
+                    out, err = self.shell.execute_command("ls %s%s"
+                                                          % (self.backup_path, backup_repo))
+                    if out and len(out) > 1:
+                        """ This dir holds one directory starting with a number
+                            plus one file, so the directory lists first.
+                            Example: 2016-08-..  backup-meta.json """
+                        if out[0].startswith(dir_start_with):
+                            self.log.info("First level of backup dir is correct %s"
+                                          % out[0])
+                        else:
+                            self.fail("Incorrect directory name %s. "
+                                      "It should start with %s"
+                                      % (out[0], dir_start_with))
+                    elif out:
+                        self.fail("backup did not run correctly %s" % out)
+                    out, err = self.shell.execute_command("ls %s%s/%s*"
+                                                          % (self.backup_path, backup_repo,
+                                                             dir_start_with))
+                    """ get buckets in directory """
+                    if out and len(out) >= 1:
+                        if "plan.json" in out:
+                            out.remove("plan.json")
+                        else:
+                            self.fail("Missing plan.json file in this dir")
+                        out = [w.split("-", 1)[0] for w in out]
+                    if backup_all_buckets:
+                        for bucket in self.buckets:
+                            if bucket.name in out:
+                                self.log.info("Bucket %s was backed up "
+                                              % bucket.name)
+                            else:
+                                self.fail("failed to back up bucket %s "
+                                          % bucket.name)
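+                    # Entries under the timestamp directory are expected to be
+                    # named "<bucket>-<suffix>", and the split("-", 1) above
+                    # keeps only the part before the first dash, e.g.:
+                    #
+                    #     >>> "default-a3f2".split("-", 1)[0]
+                    #     'default'
+                    #
+                    # which implicitly assumes bucket names without dashes.
+                    """ Check content of backup bucket.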
+ Total dir and files: + bucket-config.json data full-text.json gsi.json + range.json views.json """ + backup_folder_content = ["bucket-config.json", "data", + "full-text.json", "gsi.json", + "range.json", "views.json"] + for bucket in self.buckets: + out, err = self.shell.execute_command("ls %s%s/%s*/%s-*" + % (self.backup_path, backup_repo, + dir_start_with, bucket.name)) + if out and len(out) > 1: + self.log.info("Check content of backup dir of bucket %s: %s" + % (bucket.name, out)) + self.assertEqual(out, backup_folder_content) + else: + self.fail("Missing backup dir or files in backup bucket %s" + % bucket.name) + self.log.info("Remove backup directory at the end of test") + self.shell.execute_command("rm -rf %sbackup" % self.tmp_path) + self.shell.disconnect() + def _check_output(self, word_check, output): found = False if len(output) >=1 : diff --git a/pytests/clitest/importexporttest.py b/pytests/clitest/importexporttest.py new file mode 100644 index 000000000..f1e835da9 --- /dev/null +++ b/pytests/clitest/importexporttest.py @@ -0,0 +1,630 @@ +import copy +import json, filecmp +import os, shutil, ast +from threading import Thread + +from membase.api.rest_client import RestConnection +from memcached.helper.data_helper import MemcachedClientHelper +from TestInput import TestInputSingleton +from clitest.cli_base import CliBaseTest +from remote.remote_util import RemoteMachineShellConnection +from membase.helper.bucket_helper import BucketOperationHelper +from membase.helper.cluster_helper import ClusterOperationHelper +from couchbase_cli import CouchbaseCLI +from pprint import pprint +from testconstants import CLI_COMMANDS, COUCHBASE_FROM_WATSON,\ + COUCHBASE_FROM_SPOCK, LINUX_COUCHBASE_BIN_PATH,\ + WIN_COUCHBASE_BIN_PATH, COUCHBASE_FROM_SHERLOCK + + +class ImportExportTests(CliBaseTest): + def setUp(self): + super(ImportExportTests, self).setUp() + + def tearDown(self): + super(ImportExportTests, self).tearDown() + self.import_back = self.input.param("import_back", False) + if self.import_back: + self.log.info("clean up server in import back tests") + imp_servers = copy.deepcopy(self.servers[2:]) + BucketOperationHelper.delete_all_buckets_or_assert(imp_servers, self) + ClusterOperationHelper.cleanup_cluster(imp_servers, imp_servers[0]) + ClusterOperationHelper.wait_for_ns_servers_or_assert(imp_servers, self) + + + def test_export_from_empty_bucket(self): + options = {"load_doc": False, "bucket":"empty"} + return self._common_imex_test("export", options) + + def test_export_from_sasl_bucket(self): + options = {"load_doc": True, "docs":"1000"} + return self._common_imex_test("export", options) + + def test_export_and_import_back(self): + options = {"load_doc": True, "docs":"10"} + return self._common_imex_test("export", options) + + def test_imex_during_rebalance(self): + server = copy.deepcopy(self.servers[0]) + if self.test_type == "import": + self.test_type = "cbimport" + self._remote_copy_import_file(self.import_file) + if len(self.buckets) >= 1: + if self.imex_type == "json": + for bucket in self.buckets: + key_gen = "%index%" + + def test_imex_non_default_port(self): + options = {"load_doc": True, "docs":"10"} + server = copy.deepcopy(self.servers[0]) + import_method = self.input.param("import_method", "file://") + default_port = 8091 + new_port = 9000 + port_changed = False + test_failed = False + try: + """ change default port from 8091 to 9000 """ + port_cmd = "%s%s%s %s -c %s:%s -u Administrator -p password --cluster-port=%s "\ + % (self.cli_command_path, "couchbase-cli", 
self.cmd_ext,
+                       "cluster-edit", server.ip, default_port, new_port)
+            output, error = self.shell.execute_command(port_cmd)
+            if self._check_output("SUCCESS", output):
+                self.log.info("Port was changed from 8091 to 9000")
+                port_changed = True
+            else:
+                self.fail("Failed to change port 8091 to 9000")
+            if self.test_type == "import":
+                self.test_type = "cbimport"
+                self._remote_copy_import_file(self.import_file)
+                if len(self.buckets) >= 1:
+                    if self.imex_type == "json":
+                        for bucket in self.buckets:
+                            key_gen = "%index%"
+                            """ ./cbimport json -c 12.11.10.132 -u Administrator -p password
+                                -b default -d file:///tmp/export/default -f list -g %index% """
+                            imp_cmd_str = "%s%s%s %s -c %s:%s -u Administrator -p password "\
+                                          "-b %s -d %s%s -f %s -g %s"\
+                                          % (self.cli_command_path, self.test_type, self.cmd_ext,
+                                             self.imex_type, server.ip, new_port, bucket.name,
+                                             import_method, self.des_file, self.format_type, key_gen)
+                            output, error = self.shell.execute_command(imp_cmd_str)
+                            self.log.info("Output from execute command %s " % output)
+            elif self.test_type == "export":
+                self.test_type = "cbexport"
+                self.ex_path = self.tmp_path + "export/"
+                if len(self.buckets) >= 1:
+                    for bucket in self.buckets:
+                        self.log.info("load json to bucket %s " % bucket.name)
+                        load_cmd = "%s%s%s -n %s:%s -u Administrator -p password "\
+                                   "-j -i %s -b %s "\
+                                   % (self.cli_command_path, "cbworkloadgen", self.cmd_ext,
+                                      server.ip, new_port, options["docs"], bucket.name)
+                        self.shell.execute_command(load_cmd)
+                self.shell.execute_command("rm -rf %sexport " % self.tmp_path)
+                self.shell.execute_command("mkdir %sexport " % self.tmp_path)
+                """ /opt/couchbase/bin/cbexport json -c localhost -u Administrator
+                    -p password -b default -f list -o /tmp/test4.zip """
+                if len(self.buckets) >= 1:
+                    for bucket in self.buckets:
+                        export_file = self.ex_path + bucket.name
+                        exe_cmd_str = "%s%s%s %s -c %s:%s -u Administrator "\
+                                      "-p password -b %s -f %s -o %s"\
+                                      % (self.cli_command_path, self.test_type,
+                                         self.cmd_ext, self.imex_type, server.ip,
+                                         new_port, bucket.name, self.format_type,
+                                         export_file)
+                        self.shell.execute_command(exe_cmd_str)
+                        self._verify_export_file(bucket.name, options)
+        except Exception, e:
+            if e:
+                print "Exception thrown: ", e
+            test_failed = True
+        finally:
+            if port_changed:
+                self.log.info("change port back to default port 8091")
+                port_cmd = "%s%s%s %s -c %s:%s -u Administrator -p password --cluster-port=%s "\
+                           % (self.cli_command_path, "couchbase-cli", self.cmd_ext,
+                              "cluster-edit", server.ip, new_port, default_port)
+                output, error = self.shell.execute_command(port_cmd)
+            if test_failed:
+                self.fail("Test failed. Check the exception thrown above.")
+
+    def test_imex_flags(self):
+        """ imex_type = json
+            cluster_flag = -c
+            user_flag = -u
+            password_flag = -p
+            bucket_flag = -b
+            dataset_flag = -d
+            format_flag = -f
+            generate_flag = -g
+            format_type = list/lines
+            import_file = json_list_1000_lines
+                          =lines,....
+            ./cbimport json -c 12.11.10.132 -u Administrator -p password
+                -b default -d file:///tmp/export/default -f list -g %index% """
+        server = copy.deepcopy(self.servers[0])
+        self.sample_file = self.input.param("sample_file", None)
+        self.cluster_flag = self.input.param("cluster_flag", "-c")
+        self.user_flag = self.input.param("user_flag", "-u")
+        self.password_flag = self.input.param("password_flag", "-p")
+        self.bucket_flag = self.input.param("bucket_flag", "-b")
+        self.dataset_flag = self.input.param("dataset_flag", "-d")
+        self.format_flag = self.input.param("format_flag", "-f")
+        self.generate_flag = self.input.param("generate_flag", "-g")
+        if self.test_type == "import":
+            self.test_type = "cbimport"
+        cmd_str = "%s%s%s %s %s %s %s Administrator %s password %s default %s "\
+                  "file://%sdefault %s lines %s %%index%%"\
+                  % (self.cli_command_path, self.test_type, self.cmd_ext,
+                     self.imex_type, self.cluster_flag, server.ip,
+                     self.user_flag, self.password_flag, self.bucket_flag,
+                     self.dataset_flag, self.tmp_path, self.format_flag,
+                     self.generate_flag)
+        output, error = self.shell.execute_command(cmd_str)
+        if self.imex_type == "":
+            if "Unknown flag: -c" in output:
+                self.log.info("%s detected missing 'json' option " % self.test_type)
+            else:
+                self.fail("%s could not detect missing 'json' option"
+                          % self.test_type)
+        if self.cluster_flag == "":
+            if "Invalid subcommand `%s`" % server.ip in output \
+                                    and "Required Flags:" in output:
+                self.log.info("%s detected missing '-c or --cluster' flag"
+                              % self.test_type)
+            else:
+                self.fail("%s could not detect missing '-c or --cluster' flag"
+                          % self.test_type)
+        if self.user_flag == "":
+            if "Expected flag: Administrator" in output \
+                                    and "Required Flags:" in output:
+                self.log.info("%s detected missing '-u or --username' flag"
+                              % self.test_type)
+            else:
+                self.fail("%s could not detect missing '-u or --username' flag"
+                          % self.test_type)
+        if self.password_flag == "":
+            if "Expected flag: password" in output \
+                                    and "Required Flags:" in output:
+                self.log.info("%s detected missing '-p or --password' flag"
+                              % self.test_type)
+            else:
+                self.fail("%s could not detect missing '-p or --password' flag"
+                          % self.test_type)
+        if self.bucket_flag == "":
+            if "Expected flag: default" in output \
+                                    and "Required Flags:" in output:
+                self.log.info("%s detected missing '-b or --bucket' flag"
+                              % self.test_type)
+            else:
+                self.fail("%s could not detect missing '-b or --bucket' flag"
+                          % self.test_type)
+        if self.dataset_flag == "":
+            if "Expected flag: file://%sdefault" % self.tmp_path in output \
+                                    and "Required Flags:" in output:
+                self.log.info("%s detected missing '-d or --dataset' flag"
+                              % self.test_type)
+            else:
+                self.fail("%s could not detect missing '-d or --dataset' flag"
+                          % self.test_type)
+        if self.format_flag == "":
+            if "Expected flag: lines" in output \
+                                    and "Required Flags:" in output:
+                self.log.info("%s detected missing '-f or --format' flag"
+                              % self.test_type)
+            else:
+                self.fail("%s could not detect missing '-f or --format' flag"
+                          % self.test_type)
+        if self.generate_flag == "":
+            if "Expected flag: %index%" in output \
+                                    and "Required Flags:" in output:
+                self.log.info("%s detected missing '-g or --generate' flag"
+                              % self.test_type)
+            else:
+                self.fail("%s could not detect missing '-g or --generate' flag"
+                          % self.test_type)
+        self.log.info("Output from execute command %s " % output)
+
+    def test_imex_optional_flags(self):
+        """ imex_type = json
+            threads_flag = -t
+            errors_flag = -e
+            logs_flag = -l """
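+        # With all three optional flags supplied, the command assembled below
+        # comes out roughly as follows (host and paths illustrative):
+        #
+        #     /opt/couchbase/bin/cbimport json -c 10.1.1.1 -u Administrator \
+        #         -p password -b default -d file:///tmp/export/default \
+        #         -f lines -g %index% -t 4 -e /tmp/errors/error -l /tmp/logs/log
+        server =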
copy.deepcopy(self.servers[0]) + self.threads_flag = self.input.param("threads_flag", "") + self.errors_flag = self.input.param("errors_flag", "") + self.logs_flag = self.input.param("logs_flag", "") + self.import_file = self.input.param("import_file", None) + self.format_type = self.input.param("format_type", None) + import_method = self.input.param("import_method", "file://") + threads_flag = "" + if self.threads_flag != "": + threads_flag = "-t" + errors_flag = "" + errors_path = "" + if self.errors_flag != "": + errors_flag = "-e" + self.shell.execute_command("rm -rf %serrors" % self.tmp_path) + self.shell.execute_command("mkdir %serrors" % self.tmp_path) + if self.errors_flag == "empty": + errors_path = "" + elif self.errors_flag == "error": + errors_path = self.errors_flag + self.shell.execute_command("rm -rf %serror" % self.cli_command_path) + elif self.errors_flag == "relative_path": + errors_path = "~/error" + self.shell.execute_command("rm -rf ~/error") + elif self.errors_flag == "absolute_path": + errors_path = self.tmp_path + "errors/" + self.errors_flag + logs_flag = "" + logs_path = "" + if self.logs_flag != "": + logs_flag = "-l" + self.shell.execute_command("rm -rf %slogs" % self.tmp_path) + self.shell.execute_command("mkdir %slogs" % self.tmp_path) + if self.logs_flag == "empty": + logs_path = "" + elif self.logs_flag == "log": + logs_path = self.logs_flag + self.shell.execute_command("rm -rf %slog" % self.cli_command_path) + elif self.logs_flag == "relative_path": + logs_path = "~/log" + self.shell.execute_command("rm -rf ~/log") + elif self.logs_flag == "absolute_path": + logs_path = self.tmp_path + "logs/" + self.logs_flag + error_check = True + if self.test_type == "import": + self.test_cmd = "cbimport" + self._remote_copy_import_file(self.import_file) + if self.imex_type == "json": + for bucket in self.buckets: + key_gen = "%index%" + """ ./cbimport json -c 12.11.10.132 -u Administrator -p password + -b default -d file:///tmp/export/default -f list -g %index% """ + imp_cmd_str = "%s%s%s %s -c %s -u Administrator -p password -b %s "\ + "-d %s%s -f %s -g %%index%% %s %s %s %s %s %s"\ + % (self.cli_command_path, self.test_cmd, self.cmd_ext, + self.imex_type, server.ip, bucket.name, + import_method, self.des_file, self.format_type, + threads_flag, self.threads_flag, + errors_flag, errors_path, + logs_flag, logs_path) + self.log.info("command to run %s " % imp_cmd_str) + output, error = self.shell.execute_command(imp_cmd_str) + self.log.info("Output from execute command %s " % output) + if self.threads_flag == "empty": + error_check = False + if "Expected argument for option: -t" in output: + self.log.info("%s detected empty value of threads argument" + % self.test_type) + else: + self.fail("%s could not detect empty value of argument" + % self.test_type) + elif self.threads_flag == "notnumber": + error_check = False + if "Unable to process value for flag: -t" in output: + self.log.info("%s detected incorrect value of threads argument" + % self.test_type) + else: + self.fail("%s could not detect incorrect value of argument" + % self.test_type) + if self.errors_flag == "empty": + error_check = False + if "Expected argument for option: -e" in output: + self.log.info("%s detected empty value of error argument" + % self.test_type) + else: + self.fail("%s could not detect empty value of argument" + % self.test_type) + elif self.errors_flag == "relative_path": + output, error = self.shell.execute_command("ls %s " % self.root_path) + if self._check_output(errors_path[2:], 
output): + error_check = False + self.log.info("%s error file created" % self.test_type) + else: + self.fail("%s failed to create error file in error flag" + % self.test_type) + elif self.errors_flag == "absolute_path": + output, error = self.shell.execute_command("ls %s " % self.cli_command_path) + if self._check_output("error", output): + error_check = False + self.log.info("%s error file created" % self.test_type) + else: + self.fail("%s failed to create error file in error flag" + % self.test_type) + elif self.errors_flag != "": + output, error = self.shell.execute_command("ls %s " % errors_path) + if self._check_output(errors_path, output): + error_check = False + self.log.info("%s error file created" % self.test_type) + else: + self.fail("%s failed to create error file in error flag" + % self.test_type) + if self.logs_flag == "empty": + error_check = False + if "Expected argument for option: -l" in output: + self.log.info("%s detected empty value of log argument" + % self.test_type) + else: + self.fail("%s could not detect empty value of log argument" + % self.test_type) + elif self.logs_flag == "relative_path": + output, error = self.shell.execute_command("ls %s " % self.root_path) + if self._check_output(logs_path[2:], output): + error_check = False + self.log.info("%s log file created" % self.test_type) + else: + self.fail("%s failed to create log file in log flag" + % self.test_type) + elif self.logs_flag == "absolute_path": + output, error = self.shell.execute_command("ls %s " % self.cli_command_path) + if self._check_output("log", output): + error_check = False + self.log.info("%s log file created" % self.test_type) + else: + self.fail("%s failed to create log file in log flag" + % self.test_type) + + if error_check and not self._check_output("successfully", output): + self.fail("failed to run optional flags") + + def test_import_invalid_folder_structure(self): + """ not in 4.6 """ + options = {"load_doc": False} + return self._common_imex_test("import", options) + + """ /opt/couchbase/bin/cbimport json -c 12.11.10.130:8091 + -u Administrator -p password -b travel-sample + -d /opt/couchbase/samples/travel-sample.zip -f sample """ + def test_import_invalid_json_sample(self): + options = {"load_doc": False} + return self._common_imex_test("import", options) + + def test_import_json_sample(self): + """ test_import_json_sample + -p default_bucket=False,imex_type=json,sample_file=travel-sample """ + username = self.input.param("username", None) + password = self.input.param("password", None) + self.sample_file = self.input.param("sample_file", None) + self.imex_type = self.input.param("imex_type", None) + sample_file_path = self.sample_files_path + self.sample_file + ".zip" + server = copy.deepcopy(self.servers[0]) + if username is None: + username = server.rest_username + if password is None: + password = server.rest_password + if self.sample_file is not None: + cmd = "cbimport" + imp_cmd_str = "%s%s%s %s -c %s -u %s -p %s -b %s -d %s -f sample"\ + % (self.cli_command_path, cmd, self.cmd_ext, self.imex_type, + server.ip, username, password, self.sample_file, + sample_file_path) + output, error = self.shell.execute_command(imp_cmd_str) + if not self._check_output("SUCCESS", output): + self.log.info("Output from command %s" % output) + self.fail("Failed to load sample file %s" % self.sample_file) + + """ imex_type=json,format_type=list,import_file=json_list_1000_lines + =lines,....
""" + def test_import_json_file(self): + options = {"load_doc": False} + self.import_file = self.input.param("import_file", None) + return self._common_imex_test("import", options) + + def test_import_json_generate_keys(self): + options = {"load_doc": False} + self.import_file = self.input.param("import_file", None) + return self._common_imex_test("import", options) + + """ not in 4.6 """ + def test_import_json_with_limit_first_10_lines(self): + options = {"load_doc": False} + return self._common_imex_test("import", options) + + def _common_imex_test(self, cmd, options): + username = self.input.param("username", None) + password = self.input.param("password", None) + path = self.input.param("path", None) + self.short_flag = self.input.param("short_flag", True) + import_method = self.input.param("import_method", "file://") + if "url" in import_method: + import_method = "" + self.ex_path = self.tmp_path + "export/" + master = self.servers[0] + server = copy.deepcopy(master) + + if username is None: + username = server.rest_username + if password is None: + password = server.rest_password + if path is None: + self.log.info("test with absolute path ") + elif path == "local": + self.log.info("test with local bin path ") + self.cli_command_path = "cd %s; ./" % self.cli_command_path + self.buckets = RestConnection(server).get_buckets() + if "export" in cmd: + cmd = "cbexport" + if options["load_doc"]: + if len(self.buckets) >= 1: + for bucket in self.buckets: + self.log.info("load json to bucket %s " % bucket.name) + load_cmd = "%s%s%s -n %s:8091 -u %s -p %s -j -i %s -b %s "\ + % (self.cli_command_path, "cbworkloadgen", self.cmd_ext, + server.ip, username, password, options["docs"], + bucket.name) + self.shell.execute_command(load_cmd) + """ remove previous export directory at tmp dir and re-create it + in linux: /tmp/export + in windows: /cygdrive/c/tmp/export """ + self.shell.execute_command("rm -rf %sexport " % self.tmp_path) + self.shell.execute_command("mkdir %sexport " % self.tmp_path) + """ /opt/couchbase/bin/cbexport json -c localhost -u Administrator + -p password -b default -f list -o /tmp/test4.zip """ + if len(self.buckets) >= 1: + for bucket in self.buckets: + export_file = self.ex_path + bucket.name + exe_cmd_str = "%s%s%s %s -c %s -u %s -p %s -b %s -f %s -o %s"\ + % (self.cli_command_path, cmd, self.cmd_ext, self.imex_type, + server.ip, username, password, bucket.name, + self.format_type, export_file) + output, error = self.shell.execute_command(exe_cmd_str) + self._verify_export_file(bucket.name, options) + + if self.import_back: + import_file = export_file + import_servers = copy.deepcopy(self.servers) + imp_rest = RestConnection(import_servers[2]) + import_shell = RemoteMachineShellConnection(import_servers[2]) + imp_rest.force_eject_node() + self.sleep(2) + imp_rest = RestConnection(import_servers[2]) + status = False + info = imp_rest.get_nodes_self() + if info.memoryQuota and int(info.memoryQuota) > 0: + self.quota = info.memoryQuota + imp_rest.init_node() + self.cluster.rebalance(import_servers[2:], [import_servers[3]], []) + self.cluster.create_default_bucket(import_servers[2], "250", self.num_replicas, + enable_replica_index=self.enable_replica_index, + eviction_policy=self.eviction_policy) + imp_cmd_str = "%s%s%s %s -c %s -u %s -p %s -b %s -d file://%s -f %s -g %s"\ + % (self.cli_command_path, "cbimport", self.cmd_ext, self.imex_type, + import_servers[2].ip, username, password, "default", + import_file, self.format_type, "index") + output, error = 
import_shell.execute_command(imp_cmd_str) + if self._check_output("error", output): + self.fail("Failed to run import back to bucket") + elif "import" in cmd: + cmd = "cbimport" + if import_method != "": + self.im_path = self.tmp_path + "import/" + self.log.info("copy import file from local to remote") + output, error = self.shell.execute_command("ls %s " % self.tmp_path) + if self._check_output("import", output): + self.log.info("remove %simport directory" % self.tmp_path) + self.shell.execute_command("rm -rf %simport " % self.tmp_path) + output, error = self.shell.execute_command("ls %s " % self.tmp_path) + if self._check_output("import", output): + self.fail("failed to delete import dir ") + self.shell.execute_command("mkdir %simport " % self.tmp_path) + if self.import_file is not None: + src_file = "resources/imex/"+ self.import_file + else: + self.fail("Need import_file param") + des_file = self.im_path + self.import_file + self.shell.copy_file_local_to_remote(src_file, des_file) + else: + des_file = self.import_file + + if len(self.buckets) >= 1: + if self.imex_type == "json": + for bucket in self.buckets: + key_gen = "%index%" + """ ./cbimport json -c 12.11.10.132 -u Administrator -p password + -b default -d file:///tmp/export/default -f list -g %index% """ + imp_cmd_str = "%s%s%s %s -c %s -u %s -p %s -b %s -d %s%s -f %s -g %s"\ + % (self.cli_command_path, cmd, self.cmd_ext, self.imex_type, + server.ip, username, password, bucket.name, + import_method, des_file, + self.format_type, key_gen) + output, error = self.shell.execute_command(imp_cmd_str) + self.log.info("Output from execute command %s " % output) + """ Json `file:///root/json_list` imported to `http://12.11.10.130:8091` successfully """ + json_loaded = False + if "invalid" in self.import_file: + if self._check_output("Json import failed:", output): + json_loaded = True + elif self._check_output("successfully", output): + json_loaded = True + if not json_loaded: + self.fail("Failed to execute command") + + def _verify_export_file(self, export_file_name, options): + if not options["load_doc"]: + if "bucket" in options and options["bucket"] == "empty": + output, error = self.shell.execute_command("ls %s" % self.ex_path) + if export_file_name in output[0]: + self.log.info("check if export file %s is empty" % export_file_name) + output, error = self.shell.execute_command("cat %s%s"\ + % (self.ex_path, export_file_name)) + if output: + self.fail("file %s should be empty" % export_file_name) + else: + self.fail("Failed to export.
File %s does not exist" \ + % export_file_name) + elif options["load_doc"]: + found = self.shell.file_exists(self.ex_path, export_file_name) + if found: + self.log.info("copy export file from remote to local") + if os.path.exists("/tmp/export"): + shutil.rmtree("/tmp/export") + os.makedirs("/tmp/export") + self.shell.copy_file_remote_to_local(self.ex_path+export_file_name,\ + "/tmp/export/"+export_file_name) + self.log.info("compare 2 json files") + if self.format_type == "lines": + sample_file = open("resources/imex/json_%s_lines" % options["docs"]) + samples = sample_file.readlines() + export_file = open("/tmp/export/"+ export_file_name) + exports = export_file.readlines() + if sorted(samples) == sorted(exports): + self.log.info("export and sample json match") + else: + self.fail("export and sample json do not match") + sample_file.close() + export_file.close() + elif self.format_type == "list": + sample_file = open("resources/imex/json_list_%s_lines" % options["docs"]) + samples = sample_file.read() + samples = ast.literal_eval(samples) + samples.sort(key=lambda k: k['name']) + export_file = open("/tmp/export/"+ export_file_name) + exports = export_file.read() + exports = ast.literal_eval(exports) + exports.sort(key=lambda k: k['name']) + + if samples == exports: + self.log.info("export and sample json files match") + else: + self.fail("export and sample json files do not match") + sample_file.close() + export_file.close() + else: + self.fail("There is no export file in %s%s"\ + % (self.ex_path, export_file_name)) + + def _check_output(self, word_check, output): + found = False + if len(output) >= 1: + for x in output: + if word_check.lower() in x.lower(): + self.log.info("Found \"%s\" in CLI output" % word_check) + found = True + break + return found + + def _remote_copy_import_file(self, import_file): + import_method = self.input.param("import_method", "file://") + if "url" in import_method: + import_method = "" + if import_method != "": + self.im_path = self.tmp_path + "import/" + self.log.info("copy import file from local to remote") + output, error = self.shell.execute_command("ls %s " % self.tmp_path) + if self._check_output("import", output): + self.log.info("remove %simport directory" % self.tmp_path) + self.shell.execute_command("rm -rf %simport " % self.tmp_path) + output, error = self.shell.execute_command("ls %s " % self.tmp_path) + if self._check_output("import", output): + self.fail("failed to delete import dir ") + self.shell.execute_command("mkdir %simport " % self.tmp_path) + if import_file is not None: + self.src_file = "resources/imex/"+ import_file + else: + self.fail("Need import_file param") + self.des_file = self.im_path + import_file + self.shell.copy_file_local_to_remote(self.src_file, self.des_file) + else: + self.des_file = self.import_file diff --git a/pytests/cwc/cwctests.py b/pytests/cwc/cwctests.py index ee81ff639..c1d494b0b 100644 --- a/pytests/cwc/cwctests.py +++ b/pytests/cwc/cwctests.py @@ -159,6 +159,7 @@ def test_cli_start_collect_log(self): --upload-host='{4}' --customer='{5}' --ticket='{6}' " .format(self.bin_path, \ command, self.master.ip, num_node_collect, self.uploadHost, self.customer, \ self.ticket)) + self.log.info("Command output is {0} {1}".format(o, e)) shell.log_command_output(o, e) if "runCmd" in o[0]: o = o[1:] diff --git a/pytests/ent_backup_restore/enterprise_backup_restore_base.py b/pytests/ent_backup_restore/enterprise_backup_restore_base.py index 5bdb0cadc..95eb97fd9 100644 ---
a/pytests/ent_backup_restore/enterprise_backup_restore_base.py +++ b/pytests/ent_backup_restore/enterprise_backup_restore_base.py @@ -180,7 +180,7 @@ def backup_create_validate(self): self.log.info(msg) def backup_cluster(self): - args = "backup --archive {0} --repo {1} --host http://{2}:{3} --username {4} --password {5}". \ + args = "backup --archive {0} --repo {1} --cluster http://{2}:{3} --username {4} --password {5}". \ format(self.backupset.directory, self.backupset.name, self.backupset.cluster_host.ip, self.backupset.cluster_host.port, self.backupset.cluster_host_username, self.backupset.cluster_host_password) diff --git a/pytests/epengine/bucket_config.py b/pytests/epengine/bucket_config.py index 8d939b914..bb3032a49 100644 --- a/pytests/epengine/bucket_config.py +++ b/pytests/epengine/bucket_config.py @@ -19,6 +19,7 @@ from remote.remote_util import RemoteMachineShellConnection from membase.helper.rebalance_helper import RebalanceHelper import re +import traceback class BucketConfig(unittest.TestCase): @@ -73,6 +74,7 @@ def test_modify_bucket_params(self): self.log.info("Modifying timeSynchronization value after bucket creation .....") self._modify_bucket() except Exception, e: + traceback.print_exc() self.fail('[ERROR] Modify testcase failed .., {0}'.format(e)) def test_restart(self): @@ -82,7 +84,8 @@ def test_restart(self): self.log.info("Verifying bucket settings after restart ..") self._check_config() except Exception, e: - self.fail("[ERROR] Restart failed with exception {0}".format(e)) + traceback.print_exc() + self.fail("[ERROR] Check data after restart failed with exception {0}".format(e)) def test_failover(self): num_nodes=1 @@ -93,6 +96,7 @@ def test_failover(self): self.log.info("Verifying bucket settings after failover ..") self._check_config() except Exception, e: + traceback.print_exc() self.fail('[ERROR]Failed to failover .. , {0}'.format(e)) def test_rebalance_in(self): @@ -143,8 +147,9 @@ def test_backup_diff_bucket(self): ''' Helper functions for above testcases ''' - #create a bucket if it doesn't exist + #create a bucket if it doesn't exist. 
The drift parameter is currently unused def _create_bucket(self, lww=True, drift=False, name=None): + if lww: self.lww=lww @@ -157,7 +162,7 @@ def _create_bucket(self, lww=True, drift=False, name=None): self.servers) info = self.rest.get_nodes_self() self.rest.create_bucket(bucket=self.bucket, - ramQuotaMB=512,authType='sasl',lww=self.lww,drift=self.drift) + ramQuotaMB=512,authType='sasl',lww=self.lww) try: ready = BucketOperationHelper.wait_for_memcached(self.master, self.bucket) @@ -214,8 +219,16 @@ def _reboot_server(self): self.log.info("Warming-up servers ..") time.sleep(100) + + + def _check_config(self): - result = self.rest.get_bucket_json(self.bucket)["timeSynchronization"] + rc = self.rest.get_bucket_json(self.bucket) + if 'conflictResolutionType' in rc: + conflictResolution = rc['conflictResolutionType'] + self.assertTrue(conflictResolution == 'lww','Expected conflict resolution of lww but got {0}'.format(conflictResolution)) + + + """ drift is disabled in 4.6, commenting out for now as it may come back later if self.lww and not self.drift: time_sync = 'enabledWithoutDrift' elif self.lww and self.drift: @@ -225,3 +238,6 @@ def _check_config(self): self.assertEqual(result,time_sync, msg='ERROR, Mismatch on expected time synchronization values, ' \ 'expected {0} but got {1}'.format(time_sync, result)) self.log.info("Verified results") + """ + + diff --git a/pytests/epengine/lww_stats.py b/pytests/epengine/lww_stats.py new file mode 100644 index 000000000..a6ea46f2d --- /dev/null +++ b/pytests/epengine/lww_stats.py @@ -0,0 +1,402 @@ +import time +from basetestcase import BaseTestCase + + +from sdk_client import SDKClient + + +from memcached.helper.data_helper import VBucketAwareMemcached, MemcachedClientHelper +from remote.remote_util import RemoteMachineShellConnection +from couchbase_helper.documentgenerator import BlobGenerator +from membase.api.rest_client import RestConnection, RestHelper + +import zlib + +class LWWStatsTests(BaseTestCase): + + # The stats-related cbepctl vbucket commands actually apply to the whole bucket but we need a vbucket parameter + # (which is apparently ignored) + DUMMY_VBUCKET = ' 123' # the leading space is needed + DEFAULT_THRESHOLD = 5000000 + ONE_HOUR_IN_SECONDS = 3600 + + def setUp(self): + + super(LWWStatsTests, self).setUp() + + + + + def tearDown(self): + super(LWWStatsTests, self).tearDown() + + + def test_time_sync_threshold_setting(self): + + self.log.info('starting test_time_sync_threshold_setting') + + # bucket is created with lww in base test case using the LWW parameter + + # get the stats + client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0]) + ahead_threshold = int(client.stats()["ep_hlc_ahead_threshold_us"]) + self.assertTrue(ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD, + 'Ahead threshold mismatch expected: {0} actual {1}'.format(LWWStatsTests.DEFAULT_THRESHOLD, ahead_threshold)) + # change the setting and verify it is per the new setting - this may or may not be supported + + shell = RemoteMachineShellConnection(self.servers[0]) + output, error = shell.execute_cbepctl(self.buckets[0], "", "set vbucket_param", + "hlc_drift_ahead_threshold_us ", str(LWWStatsTests.DEFAULT_THRESHOLD/2) + LWWStatsTests.DUMMY_VBUCKET) + if len(error) > 0: + self.fail('Failed to set the drift counter threshold, please check the logs.') + + ahead_threshold = int(client.stats()["ep_hlc_ahead_threshold_us"]) + self.assertTrue(ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD/2, + 'Ahead threshold
mismatch expected: {0} actual {1}'.format(LWWStatsTests.DEFAULT_THRESHOLD/2, ahead_threshold)) + + + # generally need to fill out a matrix here behind/ahead - big and small + + + + def test_time_sync_threshold_setting_rest_call(self): + + self.log.info('starting test_time_sync_threshold_setting_rest_call') + + # bucket is created with lww in base test case using the LWW parameter + + + client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0]) + + + rest = RestConnection(self.master) + self.assertTrue( rest.set_cas_drift_threshold( self.buckets[0], 10000, 20000), 'Unable to set the CAS drift threshold') + time.sleep(15) # give the stats a few seconds to settle in + stats = client.stats() + + + self.assertTrue( int(stats['ep_hlc_ahead_threshold_us']) == 10000 * 1000, + 'Ahead threshold incorrect. Expected {0} actual {1}'.format(10000 * 1000 , stats['ep_hlc_ahead_threshold_us'])) + + self.assertTrue( int(stats['ep_hlc_behind_threshold_us']) == 20000 * 1000, + 'Behind threshold incorrect. Expected {0} actual {1}'.format(20000 * 1000, stats['ep_hlc_behind_threshold_us'])) + + + + # generally need to fill out a matrix here behind/ahead - big and small + + + def test_poisoned_cas(self): + + + self.log.info('starting test_poisoned_cas') + + + + """ + - set the clock ahead + - do lots of sets and get some CASs + - do a set and get the CAS (flag, CAS, value) and save it + - set the clock back + - verify the CAS is still big on new sets + - reset the CAS + - do the vbucket max cas and verify + - do a new mutation and verify the CAS is smaller + + + """ + + + + + sdk_client = SDKClient(scheme='couchbase',hosts = [self.servers[0].ip], bucket = self.buckets[0].name) + mc_client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0]) + shell = RemoteMachineShellConnection(self.servers[0]) + + # move the system clock ahead to poison the CAS + shell = RemoteMachineShellConnection(self.servers[0]) + self.assertTrue( shell.change_system_time( LWWStatsTests.ONE_HOUR_IN_SECONDS ), 'Failed to advance the clock') + + + output,error = shell.execute_command('date') + self.log.info('Date after the clock is set forward {0}'.format( output )) + + + + + rc = sdk_client.set('key1', 'val1') + rc = mc_client.get('key1' ) + poisoned_cas = rc[1] + self.log.info('The poisoned CAS is {0}'.format(poisoned_cas)) + + + + + # do lots of mutations to set the max CAS for all vbuckets + + gen_load = BlobGenerator('key-for-cas-test', 'value-for-cas-test-', self.value_size, end=10000) + self._load_all_buckets(self.master, gen_load, "create", 0) + + + # move the clock back again and verify the CAS stays large + self.assertTrue( shell.change_system_time( -LWWStatsTests.ONE_HOUR_IN_SECONDS ), 'Failed to change the clock') + output, error = shell.execute_command('date') + self.log.info('Date after the clock is set backwards {0}'.format( output)) + + + + + rc = sdk_client.set('key2', 'val2') + second_poisoned_cas = rc.cas + self.log.info('The second_poisoned CAS is {0}'.format(second_poisoned_cas)) + self.assertTrue( second_poisoned_cas > poisoned_cas, + 'Second poisoned CAS {0} is not larger than the first poisoned CAS {1}'.format(second_poisoned_cas, poisoned_cas)) + + + + + # reset the CAS for all vbuckets. This needs to be done in conjunction with a clock change. If the clock is not + # changed then the CAS will immediately continue with the clock. I see two scenarios: + # 1. Set the clock back 1 hour and the CAS back 30 minutes, the CAS should be used + # 2.
Set the clock back 1 hour, set the CAS back 2 hours, the clock should be used + + + # do case 1, set the CAS back 30 minutes. Calculation below assumes the CAS is in nanoseconds + earlier_max_cas = poisoned_cas - 30 * 60 * 1000000000 + for i in range(self.vbuckets): + output, error = shell.execute_cbepctl(self.buckets[0], "", "set_vbucket_param", + "max_cas ", str(i) + ' ' + str(earlier_max_cas) ) + if len(error) > 0: + self.fail('Failed to set the max cas') + + # verify the max CAS + + for i in range(self.vbuckets): + max_cas = int( mc_client.stats('vbucket-details')['vb_' + str(i) + ':max_cas'] ) + self.assertTrue(max_cas == earlier_max_cas, + 'Max CAS not properly set for vbucket {0} set as {1} and observed {2}'.format(i,earlier_max_cas, max_cas ) ) + self.log.info('Per cbstats the max cas for bucket {0} is {1}'.format(i, max_cas) ) + + + + rc1 = sdk_client.set('key-after-resetting cas', 'val1') + rc2 = mc_client.get('key-after-resetting cas' ) + set_cas_after_reset_max_cas = rc2[1] + self.log.info('The later CAS is {0}'.format(set_cas_after_reset_max_cas)) + self.assertTrue( set_cas_after_reset_max_cas < poisoned_cas, + 'For {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}'.format('key-after-resetting cas', set_cas_after_reset_max_cas, poisoned_cas)) + + + # do a bunch of sets and verify the CAS is small - this is really only one set, need to do more + + + gen_load = BlobGenerator('key-for-cas-test-after-cas-is-reset', 'value-for-cas-test-', self.value_size, end=1000) + self._load_all_buckets(self.master, gen_load, "create", 0) + + gen_load.reset() + while gen_load.has_next(): + key, value = gen_load.next() + try: + rc = mc_client.get( key ) + #rc = sdk_client.get(key) + cas = rc[1] + self.assertTrue( cas < poisoned_cas, 'For key {0} CAS has not decreased. Current CAS {1} poisoned CAS {2}'.format(key, cas, poisoned_cas)) + except: + self.log.info('get error with {0}'.format(key)) + + + rc = sdk_client.set('key3', 'val1') + better_cas = rc.cas + + self.log.info('The better CAS is {0}'.format(better_cas)) + + self.assertTrue( better_cas < poisoned_cas, 'The CAS was not improved') + + + + + + + + # set the clock way ahead - remote_util_OS.py (new) + # do a bunch of mutations - not really needed + # do the fix command - cbepctl, the existing way (remote util) + + # do some mutations, verify they conform to the new CAS - build on the CAS code, + # where to iterate over the keys and get the CAS? + """ + use the SDK client + while gen.has_next(): + key, value = gen.next() + get the cas for these + also do the vbucket stats + + + """ + # also can be checked in the vbucket stats somewhere + # revert the clock + + + def test_drift_stats(self): + + # An exercise in filling out the matrix with the right amount of code, + # we want to test (ahead,behind) and (setwithmeta, deleteWithmeta) and (active,replica). + # So for now let's do the set/del in sequences + + + self.log.info('starting test_drift_stats') + + + check_ahead_threshold = self.input.param("check_ahead_threshold",True) + + self.log.info('Checking the ahead threshold?
{0}'.format(check_ahead_threshold)) + + + sdk_client = SDKClient(scheme='couchbase',hosts = [self.servers[0].ip], bucket = self.buckets[0].name) + mc_client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0]) + shell = RemoteMachineShellConnection(self.servers[0]) + + + + # get the current time + rc = sdk_client.set('key1', 'val1') + current_time_cas = rc.cas + + + + test_key = 'test-set-with-metaxxxx' + vbId = (((zlib.crc32(test_key)) >> 16) & 0x7fff) & (self.vbuckets - 1) + + + # verifying the case where we are within the threshold, do a set and del, neither should trigger + rc = mc_client.setWithMetaLWW(test_key, 'test-value', 0, 0, current_time_cas) + rc = mc_client.delWithMetaLWW(test_key, 0, 0, current_time_cas+1) + + vbucket_stats = mc_client.stats('vbucket-details') + ahead_exceeded = int( vbucket_stats['vb_' + str(vbId) + ':drift_ahead_threshold_exceeded'] ) + self.assertTrue( ahead_exceeded == 0, 'Ahead exceeded expected is 0 but is {0}'.format( ahead_exceeded)) + + + behind_exceeded = int( vbucket_stats['vb_' + str(vbId) + ':drift_behind_threshold_exceeded'] ) + self.assertTrue( behind_exceeded == 0, 'Behind exceeded expected is 0 but is {0}'.format( behind_exceeded)) + + + # out of curiosity, log the total counts + self.log.info('Total stats: total abs drift {0} and total abs drift count {1}'. + format(vbucket_stats['vb_' + str(vbId) + ':total_abs_drift'], + vbucket_stats['vb_' + str(vbId) + ':total_abs_drift_count'])) + + + + + + + # do the ahead set with meta case - verify: ahead threshold exceeded, total_abs_drift count and abs_drift + if check_ahead_threshold: + stat_descriptor = 'ahead' + cas = current_time_cas + 5 * LWWStatsTests.DEFAULT_THRESHOLD + + else: + stat_descriptor = 'behind' + cas = current_time_cas - 5 * LWWStatsTests.DEFAULT_THRESHOLD + + + rc = mc_client.setWithMetaLWW(test_key, 'test-value', 0, 0, cas) + rc = mc_client.delWithMetaLWW(test_key, 0, 0, cas+1) + + + + + + # verify the vbucket stats + vbucket_stats = mc_client.stats('vbucket-details') + drift_counter_stat = 'vb_' + str(vbId) + ':drift_' + stat_descriptor + '_threshold_exceeded' + threshold_exceeded = int( mc_client.stats('vbucket-details')[drift_counter_stat] ) + # MB-21450 self.assertTrue( ahead_exceeded == 2, '{0} exceeded expected is 1 but is {1}'. + # format( stat_descriptor, threshold_exceeded)) + + self.log.info('Total stats: total abs drift {0} and total abs drift count {1}'.
+ format(vbucket_stats['vb_' + str(vbId) + ':total_abs_drift'], + vbucket_stats['vb_' + str(vbId) + ':total_abs_drift_count'])) + + + # and verify the bucket stats: ep_active_hlc_drift_count, ep_clock_cas_drift_threshold_exceeded, + # ep_active_hlc_drift + bucket_stats = mc_client.stats() + ep_active_hlc_drift_count = int(bucket_stats['ep_active_hlc_drift_count']) + ep_clock_cas_drift_threshold_exceeded = int(bucket_stats['ep_clock_cas_drift_threshold_exceeded']) + ep_active_hlc_drift = int(bucket_stats['ep_active_hlc_drift']) + + # Drift count appears to be the number of mutations + self.assertTrue( ep_active_hlc_drift_count > 0, 'ep_active_hlc_drift_count is 0, expected a positive value') + + + # drift itself is the sum of the absolute values of all drifts, so check that it is greater than 0 + self.assertTrue( ep_active_hlc_drift > 0, 'ep_active_hlc_drift is 0, expected a positive value') + + # the actual drift count is a little more granular + expected_drift_threshold_exceed_count = 2 + self.assertTrue( expected_drift_threshold_exceed_count == ep_clock_cas_drift_threshold_exceeded, + 'ep_clock_cas_drift_threshold_exceeded is incorrect. Expected {0}, actual {1}'. + format(expected_drift_threshold_exceed_count, + ep_clock_cas_drift_threshold_exceeded) ) + + + + + + def test_logical_clock_ticks(self): + + + self.log.info('starting test_logical_clock_ticks') + + + + sdk_client = SDKClient(scheme='couchbase',hosts = [self.servers[0].ip], bucket = self.buckets[0].name) + mc_client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0]) + shell = RemoteMachineShellConnection(self.servers[0]) + + + # do a bunch of mutations to set the max cas + gen_load = BlobGenerator('key-for-cas-test-logical-ticks', 'value-for-cas-test-', self.value_size, end=10000) + self._load_all_buckets(self.master, gen_load, "create", 0) + + vbucket_stats = mc_client.stats('vbucket-details') + base_total_logical_clock_ticks = 0 + for i in range(self.vbuckets): + #print vbucket_stats['vb_' + str(i) + ':logical_clock_ticks'] + base_total_logical_clock_ticks = base_total_logical_clock_ticks + int(vbucket_stats['vb_' + str(i) + ':logical_clock_ticks']) + self.log.info('The base total logical clock ticks is {0}'.format( base_total_logical_clock_ticks)) + + + + + # move the system clock back so the logical counter part of HLC is used and the logical clock ticks + # stat is incremented + self.assertTrue( shell.change_system_time( -LWWStatsTests.ONE_HOUR_IN_SECONDS ), 'Failed to set the clock back') + + # do more mutations + NUMBER_OF_MUTATIONS = 10000 + gen_load = BlobGenerator('key-for-cas-test-logical-ticks', 'value-for-cas-test-', self.value_size, end=NUMBER_OF_MUTATIONS) + self._load_all_buckets(self.master, gen_load, "create", 0) + + vbucket_stats = mc_client.stats('vbucket-details') + total_logical_clock_ticks = 0 + for i in range(self.vbuckets): + total_logical_clock_ticks = total_logical_clock_ticks + int(vbucket_stats['vb_' + str(i) + ':logical_clock_ticks']) + + + self.log.info('The total logical clock ticks is {0}'.format( total_logical_clock_ticks)) + + self.assertTrue( total_logical_clock_ticks - base_total_logical_clock_ticks == NUMBER_OF_MUTATIONS, + 'Expected clock tick {0} actual {1}'.format(NUMBER_OF_MUTATIONS, + total_logical_clock_ticks- base_total_logical_clock_ticks )) + + + # put the clock back, do mutations, the HLC and the tick counter should increment + #LWWStatsTests + # will it wrap?
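[editor's note] A minimal standalone sketch of the key-to-vbucket mapping that the drift and max-cas checks above depend on. The crc32 expression is copied verbatim from test_drift_stats; the 1024-vbucket default and the helper name are illustrative assumptions, and the snippet targets Python 2 like the rest of this suite:

import zlib

def vbucket_for_key(key, num_vbuckets=1024):  # assumption: default vbucket count; the tests read self.vbuckets
    # Same mapping as test_drift_stats: CRC32 of the key, keep bits 16..30,
    # then mask down to the (power-of-two) number of vbuckets.
    return (((zlib.crc32(key)) >> 16) & 0x7fff) & (num_vbuckets - 1)

# The per-vbucket stat keys read from 'vbucket-details' are then built as, e.g.:
vb = vbucket_for_key('test-set-with-metaxxxx')
max_cas_stat = 'vb_%s:max_cas' % vb
ahead_stat = 'vb_%s:drift_ahead_threshold_exceeded' % vb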
+ + + + diff --git a/pytests/epengine/opschangecas.py b/pytests/epengine/opschangecas.py index 3bfec1343..84acb739e 100644 --- a/pytests/epengine/opschangecas.py +++ b/pytests/epengine/opschangecas.py @@ -58,7 +58,7 @@ def test_meta_rebalance_out(self): rebalance.result() replica_CAS = mc_replica.getMeta(KEY_NAME)[4] - get_meta_resp = mc_replica.getMeta(KEY_NAME,request_extended_meta_data=True) + get_meta_resp = mc_replica.getMeta(KEY_NAME,request_extended_meta_data=False) #print 'replica CAS {0}'.format(replica_CAS) #print 'replica ext meta {0}'.format(get_meta_resp) @@ -75,7 +75,7 @@ def test_meta_rebalance_out(self): #print 'active cas {0}'.format(active_CAS) self.assertTrue(replica_CAS == active_CAS, 'cas mismatch active: {0} replica {1}'.format(active_CAS,replica_CAS)) - self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set') + # not supported in 4.6 self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set') def test_meta_failover(self): KEY_NAME = 'key2' @@ -126,7 +126,7 @@ def test_meta_failover(self): active_CAS = mc_active.getMeta(KEY_NAME)[4] #print 'active cas {0}'.format(active_CAS) - get_meta_resp = mc_active.getMeta(KEY_NAME,request_extended_meta_data=True) + get_meta_resp = mc_active.getMeta(KEY_NAME,request_extended_meta_data=False) #print 'replica CAS {0}'.format(replica_CAS) #print 'replica ext meta {0}'.format(get_meta_resp) @@ -165,12 +165,12 @@ def test_meta_soft_restart(self): cas_post = mc_active.getMeta(KEY_NAME)[4] #print 'post cas {0}'.format(cas_post) - get_meta_resp = mc_active.getMeta(KEY_NAME,request_extended_meta_data=True) + get_meta_resp = mc_active.getMeta(KEY_NAME,request_extended_meta_data=False) #print 'post CAS {0}'.format(cas_post) #print 'post ext meta {0}'.format(get_meta_resp) self.assertTrue(cas_pre == cas_post, 'cas mismatch active: {0} replica {1}'.format(cas_pre, cas_post)) - self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set') + # extended meta is not supported self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set') def test_meta_hard_restart(self): KEY_NAME = 'key2' @@ -205,12 +205,12 @@ def test_meta_hard_restart(self): cas_post = mc_active.getMeta(KEY_NAME)[4] #print 'post cas {0}'.format(cas_post) - get_meta_resp = mc_active.getMeta(KEY_NAME,request_extended_meta_data=True) + get_meta_resp = mc_active.getMeta(KEY_NAME,request_extended_meta_data=False) #print 'post CAS {0}'.format(cas_post) #print 'post ext meta {0}'.format(get_meta_resp) self.assertTrue(cas_pre == cas_post, 'cas mismatch active: {0} replica {1}'.format(cas_pre, cas_post)) - self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set') + # extended meta is not supported self.assertTrue( get_meta_resp[5] == 1, msg='Metadata indicate conflict resolution is not set') ''' Test Incremental sets on cas and max cas values for keys ''' @@ -218,7 +218,7 @@ def test_cas_set(self): self.log.info(' Starting test-sets') self._load_ops(ops='set', mutations=20) time.sleep(60) - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) ''' Test Incremental updates on cas and max cas values for keys ''' @@ -228,7 +228,7 @@ def test_cas_updates(self): #self._load_ops(ops='add') self._load_ops(ops='replace',mutations=20) #self._load_ops(ops='delete') - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) ''' Test 
Incremental deletes on cas and max cas values for keys ''' @@ -238,7 +238,7 @@ def test_cas_deletes(self): #self._load_ops(ops='add') self._load_ops(ops='replace',mutations=20) self._load_ops(ops='delete') - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) ''' Test expiry on cas and max cas values for keys ''' @@ -248,7 +248,7 @@ def test_cas_expiry(self): #self._load_ops(ops='add') #self._load_ops(ops='replace',mutations=20) self._load_ops(ops='expiry') - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) self._check_expiry() ''' Test touch on cas and max cas values for keys @@ -259,26 +259,26 @@ def test_cas_touch(self): #self._load_ops(ops='add') #self._load_ops(ops='replace',mutations=20) self._load_ops(ops='touch') - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) ''' Test getMeta on cas and max cas values for keys ''' def test_cas_getMeta(self): self.log.info(' Starting test-getMeta') self._load_ops(ops='set', mutations=20) - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) #self._load_ops(ops='add') self._load_ops(ops='replace',mutations=20) - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) self._load_ops(ops='delete') - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) def test_cas_setMeta_lower(self): self.log.info(' Starting test-getMeta') self._load_ops(ops='set', mutations=20) - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) k=0 # Select arbit key @@ -300,7 +300,7 @@ def test_cas_setMeta_lower(self): set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, TEST_SEQNO, cas, '123456789',vbucket_id, add_extended_meta_data=True, conflict_resolution_mode=1) cas_post_meta = mc_active.getMeta(key)[4] - get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False) #print 'cr2 {0}'.format(get_meta_2) max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) @@ -316,7 +316,7 @@ def test_cas_setMeta_lower(self): set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, 225, cas+1, '223456789',vbucket_id, add_extended_meta_data=True, conflict_resolution_mode=1) cas_post_meta = mc_active.getMeta(key)[4] - get_meta_3 = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_3 = mc_active.getMeta(key,request_extended_meta_data=False) #print 'cr3 {0}'.format(get_meta_3) max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) @@ -332,7 +332,7 @@ def test_cas_setMeta_higher(self): self.log.info(' Starting test-getMeta') self._load_ops(ops='set', mutations=20) - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) k=0 @@ -346,7 +346,7 @@ def test_cas_setMeta_higher(self): mc_active = self.client.memcached(key) mc_master = self.client.memcached_for_vbucket( vbucket_id ) mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id) - get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=False) #print 'cr {0}'.format(get_meta_1) #print '-'*100 TEST_SEQNO = 123 @@ -356,7 +356,7 @@ def test_cas_setMeta_higher(self): set_with_meta_resp = 
mc_active.set_with_meta(key, 0, 0, TEST_SEQNO, TEST_CAS, '123456789',vbucket_id, add_extended_meta_data=True, conflict_resolution_mode=1) cas_post_meta = mc_active.getMeta(key)[4] - get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False) #print 'cr2 {0}'.format(get_meta_2) max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) @@ -371,7 +371,7 @@ def test_cas_setMeta_higher(self): set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, 125, TEST_CAS+1, '223456789',vbucket_id, add_extended_meta_data=True, conflict_resolution_mode=1) cas_post_meta = mc_active.getMeta(key)[4] - get_meta_3 = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_3 = mc_active.getMeta(key,request_extended_meta_data=False) #print 'cr3 {0}'.format(get_meta_3) max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) @@ -389,11 +389,15 @@ def test_cas_setMeta_higher(self): def test_cas_deleteMeta(self): self.log.info(' Starting test-deleteMeta') + + + # load 20 kvs and check the CAS self._load_ops(ops='set', mutations=20) time.sleep(60) - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) k=0 + test_cas = 456 while k<1: @@ -406,10 +410,15 @@ def test_cas_deleteMeta(self): mc_replica = self.client.memcached_for_replica_vbucket(vbucket_id) TEST_SEQNO = 123 - TEST_CAS = 456 + test_cas = test_cas + 1 + + # get the meta data cas = mc_active.getMeta(key)[4] - set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, TEST_SEQNO, TEST_CAS, '123456789',vbucket_id) + + set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, TEST_SEQNO, test_cas, '123456789',vbucket_id) + + cas_post_meta = mc_active.getMeta(key)[4] max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) @@ -423,7 +432,7 @@ def test_cas_deleteMeta(self): max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) self.assertTrue(max_cas == cas, '[ERROR]Max cas is not equal to cas {0}'.format(cas)) - set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, 125, TEST_CAS, '123456789',vbucket_id) + set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, 125, test_cas, '123456789',vbucket_id) cas_post_meta = mc_active.getMeta(key)[4] max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) @@ -437,10 +446,10 @@ def test_cas_deleteMeta(self): self.log.info('Doing delete with meta, using a lower CAS value') get_meta_pre = mc_active.getMeta(key)[4] - del_with_meta_resp = mc_active.del_with_meta(key, 0, 0, TEST_SEQNO, TEST_CAS, TEST_CAS+1) + del_with_meta_resp = mc_active.del_with_meta(key, 0, 0, TEST_SEQNO, test_cas, test_cas+1) get_meta_post = mc_active.getMeta(key)[4] max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) - self.assertTrue(max_cas > TEST_CAS+1, '[ERROR]Max cas {0} is not greater than delete cas {1}'.format(max_cas, TEST_CAS)) + self.assertTrue(max_cas > test_cas+1, '[ERROR]Max cas {0} is not greater than delete cas {1}'.format(max_cas, test_cas)) ''' Testing skipping conflict resolution, whereby the last write wins, and it does neither cas CR nor rev id CR ''' @@ -448,7 +457,7 @@ def test_cas_skip_conflict_resolution(self): self.log.info(' Starting test_cas_skip_conflict_resolution ..') 
self._load_ops(ops='set', mutations=20) - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) k=0 @@ -472,7 +481,7 @@ def test_cas_skip_conflict_resolution(self): self.log.info('Forcing conflict_resolution to allow insertion of lower Seq Number') lower_cas = int(cas)-1 - set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, low_seq, lower_cas, '123456789',vbucket_id, skipCR=True) + set_with_meta_resp = mc_active.set_with_meta(key, 0, 0, low_seq, lower_cas, '123456789',vbucket_id) cas_post_meta = mc_active.getMeta(key)[4] all_post_meta = mc_active.getMeta(key) post_seq = mc_active.getMeta(key)[3] @@ -492,7 +501,7 @@ def test_revid_conflict_resolution(self): self.log.info(' Starting test_cas_revid_conflict_resolution ..') self._load_ops(ops='set', mutations=20) - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) k=0 @@ -520,7 +529,7 @@ def test_revid_conflict_resolution(self): cas_post_meta = mc_active.getMeta(key)[4] all_post_meta = mc_active.getMeta(key) post_seq = mc_active.getMeta(key)[3] - get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False) #print 'cr2 {0}'.format(get_meta_2) max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) self.log.info('Expect No conflict_resolution to occur, and the last updated mutation to be the winner..') @@ -535,7 +544,7 @@ def test_cas_conflict_resolution(self): self.log.info(' Starting test_cas_conflict_resolution ..') self._load_ops(ops='set', mutations=20) - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) k=0 @@ -605,7 +614,7 @@ def test_restart_revid_conflict_resolution(self): pre_seq = mc_active.getMeta(key)[3] pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) all = mc_active.getMeta(key) - get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=False) #print 'cr {0}'.format(get_meta_1) self.log.info('all meta data before set_meta_force {0}'.format(all)) self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas)) @@ -615,7 +624,7 @@ def test_restart_revid_conflict_resolution(self): cas_post = mc_active.getMeta(key)[4] all_post_meta = mc_active.getMeta(key) post_seq = mc_active.getMeta(key)[3] - get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False) #print 'cr {0}'.format(get_meta_2) max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..') @@ -634,7 +643,7 @@ def test_restart_revid_conflict_resolution(self): cas_restart = mc_active.getMeta(key)[4] #print 'post cas {0}'.format(cas_post) - get_meta_resp = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_resp = mc_active.getMeta(key,request_extended_meta_data=False) #print 'post CAS {0}'.format(cas_post) #print 'post ext meta {0}'.format(get_meta_resp) @@ -666,7 +675,7 @@ def test_rebalance_revid_conflict_resolution(self): pre_seq = mc_active.getMeta(key)[3] pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) all = mc_active.getMeta(key) - 
get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=False) #print 'cr {0}'.format(get_meta_1) self.log.info('all meta data before set_meta_force {0}'.format(all)) self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas)) @@ -676,7 +685,7 @@ def test_rebalance_revid_conflict_resolution(self): cas_post = mc_active.getMeta(key)[4] all_post_meta = mc_active.getMeta(key) post_seq = mc_active.getMeta(key)[3] - get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False) #print 'cr {0}'.format(get_meta_2) max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..') @@ -692,7 +701,7 @@ def test_rebalance_revid_conflict_resolution(self): rebalance.result() time.sleep(120) replica_CAS = mc_replica.getMeta(key)[4] - get_meta_resp = mc_replica.getMeta(key,request_extended_meta_data=True) + get_meta_resp = mc_replica.getMeta(key,request_extended_meta_data=False) #print 'replica CAS {0}'.format(replica_CAS) #print 'replica ext meta {0}'.format(get_meta_resp) @@ -736,7 +745,7 @@ def test_failover_revid_conflict_resolution(self): pre_seq = mc_active.getMeta(key)[3] pre_max_cas = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) all = mc_active.getMeta(key) - get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_1 = mc_active.getMeta(key,request_extended_meta_data=False) #print 'cr {0}'.format(get_meta_1) self.log.info('all meta data before set_meta_force {0}'.format(all)) self.log.info('max_cas before set_meta_force {0}'.format(pre_max_cas)) @@ -746,7 +755,7 @@ def test_failover_revid_conflict_resolution(self): cas_post = mc_active.getMeta(key)[4] all_post_meta = mc_active.getMeta(key) post_seq = mc_active.getMeta(key)[3] - get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_2 = mc_active.getMeta(key,request_extended_meta_data=False) #print 'cr {0}'.format(get_meta_2) max_cas_post = int( mc_active.stats('vbucket-details')['vb_' + str(self.client._get_vBucket_id(key)) + ':max_cas'] ) self.log.info('Expect RevId conflict_resolution to occur, and the last updated mutation to be the winner..') @@ -766,7 +775,7 @@ def test_failover_revid_conflict_resolution(self): rebalance.result() time.sleep(120) replica_CAS = mc_replica.getMeta(key)[4] - get_meta_resp = mc_replica.getMeta(key,request_extended_meta_data=True) + get_meta_resp = mc_replica.getMeta(key,request_extended_meta_data=False) #print 'replica CAS {0}'.format(replica_CAS) #print 'replica ext meta {0}'.format(get_meta_resp) @@ -843,7 +852,7 @@ def test_meta_backup(self): shell.restore_backupFile(self.couchbase_login_info, self.backup_location, [bucket.name for bucket in self.buckets]) print 'Done with restore' finally: - self._check_cas(check_conflict_resolution=True) + self._check_cas(check_conflict_resolution=False) ''' Common function to verify the expected values on cas ''' @@ -867,7 +876,7 @@ def _check_cas(self, check_conflict_resolution=False, master=None, bucket=None, self.assertTrue(cas == max_cas, '[ERROR]Max cas is not 0 it is {0}'.format(cas)) if check_conflict_resolution: - get_meta_resp = mc_active.getMeta(key,request_extended_meta_data=True) + get_meta_resp = 
mc_active.getMeta(key,request_extended_meta_data=False) if time_sync == 'enabledWithoutDrift': self.assertTrue( get_meta_resp[5] == 1, msg='[ERROR] Metadata indicate conflict resolution is not set') elif time_sync == 'disabled': diff --git a/pytests/flush/bucketflush.py b/pytests/flush/bucketflush.py index 67a690caf..a1a586d53 100644 --- a/pytests/flush/bucketflush.py +++ b/pytests/flush/bucketflush.py @@ -39,7 +39,7 @@ def default_test_setup(self, load_data=True): def persist_and_verify(self): self._wait_for_stats_all_buckets(self.servers[:self.nodes_in + 1]) - self._verify_all_buckets(self.master, max_verify=self.max_verify) + self._verify_all_buckets(self.master, max_verify=self.max_verify,timeout=360) self._verify_stats_all_buckets(self.servers[:self.nodes_in + 1]) """Basic test for bucket flush functionality. Test loads data in bucket and then calls Flush. Verify curr_items=0 after flush. diff --git a/pytests/fts/custom_map_generator/map_generator.py b/pytests/fts/custom_map_generator/map_generator.py index a2256c263..f60340a71 100644 --- a/pytests/fts/custom_map_generator/map_generator.py +++ b/pytests/fts/custom_map_generator/map_generator.py @@ -72,10 +72,14 @@ "min": 3, "type": "edge_ngram" }, - "keyword_marker_en": { - "keywords_token_map": "stop_en", + "keyword_marker": { + "keywords_token_map": "stopwords", "type": "keyword_marker" }, + "stopwords": { + "stop_token_map": "stopwords", + "type": "stop_tokens" + }, "length": { "max": 5, "min": 3, @@ -98,6 +102,49 @@ "length": 10, "type": "truncate_token" } + }, + "token_maps": { + "stopwords": { + "tokens": ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', + 'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', + 'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', + 'itself', 'they', 'them', 'their', 'theirs', 'themselves', + 'what', 'which', 'who', 'whom', 'this', 'that', 'these', + 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', + 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', + 'doing', 'would', 'should', 'could', 'ought', "i'm", "you're", + "he's", "she's", "it's", "we're", "they're", "i've", "you've", + "we've", "they've", "i'd", "you'd", "he'd", "she'd", "we'd", + "they'd", "i'll", "you'll", "he'll", "she'll", "we'll", + "they'll", "isn't", "aren't", "wasn't", "weren't", "hasn't", + "haven't", "hadn't", "doesn't", "don't", "didn't", "won't", + "wouldn't", "shan't", "shouldn't", "can't", 'cannot', + "couldn't", "mustn't", "let's", "that's", "who's", "what's", + "here's", "there's", "when's", "where's", "why's", "how's", + 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', + 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', + 'against', 'between', 'into', 'through', 'during', 'before', + 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', + 'out', 'on', 'off', 'over', 'under', 'again', 'further', + 'then', 'once', 'here', 'there', 'when', 'where', 'why', + 'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', + 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', + 'same', 'so', 'than', 'too', 'very'], + "type": "custom" + } + }, + "char_filters": { + "mapping": { + "regexp": "[f]", + "replace": "ph", + "type": "regexp" + } + }, + "tokenizers": { + "alphanumeric": { + "regexp": "[0-9a-zA-Z_]*", + "type": "regexp" + } } } @@ -105,13 +152,15 @@ LANG_ANALYZERS = ["ar", "cjk", "fr", "fa", "hi", "it", "pt", "en", "web"] -CHAR_FILTERS = ["html"] +CHAR_FILTERS = ["html","mapping"] -TOKENIZERS = 
["letter","single","unicode","web","whitespace"] +TOKENIZERS = ["letter","single","unicode","web","whitespace","alphanumeric"] TOKEN_FILTERS = ["apostrophe","elision_fr","to_lower","ngram", "front_edge_ngram","back_edge_ngram","shingle", - "truncate","stemmer_porter","length"] + "truncate","stemmer_porter","length","keyword_marker", + "stopwords","cjk_bigram","stemmer_it_light", + "stemmer_fr_light","stemmer_fr_min","stemmer_pt_light"] class CustomMapGenerator: """ diff --git a/pytests/fts/es_base.py b/pytests/fts/es_base.py index 699ccf5c5..7bc0d9767 100644 --- a/pytests/fts/es_base.py +++ b/pytests/fts/es_base.py @@ -48,6 +48,20 @@ class BLEVE: "analysis": { "analyzer": { }, + "char_filter": { + "mapping": { + "type": "mapping", + "mappings": [ + "f => ph" + ] + } + }, + "tokenizer":{ + "alphanumeric":{ + "type":"pattern", + "pattern":"[^a-zA-Z0-9_]" + } + }, "filter": { "back_edge_ngram": { "type":"edgeNGram", @@ -67,10 +81,14 @@ class BLEVE: "max_gram": 5, "side": "front" }, - "keyword_marker_en": { + "keyword_marker": { "type":"keyword_marker", "keywords":STOPWORDS }, + "stopwords": { + "type":"stop", + "stopwords":STOPWORDS + }, "length": { "type":"length", "min":3, @@ -88,6 +106,25 @@ class BLEVE: "truncate": { "length": 10, "type": "truncate" + }, + "cjk_bigram": { + "type": "cjk_bigram" + }, + "stemmer_it_light": { + "type": "stemmer", + "name": "light_italian" + }, + "stemmer_fr_light": { + "type": "stemmer", + "name": "light_french" + }, + "stemmer_fr_min": { + "type": "stemmer", + "name": "minimal_french" + }, + "stemmer_pt_light": { + "type": "stemmer", + "name": "light_portuguese" } } } @@ -97,7 +134,8 @@ class BLEVE: FTS_ES_ANALYZER_MAPPING = { "char_filters" : { "html":"html_strip", - "zero_width_spaces":"html_strip" + "zero_width_spaces":"html_strip", + "mapping":"mapping" }, "token_filters": { "apostrophe":"apostrophe", @@ -110,14 +148,22 @@ class BLEVE: "shingle":"shingle", "stemmer_porter":"porter_stem", "truncate":"truncate", - "keyword_marker_en":"keyword_marker_en" + "keyword_marker":"keyword_marker", + "stopwords":"stopwords", + "cjk_width":"cjk_width", + "cjk_bigram":"cjk_bigram", + "stemmer_it_light":"stemmer_it_light", + "stemmer_fr_light":"stemmer_fr_light", + "stemmer_fr_min": "stemmer_fr_min", + "stemmer_pt_light": "stemmer_pt_light" }, "tokenizers": { "letter":"letter", "web":"uax_url_email", "whitespace":"whitespace", "unicode":"standard", - "single":"keyword" + "single":"keyword", + "alphanumeric":"alphanumeric" } } diff --git a/pytests/fts/fts_base.py b/pytests/fts/fts_base.py index 4c90f1e6b..4ecb4bfe2 100644 --- a/pytests/fts/fts_base.py +++ b/pytests/fts/fts_base.py @@ -27,8 +27,8 @@ from lib.membase.api.exception import FTSException from es_base import ElasticSearchBase -class RenameNodeException(FTSException): +class RenameNodeException(FTSException): """Exception thrown when converting ip to hostname failed """ @@ -37,7 +37,6 @@ def __init__(self, msg=''): class RebalanceNotStopException(FTSException): - """Exception thrown when stopping rebalance failed """ @@ -51,6 +50,7 @@ def raise_if(cond, ex): if cond: raise ex + class OPS: CREATE = "create" UPDATE = "update" @@ -89,91 +89,90 @@ class STATE: class CHECK_AUDIT_EVENT: CHECK = False -class INDEX_DEFAULTS: +class INDEX_DEFAULTS: BLEVE_MAPPING = { - "mapping": { - "default_mapping": { - "enabled": True, - "dynamic": True, - "default_analyzer": "" - }, - "type_field": "type", - "default_type": "_default", - "default_analyzer": "standard", - "default_datetime_parser": "dateTimeOptional", - 
"default_field": "_all", - "analysis": {} - } - } + "mapping": { + "default_mapping": { + "enabled": True, + "dynamic": True, + "default_analyzer": "" + }, + "type_field": "type", + "default_type": "_default", + "default_analyzer": "standard", + "default_datetime_parser": "dateTimeOptional", + "default_field": "_all", + "analysis": {} + } + } ALIAS_DEFINITION = {"targets": {}} - PLAN_PARAMS = { - "maxPartitionsPerPIndex": 32, - "numReplicas": 0, - "hierarchyRules": None, - "nodePlanParams": None, - "planFrozen": False - } + "maxPartitionsPerPIndex": 32, + "numReplicas": 0, + "hierarchyRules": None, + "nodePlanParams": None, + "planFrozen": False + } SOURCE_CB_PARAMS = { - "authUser": "default", - "authPassword": "", - "authSaslUser": "", - "authSaslPassword": "", - "clusterManagerBackoffFactor": 0, - "clusterManagerSleepInitMS": 0, - "clusterManagerSleepMaxMS": 20000, - "dataManagerBackoffFactor": 0, - "dataManagerSleepInitMS": 0, - "dataManagerSleepMaxMS": 20000, - "feedBufferSizeBytes": 0, - "feedBufferAckThreshold": 0 - } + "authUser": "default", + "authPassword": "", + "authSaslUser": "", + "authSaslPassword": "", + "clusterManagerBackoffFactor": 0, + "clusterManagerSleepInitMS": 0, + "clusterManagerSleepMaxMS": 20000, + "dataManagerBackoffFactor": 0, + "dataManagerSleepInitMS": 0, + "dataManagerSleepMaxMS": 20000, + "feedBufferSizeBytes": 0, + "feedBufferAckThreshold": 0 + } SOURCE_FILE_PARAMS = { - "regExps": [ - ".txt$", - ".md$" - ], - "maxFileSize": 0, - "numPartitions": 0, - "sleepStartMS": 5000, - "backoffFactor": 1.5, - "maxSleepMS": 300000 - } + "regExps": [ + ".txt$", + ".md$" + ], + "maxFileSize": 0, + "numPartitions": 0, + "sleepStartMS": 5000, + "backoffFactor": 1.5, + "maxSleepMS": 300000 + } INDEX_DEFINITION = { - "type": "fulltext-index", - "name": "", - "uuid": "", - "params": {}, - "sourceType": "couchbase", - "sourceName": "default", - "sourceUUID": "", - "sourceParams": SOURCE_CB_PARAMS, - "planParams": {} - } + "type": "fulltext-index", + "name": "", + "uuid": "", + "params": {}, + "sourceType": "couchbase", + "sourceName": "default", + "sourceUUID": "", + "sourceParams": SOURCE_CB_PARAMS, + "planParams": {} + } -class QUERY: +class QUERY: JSON = { - "indexName": "", - "size": 10, - "from": 0, - "explain": False, - "query": {}, - "fields": [], - "ctl": { - "consistency": { - "level": "", - "vectors": {} - }, - "timeout": 60000 - } - } + "indexName": "", + "size": 10, + "from": 0, + "explain": False, + "query": {}, + "fields": [], + "ctl": { + "consistency": { + "level": "", + "vectors": {} + }, + "timeout": 60000 + } + } # Event Definition: @@ -334,7 +333,8 @@ def wait_warmup_completed(warmupnodes, bucket_names=["default"]): break elif mc.stats()["ep_warmup_thread"] == "running": NodeHelper._log.info( - "Still warming up .. ep_warmup_key_count : %s" % (mc.stats("warmup")["ep_warmup_key_count"])) + "Still warming up .. 
ep_warmup_key_count : %s" % ( + mc.stats("warmup")["ep_warmup_key_count"])) continue else: NodeHelper._log.info( @@ -345,10 +345,9 @@ def wait_warmup_completed(warmupnodes, bucket_names=["default"]): time.sleep(10) if mc.stats()["ep_warmup_thread"] == "running": NodeHelper._log.info( - "ERROR: ep_warmup_thread's status not complete") + "ERROR: ep_warmup_thread's status not complete") mc.close() - @staticmethod def wait_node_restarted( server, test_case, wait_time=120, wait_if_warmup=False, @@ -401,9 +400,19 @@ def kill_cbft_process(server): def get_log_dir(node): """Gets couchbase log directory, even for cluster_run """ - _, dir = RestConnection(node).diag_eval('filename:absname(element(2, application:get_env(ns_server,error_logger_mf_dir))).') + _, dir = RestConnection(node).diag_eval( + 'filename:absname(element(2, application:get_env(ns_server,error_logger_mf_dir))).') return str(dir) + @staticmethod + def get_data_dir(node): + """Gets couchbase data directory, even for cluster_run + """ + _, dir = RestConnection(node).diag_eval( + 'filename:absname(element(2, application:get_env(ns_server,path_config_datadir))).') + + return str(dir).replace('\"','') + @staticmethod def rename_nodes(servers): """Rename server name from ip to their hostname @@ -464,7 +473,6 @@ def collect_logs(server): class FloatingServers: - """Keep Track of free servers, For Rebalance-in or swap-rebalance operations. """ @@ -487,6 +495,7 @@ class FTSIndex: plan_params = {'maxPartitionsPerIndex' : 40} ) """ + def __init__(self, cluster, name, source_type='couchbase', source_name=None, index_type='fulltext-index', index_params=None, plan_params=None, source_params=None, source_uuid=None): @@ -519,16 +528,16 @@ def __init__(self, cluster, name, source_type='couchbase', self.index_type = index_type self.num_pindexes = 0 self.index_definition = { - "type": "fulltext-index", - "name": "", - "uuid": "", - "params": {}, - "sourceType": "couchbase", - "sourceName": "default", - "sourceUUID": "", - "sourceParams": INDEX_DEFAULTS.SOURCE_CB_PARAMS, - "planParams": {} - } + "type": "fulltext-index", + "name": "", + "uuid": "", + "params": {}, + "sourceType": "couchbase", + "sourceName": "default", + "sourceUUID": "", + "sourceParams": INDEX_DEFAULTS.SOURCE_CB_PARAMS, + "planParams": {} + } self.name = self.index_definition['name'] = name self.es_custom_map = None self.smart_query_fields = None @@ -570,7 +579,7 @@ def __init__(self, cluster, name, source_type='couchbase', if TestInputSingleton.input.param("kvstore", None): self.index_definition['params']['store'] = {"kvStoreName": - TestInputSingleton.input.param("kvstore", None)} + TestInputSingleton.input.param("kvstore", None)} if TestInputSingleton.input.param("memory_only", None): self.index_definition['params']['store'] = \ @@ -582,10 +591,9 @@ def __init__(self, cluster, name, source_type='couchbase', {"mossStoreOptions": { "CompactionPercentage": int(TestInputSingleton.input.param( "moss_compact_threshold", - None)) + None)) + } } - } - self.moss_enabled = TestInputSingleton.input.param("moss", True) if not self.moss_enabled: @@ -595,20 +603,22 @@ def __init__(self, cluster, name, source_type='couchbase', def generate_new_custom_map(self, seed): from custom_map_generator.map_generator import CustomMapGenerator - cm_gen = CustomMapGenerator(seed=seed, dataset=self.dataset, num_custom_analyzers=self.num_custom_analyzers, - multiple_filters=self.multiple_filters) + cm_gen = CustomMapGenerator(seed=seed, dataset=self.dataset, + num_custom_analyzers=self.num_custom_analyzers, + 
multiple_filters=self.multiple_filters) fts_map, self.es_custom_map = cm_gen.get_map() self.smart_query_fields = cm_gen.get_smart_query_fields() print self.smart_query_fields self.index_definition['params'] = self.build_custom_index_params( - fts_map) + fts_map) if self.num_custom_analyzers > 0: custom_analyzer_def = cm_gen.build_custom_analyzer() - self.index_definition["params"]["mapping"]["analysis"] = custom_analyzer_def + self.index_definition["params"]["mapping"]["analysis"] = \ + custom_analyzer_def self.index_definition['params']['mapping']['default_analyzer'] = \ cm_gen.get_random_value(custom_analyzer_def["analyzers"].keys()) self.__log.info(json.dumps(self.index_definition["params"], - indent=3)) + indent=3)) def update_custom_analyzer(self, seed): """ @@ -637,8 +647,8 @@ def update_custom_analyzer(self, seed): else: from custom_map_generator.map_generator import CustomMapGenerator cm_gen = CustomMapGenerator(seed=seed, dataset=self.dataset, - num_custom_analyzers=self.num_custom_analyzers, - multiple_filters=self.multiple_filters) + num_custom_analyzers=self.num_custom_analyzers, + multiple_filters=self.multiple_filters) if self.num_custom_analyzers > 0: custom_analyzer_def = cm_gen.build_custom_analyzer() self.index_definition["params"]["mapping"]["analysis"] = \ @@ -672,33 +682,157 @@ def build_source_params(self, source_params): return src_params def add_child_field_to_default_mapping(self, field_name, field_type, - field_alias=None): + field_alias=None, analyzer=None): """ This method will add a field mapping to a default mapping """ - self.index_definition['params']['mapping']={} - self.index_definition['params']['mapping']['default_mapping'] = {} - self.index_definition['params']['mapping']['default_mapping']\ - ['properties']={} + fields = str.split(field_name, '.') + nesting_level = len(fields) + + child_map = {} + child_map['dynamic'] = False + child_map['enabled'] = True + child_map['properties'] = {} + + child_field = {} + child_field['dynamic'] = False + child_field['enabled'] = True if not field_alias: - field_alias = field_name - field_mapping={"dynamic": False, - "enabled": True, - "fields":[ - { - "analyzer": "", - "display_order": "0", - "include_in_all": True, - "include_term_vectors": True, - "index": True, - "name": field_alias, - "store": True, - "type": field_type - } - ] + field_alias = fields[len(fields) - 1] + child_field['fields'] = [ + { + "analyzer": analyzer, + "display_order": "0", + "include_in_all": True, + "include_term_vectors": True, + "index": True, + "name": field_alias, + "store": True, + "type": field_type + } + ] + + field_maps = [] + field_maps.append(child_field) + + if nesting_level > 1: + for x in xrange(0, nesting_level - 1): + field = fields.pop() + # Do a deepcopy of child_map into field_map since we don't + # want to have child_map altered because of changes on field_map + field_map = copy.deepcopy(child_map) + field_map['properties'][field] = field_maps.pop() + field_maps.append(field_map) + + map = {} + if not self.index_definition['params'].has_key('mapping'): + map['default_mapping'] = {} + map['default_mapping']['properties'] = {} + map['default_mapping']['dynamic'] = True + map['default_mapping']['enabled'] = True + map['default_mapping']['properties'][fields.pop()] = field_maps.pop() + self.index_definition['params']['mapping'] = map + else: + self.index_definition['params']['mapping']['default_mapping'] \ + ['properties'][fields.pop()] = field_maps.pop() + + def add_analyzer_to_existing_field_map(self, field_name, 
field_type, + field_alias=None, analyzer=None): + """ + Add another field mapping with a different analyzer to an existing field map. + Can be enhanced to update other fields as well if required. + """ + fields = str.split(field_name, '.') + + if not field_alias: + field_alias = fields[len(fields) - 1] + + child_field = { + "analyzer": analyzer, + "display_order": "0", + "include_in_all": True, + "include_term_vectors": True, + "index": True, + "name": field_alias, + "store": True, + "type": field_type } - self.index_definition['params']['mapping']['default_mapping']\ - ['properties'][field_name]=field_mapping + + map = copy.deepcopy(self.index_definition['params']['mapping'] + ['default_mapping']['properties']) + + map = self.update_nested_field_mapping(fields[len(fields) - 1], + child_field, map) + self.index_definition['params']['mapping']['default_mapping'] \ + ['properties'] = map + + def update_nested_field_mapping(self, key, value, map): + """ + Recurse through a given nested field mapping, and append the leaf node with the specified value. + Can be enhanced to update the current value as well if required. + """ + for k, v in map.iteritems(): + if k == key: + map[k]['fields'].append(value) + return map + else: + if map[k].has_key('properties'): + map[k]['properties'] = \ + self.update_nested_field_mapping(key, value, + map[k]['properties']) + return map + + def add_type_mapping_to_index_definition(self, type, analyzer): + """ + Add Type Mapping to Index Definition (and disable default mapping) + """ + type_map = {} + type_map[type] = {} + type_map[type]['default_analyzer'] = analyzer + type_map[type]['display_order'] = 0 + type_map[type]['dynamic'] = True + type_map[type]['enabled'] = True + + if not self.index_definition['params'].has_key('mapping'): + self.index_definition['params']['mapping'] = {} + self.index_definition['params']['mapping']['default_mapping'] = {} + self.index_definition['params']['mapping']['default_mapping'] \ + ['properties'] = {} + self.index_definition['params']['mapping']['default_mapping'] \ + ['dynamic'] = False + + self.index_definition['params']['mapping']['default_mapping'] \ + ['enabled'] = False + self.index_definition['params']['mapping']['types'] = type_map + + def add_doc_config_to_index_definition(self, mode): + """ + Add Document Type Configuration to Index Definition + """ + doc_config = {} + doc_config['mode'] = mode + + if mode == 'docid_regexp': + doc_config['docid_regexp'] = "([^_]*)" + + if mode == 'docid_prefix': + doc_config['docid_prefix_delim'] = "-" + + if mode == "type_field": + doc_config['type_field'] = "type" + + self.index_definition['params']['doc_config'] = {} + self.index_definition['params']['doc_config'] = doc_config + + def get_rank_of_doc_in_search_results(self, content, doc_id): + """ + Fetch rank of a given document in Search Results + """ + try: + return content.index(doc_id) + 1 + except Exception as err: + self.__log.info("Doc ID %s not found in search results." 
% doc_id) + return -1 def create(self): self.__log.info("Checking if index already exists ...") @@ -756,8 +890,9 @@ def get_uuid(self): rest = RestConnection(self.__cluster.get_random_fts_node()) return rest.get_fts_index_uuid(self.name) - def construct_cbft_query_json(self, query, fields=None, timeout=None): - + def construct_cbft_query_json(self, query, fields=None, timeout=None, + facets=False, + sort_fields=None): max_matches = TestInputSingleton.input.param("query_max_matches", 10000000) query_json = QUERY.JSON # query is a unicode dict @@ -769,20 +904,81 @@ def construct_cbft_query_json(self, query, fields=None, timeout=None): query_json['timeout'] = int(timeout) if fields: query_json['fields'] = fields + if facets: + query_json['facets'] = self.construct_facets_definition() + if sort_fields: + query_json['sort'] = sort_fields return query_json - def execute_query(self, query, zero_results_ok=True, expected_hits=None): + def construct_facets_definition(self): + """ + Constructs the facets definition of the query json + """ + facets = TestInputSingleton.input.param("facets", None).split(",") + size = TestInputSingleton.input.param("facets_size", 5) + terms_field = "dept" + terms_facet_name = "Department" + numeric_range_field = "salary" + numeric_range_facet_name = "Salaries" + date_range_field = "join_date" + date_range_facet_name = "No. of Years" + facet_definition = {} + + date_range_buckets = [ + {"name": "1 year", "start": "2015-08-01"}, + {"name": "2-5 years", "start": "2011-08-01", "end": "2015-07-31"}, + {"name": "6-10 years", "start": "2006-08-01", "end": "2011-07-31"}, + {"name": "10+ years", "end": "2006-07-31"} + ] + + numeric_range_buckets = [ + {"name": "high salary", "min": 150001}, + {"name": "average salary", "min": 110001, "max": 150000}, + {"name": "low salary", "max": 110000} + ] + + for facet in facets: + if facet == 'terms': + facet_definition[terms_facet_name] = {} + facet_definition[terms_facet_name]['field'] = terms_field + facet_definition[terms_facet_name]['size'] = size + + if facet == 'numeric_ranges': + facet_definition[numeric_range_facet_name] = {} + facet_definition[numeric_range_facet_name]['field'] = \ + numeric_range_field + facet_definition[numeric_range_facet_name]['size'] = size + facet_definition[numeric_range_facet_name]['numeric_ranges'] = [] + for bucket in numeric_range_buckets: + facet_definition[numeric_range_facet_name] \ + ['numeric_ranges'].append(bucket) + + if facet == 'date_ranges': + facet_definition[date_range_facet_name] = {} + facet_definition[date_range_facet_name]['field'] = \ + date_range_field + facet_definition[date_range_facet_name]['size'] = size + facet_definition[date_range_facet_name]['date_ranges'] = [] + for bucket in date_range_buckets: + facet_definition[date_range_facet_name] \ + ['date_ranges'].append(bucket) + + return facet_definition + + def execute_query(self, query, zero_results_ok=True, expected_hits=None, + return_raw_hits=False, sort_fields=None): """ Takes a query dict, constructs a json, runs and returns results """ - query_dict = self.construct_cbft_query_json(query) + query_dict = self.construct_cbft_query_json(query, + sort_fields=sort_fields) hits = -1 matches = [] doc_ids = [] time_taken = 0 status = {} try: - hits, matches, time_taken, status =\ + hits, matches, time_taken, status = \ self.__cluster.run_fts_query(self.name, query_dict) except ServerUnavailableException: # query time outs @@ -793,18 +989,272 @@ def execute_query(self, query, zero_results_ok=True, expected_hits=None): for doc in 
matches: doc_ids.append(doc['id']) if int(hits) == 0 and not zero_results_ok: - raise FTSException("No docs returned for query : %s" %query_dict) + raise FTSException("No docs returned for query : %s" % query_dict) if expected_hits and expected_hits != hits: raise FTSException("Expected hits: %s, fts returned: %s" % (expected_hits, hits)) if expected_hits and expected_hits == hits: self.__log.info("SUCCESS! Expected hits: %s, fts returned: %s" + % (expected_hits, hits)) + if not return_raw_hits: + return hits, doc_ids, time_taken, status + else: + return hits, matches, time_taken, status + + def execute_query_with_facets(self, query, zero_results_ok=True, + expected_hits=None): + """ + Takes a query dict with facet definition, constructs a json, + runs and returns results + """ + query_dict = self.construct_cbft_query_json(query, facets=True) + hits = -1 + matches = [] + doc_ids = [] + time_taken = 0 + status = {} + try: + hits, matches, time_taken, status, facets = \ + self.__cluster.run_fts_query_with_facets(self.name, query_dict) + except ServerUnavailableException: + # query time outs + raise ServerUnavailableException + except Exception as e: + self.__log.error("Error running query: %s" % e) + if hits: + for doc in matches: + doc_ids.append(doc['id']) + if int(hits) == 0 and not zero_results_ok: + raise FTSException("No docs returned for query : %s" % query_dict) + if expected_hits and expected_hits != hits: + raise FTSException("Expected hits: %s, fts returned: %s" % (expected_hits, hits)) - return hits, doc_ids, time_taken, status + if expected_hits and expected_hits == hits: + self.__log.info("SUCCESS! Expected hits: %s, fts returned: %s" + % (expected_hits, hits)) + return hits, doc_ids, time_taken, status, facets + + def validate_facets_in_search_results(self, no_of_hits, facets_returned): + """ + Validate the facet data returned in the query response JSON. + """ + facets = TestInputSingleton.input.param("facets", None).split(",") + size = TestInputSingleton.input.param("facets_size", 5) + field_indexed = TestInputSingleton.input.param("field_indexed", True) + terms_facet_name = "Department" + numeric_range_facet_name = "Salaries" + date_range_facet_name = "No. of Years" + + for facet in facets: + if facet == 'terms': + facet_name = terms_facet_name + if facet == 'numeric_ranges': + facet_name = numeric_range_facet_name + if facet == 'date_ranges': + facet_name = date_range_facet_name + + # Validate Facet name + if not facets_returned.has_key(facet_name): + raise FTSException(facet_name + " not present in the " + "search results") + + # Validate Total No. with no. of hits. It can be unequal if + # the field is not indexed, but not otherwise. + total_count = facets_returned[facet_name]['total'] + missing_count = facets_returned[facet_name]['missing'] + others_count = facets_returned[facet_name]['other'] + if not total_count == no_of_hits: + if field_indexed: + raise FTSException("Total count of results in " + facet_name + + " Facet (" + str(total_count) + + ") is not equal to total hits in search " + "results (" + str(no_of_hits) + ")") + else: + if not ((missing_count == no_of_hits) and (total_count == 0)): + raise FTSException("Field not indexed, but counts " + "are not expected") + + # Validate only if there are some search results + if not total_count == 0: + # Validate no. of terms returned, and it should be <= size + no_of_buckets_in_facet = len(facets_returned[facet_name] \ + [facet]) + if no_of_buckets_in_facet > size: + raise FTSException("Total no. 
of buckets in facets (" + + str(no_of_buckets_in_facet) + + ") exceeds the size defined (" + + str(size) + ")") + + # Validate count in each facet and total it up. + # Should be Total - missing - others + total_count_in_buckets = 0 + for bucket in facets_returned[facet_name][facet]: + self.__log.info(bucket) + total_count_in_buckets += bucket['count'] + + if not total_count_in_buckets == (total_count - missing_count - + others_count): + raise FTSException("Total count (%d) in buckets not correct" + % total_count_in_buckets) + + if not self.validate_query_run_with_facet_data\ + (query=TestInputSingleton.input.param("query", ""), + facets_returned=facets_returned, facet_type=facet): + raise FTSException("Requerying returns different results " + "than expected") + else: + self.__log.info("Zero total count in facet.") + + self.__log.info("Validated Facets in search results") + + def validate_query_run_with_facet_data(self, query, facets_returned, + facet_type): + """ + Form a query based on the facet data and check the # hits. + """ + if facet_type == 'terms': + facet_name = 'Department' + field_name = 'dept' + value = facets_returned[facet_name][facet_type][0]['term'] + expected_hits = facets_returned[facet_name][facet_type][0]['count'] + new_query = "{\"conjuncts\" :[" + query + ",{\"match\":\"" + \ + value + "\", \"field\":\"" + field_name + "\"}]}" + + if facet_type == 'numeric_ranges': + facet_name = 'Salaries' + field_name = 'salary' + max_value = None + min_value = None + min_value_query = "" + max_value_query = "" + try: + max_value = facets_returned[facet_name][facet_type][0]['max'] + max_value_query = ",{\"inclusive_max\":true, \"field\":\"" \ + + field_name + "\", \"max\":" + \ + str(max_value) + "}" + except: + self.__log.info("max key doesn't exist for Salary facet") + try: + min_value = facets_returned[facet_name][facet_type][0]['min'] + min_value_query = ",{\"inclusive_min\":true, \"field\":\"" \ + + field_name + "\", \"min\":" + \ + str(min_value) + "}" + except: + self.__log.info("min key doesn't exist for Salary facet") + + expected_hits = facets_returned[facet_name][facet_type][0]['count'] + + new_query = "{\"conjuncts\" :[" + query + min_value_query + \ + max_value_query + "]}" + + if facet_type == 'date_ranges': + facet_name = 'No. of Years' + field_name = 'join_date' + end_value = None + start_value = None + start_value_query = "" + end_value_query = "" + try: + end_value = facets_returned[facet_name][facet_type][0]['end'] + end_value_query = ",{\"inclusive_end\":true, \"field\":\"" + \ + field_name + "\", \"end\":\"" + \ + end_value + "\"}" + except: + self.__log.info("end key doesn't exist for No. of Years facet") -class CouchbaseCluster: + try: + start_value = facets_returned[facet_name][facet_type][0]['start'] + start_value_query = ",{\"inclusive_start\":true, \"field\":\"" \ + + field_name + "\", \"start\":\"" + \ + start_value + "\"}" + except: + self.__log.info("start key doesn't exist for No. 
of Years facet") + + expected_hits = facets_returned[facet_name][facet_type][0]['count'] + + new_query = "{\"conjuncts\" :[" + query + end_value_query + \ + start_value_query + "]}" + + self.__log.info(new_query) + new_query = json.loads(new_query) + hits, _, _, _ = self.execute_query(query=new_query, + zero_results_ok=True, + expected_hits=expected_hits) + if not hits == expected_hits: + return False + else: + return True + + def validate_sorted_results(self, raw_hits, sort_fields): + """ + Validate if the docs returned in the search result match the expected values + """ + result = False + expected_docs = TestInputSingleton.input.param("expected", None).split( + ',') + docs = [] + # Fetch the Doc IDs from raw_hits + for doc in raw_hits: + docs.append(doc['id']) + + # Compare docs with the expected values. + if docs == expected_docs: + result = True + else: + # Sometimes, if there are two docs with the same field value, their rank + # may be interchanged. To handle this, if the actual doc order + # doesn't match the expected value, swap the two such docs and then + # try to match + tolerance = TestInputSingleton.input.param("tolerance", None) + if tolerance: + tolerance = tolerance.split(',') + index1, index2 = expected_docs.index( + tolerance[0]), expected_docs.index(tolerance[1]) + expected_docs[index1], expected_docs[index2] = expected_docs[ + index2], \ + expected_docs[ + index1] + if docs == expected_docs: + result = True + else: + self.__log.info("Actual docs returned : %s", docs) + self.__log.info("Expected docs : %s", expected_docs) + return False + else: + self.__log.info("Actual docs returned : %s", docs) + self.__log.info("Expected docs : %s", expected_docs) + return False + + # Validate the sort fields in the result + for doc in raw_hits: + if 'sort' in doc.keys(): + if not sort_fields and len(doc['sort']) == 1: + result &= True + elif sort_fields and len(doc['sort']) == len(sort_fields): + result &= True + else: + self.__log.info("Sort fields do not match for the following document - ") + self.__log.info(doc) + return False + + return result + + def get_score_from_query_result_content(self, contents, doc_id): + for content in contents: + if content['id'] == doc_id: + return content['score'] + + def is_doc_present_in_query_result_content(self, contents, doc_id): + for content in contents: + if content['id'] == doc_id: + return True + return False + +class CouchbaseCluster: def __init__(self, name, nodes, log, use_hostname=False): """ @param name: Couchbase cluster name. e.g C1, C2 to distinguish in logs. 
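For reference, a minimal sketch of the query payload that construct_cbft_query_json assembles when sort fields and facets are both requested: it is the QUERY.JSON template plus the "sort" and "facets" keys set above. The index name, match clause, and sort order below are illustrative; the "Department"/"dept" facet mirrors the defaults hardcoded in construct_facets_definition. validate_sorted_results then checks each hit's 'sort' array against the requested sort keys (or a single score entry when none are given).

    # Illustrative payload; only "sort" and "facets" differ from QUERY.JSON.
    query_json = {
        "indexName": "default_index",   # hypothetical index name
        "size": 10,
        "from": 0,
        "explain": False,
        "query": {"match": "Engineering", "field": "dept"},
        "fields": [],
        "sort": ["salary", "-_score"],  # '-' prefix sorts descending
        "facets": {"Department": {"field": "dept", "size": 5}},
        "ctl": {"consistency": {"level": "", "vectors": {}}, "timeout": 60000}
    }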
@@ -920,7 +1370,7 @@ def set_bypass_fts_node(self, node): self.__bypass_fts_nodes.append(node) def get_random_node(self): - return self.__nodes[random.randint(0, len(self.__nodes)-1)] + return self.__nodes[random.randint(0, len(self.__nodes) - 1)] def get_random_fts_node(self): self.__separate_nodes_on_services() @@ -928,13 +1378,13 @@ def get_random_fts_node(self): self.__fts_nodes.remove(node) if not self.__fts_nodes: raise FTSException("No node in the cluster has 'fts' service" - " enabled") + " enabled") if len(self.__fts_nodes) == 1: return self.__fts_nodes[0] - return self.__fts_nodes[random.randint(0, len(self.__fts_nodes)-1)] + return self.__fts_nodes[random.randint(0, len(self.__fts_nodes) - 1)] def get_random_non_fts_node(self): - return self.__non_fts_nodes[random.randint(0, len(self.__fts_nodes)-1)] + return self.__non_fts_nodes[random.randint(0, len(self.__non_fts_nodes) - 1)] def are_index_files_deleted_from_disk(self, index_name): nodes = self.get_fts_nodes() @@ -946,14 +1396,14 @@ def are_index_files_deleted_from_disk(self, index_name): while count != 0: count, err = shell.execute_command( "ls {0}/@fts |grep {1}*.pindex | wc -l". - format(data_dir, index_name)) + format(data_dir, index_name)) count = int(count[0]) self.__log.info(count) retry += 1 if retry > 5: files, err = shell.execute_command( "ls {0}/@fts |grep {1}*.pindex". - format(data_dir, index_name)) + format(data_dir, index_name)) self.__log.info(files) return False return True @@ -987,10 +1437,10 @@ def init_cluster(self, cluster_services, available_nodes): """ self.__log.info("Initializing Cluster ...") - if len(cluster_services)-1 > len(available_nodes): + if len(cluster_services) - 1 > len(available_nodes): raise FTSException("Only %s nodes present for given cluster" - "configuration %s" - % (len(available_nodes)+1, cluster_services)) + " configuration %s" + % (len(available_nodes) + 1, cluster_services)) self.__init_nodes() if available_nodes: nodes_to_add = [] @@ -999,27 +1449,27 @@ def init_cluster(self, cluster_services, available_nodes): if index == 0: # first node is always a data/kv node continue - self.__log.info("%s will be configured with services %s" %( - available_nodes[index-1].ip, - node_service)) - nodes_to_add.append(available_nodes[index-1]) + self.__log.info("%s will be configured with services %s" % ( + available_nodes[index - 1].ip, + node_service)) + nodes_to_add.append(available_nodes[index - 1]) node_services.append(node_service) try: self.__clusterop.async_rebalance( - self.__nodes, - nodes_to_add, - [], - use_hostnames=self.__use_hostname, - services=node_services).result() + self.__nodes, + nodes_to_add, + [], + use_hostnames=self.__use_hostname, + services=node_services).result() except Exception as e: - raise FTSException("Unable to initialize cluster with config " - "%s: %s" %(cluster_services, e)) + raise FTSException("Unable to initialize cluster with config " + "%s: %s" % (cluster_services, e)) self.__nodes += nodes_to_add self.__separate_nodes_on_services() if not self.is_cluster_run() and \ - (TestInputSingleton.input.param("fdb_compact_interval", None) or \ - TestInputSingleton.input.param("fdb_compact_threshold", None)): + (TestInputSingleton.input.param("fdb_compact_interval", None) or \ + TestInputSingleton.input.param("fdb_compact_threshold", None)): for node in self.__fts_nodes: NodeHelper.set_cbft_env_fdb_options(node) @@ -1240,6 +1690,23 @@ def run_fts_query(self, index_name, query_dict, node=None): RestConnection(node).run_fts_query(index_name, query_dict) return 
total_hits, hit_list, time_taken, status + def run_fts_query_with_facets(self, index_name, query_dict, node=None): + """ Runs a query defined in query_json against an index/alias and + a specific node + + @return total_hits : total hits for the query, + @return hit_list : list of docs that match the query + + """ + if not node: + node = self.get_random_fts_node() + self.__log.info("Running query %s on node: %s:%s" + % (json.dumps(query_dict, ensure_ascii=False), + node.ip, node.fts_port)) + total_hits, hit_list, time_taken, status, facets = \ + RestConnection(node).run_fts_query_with_facets(index_name, query_dict) + return total_hits, hit_list, time_taken, status, facets + def get_buckets(self): return self.__buckets @@ -1306,9 +1773,9 @@ def async_load_bucket(self, bucket, num_items, exp=0, """ seed = "%s-key-" % self.__name self._kv_gen[OPS.CREATE] = JsonDocGenerator(seed, - encoding="utf-8", - start=0, - end=num_items) + encoding="utf-8", + start=0, + end=num_items) gen = copy.deepcopy(self._kv_gen[OPS.CREATE]) task = self.__clusterop.async_load_gen_docs( @@ -1356,9 +1823,9 @@ def async_load_all_buckets(self, num_items, exp=0, """ prefix = "%s-" % self.__name self._kv_gen[OPS.CREATE] = JsonDocGenerator(prefix, - encoding="utf-8", - start=0, - end=num_items) + encoding="utf-8", + start=0, + end=num_items) tasks = [] for bucket in self.__buckets: gen = copy.deepcopy(self._kv_gen[OPS.CREATE]) @@ -1440,8 +1907,8 @@ def async_load_all_buckets_from_generator(self, kv_gen, ops=OPS.CREATE, exp=0, return tasks def async_load_bucket_from_generator(self, bucket, kv_gen, ops=OPS.CREATE, exp=0, - kv_store=1, flag=0, only_store_hash=True, - batch_size=5000, pause_secs=1, timeout_secs=30): + kv_store=1, flag=0, only_store_hash=True, + batch_size=5000, pause_secs=1, timeout_secs=30): """Load data asynchronously on all buckets. Function wait for load data to finish. @param bucket: pass object of bucket to load into @@ -1465,7 +1932,6 @@ def async_load_bucket_from_generator(self, bucket, kv_gen, ops=OPS.CREATE, exp=0 ) return task - def load_all_buckets_till_dgm(self, active_resident_ratio, es=None, items=1000, exp=0, kv_store=1, flag=0, only_store_hash=True, batch_size=1000, @@ -1484,7 +1950,7 @@ def load_all_buckets_till_dgm(self, active_resident_ratio, es=None, """ items = int(items) self.__log.info("First loading \"items\" {0} number keys to handle " - "update/deletes in dgm cases".format(items)) + "update/deletes in dgm cases".format(items)) self.load_all_buckets(items) self.__log.info("Now loading extra keys to reach dgm limit") @@ -1499,7 +1965,7 @@ def load_all_buckets_till_dgm(self, active_resident_ratio, es=None, start = items while int(current_active_resident) > active_resident_ratio: end = start + batch_size * 10 - self.__log.info("loading %s keys ..." % (end-start)) + self.__log.info("loading %s keys ..." 
% (end - start)) kv_gen = JsonDocGenerator(seed, encoding="utf-8", @@ -1514,8 +1980,8 @@ def load_all_buckets_till_dgm(self, active_resident_ratio, es=None, if es: tasks.append(es.async_bulk_load_ES(index_name='default_es_index', - gen=kv_gen, - op_type='create')) + gen=kv_gen, + op_type='create')) for task in tasks: task.result() @@ -1531,7 +1997,7 @@ def load_all_buckets_till_dgm(self, active_resident_ratio, es=None, active_resident_ratio, bucket.name)) self.__log.info("Loaded a total of %s keys into bucket %s" - % (end,bucket.name)) + % (end, bucket.name)) self._kv_gen[OPS.CREATE] = JsonDocGenerator(seed, encoding="utf-8", start=0, @@ -1539,8 +2005,8 @@ def load_all_buckets_till_dgm(self, active_resident_ratio, es=None, return self._kv_gen[OPS.CREATE] def update_bucket(self, bucket, fields_to_update=None, exp=0, - kv_store=1, flag=0, only_store_hash=True, - batch_size=1000, pause_secs=1, timeout_secs=30): + kv_store=1, flag=0, only_store_hash=True, + batch_size=1000, pause_secs=1, timeout_secs=30): """Load data synchronously on given bucket. Function wait for load data to finish. @param bucket: bucket where to load data. @@ -1554,8 +2020,8 @@ def update_bucket(self, bucket, fields_to_update=None, exp=0, @param pause_secs: pause for next batch load. @param timeout_secs: timeout """ - self.__log.info("Updating fields %s in bucket %s" %(fields_to_update, - bucket.name)) + self.__log.info("Updating fields %s in bucket %s" % (fields_to_update, + bucket.name)) task = self.async_update_bucket(bucket, fields_to_update=fields_to_update, exp=exp, kv_store=kv_store, flag=flag, only_store_hash=only_store_hash, @@ -1564,10 +2030,9 @@ def update_bucket(self, bucket, fields_to_update=None, exp=0, timeout_secs=timeout_secs) task.result() - def async_update_bucket(self, bucket, fields_to_update=None, exp=0, - kv_store=1, flag=0, only_store_hash=True, - batch_size=1000, pause_secs=1, timeout_secs=30): + kv_store=1, flag=0, only_store_hash=True, + batch_size=1000, pause_secs=1, timeout_secs=30): """Update data asynchronously on given bucket. Function don't wait for load data to finish, return immediately. @param bucket: bucket where to load data. 
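The update and delete generators slice the original create key range by percentage: updates rewrite the leading perc% of keys, deletes remove the trailing perc%. A minimal sketch of that arithmetic, with illustrative numbers:

    # Illustrative: 1000 docs created, update 30%, delete 20%.
    create_end = 1000
    perc_upd, perc_del = 30, 20

    update_end = int(create_end * float(perc_upd) / 100)          # keys [0, 300)
    delete_start = int(create_end * float(100 - perc_del) / 100)  # keys [800, 1000)

    assert update_end == 300 and delete_start == 800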
@@ -1586,13 +2051,13 @@ def async_update_bucket(self, bucket, fields_to_update=None, exp=0, self._kv_gen[OPS.UPDATE] = copy.deepcopy(self._kv_gen[OPS.CREATE]) self._kv_gen[OPS.UPDATE].start = 0 self._kv_gen[OPS.UPDATE].end = int(self._kv_gen[OPS.CREATE].end - * (float)(perc)/100) + * (float)(perc) / 100) self._kv_gen[OPS.UPDATE].update(fields_to_update=fields_to_update) task = self.__clusterop.async_load_gen_docs( self.__master_node, bucket.name, self._kv_gen[OPS.UPDATE], - bucket.kvs[kv_store],OPS.UPDATE, exp, flag, only_store_hash, - batch_size, pause_secs,timeout_secs) + bucket.kvs[kv_store], OPS.UPDATE, exp, flag, only_store_hash, + batch_size, pause_secs, timeout_secs) return task def update_delete_data( @@ -1637,23 +2102,23 @@ def async_update_delete( self._kv_gen[OPS.UPDATE] = copy.deepcopy(self._kv_gen[OPS.CREATE]) self._kv_gen[OPS.UPDATE].start = 0 self._kv_gen[OPS.UPDATE].end = int(self._kv_gen[OPS.CREATE].end - * (float)(perc)/100) + * (float)(perc) / 100) self._kv_gen[OPS.UPDATE].update(fields_to_update=fields_to_update) gen = self._kv_gen[OPS.UPDATE] elif op_type == OPS.DELETE: self._kv_gen[OPS.DELETE] = JsonDocGenerator( - self._kv_gen[OPS.CREATE].name, - op_type= OPS.DELETE, - encoding="utf-8", - start=int((self._kv_gen[OPS.CREATE].end) - * (float)(100 - perc) / 100), - end=self._kv_gen[OPS.CREATE].end) + self._kv_gen[OPS.CREATE].name, + op_type=OPS.DELETE, + encoding="utf-8", + start=int((self._kv_gen[OPS.CREATE].end) + * (float)(100 - perc) / 100), + end=self._kv_gen[OPS.CREATE].end) gen = copy.deepcopy(self._kv_gen[OPS.DELETE]) else: raise FTSException("Unknown op_type passed: %s" % op_type) self.__log.info("At bucket '{0}' @ {1}: operation: {2}, key range {3} - {4}". - format(bucket.name, self.__name, op_type, gen.start, gen.end-1)) + format(bucket.name, self.__name, op_type, gen.start, gen.end - 1)) tasks.append( self.__clusterop.async_load_gen_docs( self.__master_node, @@ -1674,7 +2139,7 @@ def async_run_fts_query_compare(self, fts_index, es, query_index, es_index_name= task = self.__clusterop.async_run_fts_query_compare(fts_index=fts_index, es_instance=es, query_index=query_index, - es_index_name= es_index_name) + es_index_name=es_index_name) return task def run_expiry_pager(self, val=10): @@ -1704,7 +2169,6 @@ def disable_compaction(self, bucket=BUCKET_NAME.DEFAULT): new_config, bucket) - def __async_rebalance_out(self, master=False, num_nodes=1): """Rebalance-out nodes from Cluster @param master: True if rebalance-out master node only. @@ -1714,7 +2178,7 @@ def __async_rebalance_out(self, master=False, num_nodes=1): len(self.__nodes) <= num_nodes, FTSException( "Cluster needs:{0} nodes for rebalance-out, current: {1}". - format((num_nodes + 1), len(self.__nodes))) + format((num_nodes + 1), len(self.__nodes))) ) if master: to_remove_node = [self.__master_node] @@ -1757,7 +2221,7 @@ def async_rebalance_in(self, num_nodes=1, services=None): len(FloatingServers._serverlist) < num_nodes, FTSException( "Number of free nodes: {0}, test tried to add {1} new nodes!". 
- format(len(FloatingServers._serverlist), num_nodes)) + format(len(FloatingServers._serverlist), num_nodes)) ) to_add_node = [] for _ in range(num_nodes): @@ -1784,13 +2248,13 @@ def __async_swap_rebalance(self, master=False, num_nodes=1, services=None): if master: to_remove_node = [self.__master_node] else: - to_remove_node = self.__nodes[len(self.__nodes)-num_nodes:] + to_remove_node = self.__nodes[len(self.__nodes) - num_nodes:] raise_if( len(FloatingServers._serverlist) < num_nodes, FTSException( "Number of free nodes: {0}, test tried to add {1} new nodes!". - format(len(FloatingServers._serverlist), num_nodes)) + format(len(FloatingServers._serverlist), num_nodes)) ) to_add_node = [] for _ in range(num_nodes): @@ -1801,8 +2265,8 @@ def __async_swap_rebalance(self, master=False, num_nodes=1, services=None): self.__log.info( "Starting swap-rebalance [remove_node:{0}] -> [add_node:{1}] at" " {2} cluster {3}" - .format(to_remove_node, to_add_node, self.__name, - self.__master_node.ip)) + .format(to_remove_node, to_add_node, self.__name, + self.__master_node.ip)) task = self.__clusterop.async_rebalance( self.__nodes, to_add_node, @@ -1839,11 +2303,11 @@ def swap_rebalance(self, services=None, num_nodes=1): """Swap rebalance non-master node """ task = self.__async_swap_rebalance(services=services, - num_nodes = num_nodes) + num_nodes=num_nodes) task.result() def async_failover_and_rebalance(self, master=False, num_nodes=1, - graceful=False): + graceful=False): """Asynchronously failover nodes from Cluster @param master: True if failover master node only. @param num_nodes: number of nodes to rebalance-out from cluster. @@ -1857,7 +2321,6 @@ def async_failover_and_rebalance(self, master=False, num_nodes=1, services=None) return tasks - def __async_failover(self, master=False, num_nodes=1, graceful=False): """Failover nodes from Cluster @param master: True if failover master node only. @@ -2024,7 +2487,6 @@ def restart_couchbase_on_all_nodes(self): NodeHelper.wait_warmup_completed(self.__nodes) - def wait_for_flusher_empty(self, timeout=60): """Wait for disk queue to completely flush. 
""" @@ -2044,7 +2506,6 @@ def wait_for_flusher_empty(self, timeout=60): class FTSBaseTest(unittest.TestCase): - def setUp(self): unittest.TestCase.setUp(self) self._input = TestInputSingleton.input @@ -2054,32 +2515,34 @@ def setUp(self): self.__cluster_op = Cluster() self.__init_parameters() self.num_custom_analyzers = self._input.param("num_custom_analyzers", 0) + self.field_name = self._input.param("field_name", None) + self.field_type = self._input.param("field_type", None) + self.field_alias = self._input.param("field_alias", None) self.log.info( "==== FTSbasetests setup is started for test #{0} {1} ====" - .format(self.__case_number, self._testMethodName)) + .format(self.__case_number, self._testMethodName)) # workaround for MB-16794 - #self.sleep(30, "working around MB-16794") + # self.sleep(30, "working around MB-16794") self.__setup_for_test() self.log.info( "==== FTSbasetests setup is finished for test #{0} {1} ====" - .format(self.__case_number, self._testMethodName)) + .format(self.__case_number, self._testMethodName)) def __is_test_failed(self): return (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures or self._resultForDoCleanups.errors)) \ - or (hasattr(self, '_exc_info') - and self._exc_info()[1] is not None) + or (hasattr(self, '_exc_info') + and self._exc_info()[1] is not None) def __is_cleanup_not_needed(self): return ((self.__is_test_failed() and - self._input.param("stop-on-failure", False)) or - self._input.param("skip-cleanup", False)) - + self._input.param("stop-on-failure", False)) or + self._input.param("skip-cleanup", False)) def __is_cluster_run(self): return len(set([server.ip for server in self._input.servers])) == 1 @@ -2093,8 +2556,8 @@ def tearDown(self): if self._input.param("negative_test", False): if hasattr(self, '_resultForDoCleanups') \ - and len(self._resultForDoCleanups.failures - or self._resultForDoCleanups.errors): + and len(self._resultForDoCleanups.failures + or self._resultForDoCleanups.errors): self._resultForDoCleanups.failures = [] self._resultForDoCleanups.errors = [] self.log.info("This is marked as a negative test and contains " @@ -2112,23 +2575,34 @@ def tearDown(self): self.log.info("Collecting logs @ {0}".format(server.ip)) NodeHelper.collect_logs(server) + # ---backup pindex_data if the test has failed + # if self._input.param('backup_pindex_data', False) and \ + # self.__is_test_failed(): + # To reproduce MB-20494, temporarily remove condition to + # backup_pindex_data only if test has failed. 
+ if self._input.param('backup_pindex_data', False) : + for server in self._input.servers: + self.log.info("Backing up pindex data @ {0}".format(server.ip)) + self.backup_pindex_data(server) + try: if self.__is_cleanup_not_needed(): self.log.warn("CLEANUP WAS SKIPPED") return self.log.info( "==== FTSbasetests cleanup is started for test #{0} {1} ====" - .format(self.__case_number, self._testMethodName)) + .format(self.__case_number, self._testMethodName)) self._cb_cluster.cleanup_cluster(self) if self.compare_es: self.teardown_es() self.log.info( "==== FTSbasetests cleanup is finished for test #{0} {1} ===" - .format(self.__case_number, self._testMethodName)) + .format(self.__case_number, self._testMethodName)) finally: self.__cluster_op.shutdown(force=True) unittest.TestCase.tearDown(self) + def __init_logger(self): if self._input.param("log_level", None): self.log.setLevel(level=0) @@ -2146,12 +2620,12 @@ def __init_logger(self): def __setup_for_test(self): use_hostanames = self._input.param("use_hostnames", False) no_buckets = self._input.param("no_buckets", False) - master = self._input.servers[0] + master = self._input.servers[0] first_node = copy.deepcopy(master) self._cb_cluster = CouchbaseCluster("C1", - [first_node], - self.log, - use_hostanames) + [first_node], + self.log, + use_hostanames) self.__cleanup_previous() if self.compare_es: self.setup_es() @@ -2172,8 +2646,7 @@ def __setup_for_test(self): if len(self.__report_error_list) > 0: self.__initialize_error_count_dict() - - def construct_serv_list(self,serv_str): + def construct_serv_list(self, serv_str): """ Constructs a list of node services to rebalance into cluster @@ -2181,19 +2654,19 @@ def construct_serv_list(self,serv_str): stand for services defined in serv_dict @return services_list: like ['kv', 'kv,fts', 'index,n1ql','index'] """ - serv_dict = {'D': 'kv','F': 'fts','I': 'index','Q': 'n1ql'} + serv_dict = {'D': 'kv', 'F': 'fts', 'I': 'index', 'Q': 'n1ql'} for letter, serv in serv_dict.iteritems(): serv_str = serv_str.replace(letter, serv) services_list = re.split('[-,:]', serv_str) for index, serv in enumerate(services_list): - services_list[index] = serv.replace('+', ',') + services_list[index] = serv.replace('+', ',') return services_list def __init_parameters(self): self.__case_number = self._input.param("case_number", 0) self.__num_sasl_buckets = self._input.param("sasl_buckets", 0) self.__num_stand_buckets = self._input.param("standard_buckets", 0) - self.__eviction_policy = self._input.param("eviction_policy",'valueOnly') + self.__eviction_policy = self._input.param("eviction_policy", 'valueOnly') self.__mixed_priority = self._input.param("mixed_priority", None) # Public init parameters - Used in other tests too. 
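construct_serv_list above expands the shorthand service strings used in test configs: 'D' = kv, 'F' = fts, 'I' = index, 'Q' = n1ql; '-', ',' or ':' separate nodes, and '+' combines services on one node. A standalone sketch of the same transformation (the input string is illustrative):

    import re

    def expand_serv_list(serv_str):
        # Mirrors construct_serv_list: map letters to service names,
        # split per node, then turn '+' into the comma the rebalance expects.
        serv_dict = {'D': 'kv', 'F': 'fts', 'I': 'index', 'Q': 'n1ql'}
        for letter, serv in serv_dict.items():
            serv_str = serv_str.replace(letter, serv)
        return [s.replace('+', ',') for s in re.split('[-,:]', serv_str)]

    assert expand_serv_list("D,D+F,I+Q") == ['kv', 'kv,fts', 'index,n1ql']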
@@ -2255,10 +2728,11 @@ def __init_parameters(self): self.create_gen = None self.update_gen = None self.delete_gen = None - + self.sort_fields = self._input.param("sort_fields", None) + if self.sort_fields: + self.sort_fields = self.sort_fields.split(',') self.__fail_on_errors = self._input.param("fail-on-errors", True) - def __initialize_error_count_dict(self): """ initializes self.__error_count_dict with ip, error and err count @@ -2277,7 +2751,7 @@ def __set_free_servers(self): cluster_nodes = self._cb_cluster.get_nodes() for server in total_servers: for cluster_node in cluster_nodes: - if server.ip == cluster_node.ip and\ + if server.ip == cluster_node.ip and \ server.port == cluster_node.port: break else: @@ -2286,10 +2760,18 @@ def __set_free_servers(self): FloatingServers._serverlist.append(server) def __calculate_bucket_size(self, cluster_quota, num_buckets): + + if 'quota_percent' in self._input.test_params: + quota_percent = int(self._input.test_params['quota_percent']) + else: + quota_percent = None + dgm_run = self._input.param("dgm_run", 0) if dgm_run: # buckets cannot be created if size<100MB bucket_size = 256 + elif quota_percent is not None: + bucket_size = int( float(cluster_quota - 500) * float(quota_percent/100.0 ) /float(num_buckets) ) else: bucket_size = int((float(cluster_quota) - 500)/float(num_buckets)) return bucket_size @@ -2302,19 +2784,19 @@ def __create_buckets(self): else: bucket_priority = None num_buckets = self.__num_sasl_buckets + \ - self.__num_stand_buckets + int(self._create_default_bucket) + self.__num_stand_buckets + int(self._create_default_bucket) total_quota = self._cb_cluster.get_mem_quota() bucket_size = self.__calculate_bucket_size( - total_quota, - num_buckets) + total_quota, + num_buckets) if self._create_default_bucket: self._cb_cluster.create_default_bucket( - bucket_size, - self._num_replicas, - eviction_policy=self.__eviction_policy, - bucket_priority=bucket_priority) + bucket_size, + self._num_replicas, + eviction_policy=self.__eviction_policy, + bucket_priority=bucket_priority) self._cb_cluster.create_sasl_buckets( bucket_size, num_buckets=self.__num_sasl_buckets, @@ -2336,7 +2818,7 @@ def create_buckets_on_cluster(self): else: bucket_priority = None num_buckets = self.__num_sasl_buckets + \ - self.__num_stand_buckets + int(self._create_default_bucket) + self.__num_stand_buckets + int(self._create_default_bucket) total_quota = self._cb_cluster.get_mem_quota() bucket_size = self.__calculate_bucket_size( @@ -2362,6 +2844,15 @@ def create_buckets_on_cluster(self): eviction_policy=self.__eviction_policy, bucket_priority=bucket_priority) + def load_sample_buckets(self, server, bucketName): + from lib.remote.remote_util import RemoteMachineShellConnection + shell = RemoteMachineShellConnection(server) + shell.execute_command("""curl -v -u Administrator:password \ + -X POST http://{0}:8091/sampleBuckets/install \ + -d '["{1}"]'""".format(server.ip, bucketName)) + shell.disconnect() + self.sleep(20) + def load_employee_dataset(self, num_items=None): """ Loads the default JSON dataset @@ -2390,7 +2881,6 @@ def load_utf16_data(self, num_keys=None): end=num_keys) self._cb_cluster.load_all_buckets_from_generator(gen) - def load_wiki(self, num_keys=None, lang="EN", encoding="utf-8"): """ Loads the Wikipedia dump. 
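The quota_percent branch added to __calculate_bucket_size reserves 500MB of the cluster quota and splits the requested percentage of the remainder evenly across buckets. A worked example of that arithmetic, with illustrative numbers:

    # Illustrative: 4096MB cluster quota, quota_percent=50, 2 buckets.
    cluster_quota, quota_percent, num_buckets = 4096, 50, 2

    # Same computation as the quota_percent branch above.
    bucket_size = int(float(cluster_quota - 500) * float(quota_percent / 100.0)
                      / float(num_buckets))

    assert bucket_size == 899  # (4096 - 500) * 0.5 / 2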
@@ -2400,10 +2890,10 @@ def load_wiki(self, num_keys=None, lang="EN", encoding="utf-8"): num_keys = self._num_items gen = WikiJSONGenerator("wiki", - lang=lang, - encoding=encoding, - start=0, - end=num_keys) + lang=lang, + encoding=encoding, + start=0, + end=num_keys) self._cb_cluster.load_all_buckets_from_generator(gen) def perform_update_delete(self, fields_to_update=None): @@ -2470,11 +2960,10 @@ def async_perform_update_delete(self, fields_to_update=None): op_type=OPS.DELETE)) load_tasks += self._cb_cluster.async_load_all_buckets_from_generator( - kv_gen = self.update_gen, + kv_gen=self.update_gen, ops=OPS.UPDATE, exp=self._expires) - [task.result() for task in load_tasks] if load_tasks: self.log.info("Batched updates loaded to cluster(s)") @@ -2515,7 +3004,7 @@ def print_crash_stacktrace(self, node, error): """ shell = RemoteMachineShellConnection(node) result, err = shell.execute_command("zgrep -A 40 -B 4 '{0}' {1}/fts.log*". - format(error, NodeHelper.get_log_dir(node))) + format(error, NodeHelper.get_log_dir(node))) for line in result: self.log.info(line) shell.disconnect() @@ -2541,14 +3030,14 @@ def check_error_count_in_fts_log(self, initial=False): self.__error_count_dict[node.ip][error] = count else: self.log.info("Initial '{0}' count on {1} :{2}, now :{3}". - format(error, - node.ip, - self.__error_count_dict[node.ip][error], - count)) + format(error, + node.ip, + self.__error_count_dict[node.ip][error], + count)) if node.ip in self.__error_count_dict.keys(): - if (count > self.__error_count_dict[node.ip][error]): + if (count > self.__error_count_dict[node.ip][error]): error_found_logger.append("{0} found on {1}".format(error, - node.ip)) + node.ip)) self.print_crash_stacktrace(node, error) shell.disconnect() if not initial: @@ -2577,17 +3066,17 @@ def wait_for_indexing_complete(self): bucket_doc_count = index.get_src_bucket_doc_count() if not self.compare_es: self.log.info("Docs in bucket = %s, docs in FTS index '%s': %s" - %(bucket_doc_count, - index.name, - index_doc_count)) + % (bucket_doc_count, + index.name, + index_doc_count)) else: self.es.update_index('es_index') self.log.info("Docs in bucket = %s, docs in FTS index '%s':" " %s, docs in ES index: %s " - % (bucket_doc_count, - index.name, - index_doc_count, - self.es.get_index_count('es_index'))) + % (bucket_doc_count, + index.name, + index_doc_count, + self.es.get_index_count('es_index'))) if bucket_doc_count == index_doc_count: break @@ -2599,7 +3088,7 @@ def wait_for_indexing_complete(self): retry_count -= 1 time.sleep(6) self.log.info("FTS indexed %s docs in %s mins" - % (index_doc_count, round(float((time.time()-start_time)/60), 2))) + % (index_doc_count, round(float((time.time() - start_time) / 60), 2))) def construct_plan_params(self): plan_params = {} @@ -2629,7 +3118,7 @@ def populate_node_partition_map(self, index): if attr['priority'] == 0: break if node not in nodes_partitions.keys(): - nodes_partitions[node] = {'pindex_count': 0, 'pindexes':{}} + nodes_partitions[node] = {'pindex_count': 0, 'pindexes': {}} nodes_partitions[node]['pindex_count'] += 1 nodes_partitions[node]['pindexes'][pindex['uuid']] = [] for partition in pindex['sourcePartitions'].split(','): @@ -2651,19 +3140,19 @@ def is_index_partitioned_balanced(self, index): # check 1 - test number of pindexes partitions_per_pindex = index.get_max_partitions_pindex() - exp_num_pindexes = self._num_vbuckets/partitions_per_pindex + exp_num_pindexes = self._num_vbuckets / partitions_per_pindex if self._num_vbuckets % partitions_per_pindex: import math 
exp_num_pindexes = math.ceil( - self._num_vbuckets/partitions_per_pindex + 0.5) + self._num_vbuckets / partitions_per_pindex + 0.5) total_pindexes = 0 for node in nodes_partitions.keys(): total_pindexes += nodes_partitions[node]['pindex_count'] if total_pindexes != exp_num_pindexes: self.fail("Number of pindexes for %s is %s while" - " expected value is %s" %(index.name, - total_pindexes, - exp_num_pindexes)) + " expected value is %s" % (index.name, + total_pindexes, + exp_num_pindexes)) self.log.info("Validated: Number of PIndexes = %s" % total_pindexes) index.num_pindexes = total_pindexes @@ -2688,17 +3177,17 @@ def is_index_partitioned_balanced(self, index): "fts nodes are %s" % (nodes_partitions.keys(), self._cb_cluster.get_fts_nodes())) self.sleep(10, "pIndexes not distributed across %s nodes yet" - % num_fts_nodes) + % num_fts_nodes) nodes_partitions = self.populate_node_partition_map(index) else: self.log.info("Validated: pIndexes are distributed across %s " % nodes_partitions.keys()) # check 4 - balance check(almost equal no of pindexes on all fts nodes) - exp_partitions_per_node = self._num_vbuckets/num_fts_nodes + exp_partitions_per_node = self._num_vbuckets / num_fts_nodes self.log.info("Expecting num of partitions in each node in range %s-%s" % (exp_partitions_per_node - partitions_per_pindex, - min(1024, exp_partitions_per_node + partitions_per_pindex))) + min(1024, exp_partitions_per_node + partitions_per_pindex))) for node in nodes_partitions.keys(): num_node_partitions = 0 @@ -2717,7 +3206,7 @@ def is_index_partitioned_balanced(self, index): return True def generate_random_queries(self, index, num_queries=1, query_type=["match"], - seed=0): + seed=0): """ Calls FTS-ES Query Generator for employee dataset @param num_queries: number of queries to return @@ -2727,21 +3216,25 @@ def generate_random_queries(self, index, num_queries=1, query_type=["match"], """ from random_query_generator.rand_query_gen import FTSESQueryGenerator query_gen = FTSESQueryGenerator(num_queries, query_type=query_type, - seed=seed, dataset=self.dataset, - fields=index.smart_query_fields) - ''' + seed=seed, dataset=self.dataset, + fields=index.smart_query_fields) for fts_query in query_gen.fts_queries: index.fts_queries.append( json.loads(json.dumps(fts_query, ensure_ascii=False))) - ''' if self.compare_es: - return query_gen.fts_queries, query_gen.es_queries + for es_query in query_gen.es_queries: + # unlike fts, es queries are not nested before sending to fts + # so enclose in query dict here + es_query = {'query': es_query} + self.es.es_queries.append( + json.loads(json.dumps(es_query, ensure_ascii=False))) + return index.fts_queries, self.es.es_queries - return query_gen.fts_queries + return index.fts_queries def create_index(self, bucket, index_name, index_params=None, - plan_params=None): + plan_params=None): """ Creates a default index given bucket, index_name and plan_params """ @@ -2769,7 +3262,7 @@ def create_fts_indexes_all_buckets(self, plan_params=None): for count in range(self.index_per_bucket): self.create_index( bucket, - "%s_index_%s" % (bucket.name, count+1), + "%s_index_%s" % (bucket.name, count + 1), plan_params=plan_params) def create_alias(self, target_indexes, name=None, alias_def=None): @@ -2799,9 +3292,9 @@ def validate_index_count(self, equal_bucket_doc_count=False, for index in self._cb_cluster.get_indexes(): docs_indexed = index.get_indexed_doc_count() bucket_count = self._cb_cluster.get_doc_count_in_bucket( - index.source_bucket) + index.source_bucket) self.log.info("Docs in 
index {0}={1}, bucket docs={2}". - format(index.name, docs_indexed, bucket_count)) + format(index.name, docs_indexed, bucket_count)) if must_equal and docs_indexed != int(must_equal): self.fail("Number of docs indexed is not %s" % must_equal) if docs_indexed == 0 and not zero_rows_ok: @@ -2827,10 +3320,10 @@ def teardown_es(self): def create_es_index_mapping(self, es_mapping, fts_mapping=None): if not (self.num_custom_analyzers > 0): self.es.create_index_mapping(index_name="es_index", - es_mapping=es_mapping,fts_mapping=None) + es_mapping=es_mapping, fts_mapping=None) else: self.es.create_index_mapping(index_name="es_index", - es_mapping=es_mapping,fts_mapping=fts_mapping) + es_mapping=es_mapping, fts_mapping=fts_mapping) def load_data_es_from_generator(self, generator, index_name="es_index"): @@ -2845,7 +3338,6 @@ def load_data_es_from_generator(self, generator, doc['_type'], key) - def create_index_es(self, index_name="es_index"): self.es.create_empty_index_with_bleve_equivalent_std_analyzer(index_name) self.log.info("Created empty index %s on Elastic Search node with " @@ -2861,13 +3353,13 @@ def get_generator(self, dataset, num_items, start=0, encoding="utf-8", return JsonDocGenerator(name="emp", encoding=encoding, start=start, - end=start+num_items) + end=start + num_items) elif dataset == "wiki": return WikiJSONGenerator(name="wiki", lang=lang, encoding=encoding, start=start, - end=start+num_items) + end=start + num_items) def populate_create_gen(self): if self.dataset == "emp": @@ -2879,64 +3371,64 @@ def populate_create_gen(self): elif self.dataset == "all": self.create_gen = [] self.create_gen.append(self.get_generator( - "emp", num_items=self._num_items/2)) + "emp", num_items=self._num_items / 2)) self.create_gen.append(self.get_generator( - "wiki", num_items=self._num_items/2)) + "wiki", num_items=self._num_items / 2)) def populate_update_gen(self, fields_to_update=None): if self.dataset == "emp": self.update_gen = copy.deepcopy(self.create_gen) self.update_gen.start = 0 self.update_gen.end = int(self.create_gen.end * - (float)(self._perc_upd)/100) + (float)(self._perc_upd) / 100) self.update_gen.update(fields_to_update=fields_to_update) elif self.dataset == "wiki": self.update_gen = copy.deepcopy(self.create_gen) self.update_gen.start = 0 self.update_gen.end = int(self.create_gen.end * - (float)(self._perc_upd)/100) + (float)(self._perc_upd) / 100) elif self.dataset == "all": self.update_gen = [] self.update_gen = copy.deepcopy(self.create_gen) for itr, _ in enumerate(self.update_gen): self.update_gen[itr].start = 0 self.update_gen[itr].end = int(self.create_gen[itr].end * - (float)(self._perc_upd)/100) + (float)(self._perc_upd) / 100) if self.update_gen[itr].name == "emp": self.update_gen[itr].update(fields_to_update=fields_to_update) def populate_delete_gen(self): if self.dataset == "emp": self.delete_gen = JsonDocGenerator( - self.create_gen.name, - op_type= OPS.DELETE, - encoding="utf-8", - start=int((self.create_gen.end) - * (float)(100 - self._perc_del) / 100), - end=self.create_gen.end) + self.create_gen.name, + op_type=OPS.DELETE, + encoding="utf-8", + start=int((self.create_gen.end) + * (float)(100 - self._perc_del) / 100), + end=self.create_gen.end) elif self.dataset == "wiki": self.delete_gen = WikiJSONGenerator(name="wiki", - encoding="utf-8", - start=int((self.create_gen.end) - * (float)(100 - self._perc_del) / 100), - end=self.create_gen.end, - op_type=OPS.DELETE) + encoding="utf-8", + start=int((self.create_gen.end) + * (float)(100 - self._perc_del) / 100), + 
end=self.create_gen.end, + op_type=OPS.DELETE) elif self.dataset == "all": self.delete_gen = [] self.delete_gen.append(JsonDocGenerator( - "emp", - op_type= OPS.DELETE, - encoding="utf-8", - start=int((self.create_gen[0].end) - * (float)(100 - self._perc_del) / 100), - end=self.create_gen[0].end)) + "emp", + op_type=OPS.DELETE, + encoding="utf-8", + start=int((self.create_gen[0].end) + * (float)(100 - self._perc_del) / 100), + end=self.create_gen[0].end)) self.delete_gen.append(WikiJSONGenerator(name="wiki", - encoding="utf-8", - start=int((self.create_gen[1].end) - * (float)(100 - self._perc_del) / 100), - end=self.create_gen[1].end, - op_type=OPS.DELETE)) + encoding="utf-8", + start=int((self.create_gen[1].end) + * (float)(100 - self._perc_del) / 100), + end=self.create_gen[1].end, + op_type=OPS.DELETE)) def load_data(self): """ @@ -2963,12 +3455,12 @@ def async_load_data(self): if isinstance(gen, list): for generator in gen: load_tasks.append(self.es.async_bulk_load_ES(index_name='es_index', - gen=generator, - op_type='create')) + gen=generator, + op_type='create')) else: load_tasks.append(self.es.async_bulk_load_ES(index_name='es_index', - gen=gen, - op_type='create')) + gen=gen, + op_type='create')) load_tasks += self._cb_cluster.async_load_all_buckets_from_generator( self.create_gen) return load_tasks @@ -2994,7 +3486,7 @@ def run_query_and_compare(self, index, es_index_name=None): task.result() if not task.passed: fail_count += 1 - failed_queries.append(task.query_index+1) + failed_queries.append(task.query_index + 1) if fail_count: self.fail("%s out of %s queries failed! - %s" % (fail_count, @@ -3002,7 +3494,7 @@ def run_query_and_compare(self, index, es_index_name=None): failed_queries)) else: self.log.info("SUCCESS: %s out of %s queries passed" - %(num_queries-fail_count, num_queries)) + % (num_queries - fail_count, num_queries)) def grab_fts_diag(self): """ @@ -3032,7 +3524,7 @@ def grab_fts_diag(self): 'Accept': '*/*'} filename = "{0}_fts_diag.json".format(serverInfo.ip) page = urllib2.urlopen(req) - with open(path+'/'+filename, 'wb') as output: + with open(path + '/' + filename, 'wb') as output: os.write(1, "downloading {0} ...".format(serverInfo.ip)) while True: buffer = page.read(65536) @@ -3045,7 +3537,7 @@ def grab_fts_diag(self): zipped.writelines(file_input) file_input.close() zipped.close() - os.remove(path+'/'+filename) + os.remove(path + '/' + filename) print "downloaded and zipped diags @ : {0}/{1}".format(path, filename) except urllib2.URLError as error: @@ -3054,3 +3546,27 @@ def grab_fts_diag(self): print "unable to obtain fts diags from {0}".format(diag_url) except Exception as e: print "unable to obtain fts diags from {0} :{1}".format(diag_url, e) + + def backup_pindex_data(self, server): + remote = RemoteMachineShellConnection(server) + stamp = time.strftime("%d_%m_%Y_%H_%M") + data_dir = NodeHelper.get_data_dir(server) + + try: + info = remote.extract_remote_info() + if info.type.lower() != 'windows': + self.log.info("Backing up pindex data files from {0}".format(server.ip)) + command = "mkdir -p /tmp/backup_pindex_data/{0};" \ + "zip -r /tmp/backup_pindex_data/{0}/fts_pindex_data.zip " \ + "{1}/data/@fts/*".format(stamp, data_dir) + + remote.execute_command(command) + output, error = remote.execute_command("ls -la /tmp/backup_pindex_data/{0}".format(stamp)) + for o in output: + print o + self.log.info("***pindex files for {0} are copied to /tmp/backup_pindex_data/{1} on {0}".format(server.ip,stamp)) + remote.disconnect() + return True + except Exception as 
ex:
+            print ex
+            return False
diff --git a/pytests/fts/stable_topology_fts.py b/pytests/fts/stable_topology_fts.py
index d106928b2..5b1b8e7e0 100644
--- a/pytests/fts/stable_topology_fts.py
+++ b/pytests/fts/stable_topology_fts.py
@@ -3,6 +3,7 @@
 from lib.membase.api.rest_client import RestConnection
 from lib.membase.api.exception import FTSException, ServerUnavailableException
 from TestInput import TestInputSingleton
+import copy


 class StableTopFTS(FTSBaseTest):
@@ -300,13 +301,7 @@ def index_query_beer_sample(self):
         #delete default bucket
         self._cb_cluster.delete_bucket("default")
         master = self._cb_cluster.get_master_node()
-        from lib.remote.remote_util import RemoteMachineShellConnection
-        shell = RemoteMachineShellConnection(master)
-        shell.execute_command("""curl -v -u Administrator:password \
-                        -X POST http://{0}:8091/sampleBuckets/install \
-                        -d '["beer-sample"]'""".format(master.ip))
-        shell.disconnect()
-        self.sleep(20)
+        self.load_sample_buckets(server=master, bucketName="beer-sample")
         bucket = self._cb_cluster.get_bucket_by_name("beer-sample")
         index = self.create_index(bucket, "beer-index")
         self.wait_for_indexing_complete()
@@ -468,18 +463,15 @@ def test_field_name_alias(self):
         """
         Test the Searchable As property in field mapping
         """
-        field_name = self._input.param("field_name", "")
-        field_type = self._input.param("field_type", "")
-        field_alias = self._input.param("field_alias", "")
-
         self.load_data()
         index = self.create_index(
             self._cb_cluster.get_bucket_by_name('default'),
             "default_index")
         self.wait_for_indexing_complete()
-        index.add_child_field_to_default_mapping(field_name=field_name, field_type=field_type,
-                                                 field_alias=field_alias)
+        index.add_child_field_to_default_mapping(field_name=self.field_name,
+                                                 field_type=self.field_type,
+                                                 field_alias=self.field_alias)
         index.index_definition['uuid'] = index.get_uuid()
         index.update()
         self.sleep(5)
@@ -497,3 +489,383 @@ def test_field_name_alias(self):
                                                 zero_results_ok=zero_results_ok,
                                                 expected_hits=expected_hits)
             self.log.info("Hits: %s" % hits)
+
+    def test_one_field_multiple_analyzer(self):
+        """
+        1. Create a default FTS index on the wiki dataset
+        2. Update it to add a field mapping for the revision.text.#text field with the 'en' analyzer
+        3. Should get 0 search results for a query
+        4. Update it to add another field mapping for the same field, with the 'fr' analyzer
+        5. The same query should yield more results now.
+ + """ + self.load_data() + index = self.create_index( + self._cb_cluster.get_bucket_by_name('default'), + "default_index") + self.wait_for_indexing_complete() + index.add_child_field_to_default_mapping(field_name=self.field_name, + field_type=self.field_type, + field_alias=self.field_alias, + analyzer="en") + index.index_definition['uuid'] = index.get_uuid() + index.update() + self.sleep(5) + self.wait_for_indexing_complete() + zero_results_ok = True + expected_hits = int(self._input.param("expected_hits1", 0)) + if expected_hits: + zero_results_ok = False + query = eval(self._input.param("query", str(self.sample_query))) + if isinstance(query, str): + query = json.loads(query) + zero_results_ok = True + for index in self._cb_cluster.get_indexes(): + hits, _, _, _ = index.execute_query(query, + zero_results_ok=zero_results_ok, + expected_hits=expected_hits) + self.log.info("Hits: %s" % hits) + + index.add_analyzer_to_existing_field_map(field_name=self.field_name, + field_type=self.field_type, + field_alias=self.field_alias, + analyzer="fr") + + index.index_definition['uuid'] = index.get_uuid() + index.update() + self.sleep(5) + self.wait_for_indexing_complete() + zero_results_ok = True + expected_hits = int(self._input.param("expected_hits2", 0)) + if expected_hits: + zero_results_ok = False + query = eval(self._input.param("query", str(self.sample_query))) + if isinstance(query, str): + query = json.loads(query) + zero_results_ok = True + for index in self._cb_cluster.get_indexes(): + hits, _, _, _ = index.execute_query(query, + zero_results_ok=zero_results_ok, + expected_hits=expected_hits) + self.log.info("Hits: %s" % hits) + + def test_facets(self): + field_indexed = self._input.param("field_indexed",True) + self.load_data() + index = self.create_index( + self._cb_cluster.get_bucket_by_name('default'), + "default_index") + self.wait_for_indexing_complete() + index.add_child_field_to_default_mapping(field_name="type", + field_type="text", + field_alias="type", + analyzer="keyword") + if field_indexed: + index.add_child_field_to_default_mapping(field_name="dept", + field_type="text", + field_alias="dept", + analyzer="keyword") + index.add_child_field_to_default_mapping(field_name="salary", + field_type="number", + field_alias="salary") + index.add_child_field_to_default_mapping(field_name="join_date", + field_type="datetime", + field_alias="join_date") + index.index_definition['uuid'] = index.get_uuid() + index.update() + self.sleep(5) + self.wait_for_indexing_complete() + zero_results_ok = True + expected_hits = int(self._input.param("expected_hits", 0)) + if expected_hits: + zero_results_ok = False + query = eval(self._input.param("query", str(self.sample_query))) + if isinstance(query, str): + query = json.loads(query) + zero_results_ok = True + try: + for index in self._cb_cluster.get_indexes(): + hits, _, _, _, facets = index.execute_query_with_facets(query, + zero_results_ok=zero_results_ok, + expected_hits=expected_hits) + self.log.info("Hits: %s" % hits) + self.log.info("Facets: %s" % facets) + index.validate_facets_in_search_results(no_of_hits=hits, + facets_returned=facets) + except Exception as err: + self.log.error(err) + self.fail("Testcase failed: "+ err.message) + + def test_doc_config(self): + # delete default bucket + self._cb_cluster.delete_bucket("default") + master = self._cb_cluster.get_master_node() + + # Load Travel Sample bucket and create an index + self.load_sample_buckets(server=master, bucketName="travel-sample") + bucket = 
self._cb_cluster.get_bucket_by_name("travel-sample") + index = self.create_index(bucket, "travel-index") + self.sleep(10) + + # Add Type Mapping + index.add_type_mapping_to_index_definition(type="airport", + analyzer="en") + mode = self._input.param("doc_config_mode", "type_field") + index.add_doc_config_to_index_definition(mode=mode) + index.index_definition['uuid'] = index.get_uuid() + index.update() + self.sleep(15) + self.wait_for_indexing_complete() + self.validate_index_count(equal_bucket_doc_count=True, + zero_rows_ok=False) + + # Run Query + expected_hits = int(self._input.param("expected_hits", 0)) + query = eval(self._input.param("query", str(self.sample_query))) + try: + for index in self._cb_cluster.get_indexes(): + hits, _, _, _ = index.execute_query(query, + zero_results_ok=False, + expected_hits=expected_hits) + self.log.info("Hits: %s" % hits) + except Exception as err: + self.log.error(err) + self.fail("Testcase failed: " + err.message) + + def test_boost_query_type(self): + # Create bucket, create index + self.load_data() + index = self.create_index( + self._cb_cluster.get_bucket_by_name('default'), + "default_index") + self.wait_for_indexing_complete() + index.add_type_mapping_to_index_definition(type="emp", + analyzer="keyword") + index.index_definition['uuid'] = index.get_uuid() + index.update() + self.sleep(15) + self.wait_for_indexing_complete() + zero_results_ok = False + expected_hits = 3 + + # Run Query w/o Boosting and compare the scores for Docs emp10000071 & + # emp10000042. Should be the same + query = {"disjuncts": [{"match": "Safiya", "field": "name"}, + {"match": "Palmer", "field": "name"}]} + if isinstance(query, str): + query = json.loads(query) + zero_results_ok = True + try: + for index in self._cb_cluster.get_indexes(): + hits, contents, _, _ = index.execute_query(query, + zero_results_ok=zero_results_ok, + expected_hits=expected_hits, + return_raw_hits=True) + self.log.info("Hits: %s" % hits) + self.log.info("Contents: %s" % contents) + score_before_boosting_doc1 = index.get_score_from_query_result_content( + contents=contents, doc_id=u'emp10000071') + score_before_boosting_doc2 = index.get_score_from_query_result_content( + contents=contents, doc_id=u'emp10000042') + + self.log.info("Scores before boosting:") + self.log.info("") + self.log.info("emp10000071: %s", score_before_boosting_doc1) + self.log.info("emp10000042: %s", score_before_boosting_doc2) + + except Exception as err: + self.log.error(err) + self.fail("Testcase failed: " + err.message) + + if not score_before_boosting_doc1 == score_before_boosting_doc2: + self.fail("Testcase failed: Scores for emp10000071 & emp10000042 " + "are not equal before boosting") + + # Run Query w/o Boosting and compare the scores for Docs emp10000071 & + # emp10000042. emp10000071 score should have improved w.r.t. 
emp10000042 + query = {"disjuncts": [{"match": "Safiya^2", "field": "name"}, + {"match": "Palmer", "field": "name"}]} + if isinstance(query, str): + query = json.loads(query) + zero_results_ok = True + try: + for index in self._cb_cluster.get_indexes(): + hits, contents, _, _ = index.execute_query(query, + zero_results_ok=zero_results_ok, + expected_hits=expected_hits, + return_raw_hits=True) + self.log.info("Hits: %s" % hits) + self.log.info("Contents: %s" % contents) + score_after_boosting_doc1 = index.get_score_from_query_result_content( + contents=contents, doc_id=u'emp10000071') + score_after_boosting_doc2 = index.get_score_from_query_result_content( + contents=contents, doc_id=u'emp10000042') + + self.log.info("Scores after boosting:") + self.log.info("") + self.log.info("emp10000071: %s", score_after_boosting_doc1) + self.log.info("emp10000042: %s", score_after_boosting_doc2) + except Exception as err: + self.log.error(err) + self.fail("Testcase failed: " + err.message) + + if not score_after_boosting_doc1 > score_after_boosting_doc2: + self.fail("Testcase failed: Boosting didn't improve score for emp10000071 w.r.t emp10000042") + + def test_doc_id_query_type(self): + # Create bucket, create index + self.load_data() + index = self.create_index( + self._cb_cluster.get_bucket_by_name('default'), + "default_index") + self.wait_for_indexing_complete() + index.add_type_mapping_to_index_definition(type="emp", + analyzer="keyword") + + index.index_definition['uuid'] = index.get_uuid() + index.update() + self.sleep(15) + self.wait_for_indexing_complete() + + expected_hits = int(self._input.param("expected_hits", 0)) + query = eval(self._input.param("query", str(self.sample_query))) + if isinstance(query, str): + query = json.loads(query) + # From the Query string, fetch the Doc IDs + doc_ids = copy.deepcopy(query['ids']) + + # If invalid_doc_id param is passed, add this to the query['ids'] + invalid_doc_id = self._input.param("invalid_doc_id",0) + if invalid_doc_id: + query['ids'].append(invalid_doc_id) + + # If disjuncts_query is passed, join query and disjuncts_query + # to form a new query string + disjuncts_query = self._input.param("disjuncts_query", None) + if disjuncts_query: + if isinstance(disjuncts_query, str): + disjuncts_query = json.loads(disjuncts_query) + new_query = {} + new_query['disjuncts'] = [] + new_query['disjuncts'].append(disjuncts_query) + new_query['disjuncts'].append(query) + query = new_query + + # Execute Query + zero_results_ok = False + try: + for index in self._cb_cluster.get_indexes(): + hits, contents, _, _ = index.execute_query(query, + zero_results_ok=zero_results_ok, + expected_hits=expected_hits, + return_raw_hits=True) + self.log.info("Hits: %s" % hits) + self.log.info("Contents: %s" % contents) + # For each doc id passed in the query, validate the + # presence in the search results + for doc_id in doc_ids: + self.assertTrue(index.is_doc_present_in_query_result_content + (contents=contents, doc_id=doc_id),"Doc ID " + "%s is not present in Search results" + % doc_id) + score = index.get_score_from_query_result_content\ + (contents=contents, doc_id=doc_id) + self.log.info ("Score for Doc ID {0} is {1}". 
+ format(doc_id,score)) + if invalid_doc_id: + # Validate if invalid doc id was passed, it should + # not be present in the search results + self.assertFalse(index.is_doc_present_in_query_result_content + (contents=contents, doc_id=invalid_doc_id), + "Doc ID %s is present in Search results" + % invalid_doc_id) + + except Exception as err: + self.log.error(err) + self.fail("Testcase failed: " + err.message) + + def test_sorting_of_results(self): + self.load_data() + index = self.create_index( + self._cb_cluster.get_bucket_by_name('default'), + "default_index") + self.wait_for_indexing_complete() + + zero_results_ok = True + expected_hits = int(self._input.param("expected_hits", 0)) + default_query = {"disjuncts": [{"match": "Safiya", "field": "name"}, + {"match": "Palmer", "field": "name"}]} + query = eval(self._input.param("query", str(default_query))) + if expected_hits: + zero_results_ok = False + if isinstance(query, str): + query = json.loads(query) + + try: + for index in self._cb_cluster.get_indexes(): + hits, raw_hits, _, _ = index.execute_query(query = query, + zero_results_ok=zero_results_ok, + expected_hits=expected_hits, + sort_fields=self.sort_fields, + return_raw_hits=True) + + self.log.info("Hits: %s" % hits) + self.log.info("Doc IDs: %s" % raw_hits) + if hits: + result = index.validate_sorted_results(raw_hits, + self.sort_fields) + if not result: + self.fail( + "Testcase failed. Actual results do not match expected.") + except Exception as err: + self.log.error(err) + self.fail("Testcase failed: " + err.message) + + def test_sorting_of_results_custom_map(self): + self.load_data() + index = self.create_index( + self._cb_cluster.get_bucket_by_name('default'), + "default_index") + self.wait_for_indexing_complete() + index.add_child_field_to_default_mapping(field_name="name", + field_type="text", + field_alias="name", + analyzer="en") + index.add_child_field_to_default_mapping(field_name="join_date", + field_type="datetime", + field_alias="join_date") + index.index_definition['uuid'] = index.get_uuid() + index.update() + self.sleep(5) + self.wait_for_indexing_complete() + + zero_results_ok = True + expected_hits = int(self._input.param("expected_hits", 0)) + default_query = {"disjuncts": [{"match": "Safiya", "field": "name"}, + {"match": "Palmer", "field": "name"}]} + + query = eval(self._input.param("query", str(default_query))) + if expected_hits: + zero_results_ok = False + if isinstance(query, str): + query = json.loads(query) + + try: + for index in self._cb_cluster.get_indexes(): + hits, raw_hits, _, _ = index.execute_query(query=query, + zero_results_ok=zero_results_ok, + expected_hits=expected_hits, + sort_fields=self.sort_fields, + return_raw_hits=True) + + self.log.info("Hits: %s" % hits) + self.log.info("Doc IDs: %s" % raw_hits) + if hits: + result = index.validate_sorted_results(raw_hits, + self.sort_fields) + if not result: + self.fail( + "Testcase failed. Actual results do not match expected.") + except Exception as err: + self.log.error(err) + self.fail("Testcase failed: " + err.message) \ No newline at end of file diff --git a/pytests/memcapable.py b/pytests/memcapable.py index 165de7395..96f9b6918 100644 --- a/pytests/memcapable.py +++ b/pytests/memcapable.py @@ -873,8 +873,15 @@ def _do_warmup(self, howmany, timeout_in_seconds=1800): # to it when it is loaded... disconnect and try again # at a later time.. 
        self.log.info("Waiting for bucket to be loaded again")
-        ready = BucketOperationHelper.wait_for_memcached(self.master, "default")
-        self.assertTrue(ready, "wait_for_memcached failed")
+        for _ in range(5):
+            self.log.info("Waiting for bucket to be loaded again.")
+            try:
+                ready = BucketOperationHelper.wait_for_memcached(self.master, "default")
+                self.assertTrue(ready, "wait_for_memcached failed")
+            except:
+                continue
+            break
+
         while True:
             self.onenodemc = MemcachedClientHelper.direct_client(self.master, "default")
             stats = self.onenodemc.stats()
diff --git a/pytests/newupgradebasetest.py b/pytests/newupgradebasetest.py
index 8e7a22afc..d76c1228f 100644
--- a/pytests/newupgradebasetest.py
+++ b/pytests/newupgradebasetest.py
@@ -210,10 +210,18 @@ def _get_build(self, server, version, remote, is_amazon=False, info=None):
             build_repo = CB_REPO + CB_VERSION_NAME[version[:3]] + "/"
         elif version[:5] in COUCHBASE_MP_VERSION:
             build_repo = MV_LATESTBUILD_REPO
+
+        if self.initial_build_type == "community":
+            edition_type = "couchbase-server-community"
+        else:
+            edition_type = "couchbase-server-enterprise"
+
         builds, changes = BuildQuery().get_all_builds(version=version, timeout=self.wait_timeout * 5, \
-                          deliverable_type=info.deliverable_type, architecture_type=info.architecture_type, \
-                          edition_type="couchbase-server-enterprise", repo=build_repo, \
-                          distribution_version=info.distribution_version.lower())
+                                                      deliverable_type=info.deliverable_type,
+                                                      architecture_type=info.architecture_type, \
+                                                      edition_type=edition_type, repo=build_repo, \
+                                                      distribution_version=info.distribution_version.lower())
+
         self.log.info("finding build %s for machine %s" % (version, server))
         if re.match(r'[1-9].[0-9].[0-9]-[0-9]+$', version):
diff --git a/pytests/rebalance/rebalance_start_stop.py b/pytests/rebalance/rebalance_start_stop.py
new file mode 100644
index 000000000..7efc07b9b
--- /dev/null
+++ b/pytests/rebalance/rebalance_start_stop.py
@@ -0,0 +1,244 @@
+from couchbase_helper.documentgenerator import BlobGenerator
+from membase.api.rest_client import RestConnection, RestHelper
+from membase.helper.rebalance_helper import RebalanceHelper
+from rebalance.rebalance_base import RebalanceBaseTest
+
+
+class RebalanceStartStopTests(RebalanceBaseTest):
+    def setUp(self):
+        super(RebalanceStartStopTests, self).setUp()
+        extra_nodes_in = self.input.param("extra_nodes_in", 0)
+        extra_nodes_out = self.input.param("extra_nodes_out", 0)
+        self.servs_init = self.servers[:self.nodes_init]
+        self.servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
+        self.servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
+        self.extra_servs_in = [self.servers[i + self.nodes_init + self.nodes_in] for i in range(extra_nodes_in)]
+        self.extra_servs_out = [self.servers[self.nodes_init - i - 1 - self.nodes_out] for i in range(extra_nodes_out)]
+
+    def tearDown(self):
+        super(RebalanceStartStopTests, self).tearDown()
+
+    def test_start_stop_rebalance(self):
+        """
+        Start-stop rebalance in/out, adding/removing additional nodes after stopping the rebalance.
+
+        This test begins by loading a given number of items into the cluster. It then
+        adds servs_in nodes, removes servs_out nodes and starts the rebalance. The rebalance
+        is stopped when its progress reaches 20%. After that we add extra_nodes_in, remove
+        extra_nodes_out and restart the rebalance with the new cluster configuration. Later the
+        rebalance is stopped/restarted at 40/60/80% progress. After each iteration we wait for
+        the disk queues to drain, and then verify that there has been no data loss:
+        sum(curr_items) must match curr_items_total. Once the cluster is rebalanced, the test is finished.
+        The order of added/removed nodes looks like:
+        self.nodes_init|servs_in|extra_nodes_in|extra_nodes_out|servs_out
+        """
+        rest = RestConnection(self.master)
+        self._wait_for_stats_all_buckets(self.servs_init)
+        self.log.info("Current nodes : {0}".format([node.id for node in rest.node_statuses()]))
+        self.log.info("Adding nodes {0} to cluster".format(self.servs_in))
+        self.log.info("Removing nodes {0} from cluster".format(self.servs_out))
+        add_in_once = self.extra_servs_in
+        result_nodes = set(self.servs_init + self.servs_in) - set(self.servs_out)
+        # the last iteration will be with i=5, for this case rebalance should be completed,
+        # that also is verified and tracked
+        for i in range(1, 6):
+            if i == 1:
+                rebalance = self.cluster.async_rebalance(self.servs_init[:self.nodes_init],
+                                                         self.servs_in, self.servs_out)
+            else:
+                rebalance = self.cluster.async_rebalance(self.servs_init[:self.nodes_init] + self.servs_in,
+                                                         add_in_once, self.servs_out + self.extra_servs_out)
+                add_in_once = []
+                result_nodes = set(self.servs_init + self.servs_in + self.extra_servs_in) - set(
+                    self.servs_out + self.extra_servs_out)
+            self.sleep(20)
+            expected_progress = 20 * i
+            reached = RestHelper(rest).rebalance_reached(expected_progress)
+            self.assertTrue(reached, "Rebalance failed or did not reach {0}%".format(expected_progress))
+            if not RestHelper(rest).is_cluster_rebalanced():
+                self.log.info("Stop the rebalance")
+                stopped = rest.stop_rebalance(wait_timeout=self.wait_timeout / 3)
+                self.assertTrue(stopped, msg="Unable to stop rebalance")
+                rebalance.result()
+            if RestHelper(rest).is_cluster_rebalanced():
+                self.verify_cluster_stats(result_nodes)
+                self.log.info(
+                    "Rebalance was completed when tried to stop rebalance on {0}%".format(str(expected_progress)))
+                break
+            else:
+                self.log.info("Rebalance is still required. Verifying the data in the buckets")
+                self._verify_all_buckets(self.master, timeout=None, max_verify=self.max_verify, batch_size=1)
+                self.verify_cluster_stats(result_nodes, check_bucket_stats=False, verify_total_items=False)
+                self.verify_unacked_bytes_all_buckets()
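The loop above repeats one pattern at every 20% step: wait until the rebalance reports the target progress, then stop it unless it already ran to completion. A minimal sketch of that pattern, reusing only the RestConnection/RestHelper calls visible in the test; stop_rebalance_at is a hypothetical helper, not part of this patch:

from membase.api.rest_client import RestConnection, RestHelper

def stop_rebalance_at(master, expected_progress, wait_timeout=120):
    # Poll until the in-flight rebalance reports at least expected_progress percent.
    rest = RestConnection(master)
    if not RestHelper(rest).rebalance_reached(expected_progress):
        raise AssertionError("rebalance failed or never reached {0}%".format(expected_progress))
    # If the rebalance already finished, there is nothing left to stop.
    if RestHelper(rest).is_cluster_rebalanced():
        return True
    if not rest.stop_rebalance(wait_timeout=wait_timeout / 3):
        raise AssertionError("unable to stop rebalance")
    return False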
+    def test_start_stop_rebalance_with_mutations(self):
+        """
+        Start-stop rebalance in/out, adding/removing additional nodes after stopping the rebalance,
+        with data mutations running in the background.
+
+        This test begins by loading a given number of items into the cluster. It then
+        adds servs_in nodes, removes servs_out nodes and starts the rebalance. The rebalance
+        is stopped when its progress reaches 20%. After that we add extra_nodes_in, remove
+        extra_nodes_out and restart the rebalance with the new cluster configuration. Later the
+        rebalance is stopped/restarted at 40/60/80% progress. Before each iteration we start data
+        mutations and end the mutations before data validation. After each iteration we wait for
+        the disk queues to drain, and then verify that there has been no data loss:
+        sum(curr_items) must match curr_items_total. Once the cluster is rebalanced, the test is finished.
+        The order of added/removed nodes looks like:
+        self.nodes_init|servs_in|extra_nodes_in|extra_nodes_out|servs_out
+        """
+        rest = RestConnection(self.master)
+        self._wait_for_stats_all_buckets(self.servs_init)
+        self.log.info("Current nodes : {0}".format([node.id for node in rest.node_statuses()]))
+        self.log.info("Adding nodes {0} to cluster".format(self.servs_in))
+        self.log.info("Removing nodes {0} from cluster".format(self.servs_out))
+        add_in_once = self.extra_servs_in
+        result_nodes = set(self.servs_init + self.servs_in) - set(self.servs_out)
+        # the last iteration will be with i=5, for this case rebalance should be completed,
+        # that also is verified and tracked
+        for i in range(1, 6):
+            if self.withMutationOps:
+                tasks = self._async_load_all_buckets(self.master, self.gen_update, "update", 0)
+            if i == 1:
+                rebalance = self.cluster.async_rebalance(self.servs_init[:self.nodes_init], self.servs_in,
+                                                         self.servs_out)
+            else:
+                rebalance = self.cluster.async_rebalance(self.servs_init[:self.nodes_init] + self.servs_in, add_in_once,
                                                         self.servs_out + self.extra_servs_out)
+                add_in_once = []
+                result_nodes = set(self.servs_init + self.servs_in + self.extra_servs_in) - set(
+                    self.servs_out + self.extra_servs_out)
+            self.sleep(20)
+            expected_progress = 20 * i
+            reached = RestHelper(rest).rebalance_reached(expected_progress)
+            self.assertTrue(reached, "Rebalance failed or did not reach {0}%".format(expected_progress))
+            if not RestHelper(rest).is_cluster_rebalanced():
+                self.log.info("Stop the rebalance")
+                stopped = rest.stop_rebalance(wait_timeout=self.wait_timeout / 3)
+                self.assertTrue(stopped, msg="Unable to stop rebalance")
+                if self.withMutationOps:
+                    for task in tasks:
+                        task.result(self.wait_timeout * 20)
+                self.sleep(5)
+                rebalance.result()
+            if RestHelper(rest).is_cluster_rebalanced():
+                self.verify_cluster_stats(result_nodes)
+                self.log.info(
+                    "Rebalance was completed when tried to stop rebalance on {0}%".format(str(expected_progress)))
+                break
+            else:
+                self.log.info("Rebalance is still required. Verifying the data in the buckets")
+                self._verify_all_buckets(self.master, timeout=None, max_verify=self.max_verify, batch_size=1)
+                self.verify_cluster_stats(result_nodes, check_bucket_stats=False, verify_total_items=False)
+                self.verify_unacked_bytes_all_buckets()
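When withMutationOps is set, the test relies on _async_load_all_buckets returning a list of loader tasks that must all be joined before any validation runs, since result() both blocks and re-raises loader failures. A minimal sketch of that drain step, assuming each task object exposes result(timeout) as in the test above; drain_mutation_tasks is a hypothetical name:

def drain_mutation_tasks(tasks, timeout):
    # Block until every background loader finishes; result() re-raises
    # any exception a loader hit, failing the test early.
    for task in tasks:
        task.result(timeout)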
+    def test_start_stop_rebalance_before_mutations(self):
+        """
+        Start-stop rebalance in/out, adding/removing additional nodes after stopping the rebalance.
+
+        This test begins by loading a given number of items into the cluster. It then
+        adds servs_in nodes, removes servs_out nodes and starts the rebalance. The rebalance
+        is stopped when its progress reaches 20%. After that we add extra_nodes_in, remove
+        extra_nodes_out and restart the rebalance with the new cluster configuration. Later the
+        rebalance is stopped/restarted at 40/60/80% progress. After each iteration we wait for
+        the disk queues to drain, and then verify that there has been no data loss:
+        sum(curr_items) must match curr_items_total. Once the cluster is rebalanced, the test is finished.
+        The order of added/removed nodes looks like:
+        self.nodes_init|servs_in|extra_nodes_in|extra_nodes_out|servs_out
+        """
+        rest = RestConnection(self.master)
+        self._wait_for_stats_all_buckets(self.servs_init)
+        self.log.info("Current nodes : {0}".format([node.id for node in rest.node_statuses()]))
+        self.log.info("Adding nodes {0} to cluster".format(self.servs_in))
+        self.log.info("Removing nodes {0} from cluster".format(self.servs_out))
+        add_in_once = self.extra_servs_in
+        result_nodes = set(self.servs_init + self.servs_in) - set(self.servs_out)
+        # the last iteration will be with i=5, for this case rebalance should be completed,
+        # that also is verified and tracked
+        for i in range(1, 6):
+            if i == 1:
+                rebalance = self.cluster.async_rebalance(self.servs_init[:self.nodes_init], self.servs_in,
+                                                         self.servs_out)
+            else:
+                rebalance = self.cluster.async_rebalance(self.servs_init[:self.nodes_init] + self.servs_in, add_in_once,
+                                                         self.servs_out + self.extra_servs_out)
+                add_in_once = []
+                result_nodes = set(self.servs_init + self.servs_in + self.extra_servs_in) - set(
+                    self.servs_out + self.extra_servs_out)
+            self.sleep(20)
+            expected_progress = 20 * i
+            reached = RestHelper(rest).rebalance_reached(expected_progress)
+            self.assertTrue(reached, "Rebalance failed or did not reach {0}%".format(expected_progress))
+            if not RestHelper(rest).is_cluster_rebalanced():
+                self.log.info("Stop the rebalance")
+                stopped = rest.stop_rebalance(wait_timeout=self.wait_timeout / 3)
+                self.assertTrue(stopped, msg="Unable to stop rebalance")
+                self._verify_all_buckets(self.master, timeout=None, max_verify=self.max_verify, batch_size=1)
+                if self.withMutationOps:
+                    tasks = self._async_load_all_buckets(self.master, self.gen_update, "update", 0)
+                if self.withMutationOps:
+                    for task in tasks:
+                        task.result(self.wait_timeout * 20)
+                self.sleep(5)
+                rebalance.result()
+            if RestHelper(rest).is_cluster_rebalanced():
+                self.verify_cluster_stats(result_nodes)
+                self.log.info(
+                    "Rebalance was completed when tried to stop rebalance on {0}%".format(str(expected_progress)))
+                break
+            else:
+                self.log.info("Rebalance is still required. Verifying the data in the buckets.")
+                self._verify_all_buckets(self.master, timeout=None, max_verify=self.max_verify, batch_size=1)
+                self.verify_cluster_stats(result_nodes, check_bucket_stats=False, verify_total_items=False)
+                self.verify_unacked_bytes_all_buckets()
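Each test recomputes result_nodes with plain set arithmetic so the later stat checks run against exactly the nodes that should remain after the swap. A worked example with hypothetical IPs (not taken from any .ini in this patch):

servs_init = ["10.1.1.1", "10.1.1.2", "10.1.1.3"]   # nodes_init = 3
servs_in = ["10.1.1.4"]                              # rebalanced in first
servs_out = ["10.1.1.3"]                             # rebalanced out first
extra_servs_in = ["10.1.1.5"]                        # added after the stop
extra_servs_out = ["10.1.1.2"]                       # removed after the stop
result_nodes = set(servs_init + servs_in + extra_servs_in) - \
               set(servs_out + extra_servs_out)
# result_nodes is now {'10.1.1.1', '10.1.1.4', '10.1.1.5'}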
+    def test_start_stop_rebalance_after_failover(self):
+        """
+        Rebalances nodes out and in with failover.
+        Use different nodes_in and nodes_out params to have uneven addition and deletion. Use the 'zone'
+        param to have nodes divided into server groups by having zone > 1.
+
+        The test begins by loading the bucket with a given number of items. It then fails over a node. We then
+        rebalance the cluster while adding or removing a given number of nodes. Once the rebalance reaches 50%,
+        we stop the rebalance and validate the cluster stats. We then restart the rebalance and validate that
+        the rebalance was completed successfully.
+        """
+        fail_over = self.input.param("fail_over", False)
+        gen = BlobGenerator('mike', 'mike-', self.value_size, end=self.num_items)
+        self._load_all_buckets(self.master, gen, "create", 0)
+        tasks = self._async_load_all_buckets(self.master, gen, "update", 0)
+        for task in tasks:
+            task.result(self.wait_timeout * 20)
+        self._verify_stats_all_buckets(self.servers[:self.nodes_init], timeout=120)
+        self._wait_for_stats_all_buckets(self.servers[:self.nodes_init])
+        self.sleep(20)
+        prev_vbucket_stats = self.get_vbucket_seqnos(self.servers[:self.nodes_init], self.buckets)
+        prev_failover_stats = self.get_failovers_logs(self.servers[:self.nodes_init], self.buckets)
+        disk_replica_dataset, disk_active_dataset = self.get_and_compare_active_replica_data_set_all(
+            self.servers[:self.nodes_init], self.buckets, path=None)
+        self.compare_vbucketseq_failoverlogs(prev_vbucket_stats, prev_failover_stats)
+        self.rest = RestConnection(self.master)
+        chosen = RebalanceHelper.pick_nodes(self.master, howmany=1)
+        result_nodes = list(set(self.servers[:self.nodes_init] + self.servs_in) - set(self.servs_out))
+        for node in self.servs_in:
+            self.rest.add_node(self.master.rest_username, self.master.rest_password, node.ip, node.port)
+        # Mark Node for failover
+        self.rest.fail_over(chosen[0].id, graceful=fail_over)
+        rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init], self.servs_in, self.servs_out)
+        expected_progress = 50
+        rest = RestConnection(self.master)
+        reached = RestHelper(rest).rebalance_reached(expected_progress)
+        self.assertTrue(reached, "Rebalance failed or did not reach {0}%".format(expected_progress))
+        if not RestHelper(rest).is_cluster_rebalanced():
+            self.log.info("Stop the rebalance")
+            stopped = rest.stop_rebalance(wait_timeout=self.wait_timeout / 3)
+            self.assertTrue(stopped, msg="Unable to stop rebalance")
+        self._verify_all_buckets(self.master, timeout=None, max_verify=self.max_verify, batch_size=1)
+        self.shuffle_nodes_between_zones_and_rebalance()
+        self.verify_cluster_stats(result_nodes, check_ep_items_remaining=True, check_bucket_stats=False)
+        self.sleep(30)
+        self.data_analysis_active_replica_all(disk_active_dataset, disk_replica_dataset, result_nodes, self.buckets,
+                                              path=None)
+        self.verify_unacked_bytes_all_buckets()
+        nodes = self.get_nodes_in_cluster(self.master)
+        self.vb_distribution_analysis(servers=nodes, std=1.0, total_vbuckets=self.total_vbuckets)
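test_start_stop_rebalance_after_failover marks a node as failed over before the rebalance starts, so the stop/restart happens while the cluster is also healing. A minimal sketch of that failover step, reusing only calls that appear in the test; failover_one_node is a hypothetical wrapper:

from membase.api.rest_client import RestConnection
from membase.helper.rebalance_helper import RebalanceHelper

def failover_one_node(master, graceful=False):
    # Pick a victim node and mark it failed over; the subsequent
    # rebalance is what actually ejects it from the cluster.
    rest = RestConnection(master)
    chosen = RebalanceHelper.pick_nodes(master, howmany=1)
    rest.fail_over(chosen[0].id, graceful=graceful)
    return chosen[0]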
diff --git a/pytests/rebalance/rebalanceinout.py b/pytests/rebalance/rebalanceinout.py
index efd8d5e24..496c85292 100644
--- a/pytests/rebalance/rebalanceinout.py
+++ b/pytests/rebalance/rebalanceinout.py
@@ -344,62 +344,6 @@ def test_rebalance_in_out_with_compaction_and_expiration_ops(self):
         for t in thread_list:
             t.join()

-    def test_start_stop_rebalance_in_out(self):
-        """
-        Start-stop rebalance in/out with adding/removing aditional after stopping rebalance.
-
-        This test begins by loading a given number of items into the cluster. It then
-        add servs_in nodes and remove servs_out nodes and start rebalance. Then rebalance
-        is stopped when its progress reached 20%. After we add extra_nodes_in and remove
-        extra_nodes_out. Restart rebalance with new cluster configuration. Later rebalance
-        will be stop/restart on progress 40/60/80%. After each iteration we wait for
-        the disk queues to drain, and then verify that there has been no data loss,
-        sum(curr_items) match the curr_items_total. Once cluster was rebalanced the test is finished.
- The oder of add/remove nodes looks like: - self.nodes_init|servs_in|extra_nodes_in|extra_nodes_out|servs_out - """ - extra_nodes_in = self.input.param("extra_nodes_in", 0) - extra_nodes_out = self.input.param("extra_nodes_out", 0) - servs_init = self.servers[:self.nodes_init] - servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)] - servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)] - extra_servs_in = [self.servers[i + self.nodes_init + self.nodes_in] for i in range(extra_nodes_in)] - extra_servs_out = [self.servers[self.nodes_init - i - 1 - self.nodes_out] for i in range(extra_nodes_out)] - rest = RestConnection(self.master) - self._wait_for_stats_all_buckets(servs_init) - self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()])) - self.log.info("adding nodes {0} to cluster".format(servs_in)) - self.log.info("removing nodes {0} from cluster".format(servs_out)) - add_in_once = extra_servs_in - result_nodes = set(servs_init + servs_in) - set(servs_out) - # the latest iteration will be with i=5, for this case rebalance should be completed, - # that also is verified and tracked - for i in range(1, 6): - if i == 1: - rebalance = self.cluster.async_rebalance(servs_init[:self.nodes_init], servs_in, servs_out) - else: - rebalance = self.cluster.async_rebalance(servs_init[:self.nodes_init] + servs_in, add_in_once, - servs_out + extra_servs_out) - add_in_once = [] - result_nodes = set(servs_init + servs_in + extra_servs_in) - set(servs_out + extra_servs_out) - self.sleep(20) - expected_progress = 20 * i - reached = RestHelper(rest).rebalance_reached(expected_progress) - self.assertTrue(reached, "rebalance failed or did not reach {0}%".format(expected_progress)) - if not RestHelper(rest).is_cluster_rebalanced(): - stopped = rest.stop_rebalance(wait_timeout=self.wait_timeout / 3) - self.assertTrue(stopped, msg="unable to stop rebalance") - rebalance.result() - if RestHelper(rest).is_cluster_rebalanced(): - self.verify_cluster_stats(result_nodes) - self.log.info( - "rebalance was completed when tried to stop rebalance on {0}%".format(str(expected_progress))) - break - else: - self.log.info("rebalance is still required") - self._verify_all_buckets(self.master, timeout=None, max_verify=self.max_verify, batch_size=1) - self.verify_unacked_bytes_all_buckets() - def test_incremental_rebalance_out_in_with_mutation(self): """ Rebalances nodes in and out of the cluster while doing mutations. 
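The test_rqg.py change below moves the skip_cleanup param read into setUp, so tearDown only consults the cached flag before dropping the MySQL scratch database. A minimal sketch of the resulting guard, mirroring the calls visible in the diff (method shown out of its class context):

def tearDown(self):
    # Drop the per-run MySQL database unless the run asked to keep it.
    if self.use_mysql and self.reset_database and not self.skip_cleanup:
        try:
            self.client.drop_database(self.database)
        except Exception, ex:
            self.log.info(ex)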
diff --git a/pytests/rqg/test_rqg.py b/pytests/rqg/test_rqg.py index 1c563f249..9165e65de 100644 --- a/pytests/rqg/test_rqg.py +++ b/pytests/rqg/test_rqg.py @@ -30,6 +30,7 @@ def setUp(self): self.check_covering_index = self.input.param("check_covering_index",True) self.skip_setup_cleanup = True self.remove_alias = self.input.param("remove_alias",True) + self.skip_cleanup = self.input.param("skip_cleanup",False) self.build_secondary_index_in_seq = self.input.param("build_secondary_index_in_seq",False) self.number_of_buckets = self.input.param("number_of_buckets",5) self.crud_type = self.input.param("crud_type","update") @@ -100,7 +101,7 @@ def setUp(self): def tearDown(self): super(RQGTests, self).tearDown() if hasattr(self, 'reset_database'): - self.skip_cleanup= self.input.param("skip_cleanup",False) + print "cleanup is %s" %(self.skip_cleanup) if self.use_mysql and self.reset_database and (not self.skip_cleanup): try: self.client.drop_database(self.database) @@ -162,61 +163,6 @@ def test_rqg_concurrent_with_predefined_input(self): self.log.info(result) self.assertTrue(success, summary) - def test_rqg_concurrent_with_predefined_input(self): - check = True - failure_map = {} - batches = [] - batch = [] - test_case_number = 1 - count = 1 - inserted_count = 0 - self.use_secondary_index = self.run_query_with_secondary or self.run_explain_with_hints - with open(self.test_file_path) as f: - n1ql_query_list = f.readlines() - if self.total_queries == None: - self.total_queries = len(n1ql_query_list) - for n1ql_query_info in n1ql_query_list: - data = json.loads(n1ql_query_info) - batch.append({str(test_case_number):data}) - if count == self.concurreny_count: - inserted_count += len(batch) - batches.append(batch) - count = 1 - batch = [] - else: - count +=1 - test_case_number += 1 - if test_case_number >= self.total_queries: - break - if inserted_count != len(n1ql_query_list): - batches.append(batch) - result_queue = Queue.Queue() - for test_batch in batches: - # Build all required secondary Indexes - list = [data[data.keys()[0]] for data in test_batch] - if self.use_secondary_index: - self._generate_secondary_indexes_in_batches(list) - thread_list = [] - # Create threads and run the batch - for test_case in test_batch: - test_case_number = test_case.keys()[0] - data = test_case[test_case_number] - t = threading.Thread(target=self._run_basic_test, args = (data, test_case_number, result_queue)) - t.daemon = True - t.start() - thread_list.append(t) - # Capture the results when done - check = False - for t in thread_list: - t.join() - # Drop all the secondary Indexes - if self.use_secondary_index: - self._drop_secondary_indexes_in_batches(list) - # Analyze the results for the failure and assert on the run - success, summary, result = self._test_result_analysis(result_queue) - self.log.info(result) - self.assertTrue(success, summary) - def test_rqg_generate_input(self): self.data_dump_path= self.input.param("data_dump_path","b/resources/rqg/data_dump") input_file_path=self.data_dump_path+"/input" @@ -344,7 +290,7 @@ def test_rqg_concurrent(self): for t in thread_list: t.join() - if self.drop_index == True: + if self.drop_index: query = 'select * from system:indexes where keyspace_id like "{0}%"'.format(self.database) actual_result = self.n1ql_helper.run_cbq_query(query = query, server = self.n1ql_server) #print actual_result['results'] @@ -360,14 +306,11 @@ def test_rqg_concurrent(self): query = 'drop primary index on {0}'.format(keyspace) else: query = 'drop index {0}.{1}'.format(keyspace,name) - 
#import pdb;pdb.set_trace() i+=1 - #import pdb;pdb.set_trace() - self.n1ql_helper.run_cbq_query(query = query, server = self.n1ql_server,query_params={'timeout' : '600s'}) - if self.drop_bucket == True: - #import pdb;pdb.set_trace() + self.n1ql_helper.run_cbq_query(query = query, server = self.n1ql_server,query_params={'timeout' : '900s'}) + if self.drop_bucket: for bucket in self.buckets: - BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.master,bucket=bucket) + BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.n1ql_server,bucket=bucket) # Analyze the results for the failure and assert on the run success, summary, result = self._test_result_analysis(result_queue) self.log.info(result) @@ -458,12 +401,9 @@ def test_rqg_concurrent_new(self): query = 'drop primary index on {0}'.format(keyspace) else: query = 'drop index {0}.{1}'.format(keyspace,name) - #import pdb;pdb.set_trace() i+=1 - #import pdb;pdb.set_trace() - self.n1ql_helper.run_cbq_query(query = query, server = self.n1ql_server,query_params={'timeout' : '600s'}) + self.n1ql_helper.run_cbq_query(query = query, server = self.n1ql_server,query_params={'timeout' : '900s'}) if self.drop_bucket == True: - #import pdb;pdb.set_trace() for bucket in self.buckets: BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.master,bucket=bucket) # Analyze the results for the failure and assert on the run @@ -708,7 +648,7 @@ def _run_basic_test(self, test_data, test_case_number, result_queue, failure_rec query_index_run = self._run_queries_and_verify(n1ql_query = result_limit , sql_query = sql_query, expected_result = expected_result) result_run["run_query_with_limit"] = query_index_run if expected_result == None: - expected_result = self._gen_expected_result(sql_query) + expected_result = self._gen_expected_result(sql_query,test_case_number) data["expected_result"] = expected_result query_index_run = self._run_queries_and_verify(n1ql_query = n1ql_query , sql_query = sql_query, expected_result = expected_result) result_run["run_query_without_index_hint"] = query_index_run @@ -923,12 +863,16 @@ def _run_n1ql_queries(self, n1ql_query = None): # Run n1ql query actual_result = self.n1ql_helper.run_cbq_query(query = n1ql_query, server = self.n1ql_server) - def _gen_expected_result(self, sql = ""): + def _gen_expected_result(self, sql = "",test = 49): sql_result = [] try: client = MySQLClient(database = self.database, host = self.mysql_url, user_id = self.user_id, password = self.password) - columns, rows = client._execute_query(query = sql) + if (test == 51): + columns = [] + rows = [] + else: + columns, rows = client._execute_query(query = sql) sql_result = self.client._gen_json_from_results(columns, rows) client._close_mysql_connection() client = None @@ -998,7 +942,6 @@ def _run_queries_and_verify(self, n1ql_query = None, sql_query = None, expected_ try: self.n1ql_helper._verify_results_rqg(sql_result = sql_result, n1ql_result = n1ql_result, hints = hints) except Exception, ex: - import pdb;pdb.set_trace() self.log.info(ex) return {"success":False, "result": str(ex)} return {"success":True, "result": "Pass"} @@ -1523,9 +1466,8 @@ def _drop_secondary_indexes_in_batches(self, batches): table_name =self.database+"_"+table_name for index_name in info["indexes"].keys(): query ="DROP INDEX {0}.{1} USING {2}".format(table_name, index_name, info["indexes"][index_name]["type"]) - #import pdb;pdb.set_trace() try: - self.n1ql_helper.run_cbq_query(query = query, server = self.n1ql_server,query_params={'timeout' : '600s'},timeout = 
'600s') + self.n1ql_helper.run_cbq_query(query = query, server = self.n1ql_server,query_params={'timeout' : '900s'},timeout = '900s') self.sleep(10,"Sleep to make sure index dropped properly") except Exception, ex: self.log.info(ex) @@ -1535,9 +1477,8 @@ def _drop_secondary_indexes_with_index_map(self, index_map = {}, table_name = "s self.log.info(" Dropping Secondary Indexes for Bucket {0}".format(table_name)) for index_name in index_map.keys(): query ="DROP INDEX {0}.{1} USING {2}".format(table_name, index_name, index_map[index_name]["type"]) - #import pdb;pdb.set_trace() try: - self.n1ql_helper.run_cbq_query(query = query, server = self.n1ql_server,query_params={'timeout' : '600s'},timeout = '600s') + self.n1ql_helper.run_cbq_query(query = query, server = self.n1ql_server,query_params={'timeout' : '900s'},timeout = '900s') except Exception, ex: self.log.info(ex) diff --git a/pytests/rqg/test_rqg_asterix.py b/pytests/rqg/test_rqg_asterix.py new file mode 100644 index 000000000..14e3961d7 --- /dev/null +++ b/pytests/rqg/test_rqg_asterix.py @@ -0,0 +1,670 @@ +import sys +import paramiko +import re +from basetestcase import BaseTestCase +import json +import os +import zipfile +import pprint +import Queue +import json +from membase.helper.cluster_helper import ClusterOperationHelper +import mc_bin_client +import threading +from memcached.helper.data_helper import VBucketAwareMemcached +from mysql_client import MySQLClient +from membase.api.rest_client import RestConnection, Bucket +from couchbase_helper.analytics_helper import AnalyticsHelper +from couchbase_helper.query_helper import QueryHelper +from remote.remote_util import RemoteMachineShellConnection +from lib.membase.helper.bucket_helper import BucketOperationHelper + + +class RQGASTERIXTests(BaseTestCase): + """ Class for defining tests for RQG base testing """ + + def setUp(self): + super(RQGASTERIXTests, self).setUp() + self.client_map={} + self.log.info("============== RQGTests setup was finished for test #{0} {1} =============="\ + .format(self.case_number, self._testMethodName)) + self.skip_setup_cleanup = True + self.remove_alias = self.input.param("remove_alias",True) + self.number_of_buckets = self.input.param("number_of_buckets",5) + self.crud_type = self.input.param("crud_type","update") + self.populate_with_replay = self.input.param("populate_with_replay",False) + self.crud_batch_size = self.input.param("crud_batch_size",1) + self.skip_cleanup = self.input.param("skip_cleanup",False) + self.record_failure= self.input.param("record_failure",False) + self.failure_record_path= self.input.param("failure_record_path","/tmp") + self.use_mysql= self.input.param("use_mysql",True) + self.joins = self.input.param("joins",False) + self.subquery = self.input.param("subquery",False) + self.initial_loading_to_cb= self.input.param("initial_loading_to_cb",True) + self.change_bucket_properties = self.input.param("change_bucket_properties",False) + self.database= self.input.param("database","flightstats") + self.merge_operation= self.input.param("merge_operation",False) + self.load_copy_table= self.input.param("load_copy_table",False) + self.user_id= self.input.param("user_id","root") + self.user_cluster = self.input.param("user_cluster","Administrator") + self.password= self.input.param("password","") + self.password_cluster = self.input.param("password_cluster","password") + self.generate_input_only = self.input.param("generate_input_only",False) + self.using_gsi= self.input.param("using_gsi",True) + self.reset_database = 
self.input.param("reset_database",True) + self.items = self.input.param("items",1000) + self.mysql_url= self.input.param("mysql_url","localhost") + self.mysql_url=self.mysql_url.replace("_",".") + self.n1ql_server = self.get_nodes_from_services_map(service_type = "n1ql") + self.concurreny_count= self.input.param("concurreny_count",10) + self.total_queries= self.input.param("total_queries",None) + self.run_query_with_primary= self.input.param("run_query_with_primary",False) + self.run_query_with_secondary= self.input.param("run_query_with_secondary",False) + self.run_explain_with_hints= self.input.param("run_explain_with_hints",False) + self.test_file_path= self.input.param("test_file_path",None) + self.db_dump_path= self.input.param("db_dump_path",None) + self.input_rqg_path= self.input.param("input_rqg_path",None) + self.set_limit = self.input.param("set_limit",0) + self.query_count= 0 + self.use_rest = self.input.param("use_rest",True) + self.ram_quota = self.input.param("ram_quota",512) + self.drop_bucket = self.input.param("drop_bucket",False) + if self.input_rqg_path != None: + self.db_dump_path = self.input_rqg_path+"/db_dump/database_dump.zip" + self.test_file_path = self.input_rqg_path+"/input/source_input_rqg_run.txt" + self.query_helper = QueryHelper() + self.keyword_list = self.query_helper._read_keywords_from_file("b/resources/rqg/n1ql_info/keywords.txt") + self._initialize_analytics_helper() + self.rest = RestConnection(self.master) + self.indexer_memQuota = self.input.param("indexer_memQuota",1024) + if self.initial_loading_to_cb: + self._initialize_cluster_setup() + if not(self.use_rest): + self._ssh_client = paramiko.SSHClient() + self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + try: + self.os = self.shell.extract_remote_info().type.lower() + except Exception, ex: + self.log.error('SETUP FAILED') + self.tearDown() + + + + def tearDown(self): + super(RQGASTERIXTests, self).tearDown() + data = 'use Default ;' + "\n" + for bucket in self.buckets: + data += 'disconnect bucket {0} if connected;'.format(bucket.name) + "\n" + data += 'drop dataset {0} if exists'.format(bucket.name) + "_shadow ;" + "\n" + data += 'drop bucket {0} if exists;'.format(bucket.name) + "\n" + filename = "file.txt" + f = open(filename,'w') + f.write(data) + f.close() + url = 'http://{0}:8095/analytics/service'.format(self.master.ip) + cmd = 'curl -s --data pretty=true --data-urlencode "statement@file.txt" ' + url + os.system(cmd) + os.remove(filename) + + + if hasattr(self, 'reset_database'): + #self.skip_cleanup= self.input.param("skip_cleanup",False) + if self.use_mysql and self.reset_database and (not self.skip_cleanup): + try: + self.client.drop_database(self.database) + except Exception, ex: + self.log.info(ex) + + + def _initialize_cluster_setup(self): + if self.use_mysql: + self.log.info(" Will load directly from mysql") + self._initialize_mysql_client() + if not self.generate_input_only: + self._setup_and_load_buckets() + else: + self.log.info(" Will load directly from file snap-shot") + if self.populate_with_replay: + self._initialize_mysql_client() + self._setup_and_load_buckets_from_files() + + self._initialize_analytics_helper() + #create copy of simple table if this is a merge operation + self.sleep(10) + if self.gsi_type == "memory_optimized": + os.system("curl -X POST http://Administrator:password@{1}:8091/pools/default -d memoryQuota={0} -d indexMemoryQuota={2}".format(self.ram_quota, self.n1ql_server.ip,self.indexer_memQuota)) + self.sleep(10) + + # 
self.log.info("Increasing Indexer Memory Quota to {0}".format(self.indexer_memQuota)) + # self.rest.set_indexer_memoryQuota(indexMemoryQuota=self.indexer_memQuota) + # self.sleep(120) + if self.change_bucket_properties: + shell = RemoteMachineShellConnection(self.master) + shell.execute_command("curl -X POST -u {0}:{1} -d maxBucketCount=25 http://{2}:{3}/internalSettings".format(self.user_cluster,self.password_cluster,self.master.ip,self.master.port)) + self.sleep(10,"Updating maxBucket count to 15") + + def _initialize_mysql_client(self): + if self.reset_database: + self.client = MySQLClient(host = self.mysql_url, + user_id = self.user_id, password = self.password) + path = "b/resources/rqg/{0}/database_definition/definition.sql".format(self.database) + self.database = self.database+"_"+str(self.query_helper._random_int()) + populate_data = False + if not self.populate_with_replay: + populate_data = True + self.client.reset_database_add_data(database = self.database, items= self.items, sql_file_definiton_path = path, populate_data = populate_data, number_of_tables = self.number_of_buckets) + self._copy_table_for_merge() + else: + self.client = MySQLClient(database = self.database, host = self.mysql_url, + user_id = self.user_id, password = self.password) + + def _copy_table_for_merge(self): + table_list = self.client._get_table_list() + reference_table = table_list[0] + if self.merge_operation: + path = "b/resources/rqg/crud_db/database_definition/table_definition.sql" + self.client.database_add_data(database = self.database, sql_file_definiton_path = path) + table_list = self.client._get_table_list() + for table_name in table_list: + if table_name != reference_table: + sql = "INSERT INTO {0} SELECT * FROM {1}".format(table_name, reference_table) + self.client._insert_execute_query(sql) + table_list = self.client._get_table_list() + for table_name in table_list: + self.client_map[table_name] = MySQLClient(database = self.database, host = self.mysql_url, user_id = self.user_id, password = self.password) + + def _setup_and_load_buckets(self): + # Remove any previous buckets + #rest = RestConnection(self.master) + if (self.skip_setup_cleanup): + for bucket in self.buckets: + self.rest.delete_bucket(bucket.name) + self.buckets = [] + if self.change_bucket_properties or self.gsi_type == "memory_optimized": + bucket_size = 100 + else: + bucket_size = None + if self.change_bucket_properties: + shell = RemoteMachineShellConnection(self.master) + #print "master is {0}".format(self.master) + shell.execute_command("curl -X POST -u {0}:{1} -d maxBucketCount=25 http://{2}:{3}/internalSettings".format(self.user_cluster,self.password_cluster,self.master.ip,self.master.port)) + self.sleep(10,"Updating maxBucket count to 25") + # Pull information about tables from mysql database and interpret them as no-sql dbs + table_key_map = self.client._get_primary_key_map_for_tables() + # Make a list of buckets that we want to create for querying + bucket_list = table_key_map.keys() + print "database used is {0}".format(self.database) + new_bucket_list =[] + for bucket in bucket_list: + if (bucket.find("copy_simple_table")>0): + new_bucket_list.append(self.database+"_"+"copy_simple_table") + else: + new_bucket_list.append(self.database + "_" + bucket) + + + # Create New Buckets + self._create_buckets(self.master, new_bucket_list, server_id=None, bucket_size=bucket_size) + print "buckets created" + # Wait till the buckets are up + self.sleep(5) + self.buckets = self.rest.get_buckets() + self.newbuckets = [] + for 
bucket in self.buckets: + if bucket.name in new_bucket_list: + self.newbuckets.append(bucket) + + print "safe to start another job" + self.record_db = {} + self.buckets = self.newbuckets + # Read Data from mysql database and populate the couchbase server + for bucket_name in bucket_list: + query = "select * from {0}".format(bucket_name) + columns, rows = self.client._execute_query(query = query) + self.record_db[bucket_name] = self.client._gen_json_from_results_with_primary_key(columns, rows, + primary_key = table_key_map[bucket_name]) + for bucket in self.newbuckets: + if bucket.name == self.database+"_"+bucket_name: + self._load_bulk_data_in_buckets_using_n1ql(bucket, self.record_db[bucket_name]) + + data = 'use Default;' + "\n" + for bucket in self.buckets: + data += 'create bucket {0} with {{"bucket":"{0}","nodes":"{1}"}} ;'.format(bucket.name,self.master.ip) + "\n" + data += 'create shadow dataset {1} on {0}; '.format(bucket.name,bucket.name+"_shadow") + "\n" + data += 'connect bucket {0} ;'.format(bucket.name) + "\n" + #import pdb;pdb.set_trace() + filename = "file.txt" + f = open(filename,'w') + f.write(data) + f.close() + url = 'http://{0}:8095/analytics/service'.format(self.master.ip) + cmd = 'curl -s --data pretty=true --data format=CLEAN_JSON --data-urlencode "statement@file.txt" ' + url + os.system(cmd) + os.remove(filename) + + + def unzip_template(self, template_path): + if "zip" not in template_path: + return template_path + tokens = template_path.split("/") + file_name = tokens[len(tokens)-1] + output_path = template_path.replace(file_name,"") + with zipfile.ZipFile(template_path, "r") as z: + z.extractall(output_path) + template_path = template_path.replace(".zip","") + return template_path + + def _initialize_analytics_helper(self): + self.n1ql_helper = AnalyticsHelper(version = "spock", shell = None, + use_rest = True, max_verify = self.max_verify, + buckets = self.buckets, item_flag = None, + analytics_port=8095,full_docs_list = [], + log = self.log, input = self.input, master = self.master,database = self.database) + + def _load_bulk_data_in_buckets_using_n1ql(self, bucket, data_set): + try: + count=0 + n1ql_query = self.query_helper._builk_insert_statement_n1ql(bucket.name,data_set) + actual_result = self.n1ql_helper.run_cbq_query(query = n1ql_query, server = self.n1ql_server, verbose = False) + except Exception, ex: + print 'WARN=======================' + print ex + + def test_rqg_concurrent(self): + # Get Data Map + table_map = self.client._get_values_with_type_for_fields_in_table() + check = True + failure_map = {} + batches = [] + batch = [] + test_case_number = 1 + count = 1 + inserted_count = 0 + # Load All the templates + self.test_file_path= self.unzip_template(self.test_file_path) + with open(self.test_file_path) as f: + query_list = f.readlines() + if self.total_queries == None: + self.total_queries = len(query_list) + for n1ql_query_info in query_list: + data = n1ql_query_info + batch.append({str(test_case_number):data}) + if count == self.concurreny_count: + inserted_count += len(batch) + batches.append(batch) + count = 1 + batch = [] + else: + count +=1 + test_case_number += 1 + if test_case_number > self.total_queries: + break + if inserted_count != len(query_list): + batches.append(batch) + result_queue = Queue.Queue() + input_queue = Queue.Queue() + failure_record_queue = Queue.Queue() + # Run Test Batches + test_case_number = 1 + thread_list = [] + for i in xrange(self.concurreny_count): + t = threading.Thread(target=self._testrun_worker, args = 
(input_queue, result_queue, failure_record_queue)) + t.daemon = True + t.start() + thread_list.append(t) + for test_batch in batches: + # Build all required secondary Indexes + list = [data[data.keys()[0]] for data in test_batch] + list = self.client._convert_template_query_info( + table_map = table_map, + n1ql_queries = list, + gen_expected_result = False) + + # Create threads and run the batch + for test_case in list: + test_case_input = test_case + input_queue.put({"test_case_number":test_case_number, "test_data":test_case_input}) + test_case_number += 1 + # Capture the results when done + check = False + for t in thread_list: + t.join() + + for bucket in self.buckets: + BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.master,bucket=bucket) + # Analyze the results for the failure and assert on the run + success, summary, result = self._test_result_analysis(result_queue) + self.log.info(result) + #self.dump_failure_data(failure_record_queue) + self.assertTrue(success, summary) + + def _testrun_worker(self, input_queue, result_queue, failure_record_queue = None): + count = 0 + while True: + if self.total_queries <= (self.query_count): + break + if not input_queue.empty(): + data = input_queue.get() + test_data = data["test_data"] + test_case_number = data["test_case_number"] + self._run_basic_test(test_data, test_case_number, result_queue, failure_record_queue) + count = 0 + else: + count += 1 + if count > 1000: + return + + def _gen_expected_result(self, sql = ""): + sql_result = [] + try: + client = MySQLClient(database = self.database, host = self.mysql_url, + user_id = self.user_id, password = self.password) + columns, rows = client._execute_query(query = sql) + sql_result = self.client._gen_json_from_results(columns, rows) + client._close_mysql_connection() + client = None + except Exception, ex: + self.log.info(ex) + if ex.message.__contains__("SQL syntax") or ex.message.__contains__("ERROR"): + print "Error in sql syntax" + return sql_result + + def _run_basic_test(self, test_data, test_case_number, result_queue, failure_record_queue = None): + data = test_data + n1ql_query = data["n1ql"] + #LOCK = threading.Lock() + + if (self.joins or self.subquery): + n1ql_query = data["sql"] + #import pdb;pdb.set_trace() + #if LOCK.acquire(False): + #i = n1ql_query.find("t_") + #temp = n1ql_query[i:i+4] + #print "temp is {0}".format(temp) + #n1ql_query = n1ql_query.replace("t_","VALUE t_",1) + #print "n1ql query before replace is %s" %n1ql_query + #n1ql_query = n1ql_query.replace("t_",temp,1) + #print "n1ql query after replace is %s" %n1ql_query + if ("IN" in n1ql_query): + index = n1ql_query.find("IN (") + temp1 = n1ql_query[0:index] + " IN [ " + temp2 = n1ql_query[index+4:].replace(")","]",1) + n1ql_query = temp1 + temp2 + print "n1ql query after in replace is %s"%n1ql_query + #LOCK.release() + + + + if (n1ql_query.find("simple_table")>0) and ((self.database+"_"+"simple_table") not in n1ql_query): + n1ql_query = n1ql_query.replace("simple_table",self.database+"_"+"simple_table") + + sql_query = data["sql"] + table_name = data["bucket"] + expected_result = data["expected_result"] + self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< BEGIN RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number)) + result_run = {} + result_run["n1ql_query"] = n1ql_query + result_run["sql_query"] = sql_query + result_run["test_case_number"] = test_case_number + if self.set_limit > 0 and n1ql_query.find("DISTINCT") > 0: + result_limit = self.query_helper._add_limit_to_query(n1ql_query,self.set_limit) +
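# _add_limit_to_query is assumed here (not verified in this file) to append a plain LIMIT clause, + # e.g. turning "SELECT DISTINCT job_title FROM t" into "SELECT DISTINCT job_title FROM t LIMIT 10", + # so the limited variant is verified as its own scenario before the unmodified query runs below. +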
query_index_run = self._run_queries_and_verify(n1ql_query = result_limit , sql_query = sql_query, expected_result = expected_result) + result_run["run_query_with_limit"] = query_index_run + if expected_result == None: + expected_result = self._gen_expected_result(sql_query) + data["expected_result"] = expected_result + query_index_run = self._run_queries_and_verify(n1ql_query = n1ql_query , sql_query = sql_query, expected_result = expected_result) + result_run["run_query_without_index_hint"] = query_index_run + if self.run_query_with_primary: + index_info = {"name":"`#primary`","type":"GSI"} + query = self.query_helper._add_index_hints_to_query(n1ql_query, [index_info]) + query_index_run = self._run_queries_and_verify(n1ql_query = query , sql_query = sql_query, expected_result = expected_result) + result_run["run_query_with_primary"] = query_index_run + + result_queue.put(result_run) + self._check_and_push_failure_record_queue(result_run, data, failure_record_queue) + self.query_count += 1 + self.log.info(" <<<<<<<<<<<<<<<<<<<<<<<<<<<< END RUNNING TEST {0} >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>".format(test_case_number)) + + + def _run_queries_and_verify(self, n1ql_query = None, sql_query = None, expected_result = None): + self.log.info(" SQL QUERY :: {0}".format(sql_query)) + self.log.info(" N1QL QUERY :: {0}".format(n1ql_query)) + result_run = {} + # Run n1ql query + hints = self.query_helper._find_hints(sql_query) + + for i,item in enumerate(hints): + if "simple_table" in item: + hints[i] = hints[i].replace("simple_table",self.database+"_"+"simple_table") + try: + actual_result = self.n1ql_helper.run_analytics_query(query = n1ql_query, server = self.n1ql_server, scan_consistency="request_plus") + n1ql_result = actual_result["results"] + #self.log.info(actual_result) + # Run SQL Query + sql_result = expected_result + if expected_result == None: + columns, rows = self.client._execute_query(query = sql_query) + sql_result = self.client._gen_json_from_results(columns, rows) + #self.log.info(sql_result) + self.log.info(" result from n1ql query returns {0} items".format(len(n1ql_result))) + self.log.info(" result from sql query returns {0} items".format(len(sql_result))) + + if(len(n1ql_result)!=len(sql_result)): + self.log.info("number of results returned from sql and n1ql are different") + if (len(sql_result) == 0 and len(n1ql_result) ==1) or (len(n1ql_result) == 0 and len(sql_result) == 1): + return {"success":True, "result": "Pass"} + return {"success":False, "result": str("different results")} + try: + self.n1ql_helper._verify_results_rqg(sql_result = sql_result, n1ql_result = n1ql_result, hints = hints) + except Exception, ex: + self.log.info(ex) + return {"success":False, "result": str(ex)} + return {"success":True, "result": "Pass"} + except Exception, ex: + return {"success":False, "result": str(ex)} + + def _test_result_analysis(self, queue): + result_list = [] + pass_case = 0 + fail_case = 0 + total= 0 + failure_map = {} + keyword_map = {} + failure_reason_map = {} + success = True + while not queue.empty(): + result_list.append(queue.get()) + total = len(result_list) + for result_run in result_list: + test_case_number = result_run["test_case_number"] + sql_query = result_run["sql_query"] + n1ql_query = result_run["n1ql_query"] + check, message, failure_types = self._analyze_result(result_run) + success = success and check + if check: + pass_case += 1 + else: + fail_case += 1 + for failure_reason_type in failure_types: + if failure_reason_type not in failure_reason_map.keys(): + 
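# first occurrence of this failure type in the run; later occurrences only bump the tally below +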
failure_reason_map[failure_reason_type] = 1 + else: + failure_reason_map[failure_reason_type] += 1 + keyword_list = self.query_helper.find_matching_keywords(n1ql_query, self.keyword_list) + for keyword in keyword_list: + if keyword not in keyword_map.keys(): + keyword_map[keyword] = 1 + else: + keyword_map[keyword] += 1 + failure_map[test_case_number] = {"sql_query":sql_query, "n1ql_query": n1ql_query, + "run_result" : message, "keyword_list": keyword_list} + summary = " Total Queries Run = {0}, Pass = {1}, Fail = {2}, Pass Percentage = {3} %".format(total, pass_case, fail_case, ((pass_case*100)/total)) + if len(keyword_map) > 0: + summary += "\n [ KEYWORD FAILURE DISTRIBUTION ] \n" + for keyword in keyword_map.keys(): + summary += keyword+" :: " + str((keyword_map[keyword]*100)/total)+"%\n " + if len(failure_reason_map) > 0: + summary += "\n [ FAILURE TYPE DISTRIBUTION ] \n" + for keyword in failure_reason_map.keys(): + summary += keyword+" :: " + str((failure_reason_map[keyword]*100)/total)+"%\n " + self.log.info(" Total Queries Run = {0}, Pass = {1}, Fail = {2}, Pass Percentage = {3} %".format(total, pass_case, fail_case, ((pass_case*100)/total))) + result = self._generate_result(failure_map) + return success, summary, result + + def _analyze_result(self, result): + check = True + failure_types = [] + message = "\n ____________________________________________________\n " + for key in result.keys(): + if key != "test_case_number" and key != "n1ql_query" and key != "sql_query": + check = check and result[key]["success"] + if not result[key]["success"]: + failure_types.append(key) + message += " Scenario :: {0} \n".format(key) + message += " Reason :: "+result[key]["result"]+"\n" + return check, message, failure_types + + def _generate_result(self, data): + result = "" + for key in data.keys(): + result +="<<<<<<<<<< TEST {0} >>>>>>>>>>> \n".format(key) + for result_key in data[key].keys(): + result += "{0} :: {1} \n".format(result_key, data[key][result_key]) + return result + + def _check_and_push_failure_record_queue(self, result, data, failure_record_queue): + if not self.record_failure: + return + check = True + for key in result.keys(): + if key != "test_case_number" and key != "n1ql_query" and key != "sql_query": + check = check and result[key]["success"] + if not result[key]["success"]: + failure_record_queue.put(data) + + + def test_rqg_concurrent_new(self): + # Get Data Map + table_list = self.client._get_table_list() + table_map = self.client._get_values_with_type_for_fields_in_table() + if self.remove_alias: + for key in table_map.keys(): + if "alias_name" in table_map[key].keys(): + table_map[key].pop("alias_name",None) + check = True + failure_map = {} + batches = Queue.Queue() + batch = [] + test_case_number = 1 + count = 1 + inserted_count = 0 + self.use_secondary_index = self.run_query_with_secondary or self.run_explain_with_hints + # Load All the templates + self.test_file_path= self.unzip_template(self.test_file_path) + with open(self.test_file_path) as f: + query_list = f.readlines() + if self.total_queries == None: + self.total_queries = len(query_list) + for n1ql_query_info in query_list: + data = n1ql_query_info + batch.append({str(test_case_number):data}) + if count == self.concurreny_count: + inserted_count += len(batch) + batches.put(batch) + count = 1 + batch = [] + else: + count +=1 + test_case_number += 1 + if test_case_number > self.total_queries: + break + if inserted_count != len(query_list): + batches.put(batch) + result_queue = Queue.Queue() + input_queue =
Queue.Queue() + failure_record_queue = Queue.Queue() + # Run Test Batches + test_case_number = 1 + thread_list = [] + start_test_case_number = 1 + table_queue_map = {} + for table_name in table_list: + table_queue_map[table_name] = Queue.Queue() + self.log.info("CREATE BATCHES") + while not batches.empty(): + # Build all required secondary Indexes + for table_name in table_list: + if batches.empty(): + break + test_batch = batches.get() + + list = [data[data.keys()[0]] for data in test_batch] + table_queue_map[table_name].put({"table_name":table_name, "table_map":table_map,"list":list, "start_test_case_number":start_test_case_number }) + start_test_case_number += len(list) + self.log.info("SPAWNING THREADS") + for table_name in table_list: + t = threading.Thread(target=self._testrun_worker_new, args = (table_queue_map[table_name], result_queue, failure_record_queue)) + t.daemon = True + t.start() + thread_list.append(t) + # Drop all the secondary Indexes + for t in thread_list: + t.join() + + if self.drop_bucket == True: + #import pdb;pdb.set_trace() + for bucket in self.buckets: + BucketOperationHelper.delete_bucket_or_assert(serverInfo=self.master,bucket=bucket) + # Analyze the results for the failure and assert on the run + success, summary, result = self._test_result_analysis(result_queue) + self.log.info(result) + self.dump_failure_data(failure_record_queue) + self.assertTrue(success, summary) + + def _testrun_worker_new(self, input_queue , result_queue, failure_record_queue = None): + while not input_queue.empty(): + data = input_queue.get() + table_name = data["table_name"] + table_map = data["table_map"] + list = data["list"] + start_test_case_number = data["start_test_case_number"] + list_info = self.client._convert_template_query_info( + table_map = table_map, + n1ql_queries = list, + define_gsi_index = self.use_secondary_index) + thread_list = [] + test_case_number = start_test_case_number + for test_case_input in list_info: + t = threading.Thread(target=self._run_basic_test, args = (test_case_input, test_case_number, result_queue, failure_record_queue)) + test_case_number += 1 + t.daemon = True + t.start() + thread_list.append(t) + # Drop all the secondary Indexes + for t in thread_list: + t.join() + + def dump_failure_data(self, failure_record_queue): + if not self.record_failure: + return + import uuid + sub_dir = str(uuid.uuid4()).replace("-","") + self.data_dump_path= self.failure_record_path+"/"+sub_dir + os.mkdir(self.data_dump_path) + input_file_path=self.data_dump_path+"/input" + os.mkdir(input_file_path) + f_write_file = open(input_file_path+"/source_input_rqg_run.txt",'w') + secondary_index_path=self.data_dump_path+"/index" + os.mkdir(secondary_index_path) + database_dump = self.data_dump_path+"/db_dump" + os.mkdir(database_dump) + f_write_index_file = open(secondary_index_path+"/secondary_index_definitions.txt",'w') + client = MySQLClient(database = self.database, host = self.mysql_url, + user_id = self.user_id, password = self.password) + client.dump_database(data_dump_path = database_dump) + client._close_mysql_connection() + f_write_index_file.write(json.dumps(self.sec_index_map)) + f_write_index_file.close() + while not failure_record_queue.empty(): + f_write_file.write(json.dumps(failure_record_queue.get())+"\n") + f_write_file.close() + diff --git a/pytests/security/auditcli.py b/pytests/security/auditcli.py index d5272e49e..162e12ff4 100644 --- a/pytests/security/auditcli.py +++ b/pytests/security/auditcli.py @@ -195,7 +195,7 @@ def testBucketCreation(self):
'replica_index':True, 'eviction_policy':'value_only', 'type':'membase', \ 'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", \ "flush_enabled":False, "num_threads":3, "source":self.source, \ - "user":self.ldapUser, "ip":'127.0.0.1', "port":57457, 'sessionid':'','time_synchronization': 'disabled' } + "user":self.ldapUser, "ip":'127.0.0.1', "port":57457, 'sessionid':'', 'conflict_resolution_type':'seqno'} self.checkConfig(8201, self.master, expectedResults) remote_client.disconnect() diff --git a/pytests/security/audittest.py b/pytests/security/audittest.py index d3ddf4bb1..b577aac7c 100644 --- a/pytests/security/audittest.py +++ b/pytests/security/audittest.py @@ -85,7 +85,7 @@ def test_bucketEvents(self): 'replica_index':False, 'eviction_policy':'value_only', 'type':'membase', \ 'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", \ "flush_enabled":False, "num_threads":3, "source":source, \ - "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'','time_synchronization': 'disabled' } + "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'', 'conflict_resolution_type':'seqno'} rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] / 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'], \ '11211', 'membase', 0, expectedResults['num_threads'], 0, 'valueOnly') @@ -445,7 +445,7 @@ def test_checkCreateBucketCluster(self): 'replica_index':False, 'eviction_policy':'value_only', 'type':'membase', \ 'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", \ "flush_enabled":False, "num_threads":3, "source":source, \ - "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'','time_synchronization': 'disabled' } + "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'', 'conflict_resolution_type':'seqno'} rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] / 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'], \ '11211', 'membase', 0, expectedResults['num_threads'], 0, 'valueOnly') self.log.info ("value of server is {0}".format(server)) @@ -479,7 +479,7 @@ def test_createBucketClusterNodeOut(self): 'replica_index':False, 'eviction_policy':'value_only', 'type':'membase', \ 'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", \ "flush_enabled":False, "num_threads":3, "source":source, \ - "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'','time_synchronization': 'disabled' } + "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'', 'conflict_resolution_type':'seqno'} restFirstNode.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] / 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'], \ '11211', 'membase', 0, expectedResults['num_threads'], 0, 'valueOnly') @@ -499,7 +499,7 @@ def test_createBucketClusterNodeOut(self): 'replica_index':False, 'eviction_policy':'value_only', 'type':'membase', \ 'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", \ "flush_enabled":False, "num_threads":3, "source":source, \ - "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'','time_synchronization': 'disabled' } + "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'', 'conflict_resolution_type':'seqno'} rest.create_bucket(expectedResults['bucket_name'], expectedResults['ram_quota'] / 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'], \ '11211', 
'membase', 0, expectedResults['num_threads'], 0 , 'valueOnly') diff --git a/pytests/security/rbacTest.py b/pytests/security/rbacTest.py index b4b2e3b27..41180b79f 100644 --- a/pytests/security/rbacTest.py +++ b/pytests/security/rbacTest.py @@ -8,6 +8,9 @@ from security.auditmain import audit import commands import socket +import fileinput +import sys +from subprocess import Popen, PIPE class ServerInfo(): def __init__(self, @@ -27,18 +30,10 @@ def __init__(self, class rbacTest(ldaptest): - - def setupLDAPSettings (self,rest): - api = rest.baseUrl + 'settings/saslauthdAuth' - params = urllib.urlencode({"enabled":'true',"admins":[],"roAdmins":[]}) - status, content, header = rest._http_request(api, 'POST', params) - return status, content, header - - def setUp(self): super(rbacTest, self).setUp() rest = RestConnection(self.master) - self.setupLDAPSettings(rest) + self.auth_type = self.input.param('auth_type','ldap') self.user_id = self.input.param("user_id",None) self.user_role = self.input.param("user_role",None) self.bucket_name = self.input.param("bucket_name",'default') @@ -49,8 +44,14 @@ def setUp(self): self.no_bucket_access = self.input.param("no_bucket_access",False) self.no_access_bucket_name = self.input.param("no_access_bucket_name",None) self.ldap_users = rbacmain().returnUserList(self.user_id) - self._removeLdapUserRemote(self.ldap_users) - self._createLDAPUser(self.ldap_users) + if self.auth_type == 'ldap': + rbacmain().setup_auth_mechanism(self.servers,'ldap',rest) + self._removeLdapUserRemote(self.ldap_users) + self._createLDAPUser(self.ldap_users) + else: + rbacmain().setup_auth_mechanism(self.servers,'pam', rest) + rbacmain().add_remove_local_user(self.servers, self.ldap_users, 'deluser') + rbacmain().add_remove_local_user(self.servers, self.ldap_users,'adduser') self.ldap_server = ServerInfo(self.ldapHost, self.ldapPort, 'root', 'couchbase') rbacmain()._delete_user_from_roles(self.master) self.ipAddress = self.getLocalIPAddress() @@ -60,6 +61,7 @@ def tearDown(self): super(rbacTest, self).tearDown() def getLocalIPAddress(self): + ''' s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(('couchbase.com', 0)) return s.getsockname()[0] @@ -68,7 +70,6 @@ def getLocalIPAddress(self): if '1' not in ipAddress: status, ipAddress = commands.getstatusoutput("ifconfig eth0 | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | awk '{print $2}'") return ipAddress - ''' def test_compare_orig_roles(self): status, content, header = rbacmain(self.master)._retrive_all_user_role(self.user_id) @@ -98,7 +99,6 @@ def test_role_assign_check_rest_api(self): def test_role_assign_check_end_to_end(self): user_name = self.input.param("user_name") final_user_id = rbacmain().returnUserList(self.user_id) - print final_user_id final_roles = rbacmain()._return_roles(self.user_role) payload = "name=" + user_name + "&roles=" + final_roles if len(final_user_id) == 1: @@ -285,7 +285,6 @@ def test_ldapDeleteUser(self): rbacmain(self.master)._check_role_permission_validate_multiple(self.user_id,self.user_role,self.bucket_name,self.role_map) user_name = rbacmain().returnUserList(self.user_id) self._removeLdapUserRemote(user_name) - print user_name status, content, header = rbacmain(self.master)._check_user_permission(user_name[0][0],user_name[0][1],self.user_role) self.assertFalse(status,"Not getting 401 for users that are deleted in LDAP") @@ -314,7 +313,6 @@ def test_checkPasswordChange(self): for i in range(len(user_list)): self._changeLdapPassRemote(user_list[i][0], 'password1') temp_id = 
str(user_list[i][0]) + ":" + str('password1?') - print temp_id result = rbacmain(self.master)._check_role_permission_validate_multiple(temp_id[:-1],self.user_role,self.bucket_name,self.role_map) self.assertTrue(result,"Issue with role assignment and comparision with permission set") @@ -359,7 +357,6 @@ def test_role_assignment_audit(self): self.assertTrue(fieldVerification, "One of the fields is not matching") self.assertTrue(valueVerification, "Values for one of the fields is not matching") - class rbac_upgrade(NewUpgradeBaseTest,ldaptest): def setUp(self): @@ -416,3 +413,5 @@ def upgrade_half_nodes(self): status, content, header = rbacmain(server)._retrieve_user_roles() self.assertFalse(status,"Incorrect status for rbac cluster in mixed cluster {0} - {1} - {2}".format(status,content,header)) + + diff --git a/pytests/security/rbacmain.py b/pytests/security/rbacmain.py index 88c503a2a..1f11d9552 100644 --- a/pytests/security/rbacmain.py +++ b/pytests/security/rbacmain.py @@ -5,11 +5,18 @@ log = logger.Logger.get_logger() import base64 from security.rbacPermissionList import rbacPermissionList +from remote.remote_util import RemoteMachineShellConnection +import commands +import urllib class rbacmain: AUDIT_ROLE_ASSIGN=8232 AUDIT_ROLE_UPDATE=8232 AUDIT_REMOVE_ROLE=8194 + PATH_SASLAUTHD = '/etc/sysconfig/' + FILE_SASLAUTHD = 'saslauthd' + PATH_SASLAUTHD_LOCAL = '/tmp/' + FILE_SASLAUTHD_LOCAL = 'saslauth' def __init__(self, master_ip=None, @@ -328,4 +335,111 @@ def _check_role_permission_validate_multiple_rest_api(self,user_id,user_role,buc if item['final_result'] == 'False': final_result = False - return final_result \ No newline at end of file + return final_result + + + def getRemoteFile(self, host): + commands.getstatusoutput(' rm -rf ' + self.PATH_SASLAUTHD_LOCAL + self.FILE_SASLAUTHD) + shell = RemoteMachineShellConnection(host) + try: + sftp = shell._ssh_client.open_sftp() + tempfile = str(self.PATH_SASLAUTHD + self.FILE_SASLAUTHD) + tmpfile = self.PATH_SASLAUTHD_LOCAL + self.FILE_SASLAUTHD + sftp.get('{0}'.format(tempfile), '{0}'.format(tmpfile)) + sftp.close() + except Exception, e: + log.info (" Value of e is {0}".format(e)) + finally: + shell.disconnect() + + + def writeFile(self, host): + shell = RemoteMachineShellConnection(host) + try: + result = shell.copy_file_local_to_remote(self.PATH_SASLAUTHD_LOCAL + self.FILE_SASLAUTHD, \ + self.PATH_SASLAUTHD + self.FILE_SASLAUTHD) + finally: + shell.disconnect() + + ''' + Update saslauth file with mechanism + + Parameters - + mech_value - mechanism value to change + + Returns - None + ''' + + def update_file_inline(self,mech_value='ldap'): + commands.getstatusoutput(' rm -rf ' + self.PATH_SASLAUTHD_LOCAL + self.FILE_SASLAUTHD_LOCAL) + f1 = open (self.PATH_SASLAUTHD_LOCAL + self.FILE_SASLAUTHD,'r') + f2 = open (self.PATH_SASLAUTHD_LOCAL + self.FILE_SASLAUTHD_LOCAL,'w') + for line in f1: + line = line.rstrip('\n') + if line=='MECH=ldap' and mech_value == 'pam': + f2.write(line.replace('MECH=ldap', 'MECH=pam')) + f2.write("\n") + f2.write("\n") + elif line=='MECH=pam' and mech_value == 'ldap': + f2.write(line.replace('MECH=pam', 'MECH=ldap')) + f2.write("\n") + f2.write("\n") + else: + f2.write(line + "\n") + f1.close() + f2.close() + commands.getstatusoutput("mv " + self.PATH_SASLAUTHD_LOCAL + self.FILE_SASLAUTHD_LOCAL + " " + self.PATH_SASLAUTHD_LOCAL + self.FILE_SASLAUTHD) + + + def restart_saslauth(self,host): + shell = RemoteMachineShellConnection(host) + shell.execute_command('service saslauthd restart') + shell.disconnect() + + ''' + 
Setup auth mechanism - pam or ldap + + Parameters - + servers - list of servers that need user creation + type - Mechanism type + rest - Rest connection object + + Returns - status, content and header of the rest command executed for saslauthd + ''' + def setup_auth_mechanism(self, servers, type, rest): + for server in servers: + self.getRemoteFile(server) + self.update_file_inline(mech_value=type) + self.writeFile(server) + self.restart_saslauth(server) + api = rest.baseUrl + 'settings/saslauthdAuth' + params = urllib.urlencode({"enabled": 'true', "admins": [], "roAdmins": []}) + status, content, header = rest._http_request(api, 'POST', params) + return status, content, header + + ''' + Add/remove local unix users + Parameters - + servers - list of servers that need user creation + operation - deluser and adduser operations + + Returns - None + ''' + def add_remove_local_user(self,servers,user_list,operation): + for server in servers: + shell = RemoteMachineShellConnection(server) + try: + for user in user_list: + if (user[0] != ''): + if (operation == "deluser"): + user_command = "userdel -r " + user[0] + + elif (operation == 'adduser'): + user_command = "openssl passwd -crypt " + user[1] + o, r = shell.execute_command(user_command) + user_command = "useradd -p " + o[0] + " " + user[0] + o, r = shell.execute_command(user_command) + shell.log_command_output(o, r) + finally: + shell.disconnect() \ No newline at end of file diff --git a/pytests/tuqquery/date_time_functions.py b/pytests/tuqquery/date_time_functions.py new file mode 100644 index 000000000..a60790232 --- /dev/null +++ b/pytests/tuqquery/date_time_functions.py @@ -0,0 +1,224 @@ +import logging +import random +import time + +from datetime import datetime +from dateutil.parser import parse +from threading import Thread +from tuq import QueryTests + +log = logging.getLogger(__name__) + +FORMATS = ["2006-01-02T15:04:05.999+07:00", + "2006-01-02T15:04:05.999", + "2006-01-02T15:04:05", + "2006-01-02 15:04:05.999+07:00", + "2006-01-02 15:04:05.999", + "2006-01-02 15:04:05", + "2006-01-02", + "15:04:05.999+07:00", + "15:04:05.999", + "15:04:05"] + +PARTS = ["millennium", + "century", + "decade", + "year", + "quarter", + "month", + "day", + "hour", + "minute", + "second", + "millisecond", + "week", + "day_of_year", "doy", + "day_of_week", "dow", + "iso_week", + "iso_year", + "iso_dow", + "timezone"] + +TIMEZONES = ["utc"] + + +class DateTimeFunctionClass(QueryTests): + def setUp(self): + super(DateTimeFunctionClass, self).setUp() + + def tearDown(self): + super(DateTimeFunctionClass, self).tearDown() + + def test_date_part_millis(self): + for count in range(5): + milliseconds = random.randint(658979899786, 876578987695) + time_tuple = time.gmtime(milliseconds/1000) + local_parts = self._generate_expected_results_for_date_part_millis(time_tuple) + for part in local_parts: + query = self._generate_date_part_millis_query(milliseconds, part) + actual_result = self.run_cbq_query(query) + self.assertEqual(actual_result["results"][0]["$1"], local_parts[part], + "Actual result {0} and expected result {1} don't match for {2} milliseconds and \ + {3} parts".format(actual_result["results"][0], local_parts[part], + milliseconds, part)) + + def test_date_part_millis_for_zero(self): + #Special Case when expression is 0 + time_tuple = datetime(1970, 1, 1, 0, 0, 0).timetuple() + local_parts = self._generate_expected_results_for_date_part_millis(time_tuple) + for part in local_parts: + query = self._generate_date_part_millis_query(0, part) +
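# 0 ms corresponds to the Unix epoch (1970-01-01T00:00:00 UTC), so for example + # DATE_PART_MILLIS(0, "year") is expected to return 1970 and DATE_PART_MILLIS(0, "month") to return 1 +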
actual_result = self.run_cbq_query(query) + self.assertEqual(actual_result["results"][0]["$1"], local_parts[part], + "Actual result {0} and expected result {1} don't match for 0 milliseconds and {2} parts". + format(actual_result["results"][0], local_parts[part], part)) + + def test_date_part_millis_for_negative_inputs(self): + expressions = ['\"123abc\"', 675786.869876, -658979899786, '\"\"', "null", {"a": 1, "b": 2}, {}] + for expression in expressions: + for part in PARTS: + query = 'SELECT DATE_PART_MILLIS({0}, "{1}")'.format(expression, part) + try: + actual_result = self.run_cbq_query(query) + except Exception, ex: + msg = "syntax error" + if msg not in str(ex): + raise + + def test_date_format_str(self): + local_formats = ["2006-01-02T15:04:05.999", + "2006-01-02T15:04:05", + "2006-01-02 15:04:05.999", + "2006-01-02 15:04:05", + "2006-01-02"] + for expression in local_formats: + for expected_format in FORMATS: + query = self._generate_date_format_str_query(expression, expected_format) + actual_result = self.run_cbq_query(query) + query = 'SELECT MILLIS_TO_UTC(MILLIS("{0}"), "{1}")'.format(expression, expected_format) + expected_result = self.run_cbq_query(query) + self.assertEqual(actual_result["results"][0]["$1"], expected_result["results"][0]["$1"], + "Resulting format {0} doesn't match with expected {1}".format( + actual_result["results"][0]["$1"], expected_result["results"][0]["$1"])) + + def test_array_date_range(self): + error_query = [] + local_parts = ["millennium", + "century", + "decade", + "year", + "quarter", + "month", + "week", + "day", + "hour", + "minute", + "second", + "millisecond"] + count = 3 + for first_expression in FORMATS: + expect_null_result = 0 + for part in local_parts: + query = 'SELECT DATE_ADD_STR("{0}", {1}, "{2}")'.format(first_expression, count, part) + expected_result = self.run_cbq_query(query) + temp_expression = expected_result["results"][0]["$1"] + self.assertIsNotNone(temp_expression, "result is {0} for query {1}".format(expected_result, query)) + query = self._generate_date_format_str_query(temp_expression, first_expression) + result = self.run_cbq_query(query) + second_expression = result["results"][0]["$1"] + if part in local_parts[:8]: + if not (self._is_date_part_present(first_expression) and + self._is_date_part_present(second_expression)): + expect_null_result = 1 + else: + if not (self._is_time_part_present(first_expression) and + self._is_time_part_present(second_expression)): + expect_null_result = 1 + query = self._generate_array_date_range_query(first_expression, second_expression, part) + log.info(query) + try: + actual_result = self.run_cbq_query(query) + except Exception: + error_query.append(query) + else: + lst = actual_result["results"][0]["$1"] + if not expect_null_result and not lst: + error_query.append(query) + elif lst: + if len(lst) != count+1: + error_query.append(query) + self.assertFalse(error_query, "Queries Failed are: {0}".format(error_query)) + + def test_array_date_range_for_intervals(self): + #Set Interval + intervals = [0, 2, 10, -1] + start_date = "2006-01-02T15:04:05" + end_date = "2006-01-10T15:04:05" + for interval in intervals: + query = self._generate_array_date_range_query(start_date, end_date, "day", interval) + actual_result = self.run_cbq_query(query) + if interval < 0: + self.assertIsNone(actual_result["results"][0]["$1"], + "{0} Failed.
Result {1}".format(query, actual_result)) + continue + lst = actual_result["results"][0]["$1"] + if interval == 0: + self.assertEqual(len(lst), 0, "Query {0} Failed".format(query)) + else: + self.assertEqual(len(lst), 8/interval, "Query {0} Failed".format(query)) + + def test_new_functions(self): + local_formats = ["2006-01-02"] + for expression in local_formats: + query = 'SELECT STR_TO_UTC(CLOCK_STR("{0}"))'.format(expression) + expected_result = self.run_cbq_query(query) + query = 'SELECT CLOCK_UTC("{0}")'.format(expression) + actual_result = self.run_cbq_query(query) + self.assertEqual(actual_result["results"][0]["$1"], expected_result["results"][0]["$1"], + "{0} failed ".format(query)) + query = 'SELECT STR_TO_UTC(NOW_STR("{0}"))'.format(expression) + expected_result = self.run_cbq_query(query) + query = 'SELECT NOW_UTC("{0}")'.format(expression) + actual_result = self.run_cbq_query(query) + self.assertEqual(actual_result["results"][0]["$1"], expected_result["results"][0]["$1"], + "{0} failed ".format(query)) + + def _generate_date_part_millis_query(self, expression, part, timezone=None): + if not timezone: + query = 'SELECT DATE_PART_MILLIS({0}, "{1}")'.format(expression, part) + else: + query = 'SELECT DATE_PART_MILLIS({0}, "{1}", "{2}")'.format(expression, part, timezone) + return query + + def _generate_expected_results_for_date_part_millis(self, time_tuple): + local_parts = {"millennium": (time_tuple.tm_year-1)//1000 + 1, + "century": (time_tuple.tm_year-1)//100 + 1, + "decade": time_tuple.tm_year/10, + "year": time_tuple.tm_year, + "quarter": (time_tuple.tm_mon-1)//3 + 1, + "month": time_tuple.tm_mon, + "day": time_tuple.tm_mday, + "hour": time_tuple.tm_hour, + "minute": time_tuple.tm_min, + "second": time_tuple.tm_sec, + "week": (time_tuple.tm_yday-1)//7 + 1, + "day_of_year": time_tuple.tm_yday, + "doy": time_tuple.tm_yday, + "day_of_week": (time_tuple.tm_wday + 1)%7, + "dow": (time_tuple.tm_wday + 1)%7} + return local_parts + + def _generate_date_format_str_query(self, expression, format): + query = 'SELECT DATE_FORMAT_STR("{0}", "{1}")'.format(expression, format) + return query + + def _generate_array_date_range_query(self, initial_date, final_date, part, increment=None): + if increment is None: + query = 'SELECT ARRAY_DATE_RANGE("{0}", "{1}", "{2}")'.format(initial_date, final_date, part) + else: + query = 'SELECT ARRAY_DATE_RANGE("{0}", "{1}", "{2}", {3})'.format(initial_date, final_date, part, increment) + return query + + def _is_date_part_present(self, expression): + return (len(expression.split("-")) > 1) + + def _is_time_part_present(self, expression): + return (len(expression.split(":")) > 1) \ No newline at end of file diff --git a/pytests/tuqquery/n1ql_ro_user.py b/pytests/tuqquery/n1ql_ro_user.py index 9b2e6ca31..b8a5d201e 100644 --- a/pytests/tuqquery/n1ql_ro_user.py +++ b/pytests/tuqquery/n1ql_ro_user.py @@ -52,6 +52,7 @@ def test_readonly(self): self._kill_all_processes_cbq() self._start_command_line_query(self.master, user=self.username, password=self.password) for bucket in self.buckets: + self.analytics = False self.query = 'INSERT into %s (key, value) VALUES ("%s", %s)' % (bucket.name, 'key1', 1) self.run_cbq_query() diff --git a/pytests/tuqquery/newtuq.py b/pytests/tuqquery/newtuq.py index 2e98cba39..8492ce6dc 100644 --- a/pytests/tuqquery/newtuq.py +++ b/pytests/tuqquery/newtuq.py @@ -6,6 +6,7 @@ import copy import math import re +import os import testconstants import datetime @@ -38,6 +39,7 @@ def setUp(self): self.docs_per_day = self.input.param("doc-per-day", 49)
self.item_flag = self.input.param("item_flag", 4042322160) + self.n1ql_port = self.input.param("n1ql_port", 8093) + self.analytics = self.input.param("analytics",False) + self.dataset = self.input.param("dataset", "default") self.primary_indx_type = self.input.param("primary_indx_type", 'GSI') self.index_type = self.input.param("index_type", 'GSI') @@ -60,7 +62,11 @@ def setUp(self): if self.input.param("gomaxprocs", None): self.configure_gomaxprocs() self.gen_results = TuqGenerators(self.log, self.generate_full_docs_list(self.gens_load)) - self.create_primary_index_for_3_0_and_greater() + if (self.analytics == False): + self.create_primary_index_for_3_0_and_greater() + if (self.analytics): + self.setup_analytics() + self.sleep(30,'wait for analytics setup') def suite_setUp(self): try: @@ -75,6 +81,20 @@ def suite_setUp(self): def tearDown(self): if self._testMethodName == 'suite_tearDown': self.skip_buckets_handle = False + if self.analytics: + data = 'use Default ;' + "\n" + for bucket in self.buckets: + data += 'disconnect bucket {0} if connected;'.format(bucket.name) + "\n" + data += 'drop dataset {0} if exists'.format(bucket.name) + "_shadow ;" + "\n" + data += 'drop bucket {0} if exists;'.format(bucket.name) + "\n" + filename = "file.txt" + f = open(filename,'w') + f.write(data) + f.close() + url = 'http://{0}:8095/analytics/service'.format(self.master.ip) + cmd = 'curl -s --data pretty=true --data-urlencode "statement@file.txt" ' + url + os.system(cmd) + os.remove(filename) + super(QueryTests, self).tearDown() def suite_tearDown(self): @@ -85,6 +105,33 @@ def suite_tearDown(self): self.shell.disconnect() + + def setup_analytics(self): + #data = "" + # for bucket in self.buckets: + # data += 'disconnect bucket {0} ;'.format(bucket.name) + "\n" + # data += 'connect bucket {0};'.format(bucket.name) + "\n" + # filename = "file.txt" + # f = open(filename,'w') + # f.write(data) + # f.close() + # url = 'http://{0}:8095/analytics/service'.format(self.master.ip) + # cmd = 'curl -s --data pretty=true --data-urlencode "statement@file.txt" ' + url + # os.system(cmd) + # os.remove(filename) + data = 'use Default;' + "\n" + for bucket in self.buckets: + data += 'create bucket {0} with {{"bucket":"{0}","nodes":"{1}"}} ;'.format(bucket.name,self.master.ip) + "\n" + data += 'create shadow dataset {1} on {0}; '.format(bucket.name,bucket.name+"_shadow") + "\n" + data += 'connect bucket {0} ;'.format(bucket.name) + "\n" + filename = "file.txt" + f = open(filename,'w') + f.write(data) + f.close() + url = 'http://{0}:8095/analytics/service'.format(self.master.ip) + cmd = 'curl -s --data pretty=true --data-urlencode "statement@file.txt" ' + url + os.system(cmd) + os.remove(filename) + ############################################################################################## + # + # SIMPLE CHECKS @@ -265,12 +312,15 @@ def test_simple_alias(self): t5 = threading.Thread(name='run_limit_offset', target=self.run_active_requests,args=(e,2)) t5.start() query_template = 'SELECT COUNT($str0) AS COUNT_EMPLOYEE FROM %s' % (bucket.name) + if self.analytics: + query_template = 'SELECT COUNT(`$str0`) AS COUNT_EMPLOYEE FROM %s' % (bucket.name) actual_result, expected_result = self.run_query_from_template(query_template) self.assertEquals(actual_result['results'], expected_result, "Results are incorrect.Actual %s.\n Expected: %s.\n" % ( actual_result['results'], expected_result)) query_template = 'SELECT COUNT(*) + 1 AS COUNT_EMPLOYEE FROM %s' % (bucket.name) + actual_result, expected_result =
self.run_query_from_template(query_template) if self.monitoring: e.set() @@ -297,6 +347,12 @@ def test_alias_from_clause(self): ' ORDER BY points', 'SELECT $obj0.$_obj0_int0 AS points FROM %s AS test ' +\ 'GROUP BY test.$obj0.$_obj0_int0 ORDER BY points'] + # if self.analytics: + # queries_templates = ['SELECT test.$obj0.$_obj0_int0 AS points FROM %s AS test ORDER BY test.points', + # 'SELECT test.$obj0.$_obj0_int0 AS points FROM %s AS test WHERE test.$int0 >0' +\ + # ' ORDER BY test.points', + # 'SELECT test.$obj0.$_obj0_int0 AS points FROM %s AS test ' +\ + # 'GROUP BY test.$obj0.$_obj0_int0 ORDER BY test.points'] for bucket in self.buckets: if self.monitoring: e = threading.Event() @@ -334,6 +390,9 @@ def test_alias_order_asc(self): for bucket in self.buckets: query_template = 'SELECT $str0 AS name_new FROM %s AS test ORDER BY name_new ASC' %( bucket.name) + if self.analytics: + query_template = 'SELECT `$str0` AS name_new FROM %s AS test ORDER BY name_new ASC' %( + bucket.name) actual_result, expected_result = self.run_query_from_template(query_template) self._verify_results(actual_result['results'], expected_result) @@ -344,6 +403,9 @@ def test_alias_aggr_fn(self): t8 = threading.Thread(name='run_limit_offset', target=self.run_active_requests,args=(e,2)) t8.start() query_template = 'SELECT COUNT(TEST.$str0) from %s AS TEST' %(bucket.name) + if self.analytics: + query_template = 'SELECT COUNT(TEST.`$str0`) from %s AS TEST' %(bucket.name) + actual_result, expected_result = self.run_query_from_template(query_template) if self.monitoring: e.set() @@ -408,6 +470,13 @@ def test_order_by_aggr_fn(self): actual_result, expected_result = self.run_query_from_template(query_template) self._verify_results(actual_result['results'], expected_result) + if self.analytics: + self.query = 'SELECT d.email AS TITLE, min(d.join_day) day FROM %s d GROUP' % (bucket.name) +\ + ' BY d.$str1 ORDER BY MIN(d.join_day), d.$str1' + actual_result1 = self.run_cbq_query() + self._verify_results(actual_result1['results'], actual_result['results']) + + def test_order_by_precedence(self): for bucket in self.buckets: query_template = 'SELECT $str0, $str1 FROM %s' % (bucket.name) +\ @@ -568,7 +637,17 @@ def run_cbq_query(self, query=None, min_output_size=10, server=None): if self.use_rest: query_params.update({'scan_consistency': self.scan_consistency}) self.log.info('RUN QUERY %s' % query) - result = RestConnection(server).query_tool(query, self.n1ql_port, query_params=query_params) + + if self.analytics: + query = query + ";" + for bucket in self.buckets: + query = query.replace(bucket.name,bucket.name+"_shadow") + result = RestConnection(server).analytics_tool(query, 8095, query_params=query_params) + + else : + result = RestConnection(server).query_tool(query, self.n1ql_port, query_params=query_params) + + else: if self.version == "git_repo": output = self.shell.execute_commands_inside("$GOPATH/src/github.com/couchbase/query/" +\ diff --git a/pytests/tuqquery/tuq.py b/pytests/tuqquery/tuq.py index 7724550d9..28c0958c8 100644 --- a/pytests/tuqquery/tuq.py +++ b/pytests/tuqquery/tuq.py @@ -1,3 +1,4 @@ +import os import pprint import logger import json @@ -61,6 +62,7 @@ def setUp(self): self.skip_load = self.input.param("skip_load", False) self.skip_index = self.input.param("skip_index", False) self.n1ql_port = self.input.param("n1ql_port", 8093) + #self.analytics = self.input.param("analytics",False) self.primary_indx_type = self.input.param("primary_indx_type", 'GSI') self.primary_indx_drop = 
self.input.param("primary_indx_drop", False) self.index_type = self.input.param("index_type", 'GSI') @@ -72,7 +74,6 @@ def setUp(self): self.cluster_ops = self.input.param("cluster_ops",False) self.isprepared = False self.server = self.master - self.ispokemon = self.input.param("pokemon",False) self.rest = RestConnection(self.server) #self.coverage = self.input.param("coverage",False) self.cover = self.input.param("cover", False) @@ -98,7 +99,8 @@ def setUp(self): if self.input.param("gomaxprocs", None): self.configure_gomaxprocs() if str(self.__class__).find('QueriesUpgradeTests') == -1 and self.primary_index_created == False: - self.create_primary_index_for_3_0_and_greater() + if (self.analytics == False): + self.create_primary_index_for_3_0_and_greater() self.log.info('-'*100) self.log.info('Temp fix for MB-16888') #if (self.coverage == False): @@ -107,6 +109,9 @@ def setUp(self): self.shell.execute_command("killall -9 indexer") self.sleep(10, 'wait for indexer') self.log.info('-'*100) + if (self.analytics): + self.setup_analytics() + self.sleep(30,'wait for analytics setup') #if self.ispokemon: #self.set_indexer_pokemon_settings() @@ -130,6 +135,20 @@ def suite_setUp(self): def tearDown(self): if self._testMethodName == 'suite_tearDown': self.skip_buckets_handle = False + if self.analytics == True: + data = 'use Default ;' + "\n" + for bucket in self.buckets: + data += 'disconnect bucket {0} if connected;'.format(bucket.name) + "\n" + data += 'drop dataset {0} if exists'.format(bucket.name) + "_shadow ;" + "\n" + data += 'drop bucket {0} if exists;'.format(bucket.name) + "\n" + filename = "file.txt" + f = open(filename,'w') + f.write(data) + f.close() + url = 'http://{0}:8095/analytics/service'.format(self.master.ip) + cmd = 'curl -s --data pretty=true --data-urlencode "statement@file.txt" ' + url + os.system(cmd) + os.remove(filename) super(QueryTests, self).tearDown() def suite_tearDown(self): @@ -145,6 +164,23 @@ def suite_tearDown(self): pid = [item for item in output if item][1] self.shell.execute_command("kill -9 %s" % pid) + def setup_analytics(self): + data = 'use Default;' + "\n" + for bucket in self.buckets: + data += 'create bucket {0} with {{"bucket":"{0}","nodes":"{1}"}} ;'.format(bucket.name,self.master.ip) + "\n" + data += 'create shadow dataset {1} on {0}; '.format(bucket.name,bucket.name+"_shadow") + "\n" + data += 'connect bucket {0} ;'.format(bucket.name) + "\n" + filename = "file.txt" + f = open(filename,'w') + f.write(data) + f.close() + url = 'http://{0}:8095/analytics/service'.format(self.master.ip) + cmd = 'curl -s --data pretty=true --data-urlencode "statement@file.txt" ' + url + os.system(cmd) + os.remove(filename) + + + ############################################################################################## # # SIMPLE CHECKS @@ -170,8 +206,6 @@ def test_all(self): for bucket in self.buckets: self.query = 'SELECT ALL job_title FROM %s ORDER BY job_title' % (bucket.name) actual_result = self.run_cbq_query() - - expected_result = [{"job_title" : doc['job_title']} for doc in self.full_list] expected_result = sorted(expected_result, key=lambda doc: (doc['job_title'])) @@ -194,15 +228,16 @@ def test_all_nested(self): expected_result = sorted(expected_result, key=lambda doc: (doc['skill'])) self._verify_results(actual_result['results'], expected_result) - self.query = 'SELECT ALL tasks_points.* ' +\ + if (self.analytics == False): + self.query = 'SELECT ALL tasks_points.* ' +\ 'FROM %s' % (bucket.name) - actual_result = self.run_cbq_query() - expected_result = 
[doc['tasks_points'] for doc in self.full_list] - expected_result = sorted(expected_result, key=lambda doc: + actual_result = self.run_cbq_query() + expected_result = [doc['tasks_points'] for doc in self.full_list] + expected_result = sorted(expected_result, key=lambda doc: (doc['task1'], doc['task2'])) - actual_result = sorted(actual_result['results'], key=lambda doc: + actual_result = sorted(actual_result['results'], key=lambda doc: (doc['task1'], doc['task2'])) - self._verify_results(actual_result, expected_result) + self._verify_results(actual_result, expected_result) def set_indexer_pokemon_settings(self): projector_json = { "projector.dcp.numConnections": 1 } @@ -365,7 +400,8 @@ def test_array(self): for bucket in self.buckets: self.query = "SELECT ARRAY vm.memory FOR vm IN VMs END AS vm_memories" +\ " FROM %s WHERE VMs IS NOT NULL " % (bucket.name) - + if self.analytics: + self.query = 'SELECT (SELECT VALUE vm.memory FROM VMs AS vm) AS vm_memories FROM %s WHERE VMs IS NOT NULL '% bucket.name actual_result = self.run_cbq_query() actual_result = sorted(actual_result['results'], key=lambda doc: (doc['vm_memories'])) expected_result = [{"vm_memories" : [vm["memory"] for vm in doc['VMs']]} @@ -471,6 +507,11 @@ def test_like(self): expected_result = [{"name" : doc['name']} for doc in self.full_list if not (doc["job_title"].endswith('ales') and\ len(doc["job_title"]) == 5)] + self.query = "SELECT name FROM {0} WHERE reverse(job_title) NOT LIKE 'sela_' ORDER BY name".format( + bucket.name) + actual_result1 = self.run_cbq_query() + + self.assertEqual(actual_result1['results'],actual_result['results'] ) expected_result = sorted(expected_result, key=lambda doc: (doc['name'])) self._verify_results(actual_result['results'], expected_result) @@ -478,6 +519,10 @@ def test_like_negative(self): queries_errors = {"SELECT tasks_points FROM {0} WHERE tasks_points.* LIKE '_1%'" : ('syntax error', 3000)} self.negative_common_body(queries_errors) + queries_errors = {"SELECT tasks_points FROM {0} WHERE REVERSE(tasks_points.*) LIKE '%1_'" : + ('syntax error', 3000)} + self.negative_common_body(queries_errors) + def test_like_any(self): for bucket in self.buckets: @@ -520,6 +565,10 @@ def test_like_aliases(self): self.query = "select name AS NAME from %s " % (bucket.name) +\ "AS EMPLOYEE where EMPLOYEE.name LIKE '_mpl%' ORDER BY name" actual_result = self.run_cbq_query() + self.query = "select name AS NAME from %s " % (bucket.name) +\ + "AS EMPLOYEE where reverse(EMPLOYEE.name) LIKE '%lpm_' ORDER BY name" + actual_result1 = self.run_cbq_query() + self.assertEqual(actual_result['results'],actual_result1['results']) expected_result = [{"NAME" : doc['name']} for doc in self.full_list if doc["name"].find('mpl') == 1] expected_result = sorted(expected_result, key=lambda doc: (doc['NAME'])) @@ -530,6 +579,10 @@ def test_like_wildcards(self): self.query = "SELECT email FROM %s WHERE email " % (bucket.name) +\ "LIKE '%@%.%' ORDER BY email" actual_result = self.run_cbq_query() + self.query = "SELECT email FROM %s WHERE reverse(email) " % (bucket.name) +\ + "LIKE '%.%@%' ORDER BY email" + actual_result1 = self.run_cbq_query() + self.assertEqual(actual_result['results'],actual_result1['results']) expected_result = [{"email" : doc['email']} for doc in self.full_list if re.match(r'.*@.*\..*', doc['email'])] @@ -595,6 +648,10 @@ def test_group_by(self): self.query = "SELECT tasks_points.task1 AS task from %s " % (bucket.name) +\ "WHERE join_mo>7 GROUP BY tasks_points.task1 " +\ "ORDER BY tasks_points.task1" + if 
(self.analytics): + self.query = "SELECT d.tasks_points.task1 AS task from %s d " % (bucket.name) +\ + "WHERE d.join_mo>7 GROUP BY d.tasks_points.task1 " +\ + "ORDER BY d.tasks_points.task1" actual_result = self.run_cbq_query() expected_result = [{"task" : doc['tasks_points']["task1"]} @@ -629,6 +686,13 @@ def test_group_by_aggr_fn(self): "ORDER BY tasks_points.task1" actual_result = self.run_cbq_query() + if self.analytics: + self.query = "SELECT d.tasks_points.task1 AS task from %s d " % (bucket.name) +\ + "WHERE d.join_mo>7 GROUP BY d.tasks_points.task1 " +\ + "HAVING COUNT(d.tasks_points.task1) > 0 AND " +\ + "(MIN(d.join_day)=1 OR MAX(d.join_yr=2011)) " +\ + "ORDER BY d.tasks_points.task1" + tmp_groups = set([doc['tasks_points']["task1"] for doc in self.full_list]) expected_result = [{"task" : group} for group in tmp_groups if [doc['tasks_points']["task1"] @@ -674,6 +738,15 @@ def test_group_by_satisfy(self): "AND (ANY vm IN %s.VMs SATISFIES vm.RAM = 5 end) " % ( bucket.name) +\ "GROUP BY job_title ORDER BY job_title" + + if self.analytics: + self.query = "SELECT d.job_title, AVG(d.test_rate) as avg_rate FROM %s d " % (bucket.name) +\ + "WHERE (ANY skill IN skills SATISFIES skill = 'skill2010' end) " +\ + "AND (ANY vm IN VMs SATISFIES vm.RAM = 5 end) " +\ + "GROUP BY d.job_title ORDER BY d.job_title" + + actual_result = self.run_cbq_query() tmp_groups = set([doc["job_title"] for doc in self.full_list]) @@ -853,7 +926,7 @@ def test_meta_flags(self): expected_result = [{"flags" : self.item_flag}] self._verify_results(actual_result['results'], expected_result) - def test_meta_cas(self): + def test_long_values(self): self.query = 'insert into %s values("k051", { "id":-9223372036854775808 } )'%("default") self.run_cbq_query() self.query = 'insert into %s values("k031", { "id":-9223372036854775807 } )'%("default") @@ -898,6 +971,13 @@ def test_meta_cas(self): #self._verify_results(actual_result, expected_result) + + def test_meta_cas(self): + for bucket in self.buckets: + self.query = 'select meta().cas from {0} order by meta().id limit 10'.format(bucket.name) + actual_result = self.run_cbq_query() + print actual_result + + def test_meta_negative(self): queries_errors = {'SELECT distinct name FROM %s WHERE META().type = "json"' : ('syntax error', 3000)} self.negative_common_body(queries_errors) @@ -962,6 +1042,9 @@ def test_substr(self): for bucket in self.buckets: self.query = "select name, SUBSTR(email, 7) as DOMAIN from %s" % (bucket.name) actual_result = self.run_cbq_query() + self.query = "select name, reverse(SUBSTR(email, 7)) as REV_DOMAIN from %s" % (bucket.name) + actual_result1 = self.run_cbq_query() + self.assertEqual(actual_result['results'],actual_result1['results']) actual_result = sorted(actual_result['results'], key=lambda doc: (doc['name'], doc['DOMAIN'])) @@ -988,6 +1071,9 @@ def test_first(self): for bucket in self.buckets: self.query = "select name, FIRST vm.os for vm in VMs end as OS from %s" % ( bucket.name) + if self.analytics: + self.query = "select name, VMs[0].os as OS from %s" %(bucket.name) + actual_result = self.run_cbq_query() actual_result = sorted(actual_result['results'], key=lambda doc: (doc['name'], doc['OS'])) @@ -1008,6 +1094,10 @@ def test_sum(self): self.query = "SELECT join_mo, SUM(tasks_points.task1) as points_sum" +\ " FROM %s WHERE join_mo < 5 GROUP BY join_mo " % (bucket.name) +\ "ORDER BY join_mo" + if self.analytics: + self.query = "SELECT d.join_mo, SUM(d.tasks_points.task1) as points_sum" +\ + " FROM %s d
WHERE d.join_mo < 5 GROUP BY d.join_mo " % (bucket.name) +\ + "ORDER BY d.join_mo" actual_result = self.run_cbq_query() tmp_groups = set([doc['join_mo'] for doc in self.full_list @@ -1024,6 +1114,11 @@ def test_sum(self): "as employees WHERE job_title='Sales' GROUP BY join_mo " +\ "HAVING SUM(employees.test_rate) > 0 and " +\ "SUM(test_rate) < 100000" + if self.analytics: + self.query = "SELECT d.join_mo, SUM(d.test_rate) as rate FROM %s d " % (bucket.name) +\ + " WHERE d.job_title='Sales' GROUP BY d.join_mo " +\ + "HAVING SUM(d.test_rate) > 0 and " +\ + "SUM(d.test_rate) < 100000" actual_result = self.run_cbq_query() actual_result = [{"join_mo" : doc["join_mo"], "rate" : round(doc["rate"])} for doc in actual_result['results']] actual_result = sorted(actual_result, key=lambda doc: (doc['join_mo'])) @@ -1064,8 +1159,12 @@ def test_avg(self): self.query = "SELECT join_mo, AVG(tasks_points.task1) as points_avg" +\ " FROM %s WHERE join_mo < 5 GROUP BY join_mo " % (bucket.name) +\ "ORDER BY join_mo" - actual_result = self.run_cbq_query() + if self.analytics: + self.query = "SELECT d.join_mo, AVG(d.tasks_points.task1) as points_avg" +\ + " FROM %s d WHERE d.join_mo < 5 GROUP BY d.join_mo " % (bucket.name) +\ + "ORDER BY d.join_mo" + actual_result = self.run_cbq_query() tmp_groups = set([doc['join_mo'] for doc in self.full_list if doc['join_mo'] < 5]) expected_result = [{"join_mo" : group, @@ -1083,6 +1182,13 @@ def test_avg(self): "as employees WHERE job_title='Sales' GROUP BY join_mo " +\ "HAVING AVG(employees.test_rate) > 0 and " +\ "SUM(test_rate) < 100000" + + if self.analytics: + self.query = "SELECT d.join_mo, AVG(d.test_rate) as rate FROM %s d" % (bucket.name) +\ + " WHERE d.job_title='Sales' GROUP BY d.join_mo " +\ + "HAVING AVG(d.test_rate) > 0 and " +\ + "SUM(d.test_rate) < 100000" + actual_result = self.run_cbq_query() actual_result = [{'join_mo' : doc['join_mo'], @@ -1237,6 +1343,23 @@ def test_array_append(self): expected_result = sorted(expected_result, key=lambda doc: (doc['job_title'])) self._verify_results(actual_result, expected_result) + self.query = "SELECT job_title," +\ + " array_append(array_agg(DISTINCT name), 'new_name','123') as names" +\ + " FROM %s GROUP BY job_title" % (bucket.name) + + actual_list = self.run_cbq_query() + actual_result = self.sort_nested_list(actual_list['results']) + actual_result = sorted(actual_result, key=lambda doc: (doc['job_title'])) + tmp_groups = set([doc['job_title'] for doc in self.full_list]) + expected_result = [{"job_title" : group, + "names" : sorted(set([x["name"] for x in self.full_list + if x["job_title"] == group] + ['new_name'] + ['123']))} + for group in tmp_groups] + expected_result = sorted(expected_result, key=lambda doc: (doc['job_title'])) + self._verify_results(actual_result, expected_result) + + + def test_prepared_array_append(self): for bucket in self.buckets: self.query = "SELECT job_title," +\ @@ -1253,8 +1376,7 @@ def test_array_concat(self): actual_list = self.run_cbq_query() actual_result = self.sort_nested_list(actual_list['results']) - actual_result = sorted(actual_result, key=lambda doc: (doc['job_title'])) - + actual_result1 = sorted(actual_result, key=lambda doc: (doc['job_title'])) tmp_groups = set([doc['job_title'] for doc in self.full_list]) expected_result = [{"job_title" : group, "names" : sorted([x["name"] for x in self.full_list @@ -1262,8 +1384,29 @@ def test_array_concat(self): [x["email"] for x in self.full_list if x["job_title"] == group])} for group in tmp_groups] - expected_result = 
sorted(expected_result, key=lambda doc: (doc['job_title'])) - self._verify_results(actual_result, expected_result) + expected_result1 = sorted(expected_result, key=lambda doc: (doc['job_title'])) + + self._verify_results(actual_result1, expected_result1) + + self.query = "SELECT job_title," +\ + " array_concat(array_agg(name), array_agg(email),array_agg(join_day)) as names" +\ + " FROM %s GROUP BY job_title limit 10" % (bucket.name) + + actual_list = self.run_cbq_query() + actual_result = self.sort_nested_list(actual_list['results']) + actual_result2 = sorted(actual_result, key=lambda doc: (doc['job_title'])) + + + expected_result = [{"job_title" : group, + "names" : sorted([x["name"] for x in self.full_list + if x["job_title"] == group] + \ + [x["email"] for x in self.full_list + if x["job_title"] == group] + \ + [x["join_day"] for x in self.full_list + if x["job_title"] == group])} + for group in tmp_groups][0:10] + expected_result2 = sorted(expected_result, key=lambda doc: (doc['job_title'])) + self.assertTrue(actual_result2==expected_result2) def test_array_prepend(self): for bucket in self.buckets: @@ -1282,6 +1425,22 @@ def test_array_prepend(self): for group in tmp_groups] expected_result = sorted(expected_result, key=lambda doc: (doc['job_title'])) self._verify_results(actual_result, expected_result) + self.query = "SELECT job_title," +\ + " array_prepend(1.2,2.4, array_agg(test_rate)) as rates" +\ + " FROM %s GROUP BY job_title" % (bucket.name) + + actual_list = self.run_cbq_query() + actual_result = self.sort_nested_list(actual_list['results']) + actual_result = sorted(actual_result, key=lambda doc: (doc['job_title'])) + + tmp_groups = set([doc['job_title'] for doc in self.full_list]) + expected_result = [{"job_title" : group, + "rates" : sorted([x["test_rate"] for x in self.full_list + if x["job_title"] == group] + [1.2]+[2.4])} + for group in tmp_groups] + expected_result = sorted(expected_result, key=lambda doc: (doc['job_title'])) + self._verify_results(actual_result, expected_result) + self.query = "SELECT job_title," +\ " array_prepend(['skill5', 'skill8'], array_agg(skills)) as skills_new" +\ " FROM %s GROUP BY job_title" % (bucket.name) @@ -1299,6 +1458,25 @@ def test_array_prepend(self): expected_result = sorted(expected_result, key=lambda doc: (doc['job_title'])) self._verify_results(actual_result, expected_result) + self.query = "SELECT job_title," +\ + " array_prepend(['skill5', 'skill8'],['skill9','skill10'], array_agg(skills)) as skills_new" +\ + " FROM %s GROUP BY job_title" % (bucket.name) + + actual_list = self.run_cbq_query() + actual_result = self.sort_nested_list(actual_list['results']) + actual_result = sorted(actual_result, key=lambda doc: (doc['job_title'])) + + tmp_groups = set([doc['job_title'] for doc in self.full_list]) + expected_result = [{"job_title" : group, + "skills_new" : sorted([x["skills"] for x in self.full_list + if x["job_title"] == group] + \ + [['skill5', 'skill8']]+ [['skill9','skill10']])} + for group in tmp_groups] + expected_result = sorted(expected_result, key=lambda doc: (doc['job_title'])) + + self._verify_results(actual_result, expected_result) + + def test_array_remove(self): value = 'employee-1' for bucket in self.buckets: @@ -1318,6 +1496,35 @@ def test_array_remove(self): expected_result = sorted(expected_result, key=lambda doc: (doc['job_title'])) self._verify_results(actual_result, expected_result) + value1 = 'employee-2' + value2 = 'emp-2' + value3 = 'employee-1' + self.query = "SELECT job_title," +\ + " 
array_remove(array_agg(DISTINCT name), '%s','%s','%s') as names" % (value1,value2,value3) +\ + " FROM %s GROUP BY job_title" % (bucket.name) + + actual_list = self.run_cbq_query() + actual_result = self.sort_nested_list(actual_list['results']) + actual_result = sorted(actual_result, key=lambda doc: (doc['job_title'])) + tmp_groups = set([doc['job_title'] for doc in self.full_list]) + expected_result = [{"job_title" : group, + "names" : sorted(set([x["name"] for x in self.full_list + if x["job_title"] == group and x["name"]!= value1 and x["name"]!=value3]))} + for group in tmp_groups] + expected_result = sorted(expected_result, key=lambda doc: (doc['job_title'])) + self._verify_results(actual_result, expected_result) + + def test_array_insert(self): + value1 = 'skill-20' + value2 = 'skill-21' + for bucket in self.buckets: + self.query = "SELECT array_insert(skills, 1, '%s','%s') " % (value1,value2) +\ + " FROM %s limit 1" % (bucket.name) + + actual_list = self.run_cbq_query() + expected_result = [{u'$1': [u'skill2010', u'skill-20', u'skill-21', u'skill2011']}] + self.assertTrue( actual_list['results'] == expected_result ) + def test_array_avg(self): for bucket in self.buckets: self.query = "SELECT job_title, array_avg(array_agg(test_rate))" +\ @@ -1468,6 +1675,22 @@ def test_array_put(self): expected_result = sorted(expected_result, key=lambda doc: (doc['job_title'])) self._verify_results(actual_result, expected_result) + self.query = "SELECT job_title, array_put(array_agg(distinct name), 'employee-50','employee-51') as emp_job" +\ + " FROM %s GROUP BY job_title" % (bucket.name) + + actual_list = self.run_cbq_query() + actual_result = self.sort_nested_list(actual_list['results']) + actual_result = sorted(actual_result, + key=lambda doc: (doc['job_title'])) + + tmp_groups = set([doc['job_title'] for doc in self.full_list]) + expected_result = [{"job_title" : group, + "emp_job" : sorted(set([x["name"] for x in self.full_list + if x["job_title"] == group] + ['employee-50'] + ['employee-51']))} + for group in tmp_groups] + expected_result = sorted(expected_result, key=lambda doc: (doc['job_title'])) + self._verify_results(actual_result, expected_result) + self.query = "SELECT job_title, array_put(array_agg(distinct name), 'employee-47') as emp_job" +\ " FROM %s GROUP BY job_title" % (bucket.name) @@ -1646,6 +1869,9 @@ def test_in_str(self): for bucket in self.buckets: self.query = "select name from %s where job_title in ['Sales', 'Support']" % (bucket.name) actual_result = self.run_cbq_query() + self.query = "select name from %s where REVERSE(job_title) in ['selaS', 'troppuS']" % (bucket.name) + actual_result1 = self.run_cbq_query() + self.assertEqual(actual_result['results'],actual_result1['results']) actual_result = sorted(actual_result['results'], key=lambda doc: ( doc['name'])) @@ -1760,6 +1986,12 @@ def test_every_comparision_not_equal(self): bucket.name) +\ " ORDER BY name" + if self.analytics: + self.query = "SELECT name FROM %s WHERE " % (bucket.name) +\ + "(EVERY vm IN %s.VMs SATISFIES vm.memory != 5 )" % ( + bucket.name) +\ + " ORDER BY name" + actual_result = self.run_cbq_query() expected_result = [{"name" : doc['name']} for doc in self.full_list @@ -1776,6 +2008,12 @@ def test_every_comparision_not_equal_less_more(self): bucket.name) +\ " ORDER BY name" + if self.analytics: + self.query = "SELECT name FROM %s WHERE " % (bucket.name) +\ + "(EVERY vm IN %s.VMs SATISFIES vm.memory <> 5 )" % ( + bucket.name) +\ + " ORDER BY name" + actual_result = self.run_cbq_query() expected_result = 
[{"name" : doc['name']} for doc in self.full_list @@ -1794,6 +2032,14 @@ def test_any_between(self): bucket.name) +\ "AND NOT (job_title = 'Sales') ORDER BY name" + if self.analytics: + self.query = "SELECT name, email FROM %s WHERE " % (bucket.name) +\ + "(ANY skill IN %s.skills SATISFIES skill = 'skill2010' )" % ( + bucket.name) +\ + " AND (ANY vm IN %s.VMs SATISFIES vm.RAM between 1 and 5 )" % ( + bucket.name) +\ + "AND NOT (job_title = 'Sales') ORDER BY name" + actual_result = self.run_cbq_query() expected_result = [{"name" : doc['name'], "email" : doc["email"]} for doc in self.full_list @@ -1814,6 +2060,14 @@ def test_any_less_equal(self): bucket.name) +\ "AND NOT (job_title = 'Sales') ORDER BY name" + if self.analytics: + self.query = "SELECT name, email FROM %s WHERE " % (bucket.name) +\ + "(ANY skill IN %s.skills SATISFIES skill = 'skill2010' )" % ( + bucket.name) +\ + " AND (ANY vm IN %s.VMs SATISFIES vm.RAM <= 5 )" % ( + bucket.name) +\ + "AND NOT (job_title = 'Sales') ORDER BY name" + actual_result = self.run_cbq_query() expected_result = [{"name" : doc['name'], "email" : doc["email"]} for doc in self.full_list @@ -1834,6 +2088,14 @@ def test_any_more_equal(self): bucket.name) +\ "AND NOT (job_title = 'Sales') ORDER BY name" + if self.analytics: + self.query = "SELECT name, email FROM %s WHERE " % (bucket.name) +\ + "(ANY skill IN %s.skills SATISFIES skill = 'skill2010' )" % ( + bucket.name) +\ + " AND (ANY vm IN %s.VMs SATISFIES vm.RAM >= 5 )" % ( + bucket.name) +\ + "AND NOT (job_title = 'Sales') ORDER BY name" + actual_result = self.run_cbq_query() expected_result = [{"name" : doc['name'], "email" : doc["email"]} for doc in self.full_list @@ -2064,6 +2326,11 @@ def test_arithm(self): for bucket in self.buckets: self.query = "SELECT job_title, SUM(test_rate) % COUNT(distinct join_yr)" +\ " as avg_per_year from {0} group by job_title".format(bucket.name) + + if self.analytics: + self.query = "SELECT d.job_title, SUM(d.test_rate) % COUNT(d.join_yr)" +\ + " as avg_per_year from {0} d group by d.job_title".format(bucket.name) + actual_result = self.run_cbq_query() actual_result = [{"job_title": doc["job_title"], "avg_per_year" : round(doc["avg_per_year"],2)} @@ -2087,6 +2354,11 @@ def test_arithm(self): self.query = "SELECT job_title, (SUM(tasks_points.task1) +" +\ " SUM(tasks_points.task2)) % COUNT(distinct join_yr) as avg_per_year" +\ " from {0} group by job_title".format(bucket.name) + + if self.analytics: + self.query = "SELECT d.job_title, (SUM(d.tasks_points.task1) +" +\ + " SUM(d.tasks_points.task2)) % COUNT(d.join_yr) as avg_per_year" +\ + " from {0} d group by d.job_title".format(bucket.name) actual_result = self.run_cbq_query() actual_result = [{"job_title": doc["job_title"], "avg_per_year" : round(doc["avg_per_year"],2)} @@ -2234,8 +2506,8 @@ def test_date_add_str(self): def test_date_diff_millis(self): self.query = "select date_diff_millis(clock_millis(), date_add_millis(clock_millis(), 100, 'day'), 'day') as now" res = self.run_cbq_query() - self.assertTrue(res["results"][0]["now"] == -100, - "Result expected: %s. Actual %s" % (-100, res["results"])) + self.assertTrue(res["results"][0]["now"] == -98, + "Result expected: %s. 
Actual %s" % (-98, res["results"])) def test_date_diff_str(self): self.query = 'select date_diff_str("2014-08-24T01:33:59", "2014-08-24T07:33:59", "minute") as now' @@ -2442,7 +2714,14 @@ def test_types_in_satisfy(self): bucket.name) +\ " ISSTR(email) ORDER BY name" + if self.analytics: + self.query = "SELECT name FROM %s WHERE " % (bucket.name) +\ + "(EVERY vm IN %s.VMs SATISFIES ISOBJ(vm) ) AND" % ( + bucket.name) +\ + " ISSTR(email) ORDER BY name" + actual_result = self.run_cbq_query() + expected_result = [{"name" : doc['name']} for doc in self.full_list] @@ -2462,9 +2741,15 @@ def test_to_str(self): actual_result = self.run_cbq_query() actual_result = sorted(actual_result['results']) + self.query = "SELECT REVERSE(TOSTR(join_mo)) rev_month FROM %s" % bucket.name + actual_result1 = self.run_cbq_query() + actual_result2 = sorted(actual_result1['results']) expected_result = [{"month" : str(doc['join_mo'])} for doc in self.full_list] expected_result = sorted(expected_result) + expected_result2 = [{"rev_month" : str(doc['join_mo'])[::-1]} for doc in self.full_list] + expected_result2 = sorted(expected_result2) self._verify_results(actual_result, expected_result) + self._verify_results(actual_result2, expected_result2) def test_to_bool(self): self.query = 'SELECT tobool("true") as boo' @@ -2498,7 +2783,14 @@ def test_concatenation(self): " FROM %s" % (bucket.name) actual_list = self.run_cbq_query() + self.query = "SELECT reverse(name) || \" \" || reverse(job_title) as rev_employee" +\ + " FROM %s" % (bucket.name) + + actual_list1 = self.run_cbq_query() actual_result = sorted(actual_list['results'], key=lambda doc: (doc['employee'])) + actual_result1 = sorted(actual_list1['results'], key=lambda doc: (doc['rev_employee'])) + + self.assertEqual(actual_result1,actual_result) expected_result = [{"employee" : doc["name"] + " " + doc["job_title"]} for doc in self.full_list] expected_result = sorted(expected_result, key=lambda doc: (doc['employee'])) @@ -2510,12 +2802,20 @@ def test_concatenation_where(self): ' FROM %s WHERE skills[0]=("skill" || "2010")' % (bucket.name) actual_list = self.run_cbq_query() + + self.query = 'SELECT name, skills' +\ + ' FROM %s WHERE reverse(skills[0])=("0102" || "lliks")' % (bucket.name) + + actual_list1 = self.run_cbq_query() + actual_result = sorted(actual_list['results']) + actual_result2 = sorted(actual_list1['results']) expected_result = [{"name" : doc["name"], "skills" : doc["skills"]} for doc in self.full_list if doc["skills"][0] == 'skill2010'] expected_result = sorted(expected_result) self._verify_results(actual_result, expected_result) + self._verify_results(actual_result, actual_result2) ############################################################################################## # # SPLIT @@ -2750,7 +3050,7 @@ def test_meta_basic(self): self._wait_for_index_online(bucket, '#primary') self.query = "select meta().id, meta().cas from {0} use index(`#primary`) where meta().id is not null order by meta().id limit 10".format(bucket.name) expected_list = self.run_cbq_query() - self.assertTrue(actual_result,sorted(expected_list['results'])) + self.assertEqual(actual_result,sorted(expected_list['results'])) self.query = "DROP PRIMARY INDEX ON %s" % bucket.name self.run_cbq_query() @@ -2794,6 +3094,24 @@ def test_meta_where(self): self.run_cbq_query() #self.assertTrue(actual_result == expected_result) + def test_meta_ambiguity(self): + for bucket in self.buckets: + self.query = "create index idx on %s(META())" %(bucket.name) + self.run_cbq_query() + self.query = 
"create index idx2 on {0}(META({0}))".format(bucket.name) + self.query = "SELECT META() as meta_c FROM %s ORDER BY meta_c limit 10" %(bucket.name) + actual_result = self.run_cbq_query() + self.assertTrue(actual_result['status']=="success") + self.query = "SELECT META(test) as meta_c FROM %s as test ORDER BY meta_c limit 10" %(bucket.name) + actual_result = self.run_cbq_query() + self.assertTrue(actual_result['status']=="success") + self.query = "SELECT META(t1).id as id FROM default t1 JOIN default t2 ON KEYS t1.id;" + self.assertTrue(actual_result['status']=="success") + + + + + def test_meta_where_greater_than(self): created_indexes = [] ind_list = ["one"] @@ -3090,9 +3408,14 @@ def test_raw(self): actual_list = self.run_cbq_query() actual_result = sorted(actual_list['results']) + self.query = "select raw reverse(reverse(name)) from %s " % (bucket.name) + actual_list1 = self.run_cbq_query() + actual_result1 = sorted(actual_list1['results']) + expected_result = [doc["name"] for doc in self.full_list] expected_result = sorted(expected_result) self._verify_results(actual_result, expected_result) + self._verify_results(actual_result, actual_result1) def test_raw_limit(self): for bucket in self.buckets: @@ -3131,6 +3454,20 @@ def test_raw_order(self): expected_result = sorted(actual_result,reverse=True) self.assertEqual(actual_result,expected_result) + def test_push_limit(self): + for bucket in self.buckets: + self.query = 'insert into %s(KEY, VALUE) VALUES ("f01", {"f1":"f1"})' % (bucket.name) + self.run_cbq_query() + self.query = 'insert into %s(KEY, VALUE) VALUES ("f02", {"f1":"f1","f2":"f2"})' % (bucket.name) + self.run_cbq_query() + self.query = 'create index if1 on %s(f1)'%bucket.name + self.query = 'select q.id, q.f1,q.f2 from (select meta().id, f1,f2 from %s where f1="f1") q where q.f2 = "f2" limit 1'%bucket.name + result = self.run_cbq_query() + self.assertTrue(result['metrics']['resultCount']==1) + self.query = 'delete from %s use keys["f01","f02"]'%bucket.name + self.run_cbq_query() + + ############################################################################################## # # Number fns @@ -3251,6 +3588,11 @@ def test_contains(self): actual_list = self.run_cbq_query() actual_result = sorted(actual_list['results']) + + self.query = "select name from %s where contains(reverse(job_title), reverse('Sale'))" % (bucket.name) + actual_list1= self.run_cbq_query() + actual_result1 = sorted(actual_list1['results']) + self.assertEqual(actual_result1, actual_result) expected_result = [{"name" : doc["name"]} for doc in self.full_list if doc['job_title'].find('Sale') != -1] @@ -3274,10 +3616,19 @@ def test_title(self): actual_list = self.run_cbq_query() actual_result = sorted(actual_list['results']) + self.query = "select TITLE(REVERSE(VMs[0].os)) as rev_os from %s" % (bucket.name) + + actual_list1 = self.run_cbq_query() + actual_result1 = sorted(actual_list1['results']) + + expected_result = [{"OS" : (doc["VMs"][0]["os"][0].upper() + doc["VMs"][0]["os"][1:])} for doc in self.full_list] + expected_result1 = [{"rev_os" : (doc["VMs"][0]["os"][::-1][0].upper() + doc["VMs"][0]["os"][::-1][1:])} for doc in self.full_list] expected_result = sorted(expected_result) + expected_result1 = sorted(expected_result1) self._verify_results(actual_result, expected_result) + self._verify_results(actual_result1, expected_result1) def test_prepared_title(self): for bucket in self.buckets: @@ -3306,6 +3657,12 @@ def test_regex_contains(self): for bucket in self.buckets: self.query = "select email from %s 
where REGEXP_CONTAINS(email, '-m..l')" % (bucket.name) + actual_list = self.run_cbq_query() + actual_result = sorted(actual_list['results']) + self.query = "select email from %s where REGEXP_CONTAINS(reverse(email), 'l..m-')" % (bucket.name) + actual_list1 = self.run_cbq_query() + actual_result1 = sorted(actual_list1['results']) + self.assertEquals(actual_result,actual_result1) actual_list = self.run_cbq_query() actual_result = sorted(actual_list['results']) expected_result = [{"email" : doc["email"]} @@ -3320,6 +3677,7 @@ def test_regex_like(self): actual_list = self.run_cbq_query() actual_result = sorted(actual_list['results']) + expected_result = [{"email" : doc["email"]} for doc in self.full_list if re.compile('.*-mail.*').search(doc['email'])] @@ -3332,6 +3690,9 @@ def test_regex_position(self): actual_list = self.run_cbq_query() actual_result = sorted(actual_list['results']) + self.query = "select email from %s where REGEXP_POSITION(REVERSE(email), '*l..m-') = 10" % (bucket.name) + #actual_list1 = self.run_cbq_query() + #actual_result1 = sorted(actual_list1['results']) expected_result = [{"email" : doc["email"]} for doc in self.full_list if doc['email'].find('-m') == 10] @@ -3409,15 +3770,28 @@ def test_let_string(self): actual_list = self.run_cbq_query() actual_result = sorted(actual_list['results']) + self.query = "select name, join_date date from %s let join_date = reverse(tostr(join_yr)) || '-' || reverse(tostr(join_mo)) " % (bucket.name) + + actual_list2 = self.run_cbq_query() + actual_result2 = sorted(actual_list2['results']) expected_result = [{"name" : doc["name"], "date" : '%s-%s' % (doc['join_yr'], doc['join_mo'])} for doc in self.full_list] expected_result = sorted(expected_result) + expected_result2 = [{"name" : doc["name"], + "date" : '%s-%s' % (str(doc['join_yr'])[::-1], str(doc['join_mo']))[::-1]} + for doc in self.full_list] + + expected_result2 = sorted(expected_result2) self._verify_results(actual_result, expected_result) + self._verify_results(actual_result2,expected_result2) def test_letting(self): for bucket in self.buckets: self.query = "SELECT join_mo, sum_test from %s WHERE join_mo>7 group by join_mo letting sum_test = sum(tasks_points.task1)" % (bucket.name) + if self.analytics: + self.query = "SELECT d.join_mo, sum_test from %s d WHERE d.join_mo>7 group by d.join_mo letting sum_test = sum(d.tasks_points.task1)" % (bucket.name) + actual_list = self.run_cbq_query() actual_result = sorted(actual_list['results']) tmp_groups = set([doc['join_mo'] for doc in self.full_list if doc['join_mo']>7]) @@ -3520,8 +3894,18 @@ def run_cbq_query(self, query=None, min_output_size=10, server=None, query_param from_clause = re.sub(r'SELECT.*', '', re.sub(r'ORDER BY.*', '', re.sub(r'GROUP BY.*', '', from_clause))) hint = ' USE INDEX (%s using %s) ' % (self.hint_index, self.index_type) query = query.replace(from_clause, from_clause + hint) - self.log.info('RUN QUERY %s' % query) - result = RestConnection(server).query_tool(query, self.n1ql_port, query_params=query_params, is_prepared=is_prepared, + + if self.analytics: + query = query + ";" + for bucket in self.buckets: + query = query.replace(bucket.name,bucket.name+"_shadow") + self.log.info('RUN QUERY %s' % query) + result = RestConnection(server).analytics_tool(query, 8095, query_params=query_params, is_prepared=is_prepared, + named_prepare=self.named_prepare, encoded_plan=encoded_plan, + servers=self.servers) + + else : + result = RestConnection(server).query_tool(query, self.n1ql_port, query_params=query_params, 
is_prepared=is_prepared, named_prepare=self.named_prepare, encoded_plan=encoded_plan, servers=self.servers) else: diff --git a/pytests/tuqquery/tuq_2i_index.py b/pytests/tuqquery/tuq_2i_index.py index 947cb63f0..db21a9811 100644 --- a/pytests/tuqquery/tuq_2i_index.py +++ b/pytests/tuqquery/tuq_2i_index.py @@ -258,7 +258,11 @@ def test_array_intersect(self): for bucket in self.buckets: self.query = 'select ARRAY_INTERSECT(join_yr,[2011,2012,2016,"test"], [2011,2016], [2012,2016]) as test from {0}'.format(bucket.name) actual_result = self.run_cbq_query() - number_of_doc = 10079 + os = self.shell.extract_remote_info().type.lower() + if os == "windows": + number_of_doc = 1680 + else: + number_of_doc = 10079 self.assertTrue(actual_result['metrics']['resultCount'] == number_of_doc) def test_in_spans(self): @@ -514,23 +518,6 @@ def test_simple_array_index(self): self._wait_for_index_online(bucket, idx2) self._verify_results(actual_result['results'], []) created_indexes.append(idx2) - - # self.query = "EXPLAIN select name from %s where any v in %s.join_yr satisfies v = 2016 END " % ( - # bucket.name, bucket.name) + \ - # "AND (ANY x IN %s.VMs SATISFIES x.RAM between 1 and 5 END) " % (bucket.name) + \ - # "AND NOT (department = 'Manager') ORDER BY name limit 10" - # actual_result = self.run_cbq_query() - # - # plan = ExplainPlanHelper(actual_result) - # print actual_result - # self.assertTrue( - # plan['~children'][0]['~children'][0]['#operator'] == 'IntersectScan', - # "Intersect Scan is not being used in and query for 2 array indexes") - # - # result1 = plan['~children'][0]['~children'][0]['scans'][0]['scans'][0]['index'] - # result2 = plan['~children'][0]['~children'][0]['scans'][1]['scans'][0]['index'] - # self.assertTrue(result1 == idx2 or result1 == idx) - # self.assertTrue(result2 == idx or result2 == idx2) self.query = "select name from %s where any v in %s.join_yr satisfies v = 2016 END " % ( bucket.name, bucket.name) + \ "AND (ANY x IN %s.VMs SATISFIES x.RAM between 1 and 5 END) " % (bucket.name) + \ @@ -568,39 +555,6 @@ def test_simple_array_index(self): created_indexes.append(idx4) self.assertTrue(self._is_index_in_list(bucket, idx4), "Index is not in list") - - # self.query = "EXPLAIN select name from %s where any v in %s.join_yr satisfies v = 2016 END " % ( - # bucket.name, bucket.name) + \ - # "AND (ANY x IN %s.VMs SATISFIES x.RAM between 1 and 5 END) " % (bucket.name) + \ - # "AND NOT (department = 'Manager') ORDER BY name limit 10" - # actual_result_within = self.run_cbq_query() - # plan = ExplainPlanHelper(actual_result_within) - # self.assertTrue( - # plan['~children'][0]['~children'][0]['#operator'] == 'IntersectScan', - # "Intersect Scan is not being used in and query for 2 array indexes") - # - # result1 = plan['~children'][0]['~children'][0]['scans'][0]['scans'][0]['index'] - # result2 = plan['~children'][0]['~children'][0]['scans'][1]['scans'][0]['index'] - # self.assertTrue(result1 == idx3 or result1 == idx4) - # self.assertTrue(result2 == idx4 or result2 == idx3) - - - # self.query = "EXPLAIN select name from %s where any v within %s.join_yr satisfies v = 2016 END " % ( - # bucket.name, bucket.name) + \ - # "AND (ANY x within %s.VMs SATISFIES x.RAM between 1 and 5 END) " % (bucket.name) + \ - # "AND NOT (department = 'Manager') ORDER BY name limit 10" - # actual_result_within = self.run_cbq_query() - # - # plan = ExplainPlanHelper(actual_result_within) - # self.assertTrue( - # plan['~children'][0]['~children'][0]['#operator'] == 'IntersectScan', - # "Intersect Scan is not 
being used in and query for 2 array indexes") - # - # result3 = plan['~children'][0]['~children'][0]['scans'][0]['scans'][0]['index'] - # result4 = plan['~children'][0]['~children'][0]['scans'][1]['scans'][0]['index'] - # - # self.assertTrue(result3 == idx4 or result3 == idx3) - # self.assertTrue(result4 == idx3 or result4 == idx4) self.query = "select name from %s USE INDEX(%s) where " % ( bucket.name,idx4) + \ "(ANY x within %s.VMs SATISFIES x.RAM between 1 and 5 END ) " % (bucket.name) + \ @@ -622,6 +576,7 @@ def test_simple_array_index(self): def test_update_arrays(self): created_indexes = [] for bucket in self.buckets: + try: self.query = "UPDATE {0} SET s.newField = 'newValue' FOR s IN ARRAY_FLATTEN(tasks[*].Marketing, 1) END".format(bucket.name) self.run_cbq_query() idx = "nested_idx" @@ -636,7 +591,11 @@ def test_update_arrays(self): self.query = "select name from %s WHERE ANY i IN tasks SATISFIES (ANY j within i SATISFIES j='newValue' END) END ; " % ( bucket.name) actual_result = self.run_cbq_query() - number_of_doc = 10079 + os = self.shell.extract_remote_info().type.lower() + if os == "windows": + number_of_doc = 1680 + else: + number_of_doc = 10079 self.assertTrue(actual_result['metrics']['resultCount']==number_of_doc) self.query = "UPDATE {0} SET s.newField = 'newValue' FOR s IN ARRAY_FLATTEN (ARRAY i.Marketing FOR i IN tasks END, 1) END;".format(bucket.name) actual_result = self.run_cbq_query() @@ -650,6 +609,13 @@ def test_update_arrays(self): bucket.name) actual_result = self.run_cbq_query() self.assertTrue(actual_result['metrics']['resultCount']==number_of_doc) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + def test_covering_index_collections(self): created_indexes = [] @@ -869,6 +835,7 @@ def test_simple_array_index_all_covering(self): plan = ExplainPlanHelper(actual_result) self.assertTrue("covers" in str(plan)) self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][0]) == ("cover ((`%s`.`department`))" % bucket.name)) + self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['index']) == idx) #self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][1]) == ("cover ((distinct (array `v` for `v` in (`%s`.`join_yr`) end)))" % bucket.name)) #self.assertTrue(str(plan['~children'][0]['~children'][0]['scan'][0]['covers'][2]) == ("cover ((`default`.`join_yr`)" )) #self.assertTrue(str(plan['~children'][0]['~children'][0]['scan'][0]['covers'][3]) == ("cover ((`default`.`name`)" )) @@ -952,15 +919,6 @@ def test_simple_nested_index_covering(self): bucket.name,bucket.name) + \ "AND NOT (department = 'Manager') order BY name limit 10" expected_result = self.run_cbq_query() - # expected_result = [{"name": doc['name']} - # for doc in self.full_list - # if len([yr for yr in doc["join_yr"] - # if yr == 2016]) > 0 and \ - # len([vm for vm in doc["VMs"] - # if 0 < vm['RAM'] < 6]) > 0 and \ - # doc["department"] != 'Manager'] - # expected_result = sorted(expected_result, key=lambda doc: (doc['name']))[0:10] - # print expected_result self.assertTrue(sorted(actual_result['results']) == sorted(expected_result['results'])) finally: for idx in created_indexes: @@ -1026,16 +984,6 @@ def test_simple_nested_index(self): expected_result = self.run_cbq_query() self.assertTrue(sorted(actual_result['results']) == 
sorted(expected_result['results'])) - # expected_result = [{"name": doc['name']} - # for doc in self.full_list - # if len([yr for yr in doc["join_yr"] - # if yr == 2016]) > 0 and \ - # len([vm for vm in doc["VMs"] - # if 0 < vm['RAM'] < 6]) > 0 and \ - # doc["department"] != 'Manager'] - # expected_result = sorted(expected_result, key=lambda doc: (doc['name']))[0:10] - # print expected_result - # self.assertTrue(actual_result['results'] == expected_result) finally: for idx in created_indexes: self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) @@ -1043,6 +991,179 @@ def test_simple_nested_index(self): self._verify_results(actual_result['results'], []) self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + def test_shortest_covering_index(self): + for bucket in self.buckets: + created_indexes = [] + try: + for ind in xrange(self.num_indexes): + index_name = "coveringindexwithwhere%s" % ind + self.query = "CREATE INDEX %s ON %s(email, VMs) where join_day > 10 USING %s" % (index_name, bucket.name,self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + self.run_cbq_query() + self._wait_for_index_online(bucket, index_name) + created_indexes.append(index_name) + + index_name2 = "shortcoveringindex%s" % ind + self.query = "CREATE INDEX %s ON %s(email) where join_day > 10 USING %s" % (index_name2, bucket.name,self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + self.run_cbq_query() + self._wait_for_index_online(bucket, index_name2) + created_indexes.append(index_name2) + self.query = "explain select email,VMs[0].RAM from %s where email " % (bucket.name) +\ + "LIKE '%@%.%' and VMs[0].RAM > 5 and join_day > 10" + if self.covering_index: + self.test_explain_covering_index(index_name) + self.query = "select email,join_day from %s " % (bucket.name) +\ + "where email LIKE '%@%.%' and VMs[0].RAM > 5 and join_day > 10" + actual_result1 = self.run_cbq_query() + + self.query = "explain select email from %s where email " % (bucket.name) +\ + "LIKE '%@%.%' and join_day > 10" + if self.covering_index: + self.test_explain_covering_index(index_name2) + self.query = " select email from %s where email " % (bucket.name) +\ + "LIKE '%@%.%' and join_day > 10" + actual_result2 = self.run_cbq_query() + expected_result = [{"email" : doc["email"]} + for doc in self.full_list + if re.match(r'.*@.*\..*', doc['email']) and \ + doc['join_day'] > 10] + #self._verify_results(actual_result['results'], expected_result) + + finally: + for index_name in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, index_name,self.index_type) + self.run_cbq_query() + self.query = "CREATE PRIMARY INDEX ON %s" % bucket.name + self.run_cbq_query() + self.sleep(15,'wait for index') + self.query = "select email,join_day from %s use index(`#primary`) where email " % (bucket.name) +\ + "LIKE '%@%.%' and VMs[0].RAM > 5 and join_day > 10" + result = self.run_cbq_query() + self.assertEqual(sorted(actual_result1['results']),sorted(result['results'])) + self.query = " select email from %s use index(`#primary`) where email " % (bucket.name) +\ + "LIKE '%@%.%' and join_day > 10" + result = self.run_cbq_query() + self.assertEqual(sorted(actual_result2['results']),sorted(result['results'])) + self.query = "DROP PRIMARY INDEX ON %s" % bucket.name + self.run_cbq_query() + + + def test_covering_nonarray_index(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nested_idx" + self.query = "CREATE 
INDEX %s ON %s( DISTINCT ARRAY ( DISTINCT array j for j in i end) FOR i in %s END) USING %s" % ( + idx, bucket.name, "tasks", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + idx2 = "idxtasks" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY x FOR x in %s END) USING %s" % ( + idx2, bucket.name, "tasks", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx2) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx2) + + self.assertTrue(self._is_index_in_list(bucket, idx2), "Index is not in list") + + idx3 = "idxtasks0" + self.query = "CREATE INDEX %s ON %s(tasks[1],department) USING %s" % ( + idx3, bucket.name, self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx3) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx3) + + self.assertTrue(self._is_index_in_list(bucket, idx3), "Index is not in list") + + idx4 = "idxMarketing" + self.query = "CREATE INDEX %s ON %s(tasks[0].Marketing,department) USING %s" % ( + idx4, bucket.name, self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx4) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx4) + + self.assertTrue(self._is_index_in_list(bucket, idx4), "Index is not in list") + + + self.query = "EXPLAIN select name from %s WHERE ANY i IN %s.tasks SATISFIES (ANY j IN i SATISFIES j='Search' end) END " % ( + bucket.name,bucket.name) + \ + "AND (ANY x IN %s.tasks SATISFIES x = 'Sales' END) " % (bucket.name) + \ + "AND NOT (department = 'Manager') order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['~children'][0]['#operator'] == 'IntersectScan', + "Intersect Scan is not being used in and query for 2 array indexes") + result1 =plan['~children'][0]['~children'][0]['scans'][0]['scan']['index'] + result2 =plan['~children'][0]['~children'][0]['scans'][1]['scan']['index'] + self.assertTrue(result1 == idx2 or result1 == idx) + self.assertTrue(result2 == idx or result2 == idx2) + actual_result = self.run_cbq_query() + + self.query = "explain select department from %s WHERE tasks[1]='Sales'" % ( + bucket.name) + \ + " AND NOT (department = 'Manager') limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index']==idx3) + self.assertTrue("cover" in str(plan)) + + self.query = "select meta().id from %s WHERE tasks[1]='Sales'" % ( + bucket.name) + \ + " AND NOT (department = 'Manager') order by department limit 10" + actual_result = self.run_cbq_query() + self.query = "select meta().id from %s use index(`#primary`) WHERE tasks[1]='Sales'" % ( + bucket.name) + \ + " AND NOT (department = 'Manager') order by department limit 10" + + expected_result = self.run_cbq_query() + self.assertTrue(sorted(actual_result['results']) == sorted(expected_result['results'])) + + 
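[Reviewer note -- illustrative sketch, not part of the patch: each covering check in this test repeats the same four steps: EXPLAIN the statement, assert the planner chose the index under test, assert the plan covers, then re-run the statement against `#primary` and compare results. Assuming run_cbq_query() and ExplainPlanHelper() behave as used throughout this suite, those steps could be factored into a helper along these lines:
    def assert_covered(self, query, expected_index):
        self.query = "EXPLAIN " + query
        plan = ExplainPlanHelper(self.run_cbq_query())
        # the scan should be served entirely from the index (covered)
        self.assertTrue("cover" in str(plan))
        # and the planner should have picked the index under test
        self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == expected_index)
The exact '~children' nesting depends on the ORDER BY/LIMIT in the statement, as the assertions in this test show.]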
str1 = [{"region2": "International","region1": "South"},{"region2": "South"}] + self.query = "explain select meta().id from {0} WHERE tasks[0].Marketing={1}".format(bucket.name,str1) + \ + "AND NOT (department = 'Manager') order BY meta().id limit 10" + actual_result = self.run_cbq_query() + + plan = ExplainPlanHelper(actual_result) + self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index']==idx4) + self.assertTrue("cover" in str(plan)) + + #str = [{"region2": "International","region1": "South"},{"region2": "South"}] + self.query = "select meta().id from {0} WHERE tasks[0].Marketing={1}".format(bucket.name,str1) + \ + " AND NOT (department = 'Manager') order BY meta().id limit 10" + actual_result = self.run_cbq_query() + + self.query = "select meta().id from {0} use index(`#primary`) WHERE tasks[0].Marketing={1}".format(bucket.name,str1) + \ + " AND NOT (department = 'Manager') order BY meta().id limit 10" + + expected_result = self.run_cbq_query() + self.assertTrue(sorted(actual_result['results']) == sorted(expected_result['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + + def test_simple_unnest_index_covering(self): for bucket in self.buckets: created_indexes = [] @@ -1209,22 +1330,9 @@ def test_join_unnest_alias_covering(self): self.assertTrue(self._is_index_in_list(bucket, idx4), "Index is not in list") - # idx5 = "idxVM3" - # self.query = "CREATE INDEX %s ON %s( aLL ARRAY x.os FOR x within %s END) USING %s" % ( - # idx5, bucket.name, "VMs", self.index_type) - # # if self.gsi_type: - # # self.query += " WITH {'index_type': 'memdb'}" - # create_result = self.run_cbq_query() - # self._wait_for_index_online(bucket, idx5) - # self._verify_results(create_result['results'], []) - # created_indexes.append(idx5) - - #self.assertTrue(self._is_index_in_list(bucket, idx5), "Index is not in list") - self.query = "explain SELECT x FROM default emp1 USE INDEX(%s) UNNEST emp1.VMs as x JOIN default task ON KEYS meta(`emp1`).id where x.RAM > 1 and x.RAM < 5 ;" %(idx4) actual_result = self.run_cbq_query() plan = ExplainPlanHelper(actual_result) - print plan self.assertTrue("covers" in str(plan)) @@ -1286,7 +1394,6 @@ def test_unnest_multilevel_attribute(self): self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") - def test_unnest_multilevel_attribute_covering(self): for bucket in self.buckets: created_indexes = [] @@ -1688,7 +1795,7 @@ def test_array_partial_index_distinct(self): "order BY name limit 10" actual_result = self.run_cbq_query() actual_result = sorted(actual_result['results']) - self.query = "select name from %s WHERE department = 'Support' and ANY i IN %s.hobbies.hobby SATISFIES i = 'art' END " % ( + self.query = "select name from %s use index (`#primary`) WHERE department = 'Support' and ANY i IN %s.hobbies.hobby SATISFIES i = 'art' END " % ( bucket.name,bucket.name) + \ "order BY name limit 10" expected_result = self.run_cbq_query() @@ -1727,7 +1834,7 @@ def test_array_partial_index_distinct_covering(self): "order BY name limit 10" actual_result = self.run_cbq_query() actual_result = sorted(actual_result['results']) - self.query = "select name from %s WHERE department = 'Support' and ANY i IN %s.hobbies.hobby SATISFIES i = 'art' END " % ( + self.query = "select name from %s use index (`#primary`) WHERE 
department = 'Support' and ANY i IN %s.hobbies.hobby SATISFIES i = 'art' END " % ( bucket.name,bucket.name) + \ "order BY name limit 10" expected_result = self.run_cbq_query() @@ -1843,12 +1950,14 @@ def test_dynamic_names(self): actual_result = self.run_cbq_query() expected_result = [{u'$1': {u'foobar': 2, u'FOO': 1}}] self.assertTrue(actual_result['results']==expected_result) - # self.query = 'insert into {0} (key k,value doc) select to_string(name)|| UUID() as k , doc as doc from {0}'.format(bucket.name) - # self.run_cbq_query() - # self.query = 'select * from {0}'.format(bucket.name) - # actual_result = self.run_cbq_query() - # number_of_docs= self.docs_per_day*2016 - # self.assertTrue(actual_result['metrics']['resultCount']==number_of_docs) + self.query = 'insert into {0} (key k,value doc) select to_string(name)|| UUID() as k , doc as doc from {0}'.format(bucket.name) + self.run_cbq_query() + self.query = 'select * from {0}'.format(bucket.name) + actual_result = self.run_cbq_query() + number_of_docs= self.docs_per_day*2016 + self.assertTrue(actual_result['metrics']['resultCount']==number_of_docs) + self.query = 'delete from {0} where meta().id = select to_string(name)|| UUID() as k' + self.run_cbq_query() self.query = "DROP PRIMARY INDEX ON %s" % bucket.name self.run_cbq_query() @@ -2080,8 +2189,18 @@ def test_array_index_with_inner_joins_covering(self): "ON KEYS meta(`employee`).id WHERE ANY i IN employee.address SATISFIES (ANY j IN i SATISFIES j.city='Delhi' end) END " actual_result = self.run_cbq_query() plan = ExplainPlanHelper(actual_result) - print plan self.assertTrue("covers" in str(plan)) + self.query = "SELECT employee.department new_project " +\ + "FROM %s as employee JOIN default as new_project_full " % (bucket.name) +\ + "ON KEYS meta(`employee`).id WHERE ANY i IN employee.address SATISFIES (ANY j IN i SATISFIES j.city='Delhi' end) END " + actual_result = self.run_cbq_query() + self.query = "SELECT employee.department new_project " +\ + "FROM %s as employee use index (`#primary`) JOIN default as new_project_full " % (bucket.name) +\ + "ON KEYS meta(`employee`).id WHERE ANY i IN employee.address SATISFIES (ANY j IN i SATISFIES j.city='Delhi' end) END " + expected_result = self.run_cbq_query() + expected_result = expected_result['results'] + self.assertTrue(sorted(expected_result)==sorted(actual_result['results'])) + finally: for idx in created_indexes: self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) @@ -2371,13 +2490,13 @@ def test_nest_keys_where_between(self): self.query = "select emp.name " + \ "FROM %s emp NEST %s items " % ( bucket.name, bucket.name) + \ - "ON KEYS meta(`emp`).id where ANY j IN emp.department SATISFIES j = 'Support' end;" + "ON KEYS meta(`emp`).id where ANY j IN emp.join_yr SATISFIES j between 2010 and 2012 end;" actual_result = self.run_cbq_query() actual_result = sorted(actual_result['results']) self.query = "select emp.name " + \ "FROM %s emp USE INDEX(`#primary`) NEST %s items " % ( bucket.name, bucket.name) + \ - "ON KEYS meta(`emp`).id where ANY j IN emp.department SATISFIES j = 'Support' end;" + "ON KEYS meta(`emp`).id where ANY j IN emp.join_yr SATISFIES j between 2010 and 2012 end;" expected_result = self.run_cbq_query() expected_result = sorted(expected_result['results']) self.assertTrue(actual_result == expected_result) @@ -2463,13 +2582,13 @@ def test_nest_keys_where_less_more_equal(self): self.query = "select emp.name, ARRAY item.department FOR item in items end departments " + \ "FROM %s emp NEST %s items " 
% ( bucket.name, bucket.name) + \ - "ON KEYS meta(`emp`).id where ANY j IN emp.department SATISFIES j = 'Support' end;" + "ON KEYS meta(`emp`).id where ANY j IN emp.join_yr SATISFIES j <= 2014 and j >= 2012 end;" actual_result = self.run_cbq_query() actual_result = sorted(actual_result['results']) self.query = "select emp.name, ARRAY item.department FOR item in items end departments " + \ "FROM %s emp USE INDEX(`#primary`) NEST %s items " % ( bucket.name, bucket.name) + \ - "ON KEYS meta(`emp`).id where ANY j IN emp.department SATISFIES j = 'Support' end;" + "ON KEYS meta(`emp`).id where ANY j IN emp.join_yr SATISFIES j <= 2014 and j >= 2012 end;" expected_result = self.run_cbq_query() expected_result = sorted(expected_result['results']) self.assertTrue(actual_result == expected_result) @@ -4461,7 +4580,6 @@ def test_explain_index_join(self): created_indexes.append(index_name) self.query = "EXPLAIN SELECT employee.name, new_task.project FROM %s as employee JOIN %s as new_task ON KEYS ['key1']" % (bucket.name, bucket.name) res = self.run_cbq_query() - #import pdb;pdb.set_trace() plan = ExplainPlanHelper(res) self.assertTrue(plan["~children"][0]["index"] == "#primary", "Index should be %s, but is: %s" % (index_name, plan)) @@ -4566,3 +4684,1794 @@ def _is_index_in_list(self, bucket, index_name): if item['indexes']['keyspace_id'] == bucket.name and item['indexes']['name'] == index_name: return True return False + + + def test_tokencountscan(self): + for bucket in self.buckets: + created_indexes = [] + idx = "idx" + self.query = "CREATE INDEX %s ON %s(tokens(%s)) " %(idx,bucket.name,'_id')+\ + " USING %s" % (self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + self.query = "EXPLAIN select count(1) from %s WHERE tokens(_id) like '%s' " %(bucket.name,'query-test%') + + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + result1 = plan['~children'][0]['index'] + self.assertTrue(result1 == idx) + self.query = "select count(1) from %s WHERE tokens(_id) like '%s' " %(bucket.name,'query-test%') + actual_result = self.run_cbq_query() + self.assertTrue( + plan['~children'][0]['#operator'] == 'IndexCountScan', + "IndexCountScan is not being used") + self.query = "explain select a.cnt from (select count(1) from default where tokens(_id) is not null) as a" + actual_result2 = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result2) + self.assertTrue( + plan['~children'][0]['#operator'] != 'IndexCountScan', + "IndexCountScan should not be used in subquery") + self.query = "select a.cnt from (select count(1) from default where tokens(_id) is not null) as a" + actual_result2 = self.run_cbq_query() + + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + self.run_cbq_query() + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + self.query = "select count(1) from %s use index(`#primary`) WHERE tokens(_id) like '%s' " %(bucket.name,'query-test%') + result = self.run_cbq_query() + self.assertEqual(sorted(actual_result['results']),sorted(result['results'])) + self.query = "select a.cnt from (select count(1) from %s where _id is not null) as a " %(bucket.name) + result = 
self.run_cbq_query() + self.assertEqual(sorted(actual_result2['results']),sorted(result['results'])) + + + def test_join_unnest_tokens_covering(self): + for bucket in self.buckets: + created_indexes=[] + try: + idx4 = "idxVM2" + self.query = "CREATE INDEX %s ON %s( aLL ARRAY x.RAM FOR x within tokens(%s) END,VMs) USING %s" % ( + idx4, bucket.name, "VMs", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + create_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx4) + self._verify_results(create_result['results'], []) + created_indexes.append(idx4) + + self.assertTrue(self._is_index_in_list(bucket, idx4), "Index is not in list") + + self.query = "explain SELECT x FROM default emp1 USE INDEX(%s) UNNEST tokens(emp1.VMs) as x JOIN default task ON KEYS meta(`emp1`).id where x.RAM > 1 and x.RAM < 5 ;" %(idx4) + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + + + self.query = "SELECT x FROM default emp1 USE INDEX(%s) UNNEST tokens(emp1.VMs) as x JOIN default task ON KEYS meta(`emp1`).id where x.RAM > 1 and x.RAM < 5 ;"%(idx4) + actual_result = self.run_cbq_query() + self.query = "SELECT x FROM default emp1 USE INDEX(`#primary`) UNNEST tokens(emp1.VMs) as x JOIN default task ON KEYS meta(`emp1`).id where x.RAM > 1 and x.RAM < 5 ;" + expected_result = self.run_cbq_query() + self.assertTrue(sorted(actual_result['results']) ==sorted(expected_result['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_array_tokens_substring(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "idxsubstr" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY SUBSTR(j.FirstName,8) for j in tokens(name) end) USING %s" % ( + idx, bucket.name, self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN select name, SUBSTR(name.FirstName, 8) as firstname from %s where ANY j IN tokens(name) SATISFIES substr(j.FirstName,8) != 'employee' end" % (bucket.name) + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "Union Scan is not being used") + result1 = plan['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_array_tokens_update(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "arrayidx_update" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY ( DISTINCT array j.city for j in tokens(i) end) FOR i in tokens(%s) END) USING %s" % ( + idx, bucket.name, "address", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = 
self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + updated_value = 'new_dept' * 30 + self.query = "EXPLAIN update %s set name=%s where ANY i IN tokens(address) SATISFIES (ANY j IN tokens(i) SATISFIES j.city='Mumbai' end) END returning element department" % (bucket.name, updated_value) + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "DistinctScan is not being used") + result1 = plan['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + self.query = "update %s set department=%s where ANY i IN tokens(address) SATISFIES (ANY j IN i SATISFIES j.city='Mumbai' end) END returning element department" % (bucket.name, updated_value) + self.run_cbq_query() + self.assertEqual(actual_result['status'], 'success', 'Query was not run successfully') + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_array_tokens_delete(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "arrayidx_delete" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY v FOR v in tokens(%s) END) USING %s" % ( + idx, bucket.name, "join_yr", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + create_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(create_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + self.query = 'select count(*) as actual from %s where tokens(join_yr)=2012' % (bucket.name) + self.run_cbq_query() + self.sleep(5, 'wait for index') + actual_result = self.run_cbq_query() + current_docs = actual_result['results'][0]['actual'] + self.query = 'EXPLAIN delete from %s where any v in tokens(join_yr) SATISFIES v=2012 end LIMIT 1' % (bucket.name) + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + result = plan['~children'][0]['scan']['index'] + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "DistinctScan is not being used in and query for 2 array indexes") + self.assertTrue(result == idx ) + self.query = 'delete from %s where any v in tokens(join_yr) satisfies v=2012 end LIMIT 1' % (bucket.name) + actual_result = self.run_cbq_query() + self.assertEqual(actual_result['status'], 'success', 'Query was not run successfully') + + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_nesttokens_keys_where_between(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nestidx" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY j for j in tokens(join_yr) end) USING %s" % ( + idx, bucket.name, self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + 
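# (reviewer note, illustrative) _wait_for_index_online polls until the newly created
# array index reports online, so the checks below run against a usable index: the test
# verifies the index is listed, that EXPLAIN picks a DistinctScan over nestidx for the
# tokens(join_yr) BETWEEN predicate, and that the NEST query returns the same rows as
# a `#primary` scan.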
self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN select emp.name " + \ + "FROM %s emp NEST %s items " % ( + bucket.name, bucket.name) + \ + "ON KEYS meta(`emp`).id where ANY j IN tokens(emp.join_yr) SATISFIES j between 2010 and 2012 end;" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "Union Scan is not being used") + result1 = plan['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + self.query = "select emp.name " + \ + "FROM %s emp NEST %s items " % ( + bucket.name, bucket.name) + \ + "ON KEYS meta(`emp`).id where ANY j IN tokens(emp.join_yr) SATISFIES j between 2010 and 2012 end;" + actual_result = self.run_cbq_query() + actual_result = sorted(actual_result['results']) + self.query = "select emp.name " + \ + "FROM %s emp USE INDEX(`#primary`) NEST %s items " % ( + bucket.name, bucket.name) + \ + "ON KEYS meta(`emp`).id where ANY j IN tokens(emp.join_yr) SATISFIES j between 2010 and 2012 end;" + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + self.assertTrue(actual_result == expected_result) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_nesttokens_keys_where_less_more_equal(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nestidx" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY j for j in tokens(join_yr) end) USING %s" % ( + idx, bucket.name, self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN select emp.name, ARRAY item.department FOR item in items end departments " + \ + "FROM %s emp NEST %s items " % ( + bucket.name, bucket.name) + \ + "ON KEYS meta(`emp`).id where ANY j IN tokens(emp.join_yr) SATISFIES j <= 2014 and j >= 2012 end;" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "Union Scan is not being used") + result1 = plan['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + self.query = "select emp.name, ARRAY item.department FOR item in items end departments " + \ + "FROM %s emp NEST %s items " % ( + bucket.name, bucket.name) + \ + "ON KEYS meta(`emp`).id where ANY j IN tokens(emp.join_yr) SATISFIES j <= 2014 and j >= 2012 end;" + actual_result = self.run_cbq_query() + actual_result = sorted(actual_result['results']) + self.query = "select emp.name, ARRAY item.department FOR item in items end departments " + \ + "FROM %s emp USE INDEX(`#primary`) NEST %s items " % ( + bucket.name, bucket.name) + \ + "ON KEYS meta(`emp`).id where ANY j IN tokens(emp.join_yr) SATISFIES j <= 2014 and j >= 2012 end;" + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + self.assertTrue(actual_result == expected_result) + finally: + for idx in 
created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_array_tokens_sum(self): + for bucket in self.buckets: + created_indexes = [] + for ind in xrange(self.num_indexes): + index_name = "indexwitharraysum%s" % ind + self.query = "CREATE INDEX %s ON %s(department, DISTINCT ARRAY round(v.memory + v.RAM) FOR v in tokens(VMs) END ) where join_yr=2012 USING %s" % (index_name, bucket.name,self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + self.run_cbq_query() + created_indexes.append(index_name) + + for bucket in self.buckets: + try: + for index_name in created_indexes: + self.query = "EXPLAIN SELECT count(name),department" + \ + " FROM %s where join_yr=2012 AND ANY v IN tokens(VMs) SATISFIES round(v.memory+v.RAM)<100 END AND department = 'Engineer' GROUP BY department" % (bucket.name) + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "Union Scan is not being used") + result1 = plan['~children'][0]['scan']['index'] + self.assertTrue(result1 == index_name) + self.query = "SELECT count(name),department" + \ + " FROM %s where join_yr=2012 AND ANY v IN tokens(VMs) SATISFIES round(v.memory+v.RAM)<100 END AND department = 'Engineer' GROUP BY department" % (bucket.name) + actual_result = self.run_cbq_query() + actual_result = sorted(actual_result['results']) + self.query = "SELECT count(name),department" + \ + " FROM %s use index(`#primary`) where join_yr=2012 AND ANY v IN tokens(VMs) SATISFIES round(v.memory+v.RAM)<100 END AND department = 'Engineer' GROUP BY department" % (bucket.name) + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + self.assertTrue(actual_result == expected_result) + finally: + for index_name in set(created_indexes): + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, index_name,self.index_type) + self.run_cbq_query() + + def test_nesttokens_keys_where_not_equal(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nestidx" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY j for j in tokens(department) end) USING %s" % ( + idx, bucket.name, self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN select emp.name " + \ + "FROM %s emp NEST %s VMs " % ( + bucket.name, bucket.name) + \ + "ON KEYS meta(`emp`).id where ANY j IN tokens(emp.department) SATISFIES j != 'Engineer' end;" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "Union Scan is not being used") + result1 = plan['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + self.query = "select emp.name " + \ + "FROM %s emp NEST %s VMs " % ( + bucket.name, bucket.name) + \ + "ON KEYS meta(`emp`).id where ANY j IN tokens(emp.department) SATISFIES j = 'Support' end;" + actual_result = self.run_cbq_query() + actual_result = actual_result['results'] + self.query = "select 
emp.name " + \ + "FROM %s emp USE INDEX(`#primary`) NEST %s items " % ( + bucket.name, bucket.name) + \ + "ON KEYS meta(`emp`).id where ANY j IN tokens(emp.department) SATISFIES j = 'Support' end;" + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + self.assertTrue(sorted(actual_result) == expected_result) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_array_tokens_nest_keys_where(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nestidx" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY j for j in tokens(department) end) USING %s" % ( + idx, bucket.name, self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN select emp.name " + \ + "FROM %s emp NEST %s items " % ( + bucket.name, bucket.name) + \ + "ON KEYS meta(`emp`).id where ANY j IN tokens(emp.department) SATISFIES j = 'Support' end;" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "Union Scan is not being used") + result1 = plan['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + self.query = "select emp.name " + \ + "FROM %s emp NEST %s items " % ( + bucket.name, bucket.name) + \ + "ON KEYS meta(`emp`).id where ANY j IN tokens(emp.department) SATISFIES j = 'Support' end;" + actual_result = self.run_cbq_query() + actual_result = sorted(actual_result['results']) + self.query = "select emp.name " + \ + "FROM %s emp USE INDEX(`#primary`) NEST %s items " % ( + bucket.name, bucket.name) + \ + "ON KEYS meta(`emp`).id where ANY j IN tokens(emp.department) SATISFIES j = 'Support' end;" + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + self.assertTrue(actual_result == expected_result) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_array_tokens_regexp(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "iregex" + self.query = " CREATE INDEX %s ON %s( DISTINCT ARRAY REGEXP_LIKE(v.os,%s) FOR v IN tokens(VMs) END ) USING %s" % ( + idx, bucket.name,"'ub%'", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN select * from %s WHERE ANY v IN tokens(VMs) SATISFIES REGEXP_LIKE(v.os,%s) = 1 END " % ( + bucket.name,"'ub%'") + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + 
plan['~children'][0]['~children'][0]['#operator'] == 'DistinctScan', + "DistinctScan is not being used") + result1 = plan['~children'][0]['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + self.query = "select * from %s WHERE ANY v IN tokens(VMs) SATISFIES REGEXP_LIKE(v.os,%s) = 1 END " % ( + bucket.name,"'ub%'") + \ + "order BY tokens(name) limit 10" + actual_result = self.run_cbq_query() + self.query = "select * from %s use index(`#primary`) WHERE ANY v IN tokens(VMs) SATISFIES REGEXP_LIKE(v.os,%s) = 1 END " % ( + bucket.name,"'ub%'") + \ + "order BY tokens(name) limit 10" + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + + self.assertTrue(expected_result==sorted(actual_result['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_array_tokens_left_outer_join(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "outer_join" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY ( DISTINCT array j.city for j in i end) FOR i in tokens(%s) END) USING %s" % ( + idx, bucket.name, "address", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + idx2 = "outer_join_all" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY ( All array j.city for j in i end) FOR i in tokens(%s) END) USING %s" % ( + idx2, bucket.name, "address", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx2) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx2) + self.assertTrue(self._is_index_in_list(bucket, idx2), "Index is not in list") + self.query = "EXPLAIN SELECT new_project_full.department new_project " +\ + "FROM %s as employee use index (%s) left JOIN default as new_project_full " % (bucket.name,idx) +\ + "ON KEYS meta(`employee`).id WHERE ANY i IN tokens(employee.address) SATISFIES (ANY j IN i SATISFIES j.city='Delhi' end) END " + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "DistinctScan is not being used") + result1 = plan['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + self.query = "EXPLAIN SELECT new_project_full.department new_project " +\ + "FROM %s as employee use index (%s) left JOIN default as new_project_full " % (bucket.name,idx2) +\ + "ON KEYS meta(`employee`).id WHERE ANY i IN tokens(employee.address) SATISFIES (ANY j IN i SATISFIES j.city='Delhi' end) END " + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "DistinctScan is not being used") + result1 = plan['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx2) + self.query = "SELECT new_project_full.department new_project " +\ + "FROM %s as employee use index (%s) left JOIN default as new_project_full " % (bucket.name,idx) +\ + "ON KEYS
meta(`employee`).id WHERE ANY i IN tokens(employee.address) SATISFIES (ANY j IN i SATISFIES j.city='Delhi' end) END " + actual_result = self.run_cbq_query() + actual_result1 = (actual_result['results']) + self.query = "SELECT new_project_full.department new_project " +\ + "FROM %s as employee use index (%s) left JOIN default as new_project_full " % (bucket.name,idx2) +\ + "ON KEYS meta(`employee`).id WHERE ANY i IN tokens(employee.address) SATISFIES (ANY j IN i SATISFIES j.city='Delhi' end) END " + actual_result = self.run_cbq_query() + actual_result2 = (actual_result['results']) + self.query = "SELECT new_project_full.department new_project " +\ + "FROM %s as employee use index (`#primary`) left JOIN default as new_project_full " % (bucket.name) +\ + "ON KEYS meta(`employee`).id WHERE ANY i IN tokens(employee.address) SATISFIES (ANY j IN i SATISFIES j.city='Delhi' end) END " + actual_result = self.run_cbq_query() + actual_result3 = (actual_result['results']) + self.assertTrue(actual_result1==actual_result3) + self.assertTrue(actual_result2==actual_result3) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_array_tokens_with_inner_joins(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nested_inner_join" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY ( DISTINCT array j.city for j in i end) FOR i in tokens(%s) END) USING %s" % ( + idx, bucket.name, "address", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN SELECT new_project_full.department new_project " +\ + "FROM %s as employee JOIN default as new_project_full " % (bucket.name) +\ + "ON KEYS meta(`employee`).id WHERE ANY i IN tokens(employee.address) SATISFIES (ANY j IN i SATISFIES j.city='Delhi' end) END " + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "DistinctScan is not being used") + result1 = plan['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_array_partial_tokens_all(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "idx" + self.query = "CREATE INDEX %s ON %s( all array i FOR i in tokens(%s) END) WHERE (department = 'Support') USING %s" % ( + idx, bucket.name, "hobbies.hobby", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN select * from %s WHERE department = 'Support' and (ANY i IN
tokens(%s.hobbies.hobby) SATISFIES i = 'art' END) " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['~children'][0]['#operator'] == 'DistinctScan', + "Union Scan is not being used") + result1 = plan['~children'][0]['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + self.query = "select name from %s WHERE department = 'Support' and ANY i IN tokens(%s.hobbies.hobby) SATISFIES i = 'art' END " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + actual_result = sorted(actual_result['results']) + self.query = "select name from %s WHERE department = 'Support' and ANY i IN tokens(%s.hobbies.hobby) SATISFIES i = 'art' END " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + self.assertTrue(actual_result==expected_result) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_array_tokens_greatest(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "igreatest" + self.query = " CREATE INDEX %s ON %s(department, DISTINCT ARRAY GREATEST(v.RAM,100) FOR v IN tokens(VMs) END ) USING %s" % ( + idx, bucket.name, self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN select name from %s WHERE department = 'Support' and ANY v IN tokens(VMs) SATISFIES GREATEST(v.RAM,100) END " % ( + bucket.name) + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "Union Scan is not being used") + result1 = plan['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + self.query = "select name from %s WHERE department = 'Support' and ANY v IN tokens(VMs) SATISFIES GREATEST(v.RAM,100) END " % ( + bucket.name) + actual_result = self.run_cbq_query() + actual_result = actual_result['results'] + self.query = "select name from %s USE index(`#primary`) WHERE department = 'Support' and ANY v IN tokens(VMs) SATISFIES GREATEST(v.RAM,100) END " % ( + bucket.name) + expected_result = self.run_cbq_query() + expected_result = expected_result['results'] + self.assertTrue(sorted(expected_result)==sorted(actual_result)) + + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_array_partial_tokens_distinct(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "idx" + self.query = "CREATE INDEX %s ON %s( distinct array i FOR i in tokens(%s) END) WHERE (department = 'Support') USING %s" % ( + idx, bucket.name, "hobbies.hobby", self.index_type) + # if self.gsi_type: + # self.query += " WITH 
{'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN select * from %s WHERE department = 'Support' and (ANY i IN tokens(%s.hobbies.hobby) SATISFIES i = 'art' END) " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['~children'][0]['#operator'] == 'DistinctScan', + "Union Scan is not being used") + result1 = plan['~children'][0]['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + self.query = "select name from %s WHERE department = 'Support' and ANY i IN tokens(%s.hobbies.hobby) SATISFIES i = 'art' END " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + actual_result = sorted(actual_result['results']) + self.query = "select name from %s use index (`#primary`) WHERE department = 'Support' and ANY i IN tokens(%s.hobbies.hobby) SATISFIES i = 'art' END " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + self.assertTrue(actual_result==expected_result) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_nested_attr_array_tokens_in_all(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nested_idx_attr" + self.query = "CREATE INDEX %s ON %s( ALL ARRAY ( all array j for j in i.dance end) FOR i in tokens(%s) END) USING %s" % ( + idx, bucket.name, "hobbies.hobby", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + self.query = "EXPLAIN select name from %s WHERE ANY i IN tokens(%s.hobbies.hobby) SATISFIES (ANY j IN i.dance SATISFIES j='contemporary' end) END and department='Support'" % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['~children'][0]['#operator'] == 'DistinctScan', + "DistinctScan is not being used") + result1 = plan['~children'][0]['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + + self.query = "select name from %s WHERE ANY i IN tokens(%s.hobbies.hobby) SATISFIES (ANY j IN i.dance SATISFIES j='contemporary' end) END and department='Support'" % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + actual_result = sorted(actual_result['results']) + self.query = "select name from %s USE INDEX(`#primary`) WHERE ANY i IN tokens(%s.hobbies.hobby) SATISFIES (ANY j IN i.dance SATISFIES j='contemporary' end) END and department='Support'" % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + 
self.assertTrue(actual_result==expected_result) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_nested_attr_token_within(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nested_idx_attr" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY i FOR i within tokens(%s) END) USING %s" % ( + idx, bucket.name, "hobbies", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + idx2 = "nested_idx_attr2" + self.query = "CREATE INDEX %s ON %s( ALL ARRAY i FOR i within tokens(%s) END) USING %s" % ( + idx2, bucket.name, "hobbies", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx2) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx2) + self.assertTrue(self._is_index_in_list(bucket, idx2), "Index is not in list") + + self.query = "EXPLAIN select name from %s use index(%s) WHERE ANY i within tokens(%s.hobbies) SATISFIES i = 'bhangra' END " % ( + bucket.name,idx2,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['~children'][0]['#operator'] == 'DistinctScan', + "DistinctScan is not being used") + result1 = plan['~children'][0]['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx2) + + self.query = "EXPLAIN select name from %s use index(%s) WHERE ANY i within tokens(%s.hobbies) SATISFIES i = 'bhangra' END " % ( + bucket.name,idx,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['~children'][0]['#operator'] == 'DistinctScan', + "DistinctScan is not being used") + result1 = plan['~children'][0]['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + + self.query = "select name from %s use index(%s) WHERE ANY i within tokens(%s.hobbies) SATISFIES i = 'bhangra' END " % ( + bucket.name,idx,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + actual_result1 = sorted(actual_result['results']) + + self.query = "select name from %s use index(%s) WHERE ANY i within tokens(%s.hobbies) SATISFIES i = 'bhangra' END " % ( + bucket.name,idx2,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + actual_result2 = sorted(actual_result['results']) + + self.query = "select name from %s use index(`#primary`) WHERE ANY i within tokens(%s.hobbies) SATISFIES i = 'bhangra' END " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + actual_result3 = sorted(actual_result['results']) + + + self.assertTrue(actual_result1 == actual_result2 == actual_result3) + + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) 
+ self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_nested_attr_array_tokens_in_distinct(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nested_idx_attr" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY ( DISTINCT array j for j in i.dance end) FOR i in tokens(%s) END) USING %s" % ( + idx, bucket.name, "hobbies.hobby", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + self.query = "EXPLAIN select name from %s WHERE ANY i IN tokens(%s.hobbies.hobby) SATISFIES (ANY j IN i.dance SATISFIES j='contemporary' end) END and department='Support'" % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['~children'][0]['#operator'] == 'DistinctScan', + "DistinctScan is not being used") + result1 = plan['~children'][0]['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + actual_result = self.run_cbq_query() + self.query = "select name from %s WHERE ANY i IN tokens(%s.hobbies.hobby) SATISFIES (ANY j IN i.dance SATISFIES j='contemporary' end) END and department='Support'" % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + actual_result = sorted(actual_result['results']) + self.query = "select name from %s USE INDEX(`#primary`) WHERE ANY i IN tokens(%s.hobbies.hobby) SATISFIES (ANY j IN i.dance SATISFIES j='contemporary' end) END and department='Support'" % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + self.assertTrue(actual_result==expected_result) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_nested_attr_token(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nested_idx_attr" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY ( DISTINCT array j.region1 for j in i.Marketing end) FOR i in tokens(%s) END) USING %s" % ( + idx, bucket.name, "tasks", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + idx2 = "nested_idx_attr2" + self.query = "CREATE INDEX %s ON %s( ALL ARRAY ( All array j.region1 for j in i.Marketing end) FOR i in tokens(%s) END) USING %s" % ( + idx2, bucket.name, "tasks", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx2) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx2) + self.assertTrue(self._is_index_in_list(bucket, idx2), "Index is not in list") + + idx3 = "nested_idx_attr3" + self.query = "CREATE INDEX %s ON %s( ALL ARRAY 
( DISTINCT array j.region1 for j in i.Marketing end) FOR i in tokens(%s) END) USING %s" % ( + idx3, bucket.name, "tasks", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx3) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx3) + self.assertTrue(self._is_index_in_list(bucket, idx3), "Index is not in list") + + idx4 = "nested_idx_attr4" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY ( ALL array j.region1 for j in i.Marketing end) FOR i in tokens(%s) END) USING %s" % ( + idx4, bucket.name, "tasks", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx4) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx4) + self.assertTrue(self._is_index_in_list(bucket, idx4), "Index is not in list") + + self.query = "EXPLAIN select name from %s USE INDEX(%s) WHERE ANY i IN tokens(%s.tasks) SATISFIES (ANY j IN i.Marketing SATISFIES j.region1='South' end) END " % ( + bucket.name,idx,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['~children'][0]['#operator'] == 'DistinctScan', + "DistinctScan is not being used") + result1 = plan['~children'][0]['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + + self.query = "EXPLAIN select name from %s USE INDEX(%s) WHERE ANY i IN tokens(%s.tasks) SATISFIES (ANY j IN i.Marketing SATISFIES j.region1='South' end) END " % ( + bucket.name,idx2,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['~children'][0]['#operator'] == 'DistinctScan', + "DistinctScan is not being used") + result1 = plan['~children'][0]['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx2) + + self.query = "EXPLAIN select name from %s USE INDEX(%s) WHERE ANY i IN tokens(%s.tasks) SATISFIES (ANY j IN i.Marketing SATISFIES j.region1='South' end) END " % ( + bucket.name,idx3,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['~children'][0]['#operator'] == 'DistinctScan', + "Union Scan is not being used") + result1 = plan['~children'][0]['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx3) + + self.query = "EXPLAIN select name from %s USE INDEX(%s) WHERE ANY i IN tokens(%s.tasks) SATISFIES (ANY j IN i.Marketing SATISFIES j.region1='South' end) END " % ( + bucket.name,idx4,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['~children'][0]['#operator'] == 'DistinctScan', + "Union Scan is not being used") + result1 = plan['~children'][0]['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx4) + + self.query = "select name from %s use index (%s) WHERE ANY i IN tokens(%s.tasks) SATISFIES (ANY j IN i.Marketing SATISFIES j.region1='South' end) END " % ( + bucket.name,idx,bucket.name) + \ + "order BY name limit 10" + actual_result1 = self.run_cbq_query() + self.query = "select name from %s use index (%s) WHERE ANY i IN tokens(%s.tasks) SATISFIES (ANY j IN i.Marketing SATISFIES j.region1='South' end) END 
" % ( + bucket.name,idx2,bucket.name) + \ + "order BY name limit 10" + actual_result2 = self.run_cbq_query() + + self.query = "select name from %s use index (%s) WHERE ANY i IN tokens(%s.tasks) SATISFIES (ANY j IN i.Marketing SATISFIES j.region1='South' end) END " % ( + bucket.name,idx3,bucket.name) + \ + "order BY name limit 10" + actual_result3 = self.run_cbq_query() + + self.query = "select name from %s use index (%s) WHERE ANY i IN tokens(%s.tasks) SATISFIES (ANY j IN i.Marketing SATISFIES j.region1='South' end) END " % ( + bucket.name,idx4,bucket.name) + \ + "order BY name limit 10" + actual_result4 = self.run_cbq_query() + + + self.query = "select name from %s use index (`#primary`) WHERE ANY i IN tokens(%s.tasks) SATISFIES (ANY j IN i.Marketing SATISFIES j.region1='South' end) END " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result5 = self.run_cbq_query() + + self.assertTrue(sorted(actual_result1['results'])==sorted(actual_result2['results'])) + self.assertTrue(sorted(actual_result2['results'])==sorted(actual_result3['results'])) + self.assertTrue(sorted(actual_result4['results'])==sorted(actual_result3['results'])) + self.assertTrue(sorted(actual_result4['results'])==sorted(actual_result5['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + + def test_unnest_multilevel_attribute_tokens(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nested_idx_attr_nest" + self.query = "CREATE INDEX %s ON %s( ALL ARRAY ( DISTINCT array j.region1 for j in tokens(i.Marketing) end) FOR i in %s END) USING %s" % ( + idx, bucket.name, "tasks", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + + self.query = "explain SELECT emp.name FROM %s emp UNNEST emp.tasks as i UNNEST tokens(i.Marketing) as j where j.region1 = 'South'" % (bucket.name) + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue( + plan['~children'][0]['#operator'] == 'DistinctScan', + "Union Scan is not being used") + result1 = plan['~children'][0]['scan']['index'] + self.assertTrue(result1 == idx) + + self.query = "select name from %s WHERE ANY i IN %s.tasks SATISFIES (ANY j IN tokens(i.Marketing) SATISFIES j.region1='South' end) END " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + self.query = "select name from %s USE INDEX(`#primary`) WHERE ANY i IN %s.tasks SATISFIES (ANY j IN tokens(i.Marketing) SATISFIES j.region1='South' end) END " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + expected_result = self.run_cbq_query() + self.assertTrue(sorted(actual_result['results'])==sorted(expected_result['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + + def 
test_tokens_join_unnest_alias_covering(self): + for bucket in self.buckets: + created_indexes=[] + try: + idx4 = "idxVM2" + self.query = "CREATE INDEX %s ON %s( aLL ARRAY x.RAM FOR x within tokens(%s) END,VMs) USING %s" % ( + idx4, bucket.name, "VMs", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + create_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx4) + self._verify_results(create_result['results'], []) + created_indexes.append(idx4) + + self.assertTrue(self._is_index_in_list(bucket, idx4), "Index is not in list") + + # idx5 = "idxVM3" + # self.query = "CREATE INDEX %s ON %s( aLL ARRAY x.os FOR x within %s END) USING %s" % ( + # idx5, bucket.name, "VMs", self.index_type) + # # if self.gsi_type: + # # self.query += " WITH {'index_type': 'memdb'}" + # create_result = self.run_cbq_query() + # self._wait_for_index_online(bucket, idx5) + # self._verify_results(create_result['results'], []) + # created_indexes.append(idx5) + + #self.assertTrue(self._is_index_in_list(bucket, idx5), "Index is not in list") + + self.query = "explain SELECT x FROM default emp1 UNNEST tokens(emp1.VMs) as x JOIN default task ON KEYS meta(`emp1`).id where x.RAM > 1 and x.RAM < 5 ;" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.query = "SELECT x FROM default emp1 UNNEST tokens(emp1.VMs) as x JOIN default task ON KEYS meta(`emp1`).id where x.RAM > 1 and x.RAM < 5 ;" + actual_result = self.run_cbq_query() + self.query = "SELECT x FROM default emp1 USE INDEX(`#primary`) UNNEST tokens(emp1.VMs) as x JOIN default task ON KEYS meta(`emp1`).id where x.RAM > 1 and x.RAM < 5 ;" + expected_result = self.run_cbq_query() + self.assertTrue(sorted(actual_result['results']) ==sorted(expected_result['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_simple_tokens_all(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "idxjoin_yr" + self.query = 'CREATE INDEX %s ON %s( ALL ARRAY v FOR v in tokens(%s,{"case":"lower"}) END) USING %s' % ( + idx, bucket.name, "join_yr", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + idx2 = "idxVM" + self.query = 'CREATE INDEX %s ON %s( All ARRAY x.RAM FOR x in tokens(%s,{"names":true}) END) USING %s' % ( + idx2, bucket.name, "VMs", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx2) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx2) + + self.query = 'EXPLAIN select name from %s where any v in tokens(%s.join_yr,{"case":"lower"}) satisfies v = 2016 END ' % ( + bucket.name, bucket.name) + \ + 'AND (ANY x IN tokens(%s.VMs,{"names":true}) SATISFIES x.RAM between 1 and 5 END) ' % (bucket.name) + \ + 'AND NOT (department = "Manager") ORDER BY name limit 10' + actual_result = self.run_cbq_query() + + plan = ExplainPlanHelper(actual_result) + 
self.assertTrue(plan['~children'][0]['~children'][0]['#operator'] == 'IntersectScan') + + result1 = plan['~children'][0]['~children'][0]['scans'][0]['scan']['index'] + result2 = plan['~children'][0]['~children'][0]['scans'][1]['scan']['index'] + self.assertTrue(result1 == idx2 or result1 == idx) + self.assertTrue(result2 == idx or result2 == idx2) + self.query = 'select name from %s where any v in tokens(%s.join_yr,{"case":"lower"}) satisfies v = 2016 END ' % ( + bucket.name, bucket.name) + \ + 'AND (ANY x IN tokens(%s.VMs,{"names":true}) SATISFIES x.RAM between 1 and 5 END) ' % (bucket.name) + \ + 'AND NOT (department = "Manager") ORDER BY name limit 10' + actual_result = self.run_cbq_query() + + self.query = 'select name from %s use index (`#primary`) where any v in tokens(%s.join_yr,{"case":"lower"}) satisfies v = 2016 END ' % ( + bucket.name, bucket.name) + \ + 'AND (ANY x IN tokens(%s.VMs,{"names":true}) SATISFIES x.RAM between 1 and 5 END) ' % (bucket.name) + \ + 'AND NOT (department = "Manager") ORDER BY name limit 10' + expected_result = self.run_cbq_query() + self.assertTrue(sorted(actual_result['results'])==sorted(expected_result['results'])) + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + drop_result = self.run_cbq_query() + self._verify_results(drop_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + created_indexes.remove(idx) + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx2, self.index_type) + drop_result = self.run_cbq_query() + self._verify_results(drop_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx2), "Index is in list") + created_indexes.remove(idx2) + + idx3 = "idxjoin_yr2" + self.query = 'CREATE INDEX %s ON %s( all ARRAY v FOR v within tokens(%s,{"names":true,"case":"lower"}) END) USING %s' % ( + + idx3, bucket.name, "join_yr", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + create_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx3) + self._verify_results(create_result['results'], []) + created_indexes.append(idx3) + self.assertTrue(self._is_index_in_list(bucket, idx3), "Index is not in list") + idx4 = "idxVM2" + self.query = 'CREATE INDEX %s ON %s( aLL ARRAY x.RAM FOR x within tokens(%s,{"names":true,"case":"lower"}) END) USING %s' % ( + idx4, bucket.name, "VMs", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + create_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx4) + self._verify_results(create_result['results'], []) + created_indexes.append(idx4) + + self.assertTrue(self._is_index_in_list(bucket, idx4), "Index is not in list") + + self.query = 'EXPLAIN select name from %s where any v in tokens(%s.join_yr,{"case":"lower","names":true}) satisfies v = 2016 END ' % ( + bucket.name, bucket.name) + \ + 'AND (ANY x IN tokens(%s.VMs,{"names":true,"case":"lower"}) SATISFIES x.RAM between 1 and 5 END) ' % (bucket.name) + \ + 'AND NOT (department = "Manager") ORDER BY name limit 10' + actual_result_within = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result_within) + self.assertTrue( + plan['~children'][0]['~children'][0]['#operator'] == 'IntersectScan', + "Intersect Scan is not being used in and query for 2 array indexes") + + result1 = plan['~children'][0]['~children'][0]['scans'][0]['scan']['index'] + result2 = plan['~children'][0]['~children'][0]['scans'][1]['scan']['index'] + self.assertTrue(result1 ==
idx3 or result1 == idx4) + self.assertTrue(result2 == idx4 or result2 == idx3) + + idx5 = "idxtokens1" + self.query = 'CREATE INDEX %s ON %s( all ARRAY v FOR v within tokens(%s) END) USING %s' % ( + + idx5, bucket.name, "join_yr", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + create_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx5) + self._verify_results(create_result['results'], []) + created_indexes.append(idx5) + self.assertTrue(self._is_index_in_list(bucket, idx5), "Index is not in list") + + idx8 = "idxtokens2" + self.query = 'CREATE INDEX %s ON %s( all ARRAY v FOR v within tokens(%s,{"names":true,"case":"lower","specials":false}) END) USING %s' % ( + + idx8, bucket.name, "join_yr", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + create_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx8) + self._verify_results(create_result['results'], []) + created_indexes.append(idx8) + self.assertTrue(self._is_index_in_list(bucket, idx8), "Index is not in list") + + idx6 = "idxVM4" + self.query = 'CREATE INDEX %s ON %s( aLL ARRAY x.RAM FOR x within tokens(%s,{"specials":false}) END) USING %s' % ( + idx6, bucket.name, "VMs", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + create_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx6) + self._verify_results(create_result['results'], []) + created_indexes.append(idx6) + + self.assertTrue(self._is_index_in_list(bucket, idx6), "Index is not in list") + + idx7 = "idxVM3" + self.query = 'CREATE INDEX %s ON %s( aLL ARRAY x.RAM FOR x within tokens(%s,{"names":true,"case":"lower"}) END) USING %s' % ( + idx7, bucket.name, "VMs", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + create_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx7) + self._verify_results(create_result['results'], []) + created_indexes.append(idx7) + + self.assertTrue(self._is_index_in_list(bucket, idx7), "Index is not in list") + + self.query = 'EXPLAIN select name from %s where any v within tokens(%s.join_yr,{"case"}) satisfies v = 2016 END ' % ( + bucket.name, bucket.name) + \ + 'AND (ANY x within tokens(%s.VMs,{"specials"}) SATISFIES x.RAM between 1 and 5 END) ' % (bucket.name) + \ + 'AND NOT (department = "Manager") ORDER BY name limit 10' + # actual_result_within = self.run_cbq_query() + # self.assertTrue("Object member has no value" in actual_result_within) + + self.query = 'select name from %s where any v in tokens(%s.join_yr,{"case":"lower","names":true,"specials":false}) satisfies v = 2016 END ' % ( + bucket.name, bucket.name) + \ + 'AND (ANY x within tokens(%s.VMs,{"specials":false}) SATISFIES x.RAM between 1 and 5 END ) ' % (bucket.name) + \ + 'AND NOT (department = "Manager") order by name limit 10' + actual_result_within = self.run_cbq_query() + self.query = "select name from %s use index (`#primary`) where any v in tokens(%s.join_yr) satisfies v = 2016 END " % ( + bucket.name, bucket.name) + \ + "AND (ANY x within tokens(%s.VMs) SATISFIES x.RAM between 1 and 5 END ) " % (bucket.name) + \ + "AND NOT (department = 'Manager') order by name limit 10" + expected_result = self.run_cbq_query() + self.assertTrue(sorted(actual_result_within['results']) == sorted(expected_result['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = 
self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_nested_attr_tokens(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nested_idx_attr" + self.query = "CREATE INDEX %s ON %s( ALL ARRAY ( all array j for j in i.dance end) FOR i in tokens(%s) END,hobbies.hobby,department,name) USING %s" % ( + idx, bucket.name, "hobbies.hobby", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + self.query = "EXPLAIN select name from %s WHERE ANY i IN tokens(%s.hobbies.hobby) SATISFIES (ANY j IN i.dance SATISFIES j='contemporary' end) END and department='Support'" % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + + self.assertTrue("covers" in str(plan)) + self.query = "select name from %s WHERE ANY i IN tokens(%s.hobbies.hobby) SATISFIES (ANY j IN i.dance SATISFIES j='contemporary' end) END and department='Support'" % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + actual_result = sorted(actual_result['results']) + self.query = "select name from %s USE INDEX(`#primary`) WHERE ANY i IN tokens(%s.hobbies.hobby) SATISFIES (ANY j IN i.dance SATISFIES j='contemporary' end) END and department='Support'" % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + self.assertTrue(actual_result==expected_result) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx),"Index is in list") + + def test_tokens_partial_index_distinct_covering(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "idx" + self.query = 'CREATE INDEX %s ON %s( distinct array i FOR i in tokens(%s,{"names":true}) END,hobbies.hobby,name) WHERE (department = "Support") USING %s' % ( + idx, bucket.name, "hobbies.hobby", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = 'EXPLAIN select name from %s WHERE department = "Support" and (ANY i IN tokens(%s.hobbies.hobby,{"names":true}) SATISFIES i = "art" END) ' % ( + bucket.name,bucket.name) + \ + 'order BY name limit 10' + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.query = 'select name from %s WHERE department = "Support" and ( ANY i IN tokens(%s.hobbies.hobby,{"names":true} ) SATISFIES i = "art" END) ' % (bucket.name,bucket.name) + \ + "order BY name limit 10" + + actual_result = self.run_cbq_query() + actual_result = sorted(actual_result['results']) + self.query = 'select name from %s use index (`#primary`) WHERE department = "Support" 
and (ANY i IN tokens(%s.hobbies.hobby,{"names":true}) SATISFIES i = "art" END) ' % ( + bucket.name,bucket.name) + \ + 'order BY name limit 10' + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + self.assertTrue(actual_result==expected_result) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_tokens_distinct_covering(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "idx" + self.query = "CREATE INDEX %s ON %s( distinct array i FOR i in tokens(%s) END,hobbies.hobby,name) WHERE (department = 'Support') USING %s" % ( + idx, bucket.name, "hobbies.hobby", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN select name from %s WHERE department = 'Support' and (ANY i IN tokens(%s.hobbies.hobby) SATISFIES i = 'art' END) " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + print plan + self.assertTrue("covers" in str(plan)) + self.query = "select name from %s WHERE department = 'Support' and ANY i IN tokens(%s.hobbies.hobby) SATISFIES i = 'art' END " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + actual_result = self.run_cbq_query() + actual_result = sorted(actual_result['results']) + self.query = "select name from %s use index (`#primary`) WHERE department = 'Support' and ANY i IN tokens(%s.hobbies.hobby) SATISFIES i = 'art' END " % ( + bucket.name,bucket.name) + \ + "order BY name limit 10" + expected_result = self.run_cbq_query() + expected_result = sorted(expected_result['results']) + self.assertTrue(actual_result==expected_result) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_tokens_with_inner_joins_covering(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nested_inner_join" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY ( DISTINCT array j.city for j in i end) FOR i in tokens(%s) END,address,department) USING %s" % ( + idx, bucket.name, "address", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN SELECT employee.department new_project " +\ + "FROM %s as employee JOIN default as new_project_full " % (bucket.name) +\ + "ON KEYS meta(`employee`).id WHERE ANY i IN tokens(employee.address) SATISFIES (ANY j IN i SATISFIES j.city='Delhi' end) END " + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.query = 
"SELECT employee.department new_project " +\ + "FROM %s as employee JOIN default as new_project_full " % (bucket.name) +\ + "ON KEYS meta(`employee`).id WHERE ANY i IN tokens(employee.address) SATISFIES (ANY j IN i SATISFIES j.city='Delhi' end) END " + actual_result = self.run_cbq_query() + self.query = "SELECT employee.department new_project " +\ + "FROM %s as employee use index (`#primary`) JOIN default as new_project_full " % (bucket.name) +\ + "ON KEYS meta(`employee`).id WHERE ANY i IN tokens(employee.address) SATISFIES (ANY j IN i SATISFIES j.city='Delhi' end) END " + expected_result = self.run_cbq_query() + expected_result = expected_result['results'] + self.assertTrue(sorted(expected_result)==sorted(actual_result['results'])) + + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + + def test_tokens_regexp_covering(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "iregex" + self.query = " CREATE INDEX %s ON %s( DISTINCT ARRAY REGEXP_LIKE(v.os,%s) FOR v IN tokens(VMs) END,VMs ) USING %s" % ( + idx, bucket.name,"'ub%'", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = "EXPLAIN select VMs from %s WHERE ANY v IN tokens(VMs) SATISFIES REGEXP_LIKE(v.os,%s) = 1 END " % ( + bucket.name,"'ub%'") + \ + "limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + + self.assertTrue("covers" in str(plan)) + + self.query = "select VMs from %s WHERE ANY v IN tokens(VMs) SATISFIES REGEXP_LIKE(v.os,%s) = 1 END " % ( + bucket.name,"'ub%'") + \ + "limit 10" + actual_result = self.run_cbq_query() + self.query = "select VMs from %s use index(`#primary`) WHERE ANY v IN tokens(VMs) SATISFIES REGEXP_LIKE(v.os,%s) = 1 END " % ( + bucket.name,"'ub%'") + \ + "limit 10" + + expected_result = self.run_cbq_query() + expected_result = expected_result['results'] + + self.assertTrue(sorted(expected_result)==sorted(actual_result['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + + def test_tokens_greatest_covering(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "igreatest" + self.query = " CREATE INDEX %s ON %s( department, DISTINCT ARRAY GREATEST(v.RAM,100) FOR v IN tokens(VMs) END ,VMs,name ) USING %s" % ( + idx, bucket.name, self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + self.query = " EXPLAIN select name from %s WHERE department = 'Support' and ANY v IN tokens(VMs) SATISFIES GREATEST(v.RAM,100) END " % ( + bucket.name) + actual_result = self.run_cbq_query() + plan = 
ExplainPlanHelper(actual_result) + print plan + self.assertTrue("covers" in str(plan)) + self.query = "select name from %s WHERE department = 'Support' and ANY v IN tokens(VMs) SATISFIES GREATEST(v.RAM,100) END " % ( + bucket.name) + actual_result = self.run_cbq_query() + actual_result = actual_result['results'] + self.query = "select name from %s USE index(`#primary`) WHERE department = 'Support' and ANY v IN tokens(VMs) SATISFIES GREATEST(v.RAM,100) END " % ( + bucket.name) + expected_result = self.run_cbq_query() + expected_result = expected_result['results'] + self.assertTrue(sorted(expected_result)==sorted(actual_result)) + + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_unnest_tokens_covering(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "unnest_idx" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY ( ALL array j for j in i end) FOR i in tokens(%s) END,department,tasks,name) USING %s" % ( + idx, bucket.name, "tasks", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + self.query = "EXPLAIN select %s.name from %s UNNEST TOKENS(tasks) AS i UNNEST i AS j WHERE j = 'Search' " % ( + bucket.name,bucket.name) + \ + "AND (ANY x IN TOKENS(default.tasks) SATISFIES x = 'Sales' END) " + \ + "AND NOT (%s.department = 'Manager') order BY %s.name limit 10" % (bucket.name,bucket.name) + + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + + self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][2]) == ("cover ((`default`.`tasks`))" )) + self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][3]) == ("cover ((`default`.`name`))" )) + + self.query = "select %s.name from %s UNNEST TOKENS(tasks) AS i UNNEST i AS j WHERE j = 'Search' " % ( + bucket.name,bucket.name) + \ + "AND (ANY x IN tokens(%s.tasks) SATISFIES x = 'Sales' END) " % (bucket.name) + \ + "AND NOT (%s.department = 'Manager') order BY %s.name limit 10" % (bucket.name,bucket.name) + actual_result = self.run_cbq_query() + self.query = "select %s.name from %s use index (`#primary`) UNNEST TOKENS(tasks) AS i UNNEST i as j WHERE j = 'Search' " % ( + bucket.name,bucket.name) + \ + "AND (ANY x IN tokens(%s.tasks) SATISFIES x = 'Sales' END) " % (bucket.name) + \ + "AND NOT (%s.department = 'Manager') order BY %s.name limit 10" % (bucket.name,bucket.name) + expected_result = self.run_cbq_query() + + self.assertEqual(sorted(actual_result['results']),sorted(expected_result['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_nested_token_covering(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "nested_idx" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY ( DISTINCT ARRAY j for j in i end) FOR 
i in tokens(%s) END,tasks,department,name)" % ( + idx, bucket.name, "tasks") + + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + + self.query = "EXPLAIN select name from %s WHERE ANY i IN tokens(tasks) SATISFIES (ANY j IN i SATISFIES j='Search' end) END " % ( + bucket.name) + \ + "AND NOT (department = 'Manager') order BY name limit 10" + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['index']) == idx) + self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][1]) == ("cover ((`%s`.`tasks`))" % bucket.name)) + #self.assertTrue(str(plan['~children'][0]['~children'][0]['scan'][0]['covers'][2]) == ("cover ((`default`.`department`)")) + #self.assertTrue(str(plan['~children'][0]['~children'][0]['scan'][0]['covers'][3]) == ("cover ((meta(`default`).`id`))")) + self.query = "select name from %s WHERE ANY i IN tokens(tasks) SATISFIES (ANY j IN i SATISFIES j='Search' end) END " % ( + bucket.name) + \ + "AND NOT (department = 'Manager') order BY name limit 10" + actual_result = self.run_cbq_query() + + self.query = "select name from %s use index(`#primary`) WHERE ANY i IN tokens(tasks) SATISFIES (ANY j IN i SATISFIES j='Search' end) END " % ( + bucket.name) + \ + "AND NOT (department = 'Manager') order BY name limit 10" + expected_result = self.run_cbq_query() + self.assertTrue(sorted(actual_result['results']) == sorted(expected_result['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_simple_token_nonarrayindex(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "idxjoin_yr" + self.query = "CREATE INDEX %s ON %s( ARRAY v FOR v in tokens(%s) END) USING %s" % ( + idx, bucket.name, "join_yr", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + idx2 = "idxVM" + self.query = "CREATE INDEX %s ON %s( ARRAY x.RAM FOR x in tokens(%s) END) USING %s" % ( + idx2, bucket.name, "VMs", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx2) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx2) + self.query = "select name from %s where any v in tokens(%s.join_yr) satisfies v = 2016 END " % ( + bucket.name, bucket.name) + \ + "AND ( ANY x IN tokens(%s.VMs) SATISFIES x.RAM between 1 and 5 END ) " % (bucket.name) + \ + "AND NOT (department = 'Manager') ORDER BY name limit 10" + actual_result = self.run_cbq_query() + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + drop_result = self.run_cbq_query() + self._verify_results(drop_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + 
created_indexes.remove(idx) + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx2, self.index_type) + drop_result = self.run_cbq_query() + self._verify_results(drop_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx2), "Index is in list") + created_indexes.remove(idx2) + + idx3 = "idxjoin_yr2" + self.query = "CREATE INDEX %s ON %s( ARRAY v FOR v within tokens(%s) END) USING %s" % ( + idx3, bucket.name, "join_yr", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + create_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx3) + self._verify_results(create_result['results'], []) + created_indexes.append(idx3) + self.assertTrue(self._is_index_in_list(bucket, idx3), "Index is not in list") + idx4 = "idxVM2" + self.query = "CREATE INDEX %s ON %s( ARRAY x.RAM FOR x within tokens(%s) END) USING %s" % ( + idx4, bucket.name, "VMs", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + create_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx4) + self._verify_results(create_result['results'], []) + created_indexes.append(idx4) + + self.assertTrue(self._is_index_in_list(bucket, idx4), "Index is not in list") + self.query = "select name from %s USE INDEX(%s) where " % (bucket.name,idx4) + \ + "(ANY x within tokens(%s.VMs) SATISFIES x.RAM between 1 and 5 END ) " % (bucket.name) + \ + "AND NOT (department = 'Manager') order by name limit 10" + actual_result_within = self.run_cbq_query() + self.query = "select name from %s USE INDEX(`#primary`) where " % ( + bucket.name) + \ + "(ANY x within tokens(%s.VMs) SATISFIES x.RAM between 1 and 5 END ) " % (bucket.name) + \ + "AND NOT (department = 'Manager') order by name limit 10" + expected_result = self.run_cbq_query() + self.assertTrue(sorted(actual_result_within['results']) == sorted(expected_result['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_simple_unnest_token(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "unnest_idx" + self.query = "CREATE INDEX %s ON %s( DISTINCT ARRAY ( ALL array j for j in i end) FOR i in tokens(%s) END) USING %s" % ( + idx, bucket.name, "tasks", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + self._verify_results(actual_result['results'], []) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + self.query = "EXPLAIN select %s.name from %s UNNEST tokens(tasks) as i UNNEST i as j WHERE j = 'Search' " % ( + bucket.name,bucket.name) + \ + "AND (ANY x IN %s.tasks SATISFIES x = 'Sales' END) " % (bucket.name) + \ + "AND NOT (%s.department = 'Manager') order BY %s.name limit 10" % (bucket.name,bucket.name) + # self.query = "EXPLAIN select %s.name from %s UNNEST tasks as i UNNEST i as j WHERE j = 'Search' " % ( + # bucket.name,bucket.name) + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + # self.assertTrue( + # plan['~children'][0]['~children'][0]['#operator'] == 'IntersectScan', + # "Intersect Scan is not being used in and query for 2 array indexes") + result1 
=plan['~children'][0]['~children'][0]['scan']['index'] + + #result2 =plan['~children'][0]['~children'][0]['scans'][1]['scans'][0]['index'] + self.assertTrue(result1 == idx ) + #self.assertTrue(result2 == idx or result2 == idx2) + self.query = "select %s.name from %s UNNEST tokens(tasks) as i UNNEST i as j WHERE j = 'Search' " % ( + bucket.name,bucket.name) + \ + "AND (ANY x IN %s.tasks SATISFIES x = 'Sales' END) " % (bucket.name) + \ + "AND NOT (%s.department = 'Manager') order BY %s.name limit 10" % (bucket.name,bucket.name) + actual_result = self.run_cbq_query() + self.query = "select %s.name from %s use index (`#primary`) UNNEST tokens(tasks) as i UNNEST i as j WHERE j = 'Search' " % ( + bucket.name,bucket.name) + \ + "AND (ANY x IN %s.tasks SATISFIES x = 'Sales' END) " % (bucket.name) + \ + "AND NOT (%s.department = 'Manager') order BY %s.name limit 10" % (bucket.name,bucket.name) + expected_result = self.run_cbq_query() + + self.assertEqual(sorted(actual_result['results']),sorted(expected_result['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def test_join_unnest_alias_tokens(self): + for bucket in self.buckets: + created_indexes=[] + try: + idx4 = "idxVM2" + self.query = "CREATE INDEX %s ON %s( aLL ARRAY x.RAM FOR x within tokens(%s) END) USING %s" % ( + idx4, bucket.name, "VMs", self.index_type) + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + create_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx4) + self._verify_results(create_result['results'], []) + created_indexes.append(idx4) + + self.assertTrue(self._is_index_in_list(bucket, idx4), "Index is not in list") + + # idx5 = "idxVM3" + # self.query = "CREATE INDEX %s ON %s( aLL ARRAY x.os FOR x within %s END) USING %s" % ( + # idx5, bucket.name, "VMs", self.index_type) + # # if self.gsi_type: + # # self.query += " WITH {'index_type': 'memdb'}" + # create_result = self.run_cbq_query() + # self._wait_for_index_online(bucket, idx5) + # self._verify_results(create_result['results'], []) + # created_indexes.append(idx5) + + #self.assertTrue(self._is_index_in_list(bucket, idx5), "Index is not in list") + + self.query = "explain SELECT x FROM default emp1 USE INDEX(%s) UNNEST tokens(emp1.VMs) as x JOIN default task ON KEYS meta(`emp1`).id where x.RAM > 1 and x.RAM < 5 ;" %(idx4) + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + result1 =plan['~children'][0]['scan']['index'] + self.assertTrue(result1==idx4) + self.query = "SELECT x FROM default emp1 USE INDEX(%s) UNNEST tokens(emp1.VMs) as x JOIN default task ON KEYS meta(`emp1`).id where x.RAM > 1 and x.RAM < 5 ;"%(idx4) + actual_result = self.run_cbq_query() + self.query = "SELECT x FROM default emp1 USE INDEX(`#primary`) UNNEST tokens(emp1.VMs) as x JOIN default task ON KEYS meta(`emp1`).id where x.RAM > 1 and x.RAM < 5 ;" + expected_result = self.run_cbq_query() + self.assertTrue(sorted(actual_result['results']) ==sorted(expected_result['results'])) + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") + + def 
test_simple_token(self): + for bucket in self.buckets: + created_indexes = [] + try: + idx = "idxjoin_yr" + self.query = "CREATE INDEX %s ON %s (department, DISTINCT (ARRAY lower(to_string(d)) for d in tokens(%s) END),join_yr,name) " % (idx, bucket.name,"join_yr") + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx) + created_indexes.append(idx) + self.assertTrue(self._is_index_in_list(bucket, idx), "Index is not in list") + + idx2 = "idxVM" + self.query = "CREATE INDEX %s ON %s(`name`,(DISTINCT (array (`x`.`RAM`) for `x` in tokens(%s) end)),VMs)" % ( + idx2, bucket.name, "VMs") + # if self.gsi_type: + # self.query += " WITH {'index_type': 'memdb'}" + actual_result = self.run_cbq_query() + self._wait_for_index_online(bucket, idx2) + created_indexes.append(idx2) + + self.query = "EXPLAIN select name from %s WHERE ANY d in tokens(join_yr) satisfies lower(to_string(d)) = '2016' END " % ( + bucket.name) + \ + "AND NOT (department = 'Manager') ORDER BY name limit 10" + actual_result = self.run_cbq_query() + + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][0]) == ("cover ((`%s`.`department`))" % bucket.name)) + self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['index']) == idx) + #self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][1]) == ("cover ((distinct (array `v` for `v` in (`%s`.`join_yr`) end)))" % bucket.name)) + #self.assertTrue(str(plan['~children'][0]['~children'][0]['scan'][0]['covers'][2]) == ("cover ((`default`.`join_yr`)" )) + #self.assertTrue(str(plan['~children'][0]['~children'][0]['scan'][0]['covers'][3]) == ("cover ((`default`.`name`)" )) + #self.assertTrue(str(plan['~children'][0]['~children'][0]['scan'][0]['covers'][4]) == ("cover ((meta(`default`).`id`))" )) + + self.query = "select name from %s WHERE ANY d in tokens(join_yr) satisfies lower(to_string(d)) = '2016' END " % ( + bucket.name) + \ + "AND NOT (department = 'Manager') ORDER BY name limit 10" + actual_result = self.run_cbq_query() + + self.query = "select name from %s use index(`#primary`) WHERE ANY d in tokens(join_yr) satisfies lower(to_string(d)) = '2016' END " % ( + bucket.name) + \ + "AND NOT (department = 'Manager') ORDER BY name limit 10" + expected_result = self.run_cbq_query() + + self.assertTrue(sorted(actual_result['results']) == sorted(expected_result['results'])) + + self.query = "EXPLAIN select name from %s where (ANY x within tokens(VMs) SATISFIES x.RAM between 1 and 5 END ) " % ( + bucket.name) + \ + "and name is not null ORDER BY name limit 10" + actual_result = self.run_cbq_query() + + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['index']) == idx2) + #self.assertTrue(str(plan['~children'][0]['~children'][0]['covers'][0]) == ("cover ((`%s`.`name`))" % bucket.name)) + #self.assertTrue(str(plan['~children'][0]['~children'][0]['covers'][1]) == ("cover (all (array (`x`.`RAM`) for `x` in (`%s`.`VMs`) end))" % bucket.name)) + #self.assertTrue(str(plan['~children'][0]['~children'][0]['covers'][2]) == ("cover ((`%s`.`VMs`)" % bucket.name)) + #self.assertTrue(str(plan['~children'][0]['~children'][0]['covers'][3]) == ("cover ((meta(`default`).`id`))" % bucket.name)) + + self.query = "select name from `%s` where (ANY x within tokens(VMs) SATISFIES x.RAM between 1 and 5 END ) " % ( + bucket.name) + \ + "and name is not null ORDER BY 
name limit 10" + actual_result = self.run_cbq_query() + + self.query = "select name from `%s` use index(`#primary`) where (ANY x within tokens(VMs) SATISFIES x.RAM between 1 and 5 END ) " % ( + bucket.name) + \ + "and name is not null ORDER BY name limit 10" + expected_result = self.run_cbq_query() + + self.assertTrue(sorted(actual_result['results']) == sorted(expected_result['results'])) + + finally: + for idx in created_indexes: + self.query = "DROP INDEX %s.%s USING %s" % (bucket.name, idx, self.index_type) + actual_result = self.run_cbq_query() + self._verify_results(actual_result['results'], []) + self.assertFalse(self._is_index_in_list(bucket, idx), "Index is in list") diff --git a/pytests/tuqquery/tuq_advancedcbqshell.py b/pytests/tuqquery/tuq_advancedcbqshell.py index 2eaf51b97..4392a2c3a 100644 --- a/pytests/tuqquery/tuq_advancedcbqshell.py +++ b/pytests/tuqquery/tuq_advancedcbqshell.py @@ -16,12 +16,35 @@ def tearDown(self): self.skip_buckets_handle = False super(AdvancedQueryTests, self).tearDown() + + def test_url(self): + for server in self.servers: + shell = RemoteMachineShellConnection(server) + for bucket in self.buckets: + try: + o = shell.execute_commands_inside('%s/cbq -u=Administrator http://localhost:8091@' % (self.path),'','','','','','') + self.assertTrue('status:FAIL' in o) + o = shell.execute_commands_inside('%s/cbq -u=Administrator http://localhost:8091:' % (self.path),'','','','','','') + self.assertTrue('status:FAIL' in o) + o = shell.execute_commands_inside('%s/cbq -u=Administrator http://localhost:8091[' % (self.path),'','','','','','') + self.assertTrue('status:FAIL' in o) + o = shell.execute_commands_inside('%s/cbq -u=Administrator http://localhost:8091]' % (self.path),'','','','','','') + self.assertTrue('status:FAIL' in o) + o = shell.execute_commands_inside('%s/cbq -u=Administrator http://localhost:8091:' % (self.path),'','','','','','') + self.assertTrue('status:FAIL' in o) + finally: + shell.disconnect() + def test_engine_postive(self): for server in self.servers: shell = RemoteMachineShellConnection(server) for bucket in self.buckets: try: o = shell.execute_commands_inside('%s/cbq -q' % (self.path),'\quit','','','','','','') + if self.analytics: + self.query = '\quit' + o = self.run_cbq_query() + print o self.assertTrue(o is '') finally: shell.disconnect() @@ -33,22 +56,28 @@ def test_shell_error(self): for bucket in self.buckets: try: o = shell.execute_commands_inside('%s/cbq -q ' % (self.path),'\quit1','','','','','') - print o - self.assertTrue("FAIL" in o) + if self.analytics: + self.query = '\quit1' + o = self.run_cbq_query() + print o + self.assertTrue("Commanddoesnotexist" in o) finally: shell.disconnect() - def test_engine_ne(self): for server in self.servers: shell = RemoteMachineShellConnection(server) for bucket in self.buckets: try: o = shell.execute_commands_inside('%s/cbq -q -ne' % (self.path),'select * from %s' % bucket.name,'','','','','') - print o - self.assertTrue('FAIL' in o) + self.assertTrue("Notconnectedtoanycluster" in o) o = shell.execute_commands_inside('%s/cbq -q -ne' % (self.path),'\SET','','','','','') print o + if self.analytics: + self.query = '\SET' + o = self.run_cbq_query() + print o + self.assertTrue("histfileValue" in o) finally: shell.disconnect() @@ -60,8 +89,14 @@ def test_timeout(self): try: queries = ['\set -timeout "10ms";',"create primary index on bucketname;","select * from bucketname;"] o = shell.execute_commands_inside('%s/cbq -q ' % (self.path),'',queries,bucket.name,'',bucket.name,'') - print o - 
self.assertEqual('timeout',o[7:]) + self.assertTrue("timeout" in o) + if self.analytics: + self.query = '\set -timeout "10ms"' + self.run_cbq_query() + self.query = 'select * from %s ' %bucket.name + o = self.run_cbq_query() + print o + self.assertTrue("timeout" in o) finally: shell.disconnect() @@ -76,6 +111,9 @@ def check_onesaslbucket_auth(self): o = shell.execute_commands_inside('%s/cbq -c %s:%s -q' % (self.path,bucket.name,bucket.saslPassword),'CREATE PRIMARY INDEX ON %s USING GSI' %bucket.name,'','','','','') self.assertTrue("requestID" in o) o = shell.execute_commands_inside('%s/cbq -c %s:%s -q' % (self.path,bucket.name,bucket.saslPassword),'select *,join_day from %s limit 10'%bucket.name,'','','','','') + if self.analytics: + self.query = 'select join_day from %s limit 10'%bucket.name + o = self.run_cbq_query() self.assertTrue("requestID" in o) o = shell.execute_commands_inside('%s/cbq -c %s:%s -q' % (self.path,bucket.name,'wrong'),'select * from %s limit 10'%bucket.name,'','','','','') print o @@ -197,38 +235,33 @@ def test_connect_disconnect(self): for bucket in self.buckets: queries = ['\connect http://localhost:8091;','create primary index on bucketname;'] o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) - print o queries = ['\connect http://localhost:8091;','drop primary index on bucketname;'] o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) - print o # wrong disconnect queries = ['\disconnect http://localhost:8091;','create primary index on bucketname;'] o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) self.assertTrue("Toomanyinputargumentstocommand" in o) - print o #wrong port queries = ['\connect http://localhost:8097;','create primary index on bucketname;'] o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) - self.assertTrue("Unabletoconnectto" in o) - print o + self.assertTrue("Connectionfailed" in o) #wrong url including http queries = ['\connect http://localhost345:8097;','create primary index on bucketname;'] o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) - print o - self.assertTrue("Unabletoconnectto" in o) + self.assertTrue("Connectionfailed" in o) #wrong url not including http queries = ['\connect localhost3458097;','create primary index on bucketname;'] o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) - self.assertTrue("Unabletoconnectto" in o) + self.assertTrue("InvalidinputURLmissingportinaddresslocalhost" in o) queries = ['\disconnect','drop primary index on bucketname;'] o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) self.assertTrue("Toomanyinputargumentstocommand" in o) queries = ['\disconnect','create primary index on bucketname;'] o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) self.assertTrue("Toomanyinputargumentstocommand" in o) - queries = ['\connect http://localhost:8091;','create primary index on bucketname;'] + queries = ['\connect http://localhost:8091;','create primary index on bucketname;','drop primary index on bucketname;'] o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) - print o + self.assertTrue("GSICreatePrimaryIndex()-cause:Index#primaryalreadyexists." 
in o) def test_history(self): for server in self.servers: @@ -253,17 +286,10 @@ def test_history(self): queries5 = ['\set $a "/abcde";'] queries6 = ["\set $a /abcde;"] - #import pdb;pdb.set_trace() queries.extend(['\ALIAS tempcommand create primary index on bucketname;','\\\\tempcommand;','\ALIAS tempcommand2 select * from bucketname limit 1;','\\\\tempcommand2;','\ALIAS;','\echo \\\\tempcommand;','\echo \\\\tempcommand2;','\echo histfile;']) - print queries o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) - if type2.lower() == "linux": self.assertTrue('/tmp/history' in o) - #import pdb;pdb.set_trace() - - #open and check the file - queries2.extend(["\set histfile \\\\p;","\echo histfile;","\set histfile '\\\\p';","\echo histfile;"]) o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries2,'','',bucket.name,'' ) @@ -275,12 +301,10 @@ def test_history(self): queries3.extend(["\set histfile $a;","\echo histfile;"]) o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries3,'','',bucket.name,'' ) - #import pdb;pdb.set_trace() queries4 = ["\push histfile newhistory.txt;","\echo histfile;",'\ALIAS tempcommand create primary index on bucketname;','\\\\tempcommand;','\ALIAS tempcommand2 select * from bucketname limit 1;','\\\\tempcommand2;','\ALIAS;','\echo \\\\tempcommand;','\echo \\\\tempcommand2;','\echo histfile;'] o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries4,'','',bucket.name,'' ) - #import pdb;pdb.set_trace() queries5.append("\echo $a;") o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries5,'','',bucket.name,'' ) @@ -299,21 +323,26 @@ def test_history(self): def test_alias_and_echo(self): for server in self.servers: shell = RemoteMachineShellConnection(server) + type2 = shell.extract_remote_info().distribution_type for bucket in self.buckets: queries = ["\ALIAS tempcommand create primary index on bucketname;","\\\\tempcommand;",'\ALIAS tempcommand2 select *,email from bucketname limit 10;',"\\\\tempcommand2;",'\ALIAS;','\echo \\\\tempcommand1;','\echo \\\\tempcommand2;'] - o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) - print o + o = self.shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) + self.assertTrue("ERROR141:Aliasdoesnotexisttempcommand1" in o) + #print o queries = ['\ALIAS tempcommand drop primary index on bucketname;','\\\\tempcommand;','\ALIAS tempcommand create primary index on bucketname;','\ALIAS tempcommand2 drop primary index on bucketname;','\\\\tempcommand;','\\\\tempcommand2;','\ALIAS;','\echo \\\\tempcommand;','\echo \\\\tempcommand2;'] - o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) - print o + o = self.shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) + #print o queries = ['\UNALIAS tempcommand drop primary index on bucketname;','\\\\tempcommand;'] - o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) - self.assertTrue("Aliasdoesnotexist" in o) - print o + o = self.shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) + if type2.lower() == "windows": + print o + else: + self.assertTrue("Aliasdoesnotexist" in o) + #print o queries = ['\UNALIAS tempcommand;','\\\\tempcommand;'] - o = shell.execute_commands_inside('%s/cbq -quiet' % 
(self.path),'',queries,'','',bucket.name,'' ) + o = self.shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',queries,'','',bucket.name,'' ) self.assertTrue("Aliasdoesnotexist" in o) - print o + #print o @@ -343,7 +372,6 @@ def test_push_pop_set(self): i=1 pushqueries=['\set -$project "AB";','\push -$project "CD";','\push -$project "EF";','\push -$project "GH";','select $project;'] o = shell.execute_commands_inside('%s/cbq -quiet' % (self.path),'',pushqueries,'','',bucket.name,'' ) - self.assertTrue('{"$1":"GH"}' in o) pushqueries.append('\pop;') pushqueries.append('select $project;') diff --git a/pytests/tuqquery/tuq_join.py b/pytests/tuqquery/tuq_join.py index 2db9b5363..43caf2bd1 100644 --- a/pytests/tuqquery/tuq_join.py +++ b/pytests/tuqquery/tuq_join.py @@ -88,23 +88,126 @@ def test_where_join_keys(self): self._verify_results(actual_result, expected_result) def test_bidirectional_join(self): - self.query = "create index idxbidirec on %s(join_day)" %self.buckets[1].name ; + self.query = "create index idxbidirec on %s(join_day)" %self.buckets[0].name ; actual_result = self.run_cbq_query() self.assertEqual(actual_result['status'], 'success', 'Query was not run successfully') self.query = "explain SELECT employee.name, employee.join_day " +\ - "FROM %s as employee %s JOIN %s as new_project " % (self.buckets[0].name, self.type_join,self.buckets[1].name) +\ + "FROM %s as employee %s JOIN %s as new_project " % (self.buckets[0].name, self.type_join,self.buckets[0].name) +\ "ON KEY new_project.join_day FOR employee where new_project.join_day is not null" actual_result = self.run_cbq_query() + self.assertTrue("covers" in str(actual_result)) self.assertEqual(actual_result['status'], 'success', 'Query was not run successfully') self.test_explain_particular_index("idxbidirec") self.query = "SELECT employee.name, employee.join_day " +\ - "FROM %s as employee %s JOIN %s as new_project " % (self.buckets[0].name, self.type_join,self.buckets[1].name) +\ + "FROM %s as employee %s JOIN %s as new_project " % (self.buckets[0].name, self.type_join,self.buckets[0].name) +\ "ON KEY new_project.join_day FOR employee where new_project.join_day is not null" actual_result = self.run_cbq_query() - print actual_result - self.assertTrue(actual_result['metrics']['resultCount'] == 0, 'Query was not run successfully') - self.query = "drop index %s.idxbidirec" %self.buckets[1].name; - actual_result = self.run_cbq_query() + #self.assertTrue(actual_result['metrics']['resultCount'] == 0, 'Query was not run successfully') + self.query = "drop index %s.idxbidirec" %self.buckets[0].name; + self.run_cbq_query() + + self.query = "CREATE INDEX ix1 ON default(docid,name)" + self.run_cbq_query() + self.query = 'CREATE INDEX ix2 ON default(docid,name) where type = "wdoc"' + self.run_cbq_query() + self.query = "CREATE INDEX ix3 ON default(altid, name, DISTINCT ARRAY p FOR p IN phones END)" + self.run_cbq_query() + self.query = 'INSERT into %s (key , value) VALUES ("%s", %s)' % ("default", "w001", {"type":"wdoc", "docid":"x001","name":"wdoc","phones":["123-456-7890","123-456-7891"],"altid":"x001"}) + self.run_cbq_query() + self.query = 'INSERT into %s (key , value) VALUES ("%s", %s)' % ("default", "pdoc1", {"type":"pdoc", "docid":"x001","name":"pdoc","phones":["123-456-7890","123-456-7891"],"altid":"x001"}) + self.run_cbq_query() + self.query = 'INSERT into %s (key , value) VALUES ("%s", %s)' % ("default", "pdoc2", {"type":"pdoc", "docid":"w001","name":"pdoc","phones":["123-456-7890","123-456-7891"],"altid":"w001"}) + 
self.run_cbq_query() + self.query = 'explain SELECT meta(b1).id b1id FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result= self.run_cbq_query() + self.assertTrue("covers" in str(actual_result)) + self.assertTrue("ix1" in str(actual_result)) + + self.query = 'SELECT meta(b1).id b1id FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result= self.run_cbq_query() + + self.assertTrue(actual_result['results']==[{u'b1id': u'w001'}]) + self.query = 'explain SELECT meta(b1).id b1id, meta(b2).id b2id FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result= self.run_cbq_query() + self.assertTrue("covers" in str(actual_result)) + self.assertTrue("ix1" in str(actual_result)) + self.query = 'SELECT meta(b1).id b1id, meta(b2).id b2id FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result= self.run_cbq_query() + self.assertTrue(actual_result['results']==[{u'b1id': u'w001', u'b2id': u'pdoc2'}]) + self.query = 'explain SELECT meta(b1).id b1id, b2.docid FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result= self.run_cbq_query() + self.assertTrue("covers" in str(actual_result)) + self.assertTrue("ix1" in str(actual_result)) + self.query = 'SELECT meta(b1).id b1id, b2.docid FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result= self.run_cbq_query() + self.assertTrue(actual_result['results']==[{u'docid': u'w001', u'b1id': u'w001'}]) + self.query = 'explain SELECT meta(b1).id b1id, b2.name FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result= self.run_cbq_query() + self.assertTrue("covers" in str(actual_result)) + self.assertTrue("ix1" in str(actual_result)) + self.query = 'SELECT meta(b1).id b1id, b2.name FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result= self.run_cbq_query() + self.assertTrue(actual_result['results']==[{u'b1id': u'w001', u'name': u'pdoc'}]) + self.query = 'explain SELECT meta(b1).id b1id, b2.name, b3.docid FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 JOIN default b3 ON KEY b3.docid FOR b1 WHERE meta(b1).id > ""' + actual_result= self.run_cbq_query() + self.assertTrue("covers" in str(actual_result)) + self.assertTrue("ix1" in str(actual_result)) + self.query = 'SELECT meta(b1).id b1id, b2.name, b3.docid FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 JOIN default b3 ON KEY b3.docid FOR b1 WHERE meta(b1).id > ""' + actual_result= self.run_cbq_query() + self.assertTrue(actual_result['results']==[{u'docid': u'w001', u'b1id': u'w001', u'name': u'pdoc'}]) + self.query = 'explain SELECT meta(b1).id b1id, b2.name, b3.docid FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 JOIN default b3 ON KEY b3.docid FOR b2 WHERE meta(b1).id > ""' + actual_result= self.run_cbq_query() + self.assertTrue("covers" in str(actual_result)) + self.assertTrue("ix1" in str(actual_result)) + self.query = 'SELECT meta(b1).id b1id, b2.name, b3.docid FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 JOIN default b3 ON KEY b3.docid FOR b2 WHERE meta(b1).id > "";' + actual_result= self.run_cbq_query() + self.assertTrue(actual_result['metrics']['resultCount']==0) + self.query = 'explain SELECT meta(b1).id b1id, meta(b2).id, b2.name FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > "" AND b2.type = "wdoc"' + actual_result= self.run_cbq_query() + 
self.assertTrue("covers" in str(actual_result)) + self.assertTrue("ix2" in str(actual_result)) + self.query = 'SELECT meta(b1).id b1id, meta(b2).id, b2.name FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > "" AND b2.type = "wdoc"' + actual_result= self.run_cbq_query() + self.assertTrue(actual_result['metrics']['resultCount']==0) + self.query = 'explain SELECT meta(b1).id b1id, b2.name, b3.docid FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 JOIN default b3 ON KEY b3.docid FOR b1 WHERE meta(b1).id > "" AND b2.type = "wdoc"' + actual_result= self.run_cbq_query() + self.assertTrue("covers" in str(actual_result)) + self.assertTrue("ix2" in str(actual_result)) + self.query = 'SELECT meta(b1).id b1id, b2.name, b3.docid FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 JOIN default b3 ON KEY b3.docid FOR b1 WHERE meta(b1).id > "" AND b2.type = "wdoc"' + actual_result= self.run_cbq_query() + self.assertTrue(actual_result['metrics']['resultCount']==0) + self.query = 'explain SELECT meta(b1).id b1id, b2.name, b3.docid FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 JOIN default b3 ON KEY b3.docid FOR b2 WHERE meta(b1).id > "" AND b2.type = "wdoc"' + actual_result= self.run_cbq_query() + self.assertTrue("covers" in str(actual_result)) + self.assertTrue("ix2" in str(actual_result)) + self.query = 'SELECT meta(b1).id b1id, b2.name, b3.docid FROM default b1 JOIN default b2 ON KEY b2.docid FOR b1 JOIN default b3 ON KEY b3.docid FOR b2 WHERE meta(b1).id > "" AND b2.type = "wdoc"' + actual_result= self.run_cbq_query() + self.assertTrue(actual_result['metrics']['resultCount']==0) + self.query = 'SELECT meta(b1).id b1id, b2 from default b1 JOIN default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result=self.run_cbq_query() + self.assertTrue(actual_result['results']==[{u'b1id': u'w001', u'b2': {u'phones': [u'123-456-7890', u'123-456-7891'], u'type': u'pdoc', u'docid': u'w001', u'name': u'pdoc', u'altid': u'w001'}}]) + self.query='SELECT meta(b1).id b1id FROM default b1 JOIN default b2 ON KEY b2.altid FOR b1 WHERE meta(b1).id > ""' + #enhancement + #actual_result=self.run_cbq_query() + #print actual_result + #right side should not use covered index + self.query = 'explain SELECT meta(b1).id b1id from default b1 NEST default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result=self.run_cbq_query() + self.assertTrue("covers" in str(actual_result)) + self.assertTrue("ix1" in str(actual_result)) + self.assertTrue("(`b2`.`docid`)" in str(actual_result)) + self.query = 'SELECT meta(b1).id b1id from default b1 NEST default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result=self.run_cbq_query() + self.query = 'explain SELECT meta(b1).id b1id, b2 from default b1 NEST default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result=self.run_cbq_query() + self.assertTrue("covers" in str(actual_result)) + self.assertTrue("ix1" in str(actual_result)) + self.assertTrue("(`b2`.`docid`)" in str(actual_result)) + self.query = 'SELECT meta(b1).id b1id, b2 from default b1 NEST default b2 ON KEY b2.docid FOR b1 WHERE meta(b1).id > ""' + actual_result=self.run_cbq_query() + self.assertTrue( actual_result['results']== [{u'b1id': u'w001', u'b2': [{u'phones': [u'123-456-7890', u'123-456-7891'], u'type': u'pdoc', u'docid': u'w001', u'name': u'pdoc', u'altid': u'w001'}]}] ) + + diff --git a/pytests/tuqquery/tuq_nulls.py b/pytests/tuqquery/tuq_nulls.py index ae85e8bbc..1cb969e01 100644 --- a/pytests/tuqquery/tuq_nulls.py +++ 
b/pytests/tuqquery/tuq_nulls.py @@ -4,6 +4,7 @@ class NULLTests(QueryTests): def setUp(self): self.skip_generation = True + self.analytics = False super(NULLTests, self).setUp() self.gens_load = self.generate_docs() # for bucket in self.buckets: @@ -320,6 +321,11 @@ def test_nulls_over(self): for bucket in self.buckets: self.query = "SELECT feature_name FROM %s" % bucket.name +\ " WHERE ANY story_point_n IN story_point SATISFIES story_point_n IS NULL END ORDER BY feature_name" + + if self.analytics: + self.query = "SELECT feature_name FROM %s" % bucket.name +\ + " WHERE ANY story_point_n IN story_point SATISFIES story_point_n IS NULL ORDER BY feature_name" + self.run_cbq_query() self.sleep(3) actual_result = self.run_cbq_query() diff --git a/pytests/tuqquery/tuq_tokens.py b/pytests/tuqquery/tuq_tokens.py new file mode 100644 index 000000000..3f554e86d --- /dev/null +++ b/pytests/tuqquery/tuq_tokens.py @@ -0,0 +1,256 @@ +from lib import testconstants +from lib.membase.api.exception import CBQError +from lib.membase.api.rest_client import RestConnection +from lib.remote.remote_util import RemoteMachineShellConnection +from pytests.basetestcase import BaseTestCase +from tuqquery.tuq import ExplainPlanHelper + + +class TokenTests(BaseTestCase): + def setUp(self): + if self._testMethodName != 'suite_setUp': + self.skip_buckets_handle = True + super(TokenTests, self).setUp() + self.n1ql_port = self.input.param("n1ql_port", 8093) + self.scan_consistency = self.input.param("scan_consistency", 'REQUEST_PLUS') + + # def suite_setUp(self): + # super(TokenTests, self).suite_setUp() + + def tearDown(self): + server = self.master + shell = RemoteMachineShellConnection(server) + shell.execute_command("""curl -X DELETE -u Administrator:password http://{0}:8091/pools/default/buckets/beer-sample""".format(server.ip)) + self.sleep(20) + super(TokenTests, self).tearDown() + # + # def suite_tearDown(self): + # super(TokenTests, self).suite_tearDown() + + def load_sample_buckets(self, bucketName="beer-sample"): + """ + Load the specified sample bucket in Couchbase + """ + #self.cluster.bucket_delete(server=self.master, bucket="default") + server = self.master + shell = RemoteMachineShellConnection(server) + shell.execute_command("""curl -v -u Administrator:password \ + -X POST http://{0}:8091/sampleBuckets/install \ + -d '["{1}"]'""".format(server.ip, bucketName)) + self.sleep(30) + + shell.disconnect() + + def test_tokens_secondary_indexes(self): + bucket_name = "beer-sample" + self.query = 'create primary index on `{0}`'.format(bucket_name) + self.run_cbq_query() + self.query = 'create index idx1 on `beer-sample`(description,name )' + self.run_cbq_query() + self.query = 'create index idx2 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description) END ,description,name )' + self.run_cbq_query() + self.query = 'create index idx3 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"lower","names":true,"specials":false}) END ,description,name )' + self.run_cbq_query() + self.query = 'create index idx4 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false,"specials":true}) END ,description,name )' + self.run_cbq_query() + self.query = 'create index idx5 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false}) END ,description,name )' + self.run_cbq_query() + self.query = 'create index idx6 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper"}) END ,description,name )' + self.run_cbq_query() + self.query = 
'create index idx7 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{}) END ,description,name )' + self.run_cbq_query() + self.query = 'create index idx8 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"":""}) END ,description,name )' + self.run_cbq_query() + self.query = 'create index idx9 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"random"}) END ,description,name )' + self.run_cbq_query() + self.query = 'create index idx10 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"names":"random"}) END ,description,name )' + self.run_cbq_query() + self.query = 'create index idx11 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"specials":"random"}) END ,description,name )' + self.run_cbq_query() + self.query = 'create index idx12 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description) END )' + self.run_cbq_query() + self.query = 'create index idx13 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"lower"}) END )' + self.run_cbq_query() + self.query = 'create index idx14 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper"}) END )' + self.run_cbq_query() + self.query = 'create index idx15 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"lower","names":true,"specials":false}) END )' + self.run_cbq_query() + self.query = 'create index idx16 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false,"specials":true}) END )' + self.run_cbq_query() + self.query = 'create index idx17 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"upper","names":false}) END )' + self.run_cbq_query() + self.query = 'create index idx18 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{}) END )' + self.run_cbq_query() + self.query = 'create index idx19 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"":""}) END )' + self.run_cbq_query() + self.query = 'create index idx20 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"case":"random"}) END )' + self.run_cbq_query() + self.query = 'create index idx21 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"names":"random"}) END )' + self.run_cbq_query() + self.query = 'create index idx22 on `beer-sample`( DISTINCT ARRAY v FOR v in tokens(description,{"specials":"random"}) END )' + self.run_cbq_query() + + + self.query = 'explain select name from `beer-sample` where any v in tokens(description) satisfies v = "golden" END limit 10' + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + + self.assertTrue(actual_result['results']) + self.assertTrue("covers" in str(plan)) + self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx2") + + self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][0]) == ('cover ((distinct (array `v` for `v` in tokens((`beer-sample`.`description`)) end)))')) + + self.query = 'select name from `beer-sample` use index(`beer_primary`) where any v in tokens(reverse(description)) satisfies v = "nedlog" END order by meta().id limit 10' + expected_result = self.run_cbq_query() + + + self.query = 'select name from `beer-sample` where any v in tokens(reverse(description)) satisfies v = "nedlog" END order by meta().id limit 10' + actual_result = self.run_cbq_query() + #self.assertTrue(str(actual_result['results'])=="[{u'name': u'21A IPA'}, {u'name': u'Amendment Pale Ale'}, {u'name': u'Double Trouble IPA'}, {u'name': 
u'South Park Blonde'}, {u'name': u'Restoration Pale Ale'}, {u'name': u'S.O.S'}, {u'name': u'Satsuma Harvest Wit'}, {u'name': u'Adnams Explorer'}, {u'name': u'Shock Top'}, {u'name': u'Anniversary Maibock'}]" ) + self.assertTrue((actual_result['results'])== (expected_result['results'])) + + + self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"case":"lower","names":true,"specials":false}) satisfies v = "brewery" END limit 10' + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][0]) == ('cover ((distinct (array `v` for `v` in tokens((`beer-sample`.`description`), {"case": "lower", "names": true, "specials": false}) end)))')) + self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx3") + + self.query = 'select name from `beer-sample` use index(`beer_primary`) where any v in tokens(description,{"case":"lower","names":true,"specials":false}) satisfies v = "brewery" END order by meta().id limit 10' + + expected_result = self.run_cbq_query() + + self.query = 'select name from `beer-sample` use index(`idx15`) where any v in tokens(description,{"case":"lower","names":true,"specials":false}) satisfies v = "brewery" END order by meta().id limit 10' + actual_result = self.run_cbq_query() + + self.assertTrue((actual_result['results'])== (expected_result['results']) ) + + self.query = 'explain select name from `beer-sample` use index(`idx14`) where any v in tokens(description,{"case":"upper","names":false,"specials":true}) satisfies v = "BREWERY" END order by meta().id limit 10' + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['covers'][0]) == ('cover ((distinct (array `v` for `v` in tokens((`beer-sample`.`description`), {"case": "upper", "names": false, "specials": true}) end)))')) + self.assertTrue(str(plan['~children'][0]['~children'][0]['scan']['index']) == "idx4") + + self.query = 'select name from `beer-sample` use index(`idx16`) where any v in tokens(description,{"case":"upper","names":false,"specials":true}) satisfies v = "BREWERY" END order by meta().id limit 10' + actual_result = self.run_cbq_query() + self.assertTrue((actual_result['results'])== (expected_result['results'])) + + self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"case":"upper","names":false}) satisfies v = "GOLDEN" END limit 10' + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx5") + + + self.query = 'select name from `beer-sample` use index(`idx17`) where any v in tokens(description,{"case":"upper","names":false}) satisfies v = "GOLDEN" END limit 10' + actual_result = self.run_cbq_query() + + self.query = 'select name from `beer-sample` use index(`beer_primary`) where any v in tokens(description,{"case":"upper","names":false}) satisfies v = "GOLDEN" END limit 10' + expected_result = self.run_cbq_query() + self.assertTrue(actual_result['results']==expected_result['results']) + + + self.query = 'explain select name from `beer-sample` where any v in tokens(description,{}) satisfies v = "golden" END limit 10' + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + 
self.assertTrue("covers" in str(plan)) + self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx7") + self.query = 'select name from `beer-sample` use index(`idx18`) where any v in tokens(description,{}) satisfies v = "golden" END limit 10' + actual_result = self.run_cbq_query() + + self.query = 'select name from `beer-sample` use index(`beer_primary`) where any v in tokens(description,{}) satisfies v = "golden" END limit 10' + expected_result = self.run_cbq_query() + self.assertTrue(actual_result['results']==expected_result['results']) + + self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"":""}) satisfies v = "golden" END limit 10' + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx8") + + self.query = 'select name from `beer-sample` use index(`idx19`) where any v in tokens(description,{"":""}) satisfies v = "golden" END order by name ' + actual_result = self.run_cbq_query() + self.query = 'select name from `beer-sample` use index(`beer_primary`) where any v in tokens(description,{"":""}) satisfies v = "golden" END order by name ' + expected_result = self.run_cbq_query() + self.assertTrue(actual_result['results']==expected_result['results']) + + + self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"case":"random"}) satisfies v = "golden" END ' + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.assertTrue(plan['~children'][0]['scan']['index'] == "idx9") + + self.query = 'select name from `beer-sample` use index(`idx20`) where any v in tokens(description,{"case":"random"}) satisfies v = "golden" END order by name ' + actual_result = self.run_cbq_query() + self.query = 'select name from `beer-sample` use index(`beer_primary`) where any v in tokens(description,{"case":"random"}) satisfies v = "golden" END order by name ' + expected_result = self.run_cbq_query() + self.assertTrue(actual_result['results']==expected_result['results']) + + self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"specials":"random"}) satisfies v = "brewery" END order by name' + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx11") + + self.query = 'select name from `beer-sample` use index(`idx22`) where any v in tokens(description,{"specials":"random"}) satisfies v = "golden" END order by name' + actual_result = self.run_cbq_query() + self.query = 'select name from `beer-sample` use index(`beer_primary`) where any v in tokens(description,{"specials":"random"}) satisfies v = "golden" END order by name' + expected_result = self.run_cbq_query() + self.assertTrue(actual_result['results']==expected_result['results']) + + + self.query = 'explain select name from `beer-sample` where any v in tokens(description,{"names":"random"}) satisfies v = "brewery" END limit 10' + actual_result = self.run_cbq_query() + plan = ExplainPlanHelper(actual_result) + self.assertTrue("covers" in str(plan)) + self.assertTrue(plan['~children'][0]['~children'][0]['scan']['index'] == "idx10") + + self.query = 'select name from `beer-sample` use index(`idx21`) where any v in tokens(description,{"names":"random"}) satisfies v = "golden" END limit 
10' + actual_result = self.run_cbq_query() + self.query = 'select name from `beer-sample` use index(`beer_primary`) where any v in tokens(description,{"names":"random"}) satisfies v = "golden" END limit 10' + expected_result = self.run_cbq_query() + self.assertTrue(actual_result['results']==expected_result['results']) + + + + + + + + + + + + def run_cbq_query(self, query=None, min_output_size=10, server=None): + if query is None: + query = self.query + if server is None: + server = self.master + if server.ip == "127.0.0.1": + self.n1ql_port = server.n1ql_port + else: + if self.input.tuq_client and "client" in self.input.tuq_client: + server = self.tuq_client + if self.n1ql_port is None or self.n1ql_port == '': + self.n1ql_port = self.input.param("n1ql_port", 8093) + if not self.n1ql_port: + self.log.info("n1ql_port is not defined, processing will not proceed further") + raise Exception("n1ql_port is not defined, processing will not proceed further") + query_params = {} + cred_params = {'creds': []} + for bucket in self.buckets: + if bucket.saslPassword: + cred_params['creds'].append({'user': 'local:%s' % bucket.name, 'pass': bucket.saslPassword}) + query_params.update(cred_params) + query_params.update({'scan_consistency': self.scan_consistency}) + self.log.info('RUN QUERY %s' % query) + + result = RestConnection(server).query_tool(query, self.n1ql_port, query_params=query_params) + if isinstance(result, str) or 'errors' in result: + raise CBQError(result, server.ip) + self.log.info("TOTAL ELAPSED TIME: %s" % result["metrics"]["elapsedTime"]) + return result diff --git a/pytests/ui/uibasetest.py b/pytests/ui/uibasetest.py index 6b11d0743..e6a3efe77 100644 --- a/pytests/ui/uibasetest.py +++ b/pytests/ui/uibasetest.py @@ -2,7 +2,6 @@ import time import unittest import os -import urllib2 import commands import types import datetime @@ -10,12 +9,11 @@ from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.common.desired_capabilities import DesiredCapabilities -from selenium.webdriver.chrome.options import Options from selenium.webdriver.support.ui import WebDriverWait, Select -from selenium.common.exceptions import NoSuchElementException +from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException from threading import Thread import ConfigParser -from TestInput import TestInputSingleton, TestInputParser, TestInputServer +from TestInput import TestInputSingleton from remote.remote_util import RemoteMachineShellConnection from membase.api.rest_client import RestConnection from membase.helper.bucket_helper import BucketOperationHelper @@ -149,6 +147,7 @@ def setUp(self): desired_capabilities=DesiredCapabilities.FIREFOX) elif self.browser == 'chrome': self.log.info("Test Couchbase Server UI in Chrome") + self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub' .format(self.machine.ip, self.machine.port), @@ -185,13 +184,19 @@ def _log_finish(self): def tearDown(self): try: - if self.driver: + test_failed = len(self._resultForDoCleanups.errors) + if self.driver and test_failed: path_screen = self.input.ui_conf['screenshots'] or 'logs/screens' full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen) self.log.info('screenshot is available: %s' % full_path) if not os.path.exists(path_screen): os.mkdir(path_screen) self.driver.get_screenshot_as_file(os.path.abspath(full_path)) + if self.driver: + 
self.driver.close() + if test_failed and TestInputSingleton.input.param("stop-on-failure", False): + print "test fails, teardown will be skipped!!!" + return rest = RestConnection(self.servers[0]) if rest._rebalance_progress_status() == 'running': stopped = rest.stop_rebalance() @@ -200,8 +205,6 @@ def tearDown(self): for server in self.servers: ClusterOperationHelper.cleanup_cluster([server]) ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self) - if self.driver: - self.driver.close() except Exception as e: raise e finally: @@ -314,9 +317,6 @@ def select(self, label=None, value=None): def mouse_over(self): ActionChains(self.selenium).move_to_element(self.web_element).perform() - def get_inner_html(self): - return self.web_element.get_attribute("outerHTML") - class ControlsHelper(): def __init__(self, driver): @@ -344,7 +344,6 @@ def find_first_visible(self, section, locator, parent_locator=None, text=None): by = self._find_by(section, locator, parent_locator) if text: by = by.format(text) - controls = [] elements = self.driver.find_elements_by_xpath(by) for element in elements: try: @@ -370,6 +369,7 @@ def __init__(self, driver): self._login_btn = helper.find_control('login', 'login_btn') self._logout_btn = helper.find_control('login', 'logout_btn') self.error = helper.find_control('login', 'error') + self.ajax_spinner = helper.find_control('login', 'ajax_spinner') class BaseHelper(): @@ -378,6 +378,13 @@ def __init__(self, tc): self.controls = BaseHelperControls(tc.driver) self.wait = WebDriverWait(tc.driver, timeout=100) + def wait_ajax_loaded(self): + try: + self.wait.until_not(lambda fn: self.controls.ajax_spinner.is_displayed(), + "Page is still loaded") + except StaleElementReferenceException: + pass + def login(self, user=None, password=None): self.tc.log.info("Try to login to Couchbase Server in browser") if not user: @@ -421,18 +428,12 @@ def loadSampleBucket(self, node, bucketName): username = self.tc.input.membase_settings.rest_username password = self.tc.input.membase_settings.rest_password - sample_bucket_path = {"beer":"/opt/couchbase/samples/beer-sample.zip", - "game-sim":"/opt/couchbase/samples/gamesim-sample.zip", - "travel":"/opt/couchbase/samples/travel-sample.zip"} + sample_bucket_path = "/opt/couchbase/samples/%s-sample.zip" % bucketName command = '/opt/couchbase/bin/cbdocloader -n ' + node.ip + ':' + \ node.port + ' -u ' + username + ' -p ' + password + \ - ' -b ' + bucketName + ' -s 100 ' + \ - sample_bucket_path[bucketName] - - self.tc.log.info('Command : '+command) - - o,r = shell.execute_command(command) - - shell.log_command_output(o,r) + ' -b ' + bucketName + ' -s 100 ' + sample_bucket_path - self.tc.log.info("Done loading sample bucket %s", bucketName) + self.tc.log.info('Command: %s ', command) + o, r = shell.execute_command(command) + shell.log_command_output(o, r) + self.tc.log.info("Done loading sample bucket %s", bucketName) \ No newline at end of file diff --git a/pytests/ui/uilocators-watson.conf b/pytests/ui/uilocators-watson.conf index 8701ec438..a6822dfac 100644 --- a/pytests/ui/uilocators-watson.conf +++ b/pytests/ui/uilocators-watson.conf @@ -4,6 +4,7 @@ password_field://*[@id='password2_inp'] login_btn://*[@value='Sign In'] logout_btn://a[text()='Sign Out'] error://*[@id='auth_failed_message'] +ajax_spinner://*[@class ='spinner ng-scope'] [navigation] navigation_bar://*[@id='headerNav'] @@ -276,7 +277,6 @@ confirm_btn://button[text()='Delete'] dlg://*[@mn-spinner='externalRolesDeleteDialogCtl.viewLoading'] 
confirm_btn://*[text()='Yes, Delete'] - [sample_buckets] sample_cb://*[@id='available_samples']//input[@value='{0}'] save_btn://*[@id='sample_buckets_settings_btn'] @@ -290,7 +290,7 @@ server_arrow://a[contains(@title,'Show Server Node')] server_arrow_opened://a[contains(@title,'Show Server Node')]//*[not(contains(@class,'closed'))] server_info://*[@class='settings-placeholder'][contains(.,'{0}')] rebalance_progress_in://*[@class='settings-placeholder'][contains(.,'{0}')]//*[@class='rebalance_progress']//*[contains(.,'transferred in')] -rebalance_progress://*[@id='active_server_list_container'] +rebalance_progress://*[@class='rebalance_progress ng-scope'] #//*[@class='settings-placeholder'][contains(.,'{0}')]//*[@class='rebalance_progress']//*[contains(.,'transferred out')] bucket_row://p[contains(.,'Bucket')][contains(.,'{1}')] total_trans_keys_row://p[contains(.,'Total number of keys')] @@ -387,3 +387,40 @@ cancel_create_index_btn=//*[@class='cancel'] [fts_index_details_screen] index_name_header=//h2[text()='{0}'] show_index_defn_json_checkbox=//input[@type='checkbox'][contains(text(),'Show index definition JSON')] + +[query_top_screen] +execute_button://span[text()='Execute'] +view_previous://span[text()='←'] +view_next://span[text()='→'] +history_link://a[text()='History '] +page_count_label://a[text()='History ']/.. +query_editor://textarea[@class='ace_text-input'] +query_editor_value://div[@class='ace_layer ace_text-layer'] +save_query://a[@ng-click='qc.save_query()']/span + +[query_bucket_analysis] +refresh_button://*[@class="cbui-refreshicon"] +resize_button://*[@class="btn_1 dynamic_disabled"] +sidebar_body://*[@id='sidebar_body'] + +[query_results_box] +result_box://*[@id='result_box'] +result_select_mode://ul[@class='tabs']//a[text()='{0}'] +result_selected_mode://ul[@class ='tabs']//a[@class ='selected'] +result_summary://*[@id='result_summary'] +result_json_mode://*[@id='result_editor']//*[@class='ace_layer ace_text-layer'] +result_table_mode://*[@id='result_table'] +result_tree_mode://*[@id='result_tree'] +result_plan_mode://*[@id='query_plan'] +result_plan_text_mode://*[@id='query_plan_text']//*[@class='ace_layer ace_text-layer'] +result_save_button://a[@ng-click='qc.save()']/span + +[query_save_screen] +path://*[@id='pwd'] +cancel_button://a[text()='Cancel'] +ok_button://button[text()='OK'] + + + + + diff --git a/pytests/ui/uilocators.conf b/pytests/ui/uilocators.conf index 6d8587f24..607035063 100644 --- a/pytests/ui/uilocators.conf +++ b/pytests/ui/uilocators.conf @@ -4,6 +4,7 @@ password_field://*[@id='password2_inp'] login_btn://*[@value='Sign In'] logout_btn://a[text()='Sign Out'] error://*[@id='auth_failed_message'] +ajax_spinner://*[@class ='spinner ng-scope'] [navigation] navigation_bar://*[@id='headerNav'] diff --git a/pytests/ui/uiqueryworkbench.py b/pytests/ui/uiqueryworkbench.py new file mode 100644 index 000000000..d2ea27f0d --- /dev/null +++ b/pytests/ui/uiqueryworkbench.py @@ -0,0 +1,264 @@ +import re +from os.path import expanduser +from unittest import TestCase + +from membase.api.rest_client import RestConnection +from uibasetest import * +from uisampletests import NavigationHelper + + +class QueryWorkbenchTests(BaseUITestCase): + """ + For the first node the following services should be turned on: kv,n1ql,index + """ + def setUp(self): + super(QueryWorkbenchTests, self).setUp() + self.bucketname = 'beer' + self.baseHelper = BaseHelper(self) + self.queryHelper = QueryWorkbenchHelper(self) + self.baseHelper.login() + 
self.baseHelper.loadSampleBucket(self.servers[0], self.bucketname) + self.rest = RestConnection(self.servers[0]) + self.rest.set_indexer_storage_mode() + self.rest.query_tool("DROP INDEX `beer`.`beer_index_sec` USING GSI;") + + def tearDown(self): + super(QueryWorkbenchTests, self).tearDown() + + def test_create_indexes(self): + expected_results = self.input.param('expected_result', None) + if expected_results is not None: + expected_results = expected_results.replace('_STAR_', '*').replace('_SEM_', ';').decode('unicode_escape')\ + .split('|') # 4.7 vs 4.6 versions + summary_result = self.input.param('summary_result', '') + summary_result = summary_result.replace('_STAR_', '*').replace('_SEM_', ';').decode('unicode_escape') + result_mode = self.input.param('mode', 'JSON') + if self.rest.get_nodes()[0].version <= '4.7' and result_mode in ['Plan Text', 'Plan']: + self.log.info("skip 'Plan Text', 'Plan' modes in version < 4.7") + return + init_query = self.input.param('init_query', '').replace('_STAR_', '*').replace('_SEM_', ';').decode( + 'unicode_escape') + check_query = self.input.param('check_query', '').replace('_STAR_', '*').replace('_SEM_', ';').decode( + 'unicode_escape') + + NavigationHelper(self).navigate('Query') + if init_query: + self.queryHelper.execute_query(init_query) + self.queryHelper.controls.query_top_screen().view_next.click() + self.assertEqual('{"no_data_yet": "hit execute to run query"}', + self.queryHelper.controls.query_results_box().result_json_mode.get_text()) + self.queryHelper.execute_query(check_query) + if expected_results is not None: + self.queryHelper.check_result(expected_results, mode=result_mode) + if summary_result: + self.queryHelper.check_summary_result(summary_result, mode=result_mode) + if init_query: + self.queryHelper.controls.query_top_screen().view_previous.click() + self.assertEqual(init_query, self.queryHelper.controls.query_top_screen().query_editor_value.get_text()) + + def test_bucket_analysis(self): + init_analysis = self.input.param('init_analysis', None) + expected_analysis = self.input.param('expected_analysis', None) + init_analysis = init_analysis.replace('_STAR_', '*').replace('_SEM_', ';').decode( + 'unicode_escape').split('|') # 4.7 vs 4.6 + expected_analysis = expected_analysis.replace('_STAR_', '*').replace('_SEM_', ';').decode( + 'unicode_escape') + check_query = self.input.param('check_query', '').replace('_STAR_', '*').replace('_SEM_', ';').decode( + 'unicode_escape') + + NavigationHelper(self).navigate('Query') + self.queryHelper.check_bucket_analysis_result(init_analysis) + self.queryHelper.execute_query(check_query) + self.queryHelper.controls.query_bucket_analysis().refresh_button.click(highlight=False) + time.sleep(6) + self.queryHelper.check_bucket_analysis_result(expected_analysis) + + def test_save_query(self): + path = self.input.param('path', "n1ql_query.txt") + check_query = self.input.param('check_query', '').replace('_STAR_', '*').replace('_SEM_', ';').decode( + 'unicode_escape') + home = expanduser("~") + home += "\Downloads" + saved_file = "%s\%s" % (home, path) + try: + if path: + os.remove(saved_file) + else: + os.remove("%s\download.txt" % home) + except OSError: + pass + NavigationHelper(self).navigate('Query') + self.queryHelper.execute_query(check_query) + self.queryHelper.save_query(path) + if not path: + saved_file = "%s\download.txt" % home + f = open(saved_file, 'r') + content = f.read() + self.assertEqual(content, check_query, msg='Incorrect saved query file: %s' % content) + + def 
test_save_result(self): + path = self.input.param('path', "data.json") + check_query = self.input.param('check_query', '').replace('_STAR_', '*').replace('_SEM_', ';').decode( + 'unicode_escape') + expected_result = self.input.param('expected_result', '{"no_data_yet": "hit execute to run query"}') + expected_result = expected_result.replace('_STAR_', '*').replace('_SEM_', ';').decode('unicode_escape') + home = expanduser("~") + home += "\Downloads" + saved_file = "%s\%s" % (home, path) + try: + if path: + os.remove(saved_file) + else: + os.remove("%s\download" % home) + except OSError: + pass + NavigationHelper(self).navigate('Query') + if check_query: + self.queryHelper.execute_query(check_query) + self.queryHelper.save_result(path) + if not path: + saved_file = "%s\download" % home + f = open(saved_file, 'r') + content = f.read() + search_obj = re.search(expected_result, content, re.M | re.I) + self.assertTrue(search_obj, msg='Incorrect saved query result file: %s' % content) + + +class QueryWorkbenchHelper(TestCase): + def __init__(self, tc): + self.tc = tc + self.wait = WebDriverWait(tc.driver, timeout=250) + self.controls = QueryControls(tc.driver) + + def execute_query(self, query): + self.wait.until_not(lambda fn: + self.controls.query_top_screen().query_editor.is_displayed(), + "Query Editor not displayed") + self.controls.query_top_screen().query_editor.type(query) + self.controls.query_top_screen().execute_button.click() + self.controls.query_results_box() + self.controls.query_results_box().result_json_mode.get_text() + self.wait.until(lambda fn: + self.controls.query_top_screen().execute_button.is_displayed(), + "Query is still running?") + + def check_result(self, expected_results, mode='JSON'): + self.select_result_mode(mode) + if mode == 'JSON': + result = self.controls.query_results_box().result_json_mode.get_text() + elif mode == 'Table': + result = self.controls.query_results_box().result_table_mode.get_text() + elif mode == 'Tree': + result = self.controls.query_results_box().result_tree_mode.get_text() + elif mode == 'Plan': + result = self.controls.query_results_box().result_plan_mode.get_text() + elif mode == 'Plan Text': + result = self.controls.query_results_box().result_plan_text_mode.get_text() + search_obj = None + for expected_result in expected_results: + search_obj = re.search(expected_result, result, re.M | re.I) + if search_obj: + break + self.assertTrue(search_obj, msg='Incorrect query result: %s' % result) + + def check_summary_result(self, expected_result, mode='JSON'): + self.select_result_mode(mode) + result = self.controls.query_results_box().result_summary.get_text() + search_obj = re.search(expected_result, result, re.M | re.I) + self.assertTrue(search_obj, msg='Incorrect query summary result: %s' % result) + + def select_result_mode(self, mode='JSON'): + selected = self.controls.query_results_box().result_selected_mode.get_text() + if selected != mode: + if mode == 'JSON': + self.controls.query_results_box().result_select_json_mode.click() + elif mode == 'Table': + self.controls.query_results_box().result_select_table_mode.click() + elif mode == 'Tree': + self.controls.query_results_box().result_select_tree_mode.click() + elif mode == 'Plan': + self.controls.query_results_box().result_select_plan_mode.click() + elif mode == 'Plan Text': + self.controls.query_results_box().result_select_plan_text_mode.click() + + def check_bucket_analysis_result(self, expected_results): + result = self.controls.query_bucket_analysis().sidebar_body.get_text() + 
search_obj = None + for expected_result in expected_results: + search_obj = re.search(expected_result, result, re.M | re.I) + if search_obj: + break + self.assertTrue(search_obj, msg='Incorrect bucket analysis result: %s' % result) + + def save_query(self, path): + self.wait.until(lambda fn: + self.controls.query_top_screen().save_query.is_displayed(), + "Save button is not available") + time.sleep(5) + self.controls.query_top_screen().save_query.click() + self.wait.until(lambda fn: + self.controls.save_dialog().path.is_displayed(), + "Save dialog is not displayed") + self.controls.save_dialog().path.type_native(path) + self.controls.save_dialog().ok_button.click() + time.sleep(3) + + def save_result(self, path): + self.wait.until(lambda fn: + self.controls.query_results_box().result_save_button.is_displayed(), + "Save button is not available") + time.sleep(3) + self.controls.query_results_box().result_save_button.click() + self.wait.until(lambda fn: + self.controls.save_dialog().path.is_displayed(), + "Save dialog is not displayed") + self.controls.save_dialog().path.type_native(path) + self.controls.save_dialog().ok_button.click() + time.sleep(3) + + +class QueryControls: + def __init__(self, driver): + self.helper = ControlsHelper(driver) + + def query_top_screen(self): + self.execute_button = self.helper.find_control('query_top_screen', 'execute_button') + self.view_previous = self.helper.find_control('query_top_screen', 'view_previous') + self.view_next = self.helper.find_control('query_top_screen', 'view_next') + self.history_link = self.helper.find_control('query_top_screen', 'history_link') + self.page_count_label = self.helper.find_control('query_top_screen', 'page_count_label') + self.query_editor = self.helper.find_control('query_top_screen', 'query_editor') + self.query_editor_value = self.helper.find_control('query_top_screen', 'query_editor_value') + self.save_query = self.helper.find_control('query_top_screen', 'save_query') + return self + + def query_bucket_analysis(self): + self.refresh_button = self.helper.find_control('query_bucket_analysis', 'refresh_button') + self.resize_button = self.helper.find_control('query_bucket_analysis', 'resize_button') + self.sidebar_body = self.helper.find_control('query_bucket_analysis', 'sidebar_body') + return self + + def query_results_box(self): + self.result_box = self.helper.find_control('query_results_box', 'result_box') + self.result_select_json_mode = self.helper.find_control('query_results_box', 'result_select_mode', text="JSON") + self.result_select_table_mode = self.helper.find_control('query_results_box', 'result_select_mode', + text="Table") + self.result_select_tree_mode = self.helper.find_control('query_results_box', 'result_select_mode', text="Tree") + self.result_select_plan_mode = self.helper.find_control('query_results_box', 'result_select_mode', text="Plan") + self.result_select_plan_text_mode = self.helper.find_control('query_results_box', 'result_select_mode', + text="Plan Text") + self.result_selected_mode = self.helper.find_control('query_results_box', 'result_selected_mode') + self.result_summary = self.helper.find_control('query_results_box', 'result_summary') + self.result_json_mode = self.helper.find_control('query_results_box', 'result_json_mode') + self.result_table_mode = self.helper.find_control('query_results_box', 'result_table_mode') + self.result_tree_mode = self.helper.find_control('query_results_box', 'result_tree_mode') + self.result_plan_mode = self.helper.find_control('query_results_box', 
'result_plan_mode') + self.result_plan_text_mode = self.helper.find_control('query_results_box', 'result_plan_text_mode') + self.result_save_button = self.helper.find_control('query_results_box', 'result_save_button') + return self + + def save_dialog(self): + self.path = self.helper.find_control('query_save_screen', 'path') + self.cancel_button = self.helper.find_control('query_save_screen', 'cancel_button') + self.ok_button = self.helper.find_control('query_save_screen', 'ok_button') + return self diff --git a/pytests/ui/uisampletests.py b/pytests/ui/uisampletests.py index 32f2e58c8..68c08aa6e 100644 --- a/pytests/ui/uisampletests.py +++ b/pytests/ui/uisampletests.py @@ -71,11 +71,13 @@ def test_bucket_stats_mb_8538(self): NavigationHelper(self).navigate('Data Buckets') BucketHelper(self).create(self.bucket) - NavigationHelper(self).navigate('Views') + NavigationHelper(self).navigate('Indexes') + DdocViewHelper(self).click_view_tab(text='Views') view_name = 'test_view_ui' DdocViewHelper(self).create_view(view_name, view_name) NavigationHelper(self).navigate('Data Buckets') + BaseHelper(self).wait_ajax_loaded() BucketHelper(self).open_stats(self.bucket) total_views_st = BucketHelper(self).get_stat("views total disk size") view_st = BucketHelper(self).get_stat("disk size", block="view") @@ -120,7 +122,8 @@ def tearDown(self): try: super(InitializeTest, self).tearDown() finally: - self.cluster.shutdown() + if hasattr(self, 'cluster'): + self.cluster.shutdown() def test_initialize(self): try: @@ -388,6 +391,7 @@ def verify_read_only(self, bucket, view): self.assertFalse(btn.is_displayed(), "There is remove btn") self.log.info("Bucket check") navigator.navigate('Data Buckets') + BaseHelper(self).wait_ajax_loaded() BucketHelper(self).controls.bucket_info(bucket.name).arrow.click() self.assertFalse(BucketHelper(self).controls.edit_btn().is_displayed(), "Bucket can be edited") @@ -438,12 +442,12 @@ def test_external_user(self): if not expected_error: SettingsHelper(self).delete_external_user(username) - class RebalanceProgressTests(BaseUITestCase): def setUp(self): super(RebalanceProgressTests, self).setUp() self.helper = ServerHelper(self) - BaseHelper(self).login() + self.baseHelper = BaseHelper(self) + self.baseHelper.login() num_buckets = self.input.param("num_buckets", 1) self.buckets = [] NavigationHelper(self).navigate('Data Buckets') @@ -453,33 +457,30 @@ def setUp(self): saslPassword=bucket.sasl_password, proxyPort=STANDARD_BUCKET_PORT + i + 1) self.buckets.append(bucket) + self.baseHelper.loadSampleBucket(self.servers[0], 'beer') def tearDown(self): super(RebalanceProgressTests, self).tearDown() def test_rebalance_in(self): + self.baseHelper = BaseHelper(self) NavigationHelper(self).navigate('Server Nodes') ServerHelper(self).add(self.input) ServerHelper(self).start_rebalancing() transfer_out_stat = ServerHelper(self).get_server_rebalance_progress(self.servers[0], 'out') transfer_in_stat = ServerHelper(self).get_server_rebalance_progress(self.servers[1], 'in') - self.verify_stats(transfer_out_stat) - self.verify_stats(transfer_in_stat) + self._verify_stats(transfer_out_stat) + self._verify_stats(transfer_in_stat)
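The hunks above and below repeatedly insert BaseHelper(self).wait_ajax_loaded() after navigation and bucket operations so the UI can settle before the next element lookup. The helper itself lives in uibasetest.py and is not shown in this diff; a rough sketch of such a guard, assuming a spinner element marks in-flight AJAX (the locator and method body are illustrative assumptions, not the real implementation):

from selenium.webdriver.support.ui import WebDriverWait

def wait_ajax_loaded(self, timeout=30):
    # Illustrative only: the real helper may key off a different element
    # or a JS readiness flag exposed by the Couchbase UI.
    WebDriverWait(self.driver, timeout).until(
        lambda driver: not driver.execute_script(
            "return !!document.querySelector('.spinner')"),
        "ajax spinner still visible after %d sec" % timeout)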
- def verify_stats(self, stats): - bucket_presence = False - for bucket in self.buckets: - bucket_presence = re.match(r'.*Bucket\:.*{0}.*(1 out of 1)*'.format(self.buckets[0].name), - stats["bucket"]) is not None - if bucket_presence: - break - self.assertTrue(bucket_presence, "Bucket in stats %s has incorrect format" % stats) - self.assertTrue(int(stats["total_transfer"].split(':')[1].strip()) >= int(stats["estimated_transfer"].split(':')[1].strip()), + def _verify_stats(self, stats): + self.assertTrue(int(stats["total_transfer"]) >= int(stats["estimated_transfer"]), "total_transfer should be greater than estimated in stats %s" % stats) - self.assertTrue(re.match(r'.*Number of Active# vBuckets and Replica# vBuckets to transfer:.*Active#-.*Replica#-.*', - stats["vbuckets"]) is not None, "VBuckets in stats %s has incorrect format" % stats) + self.assertTrue(re.match(r'.*Active#-.*Replica#-.*',str(stats["vbuckets"])), + "VBuckets in stats %s has incorrect format" % stats) + -class GracefullFailoverTests(BaseUITestCase): + + +class GracefullFailoverTests(BaseUITestCase): def setUp(self): super(GracefullFailoverTests, self).setUp() self.master=self.servers[0] @@ -576,11 +577,11 @@ def test_delta_recovery_failover_251(self): RestConnection(self.servers[0]).monitorRebalance() self.log.info("Recovery checked") + class ViewsTests(BaseUITestCase): def setUp(self): super(ViewsTests, self).setUp() self._initialize_nodes() - #RestConnection(self.servers[0]).create_bucket(bucket='default', ramQuotaMB=500) num_buckets = self.input.param("num_buckets", 1) self.ddoc_name = self.input.param("ddoc_name", "ddoc1") self.view_name = self.input.param("view_name", "view1") @@ -773,13 +774,9 @@ def server_info(self, server_ip): text=server_ip) return self - def server_info_rebalance_progress(self, server_ip, direction): - if direction == 'in': - parent = 'rebalance_progress_in' - else: - parent = 'rebalance_progress_out' - return self.helper.find_control('server_info', 'rebalance_progress', - text=server_ip) + def server_info_rebalance_progress(self, server_ip): + return self.helper.find_control('server_info', 'rebalance_progress') + def rebalance_progress_bar(self, server_ip): return self.helper.find_control('server_info', 'rebalance_bar', parent_locator='server_row', text=server_ip) @@ -1240,6 +1237,7 @@ def navigate(self, tab): self.controls._navigation_tab_link(tab).click() self.wait.until(lambda fn: self._is_tab_selected(tab), "tab '%s' is not selected in %d sec" % (tab, self.wait._timeout)) + BaseHelper(self.tc).wait_ajax_loaded() self.tc.log.info("tab '%s' is selected" % tab) class ServerHelper(): @@ -1361,7 +1359,7 @@ def open_server_stats(self, server): def close_server_stats(self, server): self.tc.log.info("Close stats for server % s" % server.ip) - for i in [1,2,3]: + for i in xrange(3): try: self.controls.server_info(server.ip).server_arrow_opened.click() break @@ -1377,16 +1375,15 @@ def is_server_stats_opened(self, server): def get_server_rebalance_progress(self, server, direction): if not self.is_server_stats_opened(server): self.open_server_stats(server) - src = self.controls.server_info_rebalance_progress(server.ip, direction).get_inner_html() + BaseHelper(self.tc).wait_ajax_loaded() + src = self.controls.server_info_rebalance_progress(server.ip).get_text() src = src.split("Data being transferred %s" % direction)[1] + src = src.split('\n') + self.tc.log.info("Stats for %s: %s" % (server, src)) stats = {} - stats["bucket"] = "Bucket:%s" % src.split("Bucket:")[1].split("\n\n")[0].replace('\n',' ') - stats["total_transfer"] = "Total number of keys to be transferred:%s" %\ - src.split("Total number of keys to be transferred:")[1].split("\n\n")[0].replace('\n',' ') - stats["estimated_transfer"] = "Estimated number of keys transferred:%s" %\ - src.split("Estimated number of keys transferred:")[1].split("\n\n")[0].replace('\n',' ') - stats["vbuckets"] = "Number of Active# vBuckets and Replica# vBuckets to transfer:%s" %\ - src.split("vBuckets to transfer:")[1].split("\n\n")[0].replace('\n',' ').replace('', ' ') + stats["total_transfer"] = src[1].replace("Total number of keys to be transferred:", "") + stats["estimated_transfer"] = src[2].replace("Estimated number of keys transferred:", "") + stats["vbuckets"] = src[3].replace("Number of Active# vBuckets and Replica# vBuckets to transfer:","") self.close_server_stats(server) return stats @@ -1433,6 +1430,7 @@ def is_confirmation_failover_opened(self): return opened def confirm_failover(self, confirm=True, is_graceful=None, confirm_failover_check=False): + time.sleep(1) if is_graceful: self.controls.failover_confirmation().failover_conf_gracefull_option.check() self.tc.log.info("Graceful Failover Enabled") @@ -1542,6 +1540,7 @@ def check_recovery(self, server, option='full'): "Recovery btn is not displayed in %d sec" % (self.wait._timeout)) self.tc.log.info("Recovery checked") + class BucketHelper(): def __init__(self, tc): self.tc = tc @@ -1560,13 +1559,14 @@ def create(self, bucket): "Warning 'Cluster Memory Fully Allocated' appeared") self.fill_bucket_info(bucket) self.controls.bucket_pop_up().create_btn.click() + BaseHelper(self.tc).wait_ajax_loaded() self.tc.log.info("created bucket '%s'" % bucket.name) if self.controls.bucket_pop_up().create_bucket_pop_up.is_present(): - self.wait.until_not(lambda fn: - self.controls.bucket_pop_up().create_bucket_pop_up.is_displayed(), - "create new bucket pop up is not closed in %d sec" % self.wait._timeout) + self.wait.until_not(lambda fn: + self.controls.bucket_pop_up().create_bucket_pop_up.is_displayed(), + "create new bucket pop up is not closed in %d sec" % self.wait._timeout) self.wait.until(lambda fn: self.is_bucket_present(bucket), - "Bucket '%s' is not displayed" % bucket) + "Bucket '%s' is not displayed" % bucket) self.tc.log.info("bucket '%s' is displayed" % bucket) #self.wait.until(lambda fn: self.is_bucket_helthy(bucket), # "Bucket '%s' is not in healthy state" % bucket) @@ -1681,7 +1681,7 @@ def open_documents(self, bucket): try: self.controls.bucket_info(bucket.name).documents.click() break - except StaleElementReferenceException: + except AttributeError: pass def open_stats(self, bucket): @@ -1785,7 +1785,7 @@ def _fill_1_step(self, input): def _fill_2_step(self, input): if input.param("sample", None): self.controls.step_2_sample(input.param("sample", None)).check() - #TODO successfull loading? + #TODO successful loading?
def _fill_3_step(self, input): BucketHelper(self.tc).fill_bucket_info(Bucket(parse_bucket=input), @@ -1835,6 +1835,10 @@ def click_view_tab(self, text=''): def create_view(self, ddoc_name, view_name, dev_view=True): self.tc.log.info('trying create a view %s' % view_name) + BaseHelper(self.tc).wait_ajax_loaded() + self.wait.until(lambda fn: + self.controls.view_btn().create_view_btn.is_displayed(), + "Create View button is not displayed") self.controls.view_btn().create_view_btn.click() self.wait.until(lambda fn: self.controls.create_pop_up().pop_up.is_displayed(), @@ -1957,6 +1961,7 @@ def fill_edit_view_screen(self, view_name, action='save', reduce_fn='_count'): def verify_view_results(self, view_set, reduce_fn, value=0): self.tc.log.info('Verify View Results') + BaseHelper(self.tc).wait_ajax_loaded() if view_set == 'dev': self.wait.until(lambda fn: self.controls.view_results_container().dev_subset.is_displayed, @@ -2283,6 +2288,7 @@ def create_user(self, user, pwd, verify_pwd = None): self.controls.user_create_info().password.type(pwd, is_pwd=True) self.controls.user_create_info().verify_password.type(verify_pwd, is_pwd=True) self.controls.user_create_info().create_btn.click() + BaseHelper(self.tc).wait_ajax_loaded() self.wait.until(lambda fn: self.is_user_created() or self.is_error_present(), "No reaction for create btn in %d sec" % (self.wait._timeout)) if self.is_error_present(): diff --git a/pytests/ui/uixdcrtests.py b/pytests/ui/uixdcrtests.py index 569a3d4fd..ddad572fb 100644 --- a/pytests/ui/uixdcrtests.py +++ b/pytests/ui/uixdcrtests.py @@ -1,11 +1,6 @@ -import logger -import time -from selenium import webdriver -from selenium.webdriver.support.ui import WebDriverWait -from couchbase_helper.cluster import Cluster from membase.api.rest_client import RestConnection from uibasetest import * -from uisampletests import Bucket, NavigationHelper, BucketHelper +from uisampletests import Bucket, NavigationHelper from selenium.common.exceptions import StaleElementReferenceException from membase.helper.bucket_helper import BucketOperationHelper from membase.helper.cluster_helper import ClusterOperationHelper @@ -106,7 +101,7 @@ def test_create_replication(self): XDCRHelper(self).create_cluster_reference(name, ip, user, passwd) self.sleep(3) try: - XDCRHelper(self).create_replication(dest_cluster, src_bucket, dest_bucket, advanced_settings=advanced_settings) + XDCRHelper(self).create_replication(dest_cluster, src_bucket, dest_bucket, advanced_settings=advanced_settings) except Exception, ex: self.log.error(str(ex)) if error: @@ -214,18 +209,18 @@ def errors_advanced_settings(self): class XDCRHelper(): def __init__(self, tc): self.tc = tc - self.wait = WebDriverWait(tc.driver, timeout=100) + self.wait = WebDriverWait(tc.driver, timeout=50) self.controls = XDCRControls(tc.driver) - def create_cluster_reference(self, cluster_name, ip, username, password, cancel=False): self.wait.until(lambda fn: self.controls.create_cluster_reference_btn.is_displayed(), "create_cluster_reference_btn is not displayed in %d sec" % (self.wait._timeout)) time.sleep(3) self.tc.log.info("try to create cluster reference with name=%s, ip=%s" % (cluster_name, ip)) self.controls.create_cluster_reference_btn.click() + BaseHelper(self.tc).wait_ajax_loaded() self.wait.until(lambda fn: self.controls.create_reference_pop_up().pop_up.is_displayed(), - "create_cluster_reference_ popup is not displayed in %d sec" % (self.wait._timeout)) + "create_cluster_reference_ popup is not displayed in %d sec" % self.wait._timeout) if 
cluster_name: self.controls.create_reference_pop_up().name.type(cluster_name) if ip: @@ -238,14 +233,13 @@ def create_cluster_reference(self, cluster_name, ip, username, password, cancel= self.controls.create_reference_pop_up().create_btn.click() else: self.controls.create_reference_pop_up().cancel_btn.click() + time.sleep(2) + BaseHelper(self.tc).wait_ajax_loaded() self.wait.until(lambda fn: self._cluster_reference_pop_up_reaction(), - "there is no reaction in %d sec" % (self.wait._timeout)) + "there is no reaction in %d sec" % self.wait._timeout) if self.controls.error_reference().is_displayed(): - self.wait.until(lambda fn: self.controls.error_reference().get_text() != '', - "text didn't appear in %d sec" % (self.wait._timeout)) - raise Exception('Reference is not created: %s' % self.controls.error_reference().get_text()) - else: - self.tc.log.info("Reference has been created") + raise Exception("Error found: " + self.controls.error_reference().get_text()) + self.tc.log.info("Reference has been created") def _cluster_reference_pop_up_reaction(self): try: @@ -253,7 +247,7 @@ def _cluster_reference_pop_up_reaction(self): return not self.controls.create_reference_pop_up().pop_up.is_displayed() else: return self.controls.error_reference().is_displayed() - except StaleElementReferenceException as e: + except StaleElementReferenceException: return False def _get_error(self): @@ -296,13 +290,20 @@ def create_replication(self, remote_cluster, bucket, remote_bucket, cancel=False self.controls.advanced_settings().collection_interval.type(advanced_settings['collection_interval']) if 'logging' in advanced_settings: self.controls.advanced_settings().logging.select(value=advanced_settings['logging']) - if len([el for el in self.controls.errors_advanced_settings() if el.is_displayed() and el.get_text() != '']) > 0: - raise Exception('Advanced setting error: %s' % str([el.get_text() for el in self.controls.errors_advanced_settings() - if el.is_displayed() and el.get_text() != ''])) + time.sleep(3) + try: + errors = [el.get_text() for el in self.controls.errors_advanced_settings() if el.is_displayed() and el.get_text() != ''] + except Exception as e: + self.tc.log.info("exception when tried to get errors: %s" % e) + errors = [el.get_text() for el in self.controls.errors_advanced_settings() if + el.is_displayed() and el.get_text() != ''] + if len(errors): + raise Exception('Advanced setting error: %s' % str(errors)) if not cancel: self.controls.create_replication_pop_up().replicate_btn.click() else: self.controls.create_replication_pop_up().cancel_btn.click() + BaseHelper(self.tc).wait_ajax_loaded() all_errors = self.controls.error_replica() try: if all_errors: @@ -310,10 +311,10 @@ def create_replication(self, remote_cluster, bucket, remote_bucket, cancel=False if error.is_displayed(): raise Exception('Replication is not created: %s' % error.get_text()) except StaleElementReferenceException as e: - self.tc.log.info ("No error displayed while creating/cancelling a Replication") + self.tc.log.info("No error displayed while creating/cancelling a Replication") self.wait.until(lambda fn: self._cluster_replication_pop_up_reaction(), - "there is no reaction in %d sec" % (self.wait._timeout)) + "there is no reaction in %d sec" % self.wait._timeout) def _cluster_replication_pop_up_reaction(self): try: diff --git a/pytests/xdcr/lww.py b/pytests/xdcr/lww.py index 1a08c2151..0b7bd1601 100644 --- a/pytests/xdcr/lww.py +++ b/pytests/xdcr/lww.py
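Among the lww.py hunks below, _offset_wall_clock gains an offset_drift parameter: each successive node is skewed by a growing multiple of offset_secs until the counter reaches offset_drift, which deliberately desynchronizes the nodes of one cluster. A standalone restatement of that counter logic, handy for sanity-checking expected skews (the drifted_offsets helper is illustrative, not part of the patch):

def drifted_offsets(offset_secs, num_nodes, offset_drift=-1):
    # Mirrors the loop in _offset_wall_clock: node N is skewed by
    # offset_secs * counter, and the counter stops growing once it
    # reaches offset_drift.
    offsets, counter = [], 1
    for _ in range(num_nodes):
        offsets.append(offset_secs * counter)
        if offset_drift > 0 and counter < offset_drift:
            counter += 1
    return offsets

# drifted_offsets(300, 4, offset_drift=3) -> [300, 600, 900, 900]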
@@ -11,6 +11,8 @@ from couchbase.bucket import Bucket from couchbase_helper.cluster import Cluster from membase.helper.cluster_helper import ClusterOperationHelper +from membase.api.exception import XDCRCheckpointException +from memcached.helper.data_helper import VBucketAwareMemcached class Lww(XDCRNewBaseTest): @@ -28,11 +30,17 @@ def setUp(self): def tearDown(self): super(Lww, self).tearDown() + remote_client = RemoteMachineShellConnection(self._input.servers[6]) + command = "rm -rf /data/lww-backup" + output, error = remote_client.execute_command(command) + remote_client.log_command_output(output, error) if not self.skip_ntp: self._disable_ntp() - def _enable_ntp_and_sync(self, ntp_server="0.north-america.pool.ntp.org"): - for node in self._input.servers: + def _enable_ntp_and_sync(self, nodes=None, ntp_server="0.north-america.pool.ntp.org"): + if not nodes: + nodes = self._input.servers + for node in nodes: conn = RemoteMachineShellConnection(node) output, error = conn.execute_command("chkconfig ntpd on") conn.log_command_output(output, error) @@ -49,20 +57,23 @@ def _disable_ntp(self): output, error = conn.execute_command("/etc/init.d/ntpd stop") conn.log_command_output(output, error) - def _offset_wall_clock(self, cluster=None, offset_secs=0, inc=True): + def _offset_wall_clock(self, cluster=None, offset_secs=0, inc=True, offset_drift=-1): + counter = 1 for node in cluster.get_nodes(): conn = RemoteMachineShellConnection(node) output, error = conn.execute_command("date +%s") conn.log_command_output(output, error) curr_time = int(output[-1]) if inc: - new_time = curr_time + offset_secs + new_time = curr_time + (offset_secs * counter) else: - new_time = curr_time - offset_secs + new_time = curr_time - (offset_secs * counter) output, error = conn.execute_command("date --date @" + str(new_time)) conn.log_command_output(output, error) output, error = conn.execute_command("date --set='" + output[-1] + "'") conn.log_command_output(output, error) + if offset_drift > 0 and counter < offset_drift: + counter += 1 def _change_time_zone(self, cluster=None, time_zone="America/Los_Angeles"): for node in cluster.get_nodes(): @@ -141,6 +152,24 @@ def _start_cb_server(self, node): shell.start_couchbase() shell.disconnect() + def _get_max_cas(self, node, bucket, vbucket_id=0): + max_cas = 0 + conn = RemoteMachineShellConnection(node) + command = "/opt/couchbase/bin/cbstats " + node.ip + ":11210 vbucket-details " + str(vbucket_id) + " -b " + bucket + output, error = conn.execute_command(command) + conn.log_command_output(output, error) + for line in output: + if "max_cas" in line: + max_cas = line.split()[1] + return long(max_cas) + + def _get_vbucket_id(self, node, bucket, key): + conn = RemoteMachineShellConnection(node) + command = "curl -s http://" + node.ip + ":8091/pools/default/buckets/" + bucket + " | /opt/couchbase/bin/tools/vbuckettool - " + key + output, error = conn.execute_command(command) + conn.log_command_output(output, error) + return output[0].split()[5] + def test_lww_enable(self): src_conn = RestConnection(self.c1_cluster.get_master_node()) dest_conn = RestConnection(self.c2_cluster.get_master_node()) @@ -174,9 +203,9 @@ def test_replication_with_lww_default(self): self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = 
BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -197,9 +226,9 @@ def test_replication_with_lww_sasl(self): self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -220,9 +249,9 @@ def test_replication_with_lww_standard(self): self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -250,9 +279,9 @@ def test_replication_with_lww_and_no_lww(self): self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -269,9 +298,9 @@ def test_lww_extended_metadata(self): self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") self.log.info("LWW enabled on dest bucket as expected") - gen = DocumentGenerator('C1-lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) + gen = DocumentGenerator('lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen) - gen = DocumentGenerator('C2-lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) + gen = DocumentGenerator('lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen) data_path = src_conn.get_data_path() @@ -629,40 +658,12 @@ def test_seq_upd_on_uni_with_lww_disabled_target_and_src_wins(self): self.assertFalse(dest_conn.is_lww_enabled(), "LWW enabled on dest bucket") self.log.info("LWW not enabled on dest bucket as expected") - self.setup_xdcr() - self.merge_all_buckets() - - self.c1_cluster.pause_all_replications() - - self.sleep(30) - - src_def = self._get_python_sdk_client(self.c1_cluster.get_master_node().ip, 'default') - self.sleep(10) - dst_def = self._get_python_sdk_client(self.c2_cluster.get_master_node().ip, 'default') - self.sleep(10) - - gen = DocumentGenerator('lww', '{{"key":"value"}}', xrange(100), start=0, end=1) - self.c2_cluster.load_all_buckets_from_generator(gen) - gen = DocumentGenerator('lww', '{{"key1":"value1"}}', xrange(100), start=0, end=1) - self.c1_cluster.load_all_buckets_from_generator(gen) - # update doc at C1 
thrice - self._upsert(conn=src_def, doc_id='lww-0', old_key='key1', new_key='key2', new_val='value2') - self._upsert(conn=src_def, doc_id='lww-0', old_key='key2', new_key='key3', new_val='value3') - self._upsert(conn=src_def, doc_id='lww-0', old_key='key3', new_key='key4', new_val='value4') - # update doc at C2 twice - self._upsert(conn=dst_def, doc_id='lww-0', old_key='key', new_key='key1', new_val='value1') - self._upsert(conn=dst_def, doc_id='lww-0', old_key='key1', new_key='key2', new_val='value2') - - self.c1_cluster.resume_all_replications() - self._wait_for_replication_to_catchup() - - obj = src_def.get(key='lww-0') - self.assertDictContainsSubset({'key4':'value4'}, obj.value, "Src doc did not win using Rev Id") - obj = dst_def.get(key='lww-0') - self.assertDictContainsSubset({'key4':'value4'}, obj.value, "Src doc did not win using Rev Id") - self.log.info("Src doc won using Rev Id as expected") - - self.verify_results(skip_verify_data=['default']) + try: + self.setup_xdcr() + except Exception as e: + self.assertTrue("Replication between buckets with different ConflictResolutionType setting is not allowed" in str(e), + "ConflictResolutionType mismatch message not thrown as expected") + self.log.info("ConflictResolutionType mismatch message thrown as expected") def test_seq_upd_on_uni_with_lww_disabled_source_and_target_wins(self): src_conn = RestConnection(self.c1_cluster.get_master_node()) @@ -674,40 +675,12 @@ def test_seq_upd_on_uni_with_lww_disabled_source_and_target_wins(self): self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") self.log.info("LWW enabled on dest bucket as expected") - self.setup_xdcr() - self.merge_all_buckets() - - self.c1_cluster.pause_all_replications() - - self.sleep(30) - - src_def = self._get_python_sdk_client(self.c1_cluster.get_master_node().ip, 'default') - self.sleep(10) - dst_def = self._get_python_sdk_client(self.c2_cluster.get_master_node().ip, 'default') - self.sleep(10) - - gen = DocumentGenerator('lww', '{{"key":"value"}}', xrange(100), start=0, end=1) - self.c1_cluster.load_all_buckets_from_generator(gen) - gen = DocumentGenerator('lww', '{{"key1":"value1"}}', xrange(100), start=0, end=1) - self.c2_cluster.load_all_buckets_from_generator(gen) - # update doc at C2 thrice - self._upsert(conn=dst_def, doc_id='lww-0', old_key='key1', new_key='key2', new_val='value2') - self._upsert(conn=dst_def, doc_id='lww-0', old_key='key2', new_key='key3', new_val='value3') - self._upsert(conn=dst_def, doc_id='lww-0', old_key='key3', new_key='key4', new_val='value4') - # update doc at C1 twice - self._upsert(conn=src_def, doc_id='lww-0', old_key='key', new_key='key1', new_val='value1') - self._upsert(conn=src_def, doc_id='lww-0', old_key='key1', new_key='key2', new_val='value2') - - self.c1_cluster.resume_all_replications() - self._wait_for_replication_to_catchup() - - obj = src_def.get(key='lww-0') - self.assertDictContainsSubset({'key2':'value2'}, obj.value, "Target doc did not win using Rev Id") - obj = dst_def.get(key='lww-0') - self.assertDictContainsSubset({'key4':'value4'}, obj.value, "Target doc did not win using Rev Id") - self.log.info("Target doc won using Rev Id as expected") - - self.verify_results(skip_verify_data=['default'], skip_verify_revid=['default']) + try: + self.setup_xdcr() + except Exception as e: + self.assertTrue("Replication between buckets with different ConflictResolutionType setting is not allowed" in str(e), + "ConflictResolutionType mismatch message not thrown as expected") + 
self.log.info("ConflictResolutionType mismatch message thrown as expected") def test_seq_upd_on_bi_with_lww_disabled_on_both_clusters(self): src_conn = RestConnection(self.c1_cluster.get_master_node()) @@ -1072,9 +1045,9 @@ def test_lww_with_optimistic_threshold_change(self): self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -1096,9 +1069,9 @@ def test_lww_with_master_warmup(self): self.sleep(self._wait_timeout) - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.async_load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.async_load_all_buckets_from_generator(gen2) self.sleep(self._wait_timeout / 2) @@ -1122,9 +1095,9 @@ def test_lww_with_cb_restart_at_master(self): self.sleep(self._wait_timeout) - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.async_load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.async_load_all_buckets_from_generator(gen2) self.sleep(self._wait_timeout / 2) @@ -1150,9 +1123,9 @@ def test_lww_with_erlang_restart_at_master(self): self.sleep(self._wait_timeout) - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.async_load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.async_load_all_buckets_from_generator(gen2) self.sleep(self._wait_timeout / 2) @@ -1178,9 +1151,9 @@ def test_lww_with_memcached_restart_at_master(self): self.sleep(self._wait_timeout) - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.async_load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.async_load_all_buckets_from_generator(gen2) self.sleep(self._wait_timeout / 2) @@ -1457,9 +1430,9 @@ def test_lww_with_bucket_recreate(self): self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = 
BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -1480,9 +1453,9 @@ def test_lww_while_rebalancing_node_at_src(self): self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -1513,9 +1486,9 @@ def test_lww_while_failover_node_at_src(self): self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -1555,9 +1528,9 @@ def test_lww_with_rebalance_in_and_simult_upd_del(self): self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -1568,7 +1541,7 @@ def test_lww_with_rebalance_in_and_simult_upd_del(self): task.result() - self._wait_for_replication_to_catchup() + self._wait_for_replication_to_catchup(timeout=600) self.verify_results() @@ -1586,9 +1559,9 @@ def test_lww_with_rebalance_out_and_simult_upd_del(self): self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -1604,7 +1577,7 @@ def test_lww_with_rebalance_out_and_simult_upd_del(self): task = self.c1_cluster.async_rebalance_in() task.result() - self._wait_for_replication_to_catchup() + self._wait_for_replication_to_catchup(timeout=1200) self.verify_results() @@ -1622,9 +1595,9 @@ def test_lww_with_failover_and_simult_upd_del(self): self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = 
BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -1649,7 +1622,7 @@ def test_lww_with_failover_and_simult_upd_del(self): rebalance = self.cluster.async_rebalance(self.c1_cluster.get_nodes(), [], []) rebalance.result() - self._wait_for_replication_to_catchup() + self._wait_for_replication_to_catchup(timeout=1200) self.verify_results() @@ -1658,18 +1631,18 @@ def test_lww_disabled_extended_metadata(self): dest_conn = RestConnection(self.c2_cluster.get_master_node()) self._create_buckets(bucket='default', ramQuotaMB=100, src_lww=False, dst_lww=False) - self.assertTrue(src_conn.is_lww_enabled(), "LWW enabled on source bucket") + self.assertFalse(src_conn.is_lww_enabled(), "LWW enabled on source bucket") self.log.info("LWW not enabled on source bucket as expected") - self.assertTrue(dest_conn.is_lww_enabled(), "LWW enabled on dest bucket") + self.assertFalse(dest_conn.is_lww_enabled(), "LWW enabled on dest bucket") self.log.info("LWW not enabled on dest bucket as expected") self.setup_xdcr() self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen = DocumentGenerator('C1-lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) + gen = DocumentGenerator('lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen) - gen = DocumentGenerator('C2-lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) + gen = DocumentGenerator('lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen) self.c1_cluster.resume_all_replications() @@ -1698,50 +1671,22 @@ def test_lww_disabled_extended_metadata(self): "Conflict resolution mode is LWW in extended metadata of dest bucket") self.log.info("Conflict resolution mode is not LWW in extended metadata of dest bucket as expected") - def test_lww_src_disabled_dst_enabled_extended_metadata(self): + def test_lww_src_enabled_dst_disabled_extended_metadata(self): src_conn = RestConnection(self.c1_cluster.get_master_node()) dest_conn = RestConnection(self.c2_cluster.get_master_node()) self._create_buckets(bucket='default', ramQuotaMB=100, src_lww=True, dst_lww=False) - self.assertTrue(src_conn.is_lww_enabled(), "LWW enabled on source bucket") - self.log.info("LWW not enabled on source bucket as expected") - self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") - self.log.info("LWW enabled on dest bucket as expected") - - self.setup_xdcr() - self.merge_all_buckets() - self.c1_cluster.pause_all_replications() - - gen = DocumentGenerator('C1-lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) - self.c1_cluster.load_all_buckets_from_generator(gen) - gen = DocumentGenerator('C2-lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) - self.c2_cluster.load_all_buckets_from_generator(gen) - - self.c1_cluster.resume_all_replications() - - self._wait_for_replication_to_catchup() - - data_path = src_conn.get_data_path() - dump_file = data_path + "/default/0.couch.1" - cmd = "/opt/couchbase/bin/couch_dbdump --json " + dump_file - conn = RemoteMachineShellConnection(self.c1_cluster.get_master_node()) - output, error = conn.execute_command(cmd) - conn.log_command_output(output, error) - json_parsed = json.loads(output[1]) - self.assertEqual(json_parsed['conflict_resolution_mode'], 0, - "Conflict resolution mode is LWW in extended metadata of src bucket") - 
self.log.info("Conflict resolution mode is not LWW in extended metadata of src bucket as expected") + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertFalse(dest_conn.is_lww_enabled(), "LWW enabled on dest bucket") + self.log.info("LWW not enabled on dest bucket as expected") - data_path = dest_conn.get_data_path() - dump_file = data_path + "/default/0.couch.1" - cmd = "/opt/couchbase/bin/couch_dbdump --json " + dump_file - conn = RemoteMachineShellConnection(self.c2_cluster.get_master_node()) - output, error = conn.execute_command(cmd) - conn.log_command_output(output, error) - json_parsed = json.loads(output[1]) - self.assertEqual(json_parsed['conflict_resolution_mode'], 0, - "Conflict resolution mode is LWW in extended metadata of dest bucket") - self.log.info("Conflict resolution mode is not LWW in extended metadata of dest bucket as expected") + try: + self.setup_xdcr() + except Exception as e: + self.assertTrue("Replication between buckets with different ConflictResolutionType setting is not allowed" in str(e), + "ConflictResolutionType mismatch message not thrown as expected") + self.log.info("ConflictResolutionType mismatch message thrown as expected") def test_lww_with_nodes_reshuffle(self): src_conn = RestConnection(self.c1_cluster.get_master_node()) @@ -1760,30 +1705,34 @@ def test_lww_with_nodes_reshuffle(self): zones = src_conn.get_zone_names().keys() source_zone = zones[0] target_zone = "test_lww" - self.log.info("Current nodes in group {0} : {1}".format(source_zone, - str(src_conn.get_nodes_in_zone(source_zone).keys()))) - self.log.info("Creating new zone " + target_zone) - src_conn.add_zone(target_zone) - self.log.info("Moving {0} to new zone {1}".format(str(src_conn.get_nodes_in_zone(source_zone).keys()), - target_zone)) - src_conn.shuffle_nodes_in_zones(["{0}".format(str(src_conn.get_nodes_in_zone(source_zone).keys()))], - source_zone,target_zone) - - gen = DocumentGenerator('C1-lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) - self.c1_cluster.load_all_buckets_from_generator(gen) - gen = DocumentGenerator('C2-lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) - self.c2_cluster.load_all_buckets_from_generator(gen) - - self.c1_cluster.resume_all_replications() - - self._wait_for_replication_to_catchup() - self.log.info("Moving {0} back to old zone {1}".format(str(src_conn.get_nodes_in_zone(source_zone).keys()), - source_zone)) - src_conn.shuffle_nodes_in_zones(["{0}".format(str(src_conn.get_nodes_in_zone(source_zone).keys()))], - target_zone,source_zone) - self.log.info("Deleting new zone " + target_zone) - src_conn.delete_zone(target_zone) + try: + self.log.info("Current nodes in group {0} : {1}".format(source_zone, + str(src_conn.get_nodes_in_zone(source_zone).keys()))) + self.log.info("Creating new zone " + target_zone) + src_conn.add_zone(target_zone) + self.log.info("Moving {0} to new zone {1}".format(str(src_conn.get_nodes_in_zone(source_zone).keys()), + target_zone)) + src_conn.shuffle_nodes_in_zones(["{0}".format(str(src_conn.get_nodes_in_zone(source_zone).keys()))], + source_zone,target_zone) + + gen = DocumentGenerator('lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) + self.c2_cluster.load_all_buckets_from_generator(gen) + gen = DocumentGenerator('lww-', '{{"age": {0}}}', xrange(100), start=0, end=self._num_items) + self.c1_cluster.load_all_buckets_from_generator(gen) + + 
self.c1_cluster.resume_all_replications() + + self._wait_for_replication_to_catchup(timeout=600) + except Exception as e: + self.log.info(e) + finally: + self.log.info("Moving {0} back to old zone {1}".format(str(src_conn.get_nodes_in_zone(source_zone).keys()), + source_zone)) + src_conn.shuffle_nodes_in_zones(["{0}".format(str(src_conn.get_nodes_in_zone(source_zone).keys()))], + target_zone,source_zone) + self.log.info("Deleting new zone " + target_zone) + src_conn.delete_zone(target_zone) def test_lww_with_dst_failover_and_rebalance(self): src_conn = RestConnection(self.c1_cluster.get_master_node()) @@ -1798,9 +1747,9 @@ def test_lww_with_dst_failover_and_rebalance(self): self.setup_xdcr() self.merge_all_buckets() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.async_load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.async_load_all_buckets_from_generator(gen2) graceful = self._input.param("graceful", False) @@ -1821,56 +1770,7 @@ def test_lww_with_dst_failover_and_rebalance(self): self.sleep(60) - self._wait_for_replication_to_catchup() - - self.verify_results() - - def test_lww_with_dst_bucket_flush(self): - src_conn = RestConnection(self.c1_cluster.get_master_node()) - dest_conn = RestConnection(self.c2_cluster.get_master_node()) - - self._create_buckets(bucket='default', ramQuotaMB=100) - self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") - self.log.info("LWW enabled on source bucket as expected") - self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") - self.log.info("LWW enabled on dest bucket as expected") - - self.setup_xdcr() - self.merge_all_buckets() - - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) - self.c2_cluster.async_load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) - self.c1_cluster.async_load_all_buckets_from_generator(gen2) - - self.c2_cluster.flush_buckets() - - self.sleep(60) - - self.verify_results() - - def test_lww_with_dst_bucket_delete(self): - src_conn = RestConnection(self.c1_cluster.get_master_node()) - dest_conn = RestConnection(self.c2_cluster.get_master_node()) - - self._create_buckets(bucket='default', ramQuotaMB=100) - self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") - self.log.info("LWW enabled on source bucket as expected") - self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") - self.log.info("LWW enabled on dest bucket as expected") - - self.setup_xdcr() - self.merge_all_buckets() - - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) - self.c2_cluster.async_load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) - self.c1_cluster.async_load_all_buckets_from_generator(gen2) - - self.c1_cluster.delete_bucket(bucket_name='default') - self._create_buckets(bucket='default', ramQuotaMB=100, skip_dst=True) - - self.sleep(60) + self._wait_for_replication_to_catchup(timeout=600) self.verify_results() @@ -1887,9 +1787,9 @@ def test_lww_with_rebooting_non_master_node(self): self.setup_xdcr() self.merge_all_buckets() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", 
self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.async_load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.async_load_all_buckets_from_generator(gen2) rebooted_node_src = self.c1_cluster.reboot_one_node(self) @@ -1918,9 +1818,9 @@ def test_lww_with_firewall(self): self.setup_xdcr() self.merge_all_buckets() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.async_load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.async_load_all_buckets_from_generator(gen2) NodeHelper.enable_firewall(self.c2_cluster.get_master_node()) @@ -1944,6 +1844,11 @@ def test_lww_with_node_crash_cluster(self): self.setup_xdcr() self.merge_all_buckets() + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c2_cluster.async_load_all_buckets_from_generator(gen1) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c1_cluster.async_load_all_buckets_from_generator(gen2) + crashed_nodes = [] crash = self._input.param("crash", "").split('-') if "C1" in crash: @@ -1959,7 +1864,7 @@ def test_lww_with_node_crash_cluster(self): if "C1" in crash: NodeHelper.wait_warmup_completed(self.c1_cluster.get_nodes()) - gen1 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen1) self.async_perform_update_delete() @@ -1986,9 +1891,9 @@ def test_lww_with_auto_failover(self): self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -2015,9 +1920,9 @@ def test_lww_with_mixed_buckets(self): self.merge_all_buckets() self.c1_cluster.pause_all_replications() - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() @@ -2052,13 +1957,830 @@ def test_lww_with_diff_time_zones(self): self._change_time_zone(self.c2_cluster, time_zone="America/Chicago") self._change_time_zone(self.c3_cluster, time_zone="America/New_York") - gen1 = BlobGenerator("C3-lww-", "C3-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) 
self.c3_cluster.load_all_buckets_from_generator(gen1) - gen1 = BlobGenerator("C2-lww-", "C2-lww-", self._value_size, end=self._num_items) + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c2_cluster.load_all_buckets_from_generator(gen1) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c1_cluster.load_all_buckets_from_generator(gen2) + + self.c1_cluster.resume_all_replications() + + self.verify_results() + + def test_lww_with_dest_shutdown(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + self.setup_xdcr() + self.merge_all_buckets() + + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c2_cluster.async_load_all_buckets_from_generator(gen1) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c1_cluster.async_load_all_buckets_from_generator(gen2) + + crashed_nodes = self.c2_cluster.get_nodes() + + self._kill_processes(crashed_nodes=crashed_nodes) + + self.sleep(timeout=180) + + for crashed_node in crashed_nodes: + self._start_cb_server(crashed_node) + + self.async_perform_update_delete() + + NodeHelper.wait_warmup_completed(crashed_nodes) + + self.verify_results() + + def test_disk_full(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + self.setup_xdcr() + self.merge_all_buckets() + self.c1_cluster.pause_all_replications() + + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c2_cluster.load_all_buckets_from_generator(gen1) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c1_cluster.load_all_buckets_from_generator(gen2) + + self.c1_cluster.resume_all_replications() + + self._wait_for_replication_to_catchup() + + self.verify_results() + + self.sleep(self._wait_timeout) + + zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo")) + try: + for node in [self.src_master, self.dest_master]: + self.shell = RemoteMachineShellConnection(node) + self.shell.execute_cbcollect_info(zip_file) + if self.shell.extract_remote_info().type.lower() != "windows": + command = "unzip %s" % (zip_file) + output, error = self.shell.execute_command(command) + self.shell.log_command_output(output, error) + if len(error) > 0: + raise Exception("unable to unzip the files. Check unzip command output for help") + cmd = 'grep -R "Approaching full disk warning." 
cbcollect_info*/' + output, _ = self.shell.execute_command(cmd) + else: + cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'Approaching full disk warning.'".format( + self.src_master.ip, + self.src_master.rest_username, + self.src_master.rest_password) + output, _ = self.shell.execute_command(cmd) + self.assertNotEquals(len(output), 0, "Full disk warning not generated as expected in %s" % node.ip) + self.log.info("Full disk warning generated as expected in %s" % node.ip) + + self.shell.delete_files(zip_file) + self.shell.delete_files("cbcollect_info*") + except Exception as e: + self.log.info(e) + + def test_lww_with_checkpoint_validation(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + self.setup_xdcr() + self.merge_all_buckets() + self.c1_cluster.pause_all_replications() + + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c2_cluster.load_all_buckets_from_generator(gen1) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c1_cluster.load_all_buckets_from_generator(gen2) + + self.c1_cluster.resume_all_replications() + + self._wait_for_replication_to_catchup() + + self.sleep(60) + + vb0_node = None + nodes = self.c1_cluster.get_nodes() + ip = VBucketAwareMemcached(src_conn,'default').vBucketMap[0].split(':')[0] + for node in nodes: + if ip == node.ip: + vb0_node = node + if not vb0_node: + raise XDCRCheckpointException("Error determining the node containing active vb0") + rest_con = RestConnection(vb0_node) + repl = rest_con.get_replication_for_buckets('default', 'default') + try: + checkpoint_record = rest_con.get_recent_xdcr_vb_ckpt(repl['id']) + self.log.info("Checkpoint record : {0}".format(checkpoint_record)) + except Exception as e: + raise XDCRCheckpointException("Error retrieving last checkpoint document - {0}".format(e)) + + self.verify_results() + + def test_lww_with_backup_and_restore(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + + backup_host_conn = RemoteMachineShellConnection(self._input.servers[6]) + output, error = backup_host_conn.execute_command("cbbackupmgr config --archive /data/lww-backup --repo lww") + backup_host_conn.log_command_output(output, error) + output, error = backup_host_conn.execute_command("cbbackupmgr backup --archive /data/lww-backup --repo lww " + "--host couchbase://{0} --username Administrator " + "--password password".format(self._input.servers[0].ip)) + backup_host_conn.log_command_output(output, error) + output, error = backup_host_conn.execute_command("cbbackupmgr restore --archive /data/lww-backup --repo lww " + "--host couchbase://{0} --username Administrator " + "--password password".format(self._input.servers[2].ip)) + backup_host_conn.log_command_output(output, error) + + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + 
self.log.info("LWW enabled on dest bucket as expected") + + self.setup_xdcr() + self.merge_all_buckets() + self.c1_cluster.pause_all_replications() + + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c2_cluster.load_all_buckets_from_generator(gen1) - gen2 = BlobGenerator("C1-lww-", "C1-lww-", self._value_size, end=self._num_items) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) self.c1_cluster.load_all_buckets_from_generator(gen2) self.c1_cluster.resume_all_replications() + self._wait_for_replication_to_catchup() + self.verify_results() + + def test_lww_with_time_diff_in_src_nodes(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + self._offset_wall_clock(cluster=self.c1_cluster, offset_secs=300, offset_drift=3) + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + self.setup_xdcr() + self.merge_all_buckets() + self.c1_cluster.pause_all_replications() + + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c2_cluster.load_all_buckets_from_generator(gen1) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c1_cluster.load_all_buckets_from_generator(gen2) + + self.c1_cluster.resume_all_replications() + + self._wait_for_replication_to_catchup() + + self.verify_results() + + conn = RemoteMachineShellConnection(self.c1_cluster.get_master_node()) + conn.stop_couchbase() + + self._enable_ntp_and_sync() + self._disable_ntp() + + conn.start_couchbase() + + def test_lww_with_nfs(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + #test will fail if there is a problem with this permanently mounted nfs folder + src_conn.set_data_path(data_path='/mnt/nfs/var/nfsshare/test_lww') + dest_conn.set_data_path(data_path='/mnt/nfs/var/nfsshare/test_lww') + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + self.setup_xdcr() + self.merge_all_buckets() + self.c1_cluster.pause_all_replications() + + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c2_cluster.load_all_buckets_from_generator(gen1) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c1_cluster.load_all_buckets_from_generator(gen2) + + self.c1_cluster.resume_all_replications() + + self._wait_for_replication_to_catchup() + + self.verify_results() + + def test_lww_enabled_with_diff_topology_and_clocks_out_of_sync(self): + self.c3_cluster = self.get_cb_cluster_by_name('C3') + + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + c3_conn = RestConnection(self.c3_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100) + c3_conn.create_bucket(bucket='default', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1, + 
proxyPort=11211, bucketType='membase', replica_index=1, threadsNumber=3, + flushEnabled=1, lww=True) + self.c3_cluster.add_bucket(ramQuotaMB=100, bucket='default', authType='none', + saslPassword='', replicaNumber=1, proxyPort=11211, bucketType='membase', + evictionPolicy='valueOnly') + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on C1 bucket") + self.log.info("LWW enabled on C1 bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on C2 bucket") + self.log.info("LWW enabled on C2 bucket as expected") + self.assertTrue(c3_conn.is_lww_enabled(), "LWW not enabled on C3 bucket") + self.log.info("LWW enabled on C3 bucket as expected") + + self._offset_wall_clock(self.c1_cluster, offset_secs=3600) + self._offset_wall_clock(self.c2_cluster, offset_secs=7200) + self._offset_wall_clock(self.c3_cluster, offset_secs=10800) + + self.setup_xdcr() + self.merge_all_buckets() + + gen = DocumentGenerator('lww', '{{"key":"value"}}', xrange(100), start=0, end=1) + self.c1_cluster.load_all_buckets_from_generator(gen) + self._wait_for_replication_to_catchup() + + self.c1_cluster.pause_all_replications() + self.c2_cluster.pause_all_replications() + self.c3_cluster.pause_all_replications() + + self.sleep(30) + + src_def = self._get_python_sdk_client(self.c1_cluster.get_master_node().ip, 'default') + self.sleep(10) + dest_def = self._get_python_sdk_client(self.c2_cluster.get_master_node().ip, 'default') + self.sleep(10) + c3_def = self._get_python_sdk_client(self.c3_cluster.get_master_node().ip, 'default') + self.sleep(10) + + self._upsert(conn=dest_def, doc_id='lww-0', old_key='key', new_key='key1', new_val='value1') + self._upsert(conn=c3_def, doc_id='lww-0', old_key='key', new_key='key2', new_val='value2') + src_def.remove(key='lww-0') + + self.c1_cluster.resume_all_replications() + self.c2_cluster.resume_all_replications() + self.c3_cluster.resume_all_replications() + + self._wait_for_replication_to_catchup() + + obj = src_def.get(key='lww-0') + self.assertDictContainsSubset({'key2':'value2'}, obj.value, "C3 doc did not win using LWW") + obj = dest_def.get(key='lww-0') + self.assertDictContainsSubset({'key2':'value2'}, obj.value, "C3 doc did not win using LWW") + obj = c3_def.get(key='lww-0') + self.assertDictContainsSubset({'key2':'value2'}, obj.value, "C3 doc did not win using LWW") + self.log.info("C3 doc won using LWW as expected") + + conn1 = RemoteMachineShellConnection(self.c1_cluster.get_master_node()) + conn1.stop_couchbase() + conn2 = RemoteMachineShellConnection(self.c2_cluster.get_master_node()) + conn2.stop_couchbase() + conn3 = RemoteMachineShellConnection(self.c3_cluster.get_master_node()) + conn3.stop_couchbase() + + + self._enable_ntp_and_sync() + self._disable_ntp() + + conn1.start_couchbase() + conn2.start_couchbase() + conn3.start_couchbase() + + def test_lww_mixed_with_diff_topology_and_clocks_out_of_sync(self): + self.c3_cluster = self.get_cb_cluster_by_name('C3') + + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + c3_conn = RestConnection(self.c3_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100, src_lww=True, dst_lww=False) + c3_conn.create_bucket(bucket='default', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1, + proxyPort=11211, bucketType='membase', replica_index=1, threadsNumber=3, + flushEnabled=1, lww=True) + self.c3_cluster.add_bucket(ramQuotaMB=100, bucket='default', authType='none', + 
saslPassword='', replicaNumber=1, proxyPort=11211, bucketType='membase', + evictionPolicy='valueOnly') + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on C1 bucket") + self.log.info("LWW enabled on C1 bucket as expected") + self.assertFalse(dest_conn.is_lww_enabled(), "LWW enabled on C2 bucket") + self.log.info("LWW not enabled on C2 bucket as expected") + self.assertTrue(c3_conn.is_lww_enabled(), "LWW not enabled on C3 bucket") + self.log.info("LWW enabled on C3 bucket as expected") + + try: + self.setup_xdcr() + except Exception as e: + self.assertTrue("Replication between buckets with different ConflictResolutionType setting is not allowed" in str(e), + "ConflictResolutionType mismatch message not thrown as expected") + self.log.info("ConflictResolutionType mismatch message thrown as expected") + else: + self.fail("setup_xdcr did not fail despite conflict resolution type mismatch between C1 and C2 buckets") + + def test_v_topology_with_clocks_out_of_sync(self): + self.c3_cluster = self.get_cb_cluster_by_name('C3') + + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + c3_conn = RestConnection(self.c3_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100, src_lww=True, dst_lww=True) + c3_conn.create_bucket(bucket='default', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1, + proxyPort=11211, bucketType='membase', replica_index=1, threadsNumber=3, + flushEnabled=1, lww=True) + self.c3_cluster.add_bucket(ramQuotaMB=100, bucket='default', authType='none', + saslPassword='', replicaNumber=1, proxyPort=11211, bucketType='membase', + evictionPolicy='valueOnly') + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on C1 bucket") + self.log.info("LWW enabled on C1 bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on C2 bucket") + self.log.info("LWW enabled on C2 bucket as expected") + self.assertTrue(c3_conn.is_lww_enabled(), "LWW not enabled on C3 bucket") + self.log.info("LWW enabled on C3 bucket as expected") + + self._offset_wall_clock(self.c1_cluster, offset_secs=3600) + self._offset_wall_clock(self.c2_cluster, offset_secs=7200) + self._offset_wall_clock(self.c3_cluster, offset_secs=10800) + + self.setup_xdcr() + self.merge_all_buckets() + + gen1 = DocumentGenerator('lww', '{{"key":"value"}}', xrange(100), start=0, end=1) + self.c1_cluster.load_all_buckets_from_generator(gen1) + self._wait_for_replication_to_catchup() + + gen2 = DocumentGenerator('lww', '{{"key":"value"}}', xrange(100), start=0, end=1) + self.c3_cluster.load_all_buckets_from_generator(gen2) + self._wait_for_replication_to_catchup() + + self.c1_cluster.pause_all_replications() + self.c3_cluster.pause_all_replications() + + self.sleep(30) + + src_def = self._get_python_sdk_client(self.c1_cluster.get_master_node().ip, 'default') + self.sleep(10) + dest_def = self._get_python_sdk_client(self.c2_cluster.get_master_node().ip, 'default') + self.sleep(10) + c3_def = self._get_python_sdk_client(self.c3_cluster.get_master_node().ip, 'default') + self.sleep(10) + + self._upsert(conn=c3_def, doc_id='lww-0', old_key='key', new_key='key1', new_val='value1') + self._upsert(conn=src_def, doc_id='lww-0', old_key='key', new_key='key2', new_val='value2') + + self.c1_cluster.resume_all_replications() + self.c3_cluster.resume_all_replications() + + self._wait_for_replication_to_catchup() + + obj = dest_def.get(key='lww-0') + self.assertDictContainsSubset({'key1':'value1'}, obj.value, "C3 doc did not win using LWW") + + conn1 =
RemoteMachineShellConnection(self.c1_cluster.get_master_node()) + conn1.stop_couchbase() + conn2 = RemoteMachineShellConnection(self.c2_cluster.get_master_node()) + conn2.stop_couchbase() + conn3 = RemoteMachineShellConnection(self.c3_cluster.get_master_node()) + conn3.stop_couchbase() + + self._enable_ntp_and_sync() + self._disable_ntp() + + conn1.start_couchbase() + conn2.start_couchbase() + conn3.start_couchbase() + + def test_hlc_active_and_replica(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100, skip_dst=True) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + + gen = DocumentGenerator('lww', '{{"key1":"value1"}}', xrange(100), start=0, end=1) + self.c1_cluster.load_all_buckets_from_generator(gen) + + vbucket_id = self._get_vbucket_id(self.c1_cluster.get_master_node(), 'default', 'lww-0') + max_cas_active = self._get_max_cas(node=self.c1_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + + vbucket_id = self._get_vbucket_id(self._input.servers[1], 'default', 'lww-0') + max_cas_replica = self._get_max_cas(node=self._input.servers[1], bucket='default', vbucket_id=vbucket_id) + + self.log.info("max_cas_active: " + str(max_cas_active)) + self.log.info("max_cas_replica: " + str(max_cas_replica)) + self.assertTrue(not (max_cas_active ^ max_cas_replica), "HLC of active is not equal to replica") + self.log.info("HLC of active is equal to replica") + + def test_hlc(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + self.setup_xdcr() + self.merge_all_buckets() + self.c1_cluster.pause_all_replications() + + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c2_cluster.load_all_buckets_from_generator(gen1) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c1_cluster.load_all_buckets_from_generator(gen2) + + self.c1_cluster.resume_all_replications() + + max_cas_c1 = self._get_max_cas(node=self.c1_cluster.get_master_node(), bucket='default') + max_cas_c2 = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default') + self.log.info("max_cas C1: " + str(max_cas_c1)) + self.log.info("max_cas C2: " + str(max_cas_c2)) + self.assertTrue(not (max_cas_c1 ^ max_cas_c2), "HLC of C1 is not equal to C2") + self.log.info("HLC of C1 is equal to C2") + + def test_hlc_target_faster(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + self._offset_wall_clock(self.c2_cluster, offset_secs=900) + + self.setup_xdcr() + self.merge_all_buckets() + + self.c1_cluster.pause_all_replications() + + gen = 
DocumentGenerator('lww', '{{"key1":"value1"}}', xrange(100), start=0, end=1) + self.c2_cluster.load_all_buckets_from_generator(gen) + + vbucket_id = self._get_vbucket_id(self.c2_cluster.get_master_node(), 'default', 'lww-0') + max_cas_c2_before = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + + gen = DocumentGenerator('lww', '{{"key2":"value2"}}', xrange(100), start=0, end=1) + self.c1_cluster.load_all_buckets_from_generator(gen) + + self.c1_cluster.resume_all_replications() + + self._wait_for_replication_to_catchup() + + max_cas_c2_after = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + + dest_lww = self._get_python_sdk_client(self.c2_cluster.get_master_node().ip, 'default') + self.sleep(10) + + obj = dest_lww.get(key='lww-0') + self.assertDictContainsSubset({'key1':'value1'}, obj.value, "Target doc did not win using LWW") + self.log.info("Target doc won using LWW as expected") + + self.log.info("max_cas_c2_before: " + str(max_cas_c2_before)) + self.log.info("max_cas_c2_after: " + str(max_cas_c2_after)) + self.assertTrue(not (max_cas_c2_before ^ max_cas_c2_after), "HLC of C2 changed after replication") + self.log.info("HLC of C2 did not change after replication as expected") + + conn = RemoteMachineShellConnection(self.c2_cluster.get_master_node()) + conn.stop_couchbase() + + self._enable_ntp_and_sync() + self._disable_ntp() + + conn.start_couchbase() + + def test_hlc_source_faster(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + self._offset_wall_clock(self.c1_cluster, offset_secs=900) + + self.setup_xdcr() + self.merge_all_buckets() + + self.c1_cluster.pause_all_replications() + + gen = DocumentGenerator('lww', '{{"key1":"value1"}}', xrange(100), start=0, end=1) + self.c2_cluster.load_all_buckets_from_generator(gen) + + vbucket_id = self._get_vbucket_id(self.c2_cluster.get_master_node(), 'default', 'lww-0') + max_cas_c2_before = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + + gen = DocumentGenerator('lww', '{{"key2":"value2"}}', xrange(100), start=0, end=1) + self.c1_cluster.load_all_buckets_from_generator(gen) + + self.c1_cluster.resume_all_replications() + + max_cas_c2_after = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + + dest_lww = self._get_python_sdk_client(self.c2_cluster.get_master_node().ip, 'default') + self.sleep(10) + + obj = dest_lww.get(key='lww-0') + self.assertDictContainsSubset({'key2':'value2'}, obj.value, "Src doc did not win using LWW") + self.log.info("Src doc won using LWW as expected") + + self.log.info("max_cas_c2_before: " + str(max_cas_c2_before)) + self.log.info("max_cas_c2_after: " + str(max_cas_c2_after)) + self.assertTrue(not ((max_cas_c2_after + (~max_cas_c2_before +1)) >> 63 & 1), "HLC of C2 is not greater than before replication") + self.log.info("HLC of C2 is greater than before replication as expected") + + conn = RemoteMachineShellConnection(self.c1_cluster.get_master_node()) + conn.stop_couchbase() + + 
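# NOTE: the bitwise assertions in these HLC tests read as follows:
# 'not (a ^ b)' asserts that two max_cas values are identical, and
# 'not ((after + (~before + 1)) >> 63 & 1)' computes after - before via
# two's complement and then tests the 64-bit sign bit, i.e. it asserts the
# HLC did not move backwards. A plainer equivalent (hlc_not_behind is
# illustrative, not part of the change):
#
#   def hlc_not_behind(after, before):
#       return ((after - before) & (1 << 63)) == 0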
self._enable_ntp_and_sync() + self._disable_ntp() + + conn.start_couchbase() + + def test_hlc_within_cluster_target_faster(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + self._offset_wall_clock(self.c2_cluster, offset_secs=900) + + self.setup_xdcr() + self.merge_all_buckets() + + self.c1_cluster.pause_all_replications() + + gen = DocumentGenerator('lww', '{{"key1":"value1"}}', xrange(100), start=0, end=1) + self.c2_cluster.load_all_buckets_from_generator(gen) + + vbucket_id = self._get_vbucket_id(self.c2_cluster.get_master_node(), 'default', 'lww-0') + max_cas_c2_before = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + + gen = DocumentGenerator('lww', '{{"key2":"value2"}}', xrange(100), start=0, end=1) + self.c1_cluster.load_all_buckets_from_generator(gen) + + self.c1_cluster.resume_all_replications() + + self._wait_for_replication_to_catchup() + + max_cas_c2_after = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + + dest_lww = self._get_python_sdk_client(self.c2_cluster.get_master_node().ip, 'default') + self.sleep(10) + + obj = dest_lww.get(key='lww-0') + self.assertDictContainsSubset({'key1':'value1'}, obj.value, "Target doc did not win using LWW") + self.log.info("Target doc won using LWW as expected") + + self.log.info("max_cas_c2_before: " + str(max_cas_c2_before)) + self.log.info("max_cas_c2_after: " + str(max_cas_c2_after)) + self.assertTrue(not (max_cas_c2_before ^ max_cas_c2_after), "HLC of C2 changed after replication") + self.log.info("HLC of C2 did not change after replication as expected") + + self._upsert(conn=dest_lww, doc_id='lww-0', old_key='key1', new_key='key3', new_val='key3') + max_cas_c2_after_new_mutation = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + self.log.info("max_cas_c2_after_new_mutation: " + str(max_cas_c2_after_new_mutation)) + self.assertTrue(not ((max_cas_c2_after_new_mutation + (~max_cas_c2_after +1)) >> 63 & 1), "HLC of C2 is not greater after new mutation") + self.log.info("HLC of C2 is greater after new mutation as expected") + + conn = RemoteMachineShellConnection(self.c2_cluster.get_master_node()) + conn.stop_couchbase() + + self._enable_ntp_and_sync() + self._disable_ntp() + + conn.start_couchbase() + + def test_hlc_within_cluster_source_faster(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + self._offset_wall_clock(self.c1_cluster, offset_secs=900) + + self.setup_xdcr() + self.merge_all_buckets() + + self.c1_cluster.pause_all_replications() + + gen = DocumentGenerator('lww', '{{"key1":"value1"}}', xrange(100), start=0, end=1) + 
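# NOTE: _get_max_cas() presumably shells out to cbstats, where ep-engine
# exposes the per-vbucket HLC as the max_cas field of vbucket-details.
# A rough sketch under that assumption (get_max_cas, the cbstats path, and
# the line parsing are illustrative):
#
#   def get_max_cas(shell, bucket, vbucket_id):
#       cmd = '/opt/couchbase/bin/cbstats localhost:11210 -b %s vbucket-details %s' \
#             % (bucket, vbucket_id)
#       output, _ = shell.execute_command(cmd)
#       for line in output:
#           if 'max_cas' in line:
#               return int(line.split(':')[-1].strip())
#       return 0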
self.c2_cluster.load_all_buckets_from_generator(gen) + + vbucket_id = self._get_vbucket_id(self.c2_cluster.get_master_node(), 'default', 'lww-0') + max_cas_c2_before = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + + gen = DocumentGenerator('lww', '{{"key2":"value2"}}', xrange(100), start=0, end=1) + self.c1_cluster.load_all_buckets_from_generator(gen) + + self.c1_cluster.resume_all_replications() + + max_cas_c2_after = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + + dest_lww = self._get_python_sdk_client(self.c2_cluster.get_master_node().ip, 'default') + self.sleep(10) + + obj = dest_lww.get(key='lww-0') + self.assertDictContainsSubset({'key2':'value2'}, obj.value, "Src doc did not win using LWW") + self.log.info("Src doc won using LWW as expected") + + self.log.info("max_cas_c2_before: " + str(max_cas_c2_before)) + self.log.info("max_cas_c2_after: " + str(max_cas_c2_after)) + self.assertTrue(not ((max_cas_c2_after + (~max_cas_c2_before +1)) >> 63 & 1), "HLC of C2 is not greater than before replication") + self.log.info("HLC of C2 is greater than before replication as expected") + + self._upsert(conn=dest_lww, doc_id='lww-0', old_key='key2', new_key='key3', new_val='key3') + max_cas_c2_after_new_mutation = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + self.log.info("max_cas_c2_after_new_mutation: " + str(max_cas_c2_after_new_mutation)) + self.assertTrue(not ((max_cas_c2_after_new_mutation + (~max_cas_c2_after +1)) >> 63 & 1), "HLC of C2 is not greater after new mutation") + self.log.info("HLC of C2 is greater after new mutation as expected") + + conn = RemoteMachineShellConnection(self.c1_cluster.get_master_node()) + conn.stop_couchbase() + + self._enable_ntp_and_sync() + self._disable_ntp() + + conn.start_couchbase() + + def test_hlc_ordering_with_delay_source_faster(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + self._offset_wall_clock(self.c1_cluster, offset_secs=900) + + self.setup_xdcr() + self.merge_all_buckets() + + self.c1_cluster.pause_all_replications() + + gen = DocumentGenerator('lww', '{{"key1":"value1"}}', xrange(100), start=0, end=1) + self.c1_cluster.load_all_buckets_from_generator(gen) + + vbucket_id = self._get_vbucket_id(self.c1_cluster.get_master_node(), 'default', 'lww-0') + hlc_c1 = self._get_max_cas(node=self.c1_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + + self.sleep(timeout=1200) + + gen = DocumentGenerator('lww', '{{"key2":"value2"}}', xrange(100), start=0, end=1) + self.c2_cluster.load_all_buckets_from_generator(gen) + + vbucket_id = self._get_vbucket_id(self.c2_cluster.get_master_node(), 'default', 'lww-0') + hlc_c2_1 = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + + self.c1_cluster.resume_all_replications() + + dest_lww = self._get_python_sdk_client(self.c2_cluster.get_master_node().ip, 'default') + self.sleep(10) + + obj = dest_lww.get(key='lww-0') + self.assertDictContainsSubset({'key2':'value2'}, 
obj.value, "Target doc did not win using LWW") + self.log.info("Target doc won using LWW as expected") + + hlc_c2_2 = self._get_max_cas(node=self.c2_cluster.get_master_node(), bucket='default', vbucket_id=vbucket_id) + + self.log.info("hlc_c1: " + str(hlc_c1)) + self.log.info("hlc_c2_1: " + str(hlc_c2_1)) + self.log.info("hlc_c2_2: " + str(hlc_c2_2)) + self.assertTrue(not (hlc_c2_1 ^ hlc_c2_2), "HLC of C2 changed after replication") + self.log.info("HLC of C2 did not change after replication as expected") + + conn = RemoteMachineShellConnection(self.c1_cluster.get_master_node()) + conn.stop_couchbase() + + self._enable_ntp_and_sync() + self._disable_ntp() + + conn.start_couchbase() + + def test_lww_with_two_ntp_pools(self): + self._enable_ntp_and_sync(nodes=self.c1_cluster.get_nodes(), ntp_server="0.north-america.pool.ntp.org") + self._enable_ntp_and_sync(nodes=self.c2_cluster.get_nodes(), ntp_server="3.north-america.pool.ntp.org") + + src_conn = RestConnection(self.c1_cluster.get_master_node()) + dest_conn = RestConnection(self.c2_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + self.setup_xdcr() + self.merge_all_buckets() + self.c1_cluster.pause_all_replications() + + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c2_cluster.load_all_buckets_from_generator(gen1) + gen2 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c1_cluster.load_all_buckets_from_generator(gen2) + + self.c1_cluster.resume_all_replications() + + self.verify_results() + + def test_conflict_resolution_after_warmup(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100, skip_dst=True) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + + NodeHelper.wait_warmup_completed([self.c1_cluster.warmup_node(master=True)]) + + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket after warmup") + self.log.info("LWW enabled on source bucket after warmup as expected") + + def test_conflict_resolution_mode_with_bucket_delete_and_recreate(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100, skip_dst=True) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + + self.c1_cluster.delete_bucket(bucket_name='default') + self._create_buckets(bucket='default', ramQuotaMB=100, src_lww=False, skip_dst=True) + + self.assertFalse(src_conn.is_lww_enabled(), "LWW enabled on source bucket after recreate") + self.log.info("LWW not enabled on source bucket after recreation as expected") + + def test_conflict_resolution_mode_edit(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100, skip_dst=True) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + + conn = RemoteMachineShellConnection(self.c1_cluster.get_master_node()) + command = "curl -X POST -u 
Administrator:password " + self.c1_cluster.get_master_node().ip + \ + ":8091/pools/default/buckets/default -d name=default -d conflictResolutionType=seqno " + \ + "-d authType=none -d proxyPort=11212 -d ramQuotaMB=100" + output, error = conn.execute_command(command) + conn.log_command_output(output, error) + self.assertTrue("Conflict resolution type not allowed in update bucket" in str(output), + "Expected error message not found on editing conflict resolution type") + self.log.info("Expected error message found on editing conflict resolution type") + + def test_conflict_resolution_mode_after_swap_rebalance(self): + src_conn = RestConnection(self.c1_cluster.get_master_node()) + + self._create_buckets(bucket='default', ramQuotaMB=100, skip_dst=True) + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + + gen1 = BlobGenerator("lww-", "lww-", self._value_size, end=self._num_items) + self.c1_cluster.load_all_buckets_from_generator(gen1) + + self.c1_cluster.swap_rebalance_master() + + src_conn = RestConnection(self.c1_cluster.get_master_node()) + + self.assertTrue(src_conn.is_lww_enabled(), "LWW not enabled on source bucket after swap rebalance") + self.log.info("LWW enabled on source bucket after swap rebalance as expected") diff --git a/pytests/xdcr/upgradeXDCR.py b/pytests/xdcr/upgradeXDCR.py index 9658f7bf5..3ab57eee3 100644 --- a/pytests/xdcr/upgradeXDCR.py +++ b/pytests/xdcr/upgradeXDCR.py @@ -14,6 +14,7 @@ from couchbase_helper.documentgenerator import BlobGenerator from remote.remote_util import RemoteMachineShellConnection from couchbase_helper.document import DesignDocument, View +from testconstants import STANDARD_BUCKET_PORT class UpgradeTests(NewUpgradeBaseTest,XDCRNewBaseTest): def setUp(self): @@ -236,7 +237,27 @@ def offline_cluster_upgrade(self): expected_rows = sum([len(kv_store) for kv_store in bucket.kvs.values()]) self._verify_ddocs(expected_rows, [bucket_name], self.ddocs_dest, self.dest_master) - if float(self.initial_version[:2]) == 3.1 and float(self.upgrade_versions[0][:2]) == 4.1: + if float(self.upgrade_versions[0][:3]) == 4.6: + self.log.info("##### Testing LWW as we are upgrading to 4.6 #####") + if "src" in upgrade_nodes: + src_conn = RestConnection(self.src_master) + src_conn.delete_bucket(bucket='default') + src_conn.create_bucket(bucket='lww', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1, + proxyPort=STANDARD_BUCKET_PORT + 1, bucketType='membase', replica_index=1, threadsNumber=3, + flushEnabled=1, lww=True) + self.assertTrue(src_conn.is_lww_enabled(bucket='lww'), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + + if "dest" in upgrade_nodes: + dest_conn = RestConnection(self.dest_master) + dest_conn.delete_bucket(bucket='default') + dest_conn.create_bucket(bucket='lww', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1, + proxyPort=STANDARD_BUCKET_PORT + 1, bucketType='membase', replica_index=1, threadsNumber=3, + flushEnabled=1, lww=True) + self.assertTrue(dest_conn.is_lww_enabled(bucket='lww'), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + if float(self.initial_version[:3]) == 3.1 and float(self.upgrade_versions[0][:3]) == 4.1: goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\ + '/goxdcr.log*' for node in self.src_cluster.get_nodes(): @@ -376,7 +397,27 @@ def online_cluster_upgrade(self): expected_rows = 
sum([len(kv_store) for kv_store in bucket.kvs.values()]) self._verify_ddocs(expected_rows, [bucket_name], self.ddocs_dest, self.dest_master) - if float(self.initial_version[:2]) == 3.1 and float(self.upgrade_versions[0][:2]) == 4.1: + if float(self.upgrade_versions[0][:3]) == 4.6: + self.log.info("##### Testing LWW as we are upgrading to 4.6 #####") + src_conn = RestConnection(self.src_master) + dest_conn = RestConnection(self.dest_master) + + src_conn.delete_bucket(bucket='default') + dest_conn.delete_bucket(bucket='default') + + src_conn.create_bucket(bucket='lww', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1, + proxyPort=STANDARD_BUCKET_PORT + 1, bucketType='membase', replica_index=1, threadsNumber=3, + flushEnabled=1, lww=True) + dest_conn.create_bucket(bucket='lww', ramQuotaMB=100, authType='none', saslPassword='', replicaNumber=1, + proxyPort=STANDARD_BUCKET_PORT + 1, bucketType='membase', replica_index=1, threadsNumber=3, + flushEnabled=1, lww=True) + + self.assertTrue(src_conn.is_lww_enabled(bucket='lww'), "LWW not enabled on source bucket") + self.log.info("LWW enabled on source bucket as expected") + self.assertTrue(dest_conn.is_lww_enabled(bucket='lww'), "LWW not enabled on dest bucket") + self.log.info("LWW enabled on dest bucket as expected") + + if float(self.initial_version[:3]) == 3.1 and float(self.upgrade_versions[0][:3]) == 4.1: goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\ + '/goxdcr.log*' for node in self.src_cluster.get_nodes(): @@ -441,7 +482,7 @@ def incremental_offline_upgrade(self): self.merge_all_buckets() self.verify_results() self.sleep(self.wait_timeout * 5, "Let clusters work for some time") - if float(self.initial_version[:2]) == 3.1 and float(self.upgrade_versions[0][:2]) == 4.1: + if float(self.initial_version[:3]) == 3.1 and float(self.upgrade_versions[0][:3]) == 4.1: goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\ + '/goxdcr.log*' for node in self.src_cluster.get_nodes(): @@ -635,7 +676,7 @@ def test_backward_compatibility(self): self._post_upgrade_ops() self.sleep(60) self.verify_results() - if float(self.initial_version[:2]) == 3.1 and float(self.upgrade_versions[0][:2]) == 4.1: + if float(self.initial_version[:3]) == 3.1 and float(self.upgrade_versions[0][:3]) == 4.1: goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\ + '/goxdcr.log*' for node in self.src_cluster.get_nodes(): diff --git a/resources/imex/json_1000_lines b/resources/imex/json_1000_lines new file mode 100644 index 000000000..2d855c3ca --- /dev/null +++ b/resources/imex/json_1000_lines @@ -0,0 +1,1000 @@ +{"name": "pymc189", "age": 88, "index": 189, "body": "VTKGNKUHMP"} +{"name": "pymc735", "age": 28, "index": 735, "body": "VTKGNKUHMP"} +{"name": "pymc959", "age": 50, "index": 959, "body": "VTKGNKUHMP"} +{"name": "pymc272", "age": 70, "index": 272, "body": "VTKGNKUHMP"} +{"name": "pymc884", "age": 76, "index": 884, "body": "VTKGNKUHMP"} +{"name": "pymc239", "age": 37, "index": 239, "body": "VTKGNKUHMP"} +{"name": "pymc77", "age": 77, "index": 77, "body": "VTKGNKUHMP"} +{"name": "pymc886", "age": 78, "index": 886, "body": "VTKGNKUHMP"} +{"name": "pymc95", "age": 95, "index": 95, "body": "VTKGNKUHMP"} +{"name": "pymc73", "age": 73, "index": 73, "body": "VTKGNKUHMP"} +{"name": "pymc373", "age": 70, "index": 373, "body": "VTKGNKUHMP"} +{"name": "pymc459", "age": 55, "index": 459, "body": "VTKGNKUHMP"} +{"name": "pymc282", "age": 80, "index": 282, "body": "VTKGNKUHMP"} +{"name": "pymc966", "age": 57, "index": 966, 
"body": "VTKGNKUHMP"} +{"name": "pymc947", "age": 38, "index": 947, "body": "VTKGNKUHMP"} +{"name": "pymc334", "age": 31, "index": 334, "body": "VTKGNKUHMP"} +{"name": "pymc367", "age": 64, "index": 367, "body": "VTKGNKUHMP"} +{"name": "pymc868", "age": 60, "index": 868, "body": "VTKGNKUHMP"} +{"name": "pymc358", "age": 55, "index": 358, "body": "VTKGNKUHMP"} +{"name": "pymc658", "age": 52, "index": 658, "body": "VTKGNKUHMP"} +{"name": "pymc262", "age": 60, "index": 262, "body": "VTKGNKUHMP"} +{"name": "pymc844", "age": 36, "index": 844, "body": "VTKGNKUHMP"} +{"name": "pymc153", "age": 52, "index": 153, "body": "VTKGNKUHMP"} +{"name": "pymc30", "age": 30, "index": 30, "body": "VTKGNKUHMP"} +{"name": "pymc529", "age": 24, "index": 529, "body": "VTKGNKUHMP"} +{"name": "pymc276", "age": 74, "index": 276, "body": "VTKGNKUHMP"} +{"name": "pymc498", "age": 94, "index": 498, "body": "VTKGNKUHMP"} +{"name": "pymc638", "age": 32, "index": 638, "body": "VTKGNKUHMP"} +{"name": "pymc184", "age": 83, "index": 184, "body": "VTKGNKUHMP"} +{"name": "pymc159", "age": 58, "index": 159, "body": "VTKGNKUHMP"} +{"name": "pymc668", "age": 62, "index": 668, "body": "VTKGNKUHMP"} +{"name": "pymc112", "age": 11, "index": 112, "body": "VTKGNKUHMP"} +{"name": "pymc62", "age": 62, "index": 62, "body": "VTKGNKUHMP"} +{"name": "pymc9", "age": 9, "index": 9, "body": "VTKGNKUHMP"} +{"name": "pymc25", "age": 25, "index": 25, "body": "VTKGNKUHMP"} +{"name": "pymc361", "age": 58, "index": 361, "body": "VTKGNKUHMP"} +{"name": "pymc12", "age": 12, "index": 12, "body": "VTKGNKUHMP"} +{"name": "pymc32", "age": 32, "index": 32, "body": "VTKGNKUHMP"} +{"name": "pymc362", "age": 59, "index": 362, "body": "VTKGNKUHMP"} +{"name": "pymc265", "age": 63, "index": 265, "body": "VTKGNKUHMP"} +{"name": "pymc221", "age": 19, "index": 221, "body": "VTKGNKUHMP"} +{"name": "pymc162", "age": 61, "index": 162, "body": "VTKGNKUHMP"} +{"name": "pymc274", "age": 72, "index": 274, "body": "VTKGNKUHMP"} +{"name": "pymc801", "age": 94, "index": 801, "body": "VTKGNKUHMP"} +{"name": "pymc356", "age": 53, "index": 356, "body": "VTKGNKUHMP"} +{"name": "pymc58", "age": 58, "index": 58, "body": "VTKGNKUHMP"} +{"name": "pymc91", "age": 91, "index": 91, "body": "VTKGNKUHMP"} +{"name": "pymc933", "age": 24, "index": 933, "body": "VTKGNKUHMP"} +{"name": "pymc478", "age": 74, "index": 478, "body": "VTKGNKUHMP"} +{"name": "pymc152", "age": 51, "index": 152, "body": "VTKGNKUHMP"} +{"name": "pymc110", "age": 9, "index": 110, "body": "VTKGNKUHMP"} +{"name": "pymc816", "age": 8, "index": 816, "body": "VTKGNKUHMP"} +{"name": "pymc845", "age": 37, "index": 845, "body": "VTKGNKUHMP"} +{"name": "pymc191", "age": 90, "index": 191, "body": "VTKGNKUHMP"} +{"name": "pymc257", "age": 55, "index": 257, "body": "VTKGNKUHMP"} +{"name": "pymc323", "age": 20, "index": 323, "body": "VTKGNKUHMP"} +{"name": "pymc170", "age": 69, "index": 170, "body": "VTKGNKUHMP"} +{"name": "pymc559", "age": 54, "index": 559, "body": "VTKGNKUHMP"} +{"name": "pymc890", "age": 82, "index": 890, "body": "VTKGNKUHMP"} +{"name": "pymc736", "age": 29, "index": 736, "body": "VTKGNKUHMP"} +{"name": "pymc224", "age": 22, "index": 224, "body": "VTKGNKUHMP"} +{"name": "pymc757", "age": 50, "index": 757, "body": "VTKGNKUHMP"} +{"name": "pymc696", "age": 90, "index": 696, "body": "VTKGNKUHMP"} +{"name": "pymc92", "age": 92, "index": 92, "body": "VTKGNKUHMP"} +{"name": "pymc369", "age": 66, "index": 369, "body": "VTKGNKUHMP"} +{"name": "pymc489", "age": 85, "index": 489, "body": "VTKGNKUHMP"} +{"name": 
"pymc325", "age": 22, "index": 325, "body": "VTKGNKUHMP"} +{"name": "pymc354", "age": 51, "index": 354, "body": "VTKGNKUHMP"} +{"name": "pymc851", "age": 43, "index": 851, "body": "VTKGNKUHMP"} +{"name": "pymc54", "age": 54, "index": 54, "body": "VTKGNKUHMP"} +{"name": "pymc146", "age": 45, "index": 146, "body": "VTKGNKUHMP"} +{"name": "pymc89", "age": 89, "index": 89, "body": "VTKGNKUHMP"} +{"name": "pymc619", "age": 13, "index": 619, "body": "VTKGNKUHMP"} +{"name": "pymc360", "age": 57, "index": 360, "body": "VTKGNKUHMP"} +{"name": "pymc843", "age": 35, "index": 843, "body": "VTKGNKUHMP"} +{"name": "pymc832", "age": 24, "index": 832, "body": "VTKGNKUHMP"} +{"name": "pymc539", "age": 34, "index": 539, "body": "VTKGNKUHMP"} +{"name": "pymc322", "age": 19, "index": 322, "body": "VTKGNKUHMP"} +{"name": "pymc737", "age": 30, "index": 737, "body": "VTKGNKUHMP"} +{"name": "pymc881", "age": 73, "index": 881, "body": "VTKGNKUHMP"} +{"name": "pymc957", "age": 48, "index": 957, "body": "VTKGNKUHMP"} +{"name": "pymc99", "age": 99, "index": 99, "body": "VTKGNKUHMP"} +{"name": "pymc256", "age": 54, "index": 256, "body": "VTKGNKUHMP"} +{"name": "pymc949", "age": 40, "index": 949, "body": "VTKGNKUHMP"} +{"name": "pymc72", "age": 72, "index": 72, "body": "VTKGNKUHMP"} +{"name": "pymc128", "age": 27, "index": 128, "body": "VTKGNKUHMP"} +{"name": "pymc291", "age": 89, "index": 291, "body": "VTKGNKUHMP"} +{"name": "pymc939", "age": 30, "index": 939, "body": "VTKGNKUHMP"} +{"name": "pymc569", "age": 64, "index": 569, "body": "VTKGNKUHMP"} +{"name": "pymc865", "age": 57, "index": 865, "body": "VTKGNKUHMP"} +{"name": "pymc277", "age": 75, "index": 277, "body": "VTKGNKUHMP"} +{"name": "pymc139", "age": 38, "index": 139, "body": "VTKGNKUHMP"} +{"name": "pymc69", "age": 69, "index": 69, "body": "VTKGNKUHMP"} +{"name": "pymc79", "age": 79, "index": 79, "body": "VTKGNKUHMP"} +{"name": "pymc220", "age": 18, "index": 220, "body": "VTKGNKUHMP"} +{"name": "pymc896", "age": 88, "index": 896, "body": "VTKGNKUHMP"} +{"name": "pymc29", "age": 29, "index": 29, "body": "VTKGNKUHMP"} +{"name": "pymc876", "age": 68, "index": 876, "body": "VTKGNKUHMP"} +{"name": "pymc55", "age": 55, "index": 55, "body": "VTKGNKUHMP"} +{"name": "pymc271", "age": 69, "index": 271, "body": "VTKGNKUHMP"} +{"name": "pymc977", "age": 68, "index": 977, "body": "VTKGNKUHMP"} +{"name": "pymc961", "age": 52, "index": 961, "body": "VTKGNKUHMP"} +{"name": "pymc225", "age": 23, "index": 225, "body": "VTKGNKUHMP"} +{"name": "pymc690", "age": 84, "index": 690, "body": "VTKGNKUHMP"} +{"name": "pymc243", "age": 41, "index": 243, "body": "VTKGNKUHMP"} +{"name": "pymc648", "age": 42, "index": 648, "body": "VTKGNKUHMP"} +{"name": "pymc377", "age": 74, "index": 377, "body": "VTKGNKUHMP"} +{"name": "pymc261", "age": 59, "index": 261, "body": "VTKGNKUHMP"} +{"name": "pymc379", "age": 76, "index": 379, "body": "VTKGNKUHMP"} +{"name": "pymc837", "age": 29, "index": 837, "body": "VTKGNKUHMP"} +{"name": "pymc98", "age": 98, "index": 98, "body": "VTKGNKUHMP"} +{"name": "pymc872", "age": 64, "index": 872, "body": "VTKGNKUHMP"} +{"name": "pymc827", "age": 19, "index": 827, "body": "VTKGNKUHMP"} +{"name": "pymc7", "age": 7, "index": 7, "body": "VTKGNKUHMP"} +{"name": "pymc366", "age": 63, "index": 366, "body": "VTKGNKUHMP"} +{"name": "pymc82", "age": 82, "index": 82, "body": "VTKGNKUHMP"} +{"name": "pymc283", "age": 81, "index": 283, "body": "VTKGNKUHMP"} +{"name": "pymc734", "age": 27, "index": 734, "body": "VTKGNKUHMP"} +{"name": "pymc41", "age": 41, "index": 41, "body": 
"VTKGNKUHMP"} +{"name": "pymc862", "age": 54, "index": 862, "body": "VTKGNKUHMP"} +{"name": "pymc181", "age": 80, "index": 181, "body": "VTKGNKUHMP"} +{"name": "pymc958", "age": 49, "index": 958, "body": "VTKGNKUHMP"} +{"name": "pymc127", "age": 26, "index": 127, "body": "VTKGNKUHMP"} +{"name": "pymc548", "age": 43, "index": 548, "body": "VTKGNKUHMP"} +{"name": "pymc118", "age": 17, "index": 118, "body": "VTKGNKUHMP"} +{"name": "pymc280", "age": 78, "index": 280, "body": "VTKGNKUHMP"} +{"name": "pymc685", "age": 79, "index": 685, "body": "VTKGNKUHMP"} +{"name": "pymc102", "age": 1, "index": 102, "body": "VTKGNKUHMP"} +{"name": "pymc368", "age": 65, "index": 368, "body": "VTKGNKUHMP"} +{"name": "pymc278", "age": 76, "index": 278, "body": "VTKGNKUHMP"} +{"name": "pymc286", "age": 84, "index": 286, "body": "VTKGNKUHMP"} +{"name": "pymc0", "age": 0, "index": 0, "body": "VTKGNKUHMP"} +{"name": "pymc253", "age": 51, "index": 253, "body": "VTKGNKUHMP"} +{"name": "pymc163", "age": 62, "index": 163, "body": "VTKGNKUHMP"} +{"name": "pymc805", "age": 98, "index": 805, "body": "VTKGNKUHMP"} +{"name": "pymc469", "age": 65, "index": 469, "body": "VTKGNKUHMP"} +{"name": "pymc206", "age": 4, "index": 206, "body": "VTKGNKUHMP"} +{"name": "pymc825", "age": 17, "index": 825, "body": "VTKGNKUHMP"} +{"name": "pymc578", "age": 73, "index": 578, "body": "VTKGNKUHMP"} +{"name": "pymc86", "age": 86, "index": 86, "body": "VTKGNKUHMP"} +{"name": "pymc122", "age": 21, "index": 122, "body": "VTKGNKUHMP"} +{"name": "pymc289", "age": 87, "index": 289, "body": "VTKGNKUHMP"} +{"name": "pymc808", "age": 0, "index": 808, "body": "VTKGNKUHMP"} +{"name": "pymc57", "age": 57, "index": 57, "body": "VTKGNKUHMP"} +{"name": "pymc108", "age": 7, "index": 108, "body": "VTKGNKUHMP"} +{"name": "pymc692", "age": 86, "index": 692, "body": "VTKGNKUHMP"} +{"name": "pymc94", "age": 94, "index": 94, "body": "VTKGNKUHMP"} +{"name": "pymc213", "age": 11, "index": 213, "body": "VTKGNKUHMP"} +{"name": "pymc975", "age": 66, "index": 975, "body": "VTKGNKUHMP"} +{"name": "pymc160", "age": 59, "index": 160, "body": "VTKGNKUHMP"} +{"name": "pymc898", "age": 90, "index": 898, "body": "VTKGNKUHMP"} +{"name": "pymc173", "age": 72, "index": 173, "body": "VTKGNKUHMP"} +{"name": "pymc836", "age": 28, "index": 836, "body": "VTKGNKUHMP"} +{"name": "pymc176", "age": 75, "index": 176, "body": "VTKGNKUHMP"} +{"name": "pymc804", "age": 97, "index": 804, "body": "VTKGNKUHMP"} +{"name": "pymc820", "age": 12, "index": 820, "body": "VTKGNKUHMP"} +{"name": "pymc893", "age": 85, "index": 893, "body": "VTKGNKUHMP"} +{"name": "pymc20", "age": 20, "index": 20, "body": "VTKGNKUHMP"} +{"name": "pymc821", "age": 13, "index": 821, "body": "VTKGNKUHMP"} +{"name": "pymc372", "age": 69, "index": 372, "body": "VTKGNKUHMP"} +{"name": "pymc332", "age": 29, "index": 332, "body": "VTKGNKUHMP"} +{"name": "pymc618", "age": 12, "index": 618, "body": "VTKGNKUHMP"} +{"name": "pymc695", "age": 89, "index": 695, "body": "VTKGNKUHMP"} +{"name": "pymc754", "age": 47, "index": 754, "body": "VTKGNKUHMP"} +{"name": "pymc807", "age": 100, "index": 807, "body": "VTKGNKUHMP"} +{"name": "pymc863", "age": 55, "index": 863, "body": "VTKGNKUHMP"} +{"name": "pymc364", "age": 61, "index": 364, "body": "VTKGNKUHMP"} +{"name": "pymc883", "age": 75, "index": 883, "body": "VTKGNKUHMP"} +{"name": "pymc680", "age": 74, "index": 680, "body": "VTKGNKUHMP"} +{"name": "pymc46", "age": 46, "index": 46, "body": "VTKGNKUHMP"} +{"name": "pymc758", "age": 51, "index": 758, "body": "VTKGNKUHMP"} +{"name": "pymc873", 
"age": 65, "index": 873, "body": "VTKGNKUHMP"} +{"name": "pymc109", "age": 8, "index": 109, "body": "VTKGNKUHMP"} +{"name": "pymc149", "age": 48, "index": 149, "body": "VTKGNKUHMP"} +{"name": "pymc956", "age": 47, "index": 956, "body": "VTKGNKUHMP"} +{"name": "pymc234", "age": 32, "index": 234, "body": "VTKGNKUHMP"} +{"name": "pymc743", "age": 36, "index": 743, "body": "VTKGNKUHMP"} +{"name": "pymc296", "age": 94, "index": 296, "body": "VTKGNKUHMP"} +{"name": "pymc806", "age": 99, "index": 806, "body": "VTKGNKUHMP"} +{"name": "pymc74", "age": 74, "index": 74, "body": "VTKGNKUHMP"} +{"name": "pymc238", "age": 36, "index": 238, "body": "VTKGNKUHMP"} +{"name": "pymc67", "age": 67, "index": 67, "body": "VTKGNKUHMP"} +{"name": "pymc71", "age": 71, "index": 71, "body": "VTKGNKUHMP"} +{"name": "pymc930", "age": 21, "index": 930, "body": "VTKGNKUHMP"} +{"name": "pymc251", "age": 49, "index": 251, "body": "VTKGNKUHMP"} +{"name": "pymc854", "age": 46, "index": 854, "body": "VTKGNKUHMP"} +{"name": "pymc241", "age": 39, "index": 241, "body": "VTKGNKUHMP"} +{"name": "pymc336", "age": 33, "index": 336, "body": "VTKGNKUHMP"} +{"name": "pymc2", "age": 2, "index": 2, "body": "VTKGNKUHMP"} +{"name": "pymc328", "age": 25, "index": 328, "body": "VTKGNKUHMP"} +{"name": "pymc52", "age": 52, "index": 52, "body": "VTKGNKUHMP"} +{"name": "pymc210", "age": 8, "index": 210, "body": "VTKGNKUHMP"} +{"name": "pymc973", "age": 64, "index": 973, "body": "VTKGNKUHMP"} +{"name": "pymc857", "age": 49, "index": 857, "body": "VTKGNKUHMP"} +{"name": "pymc929", "age": 20, "index": 929, "body": "VTKGNKUHMP"} +{"name": "pymc78", "age": 78, "index": 78, "body": "VTKGNKUHMP"} +{"name": "pymc722", "age": 15, "index": 722, "body": "VTKGNKUHMP"} +{"name": "pymc327", "age": 24, "index": 327, "body": "VTKGNKUHMP"} +{"name": "pymc14", "age": 14, "index": 14, "body": "VTKGNKUHMP"} +{"name": "pymc744", "age": 37, "index": 744, "body": "VTKGNKUHMP"} +{"name": "pymc183", "age": 82, "index": 183, "body": "VTKGNKUHMP"} +{"name": "pymc329", "age": 26, "index": 329, "body": "VTKGNKUHMP"} +{"name": "pymc549", "age": 44, "index": 549, "body": "VTKGNKUHMP"} +{"name": "pymc921", "age": 12, "index": 921, "body": "VTKGNKUHMP"} +{"name": "pymc19", "age": 19, "index": 19, "body": "VTKGNKUHMP"} +{"name": "pymc724", "age": 17, "index": 724, "body": "VTKGNKUHMP"} +{"name": "pymc281", "age": 79, "index": 281, "body": "VTKGNKUHMP"} +{"name": "pymc207", "age": 5, "index": 207, "body": "VTKGNKUHMP"} +{"name": "pymc156", "age": 55, "index": 156, "body": "VTKGNKUHMP"} +{"name": "pymc172", "age": 71, "index": 172, "body": "VTKGNKUHMP"} +{"name": "pymc140", "age": 39, "index": 140, "body": "VTKGNKUHMP"} +{"name": "pymc229", "age": 27, "index": 229, "body": "VTKGNKUHMP"} +{"name": "pymc147", "age": 46, "index": 147, "body": "VTKGNKUHMP"} +{"name": "pymc941", "age": 32, "index": 941, "body": "VTKGNKUHMP"} +{"name": "pymc130", "age": 29, "index": 130, "body": "VTKGNKUHMP"} +{"name": "pymc168", "age": 67, "index": 168, "body": "VTKGNKUHMP"} +{"name": "pymc954", "age": 45, "index": 954, "body": "VTKGNKUHMP"} +{"name": "pymc97", "age": 97, "index": 97, "body": "VTKGNKUHMP"} +{"name": "pymc856", "age": 48, "index": 856, "body": "VTKGNKUHMP"} +{"name": "pymc50", "age": 50, "index": 50, "body": "VTKGNKUHMP"} +{"name": "pymc935", "age": 26, "index": 935, "body": "VTKGNKUHMP"} +{"name": "pymc124", "age": 23, "index": 124, "body": "VTKGNKUHMP"} +{"name": "pymc759", "age": 52, "index": 759, "body": "VTKGNKUHMP"} +{"name": "pymc216", "age": 14, "index": 216, "body": 
"VTKGNKUHMP"} +{"name": "pymc228", "age": 26, "index": 228, "body": "VTKGNKUHMP"} +{"name": "pymc275", "age": 73, "index": 275, "body": "VTKGNKUHMP"} +{"name": "pymc895", "age": 87, "index": 895, "body": "VTKGNKUHMP"} +{"name": "pymc824", "age": 16, "index": 824, "body": "VTKGNKUHMP"} +{"name": "pymc749", "age": 42, "index": 749, "body": "VTKGNKUHMP"} +{"name": "pymc132", "age": 31, "index": 132, "body": "VTKGNKUHMP"} +{"name": "pymc940", "age": 31, "index": 940, "body": "VTKGNKUHMP"} +{"name": "pymc121", "age": 20, "index": 121, "body": "VTKGNKUHMP"} +{"name": "pymc288", "age": 86, "index": 288, "body": "VTKGNKUHMP"} +{"name": "pymc203", "age": 1, "index": 203, "body": "VTKGNKUHMP"} +{"name": "pymc63", "age": 63, "index": 63, "body": "VTKGNKUHMP"} +{"name": "pymc15", "age": 15, "index": 15, "body": "VTKGNKUHMP"} +{"name": "pymc164", "age": 63, "index": 164, "body": "VTKGNKUHMP"} +{"name": "pymc830", "age": 22, "index": 830, "body": "VTKGNKUHMP"} +{"name": "pymc137", "age": 36, "index": 137, "body": "VTKGNKUHMP"} +{"name": "pymc31", "age": 31, "index": 31, "body": "VTKGNKUHMP"} +{"name": "pymc721", "age": 14, "index": 721, "body": "VTKGNKUHMP"} +{"name": "pymc192", "age": 91, "index": 192, "body": "VTKGNKUHMP"} +{"name": "pymc629", "age": 23, "index": 629, "body": "VTKGNKUHMP"} +{"name": "pymc753", "age": 46, "index": 753, "body": "VTKGNKUHMP"} +{"name": "pymc126", "age": 25, "index": 126, "body": "VTKGNKUHMP"} +{"name": "pymc186", "age": 85, "index": 186, "body": "VTKGNKUHMP"} +{"name": "pymc359", "age": 56, "index": 359, "body": "VTKGNKUHMP"} +{"name": "pymc22", "age": 22, "index": 22, "body": "VTKGNKUHMP"} +{"name": "pymc120", "age": 19, "index": 120, "body": "VTKGNKUHMP"} +{"name": "pymc678", "age": 72, "index": 678, "body": "VTKGNKUHMP"} +{"name": "pymc747", "age": 40, "index": 747, "body": "VTKGNKUHMP"} +{"name": "pymc699", "age": 93, "index": 699, "body": "VTKGNKUHMP"} +{"name": "pymc264", "age": 62, "index": 264, "body": "VTKGNKUHMP"} +{"name": "pymc365", "age": 62, "index": 365, "body": "VTKGNKUHMP"} +{"name": "pymc119", "age": 18, "index": 119, "body": "VTKGNKUHMP"} +{"name": "pymc924", "age": 15, "index": 924, "body": "VTKGNKUHMP"} +{"name": "pymc158", "age": 57, "index": 158, "body": "VTKGNKUHMP"} +{"name": "pymc659", "age": 53, "index": 659, "body": "VTKGNKUHMP"} +{"name": "pymc23", "age": 23, "index": 23, "body": "VTKGNKUHMP"} +{"name": "pymc66", "age": 66, "index": 66, "body": "VTKGNKUHMP"} +{"name": "pymc331", "age": 28, "index": 331, "body": "VTKGNKUHMP"} +{"name": "pymc335", "age": 32, "index": 335, "body": "VTKGNKUHMP"} +{"name": "pymc978", "age": 69, "index": 978, "body": "VTKGNKUHMP"} +{"name": "pymc810", "age": 2, "index": 810, "body": "VTKGNKUHMP"} +{"name": "pymc39", "age": 39, "index": 39, "body": "VTKGNKUHMP"} +{"name": "pymc60", "age": 60, "index": 60, "body": "VTKGNKUHMP"} +{"name": "pymc846", "age": 38, "index": 846, "body": "VTKGNKUHMP"} +{"name": "pymc59", "age": 59, "index": 59, "body": "VTKGNKUHMP"} +{"name": "pymc116", "age": 15, "index": 116, "body": "VTKGNKUHMP"} +{"name": "pymc51", "age": 51, "index": 51, "body": "VTKGNKUHMP"} +{"name": "pymc963", "age": 54, "index": 963, "body": "VTKGNKUHMP"} +{"name": "pymc260", "age": 58, "index": 260, "body": "VTKGNKUHMP"} +{"name": "pymc835", "age": 27, "index": 835, "body": "VTKGNKUHMP"} +{"name": "pymc746", "age": 39, "index": 746, "body": "VTKGNKUHMP"} +{"name": "pymc40", "age": 40, "index": 40, "body": "VTKGNKUHMP"} +{"name": "pymc113", "age": 12, "index": 113, "body": "VTKGNKUHMP"} +{"name": "pymc144", "age": 
43, "index": 144, "body": "VTKGNKUHMP"} +{"name": "pymc68", "age": 68, "index": 68, "body": "VTKGNKUHMP"} +{"name": "pymc237", "age": 35, "index": 237, "body": "VTKGNKUHMP"} +{"name": "pymc231", "age": 29, "index": 231, "body": "VTKGNKUHMP"} +{"name": "pymc742", "age": 35, "index": 742, "body": "VTKGNKUHMP"} +{"name": "pymc233", "age": 31, "index": 233, "body": "VTKGNKUHMP"} +{"name": "pymc333", "age": 30, "index": 333, "body": "VTKGNKUHMP"} +{"name": "pymc10", "age": 10, "index": 10, "body": "VTKGNKUHMP"} +{"name": "pymc26", "age": 26, "index": 26, "body": "VTKGNKUHMP"} +{"name": "pymc36", "age": 36, "index": 36, "body": "VTKGNKUHMP"} +{"name": "pymc860", "age": 52, "index": 860, "body": "VTKGNKUHMP"} +{"name": "pymc5", "age": 5, "index": 5, "body": "VTKGNKUHMP"} +{"name": "pymc56", "age": 56, "index": 56, "body": "VTKGNKUHMP"} +{"name": "pymc85", "age": 85, "index": 85, "body": "VTKGNKUHMP"} +{"name": "pymc3", "age": 3, "index": 3, "body": "VTKGNKUHMP"} +{"name": "pymc80", "age": 80, "index": 80, "body": "VTKGNKUHMP"} +{"name": "pymc169", "age": 68, "index": 169, "body": "VTKGNKUHMP"} +{"name": "pymc822", "age": 14, "index": 822, "body": "VTKGNKUHMP"} +{"name": "pymc897", "age": 89, "index": 897, "body": "VTKGNKUHMP"} +{"name": "pymc867", "age": 59, "index": 867, "body": "VTKGNKUHMP"} +{"name": "pymc694", "age": 88, "index": 694, "body": "VTKGNKUHMP"} +{"name": "pymc179", "age": 78, "index": 179, "body": "VTKGNKUHMP"} +{"name": "pymc768", "age": 61, "index": 768, "body": "VTKGNKUHMP"} +{"name": "pymc818", "age": 10, "index": 818, "body": "VTKGNKUHMP"} +{"name": "pymc193", "age": 92, "index": 193, "body": "VTKGNKUHMP"} +{"name": "pymc419", "age": 15, "index": 419, "body": "VTKGNKUHMP"} +{"name": "pymc61", "age": 61, "index": 61, "body": "VTKGNKUHMP"} +{"name": "pymc689", "age": 83, "index": 689, "body": "VTKGNKUHMP"} +{"name": "pymc639", "age": 33, "index": 639, "body": "VTKGNKUHMP"} +{"name": "pymc819", "age": 11, "index": 819, "body": "VTKGNKUHMP"} +{"name": "pymc285", "age": 83, "index": 285, "body": "VTKGNKUHMP"} +{"name": "pymc953", "age": 44, "index": 953, "body": "VTKGNKUHMP"} +{"name": "pymc269", "age": 67, "index": 269, "body": "VTKGNKUHMP"} +{"name": "pymc65", "age": 65, "index": 65, "body": "VTKGNKUHMP"} +{"name": "pymc499", "age": 95, "index": 499, "body": "VTKGNKUHMP"} +{"name": "pymc178", "age": 77, "index": 178, "body": "VTKGNKUHMP"} +{"name": "pymc829", "age": 21, "index": 829, "body": "VTKGNKUHMP"} +{"name": "pymc363", "age": 60, "index": 363, "body": "VTKGNKUHMP"} +{"name": "pymc161", "age": 60, "index": 161, "body": "VTKGNKUHMP"} +{"name": "pymc688", "age": 82, "index": 688, "body": "VTKGNKUHMP"} +{"name": "pymc934", "age": 25, "index": 934, "body": "VTKGNKUHMP"} +{"name": "pymc376", "age": 73, "index": 376, "body": "VTKGNKUHMP"} +{"name": "pymc859", "age": 51, "index": 859, "body": "VTKGNKUHMP"} +{"name": "pymc888", "age": 80, "index": 888, "body": "VTKGNKUHMP"} +{"name": "pymc212", "age": 10, "index": 212, "body": "VTKGNKUHMP"} +{"name": "pymc853", "age": 45, "index": 853, "body": "VTKGNKUHMP"} +{"name": "pymc200", "age": 99, "index": 200, "body": "VTKGNKUHMP"} +{"name": "pymc145", "age": 44, "index": 145, "body": "VTKGNKUHMP"} +{"name": "pymc175", "age": 74, "index": 175, "body": "VTKGNKUHMP"} +{"name": "pymc691", "age": 85, "index": 691, "body": "VTKGNKUHMP"} +{"name": "pymc945", "age": 36, "index": 945, "body": "VTKGNKUHMP"} +{"name": "pymc738", "age": 31, "index": 738, "body": "VTKGNKUHMP"} +{"name": "pymc370", "age": 67, "index": 370, "body": "VTKGNKUHMP"} 
+{"name": "pymc811", "age": 3, "index": 811, "body": "VTKGNKUHMP"} +{"name": "pymc891", "age": 83, "index": 891, "body": "VTKGNKUHMP"} +{"name": "pymc236", "age": 34, "index": 236, "body": "VTKGNKUHMP"} +{"name": "pymc320", "age": 17, "index": 320, "body": "VTKGNKUHMP"} +{"name": "pymc195", "age": 94, "index": 195, "body": "VTKGNKUHMP"} +{"name": "pymc858", "age": 50, "index": 858, "body": "VTKGNKUHMP"} +{"name": "pymc194", "age": 93, "index": 194, "body": "VTKGNKUHMP"} +{"name": "pymc608", "age": 2, "index": 608, "body": "VTKGNKUHMP"} +{"name": "pymc682", "age": 76, "index": 682, "body": "VTKGNKUHMP"} +{"name": "pymc252", "age": 50, "index": 252, "body": "VTKGNKUHMP"} +{"name": "pymc943", "age": 34, "index": 943, "body": "VTKGNKUHMP"} +{"name": "pymc861", "age": 53, "index": 861, "body": "VTKGNKUHMP"} +{"name": "pymc809", "age": 1, "index": 809, "body": "VTKGNKUHMP"} +{"name": "pymc197", "age": 96, "index": 197, "body": "VTKGNKUHMP"} +{"name": "pymc106", "age": 5, "index": 106, "body": "VTKGNKUHMP"} +{"name": "pymc838", "age": 30, "index": 838, "body": "VTKGNKUHMP"} +{"name": "pymc16", "age": 16, "index": 16, "body": "VTKGNKUHMP"} +{"name": "pymc741", "age": 34, "index": 741, "body": "VTKGNKUHMP"} +{"name": "pymc849", "age": 41, "index": 849, "body": "VTKGNKUHMP"} +{"name": "pymc105", "age": 4, "index": 105, "body": "VTKGNKUHMP"} +{"name": "pymc378", "age": 75, "index": 378, "body": "VTKGNKUHMP"} +{"name": "pymc45", "age": 45, "index": 45, "body": "VTKGNKUHMP"} +{"name": "pymc177", "age": 76, "index": 177, "body": "VTKGNKUHMP"} +{"name": "pymc104", "age": 3, "index": 104, "body": "VTKGNKUHMP"} +{"name": "pymc196", "age": 95, "index": 196, "body": "VTKGNKUHMP"} +{"name": "pymc83", "age": 83, "index": 83, "body": "VTKGNKUHMP"} +{"name": "pymc488", "age": 84, "index": 488, "body": "VTKGNKUHMP"} +{"name": "pymc13", "age": 13, "index": 13, "body": "VTKGNKUHMP"} +{"name": "pymc720", "age": 13, "index": 720, "body": "VTKGNKUHMP"} +{"name": "pymc87", "age": 87, "index": 87, "body": "VTKGNKUHMP"} +{"name": "pymc950", "age": 41, "index": 950, "body": "VTKGNKUHMP"} +{"name": "pymc468", "age": 64, "index": 468, "body": "VTKGNKUHMP"} +{"name": "pymc834", "age": 26, "index": 834, "body": "VTKGNKUHMP"} +{"name": "pymc815", "age": 7, "index": 815, "body": "VTKGNKUHMP"} +{"name": "pymc215", "age": 13, "index": 215, "body": "VTKGNKUHMP"} +{"name": "pymc295", "age": 93, "index": 295, "body": "VTKGNKUHMP"} +{"name": "pymc292", "age": 90, "index": 292, "body": "VTKGNKUHMP"} +{"name": "pymc180", "age": 79, "index": 180, "body": "VTKGNKUHMP"} +{"name": "pymc728", "age": 21, "index": 728, "body": "VTKGNKUHMP"} +{"name": "pymc155", "age": 54, "index": 155, "body": "VTKGNKUHMP"} +{"name": "pymc855", "age": 47, "index": 855, "body": "VTKGNKUHMP"} +{"name": "pymc841", "age": 33, "index": 841, "body": "VTKGNKUHMP"} +{"name": "pymc209", "age": 7, "index": 209, "body": "VTKGNKUHMP"} +{"name": "pymc100", "age": 100, "index": 100, "body": "VTKGNKUHMP"} +{"name": "pymc248", "age": 46, "index": 248, "body": "VTKGNKUHMP"} +{"name": "pymc769", "age": 62, "index": 769, "body": "VTKGNKUHMP"} +{"name": "pymc330", "age": 27, "index": 330, "body": "VTKGNKUHMP"} +{"name": "pymc142", "age": 41, "index": 142, "body": "VTKGNKUHMP"} +{"name": "pymc976", "age": 67, "index": 976, "body": "VTKGNKUHMP"} +{"name": "pymc151", "age": 50, "index": 151, "body": "VTKGNKUHMP"} +{"name": "pymc852", "age": 44, "index": 852, "body": "VTKGNKUHMP"} +{"name": "pymc649", "age": 43, "index": 649, "body": "VTKGNKUHMP"} +{"name": "pymc294", "age": 92, 
"index": 294, "body": "VTKGNKUHMP"} +{"name": "pymc686", "age": 80, "index": 686, "body": "VTKGNKUHMP"} +{"name": "pymc352", "age": 49, "index": 352, "body": "VTKGNKUHMP"} +{"name": "pymc70", "age": 70, "index": 70, "body": "VTKGNKUHMP"} +{"name": "pymc114", "age": 13, "index": 114, "body": "VTKGNKUHMP"} +{"name": "pymc48", "age": 48, "index": 48, "body": "VTKGNKUHMP"} +{"name": "pymc47", "age": 47, "index": 47, "body": "VTKGNKUHMP"} +{"name": "pymc669", "age": 63, "index": 669, "body": "VTKGNKUHMP"} +{"name": "pymc974", "age": 65, "index": 974, "body": "VTKGNKUHMP"} +{"name": "pymc745", "age": 38, "index": 745, "body": "VTKGNKUHMP"} +{"name": "pymc150", "age": 49, "index": 150, "body": "VTKGNKUHMP"} +{"name": "pymc375", "age": 72, "index": 375, "body": "VTKGNKUHMP"} +{"name": "pymc6", "age": 6, "index": 6, "body": "VTKGNKUHMP"} +{"name": "pymc937", "age": 28, "index": 937, "body": "VTKGNKUHMP"} +{"name": "pymc968", "age": 59, "index": 968, "body": "VTKGNKUHMP"} +{"name": "pymc814", "age": 6, "index": 814, "body": "VTKGNKUHMP"} +{"name": "pymc88", "age": 88, "index": 88, "body": "VTKGNKUHMP"} +{"name": "pymc141", "age": 40, "index": 141, "body": "VTKGNKUHMP"} +{"name": "pymc125", "age": 24, "index": 125, "body": "VTKGNKUHMP"} +{"name": "pymc726", "age": 19, "index": 726, "body": "VTKGNKUHMP"} +{"name": "pymc936", "age": 27, "index": 936, "body": "VTKGNKUHMP"} +{"name": "pymc778", "age": 71, "index": 778, "body": "VTKGNKUHMP"} +{"name": "pymc972", "age": 63, "index": 972, "body": "VTKGNKUHMP"} +{"name": "pymc579", "age": 74, "index": 579, "body": "VTKGNKUHMP"} +{"name": "pymc249", "age": 47, "index": 249, "body": "VTKGNKUHMP"} +{"name": "pymc871", "age": 63, "index": 871, "body": "VTKGNKUHMP"} +{"name": "pymc337", "age": 34, "index": 337, "body": "VTKGNKUHMP"} +{"name": "pymc979", "age": 70, "index": 979, "body": "VTKGNKUHMP"} +{"name": "pymc779", "age": 72, "index": 779, "body": "VTKGNKUHMP"} +{"name": "pymc136", "age": 35, "index": 136, "body": "VTKGNKUHMP"} +{"name": "pymc199", "age": 98, "index": 199, "body": "VTKGNKUHMP"} +{"name": "pymc53", "age": 53, "index": 53, "body": "VTKGNKUHMP"} +{"name": "pymc684", "age": 78, "index": 684, "body": "VTKGNKUHMP"} +{"name": "pymc297", "age": 95, "index": 297, "body": "VTKGNKUHMP"} +{"name": "pymc439", "age": 35, "index": 439, "body": "VTKGNKUHMP"} +{"name": "pymc75", "age": 75, "index": 75, "body": "VTKGNKUHMP"} +{"name": "pymc755", "age": 48, "index": 755, "body": "VTKGNKUHMP"} +{"name": "pymc877", "age": 69, "index": 877, "body": "VTKGNKUHMP"} +{"name": "pymc927", "age": 18, "index": 927, "body": "VTKGNKUHMP"} +{"name": "pymc76", "age": 76, "index": 76, "body": "VTKGNKUHMP"} +{"name": "pymc148", "age": 47, "index": 148, "body": "VTKGNKUHMP"} +{"name": "pymc44", "age": 44, "index": 44, "body": "VTKGNKUHMP"} +{"name": "pymc739", "age": 32, "index": 739, "body": "VTKGNKUHMP"} +{"name": "pymc828", "age": 20, "index": 828, "body": "VTKGNKUHMP"} +{"name": "pymc826", "age": 18, "index": 826, "body": "VTKGNKUHMP"} +{"name": "pymc732", "age": 25, "index": 732, "body": "VTKGNKUHMP"} +{"name": "pymc135", "age": 34, "index": 135, "body": "VTKGNKUHMP"} +{"name": "pymc324", "age": 21, "index": 324, "body": "VTKGNKUHMP"} +{"name": "pymc409", "age": 5, "index": 409, "body": "VTKGNKUHMP"} +{"name": "pymc267", "age": 65, "index": 267, "body": "VTKGNKUHMP"} +{"name": "pymc244", "age": 42, "index": 244, "body": "VTKGNKUHMP"} +{"name": "pymc143", "age": 42, "index": 143, "body": "VTKGNKUHMP"} +{"name": "pymc802", "age": 95, "index": 802, "body": "VTKGNKUHMP"} 
+{"name": "pymc892", "age": 84, "index": 892, "body": "VTKGNKUHMP"} +{"name": "pymc240", "age": 38, "index": 240, "body": "VTKGNKUHMP"} +{"name": "pymc131", "age": 30, "index": 131, "body": "VTKGNKUHMP"} +{"name": "pymc21", "age": 21, "index": 21, "body": "VTKGNKUHMP"} +{"name": "pymc298", "age": 96, "index": 298, "body": "VTKGNKUHMP"} +{"name": "pymc4", "age": 4, "index": 4, "body": "VTKGNKUHMP"} +{"name": "pymc1", "age": 1, "index": 1, "body": "VTKGNKUHMP"} +{"name": "pymc925", "age": 16, "index": 925, "body": "VTKGNKUHMP"} +{"name": "pymc725", "age": 18, "index": 725, "body": "VTKGNKUHMP"} +{"name": "pymc115", "age": 14, "index": 115, "body": "VTKGNKUHMP"} +{"name": "pymc923", "age": 14, "index": 923, "body": "VTKGNKUHMP"} +{"name": "pymc628", "age": 22, "index": 628, "body": "VTKGNKUHMP"} +{"name": "pymc374", "age": 71, "index": 374, "body": "VTKGNKUHMP"} +{"name": "pymc418", "age": 14, "index": 418, "body": "VTKGNKUHMP"} +{"name": "pymc188", "age": 87, "index": 188, "body": "VTKGNKUHMP"} +{"name": "pymc230", "age": 28, "index": 230, "body": "VTKGNKUHMP"} +{"name": "pymc698", "age": 92, "index": 698, "body": "VTKGNKUHMP"} +{"name": "pymc259", "age": 57, "index": 259, "body": "VTKGNKUHMP"} +{"name": "pymc33", "age": 33, "index": 33, "body": "VTKGNKUHMP"} +{"name": "pymc223", "age": 21, "index": 223, "body": "VTKGNKUHMP"} +{"name": "pymc17", "age": 17, "index": 17, "body": "VTKGNKUHMP"} +{"name": "pymc866", "age": 58, "index": 866, "body": "VTKGNKUHMP"} +{"name": "pymc204", "age": 2, "index": 204, "body": "VTKGNKUHMP"} +{"name": "pymc290", "age": 88, "index": 290, "body": "VTKGNKUHMP"} +{"name": "pymc752", "age": 45, "index": 752, "body": "VTKGNKUHMP"} +{"name": "pymc49", "age": 49, "index": 49, "body": "VTKGNKUHMP"} +{"name": "pymc879", "age": 71, "index": 879, "body": "VTKGNKUHMP"} +{"name": "pymc882", "age": 74, "index": 882, "body": "VTKGNKUHMP"} +{"name": "pymc351", "age": 48, "index": 351, "body": "VTKGNKUHMP"} +{"name": "pymc813", "age": 5, "index": 813, "body": "VTKGNKUHMP"} +{"name": "pymc263", "age": 61, "index": 263, "body": "VTKGNKUHMP"} +{"name": "pymc932", "age": 23, "index": 932, "body": "VTKGNKUHMP"} +{"name": "pymc942", "age": 33, "index": 942, "body": "VTKGNKUHMP"} +{"name": "pymc299", "age": 97, "index": 299, "body": "VTKGNKUHMP"} +{"name": "pymc219", "age": 17, "index": 219, "body": "VTKGNKUHMP"} +{"name": "pymc35", "age": 35, "index": 35, "body": "VTKGNKUHMP"} +{"name": "pymc970", "age": 61, "index": 970, "body": "VTKGNKUHMP"} +{"name": "pymc850", "age": 42, "index": 850, "body": "VTKGNKUHMP"} +{"name": "pymc350", "age": 47, "index": 350, "body": "VTKGNKUHMP"} +{"name": "pymc355", "age": 52, "index": 355, "body": "VTKGNKUHMP"} +{"name": "pymc729", "age": 22, "index": 729, "body": "VTKGNKUHMP"} +{"name": "pymc138", "age": 37, "index": 138, "body": "VTKGNKUHMP"} +{"name": "pymc800", "age": 93, "index": 800, "body": "VTKGNKUHMP"} +{"name": "pymc107", "age": 6, "index": 107, "body": "VTKGNKUHMP"} +{"name": "pymc833", "age": 25, "index": 833, "body": "VTKGNKUHMP"} +{"name": "pymc960", "age": 51, "index": 960, "body": "VTKGNKUHMP"} +{"name": "pymc90", "age": 90, "index": 90, "body": "VTKGNKUHMP"} +{"name": "pymc171", "age": 70, "index": 171, "body": "VTKGNKUHMP"} +{"name": "pymc731", "age": 24, "index": 731, "body": "VTKGNKUHMP"} +{"name": "pymc293", "age": 91, "index": 293, "body": "VTKGNKUHMP"} +{"name": "pymc899", "age": 91, "index": 899, "body": "VTKGNKUHMP"} +{"name": "pymc165", "age": 64, "index": 165, "body": "VTKGNKUHMP"} +{"name": "pymc967", "age": 58, "index": 
967, "body": "VTKGNKUHMP"} +{"name": "pymc11", "age": 11, "index": 11, "body": "VTKGNKUHMP"} +{"name": "pymc27", "age": 27, "index": 27, "body": "VTKGNKUHMP"} +{"name": "pymc255", "age": 53, "index": 255, "body": "VTKGNKUHMP"} +{"name": "pymc129", "age": 28, "index": 129, "body": "VTKGNKUHMP"} +{"name": "pymc226", "age": 24, "index": 226, "body": "VTKGNKUHMP"} +{"name": "pymc803", "age": 96, "index": 803, "body": "VTKGNKUHMP"} +{"name": "pymc922", "age": 13, "index": 922, "body": "VTKGNKUHMP"} +{"name": "pymc37", "age": 37, "index": 37, "body": "VTKGNKUHMP"} +{"name": "pymc227", "age": 25, "index": 227, "body": "VTKGNKUHMP"} +{"name": "pymc528", "age": 23, "index": 528, "body": "VTKGNKUHMP"} +{"name": "pymc875", "age": 67, "index": 875, "body": "VTKGNKUHMP"} +{"name": "pymc429", "age": 25, "index": 429, "body": "VTKGNKUHMP"} +{"name": "pymc279", "age": 77, "index": 279, "body": "VTKGNKUHMP"} +{"name": "pymc823", "age": 15, "index": 823, "body": "VTKGNKUHMP"} +{"name": "pymc730", "age": 23, "index": 730, "body": "VTKGNKUHMP"} +{"name": "pymc894", "age": 86, "index": 894, "body": "VTKGNKUHMP"} +{"name": "pymc740", "age": 33, "index": 740, "body": "VTKGNKUHMP"} +{"name": "pymc117", "age": 16, "index": 117, "body": "VTKGNKUHMP"} +{"name": "pymc93", "age": 93, "index": 93, "body": "VTKGNKUHMP"} +{"name": "pymc103", "age": 2, "index": 103, "body": "VTKGNKUHMP"} +{"name": "pymc247", "age": 45, "index": 247, "body": "VTKGNKUHMP"} +{"name": "pymc34", "age": 34, "index": 34, "body": "VTKGNKUHMP"} +{"name": "pymc38", "age": 38, "index": 38, "body": "VTKGNKUHMP"} +{"name": "pymc166", "age": 65, "index": 166, "body": "VTKGNKUHMP"} +{"name": "pymc944", "age": 35, "index": 944, "body": "VTKGNKUHMP"} +{"name": "pymc101", "age": 0, "index": 101, "body": "VTKGNKUHMP"} +{"name": "pymc254", "age": 52, "index": 254, "body": "VTKGNKUHMP"} +{"name": "pymc232", "age": 30, "index": 232, "body": "VTKGNKUHMP"} +{"name": "pymc889", "age": 81, "index": 889, "body": "VTKGNKUHMP"} +{"name": "pymc479", "age": 75, "index": 479, "body": "VTKGNKUHMP"} +{"name": "pymc371", "age": 68, "index": 371, "body": "VTKGNKUHMP"} +{"name": "pymc558", "age": 53, "index": 558, "body": "VTKGNKUHMP"} +{"name": "pymc182", "age": 81, "index": 182, "body": "VTKGNKUHMP"} +{"name": "pymc438", "age": 34, "index": 438, "body": "VTKGNKUHMP"} +{"name": "pymc887", "age": 79, "index": 887, "body": "VTKGNKUHMP"} +{"name": "pymc8", "age": 8, "index": 8, "body": "VTKGNKUHMP"} +{"name": "pymc235", "age": 33, "index": 235, "body": "VTKGNKUHMP"} +{"name": "pymc273", "age": 71, "index": 273, "body": "VTKGNKUHMP"} +{"name": "pymc679", "age": 73, "index": 679, "body": "VTKGNKUHMP"} +{"name": "pymc123", "age": 22, "index": 123, "body": "VTKGNKUHMP"} +{"name": "pymc687", "age": 81, "index": 687, "body": "VTKGNKUHMP"} +{"name": "pymc242", "age": 40, "index": 242, "body": "VTKGNKUHMP"} +{"name": "pymc952", "age": 43, "index": 952, "body": "VTKGNKUHMP"} +{"name": "pymc817", "age": 9, "index": 817, "body": "VTKGNKUHMP"} +{"name": "pymc962", "age": 53, "index": 962, "body": "VTKGNKUHMP"} +{"name": "pymc842", "age": 34, "index": 842, "body": "VTKGNKUHMP"} +{"name": "pymc28", "age": 28, "index": 28, "body": "VTKGNKUHMP"} +{"name": "pymc870", "age": 62, "index": 870, "body": "VTKGNKUHMP"} +{"name": "pymc951", "age": 42, "index": 951, "body": "VTKGNKUHMP"} +{"name": "pymc880", "age": 72, "index": 880, "body": "VTKGNKUHMP"} +{"name": "pymc723", "age": 16, "index": 723, "body": "VTKGNKUHMP"} +{"name": "pymc938", "age": 29, "index": 938, "body": "VTKGNKUHMP"} +{"name": 
"pymc157", "age": 56, "index": 157, "body": "VTKGNKUHMP"} +{"name": "pymc458", "age": 54, "index": 458, "body": "VTKGNKUHMP"} +{"name": "pymc538", "age": 33, "index": 538, "body": "VTKGNKUHMP"} +{"name": "pymc208", "age": 6, "index": 208, "body": "VTKGNKUHMP"} +{"name": "pymc751", "age": 44, "index": 751, "body": "VTKGNKUHMP"} +{"name": "pymc24", "age": 24, "index": 24, "body": "VTKGNKUHMP"} +{"name": "pymc748", "age": 41, "index": 748, "body": "VTKGNKUHMP"} +{"name": "pymc270", "age": 68, "index": 270, "body": "VTKGNKUHMP"} +{"name": "pymc812", "age": 4, "index": 812, "body": "VTKGNKUHMP"} +{"name": "pymc408", "age": 4, "index": 408, "body": "VTKGNKUHMP"} +{"name": "pymc81", "age": 81, "index": 81, "body": "VTKGNKUHMP"} +{"name": "pymc878", "age": 70, "index": 878, "body": "VTKGNKUHMP"} +{"name": "pymc965", "age": 56, "index": 965, "body": "VTKGNKUHMP"} +{"name": "pymc920", "age": 11, "index": 920, "body": "VTKGNKUHMP"} +{"name": "pymc683", "age": 77, "index": 683, "body": "VTKGNKUHMP"} +{"name": "pymc133", "age": 32, "index": 133, "body": "VTKGNKUHMP"} +{"name": "pymc693", "age": 87, "index": 693, "body": "VTKGNKUHMP"} +{"name": "pymc268", "age": 66, "index": 268, "body": "VTKGNKUHMP"} +{"name": "pymc287", "age": 85, "index": 287, "body": "VTKGNKUHMP"} +{"name": "pymc931", "age": 22, "index": 931, "body": "VTKGNKUHMP"} +{"name": "pymc18", "age": 18, "index": 18, "body": "VTKGNKUHMP"} +{"name": "pymc202", "age": 0, "index": 202, "body": "VTKGNKUHMP"} +{"name": "pymc449", "age": 45, "index": 449, "body": "VTKGNKUHMP"} +{"name": "pymc681", "age": 75, "index": 681, "body": "VTKGNKUHMP"} +{"name": "pymc946", "age": 37, "index": 946, "body": "VTKGNKUHMP"} +{"name": "pymc568", "age": 63, "index": 568, "body": "VTKGNKUHMP"} +{"name": "pymc969", "age": 60, "index": 969, "body": "VTKGNKUHMP"} +{"name": "pymc321", "age": 18, "index": 321, "body": "VTKGNKUHMP"} +{"name": "pymc201", "age": 100, "index": 201, "body": "VTKGNKUHMP"} +{"name": "pymc357", "age": 54, "index": 357, "body": "VTKGNKUHMP"} +{"name": "pymc609", "age": 3, "index": 609, "body": "VTKGNKUHMP"} +{"name": "pymc964", "age": 55, "index": 964, "body": "VTKGNKUHMP"} +{"name": "pymc428", "age": 24, "index": 428, "body": "VTKGNKUHMP"} +{"name": "pymc353", "age": 50, "index": 353, "body": "VTKGNKUHMP"} +{"name": "pymc211", "age": 9, "index": 211, "body": "VTKGNKUHMP"} +{"name": "pymc926", "age": 17, "index": 926, "body": "VTKGNKUHMP"} +{"name": "pymc250", "age": 48, "index": 250, "body": "VTKGNKUHMP"} +{"name": "pymc339", "age": 36, "index": 339, "body": "VTKGNKUHMP"} +{"name": "pymc187", "age": 86, "index": 187, "body": "VTKGNKUHMP"} +{"name": "pymc222", "age": 20, "index": 222, "body": "VTKGNKUHMP"} +{"name": "pymc174", "age": 73, "index": 174, "body": "VTKGNKUHMP"} +{"name": "pymc971", "age": 62, "index": 971, "body": "VTKGNKUHMP"} +{"name": "pymc198", "age": 97, "index": 198, "body": "VTKGNKUHMP"} +{"name": "pymc885", "age": 77, "index": 885, "body": "VTKGNKUHMP"} +{"name": "pymc43", "age": 43, "index": 43, "body": "VTKGNKUHMP"} +{"name": "pymc955", "age": 46, "index": 955, "body": "VTKGNKUHMP"} +{"name": "pymc874", "age": 66, "index": 874, "body": "VTKGNKUHMP"} +{"name": "pymc831", "age": 23, "index": 831, "body": "VTKGNKUHMP"} +{"name": "pymc750", "age": 43, "index": 750, "body": "VTKGNKUHMP"} +{"name": "pymc338", "age": 35, "index": 338, "body": "VTKGNKUHMP"} +{"name": "pymc733", "age": 26, "index": 733, "body": "VTKGNKUHMP"} +{"name": "pymc840", "age": 32, "index": 840, "body": "VTKGNKUHMP"} +{"name": "pymc217", "age": 15, "index": 
217, "body": "VTKGNKUHMP"} +{"name": "pymc727", "age": 20, "index": 727, "body": "VTKGNKUHMP"} +{"name": "pymc167", "age": 66, "index": 167, "body": "VTKGNKUHMP"} +{"name": "pymc847", "age": 39, "index": 847, "body": "VTKGNKUHMP"} +{"name": "pymc839", "age": 31, "index": 839, "body": "VTKGNKUHMP"} +{"name": "pymc326", "age": 23, "index": 326, "body": "VTKGNKUHMP"} +{"name": "pymc205", "age": 3, "index": 205, "body": "VTKGNKUHMP"} +{"name": "pymc246", "age": 44, "index": 246, "body": "VTKGNKUHMP"} +{"name": "pymc190", "age": 89, "index": 190, "body": "VTKGNKUHMP"} +{"name": "pymc134", "age": 33, "index": 134, "body": "VTKGNKUHMP"} +{"name": "pymc848", "age": 40, "index": 848, "body": "VTKGNKUHMP"} +{"name": "pymc448", "age": 44, "index": 448, "body": "VTKGNKUHMP"} +{"name": "pymc154", "age": 53, "index": 154, "body": "VTKGNKUHMP"} +{"name": "pymc111", "age": 10, "index": 111, "body": "VTKGNKUHMP"} +{"name": "pymc258", "age": 56, "index": 258, "body": "VTKGNKUHMP"} +{"name": "pymc214", "age": 12, "index": 214, "body": "VTKGNKUHMP"} +{"name": "pymc697", "age": 91, "index": 697, "body": "VTKGNKUHMP"} +{"name": "pymc96", "age": 96, "index": 96, "body": "VTKGNKUHMP"} +{"name": "pymc948", "age": 39, "index": 948, "body": "VTKGNKUHMP"} +{"name": "pymc42", "age": 42, "index": 42, "body": "VTKGNKUHMP"} +{"name": "pymc284", "age": 82, "index": 284, "body": "VTKGNKUHMP"} +{"name": "pymc185", "age": 84, "index": 185, "body": "VTKGNKUHMP"} +{"name": "pymc84", "age": 84, "index": 84, "body": "VTKGNKUHMP"} +{"name": "pymc928", "age": 19, "index": 928, "body": "VTKGNKUHMP"} +{"name": "pymc266", "age": 64, "index": 266, "body": "VTKGNKUHMP"} +{"name": "pymc245", "age": 43, "index": 245, "body": "VTKGNKUHMP"} +{"name": "pymc864", "age": 56, "index": 864, "body": "VTKGNKUHMP"} +{"name": "pymc756", "age": 49, "index": 756, "body": "VTKGNKUHMP"} +{"name": "pymc218", "age": 16, "index": 218, "body": "VTKGNKUHMP"} +{"name": "pymc64", "age": 64, "index": 64, "body": "VTKGNKUHMP"} +{"name": "pymc869", "age": 61, "index": 869, "body": "VTKGNKUHMP"} +{"name": "pymc490", "age": 86, "index": 490, "body": "VTKGNKUHMP"} +{"name": "pymc914", "age": 5, "index": 914, "body": "VTKGNKUHMP"} +{"name": "pymc401", "age": 98, "index": 401, "body": "VTKGNKUHMP"} +{"name": "pymc312", "age": 9, "index": 312, "body": "VTKGNKUHMP"} +{"name": "pymc415", "age": 11, "index": 415, "body": "VTKGNKUHMP"} +{"name": "pymc621", "age": 15, "index": 621, "body": "VTKGNKUHMP"} +{"name": "pymc494", "age": 90, "index": 494, "body": "VTKGNKUHMP"} +{"name": "pymc508", "age": 3, "index": 508, "body": "VTKGNKUHMP"} +{"name": "pymc766", "age": 59, "index": 766, "body": "VTKGNKUHMP"} +{"name": "pymc660", "age": 54, "index": 660, "body": "VTKGNKUHMP"} +{"name": "pymc348", "age": 45, "index": 348, "body": "VTKGNKUHMP"} +{"name": "pymc487", "age": 83, "index": 487, "body": "VTKGNKUHMP"} +{"name": "pymc610", "age": 4, "index": 610, "body": "VTKGNKUHMP"} +{"name": "pymc991", "age": 82, "index": 991, "body": "VTKGNKUHMP"} +{"name": "pymc620", "age": 14, "index": 620, "body": "VTKGNKUHMP"} +{"name": "pymc662", "age": 56, "index": 662, "body": "VTKGNKUHMP"} +{"name": "pymc525", "age": 20, "index": 525, "body": "VTKGNKUHMP"} +{"name": "pymc451", "age": 47, "index": 451, "body": "VTKGNKUHMP"} +{"name": "pymc556", "age": 51, "index": 556, "body": "VTKGNKUHMP"} +{"name": "pymc706", "age": 100, "index": 706, "body": "VTKGNKUHMP"} +{"name": "pymc457", "age": 53, "index": 457, "body": "VTKGNKUHMP"} +{"name": "pymc426", "age": 22, "index": 426, "body": "VTKGNKUHMP"} 
+{"name": "pymc634", "age": 28, "index": 634, "body": "VTKGNKUHMP"} +{"name": "pymc789", "age": 82, "index": 789, "body": "VTKGNKUHMP"} +{"name": "pymc412", "age": 8, "index": 412, "body": "VTKGNKUHMP"} +{"name": "pymc450", "age": 46, "index": 450, "body": "VTKGNKUHMP"} +{"name": "pymc911", "age": 2, "index": 911, "body": "VTKGNKUHMP"} +{"name": "pymc301", "age": 99, "index": 301, "body": "VTKGNKUHMP"} +{"name": "pymc552", "age": 47, "index": 552, "body": "VTKGNKUHMP"} +{"name": "pymc906", "age": 98, "index": 906, "body": "VTKGNKUHMP"} +{"name": "pymc557", "age": 52, "index": 557, "body": "VTKGNKUHMP"} +{"name": "pymc700", "age": 94, "index": 700, "body": "VTKGNKUHMP"} +{"name": "pymc531", "age": 26, "index": 531, "body": "VTKGNKUHMP"} +{"name": "pymc414", "age": 10, "index": 414, "body": "VTKGNKUHMP"} +{"name": "pymc313", "age": 10, "index": 313, "body": "VTKGNKUHMP"} +{"name": "pymc655", "age": 49, "index": 655, "body": "VTKGNKUHMP"} +{"name": "pymc715", "age": 8, "index": 715, "body": "VTKGNKUHMP"} +{"name": "pymc341", "age": 38, "index": 341, "body": "VTKGNKUHMP"} +{"name": "pymc396", "age": 93, "index": 396, "body": "VTKGNKUHMP"} +{"name": "pymc650", "age": 44, "index": 650, "body": "VTKGNKUHMP"} +{"name": "pymc908", "age": 100, "index": 908, "body": "VTKGNKUHMP"} +{"name": "pymc994", "age": 85, "index": 994, "body": "VTKGNKUHMP"} +{"name": "pymc903", "age": 95, "index": 903, "body": "VTKGNKUHMP"} +{"name": "pymc705", "age": 99, "index": 705, "body": "VTKGNKUHMP"} +{"name": "pymc710", "age": 3, "index": 710, "body": "VTKGNKUHMP"} +{"name": "pymc523", "age": 18, "index": 523, "body": "VTKGNKUHMP"} +{"name": "pymc444", "age": 40, "index": 444, "body": "VTKGNKUHMP"} +{"name": "pymc380", "age": 77, "index": 380, "body": "VTKGNKUHMP"} +{"name": "pymc455", "age": 51, "index": 455, "body": "VTKGNKUHMP"} +{"name": "pymc761", "age": 54, "index": 761, "body": "VTKGNKUHMP"} +{"name": "pymc311", "age": 8, "index": 311, "body": "VTKGNKUHMP"} +{"name": "pymc397", "age": 94, "index": 397, "body": "VTKGNKUHMP"} +{"name": "pymc624", "age": 18, "index": 624, "body": "VTKGNKUHMP"} +{"name": "pymc600", "age": 95, "index": 600, "body": "VTKGNKUHMP"} +{"name": "pymc632", "age": 26, "index": 632, "body": "VTKGNKUHMP"} +{"name": "pymc642", "age": 36, "index": 642, "body": "VTKGNKUHMP"} +{"name": "pymc656", "age": 50, "index": 656, "body": "VTKGNKUHMP"} +{"name": "pymc386", "age": 83, "index": 386, "body": "VTKGNKUHMP"} +{"name": "pymc640", "age": 34, "index": 640, "body": "VTKGNKUHMP"} +{"name": "pymc318", "age": 15, "index": 318, "body": "VTKGNKUHMP"} +{"name": "pymc393", "age": 90, "index": 393, "body": "VTKGNKUHMP"} +{"name": "pymc616", "age": 10, "index": 616, "body": "VTKGNKUHMP"} +{"name": "pymc645", "age": 39, "index": 645, "body": "VTKGNKUHMP"} +{"name": "pymc770", "age": 63, "index": 770, "body": "VTKGNKUHMP"} +{"name": "pymc654", "age": 48, "index": 654, "body": "VTKGNKUHMP"} +{"name": "pymc764", "age": 57, "index": 764, "body": "VTKGNKUHMP"} +{"name": "pymc652", "age": 46, "index": 652, "body": "VTKGNKUHMP"} +{"name": "pymc709", "age": 2, "index": 709, "body": "VTKGNKUHMP"} +{"name": "pymc486", "age": 82, "index": 486, "body": "VTKGNKUHMP"} +{"name": "pymc417", "age": 13, "index": 417, "body": "VTKGNKUHMP"} +{"name": "pymc443", "age": 39, "index": 443, "body": "VTKGNKUHMP"} +{"name": "pymc447", "age": 43, "index": 447, "body": "VTKGNKUHMP"} +{"name": "pymc664", "age": 58, "index": 664, "body": "VTKGNKUHMP"} +{"name": "pymc482", "age": 78, "index": 482, "body": "VTKGNKUHMP"} +{"name": "pymc661", 
"age": 55, "index": 661, "body": "VTKGNKUHMP"} +{"name": "pymc636", "age": 30, "index": 636, "body": "VTKGNKUHMP"} +{"name": "pymc545", "age": 40, "index": 545, "body": "VTKGNKUHMP"} +{"name": "pymc543", "age": 38, "index": 543, "body": "VTKGNKUHMP"} +{"name": "pymc541", "age": 36, "index": 541, "body": "VTKGNKUHMP"} +{"name": "pymc441", "age": 37, "index": 441, "body": "VTKGNKUHMP"} +{"name": "pymc344", "age": 41, "index": 344, "body": "VTKGNKUHMP"} +{"name": "pymc342", "age": 39, "index": 342, "body": "VTKGNKUHMP"} +{"name": "pymc907", "age": 99, "index": 907, "body": "VTKGNKUHMP"} +{"name": "pymc704", "age": 98, "index": 704, "body": "VTKGNKUHMP"} +{"name": "pymc988", "age": 79, "index": 988, "body": "VTKGNKUHMP"} +{"name": "pymc771", "age": 64, "index": 771, "body": "VTKGNKUHMP"} +{"name": "pymc589", "age": 84, "index": 589, "body": "VTKGNKUHMP"} +{"name": "pymc719", "age": 12, "index": 719, "body": "VTKGNKUHMP"} +{"name": "pymc989", "age": 80, "index": 989, "body": "VTKGNKUHMP"} +{"name": "pymc411", "age": 7, "index": 411, "body": "VTKGNKUHMP"} +{"name": "pymc613", "age": 7, "index": 613, "body": "VTKGNKUHMP"} +{"name": "pymc718", "age": 11, "index": 718, "body": "VTKGNKUHMP"} +{"name": "pymc404", "age": 0, "index": 404, "body": "VTKGNKUHMP"} +{"name": "pymc918", "age": 9, "index": 918, "body": "VTKGNKUHMP"} +{"name": "pymc382", "age": 79, "index": 382, "body": "VTKGNKUHMP"} +{"name": "pymc390", "age": 87, "index": 390, "body": "VTKGNKUHMP"} +{"name": "pymc637", "age": 31, "index": 637, "body": "VTKGNKUHMP"} +{"name": "pymc607", "age": 1, "index": 607, "body": "VTKGNKUHMP"} +{"name": "pymc701", "age": 95, "index": 701, "body": "VTKGNKUHMP"} +{"name": "pymc402", "age": 99, "index": 402, "body": "VTKGNKUHMP"} +{"name": "pymc981", "age": 72, "index": 981, "body": "VTKGNKUHMP"} +{"name": "pymc901", "age": 93, "index": 901, "body": "VTKGNKUHMP"} +{"name": "pymc544", "age": 39, "index": 544, "body": "VTKGNKUHMP"} +{"name": "pymc452", "age": 48, "index": 452, "body": "VTKGNKUHMP"} +{"name": "pymc777", "age": 70, "index": 777, "body": "VTKGNKUHMP"} +{"name": "pymc798", "age": 91, "index": 798, "body": "VTKGNKUHMP"} +{"name": "pymc712", "age": 5, "index": 712, "body": "VTKGNKUHMP"} +{"name": "pymc520", "age": 15, "index": 520, "body": "VTKGNKUHMP"} +{"name": "pymc999", "age": 90, "index": 999, "body": "VTKGNKUHMP"} +{"name": "pymc775", "age": 68, "index": 775, "body": "VTKGNKUHMP"} +{"name": "pymc674", "age": 68, "index": 674, "body": "VTKGNKUHMP"} +{"name": "pymc677", "age": 71, "index": 677, "body": "VTKGNKUHMP"} +{"name": "pymc605", "age": 100, "index": 605, "body": "VTKGNKUHMP"} +{"name": "pymc676", "age": 70, "index": 676, "body": "VTKGNKUHMP"} +{"name": "pymc774", "age": 67, "index": 774, "body": "VTKGNKUHMP"} +{"name": "pymc518", "age": 13, "index": 518, "body": "VTKGNKUHMP"} +{"name": "pymc985", "age": 76, "index": 985, "body": "VTKGNKUHMP"} +{"name": "pymc385", "age": 82, "index": 385, "body": "VTKGNKUHMP"} +{"name": "pymc305", "age": 2, "index": 305, "body": "VTKGNKUHMP"} +{"name": "pymc302", "age": 100, "index": 302, "body": "VTKGNKUHMP"} +{"name": "pymc762", "age": 55, "index": 762, "body": "VTKGNKUHMP"} +{"name": "pymc399", "age": 96, "index": 399, "body": "VTKGNKUHMP"} +{"name": "pymc672", "age": 66, "index": 672, "body": "VTKGNKUHMP"} +{"name": "pymc442", "age": 38, "index": 442, "body": "VTKGNKUHMP"} +{"name": "pymc630", "age": 24, "index": 630, "body": "VTKGNKUHMP"} +{"name": "pymc623", "age": 17, "index": 623, "body": "VTKGNKUHMP"} +{"name": "pymc304", "age": 1, "index": 304, 
"body": "VTKGNKUHMP"} +{"name": "pymc716", "age": 9, "index": 716, "body": "VTKGNKUHMP"} +{"name": "pymc420", "age": 16, "index": 420, "body": "VTKGNKUHMP"} +{"name": "pymc666", "age": 60, "index": 666, "body": "VTKGNKUHMP"} +{"name": "pymc622", "age": 16, "index": 622, "body": "VTKGNKUHMP"} +{"name": "pymc407", "age": 3, "index": 407, "body": "VTKGNKUHMP"} +{"name": "pymc347", "age": 44, "index": 347, "body": "VTKGNKUHMP"} +{"name": "pymc984", "age": 75, "index": 984, "body": "VTKGNKUHMP"} +{"name": "pymc633", "age": 27, "index": 633, "body": "VTKGNKUHMP"} +{"name": "pymc657", "age": 51, "index": 657, "body": "VTKGNKUHMP"} +{"name": "pymc445", "age": 41, "index": 445, "body": "VTKGNKUHMP"} +{"name": "pymc644", "age": 38, "index": 644, "body": "VTKGNKUHMP"} +{"name": "pymc307", "age": 4, "index": 307, "body": "VTKGNKUHMP"} +{"name": "pymc647", "age": 41, "index": 647, "body": "VTKGNKUHMP"} +{"name": "pymc599", "age": 94, "index": 599, "body": "VTKGNKUHMP"} +{"name": "pymc485", "age": 81, "index": 485, "body": "VTKGNKUHMP"} +{"name": "pymc536", "age": 31, "index": 536, "body": "VTKGNKUHMP"} +{"name": "pymc992", "age": 83, "index": 992, "body": "VTKGNKUHMP"} +{"name": "pymc902", "age": 94, "index": 902, "body": "VTKGNKUHMP"} +{"name": "pymc532", "age": 27, "index": 532, "body": "VTKGNKUHMP"} +{"name": "pymc643", "age": 37, "index": 643, "body": "VTKGNKUHMP"} +{"name": "pymc308", "age": 5, "index": 308, "body": "VTKGNKUHMP"} +{"name": "pymc345", "age": 42, "index": 345, "body": "VTKGNKUHMP"} +{"name": "pymc340", "age": 37, "index": 340, "body": "VTKGNKUHMP"} +{"name": "pymc667", "age": 61, "index": 667, "body": "VTKGNKUHMP"} +{"name": "pymc406", "age": 2, "index": 406, "body": "VTKGNKUHMP"} +{"name": "pymc588", "age": 83, "index": 588, "body": "VTKGNKUHMP"} +{"name": "pymc542", "age": 37, "index": 542, "body": "VTKGNKUHMP"} +{"name": "pymc708", "age": 1, "index": 708, "body": "VTKGNKUHMP"} +{"name": "pymc551", "age": 46, "index": 551, "body": "VTKGNKUHMP"} +{"name": "pymc394", "age": 91, "index": 394, "body": "VTKGNKUHMP"} +{"name": "pymc300", "age": 98, "index": 300, "body": "VTKGNKUHMP"} +{"name": "pymc912", "age": 3, "index": 912, "body": "VTKGNKUHMP"} +{"name": "pymc423", "age": 19, "index": 423, "body": "VTKGNKUHMP"} +{"name": "pymc983", "age": 74, "index": 983, "body": "VTKGNKUHMP"} +{"name": "pymc309", "age": 6, "index": 309, "body": "VTKGNKUHMP"} +{"name": "pymc389", "age": 86, "index": 389, "body": "VTKGNKUHMP"} +{"name": "pymc427", "age": 23, "index": 427, "body": "VTKGNKUHMP"} +{"name": "pymc990", "age": 81, "index": 990, "body": "VTKGNKUHMP"} +{"name": "pymc675", "age": 69, "index": 675, "body": "VTKGNKUHMP"} +{"name": "pymc603", "age": 98, "index": 603, "body": "VTKGNKUHMP"} +{"name": "pymc303", "age": 0, "index": 303, "body": "VTKGNKUHMP"} +{"name": "pymc909", "age": 0, "index": 909, "body": "VTKGNKUHMP"} +{"name": "pymc617", "age": 11, "index": 617, "body": "VTKGNKUHMP"} +{"name": "pymc527", "age": 22, "index": 527, "body": "VTKGNKUHMP"} +{"name": "pymc554", "age": 49, "index": 554, "body": "VTKGNKUHMP"} +{"name": "pymc993", "age": 84, "index": 993, "body": "VTKGNKUHMP"} +{"name": "pymc555", "age": 50, "index": 555, "body": "VTKGNKUHMP"} +{"name": "pymc904", "age": 96, "index": 904, "body": "VTKGNKUHMP"} +{"name": "pymc665", "age": 59, "index": 665, "body": "VTKGNKUHMP"} +{"name": "pymc671", "age": 65, "index": 671, "body": "VTKGNKUHMP"} +{"name": "pymc535", "age": 30, "index": 535, "body": "VTKGNKUHMP"} +{"name": "pymc614", "age": 8, "index": 614, "body": "VTKGNKUHMP"} 
+{"name": "pymc673", "age": 67, "index": 673, "body": "VTKGNKUHMP"} +{"name": "pymc526", "age": 21, "index": 526, "body": "VTKGNKUHMP"} +{"name": "pymc540", "age": 35, "index": 540, "body": "VTKGNKUHMP"} +{"name": "pymc919", "age": 10, "index": 919, "body": "VTKGNKUHMP"} +{"name": "pymc403", "age": 100, "index": 403, "body": "VTKGNKUHMP"} +{"name": "pymc917", "age": 8, "index": 917, "body": "VTKGNKUHMP"} +{"name": "pymc349", "age": 46, "index": 349, "body": "VTKGNKUHMP"} +{"name": "pymc547", "age": 42, "index": 547, "body": "VTKGNKUHMP"} +{"name": "pymc491", "age": 87, "index": 491, "body": "VTKGNKUHMP"} +{"name": "pymc651", "age": 45, "index": 651, "body": "VTKGNKUHMP"} +{"name": "pymc717", "age": 10, "index": 717, "body": "VTKGNKUHMP"} +{"name": "pymc530", "age": 25, "index": 530, "body": "VTKGNKUHMP"} +{"name": "pymc987", "age": 78, "index": 987, "body": "VTKGNKUHMP"} +{"name": "pymc910", "age": 1, "index": 910, "body": "VTKGNKUHMP"} +{"name": "pymc625", "age": 19, "index": 625, "body": "VTKGNKUHMP"} +{"name": "pymc398", "age": 95, "index": 398, "body": "VTKGNKUHMP"} +{"name": "pymc492", "age": 88, "index": 492, "body": "VTKGNKUHMP"} +{"name": "pymc982", "age": 73, "index": 982, "body": "VTKGNKUHMP"} +{"name": "pymc598", "age": 93, "index": 598, "body": "VTKGNKUHMP"} +{"name": "pymc713", "age": 6, "index": 713, "body": "VTKGNKUHMP"} +{"name": "pymc641", "age": 35, "index": 641, "body": "VTKGNKUHMP"} +{"name": "pymc703", "age": 97, "index": 703, "body": "VTKGNKUHMP"} +{"name": "pymc317", "age": 14, "index": 317, "body": "VTKGNKUHMP"} +{"name": "pymc392", "age": 89, "index": 392, "body": "VTKGNKUHMP"} +{"name": "pymc711", "age": 4, "index": 711, "body": "VTKGNKUHMP"} +{"name": "pymc453", "age": 49, "index": 453, "body": "VTKGNKUHMP"} +{"name": "pymc391", "age": 88, "index": 391, "body": "VTKGNKUHMP"} +{"name": "pymc425", "age": 21, "index": 425, "body": "VTKGNKUHMP"} +{"name": "pymc799", "age": 92, "index": 799, "body": "VTKGNKUHMP"} +{"name": "pymc421", "age": 17, "index": 421, "body": "VTKGNKUHMP"} +{"name": "pymc381", "age": 78, "index": 381, "body": "VTKGNKUHMP"} +{"name": "pymc522", "age": 17, "index": 522, "body": "VTKGNKUHMP"} +{"name": "pymc765", "age": 58, "index": 765, "body": "VTKGNKUHMP"} +{"name": "pymc550", "age": 45, "index": 550, "body": "VTKGNKUHMP"} +{"name": "pymc606", "age": 0, "index": 606, "body": "VTKGNKUHMP"} +{"name": "pymc915", "age": 6, "index": 915, "body": "VTKGNKUHMP"} +{"name": "pymc387", "age": 84, "index": 387, "body": "VTKGNKUHMP"} +{"name": "pymc615", "age": 9, "index": 615, "body": "VTKGNKUHMP"} +{"name": "pymc454", "age": 50, "index": 454, "body": "VTKGNKUHMP"} +{"name": "pymc395", "age": 92, "index": 395, "body": "VTKGNKUHMP"} +{"name": "pymc534", "age": 29, "index": 534, "body": "VTKGNKUHMP"} +{"name": "pymc772", "age": 65, "index": 772, "body": "VTKGNKUHMP"} +{"name": "pymc646", "age": 40, "index": 646, "body": "VTKGNKUHMP"} +{"name": "pymc626", "age": 20, "index": 626, "body": "VTKGNKUHMP"} +{"name": "pymc663", "age": 57, "index": 663, "body": "VTKGNKUHMP"} +{"name": "pymc384", "age": 81, "index": 384, "body": "VTKGNKUHMP"} +{"name": "pymc707", "age": 0, "index": 707, "body": "VTKGNKUHMP"} +{"name": "pymc314", "age": 11, "index": 314, "body": "VTKGNKUHMP"} +{"name": "pymc767", "age": 60, "index": 767, "body": "VTKGNKUHMP"} +{"name": "pymc484", "age": 80, "index": 484, "body": "VTKGNKUHMP"} +{"name": "pymc537", "age": 32, "index": 537, "body": "VTKGNKUHMP"} +{"name": "pymc388", "age": 85, "index": 388, "body": "VTKGNKUHMP"} +{"name": "pymc500", 
"age": 96, "index": 500, "body": "VTKGNKUHMP"} +{"name": "pymc916", "age": 7, "index": 916, "body": "VTKGNKUHMP"} +{"name": "pymc591", "age": 86, "index": 591, "body": "VTKGNKUHMP"} +{"name": "pymc460", "age": 56, "index": 460, "body": "VTKGNKUHMP"} +{"name": "pymc446", "age": 42, "index": 446, "body": "VTKGNKUHMP"} +{"name": "pymc585", "age": 80, "index": 585, "body": "VTKGNKUHMP"} +{"name": "pymc480", "age": 76, "index": 480, "body": "VTKGNKUHMP"} +{"name": "pymc504", "age": 100, "index": 504, "body": "VTKGNKUHMP"} +{"name": "pymc413", "age": 9, "index": 413, "body": "VTKGNKUHMP"} +{"name": "pymc410", "age": 6, "index": 410, "body": "VTKGNKUHMP"} +{"name": "pymc517", "age": 12, "index": 517, "body": "VTKGNKUHMP"} +{"name": "pymc553", "age": 48, "index": 553, "body": "VTKGNKUHMP"} +{"name": "pymc780", "age": 73, "index": 780, "body": "VTKGNKUHMP"} +{"name": "pymc496", "age": 92, "index": 496, "body": "VTKGNKUHMP"} +{"name": "pymc424", "age": 20, "index": 424, "body": "VTKGNKUHMP"} +{"name": "pymc986", "age": 77, "index": 986, "body": "VTKGNKUHMP"} +{"name": "pymc773", "age": 66, "index": 773, "body": "VTKGNKUHMP"} +{"name": "pymc602", "age": 97, "index": 602, "body": "VTKGNKUHMP"} +{"name": "pymc900", "age": 92, "index": 900, "body": "VTKGNKUHMP"} +{"name": "pymc519", "age": 14, "index": 519, "body": "VTKGNKUHMP"} +{"name": "pymc582", "age": 77, "index": 582, "body": "VTKGNKUHMP"} +{"name": "pymc524", "age": 19, "index": 524, "body": "VTKGNKUHMP"} +{"name": "pymc473", "age": 69, "index": 473, "body": "VTKGNKUHMP"} +{"name": "pymc495", "age": 91, "index": 495, "body": "VTKGNKUHMP"} +{"name": "pymc493", "age": 89, "index": 493, "body": "VTKGNKUHMP"} +{"name": "pymc405", "age": 1, "index": 405, "body": "VTKGNKUHMP"} +{"name": "pymc483", "age": 79, "index": 483, "body": "VTKGNKUHMP"} +{"name": "pymc346", "age": 43, "index": 346, "body": "VTKGNKUHMP"} +{"name": "pymc584", "age": 79, "index": 584, "body": "VTKGNKUHMP"} +{"name": "pymc461", "age": 57, "index": 461, "body": "VTKGNKUHMP"} +{"name": "pymc763", "age": 56, "index": 763, "body": "VTKGNKUHMP"} +{"name": "pymc310", "age": 7, "index": 310, "body": "VTKGNKUHMP"} +{"name": "pymc670", "age": 64, "index": 670, "body": "VTKGNKUHMP"} +{"name": "pymc316", "age": 13, "index": 316, "body": "VTKGNKUHMP"} +{"name": "pymc433", "age": 29, "index": 433, "body": "VTKGNKUHMP"} +{"name": "pymc521", "age": 16, "index": 521, "body": "VTKGNKUHMP"} +{"name": "pymc611", "age": 5, "index": 611, "body": "VTKGNKUHMP"} +{"name": "pymc995", "age": 86, "index": 995, "body": "VTKGNKUHMP"} +{"name": "pymc574", "age": 69, "index": 574, "body": "VTKGNKUHMP"} +{"name": "pymc319", "age": 16, "index": 319, "body": "VTKGNKUHMP"} +{"name": "pymc998", "age": 89, "index": 998, "body": "VTKGNKUHMP"} +{"name": "pymc702", "age": 96, "index": 702, "body": "VTKGNKUHMP"} +{"name": "pymc383", "age": 80, "index": 383, "body": "VTKGNKUHMP"} +{"name": "pymc612", "age": 6, "index": 612, "body": "VTKGNKUHMP"} +{"name": "pymc601", "age": 96, "index": 601, "body": "VTKGNKUHMP"} +{"name": "pymc604", "age": 99, "index": 604, "body": "VTKGNKUHMP"} +{"name": "pymc400", "age": 97, "index": 400, "body": "VTKGNKUHMP"} +{"name": "pymc440", "age": 36, "index": 440, "body": "VTKGNKUHMP"} +{"name": "pymc788", "age": 81, "index": 788, "body": "VTKGNKUHMP"} +{"name": "pymc997", "age": 88, "index": 997, "body": "VTKGNKUHMP"} +{"name": "pymc416", "age": 12, "index": 416, "body": "VTKGNKUHMP"} +{"name": "pymc913", "age": 4, "index": 913, "body": "VTKGNKUHMP"} +{"name": "pymc546", "age": 41, "index": 546, 
"body": "VTKGNKUHMP"} +{"name": "pymc306", "age": 3, "index": 306, "body": "VTKGNKUHMP"} +{"name": "pymc996", "age": 87, "index": 996, "body": "VTKGNKUHMP"} +{"name": "pymc533", "age": 28, "index": 533, "body": "VTKGNKUHMP"} +{"name": "pymc343", "age": 40, "index": 343, "body": "VTKGNKUHMP"} +{"name": "pymc562", "age": 57, "index": 562, "body": "VTKGNKUHMP"} +{"name": "pymc463", "age": 59, "index": 463, "body": "VTKGNKUHMP"} +{"name": "pymc575", "age": 70, "index": 575, "body": "VTKGNKUHMP"} +{"name": "pymc790", "age": 83, "index": 790, "body": "VTKGNKUHMP"} +{"name": "pymc635", "age": 29, "index": 635, "body": "VTKGNKUHMP"} +{"name": "pymc564", "age": 59, "index": 564, "body": "VTKGNKUHMP"} +{"name": "pymc497", "age": 93, "index": 497, "body": "VTKGNKUHMP"} +{"name": "pymc905", "age": 97, "index": 905, "body": "VTKGNKUHMP"} +{"name": "pymc653", "age": 47, "index": 653, "body": "VTKGNKUHMP"} +{"name": "pymc571", "age": 66, "index": 571, "body": "VTKGNKUHMP"} +{"name": "pymc786", "age": 79, "index": 786, "body": "VTKGNKUHMP"} +{"name": "pymc516", "age": 11, "index": 516, "body": "VTKGNKUHMP"} +{"name": "pymc587", "age": 82, "index": 587, "body": "VTKGNKUHMP"} +{"name": "pymc980", "age": 71, "index": 980, "body": "VTKGNKUHMP"} +{"name": "pymc512", "age": 7, "index": 512, "body": "VTKGNKUHMP"} +{"name": "pymc436", "age": 32, "index": 436, "body": "VTKGNKUHMP"} +{"name": "pymc430", "age": 26, "index": 430, "body": "VTKGNKUHMP"} +{"name": "pymc315", "age": 12, "index": 315, "body": "VTKGNKUHMP"} +{"name": "pymc509", "age": 4, "index": 509, "body": "VTKGNKUHMP"} +{"name": "pymc581", "age": 76, "index": 581, "body": "VTKGNKUHMP"} +{"name": "pymc783", "age": 76, "index": 783, "body": "VTKGNKUHMP"} +{"name": "pymc594", "age": 89, "index": 594, "body": "VTKGNKUHMP"} +{"name": "pymc560", "age": 55, "index": 560, "body": "VTKGNKUHMP"} +{"name": "pymc572", "age": 67, "index": 572, "body": "VTKGNKUHMP"} +{"name": "pymc797", "age": 90, "index": 797, "body": "VTKGNKUHMP"} +{"name": "pymc592", "age": 87, "index": 592, "body": "VTKGNKUHMP"} +{"name": "pymc776", "age": 69, "index": 776, "body": "VTKGNKUHMP"} +{"name": "pymc795", "age": 88, "index": 795, "body": "VTKGNKUHMP"} +{"name": "pymc567", "age": 62, "index": 567, "body": "VTKGNKUHMP"} +{"name": "pymc477", "age": 73, "index": 477, "body": "VTKGNKUHMP"} +{"name": "pymc470", "age": 66, "index": 470, "body": "VTKGNKUHMP"} +{"name": "pymc627", "age": 21, "index": 627, "body": "VTKGNKUHMP"} +{"name": "pymc476", "age": 72, "index": 476, "body": "VTKGNKUHMP"} +{"name": "pymc597", "age": 92, "index": 597, "body": "VTKGNKUHMP"} +{"name": "pymc435", "age": 31, "index": 435, "body": "VTKGNKUHMP"} +{"name": "pymc714", "age": 7, "index": 714, "body": "VTKGNKUHMP"} +{"name": "pymc475", "age": 71, "index": 475, "body": "VTKGNKUHMP"} +{"name": "pymc456", "age": 52, "index": 456, "body": "VTKGNKUHMP"} +{"name": "pymc515", "age": 10, "index": 515, "body": "VTKGNKUHMP"} +{"name": "pymc631", "age": 25, "index": 631, "body": "VTKGNKUHMP"} +{"name": "pymc437", "age": 33, "index": 437, "body": "VTKGNKUHMP"} +{"name": "pymc432", "age": 28, "index": 432, "body": "VTKGNKUHMP"} +{"name": "pymc596", "age": 91, "index": 596, "body": "VTKGNKUHMP"} +{"name": "pymc576", "age": 71, "index": 576, "body": "VTKGNKUHMP"} +{"name": "pymc472", "age": 68, "index": 472, "body": "VTKGNKUHMP"} +{"name": "pymc481", "age": 77, "index": 481, "body": "VTKGNKUHMP"} +{"name": "pymc422", "age": 18, "index": 422, "body": "VTKGNKUHMP"} +{"name": "pymc793", "age": 86, "index": 793, "body": "VTKGNKUHMP"} 
+{"name": "pymc471", "age": 67, "index": 471, "body": "VTKGNKUHMP"} +{"name": "pymc787", "age": 80, "index": 787, "body": "VTKGNKUHMP"} +{"name": "pymc784", "age": 77, "index": 784, "body": "VTKGNKUHMP"} +{"name": "pymc593", "age": 88, "index": 593, "body": "VTKGNKUHMP"} +{"name": "pymc760", "age": 53, "index": 760, "body": "VTKGNKUHMP"} +{"name": "pymc501", "age": 97, "index": 501, "body": "VTKGNKUHMP"} +{"name": "pymc502", "age": 98, "index": 502, "body": "VTKGNKUHMP"} +{"name": "pymc465", "age": 61, "index": 465, "body": "VTKGNKUHMP"} +{"name": "pymc570", "age": 65, "index": 570, "body": "VTKGNKUHMP"} +{"name": "pymc573", "age": 68, "index": 573, "body": "VTKGNKUHMP"} +{"name": "pymc563", "age": 58, "index": 563, "body": "VTKGNKUHMP"} +{"name": "pymc796", "age": 89, "index": 796, "body": "VTKGNKUHMP"} +{"name": "pymc565", "age": 60, "index": 565, "body": "VTKGNKUHMP"} +{"name": "pymc785", "age": 78, "index": 785, "body": "VTKGNKUHMP"} +{"name": "pymc577", "age": 72, "index": 577, "body": "VTKGNKUHMP"} +{"name": "pymc566", "age": 61, "index": 566, "body": "VTKGNKUHMP"} +{"name": "pymc466", "age": 62, "index": 466, "body": "VTKGNKUHMP"} +{"name": "pymc514", "age": 9, "index": 514, "body": "VTKGNKUHMP"} +{"name": "pymc510", "age": 5, "index": 510, "body": "VTKGNKUHMP"} +{"name": "pymc583", "age": 78, "index": 583, "body": "VTKGNKUHMP"} +{"name": "pymc580", "age": 75, "index": 580, "body": "VTKGNKUHMP"} +{"name": "pymc506", "age": 1, "index": 506, "body": "VTKGNKUHMP"} +{"name": "pymc792", "age": 85, "index": 792, "body": "VTKGNKUHMP"} +{"name": "pymc505", "age": 0, "index": 505, "body": "VTKGNKUHMP"} +{"name": "pymc503", "age": 99, "index": 503, "body": "VTKGNKUHMP"} +{"name": "pymc595", "age": 90, "index": 595, "body": "VTKGNKUHMP"} +{"name": "pymc513", "age": 8, "index": 513, "body": "VTKGNKUHMP"} +{"name": "pymc434", "age": 30, "index": 434, "body": "VTKGNKUHMP"} +{"name": "pymc462", "age": 58, "index": 462, "body": "VTKGNKUHMP"} +{"name": "pymc464", "age": 60, "index": 464, "body": "VTKGNKUHMP"} +{"name": "pymc781", "age": 74, "index": 781, "body": "VTKGNKUHMP"} +{"name": "pymc561", "age": 56, "index": 561, "body": "VTKGNKUHMP"} +{"name": "pymc782", "age": 75, "index": 782, "body": "VTKGNKUHMP"} +{"name": "pymc791", "age": 84, "index": 791, "body": "VTKGNKUHMP"} +{"name": "pymc794", "age": 87, "index": 794, "body": "VTKGNKUHMP"} +{"name": "pymc590", "age": 85, "index": 590, "body": "VTKGNKUHMP"} +{"name": "pymc586", "age": 81, "index": 586, "body": "VTKGNKUHMP"} +{"name": "pymc474", "age": 70, "index": 474, "body": "VTKGNKUHMP"} +{"name": "pymc431", "age": 27, "index": 431, "body": "VTKGNKUHMP"} +{"name": "pymc507", "age": 2, "index": 507, "body": "VTKGNKUHMP"} +{"name": "pymc467", "age": 63, "index": 467, "body": "VTKGNKUHMP"} +{"name": "pymc511", "age": 6, "index": 511, "body": "VTKGNKUHMP"} diff --git a/resources/imex/json_1000_lines_invalid b/resources/imex/json_1000_lines_invalid new file mode 100644 index 000000000..cc1c936ab --- /dev/null +++ b/resources/imex/json_1000_lines_invalid @@ -0,0 +1,1000 @@ +{"name": "pymc189", "age": 88, "index": 189, "body": "VTKGNKUHMP"} +{"name": "pymc735", "age": 28, "index": 735, "body": "VTKGNKUHMP"} +{"name": "pymc959", "age": 50, "index": 959, "body": "VTKGNKUHMP"} +{"name":: "pymc272", "age": 70, "index": 272, "body": "VTKGNKUHMP"} +{"name": "pymc884", "age": 76, "index": 884, "body": "VTKGNKUHMP"} +{"name": "pymc239", "age": 37, "index": 239, "body": "VTKGNKUHMP"} +{"name": "pymc77", "age": 77, "index": 77, "body": "VTKGNKUHMP"} +{"name": 
"pymc886", "age": 78, "index": 886, "body": "VTKGNKUHMP"} +{"name": "pymc95", "age": 95, "index": 95, "body": "VTKGNKUHMP"} +{"name": "pymc73", "age": 73, "index": 73, "body": "VTKGNKUHMP"} +{"name": "pymc373", "age": 70, "index": 373, "body": "VTKGNKUHMP"} +{"name": "pymc459", "age": 55, "index": 459, "body": "VTKGNKUHMP"} +{"name": "pymc282", "age": 80, "index": 282, "body": "VTKGNKUHMP"} +{"name": "pymc966", "age": 57, "index": 966, "body": "VTKGNKUHMP"} +{"name": "pymc947", "age": 38, "index": 947, "body": "VTKGNKUHMP"} +{"name": "pymc334", "age": 31, "index": 334, "body": "VTKGNKUHMP"} +{"name": "pymc367", "age": 64, "index": 367, "body": "VTKGNKUHMP"} +{"name": "pymc868", "age": 60, "index": 868, "body": "VTKGNKUHMP"} +{"name": "pymc358", "age": 55, "index": 358, "body": "VTKGNKUHMP"} +{"name": "pymc658", "age": 52, "index": 658, "body": "VTKGNKUHMP"} +{"name": "pymc262", "age": 60, "index": 262, "body": "VTKGNKUHMP"} +{"name": "pymc844", "age": 36, "index": 844, "body": "VTKGNKUHMP"} +{"name": "pymc153", "age": 52, "index": 153, "body": "VTKGNKUHMP"} +{"name": "pymc30", "age": 30, "index": 30, "body": "VTKGNKUHMP"} +{"name": "pymc529", "age": 24, "index": 529, "body": "VTKGNKUHMP"} +{"name": "pymc276", "age": 74, "index": 276, "body": "VTKGNKUHMP"} +{"name": "pymc498", "age": 94, "index": 498, "body": "VTKGNKUHMP"} +{"name": "pymc638", "age": 32, "index": 638, "body": "VTKGNKUHMP"} +{"name": "pymc184", "age": 83, "index": 184, "body": "VTKGNKUHMP"} +{"name": "pymc159", "age": 58, "index": 159, "body": "VTKGNKUHMP"} +{"name": "pymc668", "age": 62, "index": 668, "body": "VTKGNKUHMP"} +{"name": "pymc112", "age": 11, "index": 112, "body": "VTKGNKUHMP"} +{"name": "pymc62", "age": 62, "index": 62, "body": "VTKGNKUHMP"} +{"name": "pymc9", "age": 9, "index": 9, "body": "VTKGNKUHMP"} +{"name": "pymc25", "age": 25, "index": 25, "body": "VTKGNKUHMP"} +{"name": "pymc361", "age": 58, "index": 361, "body": "VTKGNKUHMP"} +{"name": "pymc12", "age": 12, "index": 12, "body": "VTKGNKUHMP"} +{"name": "pymc32", "age": 32, "index": 32, "body": "VTKGNKUHMP"} +{"name": "pymc362", "age": 59, "index": 362, "body": "VTKGNKUHMP"} +{"name": "pymc265", "age": 63, "index": 265, "body": "VTKGNKUHMP"} +{"name": "pymc221", "age": 19, "index": 221, "body": "VTKGNKUHMP"} +{"name": "pymc162", "age": 61, "index": 162, "body": "VTKGNKUHMP"} +{"name": "pymc274", "age": 72, "index": 274, "body": "VTKGNKUHMP"} +{"name": "pymc801", "age": 94, "index": 801, "body": "VTKGNKUHMP"} +{"name": "pymc356", "age": 53, "index": 356, "body": "VTKGNKUHMP"} +{"name": "pymc58", "age": 58, "index": 58, "body": "VTKGNKUHMP"} +{"name": "pymc91", "age": 91, "index": 91, "body": "VTKGNKUHMP"} +{"name": "pymc933", "age": 24, "index": 933, "body": "VTKGNKUHMP"} +{"name": "pymc478", "age": 74, "index": 478, "body": "VTKGNKUHMP"} +{"name": "pymc152", "age": 51, "index": 152, "body": "VTKGNKUHMP"} +{"name": "pymc110", "age": 9, "index": 110, "body": "VTKGNKUHMP"} +{"name": "pymc816", "age": 8, "index": 816, "body": "VTKGNKUHMP"} +{"name": "pymc845", "age": 37, "index": 845, "body": "VTKGNKUHMP"} +{"name": "pymc191", "age": 90, "index": 191, "body": "VTKGNKUHMP"} +{"name": "pymc257", "age": 55, "index": 257, "body": "VTKGNKUHMP"} +{"name": "pymc323", "age": 20, "index": 323, "body": "VTKGNKUHMP"} +{"name": "pymc170", "age": 69, "index": 170, "body": "VTKGNKUHMP"} +{"name": "pymc559", "age": 54, "index": 559, "body": "VTKGNKUHMP"} +{"name": "pymc890", "age": 82, "index": 890, "body": "VTKGNKUHMP"} +{"name": "pymc736", "age": 29, "index": 736, "body": 
"VTKGNKUHMP"} +{"name": "pymc224", "age": 22, "index": 224, "body": "VTKGNKUHMP"} +{"name": "pymc757", "age": 50, "index": 757, "body": "VTKGNKUHMP"} +{"name": "pymc696", "age": 90, "index": 696, "body": "VTKGNKUHMP"} +{"name": "pymc92", "age": 92, "index": 92, "body": "VTKGNKUHMP"} +{"name": "pymc369", "age": 66, "index": 369, "body": "VTKGNKUHMP"} +{"name": "pymc489", "age": 85, "index": 489, "body": "VTKGNKUHMP"} +{"name": "pymc325", "age": 22, "index": 325, "body": "VTKGNKUHMP"} +{"name": "pymc354", "age": 51, "index": 354, "body": "VTKGNKUHMP"} +{"name": "pymc851", "age": 43, "index": 851, "body": "VTKGNKUHMP"} +{"name": "pymc54", "age": 54, "index": 54, "body": "VTKGNKUHMP"} +{"name": "pymc146", "age": 45, "index": 146, "body": "VTKGNKUHMP"} +{"name": "pymc89", "age": 89, "index": 89, "body": "VTKGNKUHMP"} +{"name": "pymc619", "age": 13, "index": 619, "body": "VTKGNKUHMP"} +{"name": "pymc360", "age": 57, "index": 360, "body": "VTKGNKUHMP"} +{"name": "pymc843", "age": 35, "index": 843, "body": "VTKGNKUHMP"} +{"name": "pymc832", "age": 24, "index": 832, "body": "VTKGNKUHMP"} +{"name": "pymc539", "age": 34, "index": 539, "body": "VTKGNKUHMP"} +{"name": "pymc322", "age": 19, "index": 322, "body": "VTKGNKUHMP"} +{"name": "pymc737", "age": 30, "index": 737, "body": "VTKGNKUHMP"} +{"name": "pymc881", "age": 73, "index": 881, "body": "VTKGNKUHMP"} +{"name": "pymc957", "age": 48, "index": 957, "body": "VTKGNKUHMP"} +{"name": "pymc99", "age": 99, "index": 99, "body": "VTKGNKUHMP"} +{"name": "pymc256", "age": 54, "index": 256, "body": "VTKGNKUHMP"} +{"name": "pymc949", "age": 40, "index": 949, "body": "VTKGNKUHMP"} +{"name": "pymc72", "age": 72, "index": 72, "body": "VTKGNKUHMP"} +{"name": "pymc128", "age": 27, "index": 128, "body": "VTKGNKUHMP"} +{"name": "pymc291", "age": 89, "index": 291, "body": "VTKGNKUHMP"} +{"name": "pymc939", "age": 30, "index": 939, "body": "VTKGNKUHMP"} +{"name": "pymc569", "age": 64, "index": 569, "body": "VTKGNKUHMP"} +{"name": "pymc865", "age": 57, "index": 865, "body": "VTKGNKUHMP"} +{"name": "pymc277", "age": 75, "index": 277, "body": "VTKGNKUHMP"} +{"name": "pymc139", "age": 38, "index": 139, "body": "VTKGNKUHMP"} +{"name": "pymc69", "age": 69, "index": 69, "body": "VTKGNKUHMP"} +{"name": "pymc79", "age": 79, "index": 79, "body": "VTKGNKUHMP"} +{"name": "pymc220", "age": 18, "index": 220, "body": "VTKGNKUHMP"} +{"name": "pymc896", "age": 88, "index": 896, "body": "VTKGNKUHMP"} +{"name": "pymc29", "age": 29, "index": 29, "body": "VTKGNKUHMP"} +{"name": "pymc876", "age": 68, "index": 876, "body": "VTKGNKUHMP"} +{"name": "pymc55", "age": 55, "index": 55, "body": "VTKGNKUHMP"} +{"name": "pymc271", "age": 69, "index": 271, "body": "VTKGNKUHMP"} +{"name": "pymc977", "age": 68, "index": 977, "body": "VTKGNKUHMP"} +{"name": "pymc961", "age": 52, "index": 961, "body": "VTKGNKUHMP"} +{"name": "pymc225", "age": 23, "index": 225, "body": "VTKGNKUHMP"} +{"name": "pymc690", "age": 84, "index": 690, "body": "VTKGNKUHMP"} +{"name": "pymc243", "age": 41, "index": 243, "body": "VTKGNKUHMP"} +{"name": "pymc648", "age": 42, "index": 648, "body": "VTKGNKUHMP"} +{"name": "pymc377", "age": 74, "index": 377, "body": "VTKGNKUHMP"} +{"name": "pymc261", "age": 59, "index": 261, "body": "VTKGNKUHMP"} +{"name": "pymc379", "age": 76, "index": 379, "body": "VTKGNKUHMP"} +{"name": "pymc837", "age": 29, "index": 837, "body": "VTKGNKUHMP"} +{"name": "pymc98", "age": 98, "index": 98, "body": "VTKGNKUHMP"} +{"name": "pymc872", "age": 64, "index": 872, "body": "VTKGNKUHMP"} +{"name": "pymc827", 
"age": 19, "index": 827, "body": "VTKGNKUHMP"} +{"name": "pymc7", "age": 7, "index": 7, "body": "VTKGNKUHMP"} +{"name": "pymc366", "age": 63, "index": 366, "body": "VTKGNKUHMP"} +{"name": "pymc82", "age": 82, "index": 82, "body": "VTKGNKUHMP"} +{"name": "pymc283", "age": 81, "index": 283, "body": "VTKGNKUHMP"} +{"name": "pymc734", "age": 27, "index": 734, "body": "VTKGNKUHMP"} +{"name": "pymc41", "age": 41, "index": 41, "body": "VTKGNKUHMP"} +{"name": "pymc862", "age": 54, "index": 862, "body": "VTKGNKUHMP"} +{"name": "pymc181", "age": 80, "index": 181, "body": "VTKGNKUHMP"} +{"name": "pymc958", "age": 49, "index": 958, "body": "VTKGNKUHMP"} +{"name": "pymc127", "age": 26, "index": 127, "body": "VTKGNKUHMP"} +{"name": "pymc548", "age": 43, "index": 548, "body": "VTKGNKUHMP"} +{"name": "pymc118", "age": 17, "index": 118, "body": "VTKGNKUHMP"} +{"name": "pymc280", "age": 78, "index": 280, "body": "VTKGNKUHMP"} +{"name": "pymc685", "age": 79, "index": 685, "body": "VTKGNKUHMP"} +{"name": "pymc102", "age": 1, "index": 102, "body": "VTKGNKUHMP"} +{"name": "pymc368", "age": 65, "index": 368, "body": "VTKGNKUHMP"} +{"name": "pymc278", "age": 76, "index": 278, "body": "VTKGNKUHMP"} +{"name": "pymc286", "age": 84, "index": 286, "body": "VTKGNKUHMP"} +{"name": "pymc0", "age": 0, "index": 0, "body": "VTKGNKUHMP"} +{"name": "pymc253", "age": 51, "index": 253, "body": "VTKGNKUHMP"} +{"name": "pymc163", "age": 62, "index": 163, "body": "VTKGNKUHMP"} +{"name": "pymc805", "age": 98, "index": 805, "body": "VTKGNKUHMP"} +{"name": "pymc469", "age": 65, "index": 469, "body": "VTKGNKUHMP"} +{"name": "pymc206", "age": 4, "index": 206, "body": "VTKGNKUHMP"} +{"name": "pymc825", "age": 17, "index": 825, "body": "VTKGNKUHMP"} +{"name": "pymc578", "age": 73, "index": 578, "body": "VTKGNKUHMP"} +{"name": "pymc86", "age": 86, "index": 86, "body": "VTKGNKUHMP"} +{"name": "pymc122", "age": 21, "index": 122, "body": "VTKGNKUHMP"} +{"name": "pymc289", "age": 87, "index": 289, "body": "VTKGNKUHMP"} +{"name": "pymc808", "age": 0, "index": 808, "body": "VTKGNKUHMP"} +{"name": "pymc57", "age": 57, "index": 57, "body": "VTKGNKUHMP"} +{"name": "pymc108", "age": 7, "index": 108, "body": "VTKGNKUHMP"} +{"name": "pymc692", "age": 86, "index": 692, "body": "VTKGNKUHMP"} +{"name": "pymc94", "age": 94, "index": 94, "body": "VTKGNKUHMP"} +{"name": "pymc213", "age": 11, "index": 213, "body": "VTKGNKUHMP"} +{"name": "pymc975", "age": 66, "index": 975, "body": "VTKGNKUHMP"} +{"name": "pymc160", "age": 59, "index": 160, "body": "VTKGNKUHMP"} +{"name": "pymc898", "age": 90, "index": 898, "body": "VTKGNKUHMP"} +{"name": "pymc173", "age": 72, "index": 173, "body": "VTKGNKUHMP"} +{"name": "pymc836", "age": 28, "index": 836, "body": "VTKGNKUHMP"} +{"name": "pymc176", "age": 75, "index": 176, "body": "VTKGNKUHMP"} +{"name": "pymc804", "age": 97, "index": 804, "body": "VTKGNKUHMP"} +{"name": "pymc820", "age": 12, "index": 820, "body": "VTKGNKUHMP"} +{"name": "pymc893", "age": 85, "index": 893, "body": "VTKGNKUHMP"} +{"name": "pymc20", "age": 20, "index": 20, "body": "VTKGNKUHMP"} +{"name": "pymc821", "age": 13, "index": 821, "body": "VTKGNKUHMP"} +{"name": "pymc372", "age": 69, "index": 372, "body": "VTKGNKUHMP"} +{"name": "pymc332", "age": 29, "index": 332, "body": "VTKGNKUHMP"} +{"name": "pymc618", "age": 12, "index": 618, "body": "VTKGNKUHMP"} +{"name": "pymc695", "age": 89, "index": 695, "body": "VTKGNKUHMP"} +{"name": "pymc754", "age": 47, "index": 754, "body": "VTKGNKUHMP"} +{"name": "pymc807", "age": 100, "index": 807, "body": 
"VTKGNKUHMP"} +{"name": "pymc863", "age": 55, "index": 863, "body": "VTKGNKUHMP"} +{"name": "pymc364", "age": 61, "index": 364, "body": "VTKGNKUHMP"} +{"name": "pymc883", "age": 75, "index": 883, "body": "VTKGNKUHMP"} +{"name": "pymc680", "age": 74, "index": 680, "body": "VTKGNKUHMP"} +{"name": "pymc46", "age": 46, "index": 46, "body": "VTKGNKUHMP"} +{"name": "pymc758", "age": 51, "index": 758, "body": "VTKGNKUHMP"} +{"name": "pymc873", "age": 65, "index": 873, "body": "VTKGNKUHMP"} +{"name": "pymc109", "age": 8, "index": 109, "body": "VTKGNKUHMP"} +{"name": "pymc149", "age": 48, "index": 149, "body": "VTKGNKUHMP"} +{"name": "pymc956", "age": 47, "index": 956, "body": "VTKGNKUHMP"} +{"name": "pymc234", "age": 32, "index": 234, "body": "VTKGNKUHMP"} +{"name": "pymc743", "age": 36, "index": 743, "body": "VTKGNKUHMP"} +{"name": "pymc296", "age": 94, "index": 296, "body": "VTKGNKUHMP"} +{"name": "pymc806", "age": 99, "index": 806, "body": "VTKGNKUHMP"} +{"name": "pymc74", "age": 74, "index": 74, "body": "VTKGNKUHMP"} +{"name": "pymc238", "age": 36, "index": 238, "body": "VTKGNKUHMP"} +{"name": "pymc67", "age": 67, "index": 67, "body": "VTKGNKUHMP"} +{"name": "pymc71", "age": 71, "index": 71, "body": "VTKGNKUHMP"} +{"name": "pymc930", "age": 21, "index": 930, "body": "VTKGNKUHMP"} +{"name": "pymc251", "age": 49, "index": 251, "body": "VTKGNKUHMP"} +{"name": "pymc854", "age": 46, "index": 854, "body": "VTKGNKUHMP"} +{"name": "pymc241", "age": 39, "index": 241, "body": "VTKGNKUHMP"} +{"name": "pymc336", "age": 33, "index": 336, "body": "VTKGNKUHMP"} +{"name": "pymc2", "age": 2, "index": 2, "body": "VTKGNKUHMP"} +{"name": "pymc328", "age": 25, "index": 328, "body": "VTKGNKUHMP"} +{"name": "pymc52", "age": 52, "index": 52, "body": "VTKGNKUHMP"} +{"name": "pymc210", "age": 8, "index": 210, "body": "VTKGNKUHMP"} +{"name": "pymc973", "age": 64, "index": 973, "body": "VTKGNKUHMP"} +{"name": "pymc857", "age": 49, "index": 857, "body": "VTKGNKUHMP"} +{"name": "pymc929", "age": 20, "index": 929, "body": "VTKGNKUHMP"} +{"name": "pymc78", "age": 78, "index": 78, "body": "VTKGNKUHMP"} +{"name": "pymc722", "age": 15, "index": 722, "body": "VTKGNKUHMP"} +{"name": "pymc327", "age": 24, "index": 327, "body": "VTKGNKUHMP"} +{"name": "pymc14", "age": 14, "index": 14, "body": "VTKGNKUHMP"} +{"name": "pymc744", "age": 37, "index": 744, "body": "VTKGNKUHMP"} +{"name": "pymc183", "age": 82, "index": 183, "body": "VTKGNKUHMP"} +{"name": "pymc329", "age": 26, "index": 329, "body": "VTKGNKUHMP"} +{"name": "pymc549", "age": 44, "index": 549, "body": "VTKGNKUHMP"} +{"name": "pymc921", "age": 12, "index": 921, "body": "VTKGNKUHMP"} +{"name": "pymc19", "age": 19, "index": 19, "body": "VTKGNKUHMP"} +{"name": "pymc724", "age": 17, "index": 724, "body": "VTKGNKUHMP"} +{"name": "pymc281", "age": 79, "index": 281, "body": "VTKGNKUHMP"} +{"name": "pymc207", "age": 5, "index": 207, "body": "VTKGNKUHMP"} +{"name": "pymc156", "age": 55, "index": 156, "body": "VTKGNKUHMP"} +{"name": "pymc172", "age": 71, "index": 172, "body": "VTKGNKUHMP"} +{"name": "pymc140", "age": 39, "index": 140, "body": "VTKGNKUHMP"} +{"name": "pymc229", "age": 27, "index": 229, "body": "VTKGNKUHMP"} +{"name": "pymc147", "age": 46, "index": 147, "body": "VTKGNKUHMP"} +{"name": "pymc941", "age": 32, "index": 941, "body": "VTKGNKUHMP"} +{"name": "pymc130", "age": 29, "index": 130, "body": "VTKGNKUHMP"} +{"name": "pymc168", "age": 67, "index": 168, "body": "VTKGNKUHMP"} +{"name": "pymc954", "age": 45, "index": 954, "body": "VTKGNKUHMP"} +{"name": "pymc97", "age": 
97, "index": 97, "body": "VTKGNKUHMP"} +{"name": "pymc856", "age": 48, "index": 856, "body": "VTKGNKUHMP"} +{"name": "pymc50", "age": 50, "index": 50, "body": "VTKGNKUHMP"} +{"name": "pymc935", "age": 26, "index": 935, "body": "VTKGNKUHMP"} +{"name": "pymc124", "age": 23, "index": 124, "body": "VTKGNKUHMP"} +{"name": "pymc759", "age": 52, "index": 759, "body": "VTKGNKUHMP"} +{"name": "pymc216", "age": 14, "index": 216, "body": "VTKGNKUHMP"} +{"name": "pymc228", "age": 26, "index": 228, "body": "VTKGNKUHMP"} +{"name": "pymc275", "age": 73, "index": 275, "body": "VTKGNKUHMP"} +{"name": "pymc895", "age": 87, "index": 895, "body": "VTKGNKUHMP"} +{"name": "pymc824", "age": 16, "index": 824, "body": "VTKGNKUHMP"} +{"name": "pymc749", "age": 42, "index": 749, "body": "VTKGNKUHMP"} +{"name": "pymc132", "age": 31, "index": 132, "body": "VTKGNKUHMP"} +{"name": "pymc940", "age": 31, "index": 940, "body": "VTKGNKUHMP"} +{"name": "pymc121", "age": 20, "index": 121, "body": "VTKGNKUHMP"} +{"name": "pymc288", "age": 86, "index": 288, "body": "VTKGNKUHMP"} +{"name": "pymc203", "age": 1, "index": 203, "body": "VTKGNKUHMP"} +{"name": "pymc63", "age": 63, "index": 63, "body": "VTKGNKUHMP"} +{"name": "pymc15", "age": 15, "index": 15, "body": "VTKGNKUHMP"} +{"name": "pymc164", "age": 63, "index": 164, "body": "VTKGNKUHMP"} +{"name": "pymc830", "age": 22, "index": 830, "body": "VTKGNKUHMP"} +{"name": "pymc137", "age": 36, "index": 137, "body": "VTKGNKUHMP"} +{"name": "pymc31", "age": 31, "index": 31, "body": "VTKGNKUHMP"} +{"name": "pymc721", "age": 14, "index": 721, "body": "VTKGNKUHMP"} +{"name": "pymc192", "age": 91, "index": 192, "body": "VTKGNKUHMP"} +{"name": "pymc629", "age": 23, "index": 629, "body": "VTKGNKUHMP"} +{"name": "pymc753", "age": 46, "index": 753, "body": "VTKGNKUHMP"} +{"name": "pymc126", "age": 25, "index": 126, "body": "VTKGNKUHMP"} +{"name": "pymc186", "age": 85, "index": 186, "body": "VTKGNKUHMP"} +{"name": "pymc359", "age": 56, "index": 359, "body": "VTKGNKUHMP"} +{"name": "pymc22", "age": 22, "index": 22, "body": "VTKGNKUHMP"} +{"name": "pymc120", "age": 19, "index": 120, "body": "VTKGNKUHMP"} +{"name": "pymc678", "age": 72, "index": 678, "body": "VTKGNKUHMP"} +{"name": "pymc747", "age": 40, "index": 747, "body": "VTKGNKUHMP"} +{"name": "pymc699", "age": 93, "index": 699, "body": "VTKGNKUHMP"} +{"name": "pymc264", "age": 62, "index": 264, "body": "VTKGNKUHMP"} +{"name": "pymc365", "age": 62, "index": 365, "body": "VTKGNKUHMP"} +{"name": "pymc119", "age": 18, "index": 119, "body": "VTKGNKUHMP"} +{"name": "pymc924", "age": 15, "index": 924, "body": "VTKGNKUHMP"} +{"name": "pymc158", "age": 57, "index": 158, "body": "VTKGNKUHMP"} +{"name": "pymc659", "age": 53, "index": 659, "body": "VTKGNKUHMP"} +{"name": "pymc23", "age": 23, "index": 23, "body": "VTKGNKUHMP"} +{"name": "pymc66", "age": 66, "index": 66, "body": "VTKGNKUHMP"} +{"name": "pymc331", "age": 28, "index": 331, "body": "VTKGNKUHMP"} +{"name": "pymc335", "age": 32, "index": 335, "body": "VTKGNKUHMP"} +{"name": "pymc978", "age": 69, "index": 978, "body": "VTKGNKUHMP"} +{"name": "pymc810", "age": 2, "index": 810, "body": "VTKGNKUHMP"} +{"name": "pymc39", "age": 39, "index": 39, "body": "VTKGNKUHMP"} +{"name": "pymc60", "age": 60, "index": 60, "body": "VTKGNKUHMP"} +{"name": "pymc846", "age": 38, "index": 846, "body": "VTKGNKUHMP"} +{"name": "pymc59", "age": 59, "index": 59, "body": "VTKGNKUHMP"} +{"name": "pymc116", "age": 15, "index": 116, "body": "VTKGNKUHMP"} +{"name": "pymc51", "age": 51, "index": 51, "body": "VTKGNKUHMP"} 
+{"name": "pymc963", "age": 54, "index": 963, "body": "VTKGNKUHMP"} +{"name": "pymc260", "age": 58, "index": 260, "body": "VTKGNKUHMP"} +{"name": "pymc835", "age": 27, "index": 835, "body": "VTKGNKUHMP"} +{"name": "pymc746", "age": 39, "index": 746, "body": "VTKGNKUHMP"} +{"name": "pymc40", "age": 40, "index": 40, "body": "VTKGNKUHMP"} +{"name": "pymc113", "age": 12, "index": 113, "body": "VTKGNKUHMP"} +{"name": "pymc144", "age": 43, "index": 144, "body": "VTKGNKUHMP"} +{"name": "pymc68", "age": 68, "index": 68, "body": "VTKGNKUHMP"} +{"name": "pymc237", "age": 35, "index": 237, "body": "VTKGNKUHMP"} +{"name": "pymc231", "age": 29, "index": 231, "body": "VTKGNKUHMP"} +{"name": "pymc742", "age": 35, "index": 742, "body": "VTKGNKUHMP"} +{"name": "pymc233", "age": 31, "index": 233, "body": "VTKGNKUHMP"} +{"name": "pymc333", "age": 30, "index": 333, "body": "VTKGNKUHMP"} +{"name": "pymc10", "age": 10, "index": 10, "body": "VTKGNKUHMP"} +{"name": "pymc26", "age": 26, "index": 26, "body": "VTKGNKUHMP"} +{"name": "pymc36", "age": 36, "index": 36, "body": "VTKGNKUHMP"} +{"name": "pymc860", "age": 52, "index": 860, "body": "VTKGNKUHMP"} +{"name": "pymc5", "age": 5, "index": 5, "body": "VTKGNKUHMP"} +{"name": "pymc56", "age": 56, "index": 56, "body": "VTKGNKUHMP"} +{"name": "pymc85", "age": 85, "index": 85, "body": "VTKGNKUHMP"} +{"name": "pymc3", "age": 3, "index": 3, "body": "VTKGNKUHMP"} +{"name": "pymc80", "age": 80, "index": 80, "body": "VTKGNKUHMP"} +{"name": "pymc169", "age": 68, "index": 169, "body": "VTKGNKUHMP"} +{"name": "pymc822", "age": 14, "index": 822, "body": "VTKGNKUHMP"} +{"name": "pymc897", "age": 89, "index": 897, "body": "VTKGNKUHMP"} +{"name": "pymc867", "age": 59, "index": 867, "body": "VTKGNKUHMP"} +{"name": "pymc694", "age": 88, "index": 694, "body": "VTKGNKUHMP"} +{"name": "pymc179", "age": 78, "index": 179, "body": "VTKGNKUHMP"} +{"name": "pymc768", "age": 61, "index": 768, "body": "VTKGNKUHMP"} +{"name": "pymc818", "age": 10, "index": 818, "body": "VTKGNKUHMP"} +{"name": "pymc193", "age": 92, "index": 193, "body": "VTKGNKUHMP"} +{"name": "pymc419", "age": 15, "index": 419, "body": "VTKGNKUHMP"} +{"name": "pymc61", "age": 61, "index": 61, "body": "VTKGNKUHMP"} +{"name": "pymc689", "age": 83, "index": 689, "body": "VTKGNKUHMP"} +{"name": "pymc639", "age": 33, "index": 639, "body": "VTKGNKUHMP"} +{"name": "pymc819", "age": 11, "index": 819, "body": "VTKGNKUHMP"} +{"name": "pymc285", "age": 83, "index": 285, "body": "VTKGNKUHMP"} +{"name": "pymc953", "age": 44, "index": 953, "body": "VTKGNKUHMP"} +{"name": "pymc269", "age": 67, "index": 269, "body": "VTKGNKUHMP"} +{"name": "pymc65", "age": 65, "index": 65, "body": "VTKGNKUHMP"} +{"name": "pymc499", "age": 95, "index": 499, "body": "VTKGNKUHMP"} +{"name": "pymc178", "age": 77, "index": 178, "body": "VTKGNKUHMP"} +{"name": "pymc829", "age": 21, "index": 829, "body": "VTKGNKUHMP"} +{"name": "pymc363", "age": 60, "index": 363, "body": "VTKGNKUHMP"} +{"name": "pymc161", "age": 60, "index": 161, "body": "VTKGNKUHMP"} +{"name": "pymc688", "age": 82, "index": 688, "body": "VTKGNKUHMP"} +{"name": "pymc934", "age": 25, "index": 934, "body": "VTKGNKUHMP"} +{"name": "pymc376", "age": 73, "index": 376, "body": "VTKGNKUHMP"} +{"name": "pymc859", "age": 51, "index": 859, "body": "VTKGNKUHMP"} +{"name": "pymc888", "age": 80, "index": 888, "body": "VTKGNKUHMP"} +{"name": "pymc212", "age": 10, "index": 212, "body": "VTKGNKUHMP"} +{"name": "pymc853", "age": 45, "index": 853, "body": "VTKGNKUHMP"} +{"name": "pymc200", "age": 99, "index": 200, 
"body": "VTKGNKUHMP"} +{"name": "pymc145", "age": 44, "index": 145, "body": "VTKGNKUHMP"} +{"name": "pymc175", "age": 74, "index": 175, "body": "VTKGNKUHMP"} +{"name": "pymc691", "age": 85, "index": 691, "body": "VTKGNKUHMP"} +{"name": "pymc945", "age": 36, "index": 945, "body": "VTKGNKUHMP"} +{"name": "pymc738", "age": 31, "index": 738, "body": "VTKGNKUHMP"} +{"name": "pymc370", "age": 67, "index": 370, "body": "VTKGNKUHMP"} +{"name": "pymc811", "age": 3, "index": 811, "body": "VTKGNKUHMP"} +{"name": "pymc891", "age": 83, "index": 891, "body": "VTKGNKUHMP"} +{"name": "pymc236", "age": 34, "index": 236, "body": "VTKGNKUHMP"} +{"name": "pymc320", "age": 17, "index": 320, "body": "VTKGNKUHMP"} +{"name": "pymc195", "age": 94, "index": 195, "body": "VTKGNKUHMP"} +{"name": "pymc858", "age": 50, "index": 858, "body": "VTKGNKUHMP"} +{"name": "pymc194", "age": 93, "index": 194, "body": "VTKGNKUHMP"} +{"name": "pymc608", "age": 2, "index": 608, "body": "VTKGNKUHMP"} +{"name": "pymc682", "age": 76, "index": 682, "body": "VTKGNKUHMP"} +{"name": "pymc252", "age": 50, "index": 252, "body": "VTKGNKUHMP"} +{"name": "pymc943", "age": 34, "index": 943, "body": "VTKGNKUHMP"} +{"name": "pymc861", "age": 53, "index": 861, "body": "VTKGNKUHMP"} +{"name": "pymc809", "age": 1, "index": 809, "body": "VTKGNKUHMP"} +{"name": "pymc197", "age": 96, "index": 197, "body": "VTKGNKUHMP"} +{"name": "pymc106", "age": 5, "index": 106, "body": "VTKGNKUHMP"} +{"name": "pymc838", "age": 30, "index": 838, "body": "VTKGNKUHMP"} +{"name": "pymc16", "age": 16, "index": 16, "body": "VTKGNKUHMP"} +{"name": "pymc741", "age": 34, "index": 741, "body": "VTKGNKUHMP"} +{"name": "pymc849", "age": 41, "index": 849, "body": "VTKGNKUHMP"} +{"name": "pymc105", "age": 4, "index": 105, "body": "VTKGNKUHMP"} +{"name": "pymc378", "age": 75, "index": 378, "body": "VTKGNKUHMP"} +{"name": "pymc45", "age": 45, "index": 45, "body": "VTKGNKUHMP"} +{"name": "pymc177", "age": 76, "index": 177, "body": "VTKGNKUHMP"} +{"name": "pymc104", "age": 3, "index": 104, "body": "VTKGNKUHMP"} +{"name": "pymc196", "age": 95, "index": 196, "body": "VTKGNKUHMP"} +{"name": "pymc83", "age": 83, "index": 83, "body": "VTKGNKUHMP"} +{"name": "pymc488", "age": 84, "index": 488, "body": "VTKGNKUHMP"} +{"name": "pymc13", "age": 13, "index": 13, "body": "VTKGNKUHMP"} +{"name": "pymc720", "age": 13, "index": 720, "body": "VTKGNKUHMP"} +{"name": "pymc87", "age": 87, "index": 87, "body": "VTKGNKUHMP"} +{"name": "pymc950", "age": 41, "index": 950, "body": "VTKGNKUHMP"} +{"name": "pymc468", "age": 64, "index": 468, "body": "VTKGNKUHMP"} +{"name": "pymc834", "age": 26, "index": 834, "body": "VTKGNKUHMP"} +{"name": "pymc815", "age": 7, "index": 815, "body": "VTKGNKUHMP"} +{"name": "pymc215", "age": 13, "index": 215, "body": "VTKGNKUHMP"} +{"name": "pymc295", "age": 93, "index": 295, "body": "VTKGNKUHMP"} +{"name": "pymc292", "age": 90, "index": 292, "body": "VTKGNKUHMP"} +{"name": "pymc180", "age": 79, "index": 180, "body": "VTKGNKUHMP"} +{"name": "pymc728", "age": 21, "index": 728, "body": "VTKGNKUHMP"} +{"name": "pymc155", "age": 54, "index": 155, "body": "VTKGNKUHMP"} +{"name": "pymc855", "age": 47, "index": 855, "body": "VTKGNKUHMP"} +{"name": "pymc841", "age": 33, "index": 841, "body": "VTKGNKUHMP"} +{"name": "pymc209", "age": 7, "index": 209, "body": "VTKGNKUHMP"} +{"name": "pymc100", "age": 100, "index": 100, "body": "VTKGNKUHMP"} +{"name": "pymc248", "age": 46, "index": 248, "body": "VTKGNKUHMP"} +{"name": "pymc769", "age": 62, "index": 769, "body": "VTKGNKUHMP"} +{"name": 
"pymc330", "age": 27, "index": 330, "body": "VTKGNKUHMP"} +{"name": "pymc142", "age": 41, "index": 142, "body": "VTKGNKUHMP"} +{"name": "pymc976", "age": 67, "index": 976, "body": "VTKGNKUHMP"} +{"name": "pymc151", "age": 50, "index": 151, "body": "VTKGNKUHMP"} +{"name": "pymc852", "age": 44, "index": 852, "body": "VTKGNKUHMP"} +{"name": "pymc649", "age": 43, "index": 649, "body": "VTKGNKUHMP"} +{"name": "pymc294", "age": 92, "index": 294, "body": "VTKGNKUHMP"} +{"name": "pymc686", "age": 80, "index": 686, "body": "VTKGNKUHMP"} +{"name": "pymc352", "age": 49, "index": 352, "body": "VTKGNKUHMP"} +{"name": "pymc70", "age": 70, "index": 70, "body": "VTKGNKUHMP"} +{"name": "pymc114", "age": 13, "index": 114, "body": "VTKGNKUHMP"} +{"name": "pymc48", "age": 48, "index": 48, "body": "VTKGNKUHMP"} +{"name": "pymc47", "age": 47, "index": 47, "body": "VTKGNKUHMP"} +{"name": "pymc669", "age": 63, "index": 669, "body": "VTKGNKUHMP"} +{"name": "pymc974", "age": 65, "index": 974, "body": "VTKGNKUHMP"} +{"name": "pymc745", "age": 38, "index": 745, "body": "VTKGNKUHMP"} +{"name": "pymc150", "age": 49, "index": 150, "body": "VTKGNKUHMP"} +{"name": "pymc375", "age": 72, "index": 375, "body": "VTKGNKUHMP"} +{"name": "pymc6", "age": 6, "index": 6, "body": "VTKGNKUHMP"} +{"name": "pymc937", "age": 28, "index": 937, "body": "VTKGNKUHMP"} +{"name": "pymc968", "age": 59, "index": 968, "body": "VTKGNKUHMP"} +{"name": "pymc814", "age": 6, "index": 814, "body": "VTKGNKUHMP"} +{"name": "pymc88", "age": 88, "index": 88, "body": "VTKGNKUHMP"} +{"name": "pymc141", "age": 40, "index": 141, "body": "VTKGNKUHMP"} +{"name": "pymc125", "age": 24, "index": 125, "body": "VTKGNKUHMP"} +{"name": "pymc726", "age": 19, "index": 726, "body": "VTKGNKUHMP"} +{"name": "pymc936", "age": 27, "index": 936, "body": "VTKGNKUHMP"} +{"name": "pymc778", "age": 71, "index": 778, "body": "VTKGNKUHMP"} +{"name": "pymc972", "age": 63, "index": 972, "body": "VTKGNKUHMP"} +{"name": "pymc579", "age": 74, "index": 579, "body": "VTKGNKUHMP"} +{"name": "pymc249", "age": 47, "index": 249, "body": "VTKGNKUHMP"} +{"name": "pymc871", "age": 63, "index": 871, "body": "VTKGNKUHMP"} +{"name": "pymc337", "age": 34, "index": 337, "body": "VTKGNKUHMP"} +{"name": "pymc979", "age": 70, "index": 979, "body": "VTKGNKUHMP"} +{"name": "pymc779", "age": 72, "index": 779, "body": "VTKGNKUHMP"} +{"name": "pymc136", "age": 35, "index": 136, "body": "VTKGNKUHMP"} +{"name": "pymc199", "age": 98, "index": 199, "body": "VTKGNKUHMP"} +{"name": "pymc53", "age": 53, "index": 53, "body": "VTKGNKUHMP"} +{"name": "pymc684", "age": 78, "index": 684, "body": "VTKGNKUHMP"} +{"name": "pymc297", "age": 95, "index": 297, "body": "VTKGNKUHMP"} +{"name": "pymc439", "age": 35, "index": 439, "body": "VTKGNKUHMP"} +{"name": "pymc75", "age": 75, "index": 75, "body": "VTKGNKUHMP"} +{"name": "pymc755", "age": 48, "index": 755, "body": "VTKGNKUHMP"} +{"name": "pymc877", "age": 69, "index": 877, "body": "VTKGNKUHMP"} +{"name": "pymc927", "age": 18, "index": 927, "body": "VTKGNKUHMP"} +{"name": "pymc76", "age": 76, "index": 76, "body": "VTKGNKUHMP"} +{"name": "pymc148", "age": 47, "index": 148, "body": "VTKGNKUHMP"} +{"name": "pymc44", "age": 44, "index": 44, "body": "VTKGNKUHMP"} +{"name": "pymc739", "age": 32, "index": 739, "body": "VTKGNKUHMP"} +{"name": "pymc828", "age": 20, "index": 828, "body": "VTKGNKUHMP"} +{"name": "pymc826", "age": 18, "index": 826, "body": "VTKGNKUHMP"} +{"name": "pymc732", "age": 25, "index": 732, "body": "VTKGNKUHMP"} +{"name": "pymc135", "age": 34, "index": 135, 
"body": "VTKGNKUHMP"} +{"name": "pymc324", "age": 21, "index": 324, "body": "VTKGNKUHMP"} +{"name": "pymc409", "age": 5, "index": 409, "body": "VTKGNKUHMP"} +{"name": "pymc267", "age": 65, "index": 267, "body": "VTKGNKUHMP"} +{"name": "pymc244", "age": 42, "index": 244, "body": "VTKGNKUHMP"} +{"name": "pymc143", "age": 42, "index": 143, "body": "VTKGNKUHMP"} +{"name": "pymc802", "age": 95, "index": 802, "body": "VTKGNKUHMP"} +{"name": "pymc892", "age": 84, "index": 892, "body": "VTKGNKUHMP"} +{"name": "pymc240", "age": 38, "index": 240, "body": "VTKGNKUHMP"} +{"name": "pymc131", "age": 30, "index": 131, "body": "VTKGNKUHMP"} +{"name": "pymc21", "age": 21, "index": 21, "body": "VTKGNKUHMP"} +{"name": "pymc298", "age": 96, "index": 298, "body": "VTKGNKUHMP"} +{"name": "pymc4", "age": 4, "index": 4, "body": "VTKGNKUHMP"} +{"name": "pymc1", "age": 1, "index": 1, "body": "VTKGNKUHMP"} +{"name": "pymc925", "age": 16, "index": 925, "body": "VTKGNKUHMP"} +{"name": "pymc725", "age": 18, "index": 725, "body": "VTKGNKUHMP"} +{"name": "pymc115", "age": 14, "index": 115, "body": "VTKGNKUHMP"} +{"name": "pymc923", "age": 14, "index": 923, "body": "VTKGNKUHMP"} +{"name": "pymc628", "age": 22, "index": 628, "body": "VTKGNKUHMP"} +{"name": "pymc374", "age": 71, "index": 374, "body": "VTKGNKUHMP"} +{"name": "pymc418", "age": 14, "index": 418, "body": "VTKGNKUHMP"} +{"name": "pymc188", "age": 87, "index": 188, "body": "VTKGNKUHMP"} +{"name": "pymc230", "age": 28, "index": 230, "body": "VTKGNKUHMP"} +{"name": "pymc698", "age": 92, "index": 698, "body": "VTKGNKUHMP"} +{"name": "pymc259", "age": 57, "index": 259, "body": "VTKGNKUHMP"} +{"name": "pymc33", "age": 33, "index": 33, "body": "VTKGNKUHMP"} +{"name": "pymc223", "age": 21, "index": 223, "body": "VTKGNKUHMP"} +{"name": "pymc17", "age": 17, "index": 17, "body": "VTKGNKUHMP"} +{"name": "pymc866", "age": 58, "index": 866, "body": "VTKGNKUHMP"} +{"name": "pymc204", "age": 2, "index": 204, "body": "VTKGNKUHMP"} +{"name": "pymc290", "age": 88, "index": 290, "body": "VTKGNKUHMP"} +{"name": "pymc752", "age": 45, "index": 752, "body": "VTKGNKUHMP"} +{"name": "pymc49", "age": 49, "index": 49, "body": "VTKGNKUHMP"} +{"name": "pymc879", "age": 71, "index": 879, "body": "VTKGNKUHMP"} +{"name": "pymc882", "age": 74, "index": 882, "body": "VTKGNKUHMP"} +{"name": "pymc351", "age": 48, "index": 351, "body": "VTKGNKUHMP"} +{"name": "pymc813", "age": 5, "index": 813, "body": "VTKGNKUHMP"} +{"name": "pymc263", "age": 61, "index": 263, "body": "VTKGNKUHMP"} +{"name": "pymc932", "age": 23, "index": 932, "body": "VTKGNKUHMP"} +{"name": "pymc942", "age": 33, "index": 942, "body": "VTKGNKUHMP"} +{"name": "pymc299", "age": 97, "index": 299, "body": "VTKGNKUHMP"} +{"name": "pymc219", "age": 17, "index": 219, "body": "VTKGNKUHMP"} +{"name": "pymc35", "age": 35, "index": 35, "body": "VTKGNKUHMP"} +{"name": "pymc970", "age": 61, "index": 970, "body": "VTKGNKUHMP"} +{"name": "pymc850", "age": 42, "index": 850, "body": "VTKGNKUHMP"} +{"name": "pymc350", "age": 47, "index": 350, "body": "VTKGNKUHMP"} +{"name": "pymc355", "age": 52, "index": 355, "body": "VTKGNKUHMP"} +{"name": "pymc729", "age": 22, "index": 729, "body": "VTKGNKUHMP"} +{"name": "pymc138", "age": 37, "index": 138, "body": "VTKGNKUHMP"} +{"name": "pymc800", "age": 93, "index": 800, "body": "VTKGNKUHMP"} +{"name": "pymc107", "age": 6, "index": 107, "body": "VTKGNKUHMP"} +{"name": "pymc833", "age": 25, "index": 833, "body": "VTKGNKUHMP"} +{"name": "pymc960", "age": 51, "index": 960, "body": "VTKGNKUHMP"} +{"name": "pymc90", 
"age": 90, "index": 90, "body": "VTKGNKUHMP"} +{"name": "pymc171", "age": 70, "index": 171, "body": "VTKGNKUHMP"} +{"name": "pymc731", "age": 24, "index": 731, "body": "VTKGNKUHMP"} +{"name": "pymc293", "age": 91, "index": 293, "body": "VTKGNKUHMP"} +{"name": "pymc899", "age": 91, "index": 899, "body": "VTKGNKUHMP"} +{"name": "pymc165", "age": 64, "index": 165, "body": "VTKGNKUHMP"} +{"name": "pymc967", "age": 58, "index": 967, "body": "VTKGNKUHMP"} +{"name": "pymc11", "age": 11, "index": 11, "body": "VTKGNKUHMP"} +{"name": "pymc27", "age": 27, "index": 27, "body": "VTKGNKUHMP"} +{"name": "pymc255", "age": 53, "index": 255, "body": "VTKGNKUHMP"} +{"name": "pymc129", "age": 28, "index": 129, "body": "VTKGNKUHMP"} +{"name": "pymc226", "age": 24, "index": 226, "body": "VTKGNKUHMP"} +{"name": "pymc803", "age": 96, "index": 803, "body": "VTKGNKUHMP"} +{"name": "pymc922", "age": 13, "index": 922, "body": "VTKGNKUHMP"} +{"name": "pymc37", "age": 37, "index": 37, "body": "VTKGNKUHMP"} +{"name": "pymc227", "age": 25, "index": 227, "body": "VTKGNKUHMP"} +{"name": "pymc528", "age": 23, "index": 528, "body": "VTKGNKUHMP"} +{"name": "pymc875", "age": 67, "index": 875, "body": "VTKGNKUHMP"} +{"name": "pymc429", "age": 25, "index": 429, "body": "VTKGNKUHMP"} +{"name": "pymc279", "age": 77, "index": 279, "body": "VTKGNKUHMP"} +{"name": "pymc823", "age": 15, "index": 823, "body": "VTKGNKUHMP"} +{"name": "pymc730", "age": 23, "index": 730, "body": "VTKGNKUHMP"} +{"name": "pymc894", "age": 86, "index": 894, "body": "VTKGNKUHMP"} +{"name": "pymc740", "age": 33, "index": 740, "body": "VTKGNKUHMP"} +{"name": "pymc117", "age": 16, "index": 117, "body": "VTKGNKUHMP"} +{"name": "pymc93", "age": 93, "index": 93, "body": "VTKGNKUHMP"} +{"name": "pymc103", "age": 2, "index": 103, "body": "VTKGNKUHMP"} +{"name": "pymc247", "age": 45, "index": 247, "body": "VTKGNKUHMP"} +{"name": "pymc34", "age": 34, "index": 34, "body": "VTKGNKUHMP"} +{"name": "pymc38", "age": 38, "index": 38, "body": "VTKGNKUHMP"} +{"name": "pymc166", "age": 65, "index": 166, "body": "VTKGNKUHMP"} +{"name": "pymc944", "age": 35, "index": 944, "body": "VTKGNKUHMP"} +{"name": "pymc101", "age": 0, "index": 101, "body": "VTKGNKUHMP"} +{"name": "pymc254", "age": 52, "index": 254, "body": "VTKGNKUHMP"} +{"name": "pymc232", "age": 30, "index": 232, "body": "VTKGNKUHMP"} +{"name": "pymc889", "age": 81, "index": 889, "body": "VTKGNKUHMP"} +{"name": "pymc479", "age": 75, "index": 479, "body": "VTKGNKUHMP"} +{"name": "pymc371", "age": 68, "index": 371, "body": "VTKGNKUHMP"} +{"name": "pymc558", "age": 53, "index": 558, "body": "VTKGNKUHMP"} +{"name": "pymc182", "age": 81, "index": 182, "body": "VTKGNKUHMP"} +{"name": "pymc438", "age": 34, "index": 438, "body": "VTKGNKUHMP"} +{"name": "pymc887", "age": 79, "index": 887, "body": "VTKGNKUHMP"} +{"name": "pymc8", "age": 8, "index": 8, "body": "VTKGNKUHMP"} +{"name": "pymc235", "age": 33, "index": 235, "body": "VTKGNKUHMP"} +{"name": "pymc273", "age": 71, "index": 273, "body": "VTKGNKUHMP"} +{"name": "pymc679", "age": 73, "index": 679, "body": "VTKGNKUHMP"} +{"name": "pymc123", "age": 22, "index": 123, "body": "VTKGNKUHMP"} +{"name": "pymc687", "age": 81, "index": 687, "body": "VTKGNKUHMP"} +{"name": "pymc242", "age": 40, "index": 242, "body": "VTKGNKUHMP"} +{"name": "pymc952", "age": 43, "index": 952, "body": "VTKGNKUHMP"} +{"name": "pymc817", "age": 9, "index": 817, "body": "VTKGNKUHMP"} +{"name": "pymc962", "age": 53, "index": 962, "body": "VTKGNKUHMP"} +{"name": "pymc842", "age": 34, "index": 842, "body": 
"VTKGNKUHMP"} +{"name": "pymc28", "age": 28, "index": 28, "body": "VTKGNKUHMP"} +{"name": "pymc870", "age": 62, "index": 870, "body": "VTKGNKUHMP"} +{"name": "pymc951", "age": 42, "index": 951, "body": "VTKGNKUHMP"} +{"name": "pymc880", "age": 72, "index": 880, "body": "VTKGNKUHMP"} +{"name": "pymc723", "age": 16, "index": 723, "body": "VTKGNKUHMP"} +{"name": "pymc938", "age": 29, "index": 938, "body": "VTKGNKUHMP"} +{"name": "pymc157", "age": 56, "index": 157, "body": "VTKGNKUHMP"} +{"name": "pymc458", "age": 54, "index": 458, "body": "VTKGNKUHMP"} +{"name": "pymc538", "age": 33, "index": 538, "body": "VTKGNKUHMP"} +{"name": "pymc208", "age": 6, "index": 208, "body": "VTKGNKUHMP"} +{"name": "pymc751", "age": 44, "index": 751, "body": "VTKGNKUHMP"} +{"name": "pymc24", "age": 24, "index": 24, "body": "VTKGNKUHMP"} +{"name": "pymc748", "age": 41, "index": 748, "body": "VTKGNKUHMP"} +{"name": "pymc270", "age": 68, "index": 270, "body": "VTKGNKUHMP"} +{"name": "pymc812", "age": 4, "index": 812, "body": "VTKGNKUHMP"} +{"name": "pymc408", "age": 4, "index": 408, "body": "VTKGNKUHMP"} +{"name": "pymc81", "age": 81, "index": 81, "body": "VTKGNKUHMP"} +{"name": "pymc878", "age": 70, "index": 878, "body": "VTKGNKUHMP"} +{"name": "pymc965", "age": 56, "index": 965, "body": "VTKGNKUHMP"} +{"name": "pymc920", "age": 11, "index": 920, "body": "VTKGNKUHMP"} +{"name": "pymc683", "age": 77, "index": 683, "body": "VTKGNKUHMP"} +{"name": "pymc133", "age": 32, "index": 133, "body": "VTKGNKUHMP"} +{"name": "pymc693", "age": 87, "index": 693, "body": "VTKGNKUHMP"} +{"name": "pymc268", "age": 66, "index": 268, "body": "VTKGNKUHMP"} +{"name": "pymc287", "age": 85, "index": 287, "body": "VTKGNKUHMP"} +{"name": "pymc931", "age": 22, "index": 931, "body": "VTKGNKUHMP"} +{"name": "pymc18", "age": 18, "index": 18, "body": "VTKGNKUHMP"} +{"name": "pymc202", "age": 0, "index": 202, "body": "VTKGNKUHMP"} +{"name": "pymc449", "age": 45, "index": 449, "body": "VTKGNKUHMP"} +{"name": "pymc681", "age": 75, "index": 681, "body": "VTKGNKUHMP"} +{"name": "pymc946", "age": 37, "index": 946, "body": "VTKGNKUHMP"} +{"name": "pymc568", "age": 63, "index": 568, "body": "VTKGNKUHMP"} +{"name": "pymc969", "age": 60, "index": 969, "body": "VTKGNKUHMP"} +{"name": "pymc321", "age": 18, "index": 321, "body": "VTKGNKUHMP"} +{"name": "pymc201", "age": 100, "index": 201, "body": "VTKGNKUHMP"} +{"name": "pymc357", "age": 54, "index": 357, "body": "VTKGNKUHMP"} +{"name": "pymc609", "age": 3, "index": 609, "body": "VTKGNKUHMP"} +{"name": "pymc964", "age": 55, "index": 964, "body": "VTKGNKUHMP"} +{"name": "pymc428", "age": 24, "index": 428, "body": "VTKGNKUHMP"} +{"name": "pymc353", "age": 50, "index": 353, "body": "VTKGNKUHMP"} +{"name": "pymc211", "age": 9, "index": 211, "body": "VTKGNKUHMP"} +{"name": "pymc926", "age": 17, "index": 926, "body": "VTKGNKUHMP"} +{"name": "pymc250", "age": 48, "index": 250, "body": "VTKGNKUHMP"} +{"name": "pymc339", "age": 36, "index": 339, "body": "VTKGNKUHMP"} +{"name": "pymc187", "age": 86, "index": 187, "body": "VTKGNKUHMP"} +{"name": "pymc222", "age": 20, "index": 222, "body": "VTKGNKUHMP"} +{"name": "pymc174", "age": 73, "index": 174, "body": "VTKGNKUHMP"} +{"name": "pymc971", "age": 62, "index": 971, "body": "VTKGNKUHMP"} +{"name": "pymc198", "age": 97, "index": 198, "body": "VTKGNKUHMP"} +{"name": "pymc885", "age": 77, "index": 885, "body": "VTKGNKUHMP"} +{"name": "pymc43", "age": 43, "index": 43, "body": "VTKGNKUHMP"} +{"name": "pymc955", "age": 46, "index": 955, "body": "VTKGNKUHMP"} +{"name": 
"pymc874", "age": 66, "index": 874, "body": "VTKGNKUHMP"} +{"name": "pymc831", "age": 23, "index": 831, "body": "VTKGNKUHMP"} +{"name": "pymc750", "age": 43, "index": 750, "body": "VTKGNKUHMP"} +{"name": "pymc338", "age": 35, "index": 338, "body": "VTKGNKUHMP"} +{"name": "pymc733", "age": 26, "index": 733, "body": "VTKGNKUHMP"} +{"name": "pymc840", "age": 32, "index": 840, "body": "VTKGNKUHMP"} +{"name": "pymc217", "age": 15, "index": 217, "body": "VTKGNKUHMP"} +{"name": "pymc727", "age": 20, "index": 727, "body": "VTKGNKUHMP"} +{"name": "pymc167", "age": 66, "index": 167, "body": "VTKGNKUHMP"} +{"name": "pymc847", "age": 39, "index": 847, "body": "VTKGNKUHMP"} +{"name": "pymc839", "age": 31, "index": 839, "body": "VTKGNKUHMP"} +{"name": "pymc326", "age": 23, "index": 326, "body": "VTKGNKUHMP"} +{"name": "pymc205", "age": 3, "index": 205, "body": "VTKGNKUHMP"} +{"name": "pymc246", "age": 44, "index": 246, "body": "VTKGNKUHMP"} +{"name": "pymc190", "age": 89, "index": 190, "body": "VTKGNKUHMP"} +{"name": "pymc134", "age": 33, "index": 134, "body": "VTKGNKUHMP"} +{"name": "pymc848", "age": 40, "index": 848, "body": "VTKGNKUHMP"} +{"name": "pymc448", "age": 44, "index": 448, "body": "VTKGNKUHMP"} +{"name": "pymc154", "age": 53, "index": 154, "body": "VTKGNKUHMP"} +{"name": "pymc111", "age": 10, "index": 111, "body": "VTKGNKUHMP"} +{"name": "pymc258", "age": 56, "index": 258, "body": "VTKGNKUHMP"} +{"name": "pymc214", "age": 12, "index": 214, "body": "VTKGNKUHMP"} +{"name": "pymc697", "age": 91, "index": 697, "body": "VTKGNKUHMP"} +{"name": "pymc96", "age": 96, "index": 96, "body": "VTKGNKUHMP"} +{"name": "pymc948", "age": 39, "index": 948, "body": "VTKGNKUHMP"} +{"name": "pymc42", "age": 42, "index": 42, "body": "VTKGNKUHMP"} +{"name": "pymc284", "age": 82, "index": 284, "body": "VTKGNKUHMP"} +{"name": "pymc185", "age": 84, "index": 185, "body": "VTKGNKUHMP"} +{"name": "pymc84", "age": 84, "index": 84, "body": "VTKGNKUHMP"} +{"name": "pymc928", "age": 19, "index": 928, "body": "VTKGNKUHMP"} +{"name": "pymc266", "age": 64, "index": 266, "body": "VTKGNKUHMP"} +{"name": "pymc245", "age": 43, "index": 245, "body": "VTKGNKUHMP"} +{"name": "pymc864", "age": 56, "index": 864, "body": "VTKGNKUHMP"} +{"name": "pymc756", "age": 49, "index": 756, "body": "VTKGNKUHMP"} +{"name": "pymc218", "age": 16, "index": 218, "body": "VTKGNKUHMP"} +{"name": "pymc64", "age": 64, "index": 64, "body": "VTKGNKUHMP"} +{"name": "pymc869", "age": 61, "index": 869, "body": "VTKGNKUHMP"} +{"name": "pymc490", "age": 86, "index": 490, "body": "VTKGNKUHMP"} +{"name": "pymc914", "age": 5, "index": 914, "body": "VTKGNKUHMP"} +{"name": "pymc401", "age": 98, "index": 401, "body": "VTKGNKUHMP"} +{"name": "pymc312", "age": 9, "index": 312, "body": "VTKGNKUHMP"} +{"name": "pymc415", "age": 11, "index": 415, "body": "VTKGNKUHMP"} +{"name": "pymc621", "age": 15, "index": 621, "body": "VTKGNKUHMP"} +{"name": "pymc494", "age": 90, "index": 494, "body": "VTKGNKUHMP"} +{"name": "pymc508", "age": 3, "index": 508, "body": "VTKGNKUHMP"} +{"name": "pymc766", "age": 59, "index": 766, "body": "VTKGNKUHMP"} +{"name": "pymc660", "age": 54, "index": 660, "body": "VTKGNKUHMP"} +{"name": "pymc348", "age": 45, "index": 348, "body": "VTKGNKUHMP"} +{"name": "pymc487", "age": 83, "index": 487, "body": "VTKGNKUHMP"} +{"name": "pymc610", "age": 4, "index": 610, "body": "VTKGNKUHMP"} +{"name": "pymc991", "age": 82, "index": 991, "body": "VTKGNKUHMP"} +{"name": "pymc620", "age": 14, "index": 620, "body": "VTKGNKUHMP"} +{"name": "pymc662", "age": 56, "index": 
662, "body": "VTKGNKUHMP"} +{"name": "pymc525", "age": 20, "index": 525, "body": "VTKGNKUHMP"} +{"name": "pymc451", "age": 47, "index": 451, "body": "VTKGNKUHMP"} +{"name": "pymc556", "age": 51, "index": 556, "body": "VTKGNKUHMP"} +{"name": "pymc706", "age": 100, "index": 706, "body": "VTKGNKUHMP"} +{"name": "pymc457", "age": 53, "index": 457, "body": "VTKGNKUHMP"} +{"name": "pymc426", "age": 22, "index": 426, "body": "VTKGNKUHMP"} +{"name": "pymc634", "age": 28, "index": 634, "body": "VTKGNKUHMP"} +{"name": "pymc789", "age": 82, "index": 789, "body": "VTKGNKUHMP"} +{"name": "pymc412", "age": 8, "index": 412, "body": "VTKGNKUHMP"} +{"name": "pymc450", "age": 46, "index": 450, "body": "VTKGNKUHMP"} +{"name": "pymc911", "age": 2, "index": 911, "body": "VTKGNKUHMP"} +{"name": "pymc301", "age": 99, "index": 301, "body": "VTKGNKUHMP"} +{"name": "pymc552", "age": 47, "index": 552, "body": "VTKGNKUHMP"} +{"name": "pymc906", "age": 98, "index": 906, "body": "VTKGNKUHMP"} +{"name": "pymc557", "age": 52, "index": 557, "body": "VTKGNKUHMP"} +{"name": "pymc700", "age": 94, "index": 700, "body": "VTKGNKUHMP"} +{"name": "pymc531", "age": 26, "index": 531, "body": "VTKGNKUHMP"} +{"name": "pymc414", "age": 10, "index": 414, "body": "VTKGNKUHMP"} +{"name": "pymc313", "age": 10, "index": 313, "body": "VTKGNKUHMP"} +{"name": "pymc655", "age": 49, "index": 655, "body": "VTKGNKUHMP"} +{"name": "pymc715", "age": 8, "index": 715, "body": "VTKGNKUHMP"} +{"name": "pymc341", "age": 38, "index": 341, "body": "VTKGNKUHMP"} +{"name": "pymc396", "age": 93, "index": 396, "body": "VTKGNKUHMP"} +{"name": "pymc650", "age": 44, "index": 650, "body": "VTKGNKUHMP"} +{"name": "pymc908", "age": 100, "index": 908, "body": "VTKGNKUHMP"} +{"name": "pymc994", "age": 85, "index": 994, "body": "VTKGNKUHMP"} +{"name": "pymc903", "age": 95, "index": 903, "body": "VTKGNKUHMP"} +{"name": "pymc705", "age": 99, "index": 705, "body": "VTKGNKUHMP"} +{"name": "pymc710", "age": 3, "index": 710, "body": "VTKGNKUHMP"} +{"name": "pymc523", "age": 18, "index": 523, "body": "VTKGNKUHMP"} +{"name": "pymc444", "age": 40, "index": 444, "body": "VTKGNKUHMP"} +{"name": "pymc380", "age": 77, "index": 380, "body": "VTKGNKUHMP"} +{"name": "pymc455", "age": 51, "index": 455, "body": "VTKGNKUHMP"} +{"name": "pymc761", "age": 54, "index": 761, "body": "VTKGNKUHMP"} +{"name": "pymc311", "age": 8, "index": 311, "body": "VTKGNKUHMP"} +{"name": "pymc397", "age": 94, "index": 397, "body": "VTKGNKUHMP"} +{"name": "pymc624", "age": 18, "index": 624, "body": "VTKGNKUHMP"} +{"name": "pymc600", "age": 95, "index": 600, "body": "VTKGNKUHMP"} +{"name": "pymc632", "age": 26, "index": 632, "body": "VTKGNKUHMP"} +{"name": "pymc642", "age": 36, "index": 642, "body": "VTKGNKUHMP"} +{"name": "pymc656", "age": 50, "index": 656, "body": "VTKGNKUHMP"} +{"name": "pymc386", "age": 83, "index": 386, "body": "VTKGNKUHMP"} +{"name": "pymc640", "age": 34, "index": 640, "body": "VTKGNKUHMP"} +{"name": "pymc318", "age": 15, "index": 318, "body": "VTKGNKUHMP"} +{"name": "pymc393", "age": 90, "index": 393, "body": "VTKGNKUHMP"} +{"name": "pymc616", "age": 10, "index": 616, "body": "VTKGNKUHMP"} +{"name": "pymc645", "age": 39, "index": 645, "body": "VTKGNKUHMP"} +{"name": "pymc770", "age": 63, "index": 770, "body": "VTKGNKUHMP"} +{"name": "pymc654", "age": 48, "index": 654, "body": "VTKGNKUHMP"} +{"name": "pymc764", "age": 57, "index": 764, "body": "VTKGNKUHMP"} +{"name": "pymc652", "age": 46, "index": 652, "body": "VTKGNKUHMP"} +{"name": "pymc709", "age": 2, "index": 709, "body": 
"VTKGNKUHMP"} +{"name": "pymc486", "age": 82, "index": 486, "body": "VTKGNKUHMP"} +{"name": "pymc417", "age": 13, "index": 417, "body": "VTKGNKUHMP"} +{"name": "pymc443", "age": 39, "index": 443, "body": "VTKGNKUHMP"} +{"name": "pymc447", "age": 43, "index": 447, "body": "VTKGNKUHMP"} +{"name": "pymc664", "age": 58, "index": 664, "body": "VTKGNKUHMP"} +{"name": "pymc482", "age": 78, "index": 482, "body": "VTKGNKUHMP"} +{"name": "pymc661", "age": 55, "index": 661, "body": "VTKGNKUHMP"} +{"name": "pymc636", "age": 30, "index": 636, "body": "VTKGNKUHMP"} +{"name": "pymc545", "age": 40, "index": 545, "body": "VTKGNKUHMP"} +{"name": "pymc543", "age": 38, "index": 543, "body": "VTKGNKUHMP"} +{"name": "pymc541", "age": 36, "index": 541, "body": "VTKGNKUHMP"} +{"name": "pymc441", "age": 37, "index": 441, "body": "VTKGNKUHMP"} +{"name": "pymc344", "age": 41, "index": 344, "body": "VTKGNKUHMP"} +{"name": "pymc342", "age": 39, "index": 342, "body": "VTKGNKUHMP"} +{"name": "pymc907", "age": 99, "index": 907, "body": "VTKGNKUHMP"} +{"name": "pymc704", "age": 98, "index": 704, "body": "VTKGNKUHMP"} +{"name": "pymc988", "age": 79, "index": 988, "body": "VTKGNKUHMP"} +{"name": "pymc771", "age": 64, "index": 771, "body": "VTKGNKUHMP"} +{"name": "pymc589", "age": 84, "index": 589, "body": "VTKGNKUHMP"} +{"name": "pymc719", "age": 12, "index": 719, "body": "VTKGNKUHMP"} +{"name": "pymc989", "age": 80, "index": 989, "body": "VTKGNKUHMP"} +{"name": "pymc411", "age": 7, "index": 411, "body": "VTKGNKUHMP"} +{"name": "pymc613", "age": 7, "index": 613, "body": "VTKGNKUHMP"} +{"name": "pymc718", "age": 11, "index": 718, "body": "VTKGNKUHMP"} +{"name": "pymc404", "age": 0, "index": 404, "body": "VTKGNKUHMP"} +{"name": "pymc918", "age": 9, "index": 918, "body": "VTKGNKUHMP"} +{"name": "pymc382", "age": 79, "index": 382, "body": "VTKGNKUHMP"} +{"name": "pymc390", "age": 87, "index": 390, "body": "VTKGNKUHMP"} +{"name": "pymc637", "age": 31, "index": 637, "body": "VTKGNKUHMP"} +{"name": "pymc607", "age": 1, "index": 607, "body": "VTKGNKUHMP"} +{"name": "pymc701", "age": 95, "index": 701, "body": "VTKGNKUHMP"} +{"name": "pymc402", "age": 99, "index": 402, "body": "VTKGNKUHMP"} +{"name": "pymc981", "age": 72, "index": 981, "body": "VTKGNKUHMP"} +{"name": "pymc901", "age": 93, "index": 901, "body": "VTKGNKUHMP"} +{"name": "pymc544", "age": 39, "index": 544, "body": "VTKGNKUHMP"} +{"name": "pymc452", "age": 48, "index": 452, "body": "VTKGNKUHMP"} +{"name": "pymc777", "age": 70, "index": 777, "body": "VTKGNKUHMP"} +{"name": "pymc798", "age": 91, "index": 798, "body": "VTKGNKUHMP"} +{"name": "pymc712", "age": 5, "index": 712, "body": "VTKGNKUHMP"} +{"name": "pymc520", "age": 15, "index": 520, "body": "VTKGNKUHMP"} +{"name": "pymc999", "age": 90, "index": 999, "body": "VTKGNKUHMP"} +{"name": "pymc775", "age": 68, "index": 775, "body": "VTKGNKUHMP"} +{"name": "pymc674", "age": 68, "index": 674, "body": "VTKGNKUHMP"} +{"name": "pymc677", "age": 71, "index": 677, "body": "VTKGNKUHMP"} +{"name": "pymc605", "age": 100, "index": 605, "body": "VTKGNKUHMP"} +{"name": "pymc676", "age": 70, "index": 676, "body": "VTKGNKUHMP"} +{"name": "pymc774", "age": 67, "index": 774, "body": "VTKGNKUHMP"} +{"name": "pymc518", "age": 13, "index": 518, "body": "VTKGNKUHMP"} +{"name": "pymc985", "age": 76, "index": 985, "body": "VTKGNKUHMP"} +{"name": "pymc385", "age": 82, "index": 385, "body": "VTKGNKUHMP"} +{"name": "pymc305", "age": 2, "index": 305, "body": "VTKGNKUHMP"} +{"name": "pymc302", "age": 100, "index": 302, "body": "VTKGNKUHMP"} +{"name": 
"pymc762", "age": 55, "index": 762, "body": "VTKGNKUHMP"} +{"name": "pymc399", "age": 96, "index": 399, "body": "VTKGNKUHMP"} +{"name": "pymc672", "age": 66, "index": 672, "body": "VTKGNKUHMP"} +{"name": "pymc442", "age": 38, "index": 442, "body": "VTKGNKUHMP"} +{"name": "pymc630", "age": 24, "index": 630, "body": "VTKGNKUHMP"} +{"name": "pymc623", "age": 17, "index": 623, "body": "VTKGNKUHMP"} +{"name": "pymc304", "age": 1, "index": 304, "body": "VTKGNKUHMP"} +{"name": "pymc716", "age": 9, "index": 716, "body": "VTKGNKUHMP"} +{"name": "pymc420", "age": 16, "index": 420, "body": "VTKGNKUHMP"} +{"name": "pymc666", "age": 60, "index": 666, "body": "VTKGNKUHMP"} +{"name": "pymc622", "age": 16, "index": 622, "body": "VTKGNKUHMP"} +{"name": "pymc407", "age": 3, "index": 407, "body": "VTKGNKUHMP"} +{"name": "pymc347", "age": 44, "index": 347, "body": "VTKGNKUHMP"} +{"name": "pymc984", "age": 75, "index": 984, "body": "VTKGNKUHMP"} +{"name": "pymc633", "age": 27, "index": 633, "body": "VTKGNKUHMP"} +{"name": "pymc657", "age": 51, "index": 657, "body": "VTKGNKUHMP"} +{"name": "pymc445", "age": 41, "index": 445, "body": "VTKGNKUHMP"} +{"name": "pymc644", "age": 38, "index": 644, "body": "VTKGNKUHMP"} +{"name": "pymc307", "age": 4, "index": 307, "body": "VTKGNKUHMP"} +{"name": "pymc647", "age": 41, "index": 647, "body": "VTKGNKUHMP"} +{"name": "pymc599", "age": 94, "index": 599, "body": "VTKGNKUHMP"} +{"name": "pymc485", "age": 81, "index": 485, "body": "VTKGNKUHMP"} +{"name": "pymc536", "age": 31, "index": 536, "body": "VTKGNKUHMP"} +{"name": "pymc992", "age": 83, "index": 992, "body": "VTKGNKUHMP"} +{"name": "pymc902", "age": 94, "index": 902, "body": "VTKGNKUHMP"} +{"name": "pymc532", "age": 27, "index": 532, "body": "VTKGNKUHMP"} +{"name": "pymc643", "age": 37, "index": 643, "body": "VTKGNKUHMP"} +{"name": "pymc308", "age": 5, "index": 308, "body": "VTKGNKUHMP"} +{"name": "pymc345", "age": 42, "index": 345, "body": "VTKGNKUHMP"} +{"name": "pymc340", "age": 37, "index": 340, "body": "VTKGNKUHMP"} +{"name": "pymc667", "age": 61, "index": 667, "body": "VTKGNKUHMP"} +{"name": "pymc406", "age": 2, "index": 406, "body": "VTKGNKUHMP"} +{"name": "pymc588", "age": 83, "index": 588, "body": "VTKGNKUHMP"} +{"name": "pymc542", "age": 37, "index": 542, "body": "VTKGNKUHMP"} +{"name": "pymc708", "age": 1, "index": 708, "body": "VTKGNKUHMP"} +{"name": "pymc551", "age": 46, "index": 551, "body": "VTKGNKUHMP"} +{"name": "pymc394", "age": 91, "index": 394, "body": "VTKGNKUHMP"} +{"name": "pymc300", "age": 98, "index": 300, "body": "VTKGNKUHMP"} +{"name": "pymc912", "age": 3, "index": 912, "body": "VTKGNKUHMP"} +{"name": "pymc423", "age": 19, "index": 423, "body": "VTKGNKUHMP"} +{"name": "pymc983", "age": 74, "index": 983, "body": "VTKGNKUHMP"} +{"name": "pymc309", "age": 6, "index": 309, "body": "VTKGNKUHMP"} +{"name": "pymc389", "age": 86, "index": 389, "body": "VTKGNKUHMP"} +{"name": "pymc427", "age": 23, "index": 427, "body": "VTKGNKUHMP"} +{"name": "pymc990", "age": 81, "index": 990, "body": "VTKGNKUHMP"} +{"name": "pymc675", "age": 69, "index": 675, "body": "VTKGNKUHMP"} +{"name": "pymc603", "age": 98, "index": 603, "body": "VTKGNKUHMP"} +{"name": "pymc303", "age": 0, "index": 303, "body": "VTKGNKUHMP"} +{"name": "pymc909", "age": 0, "index": 909, "body": "VTKGNKUHMP"} +{"name": "pymc617", "age": 11, "index": 617, "body": "VTKGNKUHMP"} +{"name": "pymc527", "age": 22, "index": 527, "body": "VTKGNKUHMP"} +{"name": "pymc554", "age": 49, "index": 554, "body": "VTKGNKUHMP"} +{"name": "pymc993", "age": 84, 
"index": 993, "body": "VTKGNKUHMP"} +{"name": "pymc555", "age": 50, "index": 555, "body": "VTKGNKUHMP"} +{"name": "pymc904", "age": 96, "index": 904, "body": "VTKGNKUHMP"} +{"name": "pymc665", "age": 59, "index": 665, "body": "VTKGNKUHMP"} +{"name": "pymc671", "age": 65, "index": 671, "body": "VTKGNKUHMP"} +{"name": "pymc535", "age": 30, "index": 535, "body": "VTKGNKUHMP"} +{"name": "pymc614", "age": 8, "index": 614, "body": "VTKGNKUHMP"} +{"name": "pymc673", "age": 67, "index": 673, "body": "VTKGNKUHMP"} +{"name": "pymc526", "age": 21, "index": 526, "body": "VTKGNKUHMP"} +{"name": "pymc540", "age": 35, "index": 540, "body": "VTKGNKUHMP"} +{"name": "pymc919", "age": 10, "index": 919, "body": "VTKGNKUHMP"} +{"name": "pymc403", "age": 100, "index": 403, "body": "VTKGNKUHMP"} +{"name": "pymc917", "age": 8, "index": 917, "body": "VTKGNKUHMP"} +{"name": "pymc349", "age": 46, "index": 349, "body": "VTKGNKUHMP"} +{"name": "pymc547", "age": 42, "index": 547, "body": "VTKGNKUHMP"} +{"name": "pymc491", "age": 87, "index": 491, "body": "VTKGNKUHMP"} +{"name": "pymc651", "age": 45, "index": 651, "body": "VTKGNKUHMP"} +{"name": "pymc717", "age": 10, "index": 717, "body": "VTKGNKUHMP"} +{"name": "pymc530", "age": 25, "index": 530, "body": "VTKGNKUHMP"} +{"name": "pymc987", "age": 78, "index": 987, "body": "VTKGNKUHMP"} +{"name": "pymc910", "age": 1, "index": 910, "body": "VTKGNKUHMP"} +{"name": "pymc625", "age": 19, "index": 625, "body": "VTKGNKUHMP"} +{"name": "pymc398", "age": 95, "index": 398, "body": "VTKGNKUHMP"} +{"name": "pymc492", "age": 88, "index": 492, "body": "VTKGNKUHMP"} +{"name": "pymc982", "age": 73, "index": 982, "body": "VTKGNKUHMP"} +{"name": "pymc598", "age": 93, "index": 598, "body": "VTKGNKUHMP"} +{"name": "pymc713", "age": 6, "index": 713, "body": "VTKGNKUHMP"} +{"name": "pymc641", "age": 35, "index": 641, "body": "VTKGNKUHMP"} +{"name": "pymc703", "age": 97, "index": 703, "body": "VTKGNKUHMP"} +{"name": "pymc317", "age": 14, "index": 317, "body": "VTKGNKUHMP"} +{"name": "pymc392", "age": 89, "index": 392, "body": "VTKGNKUHMP"} +{"name": "pymc711", "age": 4, "index": 711, "body": "VTKGNKUHMP"} +{"name": "pymc453", "age": 49, "index": 453, "body": "VTKGNKUHMP"} +{"name": "pymc391", "age": 88, "index": 391, "body": "VTKGNKUHMP"} +{"name": "pymc425", "age": 21, "index": 425, "body": "VTKGNKUHMP"} +{"name": "pymc799", "age": 92, "index": 799, "body": "VTKGNKUHMP"} +{"name": "pymc421", "age": 17, "index": 421, "body": "VTKGNKUHMP"} +{"name": "pymc381", "age": 78, "index": 381, "body": "VTKGNKUHMP"} +{"name": "pymc522", "age": 17, "index": 522, "body": "VTKGNKUHMP"} +{"name": "pymc765", "age": 58, "index": 765, "body": "VTKGNKUHMP"} +{"name": "pymc550", "age": 45, "index": 550, "body": "VTKGNKUHMP"} +{"name": "pymc606", "age": 0, "index": 606, "body": "VTKGNKUHMP"} +{"name": "pymc915", "age": 6, "index": 915, "body": "VTKGNKUHMP"} +{"name": "pymc387", "age": 84, "index": 387, "body": "VTKGNKUHMP"} +{"name": "pymc615", "age": 9, "index": 615, "body": "VTKGNKUHMP"} +{"name": "pymc454", "age": 50, "index": 454, "body": "VTKGNKUHMP"} +{"name": "pymc395", "age": 92, "index": 395, "body": "VTKGNKUHMP"} +{"name": "pymc534", "age": 29, "index": 534, "body": "VTKGNKUHMP"} +{"name": "pymc772", "age": 65, "index": 772, "body": "VTKGNKUHMP"} +{"name": "pymc646", "age": 40, "index": 646, "body": "VTKGNKUHMP"} +{"name": "pymc626", "age": 20, "index": 626, "body": "VTKGNKUHMP"} +{"name": "pymc663", "age": 57, "index": 663, "body": "VTKGNKUHMP"} +{"name": "pymc384", "age": 81, "index": 384, "body": 
"VTKGNKUHMP"} +{"name": "pymc707", "age": 0, "index": 707, "body": "VTKGNKUHMP"} +{"name": "pymc314", "age": 11, "index": 314, "body": "VTKGNKUHMP"} +{"name": "pymc767", "age": 60, "index": 767, "body": "VTKGNKUHMP"} +{"name": "pymc484", "age": 80, "index": 484, "body": "VTKGNKUHMP"} +{"name": "pymc537", "age": 32, "index": 537, "body": "VTKGNKUHMP"} +{"name": "pymc388", "age": 85, "index": 388, "body": "VTKGNKUHMP"} +{"name": "pymc500", "age": 96, "index": 500, "body": "VTKGNKUHMP"} +{"name": "pymc916", "age": 7, "index": 916, "body": "VTKGNKUHMP"} +{"name": "pymc591", "age": 86, "index": 591, "body": "VTKGNKUHMP"} +{"name": "pymc460", "age": 56, "index": 460, "body": "VTKGNKUHMP"} +{"name": "pymc446", "age": 42, "index": 446, "body": "VTKGNKUHMP"} +{"name": "pymc585", "age": 80, "index": 585, "body": "VTKGNKUHMP"} +{"name": "pymc480", "age": 76, "index": 480, "body": "VTKGNKUHMP"} +{"name": "pymc504", "age": 100, "index": 504, "body": "VTKGNKUHMP"} +{"name": "pymc413", "age": 9, "index": 413, "body": "VTKGNKUHMP"} +{"name": "pymc410", "age": 6, "index": 410, "body": "VTKGNKUHMP"} +{"name": "pymc517", "age": 12, "index": 517, "body": "VTKGNKUHMP"} +{"name": "pymc553", "age": 48, "index": 553, "body": "VTKGNKUHMP"} +{"name": "pymc780", "age": 73, "index": 780, "body": "VTKGNKUHMP"} +{"name": "pymc496", "age": 92, "index": 496, "body": "VTKGNKUHMP"} +{"name": "pymc424", "age": 20, "index": 424, "body": "VTKGNKUHMP"} +{"name": "pymc986", "age": 77, "index": 986, "body": "VTKGNKUHMP"} +{"name": "pymc773", "age": 66, "index": 773, "body": "VTKGNKUHMP"} +{"name": "pymc602", "age": 97, "index": 602, "body": "VTKGNKUHMP"} +{"name": "pymc900", "age": 92, "index": 900, "body": "VTKGNKUHMP"} +{"name": "pymc519", "age": 14, "index": 519, "body": "VTKGNKUHMP"} +{"name": "pymc582", "age": 77, "index": 582, "body": "VTKGNKUHMP"} +{"name": "pymc524", "age": 19, "index": 524, "body": "VTKGNKUHMP"} +{"name": "pymc473", "age": 69, "index": 473, "body": "VTKGNKUHMP"} +{"name": "pymc495", "age": 91, "index": 495, "body": "VTKGNKUHMP"} +{"name": "pymc493", "age": 89, "index": 493, "body": "VTKGNKUHMP"} +{"name": "pymc405", "age": 1, "index": 405, "body": "VTKGNKUHMP"} +{"name": "pymc483", "age": 79, "index": 483, "body": "VTKGNKUHMP"} +{"name": "pymc346", "age": 43, "index": 346, "body": "VTKGNKUHMP"} +{"name": "pymc584", "age": 79, "index": 584, "body": "VTKGNKUHMP"} +{"name": "pymc461", "age": 57, "index": 461, "body": "VTKGNKUHMP"} +{"name": "pymc763", "age": 56, "index": 763, "body": "VTKGNKUHMP"} +{"name": "pymc310", "age": 7, "index": 310, "body": "VTKGNKUHMP"} +{"name": "pymc670", "age": 64, "index": 670, "body": "VTKGNKUHMP"} +{"name": "pymc316", "age": 13, "index": 316, "body": "VTKGNKUHMP"} +{"name": "pymc433", "age": 29, "index": 433, "body": "VTKGNKUHMP"} +{"name": "pymc521", "age": 16, "index": 521, "body": "VTKGNKUHMP"} +{"name": "pymc611", "age": 5, "index": 611, "body": "VTKGNKUHMP"} +{"name": "pymc995", "age": 86, "index": 995, "body": "VTKGNKUHMP"} +{"name": "pymc574", "age": 69, "index": 574, "body": "VTKGNKUHMP"} +{"name": "pymc319", "age": 16, "index": 319, "body": "VTKGNKUHMP"} +{"name": "pymc998", "age": 89, "index": 998, "body": "VTKGNKUHMP"} +{"name": "pymc702", "age": 96, "index": 702, "body": "VTKGNKUHMP"} +{"name": "pymc383", "age": 80, "index": 383, "body": "VTKGNKUHMP"} +{"name": "pymc612", "age": 6, "index": 612, "body": "VTKGNKUHMP"} +{"name": "pymc601", "age": 96, "index": 601, "body": "VTKGNKUHMP"} +{"name": "pymc604", "age": 99, "index": 604, "body": "VTKGNKUHMP"} +{"name": 
"pymc400", "age": 97, "index": 400, "body": "VTKGNKUHMP"} +{"name": "pymc440", "age": 36, "index": 440, "body": "VTKGNKUHMP"} +{"name": "pymc788", "age": 81, "index": 788, "body": "VTKGNKUHMP"} +{"name": "pymc997", "age": 88, "index": 997, "body": "VTKGNKUHMP"} +{"name": "pymc416", "age": 12, "index": 416, "body": "VTKGNKUHMP"} +{"name": "pymc913", "age": 4, "index": 913, "body": "VTKGNKUHMP"} +{"name": "pymc546", "age": 41, "index": 546, "body": "VTKGNKUHMP"} +{"name": "pymc306", "age": 3, "index": 306, "body": "VTKGNKUHMP"} +{"name": "pymc996", "age": 87, "index": 996, "body": "VTKGNKUHMP"} +{"name": "pymc533", "age": 28, "index": 533, "body": "VTKGNKUHMP"} +{"name": "pymc343", "age": 40, "index": 343, "body": "VTKGNKUHMP"} +{"name": "pymc562", "age": 57, "index": 562, "body": "VTKGNKUHMP"} +{"name": "pymc463", "age": 59, "index": 463, "body": "VTKGNKUHMP"} +{"name": "pymc575", "age": 70, "index": 575, "body": "VTKGNKUHMP"} +{"name": "pymc790", "age": 83, "index": 790, "body": "VTKGNKUHMP"} +{"name": "pymc635", "age": 29, "index": 635, "body": "VTKGNKUHMP"} +{"name": "pymc564", "age": 59, "index": 564, "body": "VTKGNKUHMP"} +{"name": "pymc497", "age": 93, "index": 497, "body": "VTKGNKUHMP"} +{"name": "pymc905", "age": 97, "index": 905, "body": "VTKGNKUHMP"} +{"name": "pymc653", "age": 47, "index": 653, "body": "VTKGNKUHMP"} +{"name": "pymc571", "age": 66, "index": 571, "body": "VTKGNKUHMP"} +{"name": "pymc786", "age": 79, "index": 786, "body": "VTKGNKUHMP"} +{"name": "pymc516", "age": 11, "index": 516, "body": "VTKGNKUHMP"} +{"name": "pymc587", "age": 82, "index": 587, "body": "VTKGNKUHMP"} +{"name": "pymc980", "age": 71, "index": 980, "body": "VTKGNKUHMP"} +{"name": "pymc512", "age": 7, "index": 512, "body": "VTKGNKUHMP"} +{"name": "pymc436", "age": 32, "index": 436, "body": "VTKGNKUHMP"} +{"name": "pymc430", "age": 26, "index": 430, "body": "VTKGNKUHMP"} +{"name": "pymc315", "age": 12, "index": 315, "body": "VTKGNKUHMP"} +{"name": "pymc509", "age": 4, "index": 509, "body": "VTKGNKUHMP"} +{"name": "pymc581", "age": 76, "index": 581, "body": "VTKGNKUHMP"} +{"name": "pymc783", "age": 76, "index": 783, "body": "VTKGNKUHMP"} +{"name": "pymc594", "age": 89, "index": 594, "body": "VTKGNKUHMP"} +{"name": "pymc560", "age": 55, "index": 560, "body": "VTKGNKUHMP"} +{"name": "pymc572", "age": 67, "index": 572, "body": "VTKGNKUHMP"} +{"name": "pymc797", "age": 90, "index": 797, "body": "VTKGNKUHMP"} +{"name": "pymc592", "age": 87, "index": 592, "body": "VTKGNKUHMP"} +{"name": "pymc776", "age": 69, "index": 776, "body": "VTKGNKUHMP"} +{"name": "pymc795", "age": 88, "index": 795, "body": "VTKGNKUHMP"} +{"name": "pymc567", "age": 62, "index": 567, "body": "VTKGNKUHMP"} +{"name": "pymc477", "age": 73, "index": 477, "body": "VTKGNKUHMP"} +{"name": "pymc470", "age": 66, "index": 470, "body": "VTKGNKUHMP"} +{"name": "pymc627", "age": 21, "index": 627, "body": "VTKGNKUHMP"} +{"name": "pymc476", "age": 72, "index": 476, "body": "VTKGNKUHMP"} +{"name": "pymc597", "age": 92, "index": 597, "body": "VTKGNKUHMP"} +{"name": "pymc435", "age": 31, "index": 435, "body": "VTKGNKUHMP"} +{"name": "pymc714", "age": 7, "index": 714, "body": "VTKGNKUHMP"} +{"name": "pymc475", "age": 71, "index": 475, "body": "VTKGNKUHMP"} +{"name": "pymc456", "age": 52, "index": 456, "body": "VTKGNKUHMP"} +{"name": "pymc515", "age": 10, "index": 515, "body": "VTKGNKUHMP"} +{"name": "pymc631", "age": 25, "index": 631, "body": "VTKGNKUHMP"} +{"name": "pymc437", "age": 33, "index": 437, "body": "VTKGNKUHMP"} +{"name": "pymc432", "age": 28, 
"index": 432, "body": "VTKGNKUHMP"} +{"name": "pymc596", "age": 91, "index": 596, "body": "VTKGNKUHMP"} +{"name": "pymc576", "age": 71, "index": 576, "body": "VTKGNKUHMP"} +{"name": "pymc472", "age": 68, "index": 472, "body": "VTKGNKUHMP"} +{"name": "pymc481", "age": 77, "index": 481, "body": "VTKGNKUHMP"} +{"name": "pymc422", "age": 18, "index": 422, "body": "VTKGNKUHMP"} +{"name": "pymc793", "age": 86, "index": 793, "body": "VTKGNKUHMP"} +{"name": "pymc471", "age": 67, "index": 471, "body": "VTKGNKUHMP"} +{"name": "pymc787", "age": 80, "index": 787, "body": "VTKGNKUHMP"} +{"name": "pymc784", "age": 77, "index": 784, "body": "VTKGNKUHMP"} +{"name": "pymc593", "age": 88, "index": 593, "body": "VTKGNKUHMP"} +{"name": "pymc760", "age": 53, "index": 760, "body": "VTKGNKUHMP"} +{"name": "pymc501", "age": 97, "index": 501, "body": "VTKGNKUHMP"} +{"name": "pymc502", "age": 98, "index": 502, "body": "VTKGNKUHMP"} +{"name": "pymc465", "age": 61, "index": 465, "body": "VTKGNKUHMP"} +{"name": "pymc570", "age": 65, "index": 570, "body": "VTKGNKUHMP"} +{"name": "pymc573", "age": 68, "index": 573, "body": "VTKGNKUHMP"} +{"name": "pymc563", "age": 58, "index": 563, "body": "VTKGNKUHMP"} +{"name": "pymc796", "age": 89, "index": 796, "body": "VTKGNKUHMP"} +{"name": "pymc565", "age": 60, "index": 565, "body": "VTKGNKUHMP"} +{"name": "pymc785", "age": 78, "index": 785, "body": "VTKGNKUHMP"} +{"name": "pymc577", "age": 72, "index": 577, "body": "VTKGNKUHMP"} +{"name": "pymc566", "age": 61, "index": 566, "body": "VTKGNKUHMP"} +{"name": "pymc466", "age": 62, "index": 466, "body": "VTKGNKUHMP"} +{"name": "pymc514", "age": 9, "index": 514, "body": "VTKGNKUHMP"} +{"name": "pymc510", "age": 5, "index": 510, "body": "VTKGNKUHMP"} +{"name": "pymc583", "age": 78, "index": 583, "body": "VTKGNKUHMP"} +{"name": "pymc580", "age": 75, "index": 580, "body": "VTKGNKUHMP"} +{"name": "pymc506", "age": 1, "index": 506, "body": "VTKGNKUHMP"} +{"name": "pymc792", "age": 85, "index": 792, "body": "VTKGNKUHMP"} +{"name": "pymc505", "age": 0, "index": 505, "body": "VTKGNKUHMP"} +{"name": "pymc503", "age": 99, "index": 503, "body": "VTKGNKUHMP"} +{"name": "pymc595", "age": 90, "index": 595, "body": "VTKGNKUHMP"} +{"name": "pymc513", "age": 8, "index": 513, "body": "VTKGNKUHMP"} +{"name": "pymc434", "age": 30, "index": 434, "body": "VTKGNKUHMP"} +{"name": "pymc462", "age": 58, "index": 462, "body": "VTKGNKUHMP"} +{"name": "pymc464", "age": 60, "index": 464, "body": "VTKGNKUHMP"} +{"name": "pymc781", "age": 74, "index": 781, "body": "VTKGNKUHMP"} +{"name": "pymc561", "age": 56, "index": 561, "body": "VTKGNKUHMP"} +{"name": "pymc782", "age": 75, "index": 782, "body": "VTKGNKUHMP"} +{"name": "pymc791", "age": 84, "index": 791, "body": "VTKGNKUHMP"} +{"name": "pymc794", "age": 87, "index": 794, "body": "VTKGNKUHMP"} +{"name": "pymc590", "age": 85, "index": 590, "body": "VTKGNKUHMP"} +{"name": "pymc586", "age": 81, "index": 586, "body": "VTKGNKUHMP"} +{"name": "pymc474", "age": 70, "index": 474, "body": "VTKGNKUHMP"} +{"name": "pymc431", "age": 27, "index": 431, "body": "VTKGNKUHMP"} +{"name": "pymc507", "age": 2, "index": 507, "body": "VTKGNKUHMP"} +{"name": "pymc467", "age": 63, "index": 467, "body": "VTKGNKUHMP"} +{"name": "pymc511", "age": 6, "index": 511, "body": "VTKGNKUHMP"} diff --git a/resources/imex/json_10_lines b/resources/imex/json_10_lines new file mode 100644 index 000000000..c0ab929d8 --- /dev/null +++ b/resources/imex/json_10_lines @@ -0,0 +1,10 @@ +{"name": "pymc9", "age": 9, "index": 9, "body": "VTKGNKUHMP"} +{"name": 
"pymc5", "age": 5, "index": 5, "body": "VTKGNKUHMP"} +{"name": "pymc6", "age": 6, "index": 6, "body": "VTKGNKUHMP"} +{"name": "pymc3", "age": 3, "index": 3, "body": "VTKGNKUHMP"} +{"name": "pymc1", "age": 1, "index": 1, "body": "VTKGNKUHMP"} +{"name": "pymc4", "age": 4, "index": 4, "body": "VTKGNKUHMP"} +{"name": "pymc0", "age": 0, "index": 0, "body": "VTKGNKUHMP"} +{"name": "pymc8", "age": 8, "index": 8, "body": "VTKGNKUHMP"} +{"name": "pymc7", "age": 7, "index": 7, "body": "VTKGNKUHMP"} +{"name": "pymc2", "age": 2, "index": 2, "body": "VTKGNKUHMP"} diff --git a/resources/imex/json_list_1000_lines b/resources/imex/json_list_1000_lines new file mode 100644 index 000000000..db51ddea8 --- /dev/null +++ b/resources/imex/json_list_1000_lines @@ -0,0 +1,1000 @@ +[{"name": "pymc265", "age": 63, "index": 265, "body": "VTKGNKUHMP"}, +{"name": "pymc254", "age": 52, "index": 254, "body": "VTKGNKUHMP"}, +{"name": "pymc105", "age": 4, "index": 105, "body": "VTKGNKUHMP"}, +{"name": "pymc95", "age": 95, "index": 95, "body": "VTKGNKUHMP"}, +{"name": "pymc75", "age": 75, "index": 75, "body": "VTKGNKUHMP"}, +{"name": "pymc882", "age": 74, "index": 882, "body": "VTKGNKUHMP"}, +{"name": "pymc285", "age": 83, "index": 285, "body": "VTKGNKUHMP"}, +{"name": "pymc293", "age": 91, "index": 293, "body": "VTKGNKUHMP"}, +{"name": "pymc750", "age": 43, "index": 750, "body": "VTKGNKUHMP"}, +{"name": "pymc68", "age": 68, "index": 68, "body": "VTKGNKUHMP"}, +{"name": "pymc177", "age": 76, "index": 177, "body": "VTKGNKUHMP"}, +{"name": "pymc115", "age": 14, "index": 115, "body": "VTKGNKUHMP"}, +{"name": "pymc479", "age": 75, "index": 479, "body": "VTKGNKUHMP"}, +{"name": "pymc877", "age": 69, "index": 877, "body": "VTKGNKUHMP"}, +{"name": "pymc283", "age": 81, "index": 283, "body": "VTKGNKUHMP"}, +{"name": "pymc741", "age": 34, "index": 741, "body": "VTKGNKUHMP"}, +{"name": "pymc923", "age": 14, "index": 923, "body": "VTKGNKUHMP"}, +{"name": "pymc264", "age": 62, "index": 264, "body": "VTKGNKUHMP"}, +{"name": "pymc109", "age": 8, "index": 109, "body": "VTKGNKUHMP"}, +{"name": "pymc90", "age": 90, "index": 90, "body": "VTKGNKUHMP"}, +{"name": "pymc952", "age": 43, "index": 952, "body": "VTKGNKUHMP"}, +{"name": "pymc779", "age": 72, "index": 779, "body": "VTKGNKUHMP"}, +{"name": "pymc778", "age": 71, "index": 778, "body": "VTKGNKUHMP"}, +{"name": "pymc292", "age": 90, "index": 292, "body": "VTKGNKUHMP"}, +{"name": "pymc697", "age": 91, "index": 697, "body": "VTKGNKUHMP"}, +{"name": "pymc957", "age": 48, "index": 957, "body": "VTKGNKUHMP"}, +{"name": "pymc756", "age": 49, "index": 756, "body": "VTKGNKUHMP"}, +{"name": "pymc682", "age": 76, "index": 682, "body": "VTKGNKUHMP"}, +{"name": "pymc149", "age": 48, "index": 149, "body": "VTKGNKUHMP"}, +{"name": "pymc608", "age": 2, "index": 608, "body": "VTKGNKUHMP"}, +{"name": "pymc127", "age": 26, "index": 127, "body": "VTKGNKUHMP"}, +{"name": "pymc69", "age": 69, "index": 69, "body": "VTKGNKUHMP"}, +{"name": "pymc932", "age": 23, "index": 932, "body": "VTKGNKUHMP"}, +{"name": "pymc735", "age": 28, "index": 735, "body": "VTKGNKUHMP"}, +{"name": "pymc29", "age": 29, "index": 29, "body": "VTKGNKUHMP"}, +{"name": "pymc687", "age": 81, "index": 687, "body": "VTKGNKUHMP"}, +{"name": "pymc579", "age": 74, "index": 579, "body": "VTKGNKUHMP"}, +{"name": "pymc208", "age": 6, "index": 208, "body": "VTKGNKUHMP"}, +{"name": "pymc228", "age": 26, "index": 228, "body": "VTKGNKUHMP"}, +{"name": "pymc965", "age": 56, "index": 965, "body": "VTKGNKUHMP"}, +{"name": "pymc219", "age": 17, "index": 
219, "body": "VTKGNKUHMP"}, +{"name": "pymc747", "age": 40, "index": 747, "body": "VTKGNKUHMP"}, +{"name": "pymc221", "age": 19, "index": 221, "body": "VTKGNKUHMP"}, +{"name": "pymc248", "age": 46, "index": 248, "body": "VTKGNKUHMP"}, +{"name": "pymc859", "age": 51, "index": 859, "body": "VTKGNKUHMP"}, +{"name": "pymc438", "age": 34, "index": 438, "body": "VTKGNKUHMP"}, +{"name": "pymc834", "age": 26, "index": 834, "body": "VTKGNKUHMP"}, +{"name": "pymc199", "age": 98, "index": 199, "body": "VTKGNKUHMP"}, +{"name": "pymc139", "age": 38, "index": 139, "body": "VTKGNKUHMP"}, +{"name": "pymc825", "age": 17, "index": 825, "body": "VTKGNKUHMP"}, +{"name": "pymc56", "age": 56, "index": 56, "body": "VTKGNKUHMP"}, +{"name": "pymc804", "age": 97, "index": 804, "body": "VTKGNKUHMP"}, +{"name": "pymc824", "age": 16, "index": 824, "body": "VTKGNKUHMP"}, +{"name": "pymc277", "age": 75, "index": 277, "body": "VTKGNKUHMP"}, +{"name": "pymc866", "age": 58, "index": 866, "body": "VTKGNKUHMP"}, +{"name": "pymc488", "age": 84, "index": 488, "body": "VTKGNKUHMP"}, +{"name": "pymc837", "age": 29, "index": 837, "body": "VTKGNKUHMP"}, +{"name": "pymc744", "age": 37, "index": 744, "body": "VTKGNKUHMP"}, +{"name": "pymc853", "age": 45, "index": 853, "body": "VTKGNKUHMP"}, +{"name": "pymc372", "age": 69, "index": 372, "body": "VTKGNKUHMP"}, +{"name": "pymc201", "age": 100, "index": 201, "body": "VTKGNKUHMP"}, +{"name": "pymc144", "age": 43, "index": 144, "body": "VTKGNKUHMP"}, +{"name": "pymc678", "age": 72, "index": 678, "body": "VTKGNKUHMP"}, +{"name": "pymc369", "age": 66, "index": 369, "body": "VTKGNKUHMP"}, +{"name": "pymc157", "age": 56, "index": 157, "body": "VTKGNKUHMP"}, +{"name": "pymc258", "age": 56, "index": 258, "body": "VTKGNKUHMP"}, +{"name": "pymc350", "age": 47, "index": 350, "body": "VTKGNKUHMP"}, +{"name": "pymc681", "age": 75, "index": 681, "body": "VTKGNKUHMP"}, +{"name": "pymc962", "age": 53, "index": 962, "body": "VTKGNKUHMP"}, +{"name": "pymc333", "age": 30, "index": 333, "body": "VTKGNKUHMP"}, +{"name": "pymc188", "age": 87, "index": 188, "body": "VTKGNKUHMP"}, +{"name": "pymc21", "age": 21, "index": 21, "body": "VTKGNKUHMP"}, +{"name": "pymc150", "age": 49, "index": 150, "body": "VTKGNKUHMP"}, +{"name": "pymc230", "age": 28, "index": 230, "body": "VTKGNKUHMP"}, +{"name": "pymc800", "age": 93, "index": 800, "body": "VTKGNKUHMP"}, +{"name": "pymc111", "age": 10, "index": 111, "body": "VTKGNKUHMP"}, +{"name": "pymc175", "age": 74, "index": 175, "body": "VTKGNKUHMP"}, +{"name": "pymc810", "age": 2, "index": 810, "body": "VTKGNKUHMP"}, +{"name": "pymc458", "age": 54, "index": 458, "body": "VTKGNKUHMP"}, +{"name": "pymc251", "age": 49, "index": 251, "body": "VTKGNKUHMP"}, +{"name": "pymc46", "age": 46, "index": 46, "body": "VTKGNKUHMP"}, +{"name": "pymc806", "age": 99, "index": 806, "body": "VTKGNKUHMP"}, +{"name": "pymc71", "age": 71, "index": 71, "body": "VTKGNKUHMP"}, +{"name": "pymc241", "age": 39, "index": 241, "body": "VTKGNKUHMP"}, +{"name": "pymc728", "age": 21, "index": 728, "body": "VTKGNKUHMP"}, +{"name": "pymc25", "age": 25, "index": 25, "body": "VTKGNKUHMP"}, +{"name": "pymc886", "age": 78, "index": 886, "body": "VTKGNKUHMP"}, +{"name": "pymc73", "age": 73, "index": 73, "body": "VTKGNKUHMP"}, +{"name": "pymc360", "age": 57, "index": 360, "body": "VTKGNKUHMP"}, +{"name": "pymc899", "age": 91, "index": 899, "body": "VTKGNKUHMP"}, +{"name": "pymc979", "age": 70, "index": 979, "body": "VTKGNKUHMP"}, +{"name": "pymc178", "age": 77, "index": 178, "body": "VTKGNKUHMP"}, +{"name": "pymc84", 
"age": 84, "index": 84, "body": "VTKGNKUHMP"}, +{"name": "pymc130", "age": 29, "index": 130, "body": "VTKGNKUHMP"}, +{"name": "pymc324", "age": 21, "index": 324, "body": "VTKGNKUHMP"}, +{"name": "pymc355", "age": 52, "index": 355, "body": "VTKGNKUHMP"}, +{"name": "pymc680", "age": 74, "index": 680, "body": "VTKGNKUHMP"}, +{"name": "pymc842", "age": 34, "index": 842, "body": "VTKGNKUHMP"}, +{"name": "pymc289", "age": 87, "index": 289, "body": "VTKGNKUHMP"}, +{"name": "pymc729", "age": 22, "index": 729, "body": "VTKGNKUHMP"}, +{"name": "pymc299", "age": 97, "index": 299, "body": "VTKGNKUHMP"}, +{"name": "pymc894", "age": 86, "index": 894, "body": "VTKGNKUHMP"}, +{"name": "pymc639", "age": 33, "index": 639, "body": "VTKGNKUHMP"}, +{"name": "pymc969", "age": 60, "index": 969, "body": "VTKGNKUHMP"}, +{"name": "pymc194", "age": 93, "index": 194, "body": "VTKGNKUHMP"}, +{"name": "pymc96", "age": 96, "index": 96, "body": "VTKGNKUHMP"}, +{"name": "pymc22", "age": 22, "index": 22, "body": "VTKGNKUHMP"}, +{"name": "pymc529", "age": 24, "index": 529, "body": "VTKGNKUHMP"}, +{"name": "pymc865", "age": 57, "index": 865, "body": "VTKGNKUHMP"}, +{"name": "pymc222", "age": 20, "index": 222, "body": "VTKGNKUHMP"}, +{"name": "pymc98", "age": 98, "index": 98, "body": "VTKGNKUHMP"}, +{"name": "pymc17", "age": 17, "index": 17, "body": "VTKGNKUHMP"}, +{"name": "pymc32", "age": 32, "index": 32, "body": "VTKGNKUHMP"}, +{"name": "pymc870", "age": 62, "index": 870, "body": "VTKGNKUHMP"}, +{"name": "pymc19", "age": 19, "index": 19, "body": "VTKGNKUHMP"}, +{"name": "pymc897", "age": 89, "index": 897, "body": "VTKGNKUHMP"}, +{"name": "pymc104", "age": 3, "index": 104, "body": "VTKGNKUHMP"}, +{"name": "pymc469", "age": 65, "index": 469, "body": "VTKGNKUHMP"}, +{"name": "pymc830", "age": 22, "index": 830, "body": "VTKGNKUHMP"}, +{"name": "pymc53", "age": 53, "index": 53, "body": "VTKGNKUHMP"}, +{"name": "pymc48", "age": 48, "index": 48, "body": "VTKGNKUHMP"}, +{"name": "pymc158", "age": 57, "index": 158, "body": "VTKGNKUHMP"}, +{"name": "pymc270", "age": 68, "index": 270, "body": "VTKGNKUHMP"}, +{"name": "pymc869", "age": 61, "index": 869, "body": "VTKGNKUHMP"}, +{"name": "pymc847", "age": 39, "index": 847, "body": "VTKGNKUHMP"}, +{"name": "pymc370", "age": 67, "index": 370, "body": "VTKGNKUHMP"}, +{"name": "pymc243", "age": 41, "index": 243, "body": "VTKGNKUHMP"}, +{"name": "pymc143", "age": 42, "index": 143, "body": "VTKGNKUHMP"}, +{"name": "pymc120", "age": 19, "index": 120, "body": "VTKGNKUHMP"}, +{"name": "pymc817", "age": 9, "index": 817, "body": "VTKGNKUHMP"}, +{"name": "pymc132", "age": 31, "index": 132, "body": "VTKGNKUHMP"}, +{"name": "pymc298", "age": 96, "index": 298, "body": "VTKGNKUHMP"}, +{"name": "pymc449", "age": 45, "index": 449, "body": "VTKGNKUHMP"}, +{"name": "pymc122", "age": 21, "index": 122, "body": "VTKGNKUHMP"}, +{"name": "pymc50", "age": 50, "index": 50, "body": "VTKGNKUHMP"}, +{"name": "pymc192", "age": 91, "index": 192, "body": "VTKGNKUHMP"}, +{"name": "pymc720", "age": 13, "index": 720, "body": "VTKGNKUHMP"}, +{"name": "pymc971", "age": 62, "index": 971, "body": "VTKGNKUHMP"}, +{"name": "pymc51", "age": 51, "index": 51, "body": "VTKGNKUHMP"}, +{"name": "pymc102", "age": 1, "index": 102, "body": "VTKGNKUHMP"}, +{"name": "pymc34", "age": 34, "index": 34, "body": "VTKGNKUHMP"}, +{"name": "pymc88", "age": 88, "index": 88, "body": "VTKGNKUHMP"}, +{"name": "pymc1", "age": 1, "index": 1, "body": "VTKGNKUHMP"}, +{"name": "pymc802", "age": 95, "index": 802, "body": "VTKGNKUHMP"}, +{"name": "pymc976", 
"age": 67, "index": 976, "body": "VTKGNKUHMP"}, +{"name": "pymc734", "age": 27, "index": 734, "body": "VTKGNKUHMP"}, +{"name": "pymc39", "age": 39, "index": 39, "body": "VTKGNKUHMP"}, +{"name": "pymc419", "age": 15, "index": 419, "body": "VTKGNKUHMP"}, +{"name": "pymc487", "age": 83, "index": 487, "body": "VTKGNKUHMP"}, +{"name": "pymc526", "age": 21, "index": 526, "body": "VTKGNKUHMP"}, +{"name": "pymc677", "age": 71, "index": 677, "body": "VTKGNKUHMP"}, +{"name": "pymc489", "age": 85, "index": 489, "body": "VTKGNKUHMP"}, +{"name": "pymc49", "age": 49, "index": 49, "body": "VTKGNKUHMP"}, +{"name": "pymc30", "age": 30, "index": 30, "body": "VTKGNKUHMP"}, +{"name": "pymc272", "age": 70, "index": 272, "body": "VTKGNKUHMP"}, +{"name": "pymc170", "age": 69, "index": 170, "body": "VTKGNKUHMP"}, +{"name": "pymc832", "age": 24, "index": 832, "body": "VTKGNKUHMP"}, +{"name": "pymc156", "age": 55, "index": 156, "body": "VTKGNKUHMP"}, +{"name": "pymc746", "age": 39, "index": 746, "body": "VTKGNKUHMP"}, +{"name": "pymc723", "age": 16, "index": 723, "body": "VTKGNKUHMP"}, +{"name": "pymc357", "age": 54, "index": 357, "body": "VTKGNKUHMP"}, +{"name": "pymc568", "age": 63, "index": 568, "body": "VTKGNKUHMP"}, +{"name": "pymc2", "age": 2, "index": 2, "body": "VTKGNKUHMP"}, +{"name": "pymc66", "age": 66, "index": 66, "body": "VTKGNKUHMP"}, +{"name": "pymc320", "age": 17, "index": 320, "body": "VTKGNKUHMP"}, +{"name": "pymc845", "age": 37, "index": 845, "body": "VTKGNKUHMP"}, +{"name": "pymc731", "age": 24, "index": 731, "body": "VTKGNKUHMP"}, +{"name": "pymc205", "age": 3, "index": 205, "body": "VTKGNKUHMP"}, +{"name": "pymc162", "age": 61, "index": 162, "body": "VTKGNKUHMP"}, +{"name": "pymc239", "age": 37, "index": 239, "body": "VTKGNKUHMP"}, +{"name": "pymc70", "age": 70, "index": 70, "body": "VTKGNKUHMP"}, +{"name": "pymc740", "age": 33, "index": 740, "body": "VTKGNKUHMP"}, +{"name": "pymc578", "age": 73, "index": 578, "body": "VTKGNKUHMP"}, +{"name": "pymc169", "age": 68, "index": 169, "body": "VTKGNKUHMP"}, +{"name": "pymc86", "age": 86, "index": 86, "body": "VTKGNKUHMP"}, +{"name": "pymc99", "age": 99, "index": 99, "body": "VTKGNKUHMP"}, +{"name": "pymc249", "age": 47, "index": 249, "body": "VTKGNKUHMP"}, +{"name": "pymc977", "age": 68, "index": 977, "body": "VTKGNKUHMP"}, +{"name": "pymc353", "age": 50, "index": 353, "body": "VTKGNKUHMP"}, +{"name": "pymc267", "age": 65, "index": 267, "body": "VTKGNKUHMP"}, +{"name": "pymc931", "age": 22, "index": 931, "body": "VTKGNKUHMP"}, +{"name": "pymc538", "age": 33, "index": 538, "body": "VTKGNKUHMP"}, +{"name": "pymc958", "age": 49, "index": 958, "body": "VTKGNKUHMP"}, +{"name": "pymc753", "age": 46, "index": 753, "body": "VTKGNKUHMP"}, +{"name": "pymc754", "age": 47, "index": 754, "body": "VTKGNKUHMP"}, +{"name": "pymc155", "age": 54, "index": 155, "body": "VTKGNKUHMP"}, +{"name": "pymc736", "age": 29, "index": 736, "body": "VTKGNKUHMP"}, +{"name": "pymc191", "age": 90, "index": 191, "body": "VTKGNKUHMP"}, +{"name": "pymc559", "age": 54, "index": 559, "body": "VTKGNKUHMP"}, +{"name": "pymc726", "age": 19, "index": 726, "body": "VTKGNKUHMP"}, +{"name": "pymc107", "age": 6, "index": 107, "body": "VTKGNKUHMP"}, +{"name": "pymc57", "age": 57, "index": 57, "body": "VTKGNKUHMP"}, +{"name": "pymc827", "age": 19, "index": 827, "body": "VTKGNKUHMP"}, +{"name": "pymc379", "age": 76, "index": 379, "body": "VTKGNKUHMP"}, +{"name": "pymc128", "age": 27, "index": 128, "body": "VTKGNKUHMP"}, +{"name": "pymc857", "age": 49, "index": 857, "body": "VTKGNKUHMP"}, +{"name": 
"pymc263", "age": 61, "index": 263, "body": "VTKGNKUHMP"}, +{"name": "pymc259", "age": 57, "index": 259, "body": "VTKGNKUHMP"}, +{"name": "pymc167", "age": 66, "index": 167, "body": "VTKGNKUHMP"}, +{"name": "pymc363", "age": 60, "index": 363, "body": "VTKGNKUHMP"}, +{"name": "pymc288", "age": 86, "index": 288, "body": "VTKGNKUHMP"}, +{"name": "pymc231", "age": 29, "index": 231, "body": "VTKGNKUHMP"}, +{"name": "pymc200", "age": 99, "index": 200, "body": "VTKGNKUHMP"}, +{"name": "pymc941", "age": 32, "index": 941, "body": "VTKGNKUHMP"}, +{"name": "pymc801", "age": 94, "index": 801, "body": "VTKGNKUHMP"}, +{"name": "pymc14", "age": 14, "index": 14, "body": "VTKGNKUHMP"}, +{"name": "pymc679", "age": 73, "index": 679, "body": "VTKGNKUHMP"}, +{"name": "pymc328", "age": 25, "index": 328, "body": "VTKGNKUHMP"}, +{"name": "pymc129", "age": 28, "index": 129, "body": "VTKGNKUHMP"}, +{"name": "pymc448", "age": 44, "index": 448, "body": "VTKGNKUHMP"}, +{"name": "pymc234", "age": 32, "index": 234, "body": "VTKGNKUHMP"}, +{"name": "pymc164", "age": 63, "index": 164, "body": "VTKGNKUHMP"}, +{"name": "pymc181", "age": 80, "index": 181, "body": "VTKGNKUHMP"}, +{"name": "pymc849", "age": 41, "index": 849, "body": "VTKGNKUHMP"}, +{"name": "pymc108", "age": 7, "index": 108, "body": "VTKGNKUHMP"}, +{"name": "pymc371", "age": 68, "index": 371, "body": "VTKGNKUHMP"}, +{"name": "pymc939", "age": 30, "index": 939, "body": "VTKGNKUHMP"}, +{"name": "pymc116", "age": 15, "index": 116, "body": "VTKGNKUHMP"}, +{"name": "pymc47", "age": 47, "index": 47, "body": "VTKGNKUHMP"}, +{"name": "pymc253", "age": 51, "index": 253, "body": "VTKGNKUHMP"}, +{"name": "pymc950", "age": 41, "index": 950, "body": "VTKGNKUHMP"}, +{"name": "pymc722", "age": 15, "index": 722, "body": "VTKGNKUHMP"}, +{"name": "pymc826", "age": 18, "index": 826, "body": "VTKGNKUHMP"}, +{"name": "pymc238", "age": 36, "index": 238, "body": "VTKGNKUHMP"}, +{"name": "pymc257", "age": 55, "index": 257, "body": "VTKGNKUHMP"}, +{"name": "pymc880", "age": 72, "index": 880, "body": "VTKGNKUHMP"}, +{"name": "pymc166", "age": 65, "index": 166, "body": "VTKGNKUHMP"}, +{"name": "pymc727", "age": 20, "index": 727, "body": "VTKGNKUHMP"}, +{"name": "pymc751", "age": 44, "index": 751, "body": "VTKGNKUHMP"}, +{"name": "pymc97", "age": 97, "index": 97, "body": "VTKGNKUHMP"}, +{"name": "pymc235", "age": 33, "index": 235, "body": "VTKGNKUHMP"}, +{"name": "pymc951", "age": 42, "index": 951, "body": "VTKGNKUHMP"}, +{"name": "pymc278", "age": 76, "index": 278, "body": "VTKGNKUHMP"}, +{"name": "pymc80", "age": 80, "index": 80, "body": "VTKGNKUHMP"}, +{"name": "pymc124", "age": 23, "index": 124, "body": "VTKGNKUHMP"}, +{"name": "pymc956", "age": 47, "index": 956, "body": "VTKGNKUHMP"}, +{"name": "pymc872", "age": 64, "index": 872, "body": "VTKGNKUHMP"}, +{"name": "pymc948", "age": 39, "index": 948, "body": "VTKGNKUHMP"}, +{"name": "pymc186", "age": 85, "index": 186, "body": "VTKGNKUHMP"}, +{"name": "pymc947", "age": 38, "index": 947, "body": "VTKGNKUHMP"}, +{"name": "pymc276", "age": 74, "index": 276, "body": "VTKGNKUHMP"}, +{"name": "pymc59", "age": 59, "index": 59, "body": "VTKGNKUHMP"}, +{"name": "pymc168", "age": 67, "index": 168, "body": "VTKGNKUHMP"}, +{"name": "pymc284", "age": 82, "index": 284, "body": "VTKGNKUHMP"}, +{"name": "pymc260", "age": 58, "index": 260, "body": "VTKGNKUHMP"}, +{"name": "pymc118", "age": 17, "index": 118, "body": "VTKGNKUHMP"}, +{"name": "pymc83", "age": 83, "index": 83, "body": "VTKGNKUHMP"}, +{"name": "pymc148", "age": 47, "index": 148, "body": 
"VTKGNKUHMP"}, +{"name": "pymc256", "age": 54, "index": 256, "body": "VTKGNKUHMP"}, +{"name": "pymc281", "age": 79, "index": 281, "body": "VTKGNKUHMP"}, +{"name": "pymc26", "age": 26, "index": 26, "body": "VTKGNKUHMP"}, +{"name": "pymc757", "age": 50, "index": 757, "body": "VTKGNKUHMP"}, +{"name": "pymc863", "age": 55, "index": 863, "body": "VTKGNKUHMP"}, +{"name": "pymc879", "age": 71, "index": 879, "body": "VTKGNKUHMP"}, +{"name": "pymc326", "age": 23, "index": 326, "body": "VTKGNKUHMP"}, +{"name": "pymc282", "age": 80, "index": 282, "body": "VTKGNKUHMP"}, +{"name": "pymc184", "age": 83, "index": 184, "body": "VTKGNKUHMP"}, +{"name": "pymc619", "age": 13, "index": 619, "body": "VTKGNKUHMP"}, +{"name": "pymc334", "age": 31, "index": 334, "body": "VTKGNKUHMP"}, +{"name": "pymc213", "age": 11, "index": 213, "body": "VTKGNKUHMP"}, +{"name": "pymc92", "age": 92, "index": 92, "body": "VTKGNKUHMP"}, +{"name": "pymc160", "age": 59, "index": 160, "body": "VTKGNKUHMP"}, +{"name": "pymc152", "age": 51, "index": 152, "body": "VTKGNKUHMP"}, +{"name": "pymc499", "age": 95, "index": 499, "body": "VTKGNKUHMP"}, +{"name": "pymc721", "age": 14, "index": 721, "body": "VTKGNKUHMP"}, +{"name": "pymc7", "age": 7, "index": 7, "body": "VTKGNKUHMP"}, +{"name": "pymc352", "age": 49, "index": 352, "body": "VTKGNKUHMP"}, +{"name": "pymc478", "age": 74, "index": 478, "body": "VTKGNKUHMP"}, +{"name": "pymc140", "age": 39, "index": 140, "body": "VTKGNKUHMP"}, +{"name": "pymc850", "age": 42, "index": 850, "body": "VTKGNKUHMP"}, +{"name": "pymc833", "age": 25, "index": 833, "body": "VTKGNKUHMP"}, +{"name": "pymc179", "age": 78, "index": 179, "body": "VTKGNKUHMP"}, +{"name": "pymc337", "age": 34, "index": 337, "body": "VTKGNKUHMP"}, +{"name": "pymc759", "age": 52, "index": 759, "body": "VTKGNKUHMP"}, +{"name": "pymc821", "age": 13, "index": 821, "body": "VTKGNKUHMP"}, +{"name": "pymc949", "age": 40, "index": 949, "body": "VTKGNKUHMP"}, +{"name": "pymc296", "age": 94, "index": 296, "body": "VTKGNKUHMP"}, +{"name": "pymc848", "age": 40, "index": 848, "body": "VTKGNKUHMP"}, +{"name": "pymc23", "age": 23, "index": 23, "body": "VTKGNKUHMP"}, +{"name": "pymc91", "age": 91, "index": 91, "body": "VTKGNKUHMP"}, +{"name": "pymc942", "age": 33, "index": 942, "body": "VTKGNKUHMP"}, +{"name": "pymc42", "age": 42, "index": 42, "body": "VTKGNKUHMP"}, +{"name": "pymc843", "age": 35, "index": 843, "body": "VTKGNKUHMP"}, +{"name": "pymc749", "age": 42, "index": 749, "body": "VTKGNKUHMP"}, +{"name": "pymc377", "age": 74, "index": 377, "body": "VTKGNKUHMP"}, +{"name": "pymc126", "age": 25, "index": 126, "body": "VTKGNKUHMP"}, +{"name": "pymc244", "age": 42, "index": 244, "body": "VTKGNKUHMP"}, +{"name": "pymc683", "age": 77, "index": 683, "body": "VTKGNKUHMP"}, +{"name": "pymc816", "age": 8, "index": 816, "body": "VTKGNKUHMP"}, +{"name": "pymc890", "age": 82, "index": 890, "body": "VTKGNKUHMP"}, +{"name": "pymc944", "age": 35, "index": 944, "body": "VTKGNKUHMP"}, +{"name": "pymc101", "age": 0, "index": 101, "body": "VTKGNKUHMP"}, +{"name": "pymc173", "age": 72, "index": 173, "body": "VTKGNKUHMP"}, +{"name": "pymc809", "age": 1, "index": 809, "body": "VTKGNKUHMP"}, +{"name": "pymc294", "age": 92, "index": 294, "body": "VTKGNKUHMP"}, +{"name": "pymc141", "age": 40, "index": 141, "body": "VTKGNKUHMP"}, +{"name": "pymc739", "age": 32, "index": 739, "body": "VTKGNKUHMP"}, +{"name": "pymc732", "age": 25, "index": 732, "body": "VTKGNKUHMP"}, +{"name": "pymc171", "age": 70, "index": 171, "body": "VTKGNKUHMP"}, +{"name": "pymc685", "age": 79, 
"index": 685, "body": "VTKGNKUHMP"}, +{"name": "pymc376", "age": 73, "index": 376, "body": "VTKGNKUHMP"}, +{"name": "pymc65", "age": 65, "index": 65, "body": "VTKGNKUHMP"}, +{"name": "pymc940", "age": 31, "index": 940, "body": "VTKGNKUHMP"}, +{"name": "pymc224", "age": 22, "index": 224, "body": "VTKGNKUHMP"}, +{"name": "pymc123", "age": 22, "index": 123, "body": "VTKGNKUHMP"}, +{"name": "pymc176", "age": 75, "index": 176, "body": "VTKGNKUHMP"}, +{"name": "pymc812", "age": 4, "index": 812, "body": "VTKGNKUHMP"}, +{"name": "pymc269", "age": 67, "index": 269, "body": "VTKGNKUHMP"}, +{"name": "pymc669", "age": 63, "index": 669, "body": "VTKGNKUHMP"}, +{"name": "pymc290", "age": 88, "index": 290, "body": "VTKGNKUHMP"}, +{"name": "pymc210", "age": 8, "index": 210, "body": "VTKGNKUHMP"}, +{"name": "pymc197", "age": 96, "index": 197, "body": "VTKGNKUHMP"}, +{"name": "pymc820", "age": 12, "index": 820, "body": "VTKGNKUHMP"}, +{"name": "pymc725", "age": 18, "index": 725, "body": "VTKGNKUHMP"}, +{"name": "pymc27", "age": 27, "index": 27, "body": "VTKGNKUHMP"}, +{"name": "pymc336", "age": 33, "index": 336, "body": "VTKGNKUHMP"}, +{"name": "pymc876", "age": 68, "index": 876, "body": "VTKGNKUHMP"}, +{"name": "pymc648", "age": 42, "index": 648, "body": "VTKGNKUHMP"}, +{"name": "pymc889", "age": 81, "index": 889, "body": "VTKGNKUHMP"}, +{"name": "pymc35", "age": 35, "index": 35, "body": "VTKGNKUHMP"}, +{"name": "pymc724", "age": 17, "index": 724, "body": "VTKGNKUHMP"}, +{"name": "pymc13", "age": 13, "index": 13, "body": "VTKGNKUHMP"}, +{"name": "pymc8", "age": 8, "index": 8, "body": "VTKGNKUHMP"}, +{"name": "pymc972", "age": 63, "index": 972, "body": "VTKGNKUHMP"}, +{"name": "pymc112", "age": 11, "index": 112, "body": "VTKGNKUHMP"}, +{"name": "pymc692", "age": 86, "index": 692, "body": "VTKGNKUHMP"}, +{"name": "pymc291", "age": 89, "index": 291, "body": "VTKGNKUHMP"}, +{"name": "pymc87", "age": 87, "index": 87, "body": "VTKGNKUHMP"}, +{"name": "pymc628", "age": 22, "index": 628, "body": "VTKGNKUHMP"}, +{"name": "pymc978", "age": 69, "index": 978, "body": "VTKGNKUHMP"}, +{"name": "pymc10", "age": 10, "index": 10, "body": "VTKGNKUHMP"}, +{"name": "pymc961", "age": 52, "index": 961, "body": "VTKGNKUHMP"}, +{"name": "pymc974", "age": 65, "index": 974, "body": "VTKGNKUHMP"}, +{"name": "pymc67", "age": 67, "index": 67, "body": "VTKGNKUHMP"}, +{"name": "pymc960", "age": 51, "index": 960, "body": "VTKGNKUHMP"}, +{"name": "pymc335", "age": 32, "index": 335, "body": "VTKGNKUHMP"}, +{"name": "pymc327", "age": 24, "index": 327, "body": "VTKGNKUHMP"}, +{"name": "pymc815", "age": 7, "index": 815, "body": "VTKGNKUHMP"}, +{"name": "pymc203", "age": 1, "index": 203, "body": "VTKGNKUHMP"}, +{"name": "pymc975", "age": 66, "index": 975, "body": "VTKGNKUHMP"}, +{"name": "pymc261", "age": 59, "index": 261, "body": "VTKGNKUHMP"}, +{"name": "pymc43", "age": 43, "index": 43, "body": "VTKGNKUHMP"}, +{"name": "pymc743", "age": 36, "index": 743, "body": "VTKGNKUHMP"}, +{"name": "pymc934", "age": 25, "index": 934, "body": "VTKGNKUHMP"}, +{"name": "pymc875", "age": 67, "index": 875, "body": "VTKGNKUHMP"}, +{"name": "pymc138", "age": 37, "index": 138, "body": "VTKGNKUHMP"}, +{"name": "pymc936", "age": 27, "index": 936, "body": "VTKGNKUHMP"}, +{"name": "pymc136", "age": 35, "index": 136, "body": "VTKGNKUHMP"}, +{"name": "pymc844", "age": 36, "index": 844, "body": "VTKGNKUHMP"}, +{"name": "pymc322", "age": 19, "index": 322, "body": "VTKGNKUHMP"}, +{"name": "pymc569", "age": 64, "index": 569, "body": "VTKGNKUHMP"}, +{"name": "pymc609", 
"age": 3, "index": 609, "body": "VTKGNKUHMP"}, +{"name": "pymc214", "age": 12, "index": 214, "body": "VTKGNKUHMP"}, +{"name": "pymc72", "age": 72, "index": 72, "body": "VTKGNKUHMP"}, +{"name": "pymc835", "age": 27, "index": 835, "body": "VTKGNKUHMP"}, +{"name": "pymc81", "age": 81, "index": 81, "body": "VTKGNKUHMP"}, +{"name": "pymc874", "age": 66, "index": 874, "body": "VTKGNKUHMP"}, +{"name": "pymc216", "age": 14, "index": 216, "body": "VTKGNKUHMP"}, +{"name": "pymc60", "age": 60, "index": 60, "body": "VTKGNKUHMP"}, +{"name": "pymc246", "age": 44, "index": 246, "body": "VTKGNKUHMP"}, +{"name": "pymc967", "age": 58, "index": 967, "body": "VTKGNKUHMP"}, +{"name": "pymc268", "age": 66, "index": 268, "body": "VTKGNKUHMP"}, +{"name": "pymc182", "age": 81, "index": 182, "body": "VTKGNKUHMP"}, +{"name": "pymc165", "age": 64, "index": 165, "body": "VTKGNKUHMP"}, +{"name": "pymc946", "age": 37, "index": 946, "body": "VTKGNKUHMP"}, +{"name": "pymc860", "age": 52, "index": 860, "body": "VTKGNKUHMP"}, +{"name": "pymc218", "age": 16, "index": 218, "body": "VTKGNKUHMP"}, +{"name": "pymc823", "age": 15, "index": 823, "body": "VTKGNKUHMP"}, +{"name": "pymc964", "age": 55, "index": 964, "body": "VTKGNKUHMP"}, +{"name": "pymc895", "age": 87, "index": 895, "body": "VTKGNKUHMP"}, +{"name": "pymc362", "age": 59, "index": 362, "body": "VTKGNKUHMP"}, +{"name": "pymc36", "age": 36, "index": 36, "body": "VTKGNKUHMP"}, +{"name": "pymc970", "age": 61, "index": 970, "body": "VTKGNKUHMP"}, +{"name": "pymc498", "age": 94, "index": 498, "body": "VTKGNKUHMP"}, +{"name": "pymc151", "age": 50, "index": 151, "body": "VTKGNKUHMP"}, +{"name": "pymc539", "age": 34, "index": 539, "body": "VTKGNKUHMP"}, +{"name": "pymc338", "age": 35, "index": 338, "body": "VTKGNKUHMP"}, +{"name": "pymc752", "age": 45, "index": 752, "body": "VTKGNKUHMP"}, +{"name": "pymc89", "age": 89, "index": 89, "body": "VTKGNKUHMP"}, +{"name": "pymc358", "age": 55, "index": 358, "body": "VTKGNKUHMP"}, +{"name": "pymc695", "age": 89, "index": 695, "body": "VTKGNKUHMP"}, +{"name": "pymc31", "age": 31, "index": 31, "body": "VTKGNKUHMP"}, +{"name": "pymc64", "age": 64, "index": 64, "body": "VTKGNKUHMP"}, +{"name": "pymc699", "age": 93, "index": 699, "body": "VTKGNKUHMP"}, +{"name": "pymc217", "age": 15, "index": 217, "body": "VTKGNKUHMP"}, +{"name": "pymc271", "age": 69, "index": 271, "body": "VTKGNKUHMP"}, +{"name": "pymc745", "age": 38, "index": 745, "body": "VTKGNKUHMP"}, +{"name": "pymc163", "age": 62, "index": 163, "body": "VTKGNKUHMP"}, +{"name": "pymc891", "age": 83, "index": 891, "body": "VTKGNKUHMP"}, +{"name": "pymc12", "age": 12, "index": 12, "body": "VTKGNKUHMP"}, +{"name": "pymc180", "age": 79, "index": 180, "body": "VTKGNKUHMP"}, +{"name": "pymc236", "age": 34, "index": 236, "body": "VTKGNKUHMP"}, +{"name": "pymc106", "age": 5, "index": 106, "body": "VTKGNKUHMP"}, +{"name": "pymc202", "age": 0, "index": 202, "body": "VTKGNKUHMP"}, +{"name": "pymc146", "age": 45, "index": 146, "body": "VTKGNKUHMP"}, +{"name": "pymc62", "age": 62, "index": 62, "body": "VTKGNKUHMP"}, +{"name": "pymc242", "age": 40, "index": 242, "body": "VTKGNKUHMP"}, +{"name": "pymc252", "age": 50, "index": 252, "body": "VTKGNKUHMP"}, +{"name": "pymc354", "age": 51, "index": 354, "body": "VTKGNKUHMP"}, +{"name": "pymc846", "age": 38, "index": 846, "body": "VTKGNKUHMP"}, +{"name": "pymc937", "age": 28, "index": 937, "body": "VTKGNKUHMP"}, +{"name": "pymc329", "age": 26, "index": 329, "body": "VTKGNKUHMP"}, +{"name": "pymc110", "age": 9, "index": 110, "body": "VTKGNKUHMP"}, +{"name": 
"pymc356", "age": 53, "index": 356, "body": "VTKGNKUHMP"}, +{"name": "pymc638", "age": 32, "index": 638, "body": "VTKGNKUHMP"}, +{"name": "pymc15", "age": 15, "index": 15, "body": "VTKGNKUHMP"}, +{"name": "pymc813", "age": 5, "index": 813, "body": "VTKGNKUHMP"}, +{"name": "pymc5", "age": 5, "index": 5, "body": "VTKGNKUHMP"}, +{"name": "pymc117", "age": 16, "index": 117, "body": "VTKGNKUHMP"}, +{"name": "pymc323", "age": 20, "index": 323, "body": "VTKGNKUHMP"}, +{"name": "pymc884", "age": 76, "index": 884, "body": "VTKGNKUHMP"}, +{"name": "pymc691", "age": 85, "index": 691, "body": "VTKGNKUHMP"}, +{"name": "pymc887", "age": 79, "index": 887, "body": "VTKGNKUHMP"}, +{"name": "pymc408", "age": 4, "index": 408, "body": "VTKGNKUHMP"}, +{"name": "pymc690", "age": 84, "index": 690, "body": "VTKGNKUHMP"}, +{"name": "pymc223", "age": 21, "index": 223, "body": "VTKGNKUHMP"}, +{"name": "pymc684", "age": 78, "index": 684, "body": "VTKGNKUHMP"}, +{"name": "pymc190", "age": 89, "index": 190, "body": "VTKGNKUHMP"}, +{"name": "pymc862", "age": 54, "index": 862, "body": "VTKGNKUHMP"}, +{"name": "pymc933", "age": 24, "index": 933, "body": "VTKGNKUHMP"}, +{"name": "pymc38", "age": 38, "index": 38, "body": "VTKGNKUHMP"}, +{"name": "pymc852", "age": 44, "index": 852, "body": "VTKGNKUHMP"}, +{"name": "pymc137", "age": 36, "index": 137, "body": "VTKGNKUHMP"}, +{"name": "pymc954", "age": 45, "index": 954, "body": "VTKGNKUHMP"}, +{"name": "pymc855", "age": 47, "index": 855, "body": "VTKGNKUHMP"}, +{"name": "pymc938", "age": 29, "index": 938, "body": "VTKGNKUHMP"}, +{"name": "pymc94", "age": 94, "index": 94, "body": "VTKGNKUHMP"}, +{"name": "pymc955", "age": 46, "index": 955, "body": "VTKGNKUHMP"}, +{"name": "pymc93", "age": 93, "index": 93, "body": "VTKGNKUHMP"}, +{"name": "pymc737", "age": 30, "index": 737, "body": "VTKGNKUHMP"}, +{"name": "pymc76", "age": 76, "index": 76, "body": "VTKGNKUHMP"}, +{"name": "pymc459", "age": 55, "index": 459, "body": "VTKGNKUHMP"}, +{"name": "pymc973", "age": 64, "index": 973, "body": "VTKGNKUHMP"}, +{"name": "pymc428", "age": 24, "index": 428, "body": "VTKGNKUHMP"}, +{"name": "pymc262", "age": 60, "index": 262, "body": "VTKGNKUHMP"}, +{"name": "pymc113", "age": 12, "index": 113, "body": "VTKGNKUHMP"}, +{"name": "pymc659", "age": 53, "index": 659, "body": "VTKGNKUHMP"}, +{"name": "pymc229", "age": 27, "index": 229, "body": "VTKGNKUHMP"}, +{"name": "pymc103", "age": 2, "index": 103, "body": "VTKGNKUHMP"}, +{"name": "pymc367", "age": 64, "index": 367, "body": "VTKGNKUHMP"}, +{"name": "pymc273", "age": 71, "index": 273, "body": "VTKGNKUHMP"}, +{"name": "pymc321", "age": 18, "index": 321, "body": "VTKGNKUHMP"}, +{"name": "pymc40", "age": 40, "index": 40, "body": "VTKGNKUHMP"}, +{"name": "pymc361", "age": 58, "index": 361, "body": "VTKGNKUHMP"}, +{"name": "pymc172", "age": 71, "index": 172, "body": "VTKGNKUHMP"}, +{"name": "pymc898", "age": 90, "index": 898, "body": "VTKGNKUHMP"}, +{"name": "pymc868", "age": 60, "index": 868, "body": "VTKGNKUHMP"}, +{"name": "pymc885", "age": 77, "index": 885, "body": "VTKGNKUHMP"}, +{"name": "pymc943", "age": 34, "index": 943, "body": "VTKGNKUHMP"}, +{"name": "pymc803", "age": 96, "index": 803, "body": "VTKGNKUHMP"}, +{"name": "pymc9", "age": 9, "index": 9, "body": "VTKGNKUHMP"}, +{"name": "pymc77", "age": 77, "index": 77, "body": "VTKGNKUHMP"}, +{"name": "pymc558", "age": 53, "index": 558, "body": "VTKGNKUHMP"}, +{"name": "pymc215", "age": 13, "index": 215, "body": "VTKGNKUHMP"}, +{"name": "pymc730", "age": 23, "index": 730, "body": "VTKGNKUHMP"}, 
+{"name": "pymc187", "age": 86, "index": 187, "body": "VTKGNKUHMP"}, +{"name": "pymc240", "age": 38, "index": 240, "body": "VTKGNKUHMP"}, +{"name": "pymc927", "age": 18, "index": 927, "body": "VTKGNKUHMP"}, +{"name": "pymc966", "age": 57, "index": 966, "body": "VTKGNKUHMP"}, +{"name": "pymc295", "age": 93, "index": 295, "body": "VTKGNKUHMP"}, +{"name": "pymc841", "age": 33, "index": 841, "body": "VTKGNKUHMP"}, +{"name": "pymc851", "age": 43, "index": 851, "body": "VTKGNKUHMP"}, +{"name": "pymc468", "age": 64, "index": 468, "body": "VTKGNKUHMP"}, +{"name": "pymc364", "age": 61, "index": 364, "body": "VTKGNKUHMP"}, +{"name": "pymc61", "age": 61, "index": 61, "body": "VTKGNKUHMP"}, +{"name": "pymc922", "age": 13, "index": 922, "body": "VTKGNKUHMP"}, +{"name": "pymc378", "age": 75, "index": 378, "body": "VTKGNKUHMP"}, +{"name": "pymc686", "age": 80, "index": 686, "body": "VTKGNKUHMP"}, +{"name": "pymc331", "age": 28, "index": 331, "body": "VTKGNKUHMP"}, +{"name": "pymc693", "age": 87, "index": 693, "body": "VTKGNKUHMP"}, +{"name": "pymc892", "age": 84, "index": 892, "body": "VTKGNKUHMP"}, +{"name": "pymc374", "age": 71, "index": 374, "body": "VTKGNKUHMP"}, +{"name": "pymc864", "age": 56, "index": 864, "body": "VTKGNKUHMP"}, +{"name": "pymc839", "age": 31, "index": 839, "body": "VTKGNKUHMP"}, +{"name": "pymc6", "age": 6, "index": 6, "body": "VTKGNKUHMP"}, +{"name": "pymc953", "age": 44, "index": 953, "body": "VTKGNKUHMP"}, +{"name": "pymc858", "age": 50, "index": 858, "body": "VTKGNKUHMP"}, +{"name": "pymc28", "age": 28, "index": 28, "body": "VTKGNKUHMP"}, +{"name": "pymc926", "age": 17, "index": 926, "body": "VTKGNKUHMP"}, +{"name": "pymc78", "age": 78, "index": 78, "body": "VTKGNKUHMP"}, +{"name": "pymc365", "age": 62, "index": 365, "body": "VTKGNKUHMP"}, +{"name": "pymc100", "age": 100, "index": 100, "body": "VTKGNKUHMP"}, +{"name": "pymc33", "age": 33, "index": 33, "body": "VTKGNKUHMP"}, +{"name": "pymc18", "age": 18, "index": 18, "body": "VTKGNKUHMP"}, +{"name": "pymc195", "age": 94, "index": 195, "body": "VTKGNKUHMP"}, +{"name": "pymc549", "age": 44, "index": 549, "body": "VTKGNKUHMP"}, +{"name": "pymc255", "age": 53, "index": 255, "body": "VTKGNKUHMP"}, +{"name": "pymc883", "age": 75, "index": 883, "body": "VTKGNKUHMP"}, +{"name": "pymc245", "age": 43, "index": 245, "body": "VTKGNKUHMP"}, +{"name": "pymc125", "age": 24, "index": 125, "body": "VTKGNKUHMP"}, +{"name": "pymc52", "age": 52, "index": 52, "body": "VTKGNKUHMP"}, +{"name": "pymc206", "age": 4, "index": 206, "body": "VTKGNKUHMP"}, +{"name": "pymc37", "age": 37, "index": 37, "body": "VTKGNKUHMP"}, +{"name": "pymc133", "age": 32, "index": 133, "body": "VTKGNKUHMP"}, +{"name": "pymc339", "age": 36, "index": 339, "body": "VTKGNKUHMP"}, +{"name": "pymc198", "age": 97, "index": 198, "body": "VTKGNKUHMP"}, +{"name": "pymc838", "age": 30, "index": 838, "body": "VTKGNKUHMP"}, +{"name": "pymc963", "age": 54, "index": 963, "body": "VTKGNKUHMP"}, +{"name": "pymc925", "age": 16, "index": 925, "body": "VTKGNKUHMP"}, +{"name": "pymc58", "age": 58, "index": 58, "body": "VTKGNKUHMP"}, +{"name": "pymc878", "age": 70, "index": 878, "body": "VTKGNKUHMP"}, +{"name": "pymc921", "age": 12, "index": 921, "body": "VTKGNKUHMP"}, +{"name": "pymc836", "age": 28, "index": 836, "body": "VTKGNKUHMP"}, +{"name": "pymc807", "age": 100, "index": 807, "body": "VTKGNKUHMP"}, +{"name": "pymc119", "age": 18, "index": 119, "body": "VTKGNKUHMP"}, +{"name": "pymc79", "age": 79, "index": 79, "body": "VTKGNKUHMP"}, +{"name": "pymc366", "age": 63, "index": 366, "body": 
"VTKGNKUHMP"}, +{"name": "pymc183", "age": 82, "index": 183, "body": "VTKGNKUHMP"}, +{"name": "pymc45", "age": 45, "index": 45, "body": "VTKGNKUHMP"}, +{"name": "pymc814", "age": 6, "index": 814, "body": "VTKGNKUHMP"}, +{"name": "pymc548", "age": 43, "index": 548, "body": "VTKGNKUHMP"}, +{"name": "pymc359", "age": 56, "index": 359, "body": "VTKGNKUHMP"}, +{"name": "pymc274", "age": 72, "index": 274, "body": "VTKGNKUHMP"}, +{"name": "pymc881", "age": 73, "index": 881, "body": "VTKGNKUHMP"}, +{"name": "pymc528", "age": 23, "index": 528, "body": "VTKGNKUHMP"}, +{"name": "pymc618", "age": 12, "index": 618, "body": "VTKGNKUHMP"}, +{"name": "pymc209", "age": 7, "index": 209, "body": "VTKGNKUHMP"}, +{"name": "pymc351", "age": 48, "index": 351, "body": "VTKGNKUHMP"}, +{"name": "pymc871", "age": 63, "index": 871, "body": "VTKGNKUHMP"}, +{"name": "pymc16", "age": 16, "index": 16, "body": "VTKGNKUHMP"}, +{"name": "pymc193", "age": 92, "index": 193, "body": "VTKGNKUHMP"}, +{"name": "pymc668", "age": 62, "index": 668, "body": "VTKGNKUHMP"}, +{"name": "pymc418", "age": 14, "index": 418, "body": "VTKGNKUHMP"}, +{"name": "pymc161", "age": 60, "index": 161, "body": "VTKGNKUHMP"}, +{"name": "pymc174", "age": 73, "index": 174, "body": "VTKGNKUHMP"}, +{"name": "pymc55", "age": 55, "index": 55, "body": "VTKGNKUHMP"}, +{"name": "pymc854", "age": 46, "index": 854, "body": "VTKGNKUHMP"}, +{"name": "pymc694", "age": 88, "index": 694, "body": "VTKGNKUHMP"}, +{"name": "pymc738", "age": 31, "index": 738, "body": "VTKGNKUHMP"}, +{"name": "pymc85", "age": 85, "index": 85, "body": "VTKGNKUHMP"}, +{"name": "pymc888", "age": 80, "index": 888, "body": "VTKGNKUHMP"}, +{"name": "pymc742", "age": 35, "index": 742, "body": "VTKGNKUHMP"}, +{"name": "pymc250", "age": 48, "index": 250, "body": "VTKGNKUHMP"}, +{"name": "pymc225", "age": 23, "index": 225, "body": "VTKGNKUHMP"}, +{"name": "pymc41", "age": 41, "index": 41, "body": "VTKGNKUHMP"}, +{"name": "pymc840", "age": 32, "index": 840, "body": "VTKGNKUHMP"}, +{"name": "pymc829", "age": 21, "index": 829, "body": "VTKGNKUHMP"}, +{"name": "pymc153", "age": 52, "index": 153, "body": "VTKGNKUHMP"}, +{"name": "pymc935", "age": 26, "index": 935, "body": "VTKGNKUHMP"}, +{"name": "pymc247", "age": 45, "index": 247, "body": "VTKGNKUHMP"}, +{"name": "pymc287", "age": 85, "index": 287, "body": "VTKGNKUHMP"}, +{"name": "pymc227", "age": 25, "index": 227, "body": "VTKGNKUHMP"}, +{"name": "pymc928", "age": 19, "index": 928, "body": "VTKGNKUHMP"}, +{"name": "pymc330", "age": 27, "index": 330, "body": "VTKGNKUHMP"}, +{"name": "pymc121", "age": 20, "index": 121, "body": "VTKGNKUHMP"}, +{"name": "pymc893", "age": 85, "index": 893, "body": "VTKGNKUHMP"}, +{"name": "pymc114", "age": 13, "index": 114, "body": "VTKGNKUHMP"}, +{"name": "pymc768", "age": 61, "index": 768, "body": "VTKGNKUHMP"}, +{"name": "pymc861", "age": 53, "index": 861, "body": "VTKGNKUHMP"}, +{"name": "pymc233", "age": 31, "index": 233, "body": "VTKGNKUHMP"}, +{"name": "pymc658", "age": 52, "index": 658, "body": "VTKGNKUHMP"}, +{"name": "pymc698", "age": 92, "index": 698, "body": "VTKGNKUHMP"}, +{"name": "pymc332", "age": 29, "index": 332, "body": "VTKGNKUHMP"}, +{"name": "pymc135", "age": 34, "index": 135, "body": "VTKGNKUHMP"}, +{"name": "pymc805", "age": 98, "index": 805, "body": "VTKGNKUHMP"}, +{"name": "pymc74", "age": 74, "index": 74, "body": "VTKGNKUHMP"}, +{"name": "pymc134", "age": 33, "index": 134, "body": "VTKGNKUHMP"}, +{"name": "pymc189", "age": 88, "index": 189, "body": "VTKGNKUHMP"}, +{"name": "pymc896", "age": 88, 
"index": 896, "body": "VTKGNKUHMP"}, +{"name": "pymc733", "age": 26, "index": 733, "body": "VTKGNKUHMP"}, +{"name": "pymc755", "age": 48, "index": 755, "body": "VTKGNKUHMP"}, +{"name": "pymc196", "age": 95, "index": 196, "body": "VTKGNKUHMP"}, +{"name": "pymc4", "age": 4, "index": 4, "body": "VTKGNKUHMP"}, +{"name": "pymc688", "age": 82, "index": 688, "body": "VTKGNKUHMP"}, +{"name": "pymc930", "age": 21, "index": 930, "body": "VTKGNKUHMP"}, +{"name": "pymc286", "age": 84, "index": 286, "body": "VTKGNKUHMP"}, +{"name": "pymc968", "age": 59, "index": 968, "body": "VTKGNKUHMP"}, +{"name": "pymc867", "age": 59, "index": 867, "body": "VTKGNKUHMP"}, +{"name": "pymc828", "age": 20, "index": 828, "body": "VTKGNKUHMP"}, +{"name": "pymc280", "age": 78, "index": 280, "body": "VTKGNKUHMP"}, +{"name": "pymc147", "age": 46, "index": 147, "body": "VTKGNKUHMP"}, +{"name": "pymc279", "age": 77, "index": 279, "body": "VTKGNKUHMP"}, +{"name": "pymc232", "age": 30, "index": 232, "body": "VTKGNKUHMP"}, +{"name": "pymc54", "age": 54, "index": 54, "body": "VTKGNKUHMP"}, +{"name": "pymc212", "age": 10, "index": 212, "body": "VTKGNKUHMP"}, +{"name": "pymc145", "age": 44, "index": 145, "body": "VTKGNKUHMP"}, +{"name": "pymc204", "age": 2, "index": 204, "body": "VTKGNKUHMP"}, +{"name": "pymc649", "age": 43, "index": 649, "body": "VTKGNKUHMP"}, +{"name": "pymc373", "age": 70, "index": 373, "body": "VTKGNKUHMP"}, +{"name": "pymc297", "age": 95, "index": 297, "body": "VTKGNKUHMP"}, +{"name": "pymc920", "age": 11, "index": 920, "body": "VTKGNKUHMP"}, +{"name": "pymc3", "age": 3, "index": 3, "body": "VTKGNKUHMP"}, +{"name": "pymc20", "age": 20, "index": 20, "body": "VTKGNKUHMP"}, +{"name": "pymc63", "age": 63, "index": 63, "body": "VTKGNKUHMP"}, +{"name": "pymc924", "age": 15, "index": 924, "body": "VTKGNKUHMP"}, +{"name": "pymc0", "age": 0, "index": 0, "body": "VTKGNKUHMP"}, +{"name": "pymc629", "age": 23, "index": 629, "body": "VTKGNKUHMP"}, +{"name": "pymc873", "age": 65, "index": 873, "body": "VTKGNKUHMP"}, +{"name": "pymc375", "age": 72, "index": 375, "body": "VTKGNKUHMP"}, +{"name": "pymc822", "age": 14, "index": 822, "body": "VTKGNKUHMP"}, +{"name": "pymc439", "age": 35, "index": 439, "body": "VTKGNKUHMP"}, +{"name": "pymc696", "age": 90, "index": 696, "body": "VTKGNKUHMP"}, +{"name": "pymc429", "age": 25, "index": 429, "body": "VTKGNKUHMP"}, +{"name": "pymc959", "age": 50, "index": 959, "body": "VTKGNKUHMP"}, +{"name": "pymc220", "age": 18, "index": 220, "body": "VTKGNKUHMP"}, +{"name": "pymc831", "age": 23, "index": 831, "body": "VTKGNKUHMP"}, +{"name": "pymc368", "age": 65, "index": 368, "body": "VTKGNKUHMP"}, +{"name": "pymc207", "age": 5, "index": 207, "body": "VTKGNKUHMP"}, +{"name": "pymc131", "age": 30, "index": 131, "body": "VTKGNKUHMP"}, +{"name": "pymc211", "age": 9, "index": 211, "body": "VTKGNKUHMP"}, +{"name": "pymc819", "age": 11, "index": 819, "body": "VTKGNKUHMP"}, +{"name": "pymc185", "age": 84, "index": 185, "body": "VTKGNKUHMP"}, +{"name": "pymc758", "age": 51, "index": 758, "body": "VTKGNKUHMP"}, +{"name": "pymc945", "age": 36, "index": 945, "body": "VTKGNKUHMP"}, +{"name": "pymc266", "age": 64, "index": 266, "body": "VTKGNKUHMP"}, +{"name": "pymc818", "age": 10, "index": 818, "body": "VTKGNKUHMP"}, +{"name": "pymc275", "age": 73, "index": 275, "body": "VTKGNKUHMP"}, +{"name": "pymc142", "age": 41, "index": 142, "body": "VTKGNKUHMP"}, +{"name": "pymc159", "age": 58, "index": 159, "body": "VTKGNKUHMP"}, +{"name": "pymc409", "age": 5, "index": 409, "body": "VTKGNKUHMP"}, +{"name": "pymc82", 
"age": 82, "index": 82, "body": "VTKGNKUHMP"}, +{"name": "pymc748", "age": 41, "index": 748, "body": "VTKGNKUHMP"}, +{"name": "pymc154", "age": 53, "index": 154, "body": "VTKGNKUHMP"}, +{"name": "pymc44", "age": 44, "index": 44, "body": "VTKGNKUHMP"}, +{"name": "pymc929", "age": 20, "index": 929, "body": "VTKGNKUHMP"}, +{"name": "pymc325", "age": 22, "index": 325, "body": "VTKGNKUHMP"}, +{"name": "pymc24", "age": 24, "index": 24, "body": "VTKGNKUHMP"}, +{"name": "pymc11", "age": 11, "index": 11, "body": "VTKGNKUHMP"}, +{"name": "pymc808", "age": 0, "index": 808, "body": "VTKGNKUHMP"}, +{"name": "pymc811", "age": 3, "index": 811, "body": "VTKGNKUHMP"}, +{"name": "pymc689", "age": 83, "index": 689, "body": "VTKGNKUHMP"}, +{"name": "pymc237", "age": 35, "index": 237, "body": "VTKGNKUHMP"}, +{"name": "pymc226", "age": 24, "index": 226, "body": "VTKGNKUHMP"}, +{"name": "pymc769", "age": 62, "index": 769, "body": "VTKGNKUHMP"}, +{"name": "pymc856", "age": 48, "index": 856, "body": "VTKGNKUHMP"}, +{"name": "pymc912", "age": 3, "index": 912, "body": "VTKGNKUHMP"}, +{"name": "pymc605", "age": 100, "index": 605, "body": "VTKGNKUHMP"}, +{"name": "pymc667", "age": 61, "index": 667, "body": "VTKGNKUHMP"}, +{"name": "pymc313", "age": 10, "index": 313, "body": "VTKGNKUHMP"}, +{"name": "pymc486", "age": 82, "index": 486, "body": "VTKGNKUHMP"}, +{"name": "pymc302", "age": 100, "index": 302, "body": "VTKGNKUHMP"}, +{"name": "pymc655", "age": 49, "index": 655, "body": "VTKGNKUHMP"}, +{"name": "pymc717", "age": 10, "index": 717, "body": "VTKGNKUHMP"}, +{"name": "pymc398", "age": 95, "index": 398, "body": "VTKGNKUHMP"}, +{"name": "pymc553", "age": 48, "index": 553, "body": "VTKGNKUHMP"}, +{"name": "pymc994", "age": 85, "index": 994, "body": "VTKGNKUHMP"}, +{"name": "pymc495", "age": 91, "index": 495, "body": "VTKGNKUHMP"}, +{"name": "pymc518", "age": 13, "index": 518, "body": "VTKGNKUHMP"}, +{"name": "pymc391", "age": 88, "index": 391, "body": "VTKGNKUHMP"}, +{"name": "pymc636", "age": 30, "index": 636, "body": "VTKGNKUHMP"}, +{"name": "pymc625", "age": 19, "index": 625, "body": "VTKGNKUHMP"}, +{"name": "pymc441", "age": 37, "index": 441, "body": "VTKGNKUHMP"}, +{"name": "pymc622", "age": 16, "index": 622, "body": "VTKGNKUHMP"}, +{"name": "pymc542", "age": 37, "index": 542, "body": "VTKGNKUHMP"}, +{"name": "pymc990", "age": 81, "index": 990, "body": "VTKGNKUHMP"}, +{"name": "pymc607", "age": 1, "index": 607, "body": "VTKGNKUHMP"}, +{"name": "pymc523", "age": 18, "index": 523, "body": "VTKGNKUHMP"}, +{"name": "pymc996", "age": 87, "index": 996, "body": "VTKGNKUHMP"}, +{"name": "pymc533", "age": 28, "index": 533, "body": "VTKGNKUHMP"}, +{"name": "pymc412", "age": 8, "index": 412, "body": "VTKGNKUHMP"}, +{"name": "pymc909", "age": 0, "index": 909, "body": "VTKGNKUHMP"}, +{"name": "pymc642", "age": 36, "index": 642, "body": "VTKGNKUHMP"}, +{"name": "pymc710", "age": 3, "index": 710, "body": "VTKGNKUHMP"}, +{"name": "pymc319", "age": 16, "index": 319, "body": "VTKGNKUHMP"}, +{"name": "pymc904", "age": 96, "index": 904, "body": "VTKGNKUHMP"}, +{"name": "pymc776", "age": 69, "index": 776, "body": "VTKGNKUHMP"}, +{"name": "pymc550", "age": 45, "index": 550, "body": "VTKGNKUHMP"}, +{"name": "pymc907", "age": 99, "index": 907, "body": "VTKGNKUHMP"}, +{"name": "pymc676", "age": 70, "index": 676, "body": "VTKGNKUHMP"}, +{"name": "pymc492", "age": 88, "index": 492, "body": "VTKGNKUHMP"}, +{"name": "pymc402", "age": 99, "index": 402, "body": "VTKGNKUHMP"}, +{"name": "pymc531", "age": 26, "index": 531, "body": "VTKGNKUHMP"}, 
+{"name": "pymc631", "age": 25, "index": 631, "body": "VTKGNKUHMP"}, +{"name": "pymc652", "age": 46, "index": 652, "body": "VTKGNKUHMP"}, +{"name": "pymc987", "age": 78, "index": 987, "body": "VTKGNKUHMP"}, +{"name": "pymc640", "age": 34, "index": 640, "body": "VTKGNKUHMP"}, +{"name": "pymc308", "age": 5, "index": 308, "body": "VTKGNKUHMP"}, +{"name": "pymc650", "age": 44, "index": 650, "body": "VTKGNKUHMP"}, +{"name": "pymc670", "age": 64, "index": 670, "body": "VTKGNKUHMP"}, +{"name": "pymc340", "age": 37, "index": 340, "body": "VTKGNKUHMP"}, +{"name": "pymc992", "age": 83, "index": 992, "body": "VTKGNKUHMP"}, +{"name": "pymc517", "age": 12, "index": 517, "body": "VTKGNKUHMP"}, +{"name": "pymc519", "age": 14, "index": 519, "body": "VTKGNKUHMP"}, +{"name": "pymc490", "age": 86, "index": 490, "body": "VTKGNKUHMP"}, +{"name": "pymc602", "age": 97, "index": 602, "body": "VTKGNKUHMP"}, +{"name": "pymc624", "age": 18, "index": 624, "body": "VTKGNKUHMP"}, +{"name": "pymc425", "age": 21, "index": 425, "body": "VTKGNKUHMP"}, +{"name": "pymc343", "age": 40, "index": 343, "body": "VTKGNKUHMP"}, +{"name": "pymc452", "age": 48, "index": 452, "body": "VTKGNKUHMP"}, +{"name": "pymc610", "age": 4, "index": 610, "body": "VTKGNKUHMP"}, +{"name": "pymc421", "age": 17, "index": 421, "body": "VTKGNKUHMP"}, +{"name": "pymc627", "age": 21, "index": 627, "body": "VTKGNKUHMP"}, +{"name": "pymc773", "age": 66, "index": 773, "body": "VTKGNKUHMP"}, +{"name": "pymc481", "age": 77, "index": 481, "body": "VTKGNKUHMP"}, +{"name": "pymc615", "age": 9, "index": 615, "body": "VTKGNKUHMP"}, +{"name": "pymc411", "age": 7, "index": 411, "body": "VTKGNKUHMP"}, +{"name": "pymc318", "age": 15, "index": 318, "body": "VTKGNKUHMP"}, +{"name": "pymc390", "age": 87, "index": 390, "body": "VTKGNKUHMP"}, +{"name": "pymc991", "age": 82, "index": 991, "body": "VTKGNKUHMP"}, +{"name": "pymc546", "age": 41, "index": 546, "body": "VTKGNKUHMP"}, +{"name": "pymc616", "age": 10, "index": 616, "body": "VTKGNKUHMP"}, +{"name": "pymc763", "age": 56, "index": 763, "body": "VTKGNKUHMP"}, +{"name": "pymc403", "age": 100, "index": 403, "body": "VTKGNKUHMP"}, +{"name": "pymc664", "age": 58, "index": 664, "body": "VTKGNKUHMP"}, +{"name": "pymc521", "age": 16, "index": 521, "body": "VTKGNKUHMP"}, +{"name": "pymc525", "age": 20, "index": 525, "body": "VTKGNKUHMP"}, +{"name": "pymc910", "age": 1, "index": 910, "body": "VTKGNKUHMP"}, +{"name": "pymc614", "age": 8, "index": 614, "body": "VTKGNKUHMP"}, +{"name": "pymc547", "age": 42, "index": 547, "body": "VTKGNKUHMP"}, +{"name": "pymc656", "age": 50, "index": 656, "body": "VTKGNKUHMP"}, +{"name": "pymc764", "age": 57, "index": 764, "body": "VTKGNKUHMP"}, +{"name": "pymc494", "age": 90, "index": 494, "body": "VTKGNKUHMP"}, +{"name": "pymc314", "age": 11, "index": 314, "body": "VTKGNKUHMP"}, +{"name": "pymc482", "age": 78, "index": 482, "body": "VTKGNKUHMP"}, +{"name": "pymc524", "age": 19, "index": 524, "body": "VTKGNKUHMP"}, +{"name": "pymc311", "age": 8, "index": 311, "body": "VTKGNKUHMP"}, +{"name": "pymc454", "age": 50, "index": 454, "body": "VTKGNKUHMP"}, +{"name": "pymc312", "age": 9, "index": 312, "body": "VTKGNKUHMP"}, +{"name": "pymc766", "age": 59, "index": 766, "body": "VTKGNKUHMP"}, +{"name": "pymc789", "age": 82, "index": 789, "body": "VTKGNKUHMP"}, +{"name": "pymc446", "age": 42, "index": 446, "body": "VTKGNKUHMP"}, +{"name": "pymc612", "age": 6, "index": 612, "body": "VTKGNKUHMP"}, +{"name": "pymc620", "age": 14, "index": 620, "body": "VTKGNKUHMP"}, +{"name": "pymc509", "age": 4, "index": 
509, "body": "VTKGNKUHMP"}, +{"name": "pymc346", "age": 43, "index": 346, "body": "VTKGNKUHMP"}, +{"name": "pymc420", "age": 16, "index": 420, "body": "VTKGNKUHMP"}, +{"name": "pymc632", "age": 26, "index": 632, "body": "VTKGNKUHMP"}, +{"name": "pymc445", "age": 41, "index": 445, "body": "VTKGNKUHMP"}, +{"name": "pymc306", "age": 3, "index": 306, "body": "VTKGNKUHMP"}, +{"name": "pymc405", "age": 1, "index": 405, "body": "VTKGNKUHMP"}, +{"name": "pymc654", "age": 48, "index": 654, "body": "VTKGNKUHMP"}, +{"name": "pymc536", "age": 31, "index": 536, "body": "VTKGNKUHMP"}, +{"name": "pymc713", "age": 6, "index": 713, "body": "VTKGNKUHMP"}, +{"name": "pymc986", "age": 77, "index": 986, "body": "VTKGNKUHMP"}, +{"name": "pymc900", "age": 92, "index": 900, "body": "VTKGNKUHMP"}, +{"name": "pymc673", "age": 67, "index": 673, "body": "VTKGNKUHMP"}, +{"name": "pymc601", "age": 96, "index": 601, "body": "VTKGNKUHMP"}, +{"name": "pymc999", "age": 90, "index": 999, "body": "VTKGNKUHMP"}, +{"name": "pymc304", "age": 1, "index": 304, "body": "VTKGNKUHMP"}, +{"name": "pymc633", "age": 27, "index": 633, "body": "VTKGNKUHMP"}, +{"name": "pymc603", "age": 98, "index": 603, "body": "VTKGNKUHMP"}, +{"name": "pymc715", "age": 8, "index": 715, "body": "VTKGNKUHMP"}, +{"name": "pymc404", "age": 0, "index": 404, "body": "VTKGNKUHMP"}, +{"name": "pymc556", "age": 51, "index": 556, "body": "VTKGNKUHMP"}, +{"name": "pymc651", "age": 45, "index": 651, "body": "VTKGNKUHMP"}, +{"name": "pymc604", "age": 99, "index": 604, "body": "VTKGNKUHMP"}, +{"name": "pymc982", "age": 73, "index": 982, "body": "VTKGNKUHMP"}, +{"name": "pymc300", "age": 98, "index": 300, "body": "VTKGNKUHMP"}, +{"name": "pymc380", "age": 77, "index": 380, "body": "VTKGNKUHMP"}, +{"name": "pymc775", "age": 68, "index": 775, "body": "VTKGNKUHMP"}, +{"name": "pymc444", "age": 40, "index": 444, "body": "VTKGNKUHMP"}, +{"name": "pymc919", "age": 10, "index": 919, "body": "VTKGNKUHMP"}, +{"name": "pymc349", "age": 46, "index": 349, "body": "VTKGNKUHMP"}, +{"name": "pymc660", "age": 54, "index": 660, "body": "VTKGNKUHMP"}, +{"name": "pymc702", "age": 96, "index": 702, "body": "VTKGNKUHMP"}, +{"name": "pymc301", "age": 99, "index": 301, "body": "VTKGNKUHMP"}, +{"name": "pymc447", "age": 43, "index": 447, "body": "VTKGNKUHMP"}, +{"name": "pymc455", "age": 51, "index": 455, "body": "VTKGNKUHMP"}, +{"name": "pymc985", "age": 76, "index": 985, "body": "VTKGNKUHMP"}, +{"name": "pymc393", "age": 90, "index": 393, "body": "VTKGNKUHMP"}, +{"name": "pymc483", "age": 79, "index": 483, "body": "VTKGNKUHMP"}, +{"name": "pymc644", "age": 38, "index": 644, "body": "VTKGNKUHMP"}, +{"name": "pymc450", "age": 46, "index": 450, "body": "VTKGNKUHMP"}, +{"name": "pymc799", "age": 92, "index": 799, "body": "VTKGNKUHMP"}, +{"name": "pymc384", "age": 81, "index": 384, "body": "VTKGNKUHMP"}, +{"name": "pymc386", "age": 83, "index": 386, "body": "VTKGNKUHMP"}, +{"name": "pymc534", "age": 29, "index": 534, "body": "VTKGNKUHMP"}, +{"name": "pymc760", "age": 53, "index": 760, "body": "VTKGNKUHMP"}, +{"name": "pymc617", "age": 11, "index": 617, "body": "VTKGNKUHMP"}, +{"name": "pymc388", "age": 85, "index": 388, "body": "VTKGNKUHMP"}, +{"name": "pymc905", "age": 97, "index": 905, "body": "VTKGNKUHMP"}, +{"name": "pymc410", "age": 6, "index": 410, "body": "VTKGNKUHMP"}, +{"name": "pymc508", "age": 3, "index": 508, "body": "VTKGNKUHMP"}, +{"name": "pymc623", "age": 17, "index": 623, "body": "VTKGNKUHMP"}, +{"name": "pymc705", "age": 99, "index": 705, "body": "VTKGNKUHMP"}, +{"name": 
"pymc709", "age": 2, "index": 709, "body": "VTKGNKUHMP"}, +{"name": "pymc387", "age": 84, "index": 387, "body": "VTKGNKUHMP"}, +{"name": "pymc493", "age": 89, "index": 493, "body": "VTKGNKUHMP"}, +{"name": "pymc611", "age": 5, "index": 611, "body": "VTKGNKUHMP"}, +{"name": "pymc901", "age": 93, "index": 901, "body": "VTKGNKUHMP"}, +{"name": "pymc762", "age": 55, "index": 762, "body": "VTKGNKUHMP"}, +{"name": "pymc544", "age": 39, "index": 544, "body": "VTKGNKUHMP"}, +{"name": "pymc674", "age": 68, "index": 674, "body": "VTKGNKUHMP"}, +{"name": "pymc392", "age": 89, "index": 392, "body": "VTKGNKUHMP"}, +{"name": "pymc634", "age": 28, "index": 634, "body": "VTKGNKUHMP"}, +{"name": "pymc530", "age": 25, "index": 530, "body": "VTKGNKUHMP"}, +{"name": "pymc520", "age": 15, "index": 520, "body": "VTKGNKUHMP"}, +{"name": "pymc426", "age": 22, "index": 426, "body": "VTKGNKUHMP"}, +{"name": "pymc662", "age": 56, "index": 662, "body": "VTKGNKUHMP"}, +{"name": "pymc424", "age": 20, "index": 424, "body": "VTKGNKUHMP"}, +{"name": "pymc983", "age": 74, "index": 983, "body": "VTKGNKUHMP"}, +{"name": "pymc344", "age": 41, "index": 344, "body": "VTKGNKUHMP"}, +{"name": "pymc665", "age": 59, "index": 665, "body": "VTKGNKUHMP"}, +{"name": "pymc451", "age": 47, "index": 451, "body": "VTKGNKUHMP"}, +{"name": "pymc914", "age": 5, "index": 914, "body": "VTKGNKUHMP"}, +{"name": "pymc701", "age": 95, "index": 701, "body": "VTKGNKUHMP"}, +{"name": "pymc917", "age": 8, "index": 917, "body": "VTKGNKUHMP"}, +{"name": "pymc598", "age": 93, "index": 598, "body": "VTKGNKUHMP"}, +{"name": "pymc700", "age": 94, "index": 700, "body": "VTKGNKUHMP"}, +{"name": "pymc551", "age": 46, "index": 551, "body": "VTKGNKUHMP"}, +{"name": "pymc714", "age": 7, "index": 714, "body": "VTKGNKUHMP"}, +{"name": "pymc772", "age": 65, "index": 772, "body": "VTKGNKUHMP"}, +{"name": "pymc645", "age": 39, "index": 645, "body": "VTKGNKUHMP"}, +{"name": "pymc480", "age": 76, "index": 480, "body": "VTKGNKUHMP"}, +{"name": "pymc661", "age": 55, "index": 661, "body": "VTKGNKUHMP"}, +{"name": "pymc671", "age": 65, "index": 671, "body": "VTKGNKUHMP"}, +{"name": "pymc415", "age": 11, "index": 415, "body": "VTKGNKUHMP"}, +{"name": "pymc491", "age": 87, "index": 491, "body": "VTKGNKUHMP"}, +{"name": "pymc453", "age": 49, "index": 453, "body": "VTKGNKUHMP"}, +{"name": "pymc413", "age": 9, "index": 413, "body": "VTKGNKUHMP"}, +{"name": "pymc600", "age": 95, "index": 600, "body": "VTKGNKUHMP"}, +{"name": "pymc908", "age": 100, "index": 908, "body": "VTKGNKUHMP"}, +{"name": "pymc915", "age": 6, "index": 915, "body": "VTKGNKUHMP"}, +{"name": "pymc993", "age": 84, "index": 993, "body": "VTKGNKUHMP"}, +{"name": "pymc348", "age": 45, "index": 348, "body": "VTKGNKUHMP"}, +{"name": "pymc385", "age": 82, "index": 385, "body": "VTKGNKUHMP"}, +{"name": "pymc765", "age": 58, "index": 765, "body": "VTKGNKUHMP"}, +{"name": "pymc532", "age": 27, "index": 532, "body": "VTKGNKUHMP"}, +{"name": "pymc305", "age": 2, "index": 305, "body": "VTKGNKUHMP"}, +{"name": "pymc416", "age": 12, "index": 416, "body": "VTKGNKUHMP"}, +{"name": "pymc716", "age": 9, "index": 716, "body": "VTKGNKUHMP"}, +{"name": "pymc443", "age": 39, "index": 443, "body": "VTKGNKUHMP"}, +{"name": "pymc703", "age": 97, "index": 703, "body": "VTKGNKUHMP"}, +{"name": "pymc902", "age": 94, "index": 902, "body": "VTKGNKUHMP"}, +{"name": "pymc406", "age": 2, "index": 406, "body": "VTKGNKUHMP"}, +{"name": "pymc347", "age": 44, "index": 347, "body": "VTKGNKUHMP"}, +{"name": "pymc417", "age": 13, "index": 417, "body": 
"VTKGNKUHMP"}, +{"name": "pymc672", "age": 66, "index": 672, "body": "VTKGNKUHMP"}, +{"name": "pymc777", "age": 70, "index": 777, "body": "VTKGNKUHMP"}, +{"name": "pymc527", "age": 22, "index": 527, "body": "VTKGNKUHMP"}, +{"name": "pymc913", "age": 4, "index": 913, "body": "VTKGNKUHMP"}, +{"name": "pymc537", "age": 32, "index": 537, "body": "VTKGNKUHMP"}, +{"name": "pymc657", "age": 51, "index": 657, "body": "VTKGNKUHMP"}, +{"name": "pymc396", "age": 93, "index": 396, "body": "VTKGNKUHMP"}, +{"name": "pymc641", "age": 35, "index": 641, "body": "VTKGNKUHMP"}, +{"name": "pymc997", "age": 88, "index": 997, "body": "VTKGNKUHMP"}, +{"name": "pymc414", "age": 10, "index": 414, "body": "VTKGNKUHMP"}, +{"name": "pymc761", "age": 54, "index": 761, "body": "VTKGNKUHMP"}, +{"name": "pymc984", "age": 75, "index": 984, "body": "VTKGNKUHMP"}, +{"name": "pymc496", "age": 92, "index": 496, "body": "VTKGNKUHMP"}, +{"name": "pymc911", "age": 2, "index": 911, "body": "VTKGNKUHMP"}, +{"name": "pymc788", "age": 81, "index": 788, "body": "VTKGNKUHMP"}, +{"name": "pymc399", "age": 96, "index": 399, "body": "VTKGNKUHMP"}, +{"name": "pymc423", "age": 19, "index": 423, "body": "VTKGNKUHMP"}, +{"name": "pymc771", "age": 64, "index": 771, "body": "VTKGNKUHMP"}, +{"name": "pymc588", "age": 83, "index": 588, "body": "VTKGNKUHMP"}, +{"name": "pymc613", "age": 7, "index": 613, "body": "VTKGNKUHMP"}, +{"name": "pymc606", "age": 0, "index": 606, "body": "VTKGNKUHMP"}, +{"name": "pymc704", "age": 98, "index": 704, "body": "VTKGNKUHMP"}, +{"name": "pymc918", "age": 9, "index": 918, "body": "VTKGNKUHMP"}, +{"name": "pymc522", "age": 17, "index": 522, "body": "VTKGNKUHMP"}, +{"name": "pymc557", "age": 52, "index": 557, "body": "VTKGNKUHMP"}, +{"name": "pymc621", "age": 15, "index": 621, "body": "VTKGNKUHMP"}, +{"name": "pymc535", "age": 30, "index": 535, "body": "VTKGNKUHMP"}, +{"name": "pymc317", "age": 14, "index": 317, "body": "VTKGNKUHMP"}, +{"name": "pymc555", "age": 50, "index": 555, "body": "VTKGNKUHMP"}, +{"name": "pymc442", "age": 38, "index": 442, "body": "VTKGNKUHMP"}, +{"name": "pymc653", "age": 47, "index": 653, "body": "VTKGNKUHMP"}, +{"name": "pymc903", "age": 95, "index": 903, "body": "VTKGNKUHMP"}, +{"name": "pymc666", "age": 60, "index": 666, "body": "VTKGNKUHMP"}, +{"name": "pymc541", "age": 36, "index": 541, "body": "VTKGNKUHMP"}, +{"name": "pymc708", "age": 1, "index": 708, "body": "VTKGNKUHMP"}, +{"name": "pymc440", "age": 36, "index": 440, "body": "VTKGNKUHMP"}, +{"name": "pymc647", "age": 41, "index": 647, "body": "VTKGNKUHMP"}, +{"name": "pymc995", "age": 86, "index": 995, "body": "VTKGNKUHMP"}, +{"name": "pymc646", "age": 40, "index": 646, "body": "VTKGNKUHMP"}, +{"name": "pymc906", "age": 98, "index": 906, "body": "VTKGNKUHMP"}, +{"name": "pymc774", "age": 67, "index": 774, "body": "VTKGNKUHMP"}, +{"name": "pymc345", "age": 42, "index": 345, "body": "VTKGNKUHMP"}, +{"name": "pymc718", "age": 11, "index": 718, "body": "VTKGNKUHMP"}, +{"name": "pymc316", "age": 13, "index": 316, "body": "VTKGNKUHMP"}, +{"name": "pymc310", "age": 7, "index": 310, "body": "VTKGNKUHMP"}, +{"name": "pymc635", "age": 29, "index": 635, "body": "VTKGNKUHMP"}, +{"name": "pymc540", "age": 35, "index": 540, "body": "VTKGNKUHMP"}, +{"name": "pymc382", "age": 79, "index": 382, "body": "VTKGNKUHMP"}, +{"name": "pymc637", "age": 31, "index": 637, "body": "VTKGNKUHMP"}, +{"name": "pymc394", "age": 91, "index": 394, "body": "VTKGNKUHMP"}, +{"name": "pymc401", "age": 98, "index": 401, "body": "VTKGNKUHMP"}, +{"name": "pymc307", "age": 
4, "index": 307, "body": "VTKGNKUHMP"}, +{"name": "pymc342", "age": 39, "index": 342, "body": "VTKGNKUHMP"}, +{"name": "pymc341", "age": 38, "index": 341, "body": "VTKGNKUHMP"}, +{"name": "pymc407", "age": 3, "index": 407, "body": "VTKGNKUHMP"}, +{"name": "pymc706", "age": 100, "index": 706, "body": "VTKGNKUHMP"}, +{"name": "pymc552", "age": 47, "index": 552, "body": "VTKGNKUHMP"}, +{"name": "pymc397", "age": 94, "index": 397, "body": "VTKGNKUHMP"}, +{"name": "pymc643", "age": 37, "index": 643, "body": "VTKGNKUHMP"}, +{"name": "pymc381", "age": 78, "index": 381, "body": "VTKGNKUHMP"}, +{"name": "pymc989", "age": 80, "index": 989, "body": "VTKGNKUHMP"}, +{"name": "pymc767", "age": 60, "index": 767, "body": "VTKGNKUHMP"}, +{"name": "pymc484", "age": 80, "index": 484, "body": "VTKGNKUHMP"}, +{"name": "pymc988", "age": 79, "index": 988, "body": "VTKGNKUHMP"}, +{"name": "pymc497", "age": 93, "index": 497, "body": "VTKGNKUHMP"}, +{"name": "pymc630", "age": 24, "index": 630, "body": "VTKGNKUHMP"}, +{"name": "pymc599", "age": 94, "index": 599, "body": "VTKGNKUHMP"}, +{"name": "pymc626", "age": 20, "index": 626, "body": "VTKGNKUHMP"}, +{"name": "pymc457", "age": 53, "index": 457, "body": "VTKGNKUHMP"}, +{"name": "pymc998", "age": 89, "index": 998, "body": "VTKGNKUHMP"}, +{"name": "pymc981", "age": 72, "index": 981, "body": "VTKGNKUHMP"}, +{"name": "pymc719", "age": 12, "index": 719, "body": "VTKGNKUHMP"}, +{"name": "pymc545", "age": 40, "index": 545, "body": "VTKGNKUHMP"}, +{"name": "pymc554", "age": 49, "index": 554, "body": "VTKGNKUHMP"}, +{"name": "pymc315", "age": 12, "index": 315, "body": "VTKGNKUHMP"}, +{"name": "pymc303", "age": 0, "index": 303, "body": "VTKGNKUHMP"}, +{"name": "pymc795", "age": 88, "index": 795, "body": "VTKGNKUHMP"}, +{"name": "pymc461", "age": 57, "index": 461, "body": "VTKGNKUHMP"}, +{"name": "pymc516", "age": 11, "index": 516, "body": "VTKGNKUHMP"}, +{"name": "pymc470", "age": 66, "index": 470, "body": "VTKGNKUHMP"}, +{"name": "pymc707", "age": 0, "index": 707, "body": "VTKGNKUHMP"}, +{"name": "pymc712", "age": 5, "index": 712, "body": "VTKGNKUHMP"}, +{"name": "pymc798", "age": 91, "index": 798, "body": "VTKGNKUHMP"}, +{"name": "pymc389", "age": 86, "index": 389, "body": "VTKGNKUHMP"}, +{"name": "pymc505", "age": 0, "index": 505, "body": "VTKGNKUHMP"}, +{"name": "pymc400", "age": 97, "index": 400, "body": "VTKGNKUHMP"}, +{"name": "pymc573", "age": 68, "index": 573, "body": "VTKGNKUHMP"}, +{"name": "pymc422", "age": 18, "index": 422, "body": "VTKGNKUHMP"}, +{"name": "pymc711", "age": 4, "index": 711, "body": "VTKGNKUHMP"}, +{"name": "pymc663", "age": 57, "index": 663, "body": "VTKGNKUHMP"}, +{"name": "pymc797", "age": 90, "index": 797, "body": "VTKGNKUHMP"}, +{"name": "pymc980", "age": 71, "index": 980, "body": "VTKGNKUHMP"}, +{"name": "pymc916", "age": 7, "index": 916, "body": "VTKGNKUHMP"}, +{"name": "pymc582", "age": 77, "index": 582, "body": "VTKGNKUHMP"}, +{"name": "pymc456", "age": 52, "index": 456, "body": "VTKGNKUHMP"}, +{"name": "pymc427", "age": 23, "index": 427, "body": "VTKGNKUHMP"}, +{"name": "pymc309", "age": 6, "index": 309, "body": "VTKGNKUHMP"}, +{"name": "pymc502", "age": 98, "index": 502, "body": "VTKGNKUHMP"}, +{"name": "pymc592", "age": 87, "index": 592, "body": "VTKGNKUHMP"}, +{"name": "pymc770", "age": 63, "index": 770, "body": "VTKGNKUHMP"}, +{"name": "pymc432", "age": 28, "index": 432, "body": "VTKGNKUHMP"}, +{"name": "pymc589", "age": 84, "index": 589, "body": "VTKGNKUHMP"}, +{"name": "pymc500", "age": 96, "index": 500, "body": "VTKGNKUHMP"}, 
+{"name": "pymc792", "age": 85, "index": 792, "body": "VTKGNKUHMP"}, +{"name": "pymc431", "age": 27, "index": 431, "body": "VTKGNKUHMP"}, +{"name": "pymc395", "age": 92, "index": 395, "body": "VTKGNKUHMP"}, +{"name": "pymc780", "age": 73, "index": 780, "body": "VTKGNKUHMP"}, +{"name": "pymc485", "age": 81, "index": 485, "body": "VTKGNKUHMP"}, +{"name": "pymc675", "age": 69, "index": 675, "body": "VTKGNKUHMP"}, +{"name": "pymc511", "age": 6, "index": 511, "body": "VTKGNKUHMP"}, +{"name": "pymc785", "age": 78, "index": 785, "body": "VTKGNKUHMP"}, +{"name": "pymc581", "age": 76, "index": 581, "body": "VTKGNKUHMP"}, +{"name": "pymc543", "age": 38, "index": 543, "body": "VTKGNKUHMP"}, +{"name": "pymc572", "age": 67, "index": 572, "body": "VTKGNKUHMP"}, +{"name": "pymc786", "age": 79, "index": 786, "body": "VTKGNKUHMP"}, +{"name": "pymc593", "age": 88, "index": 593, "body": "VTKGNKUHMP"}, +{"name": "pymc784", "age": 77, "index": 784, "body": "VTKGNKUHMP"}, +{"name": "pymc504", "age": 100, "index": 504, "body": "VTKGNKUHMP"}, +{"name": "pymc466", "age": 62, "index": 466, "body": "VTKGNKUHMP"}, +{"name": "pymc512", "age": 7, "index": 512, "body": "VTKGNKUHMP"}, +{"name": "pymc463", "age": 59, "index": 463, "body": "VTKGNKUHMP"}, +{"name": "pymc460", "age": 56, "index": 460, "body": "VTKGNKUHMP"}, +{"name": "pymc383", "age": 80, "index": 383, "body": "VTKGNKUHMP"}, +{"name": "pymc782", "age": 75, "index": 782, "body": "VTKGNKUHMP"}, +{"name": "pymc434", "age": 30, "index": 434, "body": "VTKGNKUHMP"}, +{"name": "pymc474", "age": 70, "index": 474, "body": "VTKGNKUHMP"}, +{"name": "pymc595", "age": 90, "index": 595, "body": "VTKGNKUHMP"}, +{"name": "pymc791", "age": 84, "index": 791, "body": "VTKGNKUHMP"}, +{"name": "pymc476", "age": 72, "index": 476, "body": "VTKGNKUHMP"}, +{"name": "pymc793", "age": 86, "index": 793, "body": "VTKGNKUHMP"}, +{"name": "pymc594", "age": 89, "index": 594, "body": "VTKGNKUHMP"}, +{"name": "pymc794", "age": 87, "index": 794, "body": "VTKGNKUHMP"}, +{"name": "pymc472", "age": 68, "index": 472, "body": "VTKGNKUHMP"}, +{"name": "pymc562", "age": 57, "index": 562, "body": "VTKGNKUHMP"}, +{"name": "pymc473", "age": 69, "index": 473, "body": "VTKGNKUHMP"}, +{"name": "pymc571", "age": 66, "index": 571, "body": "VTKGNKUHMP"}, +{"name": "pymc513", "age": 8, "index": 513, "body": "VTKGNKUHMP"}, +{"name": "pymc566", "age": 61, "index": 566, "body": "VTKGNKUHMP"}, +{"name": "pymc564", "age": 59, "index": 564, "body": "VTKGNKUHMP"}, +{"name": "pymc787", "age": 80, "index": 787, "body": "VTKGNKUHMP"}, +{"name": "pymc580", "age": 75, "index": 580, "body": "VTKGNKUHMP"}, +{"name": "pymc565", "age": 60, "index": 565, "body": "VTKGNKUHMP"}, +{"name": "pymc503", "age": 99, "index": 503, "body": "VTKGNKUHMP"}, +{"name": "pymc781", "age": 74, "index": 781, "body": "VTKGNKUHMP"}, +{"name": "pymc570", "age": 65, "index": 570, "body": "VTKGNKUHMP"}, +{"name": "pymc436", "age": 32, "index": 436, "body": "VTKGNKUHMP"}, +{"name": "pymc510", "age": 5, "index": 510, "body": "VTKGNKUHMP"}, +{"name": "pymc585", "age": 80, "index": 585, "body": "VTKGNKUHMP"}, +{"name": "pymc501", "age": 97, "index": 501, "body": "VTKGNKUHMP"}, +{"name": "pymc583", "age": 78, "index": 583, "body": "VTKGNKUHMP"}, +{"name": "pymc790", "age": 83, "index": 790, "body": "VTKGNKUHMP"}, +{"name": "pymc567", "age": 62, "index": 567, "body": "VTKGNKUHMP"}, +{"name": "pymc477", "age": 73, "index": 477, "body": "VTKGNKUHMP"}, +{"name": "pymc586", "age": 81, "index": 586, "body": "VTKGNKUHMP"}, +{"name": "pymc596", "age": 91, 
"index": 596, "body": "VTKGNKUHMP"}, +{"name": "pymc435", "age": 31, "index": 435, "body": "VTKGNKUHMP"}, +{"name": "pymc587", "age": 82, "index": 587, "body": "VTKGNKUHMP"}, +{"name": "pymc574", "age": 69, "index": 574, "body": "VTKGNKUHMP"}, +{"name": "pymc584", "age": 79, "index": 584, "body": "VTKGNKUHMP"}, +{"name": "pymc506", "age": 1, "index": 506, "body": "VTKGNKUHMP"}, +{"name": "pymc783", "age": 76, "index": 783, "body": "VTKGNKUHMP"}, +{"name": "pymc796", "age": 89, "index": 796, "body": "VTKGNKUHMP"}, +{"name": "pymc465", "age": 61, "index": 465, "body": "VTKGNKUHMP"}, +{"name": "pymc437", "age": 33, "index": 437, "body": "VTKGNKUHMP"}, +{"name": "pymc464", "age": 60, "index": 464, "body": "VTKGNKUHMP"}, +{"name": "pymc462", "age": 58, "index": 462, "body": "VTKGNKUHMP"}, +{"name": "pymc560", "age": 55, "index": 560, "body": "VTKGNKUHMP"}, +{"name": "pymc576", "age": 71, "index": 576, "body": "VTKGNKUHMP"}, +{"name": "pymc591", "age": 86, "index": 591, "body": "VTKGNKUHMP"}, +{"name": "pymc475", "age": 71, "index": 475, "body": "VTKGNKUHMP"}, +{"name": "pymc430", "age": 26, "index": 430, "body": "VTKGNKUHMP"}, +{"name": "pymc433", "age": 29, "index": 433, "body": "VTKGNKUHMP"}, +{"name": "pymc597", "age": 92, "index": 597, "body": "VTKGNKUHMP"}, +{"name": "pymc575", "age": 70, "index": 575, "body": "VTKGNKUHMP"}, +{"name": "pymc563", "age": 58, "index": 563, "body": "VTKGNKUHMP"}, +{"name": "pymc514", "age": 9, "index": 514, "body": "VTKGNKUHMP"}, +{"name": "pymc507", "age": 2, "index": 507, "body": "VTKGNKUHMP"}, +{"name": "pymc467", "age": 63, "index": 467, "body": "VTKGNKUHMP"}, +{"name": "pymc471", "age": 67, "index": 471, "body": "VTKGNKUHMP"}, +{"name": "pymc590", "age": 85, "index": 590, "body": "VTKGNKUHMP"}, +{"name": "pymc577", "age": 72, "index": 577, "body": "VTKGNKUHMP"}, +{"name": "pymc515", "age": 10, "index": 515, "body": "VTKGNKUHMP"}, +{"name": "pymc561", "age": 56, "index": 561, "body": "VTKGNKUHMP"}] \ No newline at end of file diff --git a/resources/imex/json_list_1000_lines_invalid b/resources/imex/json_list_1000_lines_invalid new file mode 100644 index 000000000..43800f6d4 --- /dev/null +++ b/resources/imex/json_list_1000_lines_invalid @@ -0,0 +1,1000 @@ +[{"name": "pymc265", "age": 63, "index": 265, "body": "VTKGNKUHMP"}, +{"name": "pymc254", "age": 52, "index": 254, "body": "VTKGNKUHMP"}, +{"name": "pymc105", "age": 4, "index": 105, "body": "VTKGNKUHMP"}, +{"name":: "pymc95", "age": 95, "index": 95, "body": "VTKGNKUHMP"}, +{"name": "pymc75", "age": 75, "index": 75, "body": "VTKGNKUHMP"}, +{"name": "pymc882", "age": 74, "index": 882, "body": "VTKGNKUHMP"}, +{"name": "pymc285", "age": 83, "index": 285, "body": "VTKGNKUHMP"}, +{"name": "pymc293", "age": 91, "index": 293, "body": "VTKGNKUHMP"}, +{"name": "pymc750", "age": 43, "index": 750, "body": "VTKGNKUHMP"}, +{"name": "pymc68", "age": 68, "index": 68, "body": "VTKGNKUHMP"}, +{"name": "pymc177", "age": 76, "index": 177, "body": "VTKGNKUHMP"}, +{"name": "pymc115", "age": 14, "index": 115, "body": "VTKGNKUHMP"}, +{"name": "pymc479", "age": 75, "index": 479, "body": "VTKGNKUHMP"}, +{"name": "pymc877", "age": 69, "index": 877, "body": "VTKGNKUHMP"}, +{"name": "pymc283", "age": 81, "index": 283, "body": "VTKGNKUHMP"}, +{"name": "pymc741", "age": 34, "index": 741, "body": "VTKGNKUHMP"}, +{"name": "pymc923", "age": 14, "index": 923, "body": "VTKGNKUHMP"}, +{"name": "pymc264", "age": 62, "index": 264, "body": "VTKGNKUHMP"}, +{"name": "pymc109", "age": 8, "index": 109, "body": "VTKGNKUHMP"}, +{"name": "pymc90", 
"age": 90, "index": 90, "body": "VTKGNKUHMP"}, +{"name": "pymc952", "age": 43, "index": 952, "body": "VTKGNKUHMP"}, +{"name": "pymc779", "age": 72, "index": 779, "body": "VTKGNKUHMP"}, +{"name": "pymc778", "age": 71, "index": 778, "body": "VTKGNKUHMP"}, +{"name": "pymc292", "age": 90, "index": 292, "body": "VTKGNKUHMP"}, +{"name": "pymc697", "age": 91, "index": 697, "body": "VTKGNKUHMP"}, +{"name": "pymc957", "age": 48, "index": 957, "body": "VTKGNKUHMP"}, +{"name": "pymc756", "age": 49, "index": 756, "body": "VTKGNKUHMP"}, +{"name": "pymc682", "age": 76, "index": 682, "body": "VTKGNKUHMP"}, +{"name": "pymc149", "age": 48, "index": 149, "body": "VTKGNKUHMP"}, +{"name": "pymc608", "age": 2, "index": 608, "body": "VTKGNKUHMP"}, +{"name": "pymc127", "age": 26, "index": 127, "body": "VTKGNKUHMP"}, +{"name": "pymc69", "age": 69, "index": 69, "body": "VTKGNKUHMP"}, +{"name": "pymc932", "age": 23, "index": 932, "body": "VTKGNKUHMP"}, +{"name": "pymc735", "age": 28, "index": 735, "body": "VTKGNKUHMP"}, +{"name": "pymc29", "age": 29, "index": 29, "body": "VTKGNKUHMP"}, +{"name": "pymc687", "age": 81, "index": 687, "body": "VTKGNKUHMP"}, +{"name": "pymc579", "age": 74, "index": 579, "body": "VTKGNKUHMP"}, +{"name": "pymc208", "age": 6, "index": 208, "body": "VTKGNKUHMP"}, +{"name": "pymc228", "age": 26, "index": 228, "body": "VTKGNKUHMP"}, +{"name": "pymc965", "age": 56, "index": 965, "body": "VTKGNKUHMP"}, +{"name": "pymc219", "age": 17, "index": 219, "body": "VTKGNKUHMP"}, +{"name": "pymc747", "age": 40, "index": 747, "body": "VTKGNKUHMP"}, +{"name": "pymc221", "age": 19, "index": 221, "body": "VTKGNKUHMP"}, +{"name": "pymc248", "age": 46, "index": 248, "body": "VTKGNKUHMP"}, +{"name": "pymc859", "age": 51, "index": 859, "body": "VTKGNKUHMP"}, +{"name": "pymc438", "age": 34, "index": 438, "body": "VTKGNKUHMP"}, +{"name": "pymc834", "age": 26, "index": 834, "body": "VTKGNKUHMP"}, +{"name": "pymc199", "age": 98, "index": 199, "body": "VTKGNKUHMP"}, +{"name": "pymc139", "age": 38, "index": 139, "body": "VTKGNKUHMP"}, +{"name": "pymc825", "age": 17, "index": 825, "body": "VTKGNKUHMP"}, +{"name": "pymc56", "age": 56, "index": 56, "body": "VTKGNKUHMP"}, +{"name": "pymc804", "age": 97, "index": 804, "body": "VTKGNKUHMP"}, +{"name": "pymc824", "age": 16, "index": 824, "body": "VTKGNKUHMP"}, +{"name": "pymc277", "age": 75, "index": 277, "body": "VTKGNKUHMP"}, +{"name": "pymc866", "age": 58, "index": 866, "body": "VTKGNKUHMP"}, +{"name": "pymc488", "age": 84, "index": 488, "body": "VTKGNKUHMP"}, +{"name": "pymc837", "age": 29, "index": 837, "body": "VTKGNKUHMP"}, +{"name": "pymc744", "age": 37, "index": 744, "body": "VTKGNKUHMP"}, +{"name": "pymc853", "age": 45, "index": 853, "body": "VTKGNKUHMP"}, +{"name": "pymc372", "age": 69, "index": 372, "body": "VTKGNKUHMP"}, +{"name": "pymc201", "age": 100, "index": 201, "body": "VTKGNKUHMP"}, +{"name": "pymc144", "age": 43, "index": 144, "body": "VTKGNKUHMP"}, +{"name": "pymc678", "age": 72, "index": 678, "body": "VTKGNKUHMP"}, +{"name": "pymc369", "age": 66, "index": 369, "body": "VTKGNKUHMP"}, +{"name": "pymc157", "age": 56, "index": 157, "body": "VTKGNKUHMP"}, +{"name": "pymc258", "age": 56, "index": 258, "body": "VTKGNKUHMP"}, +{"name": "pymc350", "age": 47, "index": 350, "body": "VTKGNKUHMP"}, +{"name": "pymc681", "age": 75, "index": 681, "body": "VTKGNKUHMP"}, +{"name": "pymc962", "age": 53, "index": 962, "body": "VTKGNKUHMP"}, +{"name": "pymc333", "age": 30, "index": 333, "body": "VTKGNKUHMP"}, +{"name": "pymc188", "age": 87, "index": 188, "body": 
"VTKGNKUHMP"}, +{"name": "pymc21", "age": 21, "index": 21, "body": "VTKGNKUHMP"}, +{"name": "pymc150", "age": 49, "index": 150, "body": "VTKGNKUHMP"}, +{"name": "pymc230", "age": 28, "index": 230, "body": "VTKGNKUHMP"}, +{"name": "pymc800", "age": 93, "index": 800, "body": "VTKGNKUHMP"}, +{"name": "pymc111", "age": 10, "index": 111, "body": "VTKGNKUHMP"}, +{"name": "pymc175", "age": 74, "index": 175, "body": "VTKGNKUHMP"}, +{"name": "pymc810", "age": 2, "index": 810, "body": "VTKGNKUHMP"}, +{"name": "pymc458", "age": 54, "index": 458, "body": "VTKGNKUHMP"}, +{"name": "pymc251", "age": 49, "index": 251, "body": "VTKGNKUHMP"}, +{"name": "pymc46", "age": 46, "index": 46, "body": "VTKGNKUHMP"}, +{"name": "pymc806", "age": 99, "index": 806, "body": "VTKGNKUHMP"}, +{"name": "pymc71", "age": 71, "index": 71, "body": "VTKGNKUHMP"}, +{"name": "pymc241", "age": 39, "index": 241, "body": "VTKGNKUHMP"}, +{"name": "pymc728", "age": 21, "index": 728, "body": "VTKGNKUHMP"}, +{"name": "pymc25", "age": 25, "index": 25, "body": "VTKGNKUHMP"}, +{"name": "pymc886", "age": 78, "index": 886, "body": "VTKGNKUHMP"}, +{"name": "pymc73", "age": 73, "index": 73, "body": "VTKGNKUHMP"}, +{"name": "pymc360", "age": 57, "index": 360, "body": "VTKGNKUHMP"}, +{"name": "pymc899", "age": 91, "index": 899, "body": "VTKGNKUHMP"}, +{"name": "pymc979", "age": 70, "index": 979, "body": "VTKGNKUHMP"}, +{"name": "pymc178", "age": 77, "index": 178, "body": "VTKGNKUHMP"}, +{"name": "pymc84", "age": 84, "index": 84, "body": "VTKGNKUHMP"}, +{"name": "pymc130", "age": 29, "index": 130, "body": "VTKGNKUHMP"}, +{"name": "pymc324", "age": 21, "index": 324, "body": "VTKGNKUHMP"}, +{"name": "pymc355", "age": 52, "index": 355, "body": "VTKGNKUHMP"}, +{"name": "pymc680", "age": 74, "index": 680, "body": "VTKGNKUHMP"}, +{"name": "pymc842", "age": 34, "index": 842, "body": "VTKGNKUHMP"}, +{"name": "pymc289", "age": 87, "index": 289, "body": "VTKGNKUHMP"}, +{"name": "pymc729", "age": 22, "index": 729, "body": "VTKGNKUHMP"}, +{"name": "pymc299", "age": 97, "index": 299, "body": "VTKGNKUHMP"}, +{"name": "pymc894", "age": 86, "index": 894, "body": "VTKGNKUHMP"}, +{"name": "pymc639", "age": 33, "index": 639, "body": "VTKGNKUHMP"}, +{"name": "pymc969", "age": 60, "index": 969, "body": "VTKGNKUHMP"}, +{"name": "pymc194", "age": 93, "index": 194, "body": "VTKGNKUHMP"}, +{"name": "pymc96", "age": 96, "index": 96, "body": "VTKGNKUHMP"}, +{"name": "pymc22", "age": 22, "index": 22, "body": "VTKGNKUHMP"}, +{"name": "pymc529", "age": 24, "index": 529, "body": "VTKGNKUHMP"}, +{"name": "pymc865", "age": 57, "index": 865, "body": "VTKGNKUHMP"}, +{"name": "pymc222", "age": 20, "index": 222, "body": "VTKGNKUHMP"}, +{"name": "pymc98", "age": 98, "index": 98, "body": "VTKGNKUHMP"}, +{"name": "pymc17", "age": 17, "index": 17, "body": "VTKGNKUHMP"}, +{"name": "pymc32", "age": 32, "index": 32, "body": "VTKGNKUHMP"}, +{"name": "pymc870", "age": 62, "index": 870, "body": "VTKGNKUHMP"}, +{"name": "pymc19", "age": 19, "index": 19, "body": "VTKGNKUHMP"}, +{"name": "pymc897", "age": 89, "index": 897, "body": "VTKGNKUHMP"}, +{"name": "pymc104", "age": 3, "index": 104, "body": "VTKGNKUHMP"}, +{"name": "pymc469", "age": 65, "index": 469, "body": "VTKGNKUHMP"}, +{"name": "pymc830", "age": 22, "index": 830, "body": "VTKGNKUHMP"}, +{"name": "pymc53", "age": 53, "index": 53, "body": "VTKGNKUHMP"}, +{"name": "pymc48", "age": 48, "index": 48, "body": "VTKGNKUHMP"}, +{"name": "pymc158", "age": 57, "index": 158, "body": "VTKGNKUHMP"}, +{"name": "pymc270", "age": 68, "index": 270, 
"body": "VTKGNKUHMP"}, +{"name": "pymc869", "age": 61, "index": 869, "body": "VTKGNKUHMP"}, +{"name": "pymc847", "age": 39, "index": 847, "body": "VTKGNKUHMP"}, +{"name": "pymc370", "age": 67, "index": 370, "body": "VTKGNKUHMP"}, +{"name": "pymc243", "age": 41, "index": 243, "body": "VTKGNKUHMP"}, +{"name": "pymc143", "age": 42, "index": 143, "body": "VTKGNKUHMP"}, +{"name": "pymc120", "age": 19, "index": 120, "body": "VTKGNKUHMP"}, +{"name": "pymc817", "age": 9, "index": 817, "body": "VTKGNKUHMP"}, +{"name": "pymc132", "age": 31, "index": 132, "body": "VTKGNKUHMP"}, +{"name": "pymc298", "age": 96, "index": 298, "body": "VTKGNKUHMP"}, +{"name": "pymc449", "age": 45, "index": 449, "body": "VTKGNKUHMP"}, +{"name": "pymc122", "age": 21, "index": 122, "body": "VTKGNKUHMP"}, +{"name": "pymc50", "age": 50, "index": 50, "body": "VTKGNKUHMP"}, +{"name": "pymc192", "age": 91, "index": 192, "body": "VTKGNKUHMP"}, +{"name": "pymc720", "age": 13, "index": 720, "body": "VTKGNKUHMP"}, +{"name": "pymc971", "age": 62, "index": 971, "body": "VTKGNKUHMP"}, +{"name": "pymc51", "age": 51, "index": 51, "body": "VTKGNKUHMP"}, +{"name": "pymc102", "age": 1, "index": 102, "body": "VTKGNKUHMP"}, +{"name": "pymc34", "age": 34, "index": 34, "body": "VTKGNKUHMP"}, +{"name": "pymc88", "age": 88, "index": 88, "body": "VTKGNKUHMP"}, +{"name": "pymc1", "age": 1, "index": 1, "body": "VTKGNKUHMP"}, +{"name": "pymc802", "age": 95, "index": 802, "body": "VTKGNKUHMP"}, +{"name": "pymc976", "age": 67, "index": 976, "body": "VTKGNKUHMP"}, +{"name": "pymc734", "age": 27, "index": 734, "body": "VTKGNKUHMP"}, +{"name": "pymc39", "age": 39, "index": 39, "body": "VTKGNKUHMP"}, +{"name": "pymc419", "age": 15, "index": 419, "body": "VTKGNKUHMP"}, +{"name": "pymc487", "age": 83, "index": 487, "body": "VTKGNKUHMP"}, +{"name": "pymc526", "age": 21, "index": 526, "body": "VTKGNKUHMP"}, +{"name": "pymc677", "age": 71, "index": 677, "body": "VTKGNKUHMP"}, +{"name": "pymc489", "age": 85, "index": 489, "body": "VTKGNKUHMP"}, +{"name": "pymc49", "age": 49, "index": 49, "body": "VTKGNKUHMP"}, +{"name": "pymc30", "age": 30, "index": 30, "body": "VTKGNKUHMP"}, +{"name": "pymc272", "age": 70, "index": 272, "body": "VTKGNKUHMP"}, +{"name": "pymc170", "age": 69, "index": 170, "body": "VTKGNKUHMP"}, +{"name": "pymc832", "age": 24, "index": 832, "body": "VTKGNKUHMP"}, +{"name": "pymc156", "age": 55, "index": 156, "body": "VTKGNKUHMP"}, +{"name": "pymc746", "age": 39, "index": 746, "body": "VTKGNKUHMP"}, +{"name": "pymc723", "age": 16, "index": 723, "body": "VTKGNKUHMP"}, +{"name": "pymc357", "age": 54, "index": 357, "body": "VTKGNKUHMP"}, +{"name": "pymc568", "age": 63, "index": 568, "body": "VTKGNKUHMP"}, +{"name": "pymc2", "age": 2, "index": 2, "body": "VTKGNKUHMP"}, +{"name": "pymc66", "age": 66, "index": 66, "body": "VTKGNKUHMP"}, +{"name": "pymc320", "age": 17, "index": 320, "body": "VTKGNKUHMP"}, +{"name": "pymc845", "age": 37, "index": 845, "body": "VTKGNKUHMP"}, +{"name": "pymc731", "age": 24, "index": 731, "body": "VTKGNKUHMP"}, +{"name": "pymc205", "age": 3, "index": 205, "body": "VTKGNKUHMP"}, +{"name": "pymc162", "age": 61, "index": 162, "body": "VTKGNKUHMP"}, +{"name": "pymc239", "age": 37, "index": 239, "body": "VTKGNKUHMP"}, +{"name": "pymc70", "age": 70, "index": 70, "body": "VTKGNKUHMP"}, +{"name": "pymc740", "age": 33, "index": 740, "body": "VTKGNKUHMP"}, +{"name": "pymc578", "age": 73, "index": 578, "body": "VTKGNKUHMP"}, +{"name": "pymc169", "age": 68, "index": 169, "body": "VTKGNKUHMP"}, +{"name": "pymc86", "age": 86, "index": 86, 
"body": "VTKGNKUHMP"}, +{"name": "pymc99", "age": 99, "index": 99, "body": "VTKGNKUHMP"}, +{"name": "pymc249", "age": 47, "index": 249, "body": "VTKGNKUHMP"}, +{"name": "pymc977", "age": 68, "index": 977, "body": "VTKGNKUHMP"}, +{"name": "pymc353", "age": 50, "index": 353, "body": "VTKGNKUHMP"}, +{"name": "pymc267", "age": 65, "index": 267, "body": "VTKGNKUHMP"}, +{"name": "pymc931", "age": 22, "index": 931, "body": "VTKGNKUHMP"}, +{"name": "pymc538", "age": 33, "index": 538, "body": "VTKGNKUHMP"}, +{"name": "pymc958", "age": 49, "index": 958, "body": "VTKGNKUHMP"}, +{"name": "pymc753", "age": 46, "index": 753, "body": "VTKGNKUHMP"}, +{"name": "pymc754", "age": 47, "index": 754, "body": "VTKGNKUHMP"}, +{"name": "pymc155", "age": 54, "index": 155, "body": "VTKGNKUHMP"}, +{"name": "pymc736", "age": 29, "index": 736, "body": "VTKGNKUHMP"}, +{"name": "pymc191", "age": 90, "index": 191, "body": "VTKGNKUHMP"}, +{"name": "pymc559", "age": 54, "index": 559, "body": "VTKGNKUHMP"}, +{"name": "pymc726", "age": 19, "index": 726, "body": "VTKGNKUHMP"}, +{"name": "pymc107", "age": 6, "index": 107, "body": "VTKGNKUHMP"}, +{"name": "pymc57", "age": 57, "index": 57, "body": "VTKGNKUHMP"}, +{"name": "pymc827", "age": 19, "index": 827, "body": "VTKGNKUHMP"}, +{"name": "pymc379", "age": 76, "index": 379, "body": "VTKGNKUHMP"}, +{"name": "pymc128", "age": 27, "index": 128, "body": "VTKGNKUHMP"}, +{"name": "pymc857", "age": 49, "index": 857, "body": "VTKGNKUHMP"}, +{"name": "pymc263", "age": 61, "index": 263, "body": "VTKGNKUHMP"}, +{"name": "pymc259", "age": 57, "index": 259, "body": "VTKGNKUHMP"}, +{"name": "pymc167", "age": 66, "index": 167, "body": "VTKGNKUHMP"}, +{"name": "pymc363", "age": 60, "index": 363, "body": "VTKGNKUHMP"}, +{"name": "pymc288", "age": 86, "index": 288, "body": "VTKGNKUHMP"}, +{"name": "pymc231", "age": 29, "index": 231, "body": "VTKGNKUHMP"}, +{"name": "pymc200", "age": 99, "index": 200, "body": "VTKGNKUHMP"}, +{"name": "pymc941", "age": 32, "index": 941, "body": "VTKGNKUHMP"}, +{"name": "pymc801", "age": 94, "index": 801, "body": "VTKGNKUHMP"}, +{"name": "pymc14", "age": 14, "index": 14, "body": "VTKGNKUHMP"}, +{"name": "pymc679", "age": 73, "index": 679, "body": "VTKGNKUHMP"}, +{"name": "pymc328", "age": 25, "index": 328, "body": "VTKGNKUHMP"}, +{"name": "pymc129", "age": 28, "index": 129, "body": "VTKGNKUHMP"}, +{"name": "pymc448", "age": 44, "index": 448, "body": "VTKGNKUHMP"}, +{"name": "pymc234", "age": 32, "index": 234, "body": "VTKGNKUHMP"}, +{"name": "pymc164", "age": 63, "index": 164, "body": "VTKGNKUHMP"}, +{"name": "pymc181", "age": 80, "index": 181, "body": "VTKGNKUHMP"}, +{"name": "pymc849", "age": 41, "index": 849, "body": "VTKGNKUHMP"}, +{"name": "pymc108", "age": 7, "index": 108, "body": "VTKGNKUHMP"}, +{"name": "pymc371", "age": 68, "index": 371, "body": "VTKGNKUHMP"}, +{"name": "pymc939", "age": 30, "index": 939, "body": "VTKGNKUHMP"}, +{"name": "pymc116", "age": 15, "index": 116, "body": "VTKGNKUHMP"}, +{"name": "pymc47", "age": 47, "index": 47, "body": "VTKGNKUHMP"}, +{"name": "pymc253", "age": 51, "index": 253, "body": "VTKGNKUHMP"}, +{"name": "pymc950", "age": 41, "index": 950, "body": "VTKGNKUHMP"}, +{"name": "pymc722", "age": 15, "index": 722, "body": "VTKGNKUHMP"}, +{"name": "pymc826", "age": 18, "index": 826, "body": "VTKGNKUHMP"}, +{"name": "pymc238", "age": 36, "index": 238, "body": "VTKGNKUHMP"}, +{"name": "pymc257", "age": 55, "index": 257, "body": "VTKGNKUHMP"}, +{"name": "pymc880", "age": 72, "index": 880, "body": "VTKGNKUHMP"}, +{"name": "pymc166", 
"age": 65, "index": 166, "body": "VTKGNKUHMP"}, +{"name": "pymc727", "age": 20, "index": 727, "body": "VTKGNKUHMP"}, +{"name": "pymc751", "age": 44, "index": 751, "body": "VTKGNKUHMP"}, +{"name": "pymc97", "age": 97, "index": 97, "body": "VTKGNKUHMP"}, +{"name": "pymc235", "age": 33, "index": 235, "body": "VTKGNKUHMP"}, +{"name": "pymc951", "age": 42, "index": 951, "body": "VTKGNKUHMP"}, +{"name": "pymc278", "age": 76, "index": 278, "body": "VTKGNKUHMP"}, +{"name": "pymc80", "age": 80, "index": 80, "body": "VTKGNKUHMP"}, +{"name": "pymc124", "age": 23, "index": 124, "body": "VTKGNKUHMP"}, +{"name": "pymc956", "age": 47, "index": 956, "body": "VTKGNKUHMP"}, +{"name": "pymc872", "age": 64, "index": 872, "body": "VTKGNKUHMP"}, +{"name": "pymc948", "age": 39, "index": 948, "body": "VTKGNKUHMP"}, +{"name": "pymc186", "age": 85, "index": 186, "body": "VTKGNKUHMP"}, +{"name": "pymc947", "age": 38, "index": 947, "body": "VTKGNKUHMP"}, +{"name": "pymc276", "age": 74, "index": 276, "body": "VTKGNKUHMP"}, +{"name": "pymc59", "age": 59, "index": 59, "body": "VTKGNKUHMP"}, +{"name": "pymc168", "age": 67, "index": 168, "body": "VTKGNKUHMP"}, +{"name": "pymc284", "age": 82, "index": 284, "body": "VTKGNKUHMP"}, +{"name": "pymc260", "age": 58, "index": 260, "body": "VTKGNKUHMP"}, +{"name": "pymc118", "age": 17, "index": 118, "body": "VTKGNKUHMP"}, +{"name": "pymc83", "age": 83, "index": 83, "body": "VTKGNKUHMP"}, +{"name": "pymc148", "age": 47, "index": 148, "body": "VTKGNKUHMP"}, +{"name": "pymc256", "age": 54, "index": 256, "body": "VTKGNKUHMP"}, +{"name": "pymc281", "age": 79, "index": 281, "body": "VTKGNKUHMP"}, +{"name": "pymc26", "age": 26, "index": 26, "body": "VTKGNKUHMP"}, +{"name": "pymc757", "age": 50, "index": 757, "body": "VTKGNKUHMP"}, +{"name": "pymc863", "age": 55, "index": 863, "body": "VTKGNKUHMP"}, +{"name": "pymc879", "age": 71, "index": 879, "body": "VTKGNKUHMP"}, +{"name": "pymc326", "age": 23, "index": 326, "body": "VTKGNKUHMP"}, +{"name": "pymc282", "age": 80, "index": 282, "body": "VTKGNKUHMP"}, +{"name": "pymc184", "age": 83, "index": 184, "body": "VTKGNKUHMP"}, +{"name": "pymc619", "age": 13, "index": 619, "body": "VTKGNKUHMP"}, +{"name": "pymc334", "age": 31, "index": 334, "body": "VTKGNKUHMP"}, +{"name": "pymc213", "age": 11, "index": 213, "body": "VTKGNKUHMP"}, +{"name": "pymc92", "age": 92, "index": 92, "body": "VTKGNKUHMP"}, +{"name": "pymc160", "age": 59, "index": 160, "body": "VTKGNKUHMP"}, +{"name": "pymc152", "age": 51, "index": 152, "body": "VTKGNKUHMP"}, +{"name": "pymc499", "age": 95, "index": 499, "body": "VTKGNKUHMP"}, +{"name": "pymc721", "age": 14, "index": 721, "body": "VTKGNKUHMP"}, +{"name": "pymc7", "age": 7, "index": 7, "body": "VTKGNKUHMP"}, +{"name": "pymc352", "age": 49, "index": 352, "body": "VTKGNKUHMP"}, +{"name": "pymc478", "age": 74, "index": 478, "body": "VTKGNKUHMP"}, +{"name": "pymc140", "age": 39, "index": 140, "body": "VTKGNKUHMP"}, +{"name": "pymc850", "age": 42, "index": 850, "body": "VTKGNKUHMP"}, +{"name": "pymc833", "age": 25, "index": 833, "body": "VTKGNKUHMP"}, +{"name": "pymc179", "age": 78, "index": 179, "body": "VTKGNKUHMP"}, +{"name": "pymc337", "age": 34, "index": 337, "body": "VTKGNKUHMP"}, +{"name": "pymc759", "age": 52, "index": 759, "body": "VTKGNKUHMP"}, +{"name": "pymc821", "age": 13, "index": 821, "body": "VTKGNKUHMP"}, +{"name": "pymc949", "age": 40, "index": 949, "body": "VTKGNKUHMP"}, +{"name": "pymc296", "age": 94, "index": 296, "body": "VTKGNKUHMP"}, +{"name": "pymc848", "age": 40, "index": 848, "body": "VTKGNKUHMP"}, 
+{"name": "pymc23", "age": 23, "index": 23, "body": "VTKGNKUHMP"}, +{"name": "pymc91", "age": 91, "index": 91, "body": "VTKGNKUHMP"}, +{"name": "pymc942", "age": 33, "index": 942, "body": "VTKGNKUHMP"}, +{"name": "pymc42", "age": 42, "index": 42, "body": "VTKGNKUHMP"}, +{"name": "pymc843", "age": 35, "index": 843, "body": "VTKGNKUHMP"}, +{"name": "pymc749", "age": 42, "index": 749, "body": "VTKGNKUHMP"}, +{"name": "pymc377", "age": 74, "index": 377, "body": "VTKGNKUHMP"}, +{"name": "pymc126", "age": 25, "index": 126, "body": "VTKGNKUHMP"}, +{"name": "pymc244", "age": 42, "index": 244, "body": "VTKGNKUHMP"}, +{"name": "pymc683", "age": 77, "index": 683, "body": "VTKGNKUHMP"}, +{"name": "pymc816", "age": 8, "index": 816, "body": "VTKGNKUHMP"}, +{"name": "pymc890", "age": 82, "index": 890, "body": "VTKGNKUHMP"}, +{"name": "pymc944", "age": 35, "index": 944, "body": "VTKGNKUHMP"}, +{"name": "pymc101", "age": 0, "index": 101, "body": "VTKGNKUHMP"}, +{"name": "pymc173", "age": 72, "index": 173, "body": "VTKGNKUHMP"}, +{"name": "pymc809", "age": 1, "index": 809, "body": "VTKGNKUHMP"}, +{"name": "pymc294", "age": 92, "index": 294, "body": "VTKGNKUHMP"}, +{"name": "pymc141", "age": 40, "index": 141, "body": "VTKGNKUHMP"}, +{"name": "pymc739", "age": 32, "index": 739, "body": "VTKGNKUHMP"}, +{"name": "pymc732", "age": 25, "index": 732, "body": "VTKGNKUHMP"}, +{"name": "pymc171", "age": 70, "index": 171, "body": "VTKGNKUHMP"}, +{"name": "pymc685", "age": 79, "index": 685, "body": "VTKGNKUHMP"}, +{"name": "pymc376", "age": 73, "index": 376, "body": "VTKGNKUHMP"}, +{"name": "pymc65", "age": 65, "index": 65, "body": "VTKGNKUHMP"}, +{"name": "pymc940", "age": 31, "index": 940, "body": "VTKGNKUHMP"}, +{"name": "pymc224", "age": 22, "index": 224, "body": "VTKGNKUHMP"}, +{"name": "pymc123", "age": 22, "index": 123, "body": "VTKGNKUHMP"}, +{"name": "pymc176", "age": 75, "index": 176, "body": "VTKGNKUHMP"}, +{"name": "pymc812", "age": 4, "index": 812, "body": "VTKGNKUHMP"}, +{"name": "pymc269", "age": 67, "index": 269, "body": "VTKGNKUHMP"}, +{"name": "pymc669", "age": 63, "index": 669, "body": "VTKGNKUHMP"}, +{"name": "pymc290", "age": 88, "index": 290, "body": "VTKGNKUHMP"}, +{"name": "pymc210", "age": 8, "index": 210, "body": "VTKGNKUHMP"}, +{"name": "pymc197", "age": 96, "index": 197, "body": "VTKGNKUHMP"}, +{"name": "pymc820", "age": 12, "index": 820, "body": "VTKGNKUHMP"}, +{"name": "pymc725", "age": 18, "index": 725, "body": "VTKGNKUHMP"}, +{"name": "pymc27", "age": 27, "index": 27, "body": "VTKGNKUHMP"}, +{"name": "pymc336", "age": 33, "index": 336, "body": "VTKGNKUHMP"}, +{"name": "pymc876", "age": 68, "index": 876, "body": "VTKGNKUHMP"}, +{"name": "pymc648", "age": 42, "index": 648, "body": "VTKGNKUHMP"}, +{"name": "pymc889", "age": 81, "index": 889, "body": "VTKGNKUHMP"}, +{"name": "pymc35", "age": 35, "index": 35, "body": "VTKGNKUHMP"}, +{"name": "pymc724", "age": 17, "index": 724, "body": "VTKGNKUHMP"}, +{"name": "pymc13", "age": 13, "index": 13, "body": "VTKGNKUHMP"}, +{"name": "pymc8", "age": 8, "index": 8, "body": "VTKGNKUHMP"}, +{"name": "pymc972", "age": 63, "index": 972, "body": "VTKGNKUHMP"}, +{"name": "pymc112", "age": 11, "index": 112, "body": "VTKGNKUHMP"}, +{"name": "pymc692", "age": 86, "index": 692, "body": "VTKGNKUHMP"}, +{"name": "pymc291", "age": 89, "index": 291, "body": "VTKGNKUHMP"}, +{"name": "pymc87", "age": 87, "index": 87, "body": "VTKGNKUHMP"}, +{"name": "pymc628", "age": 22, "index": 628, "body": "VTKGNKUHMP"}, +{"name": "pymc978", "age": 69, "index": 978, "body": 
"VTKGNKUHMP"}, +{"name": "pymc10", "age": 10, "index": 10, "body": "VTKGNKUHMP"}, +{"name": "pymc961", "age": 52, "index": 961, "body": "VTKGNKUHMP"}, +{"name": "pymc974", "age": 65, "index": 974, "body": "VTKGNKUHMP"}, +{"name": "pymc67", "age": 67, "index": 67, "body": "VTKGNKUHMP"}, +{"name": "pymc960", "age": 51, "index": 960, "body": "VTKGNKUHMP"}, +{"name": "pymc335", "age": 32, "index": 335, "body": "VTKGNKUHMP"}, +{"name": "pymc327", "age": 24, "index": 327, "body": "VTKGNKUHMP"}, +{"name": "pymc815", "age": 7, "index": 815, "body": "VTKGNKUHMP"}, +{"name": "pymc203", "age": 1, "index": 203, "body": "VTKGNKUHMP"}, +{"name": "pymc975", "age": 66, "index": 975, "body": "VTKGNKUHMP"}, +{"name": "pymc261", "age": 59, "index": 261, "body": "VTKGNKUHMP"}, +{"name": "pymc43", "age": 43, "index": 43, "body": "VTKGNKUHMP"}, +{"name": "pymc743", "age": 36, "index": 743, "body": "VTKGNKUHMP"}, +{"name": "pymc934", "age": 25, "index": 934, "body": "VTKGNKUHMP"}, +{"name": "pymc875", "age": 67, "index": 875, "body": "VTKGNKUHMP"}, +{"name": "pymc138", "age": 37, "index": 138, "body": "VTKGNKUHMP"}, +{"name": "pymc936", "age": 27, "index": 936, "body": "VTKGNKUHMP"}, +{"name": "pymc136", "age": 35, "index": 136, "body": "VTKGNKUHMP"}, +{"name": "pymc844", "age": 36, "index": 844, "body": "VTKGNKUHMP"}, +{"name": "pymc322", "age": 19, "index": 322, "body": "VTKGNKUHMP"}, +{"name": "pymc569", "age": 64, "index": 569, "body": "VTKGNKUHMP"}, +{"name": "pymc609", "age": 3, "index": 609, "body": "VTKGNKUHMP"}, +{"name": "pymc214", "age": 12, "index": 214, "body": "VTKGNKUHMP"}, +{"name": "pymc72", "age": 72, "index": 72, "body": "VTKGNKUHMP"}, +{"name": "pymc835", "age": 27, "index": 835, "body": "VTKGNKUHMP"}, +{"name": "pymc81", "age": 81, "index": 81, "body": "VTKGNKUHMP"}, +{"name": "pymc874", "age": 66, "index": 874, "body": "VTKGNKUHMP"}, +{"name": "pymc216", "age": 14, "index": 216, "body": "VTKGNKUHMP"}, +{"name": "pymc60", "age": 60, "index": 60, "body": "VTKGNKUHMP"}, +{"name": "pymc246", "age": 44, "index": 246, "body": "VTKGNKUHMP"}, +{"name": "pymc967", "age": 58, "index": 967, "body": "VTKGNKUHMP"}, +{"name": "pymc268", "age": 66, "index": 268, "body": "VTKGNKUHMP"}, +{"name": "pymc182", "age": 81, "index": 182, "body": "VTKGNKUHMP"}, +{"name": "pymc165", "age": 64, "index": 165, "body": "VTKGNKUHMP"}, +{"name": "pymc946", "age": 37, "index": 946, "body": "VTKGNKUHMP"}, +{"name": "pymc860", "age": 52, "index": 860, "body": "VTKGNKUHMP"}, +{"name": "pymc218", "age": 16, "index": 218, "body": "VTKGNKUHMP"}, +{"name": "pymc823", "age": 15, "index": 823, "body": "VTKGNKUHMP"}, +{"name": "pymc964", "age": 55, "index": 964, "body": "VTKGNKUHMP"}, +{"name": "pymc895", "age": 87, "index": 895, "body": "VTKGNKUHMP"}, +{"name": "pymc362", "age": 59, "index": 362, "body": "VTKGNKUHMP"}, +{"name": "pymc36", "age": 36, "index": 36, "body": "VTKGNKUHMP"}, +{"name": "pymc970", "age": 61, "index": 970, "body": "VTKGNKUHMP"}, +{"name": "pymc498", "age": 94, "index": 498, "body": "VTKGNKUHMP"}, +{"name": "pymc151", "age": 50, "index": 151, "body": "VTKGNKUHMP"}, +{"name": "pymc539", "age": 34, "index": 539, "body": "VTKGNKUHMP"}, +{"name": "pymc338", "age": 35, "index": 338, "body": "VTKGNKUHMP"}, +{"name": "pymc752", "age": 45, "index": 752, "body": "VTKGNKUHMP"}, +{"name": "pymc89", "age": 89, "index": 89, "body": "VTKGNKUHMP"}, +{"name": "pymc358", "age": 55, "index": 358, "body": "VTKGNKUHMP"}, +{"name": "pymc695", "age": 89, "index": 695, "body": "VTKGNKUHMP"}, +{"name": "pymc31", "age": 31, "index": 
31, "body": "VTKGNKUHMP"}, +{"name": "pymc64", "age": 64, "index": 64, "body": "VTKGNKUHMP"}, +{"name": "pymc699", "age": 93, "index": 699, "body": "VTKGNKUHMP"}, +{"name": "pymc217", "age": 15, "index": 217, "body": "VTKGNKUHMP"}, +{"name": "pymc271", "age": 69, "index": 271, "body": "VTKGNKUHMP"}, +{"name": "pymc745", "age": 38, "index": 745, "body": "VTKGNKUHMP"}, +{"name": "pymc163", "age": 62, "index": 163, "body": "VTKGNKUHMP"}, +{"name": "pymc891", "age": 83, "index": 891, "body": "VTKGNKUHMP"}, +{"name": "pymc12", "age": 12, "index": 12, "body": "VTKGNKUHMP"}, +{"name": "pymc180", "age": 79, "index": 180, "body": "VTKGNKUHMP"}, +{"name": "pymc236", "age": 34, "index": 236, "body": "VTKGNKUHMP"}, +{"name": "pymc106", "age": 5, "index": 106, "body": "VTKGNKUHMP"}, +{"name": "pymc202", "age": 0, "index": 202, "body": "VTKGNKUHMP"}, +{"name": "pymc146", "age": 45, "index": 146, "body": "VTKGNKUHMP"}, +{"name": "pymc62", "age": 62, "index": 62, "body": "VTKGNKUHMP"}, +{"name": "pymc242", "age": 40, "index": 242, "body": "VTKGNKUHMP"}, +{"name": "pymc252", "age": 50, "index": 252, "body": "VTKGNKUHMP"}, +{"name": "pymc354", "age": 51, "index": 354, "body": "VTKGNKUHMP"}, +{"name": "pymc846", "age": 38, "index": 846, "body": "VTKGNKUHMP"}, +{"name": "pymc937", "age": 28, "index": 937, "body": "VTKGNKUHMP"}, +{"name": "pymc329", "age": 26, "index": 329, "body": "VTKGNKUHMP"}, +{"name": "pymc110", "age": 9, "index": 110, "body": "VTKGNKUHMP"}, +{"name": "pymc356", "age": 53, "index": 356, "body": "VTKGNKUHMP"}, +{"name": "pymc638", "age": 32, "index": 638, "body": "VTKGNKUHMP"}, +{"name": "pymc15", "age": 15, "index": 15, "body": "VTKGNKUHMP"}, +{"name": "pymc813", "age": 5, "index": 813, "body": "VTKGNKUHMP"}, +{"name": "pymc5", "age": 5, "index": 5, "body": "VTKGNKUHMP"}, +{"name": "pymc117", "age": 16, "index": 117, "body": "VTKGNKUHMP"}, +{"name": "pymc323", "age": 20, "index": 323, "body": "VTKGNKUHMP"}, +{"name": "pymc884", "age": 76, "index": 884, "body": "VTKGNKUHMP"}, +{"name": "pymc691", "age": 85, "index": 691, "body": "VTKGNKUHMP"}, +{"name": "pymc887", "age": 79, "index": 887, "body": "VTKGNKUHMP"}, +{"name": "pymc408", "age": 4, "index": 408, "body": "VTKGNKUHMP"}, +{"name": "pymc690", "age": 84, "index": 690, "body": "VTKGNKUHMP"}, +{"name": "pymc223", "age": 21, "index": 223, "body": "VTKGNKUHMP"}, +{"name": "pymc684", "age": 78, "index": 684, "body": "VTKGNKUHMP"}, +{"name": "pymc190", "age": 89, "index": 190, "body": "VTKGNKUHMP"}, +{"name": "pymc862", "age": 54, "index": 862, "body": "VTKGNKUHMP"}, +{"name": "pymc933", "age": 24, "index": 933, "body": "VTKGNKUHMP"}, +{"name": "pymc38", "age": 38, "index": 38, "body": "VTKGNKUHMP"}, +{"name": "pymc852", "age": 44, "index": 852, "body": "VTKGNKUHMP"}, +{"name": "pymc137", "age": 36, "index": 137, "body": "VTKGNKUHMP"}, +{"name": "pymc954", "age": 45, "index": 954, "body": "VTKGNKUHMP"}, +{"name": "pymc855", "age": 47, "index": 855, "body": "VTKGNKUHMP"}, +{"name": "pymc938", "age": 29, "index": 938, "body": "VTKGNKUHMP"}, +{"name": "pymc94", "age": 94, "index": 94, "body": "VTKGNKUHMP"}, +{"name": "pymc955", "age": 46, "index": 955, "body": "VTKGNKUHMP"}, +{"name": "pymc93", "age": 93, "index": 93, "body": "VTKGNKUHMP"}, +{"name": "pymc737", "age": 30, "index": 737, "body": "VTKGNKUHMP"}, +{"name": "pymc76", "age": 76, "index": 76, "body": "VTKGNKUHMP"}, +{"name": "pymc459", "age": 55, "index": 459, "body": "VTKGNKUHMP"}, +{"name": "pymc973", "age": 64, "index": 973, "body": "VTKGNKUHMP"}, +{"name": "pymc428", "age": 24, 
"index": 428, "body": "VTKGNKUHMP"}, +{"name": "pymc262", "age": 60, "index": 262, "body": "VTKGNKUHMP"}, +{"name": "pymc113", "age": 12, "index": 113, "body": "VTKGNKUHMP"}, +{"name": "pymc659", "age": 53, "index": 659, "body": "VTKGNKUHMP"}, +{"name": "pymc229", "age": 27, "index": 229, "body": "VTKGNKUHMP"}, +{"name": "pymc103", "age": 2, "index": 103, "body": "VTKGNKUHMP"}, +{"name": "pymc367", "age": 64, "index": 367, "body": "VTKGNKUHMP"}, +{"name": "pymc273", "age": 71, "index": 273, "body": "VTKGNKUHMP"}, +{"name": "pymc321", "age": 18, "index": 321, "body": "VTKGNKUHMP"}, +{"name": "pymc40", "age": 40, "index": 40, "body": "VTKGNKUHMP"}, +{"name": "pymc361", "age": 58, "index": 361, "body": "VTKGNKUHMP"}, +{"name": "pymc172", "age": 71, "index": 172, "body": "VTKGNKUHMP"}, +{"name": "pymc898", "age": 90, "index": 898, "body": "VTKGNKUHMP"}, +{"name": "pymc868", "age": 60, "index": 868, "body": "VTKGNKUHMP"}, +{"name": "pymc885", "age": 77, "index": 885, "body": "VTKGNKUHMP"}, +{"name": "pymc943", "age": 34, "index": 943, "body": "VTKGNKUHMP"}, +{"name": "pymc803", "age": 96, "index": 803, "body": "VTKGNKUHMP"}, +{"name": "pymc9", "age": 9, "index": 9, "body": "VTKGNKUHMP"}, +{"name": "pymc77", "age": 77, "index": 77, "body": "VTKGNKUHMP"}, +{"name": "pymc558", "age": 53, "index": 558, "body": "VTKGNKUHMP"}, +{"name": "pymc215", "age": 13, "index": 215, "body": "VTKGNKUHMP"}, +{"name": "pymc730", "age": 23, "index": 730, "body": "VTKGNKUHMP"}, +{"name": "pymc187", "age": 86, "index": 187, "body": "VTKGNKUHMP"}, +{"name": "pymc240", "age": 38, "index": 240, "body": "VTKGNKUHMP"}, +{"name": "pymc927", "age": 18, "index": 927, "body": "VTKGNKUHMP"}, +{"name": "pymc966", "age": 57, "index": 966, "body": "VTKGNKUHMP"}, +{"name": "pymc295", "age": 93, "index": 295, "body": "VTKGNKUHMP"}, +{"name": "pymc841", "age": 33, "index": 841, "body": "VTKGNKUHMP"}, +{"name": "pymc851", "age": 43, "index": 851, "body": "VTKGNKUHMP"}, +{"name": "pymc468", "age": 64, "index": 468, "body": "VTKGNKUHMP"}, +{"name": "pymc364", "age": 61, "index": 364, "body": "VTKGNKUHMP"}, +{"name": "pymc61", "age": 61, "index": 61, "body": "VTKGNKUHMP"}, +{"name": "pymc922", "age": 13, "index": 922, "body": "VTKGNKUHMP"}, +{"name": "pymc378", "age": 75, "index": 378, "body": "VTKGNKUHMP"}, +{"name": "pymc686", "age": 80, "index": 686, "body": "VTKGNKUHMP"}, +{"name": "pymc331", "age": 28, "index": 331, "body": "VTKGNKUHMP"}, +{"name": "pymc693", "age": 87, "index": 693, "body": "VTKGNKUHMP"}, +{"name": "pymc892", "age": 84, "index": 892, "body": "VTKGNKUHMP"}, +{"name": "pymc374", "age": 71, "index": 374, "body": "VTKGNKUHMP"}, +{"name": "pymc864", "age": 56, "index": 864, "body": "VTKGNKUHMP"}, +{"name": "pymc839", "age": 31, "index": 839, "body": "VTKGNKUHMP"}, +{"name": "pymc6", "age": 6, "index": 6, "body": "VTKGNKUHMP"}, +{"name": "pymc953", "age": 44, "index": 953, "body": "VTKGNKUHMP"}, +{"name": "pymc858", "age": 50, "index": 858, "body": "VTKGNKUHMP"}, +{"name": "pymc28", "age": 28, "index": 28, "body": "VTKGNKUHMP"}, +{"name": "pymc926", "age": 17, "index": 926, "body": "VTKGNKUHMP"}, +{"name": "pymc78", "age": 78, "index": 78, "body": "VTKGNKUHMP"}, +{"name": "pymc365", "age": 62, "index": 365, "body": "VTKGNKUHMP"}, +{"name": "pymc100", "age": 100, "index": 100, "body": "VTKGNKUHMP"}, +{"name": "pymc33", "age": 33, "index": 33, "body": "VTKGNKUHMP"}, +{"name": "pymc18", "age": 18, "index": 18, "body": "VTKGNKUHMP"}, +{"name": "pymc195", "age": 94, "index": 195, "body": "VTKGNKUHMP"}, +{"name": "pymc549", 
"age": 44, "index": 549, "body": "VTKGNKUHMP"}, +{"name": "pymc255", "age": 53, "index": 255, "body": "VTKGNKUHMP"}, +{"name": "pymc883", "age": 75, "index": 883, "body": "VTKGNKUHMP"}, +{"name": "pymc245", "age": 43, "index": 245, "body": "VTKGNKUHMP"}, +{"name": "pymc125", "age": 24, "index": 125, "body": "VTKGNKUHMP"}, +{"name": "pymc52", "age": 52, "index": 52, "body": "VTKGNKUHMP"}, +{"name": "pymc206", "age": 4, "index": 206, "body": "VTKGNKUHMP"}, +{"name": "pymc37", "age": 37, "index": 37, "body": "VTKGNKUHMP"}, +{"name": "pymc133", "age": 32, "index": 133, "body": "VTKGNKUHMP"}, +{"name": "pymc339", "age": 36, "index": 339, "body": "VTKGNKUHMP"}, +{"name": "pymc198", "age": 97, "index": 198, "body": "VTKGNKUHMP"}, +{"name": "pymc838", "age": 30, "index": 838, "body": "VTKGNKUHMP"}, +{"name": "pymc963", "age": 54, "index": 963, "body": "VTKGNKUHMP"}, +{"name": "pymc925", "age": 16, "index": 925, "body": "VTKGNKUHMP"}, +{"name": "pymc58", "age": 58, "index": 58, "body": "VTKGNKUHMP"}, +{"name": "pymc878", "age": 70, "index": 878, "body": "VTKGNKUHMP"}, +{"name": "pymc921", "age": 12, "index": 921, "body": "VTKGNKUHMP"}, +{"name": "pymc836", "age": 28, "index": 836, "body": "VTKGNKUHMP"}, +{"name": "pymc807", "age": 100, "index": 807, "body": "VTKGNKUHMP"}, +{"name": "pymc119", "age": 18, "index": 119, "body": "VTKGNKUHMP"}, +{"name": "pymc79", "age": 79, "index": 79, "body": "VTKGNKUHMP"}, +{"name": "pymc366", "age": 63, "index": 366, "body": "VTKGNKUHMP"}, +{"name": "pymc183", "age": 82, "index": 183, "body": "VTKGNKUHMP"}, +{"name": "pymc45", "age": 45, "index": 45, "body": "VTKGNKUHMP"}, +{"name": "pymc814", "age": 6, "index": 814, "body": "VTKGNKUHMP"}, +{"name": "pymc548", "age": 43, "index": 548, "body": "VTKGNKUHMP"}, +{"name": "pymc359", "age": 56, "index": 359, "body": "VTKGNKUHMP"}, +{"name": "pymc274", "age": 72, "index": 274, "body": "VTKGNKUHMP"}, +{"name": "pymc881", "age": 73, "index": 881, "body": "VTKGNKUHMP"}, +{"name": "pymc528", "age": 23, "index": 528, "body": "VTKGNKUHMP"}, +{"name": "pymc618", "age": 12, "index": 618, "body": "VTKGNKUHMP"}, +{"name": "pymc209", "age": 7, "index": 209, "body": "VTKGNKUHMP"}, +{"name": "pymc351", "age": 48, "index": 351, "body": "VTKGNKUHMP"}, +{"name": "pymc871", "age": 63, "index": 871, "body": "VTKGNKUHMP"}, +{"name": "pymc16", "age": 16, "index": 16, "body": "VTKGNKUHMP"}, +{"name": "pymc193", "age": 92, "index": 193, "body": "VTKGNKUHMP"}, +{"name": "pymc668", "age": 62, "index": 668, "body": "VTKGNKUHMP"}, +{"name": "pymc418", "age": 14, "index": 418, "body": "VTKGNKUHMP"}, +{"name": "pymc161", "age": 60, "index": 161, "body": "VTKGNKUHMP"}, +{"name": "pymc174", "age": 73, "index": 174, "body": "VTKGNKUHMP"}, +{"name": "pymc55", "age": 55, "index": 55, "body": "VTKGNKUHMP"}, +{"name": "pymc854", "age": 46, "index": 854, "body": "VTKGNKUHMP"}, +{"name": "pymc694", "age": 88, "index": 694, "body": "VTKGNKUHMP"}, +{"name": "pymc738", "age": 31, "index": 738, "body": "VTKGNKUHMP"}, +{"name": "pymc85", "age": 85, "index": 85, "body": "VTKGNKUHMP"}, +{"name": "pymc888", "age": 80, "index": 888, "body": "VTKGNKUHMP"}, +{"name": "pymc742", "age": 35, "index": 742, "body": "VTKGNKUHMP"}, +{"name": "pymc250", "age": 48, "index": 250, "body": "VTKGNKUHMP"}, +{"name": "pymc225", "age": 23, "index": 225, "body": "VTKGNKUHMP"}, +{"name": "pymc41", "age": 41, "index": 41, "body": "VTKGNKUHMP"}, +{"name": "pymc840", "age": 32, "index": 840, "body": "VTKGNKUHMP"}, +{"name": "pymc829", "age": 21, "index": 829, "body": "VTKGNKUHMP"}, 
+{"name": "pymc153", "age": 52, "index": 153, "body": "VTKGNKUHMP"}, +{"name": "pymc935", "age": 26, "index": 935, "body": "VTKGNKUHMP"}, +{"name": "pymc247", "age": 45, "index": 247, "body": "VTKGNKUHMP"}, +{"name": "pymc287", "age": 85, "index": 287, "body": "VTKGNKUHMP"}, +{"name": "pymc227", "age": 25, "index": 227, "body": "VTKGNKUHMP"}, +{"name": "pymc928", "age": 19, "index": 928, "body": "VTKGNKUHMP"}, +{"name": "pymc330", "age": 27, "index": 330, "body": "VTKGNKUHMP"}, +{"name": "pymc121", "age": 20, "index": 121, "body": "VTKGNKUHMP"}, +{"name": "pymc893", "age": 85, "index": 893, "body": "VTKGNKUHMP"}, +{"name": "pymc114", "age": 13, "index": 114, "body": "VTKGNKUHMP"}, +{"name": "pymc768", "age": 61, "index": 768, "body": "VTKGNKUHMP"}, +{"name": "pymc861", "age": 53, "index": 861, "body": "VTKGNKUHMP"}, +{"name": "pymc233", "age": 31, "index": 233, "body": "VTKGNKUHMP"}, +{"name": "pymc658", "age": 52, "index": 658, "body": "VTKGNKUHMP"}, +{"name": "pymc698", "age": 92, "index": 698, "body": "VTKGNKUHMP"}, +{"name": "pymc332", "age": 29, "index": 332, "body": "VTKGNKUHMP"}, +{"name": "pymc135", "age": 34, "index": 135, "body": "VTKGNKUHMP"}, +{"name": "pymc805", "age": 98, "index": 805, "body": "VTKGNKUHMP"}, +{"name": "pymc74", "age": 74, "index": 74, "body": "VTKGNKUHMP"}, +{"name": "pymc134", "age": 33, "index": 134, "body": "VTKGNKUHMP"}, +{"name": "pymc189", "age": 88, "index": 189, "body": "VTKGNKUHMP"}, +{"name": "pymc896", "age": 88, "index": 896, "body": "VTKGNKUHMP"}, +{"name": "pymc733", "age": 26, "index": 733, "body": "VTKGNKUHMP"}, +{"name": "pymc755", "age": 48, "index": 755, "body": "VTKGNKUHMP"}, +{"name": "pymc196", "age": 95, "index": 196, "body": "VTKGNKUHMP"}, +{"name": "pymc4", "age": 4, "index": 4, "body": "VTKGNKUHMP"}, +{"name": "pymc688", "age": 82, "index": 688, "body": "VTKGNKUHMP"}, +{"name": "pymc930", "age": 21, "index": 930, "body": "VTKGNKUHMP"}, +{"name": "pymc286", "age": 84, "index": 286, "body": "VTKGNKUHMP"}, +{"name": "pymc968", "age": 59, "index": 968, "body": "VTKGNKUHMP"}, +{"name": "pymc867", "age": 59, "index": 867, "body": "VTKGNKUHMP"}, +{"name": "pymc828", "age": 20, "index": 828, "body": "VTKGNKUHMP"}, +{"name": "pymc280", "age": 78, "index": 280, "body": "VTKGNKUHMP"}, +{"name": "pymc147", "age": 46, "index": 147, "body": "VTKGNKUHMP"}, +{"name": "pymc279", "age": 77, "index": 279, "body": "VTKGNKUHMP"}, +{"name": "pymc232", "age": 30, "index": 232, "body": "VTKGNKUHMP"}, +{"name": "pymc54", "age": 54, "index": 54, "body": "VTKGNKUHMP"}, +{"name": "pymc212", "age": 10, "index": 212, "body": "VTKGNKUHMP"}, +{"name": "pymc145", "age": 44, "index": 145, "body": "VTKGNKUHMP"}, +{"name": "pymc204", "age": 2, "index": 204, "body": "VTKGNKUHMP"}, +{"name": "pymc649", "age": 43, "index": 649, "body": "VTKGNKUHMP"}, +{"name": "pymc373", "age": 70, "index": 373, "body": "VTKGNKUHMP"}, +{"name": "pymc297", "age": 95, "index": 297, "body": "VTKGNKUHMP"}, +{"name": "pymc920", "age": 11, "index": 920, "body": "VTKGNKUHMP"}, +{"name": "pymc3", "age": 3, "index": 3, "body": "VTKGNKUHMP"}, +{"name": "pymc20", "age": 20, "index": 20, "body": "VTKGNKUHMP"}, +{"name": "pymc63", "age": 63, "index": 63, "body": "VTKGNKUHMP"}, +{"name": "pymc924", "age": 15, "index": 924, "body": "VTKGNKUHMP"}, +{"name": "pymc0", "age": 0, "index": 0, "body": "VTKGNKUHMP"}, +{"name": "pymc629", "age": 23, "index": 629, "body": "VTKGNKUHMP"}, +{"name": "pymc873", "age": 65, "index": 873, "body": "VTKGNKUHMP"}, +{"name": "pymc375", "age": 72, "index": 375, "body": 
"VTKGNKUHMP"}, +{"name": "pymc822", "age": 14, "index": 822, "body": "VTKGNKUHMP"}, +{"name": "pymc439", "age": 35, "index": 439, "body": "VTKGNKUHMP"}, +{"name": "pymc696", "age": 90, "index": 696, "body": "VTKGNKUHMP"}, +{"name": "pymc429", "age": 25, "index": 429, "body": "VTKGNKUHMP"}, +{"name": "pymc959", "age": 50, "index": 959, "body": "VTKGNKUHMP"}, +{"name": "pymc220", "age": 18, "index": 220, "body": "VTKGNKUHMP"}, +{"name": "pymc831", "age": 23, "index": 831, "body": "VTKGNKUHMP"}, +{"name": "pymc368", "age": 65, "index": 368, "body": "VTKGNKUHMP"}, +{"name": "pymc207", "age": 5, "index": 207, "body": "VTKGNKUHMP"}, +{"name": "pymc131", "age": 30, "index": 131, "body": "VTKGNKUHMP"}, +{"name": "pymc211", "age": 9, "index": 211, "body": "VTKGNKUHMP"}, +{"name": "pymc819", "age": 11, "index": 819, "body": "VTKGNKUHMP"}, +{"name": "pymc185", "age": 84, "index": 185, "body": "VTKGNKUHMP"}, +{"name": "pymc758", "age": 51, "index": 758, "body": "VTKGNKUHMP"}, +{"name": "pymc945", "age": 36, "index": 945, "body": "VTKGNKUHMP"}, +{"name": "pymc266", "age": 64, "index": 266, "body": "VTKGNKUHMP"}, +{"name": "pymc818", "age": 10, "index": 818, "body": "VTKGNKUHMP"}, +{"name": "pymc275", "age": 73, "index": 275, "body": "VTKGNKUHMP"}, +{"name": "pymc142", "age": 41, "index": 142, "body": "VTKGNKUHMP"}, +{"name": "pymc159", "age": 58, "index": 159, "body": "VTKGNKUHMP"}, +{"name": "pymc409", "age": 5, "index": 409, "body": "VTKGNKUHMP"}, +{"name": "pymc82", "age": 82, "index": 82, "body": "VTKGNKUHMP"}, +{"name": "pymc748", "age": 41, "index": 748, "body": "VTKGNKUHMP"}, +{"name": "pymc154", "age": 53, "index": 154, "body": "VTKGNKUHMP"}, +{"name": "pymc44", "age": 44, "index": 44, "body": "VTKGNKUHMP"}, +{"name": "pymc929", "age": 20, "index": 929, "body": "VTKGNKUHMP"}, +{"name": "pymc325", "age": 22, "index": 325, "body": "VTKGNKUHMP"}, +{"name": "pymc24", "age": 24, "index": 24, "body": "VTKGNKUHMP"}, +{"name": "pymc11", "age": 11, "index": 11, "body": "VTKGNKUHMP"}, +{"name": "pymc808", "age": 0, "index": 808, "body": "VTKGNKUHMP"}, +{"name": "pymc811", "age": 3, "index": 811, "body": "VTKGNKUHMP"}, +{"name": "pymc689", "age": 83, "index": 689, "body": "VTKGNKUHMP"}, +{"name": "pymc237", "age": 35, "index": 237, "body": "VTKGNKUHMP"}, +{"name": "pymc226", "age": 24, "index": 226, "body": "VTKGNKUHMP"}, +{"name": "pymc769", "age": 62, "index": 769, "body": "VTKGNKUHMP"}, +{"name": "pymc856", "age": 48, "index": 856, "body": "VTKGNKUHMP"}, +{"name": "pymc912", "age": 3, "index": 912, "body": "VTKGNKUHMP"}, +{"name": "pymc605", "age": 100, "index": 605, "body": "VTKGNKUHMP"}, +{"name": "pymc667", "age": 61, "index": 667, "body": "VTKGNKUHMP"}, +{"name": "pymc313", "age": 10, "index": 313, "body": "VTKGNKUHMP"}, +{"name": "pymc486", "age": 82, "index": 486, "body": "VTKGNKUHMP"}, +{"name": "pymc302", "age": 100, "index": 302, "body": "VTKGNKUHMP"}, +{"name": "pymc655", "age": 49, "index": 655, "body": "VTKGNKUHMP"}, +{"name": "pymc717", "age": 10, "index": 717, "body": "VTKGNKUHMP"}, +{"name": "pymc398", "age": 95, "index": 398, "body": "VTKGNKUHMP"}, +{"name": "pymc553", "age": 48, "index": 553, "body": "VTKGNKUHMP"}, +{"name": "pymc994", "age": 85, "index": 994, "body": "VTKGNKUHMP"}, +{"name": "pymc495", "age": 91, "index": 495, "body": "VTKGNKUHMP"}, +{"name": "pymc518", "age": 13, "index": 518, "body": "VTKGNKUHMP"}, +{"name": "pymc391", "age": 88, "index": 391, "body": "VTKGNKUHMP"}, +{"name": "pymc636", "age": 30, "index": 636, "body": "VTKGNKUHMP"}, +{"name": "pymc625", "age": 19, 
"index": 625, "body": "VTKGNKUHMP"}, +{"name": "pymc441", "age": 37, "index": 441, "body": "VTKGNKUHMP"}, +{"name": "pymc622", "age": 16, "index": 622, "body": "VTKGNKUHMP"}, +{"name": "pymc542", "age": 37, "index": 542, "body": "VTKGNKUHMP"}, +{"name": "pymc990", "age": 81, "index": 990, "body": "VTKGNKUHMP"}, +{"name": "pymc607", "age": 1, "index": 607, "body": "VTKGNKUHMP"}, +{"name": "pymc523", "age": 18, "index": 523, "body": "VTKGNKUHMP"}, +{"name": "pymc996", "age": 87, "index": 996, "body": "VTKGNKUHMP"}, +{"name": "pymc533", "age": 28, "index": 533, "body": "VTKGNKUHMP"}, +{"name": "pymc412", "age": 8, "index": 412, "body": "VTKGNKUHMP"}, +{"name": "pymc909", "age": 0, "index": 909, "body": "VTKGNKUHMP"}, +{"name": "pymc642", "age": 36, "index": 642, "body": "VTKGNKUHMP"}, +{"name": "pymc710", "age": 3, "index": 710, "body": "VTKGNKUHMP"}, +{"name": "pymc319", "age": 16, "index": 319, "body": "VTKGNKUHMP"}, +{"name": "pymc904", "age": 96, "index": 904, "body": "VTKGNKUHMP"}, +{"name": "pymc776", "age": 69, "index": 776, "body": "VTKGNKUHMP"}, +{"name": "pymc550", "age": 45, "index": 550, "body": "VTKGNKUHMP"}, +{"name": "pymc907", "age": 99, "index": 907, "body": "VTKGNKUHMP"}, +{"name": "pymc676", "age": 70, "index": 676, "body": "VTKGNKUHMP"}, +{"name": "pymc492", "age": 88, "index": 492, "body": "VTKGNKUHMP"}, +{"name": "pymc402", "age": 99, "index": 402, "body": "VTKGNKUHMP"}, +{"name": "pymc531", "age": 26, "index": 531, "body": "VTKGNKUHMP"}, +{"name": "pymc631", "age": 25, "index": 631, "body": "VTKGNKUHMP"}, +{"name": "pymc652", "age": 46, "index": 652, "body": "VTKGNKUHMP"}, +{"name": "pymc987", "age": 78, "index": 987, "body": "VTKGNKUHMP"}, +{"name": "pymc640", "age": 34, "index": 640, "body": "VTKGNKUHMP"}, +{"name": "pymc308", "age": 5, "index": 308, "body": "VTKGNKUHMP"}, +{"name": "pymc650", "age": 44, "index": 650, "body": "VTKGNKUHMP"}, +{"name": "pymc670", "age": 64, "index": 670, "body": "VTKGNKUHMP"}, +{"name": "pymc340", "age": 37, "index": 340, "body": "VTKGNKUHMP"}, +{"name": "pymc992", "age": 83, "index": 992, "body": "VTKGNKUHMP"}, +{"name": "pymc517", "age": 12, "index": 517, "body": "VTKGNKUHMP"}, +{"name": "pymc519", "age": 14, "index": 519, "body": "VTKGNKUHMP"}, +{"name": "pymc490", "age": 86, "index": 490, "body": "VTKGNKUHMP"}, +{"name": "pymc602", "age": 97, "index": 602, "body": "VTKGNKUHMP"}, +{"name": "pymc624", "age": 18, "index": 624, "body": "VTKGNKUHMP"}, +{"name": "pymc425", "age": 21, "index": 425, "body": "VTKGNKUHMP"}, +{"name": "pymc343", "age": 40, "index": 343, "body": "VTKGNKUHMP"}, +{"name": "pymc452", "age": 48, "index": 452, "body": "VTKGNKUHMP"}, +{"name": "pymc610", "age": 4, "index": 610, "body": "VTKGNKUHMP"}, +{"name": "pymc421", "age": 17, "index": 421, "body": "VTKGNKUHMP"}, +{"name": "pymc627", "age": 21, "index": 627, "body": "VTKGNKUHMP"}, +{"name": "pymc773", "age": 66, "index": 773, "body": "VTKGNKUHMP"}, +{"name": "pymc481", "age": 77, "index": 481, "body": "VTKGNKUHMP"}, +{"name": "pymc615", "age": 9, "index": 615, "body": "VTKGNKUHMP"}, +{"name": "pymc411", "age": 7, "index": 411, "body": "VTKGNKUHMP"}, +{"name": "pymc318", "age": 15, "index": 318, "body": "VTKGNKUHMP"}, +{"name": "pymc390", "age": 87, "index": 390, "body": "VTKGNKUHMP"}, +{"name": "pymc991", "age": 82, "index": 991, "body": "VTKGNKUHMP"}, +{"name": "pymc546", "age": 41, "index": 546, "body": "VTKGNKUHMP"}, +{"name": "pymc616", "age": 10, "index": 616, "body": "VTKGNKUHMP"}, +{"name": "pymc763", "age": 56, "index": 763, "body": "VTKGNKUHMP"}, 
+{"name": "pymc403", "age": 100, "index": 403, "body": "VTKGNKUHMP"}, +{"name": "pymc664", "age": 58, "index": 664, "body": "VTKGNKUHMP"}, +{"name": "pymc521", "age": 16, "index": 521, "body": "VTKGNKUHMP"}, +{"name": "pymc525", "age": 20, "index": 525, "body": "VTKGNKUHMP"}, +{"name": "pymc910", "age": 1, "index": 910, "body": "VTKGNKUHMP"}, +{"name": "pymc614", "age": 8, "index": 614, "body": "VTKGNKUHMP"}, +{"name": "pymc547", "age": 42, "index": 547, "body": "VTKGNKUHMP"}, +{"name": "pymc656", "age": 50, "index": 656, "body": "VTKGNKUHMP"}, +{"name": "pymc764", "age": 57, "index": 764, "body": "VTKGNKUHMP"}, +{"name": "pymc494", "age": 90, "index": 494, "body": "VTKGNKUHMP"}, +{"name": "pymc314", "age": 11, "index": 314, "body": "VTKGNKUHMP"}, +{"name": "pymc482", "age": 78, "index": 482, "body": "VTKGNKUHMP"}, +{"name": "pymc524", "age": 19, "index": 524, "body": "VTKGNKUHMP"}, +{"name": "pymc311", "age": 8, "index": 311, "body": "VTKGNKUHMP"}, +{"name": "pymc454", "age": 50, "index": 454, "body": "VTKGNKUHMP"}, +{"name": "pymc312", "age": 9, "index": 312, "body": "VTKGNKUHMP"}, +{"name": "pymc766", "age": 59, "index": 766, "body": "VTKGNKUHMP"}, +{"name": "pymc789", "age": 82, "index": 789, "body": "VTKGNKUHMP"}, +{"name": "pymc446", "age": 42, "index": 446, "body": "VTKGNKUHMP"}, +{"name": "pymc612", "age": 6, "index": 612, "body": "VTKGNKUHMP"}, +{"name": "pymc620", "age": 14, "index": 620, "body": "VTKGNKUHMP"}, +{"name": "pymc509", "age": 4, "index": 509, "body": "VTKGNKUHMP"}, +{"name": "pymc346", "age": 43, "index": 346, "body": "VTKGNKUHMP"}, +{"name": "pymc420", "age": 16, "index": 420, "body": "VTKGNKUHMP"}, +{"name": "pymc632", "age": 26, "index": 632, "body": "VTKGNKUHMP"}, +{"name": "pymc445", "age": 41, "index": 445, "body": "VTKGNKUHMP"}, +{"name": "pymc306", "age": 3, "index": 306, "body": "VTKGNKUHMP"}, +{"name": "pymc405", "age": 1, "index": 405, "body": "VTKGNKUHMP"}, +{"name": "pymc654", "age": 48, "index": 654, "body": "VTKGNKUHMP"}, +{"name": "pymc536", "age": 31, "index": 536, "body": "VTKGNKUHMP"}, +{"name": "pymc713", "age": 6, "index": 713, "body": "VTKGNKUHMP"}, +{"name": "pymc986", "age": 77, "index": 986, "body": "VTKGNKUHMP"}, +{"name": "pymc900", "age": 92, "index": 900, "body": "VTKGNKUHMP"}, +{"name": "pymc673", "age": 67, "index": 673, "body": "VTKGNKUHMP"}, +{"name": "pymc601", "age": 96, "index": 601, "body": "VTKGNKUHMP"}, +{"name": "pymc999", "age": 90, "index": 999, "body": "VTKGNKUHMP"}, +{"name": "pymc304", "age": 1, "index": 304, "body": "VTKGNKUHMP"}, +{"name": "pymc633", "age": 27, "index": 633, "body": "VTKGNKUHMP"}, +{"name": "pymc603", "age": 98, "index": 603, "body": "VTKGNKUHMP"}, +{"name": "pymc715", "age": 8, "index": 715, "body": "VTKGNKUHMP"}, +{"name": "pymc404", "age": 0, "index": 404, "body": "VTKGNKUHMP"}, +{"name": "pymc556", "age": 51, "index": 556, "body": "VTKGNKUHMP"}, +{"name": "pymc651", "age": 45, "index": 651, "body": "VTKGNKUHMP"}, +{"name": "pymc604", "age": 99, "index": 604, "body": "VTKGNKUHMP"}, +{"name": "pymc982", "age": 73, "index": 982, "body": "VTKGNKUHMP"}, +{"name": "pymc300", "age": 98, "index": 300, "body": "VTKGNKUHMP"}, +{"name": "pymc380", "age": 77, "index": 380, "body": "VTKGNKUHMP"}, +{"name": "pymc775", "age": 68, "index": 775, "body": "VTKGNKUHMP"}, +{"name": "pymc444", "age": 40, "index": 444, "body": "VTKGNKUHMP"}, +{"name": "pymc919", "age": 10, "index": 919, "body": "VTKGNKUHMP"}, +{"name": "pymc349", "age": 46, "index": 349, "body": "VTKGNKUHMP"}, +{"name": "pymc660", "age": 54, "index": 660, 
"body": "VTKGNKUHMP"}, +{"name": "pymc702", "age": 96, "index": 702, "body": "VTKGNKUHMP"}, +{"name": "pymc301", "age": 99, "index": 301, "body": "VTKGNKUHMP"}, +{"name": "pymc447", "age": 43, "index": 447, "body": "VTKGNKUHMP"}, +{"name": "pymc455", "age": 51, "index": 455, "body": "VTKGNKUHMP"}, +{"name": "pymc985", "age": 76, "index": 985, "body": "VTKGNKUHMP"}, +{"name": "pymc393", "age": 90, "index": 393, "body": "VTKGNKUHMP"}, +{"name": "pymc483", "age": 79, "index": 483, "body": "VTKGNKUHMP"}, +{"name": "pymc644", "age": 38, "index": 644, "body": "VTKGNKUHMP"}, +{"name": "pymc450", "age": 46, "index": 450, "body": "VTKGNKUHMP"}, +{"name": "pymc799", "age": 92, "index": 799, "body": "VTKGNKUHMP"}, +{"name": "pymc384", "age": 81, "index": 384, "body": "VTKGNKUHMP"}, +{"name": "pymc386", "age": 83, "index": 386, "body": "VTKGNKUHMP"}, +{"name": "pymc534", "age": 29, "index": 534, "body": "VTKGNKUHMP"}, +{"name": "pymc760", "age": 53, "index": 760, "body": "VTKGNKUHMP"}, +{"name": "pymc617", "age": 11, "index": 617, "body": "VTKGNKUHMP"}, +{"name": "pymc388", "age": 85, "index": 388, "body": "VTKGNKUHMP"}, +{"name": "pymc905", "age": 97, "index": 905, "body": "VTKGNKUHMP"}, +{"name": "pymc410", "age": 6, "index": 410, "body": "VTKGNKUHMP"}, +{"name": "pymc508", "age": 3, "index": 508, "body": "VTKGNKUHMP"}, +{"name": "pymc623", "age": 17, "index": 623, "body": "VTKGNKUHMP"}, +{"name": "pymc705", "age": 99, "index": 705, "body": "VTKGNKUHMP"}, +{"name": "pymc709", "age": 2, "index": 709, "body": "VTKGNKUHMP"}, +{"name": "pymc387", "age": 84, "index": 387, "body": "VTKGNKUHMP"}, +{"name": "pymc493", "age": 89, "index": 493, "body": "VTKGNKUHMP"}, +{"name": "pymc611", "age": 5, "index": 611, "body": "VTKGNKUHMP"}, +{"name": "pymc901", "age": 93, "index": 901, "body": "VTKGNKUHMP"}, +{"name": "pymc762", "age": 55, "index": 762, "body": "VTKGNKUHMP"}, +{"name": "pymc544", "age": 39, "index": 544, "body": "VTKGNKUHMP"}, +{"name": "pymc674", "age": 68, "index": 674, "body": "VTKGNKUHMP"}, +{"name": "pymc392", "age": 89, "index": 392, "body": "VTKGNKUHMP"}, +{"name": "pymc634", "age": 28, "index": 634, "body": "VTKGNKUHMP"}, +{"name": "pymc530", "age": 25, "index": 530, "body": "VTKGNKUHMP"}, +{"name": "pymc520", "age": 15, "index": 520, "body": "VTKGNKUHMP"}, +{"name": "pymc426", "age": 22, "index": 426, "body": "VTKGNKUHMP"}, +{"name": "pymc662", "age": 56, "index": 662, "body": "VTKGNKUHMP"}, +{"name": "pymc424", "age": 20, "index": 424, "body": "VTKGNKUHMP"}, +{"name": "pymc983", "age": 74, "index": 983, "body": "VTKGNKUHMP"}, +{"name": "pymc344", "age": 41, "index": 344, "body": "VTKGNKUHMP"}, +{"name": "pymc665", "age": 59, "index": 665, "body": "VTKGNKUHMP"}, +{"name": "pymc451", "age": 47, "index": 451, "body": "VTKGNKUHMP"}, +{"name": "pymc914", "age": 5, "index": 914, "body": "VTKGNKUHMP"}, +{"name": "pymc701", "age": 95, "index": 701, "body": "VTKGNKUHMP"}, +{"name": "pymc917", "age": 8, "index": 917, "body": "VTKGNKUHMP"}, +{"name": "pymc598", "age": 93, "index": 598, "body": "VTKGNKUHMP"}, +{"name": "pymc700", "age": 94, "index": 700, "body": "VTKGNKUHMP"}, +{"name": "pymc551", "age": 46, "index": 551, "body": "VTKGNKUHMP"}, +{"name": "pymc714", "age": 7, "index": 714, "body": "VTKGNKUHMP"}, +{"name": "pymc772", "age": 65, "index": 772, "body": "VTKGNKUHMP"}, +{"name": "pymc645", "age": 39, "index": 645, "body": "VTKGNKUHMP"}, +{"name": "pymc480", "age": 76, "index": 480, "body": "VTKGNKUHMP"}, +{"name": "pymc661", "age": 55, "index": 661, "body": "VTKGNKUHMP"}, +{"name": 
"pymc671", "age": 65, "index": 671, "body": "VTKGNKUHMP"}, +{"name": "pymc415", "age": 11, "index": 415, "body": "VTKGNKUHMP"}, +{"name": "pymc491", "age": 87, "index": 491, "body": "VTKGNKUHMP"}, +{"name": "pymc453", "age": 49, "index": 453, "body": "VTKGNKUHMP"}, +{"name": "pymc413", "age": 9, "index": 413, "body": "VTKGNKUHMP"}, +{"name": "pymc600", "age": 95, "index": 600, "body": "VTKGNKUHMP"}, +{"name": "pymc908", "age": 100, "index": 908, "body": "VTKGNKUHMP"}, +{"name": "pymc915", "age": 6, "index": 915, "body": "VTKGNKUHMP"}, +{"name": "pymc993", "age": 84, "index": 993, "body": "VTKGNKUHMP"}, +{"name": "pymc348", "age": 45, "index": 348, "body": "VTKGNKUHMP"}, +{"name": "pymc385", "age": 82, "index": 385, "body": "VTKGNKUHMP"}, +{"name": "pymc765", "age": 58, "index": 765, "body": "VTKGNKUHMP"}, +{"name": "pymc532", "age": 27, "index": 532, "body": "VTKGNKUHMP"}, +{"name": "pymc305", "age": 2, "index": 305, "body": "VTKGNKUHMP"}, +{"name": "pymc416", "age": 12, "index": 416, "body": "VTKGNKUHMP"}, +{"name": "pymc716", "age": 9, "index": 716, "body": "VTKGNKUHMP"}, +{"name": "pymc443", "age": 39, "index": 443, "body": "VTKGNKUHMP"}, +{"name": "pymc703", "age": 97, "index": 703, "body": "VTKGNKUHMP"}, +{"name": "pymc902", "age": 94, "index": 902, "body": "VTKGNKUHMP"}, +{"name": "pymc406", "age": 2, "index": 406, "body": "VTKGNKUHMP"}, +{"name": "pymc347", "age": 44, "index": 347, "body": "VTKGNKUHMP"}, +{"name": "pymc417", "age": 13, "index": 417, "body": "VTKGNKUHMP"}, +{"name": "pymc672", "age": 66, "index": 672, "body": "VTKGNKUHMP"}, +{"name": "pymc777", "age": 70, "index": 777, "body": "VTKGNKUHMP"}, +{"name": "pymc527", "age": 22, "index": 527, "body": "VTKGNKUHMP"}, +{"name": "pymc913", "age": 4, "index": 913, "body": "VTKGNKUHMP"}, +{"name": "pymc537", "age": 32, "index": 537, "body": "VTKGNKUHMP"}, +{"name": "pymc657", "age": 51, "index": 657, "body": "VTKGNKUHMP"}, +{"name": "pymc396", "age": 93, "index": 396, "body": "VTKGNKUHMP"}, +{"name": "pymc641", "age": 35, "index": 641, "body": "VTKGNKUHMP"}, +{"name": "pymc997", "age": 88, "index": 997, "body": "VTKGNKUHMP"}, +{"name": "pymc414", "age": 10, "index": 414, "body": "VTKGNKUHMP"}, +{"name": "pymc761", "age": 54, "index": 761, "body": "VTKGNKUHMP"}, +{"name": "pymc984", "age": 75, "index": 984, "body": "VTKGNKUHMP"}, +{"name": "pymc496", "age": 92, "index": 496, "body": "VTKGNKUHMP"}, +{"name": "pymc911", "age": 2, "index": 911, "body": "VTKGNKUHMP"}, +{"name": "pymc788", "age": 81, "index": 788, "body": "VTKGNKUHMP"}, +{"name": "pymc399", "age": 96, "index": 399, "body": "VTKGNKUHMP"}, +{"name": "pymc423", "age": 19, "index": 423, "body": "VTKGNKUHMP"}, +{"name": "pymc771", "age": 64, "index": 771, "body": "VTKGNKUHMP"}, +{"name": "pymc588", "age": 83, "index": 588, "body": "VTKGNKUHMP"}, +{"name": "pymc613", "age": 7, "index": 613, "body": "VTKGNKUHMP"}, +{"name": "pymc606", "age": 0, "index": 606, "body": "VTKGNKUHMP"}, +{"name": "pymc704", "age": 98, "index": 704, "body": "VTKGNKUHMP"}, +{"name": "pymc918", "age": 9, "index": 918, "body": "VTKGNKUHMP"}, +{"name": "pymc522", "age": 17, "index": 522, "body": "VTKGNKUHMP"}, +{"name": "pymc557", "age": 52, "index": 557, "body": "VTKGNKUHMP"}, +{"name": "pymc621", "age": 15, "index": 621, "body": "VTKGNKUHMP"}, +{"name": "pymc535", "age": 30, "index": 535, "body": "VTKGNKUHMP"}, +{"name": "pymc317", "age": 14, "index": 317, "body": "VTKGNKUHMP"}, +{"name": "pymc555", "age": 50, "index": 555, "body": "VTKGNKUHMP"}, +{"name": "pymc442", "age": 38, "index": 442, "body": 
"VTKGNKUHMP"}, +{"name": "pymc653", "age": 47, "index": 653, "body": "VTKGNKUHMP"}, +{"name": "pymc903", "age": 95, "index": 903, "body": "VTKGNKUHMP"}, +{"name": "pymc666", "age": 60, "index": 666, "body": "VTKGNKUHMP"}, +{"name": "pymc541", "age": 36, "index": 541, "body": "VTKGNKUHMP"}, +{"name": "pymc708", "age": 1, "index": 708, "body": "VTKGNKUHMP"}, +{"name": "pymc440", "age": 36, "index": 440, "body": "VTKGNKUHMP"}, +{"name": "pymc647", "age": 41, "index": 647, "body": "VTKGNKUHMP"}, +{"name": "pymc995", "age": 86, "index": 995, "body": "VTKGNKUHMP"}, +{"name": "pymc646", "age": 40, "index": 646, "body": "VTKGNKUHMP"}, +{"name": "pymc906", "age": 98, "index": 906, "body": "VTKGNKUHMP"}, +{"name": "pymc774", "age": 67, "index": 774, "body": "VTKGNKUHMP"}, +{"name": "pymc345", "age": 42, "index": 345, "body": "VTKGNKUHMP"}, +{"name": "pymc718", "age": 11, "index": 718, "body": "VTKGNKUHMP"}, +{"name": "pymc316", "age": 13, "index": 316, "body": "VTKGNKUHMP"}, +{"name": "pymc310", "age": 7, "index": 310, "body": "VTKGNKUHMP"}, +{"name": "pymc635", "age": 29, "index": 635, "body": "VTKGNKUHMP"}, +{"name": "pymc540", "age": 35, "index": 540, "body": "VTKGNKUHMP"}, +{"name": "pymc382", "age": 79, "index": 382, "body": "VTKGNKUHMP"}, +{"name": "pymc637", "age": 31, "index": 637, "body": "VTKGNKUHMP"}, +{"name": "pymc394", "age": 91, "index": 394, "body": "VTKGNKUHMP"}, +{"name": "pymc401", "age": 98, "index": 401, "body": "VTKGNKUHMP"}, +{"name": "pymc307", "age": 4, "index": 307, "body": "VTKGNKUHMP"}, +{"name": "pymc342", "age": 39, "index": 342, "body": "VTKGNKUHMP"}, +{"name": "pymc341", "age": 38, "index": 341, "body": "VTKGNKUHMP"}, +{"name": "pymc407", "age": 3, "index": 407, "body": "VTKGNKUHMP"}, +{"name": "pymc706", "age": 100, "index": 706, "body": "VTKGNKUHMP"}, +{"name": "pymc552", "age": 47, "index": 552, "body": "VTKGNKUHMP"}, +{"name": "pymc397", "age": 94, "index": 397, "body": "VTKGNKUHMP"}, +{"name": "pymc643", "age": 37, "index": 643, "body": "VTKGNKUHMP"}, +{"name": "pymc381", "age": 78, "index": 381, "body": "VTKGNKUHMP"}, +{"name": "pymc989", "age": 80, "index": 989, "body": "VTKGNKUHMP"}, +{"name": "pymc767", "age": 60, "index": 767, "body": "VTKGNKUHMP"}, +{"name": "pymc484", "age": 80, "index": 484, "body": "VTKGNKUHMP"}, +{"name": "pymc988", "age": 79, "index": 988, "body": "VTKGNKUHMP"}, +{"name": "pymc497", "age": 93, "index": 497, "body": "VTKGNKUHMP"}, +{"name": "pymc630", "age": 24, "index": 630, "body": "VTKGNKUHMP"}, +{"name": "pymc599", "age": 94, "index": 599, "body": "VTKGNKUHMP"}, +{"name": "pymc626", "age": 20, "index": 626, "body": "VTKGNKUHMP"}, +{"name": "pymc457", "age": 53, "index": 457, "body": "VTKGNKUHMP"}, +{"name": "pymc998", "age": 89, "index": 998, "body": "VTKGNKUHMP"}, +{"name": "pymc981", "age": 72, "index": 981, "body": "VTKGNKUHMP"}, +{"name": "pymc719", "age": 12, "index": 719, "body": "VTKGNKUHMP"}, +{"name": "pymc545", "age": 40, "index": 545, "body": "VTKGNKUHMP"}, +{"name": "pymc554", "age": 49, "index": 554, "body": "VTKGNKUHMP"}, +{"name": "pymc315", "age": 12, "index": 315, "body": "VTKGNKUHMP"}, +{"name": "pymc303", "age": 0, "index": 303, "body": "VTKGNKUHMP"}, +{"name": "pymc795", "age": 88, "index": 795, "body": "VTKGNKUHMP"}, +{"name": "pymc461", "age": 57, "index": 461, "body": "VTKGNKUHMP"}, +{"name": "pymc516", "age": 11, "index": 516, "body": "VTKGNKUHMP"}, +{"name": "pymc470", "age": 66, "index": 470, "body": "VTKGNKUHMP"}, +{"name": "pymc707", "age": 0, "index": 707, "body": "VTKGNKUHMP"}, +{"name": "pymc712", 
"age": 5, "index": 712, "body": "VTKGNKUHMP"}, +{"name": "pymc798", "age": 91, "index": 798, "body": "VTKGNKUHMP"}, +{"name": "pymc389", "age": 86, "index": 389, "body": "VTKGNKUHMP"}, +{"name": "pymc505", "age": 0, "index": 505, "body": "VTKGNKUHMP"}, +{"name": "pymc400", "age": 97, "index": 400, "body": "VTKGNKUHMP"}, +{"name": "pymc573", "age": 68, "index": 573, "body": "VTKGNKUHMP"}, +{"name": "pymc422", "age": 18, "index": 422, "body": "VTKGNKUHMP"}, +{"name": "pymc711", "age": 4, "index": 711, "body": "VTKGNKUHMP"}, +{"name": "pymc663", "age": 57, "index": 663, "body": "VTKGNKUHMP"}, +{"name": "pymc797", "age": 90, "index": 797, "body": "VTKGNKUHMP"}, +{"name": "pymc980", "age": 71, "index": 980, "body": "VTKGNKUHMP"}, +{"name": "pymc916", "age": 7, "index": 916, "body": "VTKGNKUHMP"}, +{"name": "pymc582", "age": 77, "index": 582, "body": "VTKGNKUHMP"}, +{"name": "pymc456", "age": 52, "index": 456, "body": "VTKGNKUHMP"}, +{"name": "pymc427", "age": 23, "index": 427, "body": "VTKGNKUHMP"}, +{"name": "pymc309", "age": 6, "index": 309, "body": "VTKGNKUHMP"}, +{"name": "pymc502", "age": 98, "index": 502, "body": "VTKGNKUHMP"}, +{"name": "pymc592", "age": 87, "index": 592, "body": "VTKGNKUHMP"}, +{"name": "pymc770", "age": 63, "index": 770, "body": "VTKGNKUHMP"}, +{"name": "pymc432", "age": 28, "index": 432, "body": "VTKGNKUHMP"}, +{"name": "pymc589", "age": 84, "index": 589, "body": "VTKGNKUHMP"}, +{"name": "pymc500", "age": 96, "index": 500, "body": "VTKGNKUHMP"}, +{"name": "pymc792", "age": 85, "index": 792, "body": "VTKGNKUHMP"}, +{"name": "pymc431", "age": 27, "index": 431, "body": "VTKGNKUHMP"}, +{"name": "pymc395", "age": 92, "index": 395, "body": "VTKGNKUHMP"}, +{"name": "pymc780", "age": 73, "index": 780, "body": "VTKGNKUHMP"}, +{"name": "pymc485", "age": 81, "index": 485, "body": "VTKGNKUHMP"}, +{"name": "pymc675", "age": 69, "index": 675, "body": "VTKGNKUHMP"}, +{"name": "pymc511", "age": 6, "index": 511, "body": "VTKGNKUHMP"}, +{"name": "pymc785", "age": 78, "index": 785, "body": "VTKGNKUHMP"}, +{"name": "pymc581", "age": 76, "index": 581, "body": "VTKGNKUHMP"}, +{"name": "pymc543", "age": 38, "index": 543, "body": "VTKGNKUHMP"}, +{"name": "pymc572", "age": 67, "index": 572, "body": "VTKGNKUHMP"}, +{"name": "pymc786", "age": 79, "index": 786, "body": "VTKGNKUHMP"}, +{"name": "pymc593", "age": 88, "index": 593, "body": "VTKGNKUHMP"}, +{"name": "pymc784", "age": 77, "index": 784, "body": "VTKGNKUHMP"}, +{"name": "pymc504", "age": 100, "index": 504, "body": "VTKGNKUHMP"}, +{"name": "pymc466", "age": 62, "index": 466, "body": "VTKGNKUHMP"}, +{"name": "pymc512", "age": 7, "index": 512, "body": "VTKGNKUHMP"}, +{"name": "pymc463", "age": 59, "index": 463, "body": "VTKGNKUHMP"}, +{"name": "pymc460", "age": 56, "index": 460, "body": "VTKGNKUHMP"}, +{"name": "pymc383", "age": 80, "index": 383, "body": "VTKGNKUHMP"}, +{"name": "pymc782", "age": 75, "index": 782, "body": "VTKGNKUHMP"}, +{"name": "pymc434", "age": 30, "index": 434, "body": "VTKGNKUHMP"}, +{"name": "pymc474", "age": 70, "index": 474, "body": "VTKGNKUHMP"}, +{"name": "pymc595", "age": 90, "index": 595, "body": "VTKGNKUHMP"}, +{"name": "pymc791", "age": 84, "index": 791, "body": "VTKGNKUHMP"}, +{"name": "pymc476", "age": 72, "index": 476, "body": "VTKGNKUHMP"}, +{"name": "pymc793", "age": 86, "index": 793, "body": "VTKGNKUHMP"}, +{"name": "pymc594", "age": 89, "index": 594, "body": "VTKGNKUHMP"}, +{"name": "pymc794", "age": 87, "index": 794, "body": "VTKGNKUHMP"}, +{"name": "pymc472", "age": 68, "index": 472, "body": 
"VTKGNKUHMP"}, +{"name": "pymc562", "age": 57, "index": 562, "body": "VTKGNKUHMP"}, +{"name": "pymc473", "age": 69, "index": 473, "body": "VTKGNKUHMP"}, +{"name": "pymc571", "age": 66, "index": 571, "body": "VTKGNKUHMP"}, +{"name": "pymc513", "age": 8, "index": 513, "body": "VTKGNKUHMP"}, +{"name": "pymc566", "age": 61, "index": 566, "body": "VTKGNKUHMP"}, +{"name": "pymc564", "age": 59, "index": 564, "body": "VTKGNKUHMP"}, +{"name": "pymc787", "age": 80, "index": 787, "body": "VTKGNKUHMP"}, +{"name": "pymc580", "age": 75, "index": 580, "body": "VTKGNKUHMP"}, +{"name": "pymc565", "age": 60, "index": 565, "body": "VTKGNKUHMP"}, +{"name": "pymc503", "age": 99, "index": 503, "body": "VTKGNKUHMP"}, +{"name": "pymc781", "age": 74, "index": 781, "body": "VTKGNKUHMP"}, +{"name": "pymc570", "age": 65, "index": 570, "body": "VTKGNKUHMP"}, +{"name": "pymc436", "age": 32, "index": 436, "body": "VTKGNKUHMP"}, +{"name": "pymc510", "age": 5, "index": 510, "body": "VTKGNKUHMP"}, +{"name": "pymc585", "age": 80, "index": 585, "body": "VTKGNKUHMP"}, +{"name": "pymc501", "age": 97, "index": 501, "body": "VTKGNKUHMP"}, +{"name": "pymc583", "age": 78, "index": 583, "body": "VTKGNKUHMP"}, +{"name": "pymc790", "age": 83, "index": 790, "body": "VTKGNKUHMP"}, +{"name": "pymc567", "age": 62, "index": 567, "body": "VTKGNKUHMP"}, +{"name": "pymc477", "age": 73, "index": 477, "body": "VTKGNKUHMP"}, +{"name": "pymc586", "age": 81, "index": 586, "body": "VTKGNKUHMP"}, +{"name": "pymc596", "age": 91, "index": 596, "body": "VTKGNKUHMP"}, +{"name": "pymc435", "age": 31, "index": 435, "body": "VTKGNKUHMP"}, +{"name": "pymc587", "age": 82, "index": 587, "body": "VTKGNKUHMP"}, +{"name": "pymc574", "age": 69, "index": 574, "body": "VTKGNKUHMP"}, +{"name": "pymc584", "age": 79, "index": 584, "body": "VTKGNKUHMP"}, +{"name": "pymc506", "age": 1, "index": 506, "body": "VTKGNKUHMP"}, +{"name": "pymc783", "age": 76, "index": 783, "body": "VTKGNKUHMP"}, +{"name": "pymc796", "age": 89, "index": 796, "body": "VTKGNKUHMP"}, +{"name": "pymc465", "age": 61, "index": 465, "body": "VTKGNKUHMP"}, +{"name": "pymc437", "age": 33, "index": 437, "body": "VTKGNKUHMP"}, +{"name": "pymc464", "age": 60, "index": 464, "body": "VTKGNKUHMP"}, +{"name": "pymc462", "age": 58, "index": 462, "body": "VTKGNKUHMP"}, +{"name": "pymc560", "age": 55, "index": 560, "body": "VTKGNKUHMP"}, +{"name": "pymc576", "age": 71, "index": 576, "body": "VTKGNKUHMP"}, +{"name": "pymc591", "age": 86, "index": 591, "body": "VTKGNKUHMP"}, +{"name": "pymc475", "age": 71, "index": 475, "body": "VTKGNKUHMP"}, +{"name": "pymc430", "age": 26, "index": 430, "body": "VTKGNKUHMP"}, +{"name": "pymc433", "age": 29, "index": 433, "body": "VTKGNKUHMP"}, +{"name": "pymc597", "age": 92, "index": 597, "body": "VTKGNKUHMP"}, +{"name": "pymc575", "age": 70, "index": 575, "body": "VTKGNKUHMP"}, +{"name": "pymc563", "age": 58, "index": 563, "body": "VTKGNKUHMP"}, +{"name": "pymc514", "age": 9, "index": 514, "body": "VTKGNKUHMP"}, +{"name": "pymc507", "age": 2, "index": 507, "body": "VTKGNKUHMP"}, +{"name": "pymc467", "age": 63, "index": 467, "body": "VTKGNKUHMP"}, +{"name": "pymc471", "age": 67, "index": 471, "body": "VTKGNKUHMP"}, +{"name": "pymc590", "age": 85, "index": 590, "body": "VTKGNKUHMP"}, +{"name": "pymc577", "age": 72, "index": 577, "body": "VTKGNKUHMP"}, +{"name": "pymc515", "age": 10, "index": 515, "body": "VTKGNKUHMP"}, +{"name": "pymc561", "age": 56, "index": 561, "body": "VTKGNKUHMP"}] diff --git a/resources/imex/json_list_10_lines b/resources/imex/json_list_10_lines new file 
mode 100644
index 000000000..bd5e7890b
--- /dev/null
+++ b/resources/imex/json_list_10_lines
@@ -0,0 +1,10 @@
+[{"name": "pymc1", "age": 1, "index": 1, "body": "VTKGNKUHMP"},
+{"name": "pymc0", "age": 0, "index": 0, "body": "VTKGNKUHMP"},
+{"name": "pymc9", "age": 9, "index": 9, "body": "VTKGNKUHMP"},
+{"name": "pymc4", "age": 4, "index": 4, "body": "VTKGNKUHMP"},
+{"name": "pymc7", "age": 7, "index": 7, "body": "VTKGNKUHMP"},
+{"name": "pymc5", "age": 5, "index": 5, "body": "VTKGNKUHMP"},
+{"name": "pymc2", "age": 2, "index": 2, "body": "VTKGNKUHMP"},
+{"name": "pymc6", "age": 6, "index": 6, "body": "VTKGNKUHMP"},
+{"name": "pymc3", "age": 3, "index": 3, "body": "VTKGNKUHMP"},
+{"name": "pymc8", "age": 8, "index": 8, "body": "VTKGNKUHMP"}]
diff --git a/scripts/getcoredumps.py b/scripts/getcoredumps.py
index 18f30a6d6..3684fdf26 100644
--- a/scripts/getcoredumps.py
+++ b/scripts/getcoredumps.py
@@ -86,6 +86,7 @@ def run(self):
             command = "mkdir -p /tmp/backup_crash/{0};" \
                       "mv -f /tmp/core* /tmp/backup_crash/{0};" \
                       "mv -f /opt/{1}/var/lib/{1}/erl_crash.dump* /tmp/backup_crash/{0}; " \
+                      "mv -f /opt/{1}/var/lib/{1}/*.dmp /tmp/backup_crash/{0};" \
                       "mv -f /opt/{1}/var/lib/{1}/crash/*.dmp /tmp/backup_crash/{0};".\
                       format(stamp, server_type)
             print "put all crashes on {0} in backup folder: /tmp/backup_crash/{1}".format(self.server.ip, stamp)
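The extra `mv` added above also sweeps minidump files (`*.dmp`) that land directly under the server's `var/lib/<product>` directory, not only those under the `crash/` subdirectory. A minimal sketch of how the template expands; the `stamp` and `server_type` values are illustrative, not taken from a real run:

    # Sketch only: renders the backup command assembled in getcoredumps.py
    # for hypothetical inputs, so the swept paths are visible at a glance.
    command = "mkdir -p /tmp/backup_crash/{0};" \
              "mv -f /tmp/core* /tmp/backup_crash/{0};" \
              "mv -f /opt/{1}/var/lib/{1}/erl_crash.dump* /tmp/backup_crash/{0}; " \
              "mv -f /opt/{1}/var/lib/{1}/*.dmp /tmp/backup_crash/{0};" \
              "mv -f /opt/{1}/var/lib/{1}/crash/*.dmp /tmp/backup_crash/{0};".\
              format("2016-05-01-120000", "couchbase")  # hypothetical stamp, product dir
    print(command)  # one shell line that stages every crash artifact in one folder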
diff --git a/scripts/install.py b/scripts/install.py
old mode 100644
new mode 100755
index bc4e26d9a..744abb6e0
--- a/scripts/install.py
+++ b/scripts/install.py
@@ -27,8 +27,11 @@
 from testconstants import CB_REPO
 from testconstants import COUCHBASE_VERSION_2
 from testconstants import COUCHBASE_VERSION_3, COUCHBASE_FROM_WATSON
-from testconstants import CB_VERSION_NAME, COUCHBASE_FROM_VERSION_4
+from testconstants import CB_VERSION_NAME, COUCHBASE_FROM_VERSION_4,\
+    CB_RELEASE_BUILDS
 from testconstants import MIN_KV_QUOTA, INDEX_QUOTA, FTS_QUOTA
+from testconstants import LINUX_COUCHBASE_PORT_CONFIG_PATH, LINUX_COUCHBASE_OLD_CONFIG_PATH
+from testconstants import WIN_COUCHBASE_PORT_CONFIG_PATH, WIN_COUCHBASE_OLD_CONFIG_PATH
 
 import TestInput
 
@@ -44,17 +47,18 @@ def usage(err=None):
   -i                      Path to .ini file containing cluster information.
 
   Available keys:
-  product=cb|mb           Used to specify couchbase or membase.
-  version=SHORT_VERSION   Example: "2.0.0r-71".
-  parallel=false          Useful when you're installing a cluster.
-  toy=                    Install a toy build
-  init_nodes=False        Initialize nodes
-  vbuckets=               The number of vbuckets in the server installation.
-  sync_threads=True       Sync or acync threads(+S or +A)
-  erlang_threads=         Number of erlang threads (default=16:16 for +S type)
-  upr=True                Enable UPR replication
-  xdcr_upr=               Enable UPR for XDCR (temporary param until XDCR with UPR is stable), values: None | True | False
-  fts_query_limit=1000000 Set a limit for the max results to be returned by fts for any query
+  product=cb|mb              Used to specify couchbase or membase.
+  version=SHORT_VERSION      Example: "2.0.0r-71".
+  parallel=false             Useful when you're installing a cluster.
+  toy=                       Install a toy build
+  init_nodes=False           Initialize nodes
+  vbuckets=                  The number of vbuckets in the server installation.
+  sync_threads=True          Sync or async threads (+S or +A)
+  erlang_threads=            Number of erlang threads (default=16:16 for +S type)
+  upr=True                   Enable UPR replication
+  xdcr_upr=                  Enable UPR for XDCR (temporary param until XDCR with UPR is stable), values: None | True | False
+  fts_query_limit=1000000    Set a limit for the max results to be returned by fts for any query
+  change_indexer_ports=false Set indexer ports to non-default values
 
   Examples:
@@ -224,7 +228,8 @@ def build_url(self, params):
         releases_version = ["1.6.5.4", "1.7.0", "1.7.1", "1.7.1.1", "1.8.0"]
         cb_releases_version = ["1.8.1", "2.0.0", "2.0.1", "2.1.0", "2.1.1",
                                "2.2.0", "2.5.0", "2.5.1", "2.5.2", "3.0.0", "3.0.1", "3.0.2",
-                               "3.0.3", "3.1.0", "3.1.1", "3.1.2", "3.1.3", "3.1.5"]
+                               "3.0.3", "3.1.0", "3.1.1", "3.1.2", "3.1.3", "3.1.5", "3.1.6",
+                               "4.0.0", "4.0.1", "4.1.0", "4.1.1", "4.1.2", "4.5.0"]
         build_repo = MV_LATESTBUILD_REPO
         if toy is not "":
             build_repo = CB_REPO
@@ -236,11 +241,13 @@ def build_url(self, params):
             sys.exit("version is not support yet")
         for name in names:
             if version[:5] in releases_version:
-                build = BuildQuery().find_membase_release_build(deliverable_type=info.deliverable_type,
-                                                                os_architecture=info.architecture_type,
-                                                                build_version=version,
-                                                                product='membase-server-enterprise')
-            elif version[:5] in cb_releases_version:
+                build = BuildQuery().find_membase_release_build(
+                    deliverable_type=info.deliverable_type,
+                    os_architecture=info.architecture_type,
+                    build_version=version,
+                    product='membase-server-enterprise')
+            elif len(version) > 6 and version[6:].replace("-rel", "") == \
+                    CB_RELEASE_BUILDS[version[:5]]:
                 build = BuildQuery().find_couchbase_release_build(
                     deliverable_type=info.deliverable_type,
                     os_architecture=info.architecture_type,
@@ -248,19 +255,20 @@ def build_url(self, params):
                     product=name,
                     os_version = info.distribution_version)
             else:
-                builds, changes = BuildQuery().get_all_builds(version=version, timeout=timeout, \
-                                  direct_build_url=direct_build_url, \
-                                  deliverable_type=info.deliverable_type, \
-                                  architecture_type=info.architecture_type, \
-                                  edition_type=name, \
-                                  repo=build_repo, toy=toy, \
-                                  distribution_version=info.distribution_version.lower(), \
+                builds, changes = BuildQuery().get_all_builds(version=version,
+                                  timeout=timeout,
+                                  direct_build_url=direct_build_url,
+                                  deliverable_type=info.deliverable_type,
+                                  architecture_type=info.architecture_type,
+                                  edition_type=name,
+                                  repo=build_repo, toy=toy,
+                                  distribution_version=info.distribution_version.lower(),
                                   distribution_type=info.distribution_type.lower())
-                build = BuildQuery().find_build(builds, name, info.deliverable_type, \
-                                                info.architecture_type, version, toy=toy, \
-                                                openssl=openssl, direct_build_url=direct_build_url, \
-                                                distribution_version=info.distribution_version.lower(), \
-                                                distribution_type=info.distribution_type.lower())
+                build = BuildQuery().find_build(builds, name, info.deliverable_type,
+                                                info.architecture_type, version, toy=toy,
+                                                openssl=openssl, direct_build_url=direct_build_url,
+                                                distribution_version=info.distribution_version.lower(),
+                                                distribution_type=info.distribution_type.lower())
 
         if build:
             if 'amazon' in params:
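The rewritten `elif` above changes how GA release builds are detected: rather than matching only the bare version prefix against a hard-coded list, it now requires the build number after the dash to equal the known release build for that version line. A hedged sketch of the comparison; the mapping values below are illustrative stand-ins for `testconstants.CB_RELEASE_BUILDS`:

    # Illustrative only: the real CB_RELEASE_BUILDS lives in testconstants.
    CB_RELEASE_BUILDS = {"4.1.0": "5005", "4.5.0": "2601"}

    def is_release_build(version):
        # "4.5.0-2601" -> prefix "4.5.0", build "2601"; a "-rel" suffix is stripped.
        # .get avoids a KeyError on unknown prefixes; install.py indexes directly.
        return len(version) > 6 and \
            version[6:].replace("-rel", "") == CB_RELEASE_BUILDS.get(version[:5])

    print(is_release_build("4.5.0-2601"))  # True with the mapping above
    print(is_release_build("4.5.0-9999"))  # False: not the release build number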
"indexer_stream_maint_port": 9115} + remote_client = RemoteMachineShellConnection(input.servers[0]) + info = remote_client.extract_remote_info() + remote_client.disconnect() + type = info.type.lower() + if type == "windows": + port_config_path = WIN_COUCHBASE_PORT_CONFIG_PATH + old_config_path = WIN_COUCHBASE_OLD_CONFIG_PATH + else: + port_config_path = LINUX_COUCHBASE_PORT_CONFIG_PATH + old_config_path = LINUX_COUCHBASE_OLD_CONFIG_PATH + filename = "static_config" + for node in input.servers: + output_lines = '' + remote = RemoteMachineShellConnection(node) + remote.stop_server() + lines = remote.read_remote_file(port_config_path, filename) + for line in lines: + for key in params.keys(): + if key in line: + line = "" + break + output_lines += "{0}".format(line) + for key in params.keys(): + line = "{" + str(key) + ", " + str(params[key]) + "}." + output_lines += "{0}\n".format(line) + output_lines = output_lines.replace(r'"', r'\"') + remote.write_remote_file(port_config_path, filename, output_lines) + remote.delete_file(old_config_path, "/config.dat") + for node in input.servers: + remote = RemoteMachineShellConnection(node) + remote.start_server() + def main(): log.info('*****Starting the complete install process ****') log_install_failed = "some nodes were not install successfully!" @@ -1055,6 +1102,9 @@ def main(): success &= RemoteMachineShellConnection(server).is_moxi_installed() if not success: sys.exit(log_install_failed) + if "change_indexer_ports" in input.test_params and input.test_params["change_indexer_ports"].lower() == 'true'\ + and input.test_params["product"] in ["couchbase", "couchbase-server", "cb"]: + change_couchbase_indexer_ports(input) if __name__ == "__main__": main() diff --git a/scripts/testDispatcher.py b/scripts/testDispatcher.py index 69f07a33e..b57c78dc7 100644 --- a/scripts/testDispatcher.py +++ b/scripts/testDispatcher.py @@ -47,6 +47,7 @@ def main(): parser.add_option('-e','--extraParameters', dest='extraParameters', default=None) parser.add_option('-y','--serverType', dest='serverType', default='VM') parser.add_option('-u','--url', dest='url', default=None) + parser.add_option('-j','--jenkins', dest='jenkins', default=None) options, args = parser.parse_args() @@ -165,15 +166,18 @@ def main(): + # Docker goes somewhere else launchStringBase = 'http://qa.sc.couchbase.com/job/test_suite_executor' - if options.test: - if options.serverType.lower() == 'docker': - launchStringBase = launchStringBase + '-docker-test' - else: - launchStringBase = launchStringBase + '-test' - elif options.serverType.lower() == 'docker': + + # optional add [-docker] [-Jenkins extension] + if options.serverType.lower() == 'docker': launchStringBase = launchStringBase + '-docker' + if options.test: + launchStringBase = launchStringBase + '-test' + elif options.jenkins is not None: + launchStringBase = launchStringBase + '-' + options.jenkins + # this are VM/Docker dependent - or maybe not @@ -289,7 +293,7 @@ def main(): testsToLaunch.pop(i) summary.append( {'test':descriptor, 'time':time.asctime( time.localtime(time.time()) ) } ) if options.serverType.lower() == 'docker': - time.sleep(180) # this is due to the docker port allocation race + time.sleep(240) # this is due to the docker port allocation race else: time.sleep(30) else: