modified testrunner code to work with ns_server

refactored the entire test runner and removed hardcoded memcached/moxi
port numbers from test helper methods and test cases. helper methods and data-loader
methods can now discover the memcached/moxi port from /pools/default;
the same applies to the bucket port and bucket password.

Change-Id: I6ae97c0ccd9a0e313fc44021032bdd29e7897dab
Reviewed-on: http://review.couchbase.org/7589
Reviewed-by: Farshid Ghods <farshid.ghods@gmail.com>
Tested-by: Farshid Ghods <farshid.ghods@gmail.com>
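
For context on the port-discovery approach described above, here is a minimal sketch (assuming the ns_server REST layout of this era) of how a helper could read the moxi/memcached ports and bucket password from /pools/default/buckets rather than hardcoding them. The helper name, default credentials, and JSON field names are illustrative assumptions, not code from this commit.

import base64
import json
import urllib2  # Python 2, matching the rest of this codebase

def discover_bucket_ports(host, user="Administrator", password="password"):
    """Sketch of the discovery idea: ask ns_server for each bucket's
    moxi (proxy) and memcached (direct) ports instead of hardcoding
    11211/11210. Field names follow the /pools/default/buckets JSON
    as commonly documented; treat this as illustrative only."""
    request = urllib2.Request("http://{0}:8091/pools/default/buckets".format(host))
    # ns_server protects its REST endpoints with HTTP basic auth
    request.add_header("Authorization",
                       "Basic " + base64.b64encode("%s:%s" % (user, password)))
    buckets = json.loads(urllib2.urlopen(request).read())
    ports = {}
    for bucket in buckets:
        ports[bucket["name"]] = {
            "moxi": bucket["proxyPort"],                         # per-bucket moxi port
            "memcached": bucket["nodes"][0]["ports"]["direct"],  # dedicated memcached port
            "sasl_password": bucket.get("saslPassword", ""),
        }
    return ports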
commit 06a7d5d99b382af04ccbf6ec6c8d4e534fd608f3 (1 parent: a276622)
authored by @farshidce
Showing with 2,513 additions and 2,151 deletions.
  1. +1 −0  .gitignore
  2. +3 −7 TestInput.py
  3. +0 −43 conf/amazon-cluster.conf
  4. +24 −0 conf/bucket-ops.conf
  5. +0 −7 conf/bucketops-membase.conf
  6. +0 −2  conf/incremental-rebalance.conf
  7. +0 −32 conf/py-all-multiple-nodes.conf
  8. +50 −46 conf/py-all.conf
  9. +0 −6 conf/py-failover-medium.conf
  10. +0 −6 conf/py-failover-small.conf
  11. +10 −10 conf/py-multiple-add-nodes.conf
  12. +0 −13 conf/py-multiple-medium-load-ubuntu.conf
  13. +0 −18 conf/py-multiple-small-load-ubuntu.conf
  14. +29 −31 conf/py-multiple-small-load.conf
  15. +1 −1  conf/py-pre-17-all.conf
  16. +0 −17 conf/py-smoke.conf
  17. +7 −7 lib/builds/BeautifulSoup.py
  18. +17 −0 lib/builds/build_query.py
  19. +4 −4 lib/cli_interface.py
  20. +39 −20 lib/load_runner.py
  21. +34 −3 lib/logger.py
  22. +16 −7 lib/mc_bin_client.py
  23. +6 −6 lib/mc_bin_server.py
  24. +7 −0 lib/membase/api/exception.py
  25. +12 −9 lib/membase/api/httplib2/__init__.py
  26. +92 −17 lib/membase/api/rest_client.py
  27. +77 −114 lib/membase/helper/bucket_helper.py
  28. +4 −16 lib/membase/helper/cluster_helper.py
  29. +53 −47 lib/membase/helper/rebalance_helper.py
  30. 0  pytests/management/upgrade/__init__.py → lib/membase/helper/stats.py
  31. +1 −1  lib/membase_install.py
  32. +73 −82 lib/memcached/helper/data_helper.py
  33. +0 −1  lib/remote/Installer.py
  34. +139 −88 lib/remote/remote_util.py
  35. +4 −4 lib/testrunner_common.py
  36. +74 −29 lib/xunit.py
  37. 0  pytests/management/bucketops/singlenode/create/__init__.py → longevity/backupprocess.py
  38. +1 −0  longevity/loadprocess.py
  39. +16 −22 pytests/{management/addnodes → }/addnodestests.py
  40. +105 −112 pytests/backuptests.py
  41. +83 −117 pytests/{management/bucketops/singlenode/create/createtests.py → createbuckettests.py}
  42. +103 −0 pytests/deletebuckettests.py
  43. +89 −61 pytests/{memcachedops → }/expirytests.py
  44. +17 −11 pytests/{management → }/failovertests.py
  45. +0 −253 pytests/management/add_rebalance.py
  46. +1 −1  pytests/management/backupandrestoretests.py
  47. +1 −1  pytests/management/bucketops/multinode/create/createtests.py
  48. +0 −2  pytests/management/bucketops/singlenode/delete/__init__.py
  49. +0 −98 pytests/management/bucketops/singlenode/delete/deletetests.py
  50. +0 −2  pytests/management/bucketops/singlenode/recreate/__init__.py
  51. +0 −2  pytests/management/replication/__init__.py
  52. +1 −1  pytests/memcachedops/fill_bucket.py
  53. +0 −138 pytests/memcachedops/set_get_test.py
  54. +44 −44 pytests/{memcachedops → }/memcapable.py
  55. +202 −312 pytests/{management → }/rebalancetests.py
  56. +43 −31 pytests/{management/bucketops/singlenode/recreate/recreatetests.py → recreatebuckettests.py}
  57. +21 −122 pytests/{management/replication → }/replicationtests.py
  58. +118 −0 pytests/setgettests.py
  59. +21 −16 pytests/syncreplicationtests.py
  60. +2 −0  pytests/taptests.py
  61. +44 −54 pytests/upgradetests.py
  62. 0  pytests/management/bucketops/singlenode/__init__.py → resources/jenkins/aliaksey.ini
  63. 0  pytests/management/addnodes/__init__.py → resources/jenkins/t.ini
  64. +1 −0  resources/valgrind/heapprof.env
  65. +258 −0 scripts/install.py
  66. +320 −0 scripts/longevity.py
  67. +39 −45 testrunner
  68. +63 −12 unittests/awareness.py
  69. +143 −0 unittests/tapper.py
1  .gitignore
@@ -4,3 +4,4 @@
*.DS_*
*.idea
*.log
+*tmp-
10 TestInput.py
@@ -17,7 +17,6 @@ class TestInput(object):
def __init__(self):
self.servers = []
self.membase_settings = None
- self.membase_build = None
self.test_params = {}
#servers , each server can have u,p,port,directory
@@ -77,7 +76,6 @@ def get_test_input(argv):
has_ini = True
ini_file = argument
if option == '-p':
- print 'test specific parameters'
pairs = argument.split(',')
for pair in pairs:
key_value = pair.split('=')
@@ -103,8 +101,6 @@ def parse_from_file(file):
for section in sections:
if section == 'servers':
ips = TestInputParser.get_server_ips(config,section)
- elif section == 'build':
- input.membase_build = TestInputParser.get_membase_build(config,section)
elif section == 'membase':
input.membase_settings = TestInputParser.get_membase_settings(config,section)
elif section == 'global':
@@ -164,6 +160,8 @@ def get_server(ip,config):
server.ssh_key = config.get(section,option)
if option == 'port':
server.port = config.get(section,option)
+ if option == 'ip':
+ server.ip = config.get(section,option)
break
#get username
#get password
@@ -212,9 +210,8 @@ def parse_from_command_line(argv):
# -p : for smtp ( taken care of by jenkins)
# -o : taken care of by jenkins
servers = []
- input_build = None
membase_setting = None
- (opts, args) = getopt.getopt(argv[1:],'h:t:c:v:s:i:p:', [])
+ (opts, args) = getopt.getopt(argv[1:],'h:t:c:i:p:', [])
#first let's loop over and find out if user has asked for help
need_help = False
for option,argument in opts:
@@ -268,7 +265,6 @@ def parse_from_command_line(argv):
if not server.port:
server.port = 8091
input.servers = servers
- input.membase_build = input_build
input.membase_settings = membase_setting
return input
except Exception:
43 conf/amazon-cluster.conf
@@ -1,43 +0,0 @@
-management.rebalancetests.IncrementalRebalanceInTests.test_medium_load
-management.rebalancetests.RebalanceTestsWithMutationLoadTests.test_medium_load
-management.rebalancetests.IncrementalRebalanceInWithParallelLoad.test_medium_load
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_50_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_50_percent
-management.replication.replicationtests.ReplicationTests.test_failover_1_replica_50_percent
-management.rebalancetests.IncrementalRebalanceOut.test_rebalance_out_medium_load
-management.failovertests.FailoverTests.test_failover_normal_1_replica_10_percent
-management.failovertests.FailoverTests.test_failover_normal_2_replica_10_percent
-management.failovertests.FailoverTests.test_failover_normal_3_replica_10_percent
-management.rebalancetests.IncrementalRebalanceInTests.test_small_load
-management.rebalancetests.RebalanceTestsWithMutationLoadTests.test_small_load
-management.rebalancetests.IncrementalRebalanceInWithParallelLoad.test_small_load
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_0_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_10_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_0_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_10_percent
-management.replication.replicationtests.ReplicationTests.test_failover_1_replica_0_1_percent
-management.replication.replicationtests.ReplicationTests.test_failover_1_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_failover_1_replica_10_percent
-management.rebalancetests.IncrementalRebalanceOut.test_rebalance_out_medium_load
-management.failovertests.FailoverTests.test_failover_stop_membase_1_replica_1_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_2_replica_1_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_3_replica_1_percent
-management.failovertests.FailoverTests.test_failover_normal_1_replica_1_percent
-management.failovertests.FailoverTests.test_failover_normal_2_replica_1_percent
-management.failovertests.FailoverTests.test_failover_normal_3_replica_1_percent
-management.install.InstallTest.test_install
-management.rebalancetests.IncrementalRebalanceInTests.test_heavy_load
-management.rebalancetests.RebalanceTestsWithMutationLoadTests.test_heavy_load
-management.rebalancetests.IncrementalRebalanceInWithParallelLoad.test_heavy_load
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_99_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_99_percent
-management.replication.replicationtests.ReplicationTests.test_failover_1_replica_99_percent
-management.rebalancetests.IncrementalRebalanceOut.test_rebalance_out_heavy_load
-management.failovertests.FailoverTests.test_failover_stop_membase_1_replica_10_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_2_replica_10_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_3_replica_10_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_1_replica_30_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_2_replica_30_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_3_replica_30_percent
24 conf/bucket-ops.conf
@@ -0,0 +1,24 @@
+recreatebuckettests.RecreateMembaseBuckets.default_moxi
+recreatebuckettests.RecreateMembaseBuckets.default_dedicated
+recreatebuckettests.RecreateMembaseBuckets.default_moxi_sasl
+deletebuckettests.DeleteMembaseBuckets.default_moxi
+deletebuckettests.DeleteMembaseBuckets.non_default_moxi
+createbuckettests.CreateMembaseBucketsTests.default_moxi
+createbuckettests.CreateMembaseBucketsTests.default_case_sensitive_dedicated
+createbuckettests.CreateMembaseBucketsTests.default_on_non_default_port
+createbuckettests.CreateMembaseBucketsTests.non_default_moxi
+createbuckettests.CreateMembaseBucketsTests.default_case_sensitive_different_ports
+createbuckettests.CreateMembaseBucketsTests.non_default_case_sensitive_different_port
+createbuckettests.CreateMembaseBucketsTests.non_default_case_sensitive_same_port
+createbuckettests.CreateMembaseBucketsTests.less_than_minimum_memory_quota
+createbuckettests.CreateMembaseBucketsTests.max_memory_quota
+createbuckettests.CreateMembaseBucketsTests.negative_replica
+createbuckettests.CreateMembaseBucketsTests.zero_replica
+createbuckettests.CreateMembaseBucketsTests.one_replica
+createbuckettests.CreateMembaseBucketsTests.two_replica
+createbuckettests.CreateMembaseBucketsTests.three_replica
+createbuckettests.CreateMembaseBucketsTests.four_replica
+createbuckettests.CreateMembaseBucketsTests.valid_chars
+createbuckettests.CreateMembaseBucketsTests.invalid_chars
+createbuckettests.CreateMembaseBucketsTests.max_buckets
+createbuckettests.CreateMembaseBucketsTests.more_than_max_buckets
7 conf/bucketops-membase.conf
@@ -1,7 +0,0 @@
-bucketops/singlenode/create/membasebucket/test_default_case_sensitive_different_port
-bucketops/singlenode/create/membasebucket/test_default_case_sensitive_on_11211
-bucketops/singlenode/create/membasebucket/test_default_on_11211
-bucketops/singlenode/create/membasebucket/test_default_on_non_default_port
-bucketops/singlenode/create/membasebucket/test_non_default
-bucketops/singlenode/create/membasebucket/test_non_default_case_sensitive_different_port
-bucketops/singlenode/create/membasebucket/test_non_default_case_sensitive_same_port
2  conf/incremental-rebalance.conf
@@ -1,2 +0,0 @@
-management.rebalancetests.IncrementalRebalanceInTests.test_small_load
-management.rebalancetests.IncrementalRebalanceInTests.test_medium_load
32 conf/py-all-multiple-nodes.conf
@@ -1,32 +0,0 @@
-management.install.InstallTest.test_install
-management.version_check.VerifyVersionTest
-management.rebalancetests.IncrementalRebalanceInTests.test_small_load
-management.rebalancetests.RebalanceTestsWithMutationLoadTests.test_small_load
-management.rebalancetests.IncrementalRebalanceInTests.test_medium_load
-management.rebalancetests.RebalanceTestsWithMutationLoadTests.test_medium_load
-management.rebalancetests.IncrementalRebalanceInWithParallelLoad.test_small_load
-management.rebalancetests.IncrementalRebalanceInWithParallelLoad.test_medium_load
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_0_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_10_percent
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_50_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_0_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_10_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_50_percent
-management.replication.replicationtests.ReplicationTests.test_failover_1_replica_0_1_percent
-management.replication.replicationtests.ReplicationTests.test_failover_1_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_failover_1_replica_10_percent
-management.replication.replicationtests.ReplicationTests.test_failover_1_replica_50_percent
-management.addnodes.addnodestests.AddNodesTests.test_add_1_node_with_bucket
-management.addnodes.addnodestests.AddNodesTests.test_add_all_node_no_buckets
-management.addnodes.addnodestests.AddNodesTests.test_add_all_node_no_buckets
-management.addnodes.addnodestests.AddNodesTests.test_add_all_node_with_bucket
-management.addnodes.addnodestests.AddNodesTests.test_add_node_itself_no_buckets
-management.addnodes.addnodestests.AddNodesTests.test_add_node_itself_with_bucket
-management.addnodes.addnodestests.AddNodesTests.test_add_node_already_added_no_buckets
-management.addnodes.addnodestests.AddNodesTests.test_add_node_already_added_with_bucket
-management.addnodes.addnodestests.AddNodesTests.test_add_1_node_no_buckets
-management.addnodes.addnodestests.AddNodesTests.test_add_1_node_with_bucket
-management.rebalancetests.IncrementalRebalanceOut.test_rebalance_out_medium_load
-management.rebalancetests.IncrementalRebalanceOut.test_rebalance_out_small_load
96 conf/py-all.conf
@@ -1,48 +1,52 @@
management.install.InstallTest.test_install
management.version_check.VerifyVersionTest
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_default_on_11211
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_non_default
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_default_on_non_default_port
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_default_case_sensitive_different_port
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_default_case_sensitive_on_11211
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_non_default_case_sensitive_different_port
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_non_default_case_sensitive_same_port
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_less_than_minimum_memory_quota
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_max_memory_quota
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_negative_replica
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_zero_replica
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_one_replica
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_two_replica
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_three_replica
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_valid_chars
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_invalid_chars
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_max_buckets
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_more_than_max_buckets
-management.bucketops.singlenode.delete.deletetests.DeleteMembaseBucketsTests.test_non_default
-management.bucketops.singlenode.delete.deletetests.DeleteMembaseBucketsTests.test_default_on_11211
-memcachedops.set_get_test.MembaseBucket.value_100b
-memcachedops.set_get_test.MembaseBucket.value_1mb
-memcachedops.set_get_test.MembaseBucket.value_10mb
-memcachedops.set_get_test.MembaseBucket.value_500kb
-memcachedops.expirytests.ExpiryTests.test1
-memcachedops.expirytests.ExpiryTests.test_expired_keys_tap
-memcachedops.memcapable.SimpleSetMembaseBucketDefaultPort.test_set_pos_int_value_pos_flag_key_never_expired
-memcachedops.memcapable.SimpleSetMembaseBucketDefaultPort.test_set_neg_int_value_pos_flag_key_never_expired
-memcachedops.memcapable.SimpleSetMembaseBucketDefaultPort.test_set_pos_float_value_pos_flag_key_never_expired
-memcachedops.memcapable.SimpleSetMembaseBucketDefaultPort.test_set_neg_float_value_pos_flag_key_never_expired
-memcachedops.memcapable.SimpleIncrMembaseBucketDefaultPort.test_incr_an_exist_key_never_exp
-memcachedops.memcapable.SimpleIncrMembaseBucketDefaultPort.test_incr_non_exist_key
-memcachedops.memcapable.SimpleIncrMembaseBucketDefaultPort.test_incr_with_exist_key_and_expired
-memcachedops.memcapable.SimpleIncrMembaseBucketDefaultPort.test_incr_with_exist_key_decr_then_incr_never_expired
-memcachedops.memcapable.SimpleDecrMembaseBucketDefaultPort.test_decr_an_exist_key_never_exp
-memcachedops.memcapable.SimpleDecrMembaseBucketDefaultPort.test_decr_non_exist_key
-memcachedops.memcapable.SimpleDecrMembaseBucketDefaultPort.test_decr_with_exist_key_and_expired
-memcachedops.memcapable.SimpleDecrMembaseBucketDefaultPort.test_decr_with_exist_key_incr_then_decr_never_expired
-memcachedops.memcapable.GetlTests.getl_expired_item
-memcachedops.memcapable.GetlTests.getl_minus_one
-memcachedops.memcapable.GetlTests.getl_sixty
-memcachedops.memcapable.GetlTests.getl_five
-memcachedops.memcapable.GetlTests.getl_ten
-memcachedops.memcapable.GetlTests.getl_fifteen
-memcachedops.memcapable.GetlTests.getl_thirty
-memcachedops.memcapable.GetlTests.getl_sixty
+recreatebuckettests.RecreateMembaseBuckets.default_moxi
+recreatebuckettests.RecreateMembaseBuckets.default_dedicated
+recreatebuckettests.RecreateMembaseBuckets.default_moxi_sasl
+deletebuckettests.DeleteMembaseBuckets.default_moxi
+deletebuckettests.DeleteMembaseBuckets.non_default_moxi
+createbuckettests.CreateMembaseBucketsTests.default_moxi
+createbuckettests.CreateMembaseBucketsTests.default_case_sensitive_dedicated
+createbuckettests.CreateMembaseBucketsTests.default_on_non_default_port
+createbuckettests.CreateMembaseBucketsTests.non_default_moxi
+createbuckettests.CreateMembaseBucketsTests.default_case_sensitive_different_ports
+createbuckettests.CreateMembaseBucketsTests.non_default_case_sensitive_different_port
+createbuckettests.CreateMembaseBucketsTests.non_default_case_sensitive_same_port
+createbuckettests.CreateMembaseBucketsTests.less_than_minimum_memory_quota
+createbuckettests.CreateMembaseBucketsTests.max_memory_quota
+createbuckettests.CreateMembaseBucketsTests.negative_replica
+createbuckettests.CreateMembaseBucketsTests.zero_replica
+createbuckettests.CreateMembaseBucketsTests.one_replica
+createbuckettests.CreateMembaseBucketsTests.two_replica
+createbuckettests.CreateMembaseBucketsTests.three_replica
+createbuckettests.CreateMembaseBucketsTests.four_replica
+createbuckettests.CreateMembaseBucketsTests.valid_chars
+createbuckettests.CreateMembaseBucketsTests.invalid_chars
+createbuckettests.CreateMembaseBucketsTests.max_buckets
+createbuckettests.CreateMembaseBucketsTests.more_than_max_buckets
+setgettests.MembaseBucket.value_100b
+setgettests.MembaseBucket.value_1mb
+setgettests.MembaseBucket.value_10mb
+setgettests.MembaseBucket.value_500kb
+expirytests.ExpiryTests.test1
+expirytests.ExpiryTests.test_expired_keys_tap
+memcapable.SimpleSetMembaseBucketDefaultPort.test_set_pos_int_value_pos_flag_key_never_expired
+memcapable.SimpleSetMembaseBucketDefaultPort.test_set_neg_int_value_pos_flag_key_never_expired
+memcapable.SimpleSetMembaseBucketDefaultPort.test_set_pos_float_value_pos_flag_key_never_expired
+memcapable.SimpleSetMembaseBucketDefaultPort.test_set_neg_float_value_pos_flag_key_never_expired
+memcapable.SimpleIncrMembaseBucketDefaultPort.test_incr_an_exist_key_never_exp
+memcapable.SimpleIncrMembaseBucketDefaultPort.test_incr_non_exist_key
+memcapable.SimpleIncrMembaseBucketDefaultPort.test_incr_with_exist_key_and_expired
+memcapable.SimpleIncrMembaseBucketDefaultPort.test_incr_with_exist_key_decr_then_incr_never_expired
+memcapable.SimpleDecrMembaseBucketDefaultPort.test_decr_an_exist_key_never_exp
+memcapable.SimpleDecrMembaseBucketDefaultPort.test_decr_non_exist_key
+memcapable.SimpleDecrMembaseBucketDefaultPort.test_decr_with_exist_key_and_expired
+memcapable.SimpleDecrMembaseBucketDefaultPort.test_decr_with_exist_key_incr_then_decr_never_expired
+memcapable.GetlTests.getl_expired_item
+memcapable.GetlTests.getl_minus_one
+memcapable.GetlTests.getl_zero
+memcapable.GetlTests.getl_five
+memcapable.GetlTests.getl_ten
+memcapable.GetlTests.getl_fifteen
+memcapable.GetlTests.getl_thirty
+memcapable.GetlTests.getl_sixty
6 conf/py-failover-medium.conf
@@ -1,6 +0,0 @@
-management.failovertests.FailoverTests.test_failover_stop_membase_1_replica_10_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_2_replica_10_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_3_replica_10_percent
-management.failovertests.FailoverTests.test_failover_normal_1_replica_10_percent
-management.failovertests.FailoverTests.test_failover_normal_2_replica_10_percent
-management.failovertests.FailoverTests.test_failover_normal_3_replica_10_percent
6 conf/py-failover-small.conf
@@ -1,6 +0,0 @@
-management.failovertests.FailoverTests.test_failover_stop_membase_1_replica_1_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_2_replica_1_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_3_replica_1_percent
-management.failovertests.FailoverTests.test_failover_normal_1_replica_1_percent
-management.failovertests.FailoverTests.test_failover_normal_2_replica_1_percent
-management.failovertests.FailoverTests.test_failover_normal_3_replica_1_percent
20 conf/py-multiple-add-nodes.conf
@@ -1,11 +1,11 @@
management.install.InstallTest.test_install
-management.addnodes.addnodestests.AddNodesTests.test_add_1_node_with_bucket
-management.addnodes.addnodestests.AddNodesTests.test_add_all_node_no_buckets
-management.addnodes.addnodestests.AddNodesTests.test_add_all_node_no_buckets
-management.addnodes.addnodestests.AddNodesTests.test_add_all_node_with_bucket
-management.addnodes.addnodestests.AddNodesTests.test_add_node_itself_no_buckets
-management.addnodes.addnodestests.AddNodesTests.test_add_node_itself_with_bucket
-management.addnodes.addnodestests.AddNodesTests.test_add_node_already_added_no_buckets
-management.addnodes.addnodestests.AddNodesTests.test_add_node_already_added_with_bucket
-management.addnodes.addnodestests.AddNodesTests.test_add_1_node_no_buckets
-management.addnodes.addnodestests.AddNodesTests.test_add_1_node_with_bucket
+addnodestests.AddNodesTests.test_add_1_node_with_bucket
+addnodestests.AddNodesTests.test_add_all_node_no_buckets
+addnodestests.AddNodesTests.test_add_all_node_no_buckets
+addnodestests.AddNodesTests.test_add_all_node_with_bucket
+addnodestests.AddNodesTests.test_add_node_itself_no_buckets
+addnodestests.AddNodesTests.test_add_node_itself_with_bucket
+addnodestests.AddNodesTests.test_add_node_already_added_no_buckets
+addnodestests.AddNodesTests.test_add_node_already_added_with_bucket
+addnodestests.AddNodesTests.test_add_1_node_no_buckets
+addnodestests.AddNodesTests.test_add_1_node_with_bucket
13 conf/py-multiple-medium-load-ubuntu.conf
@@ -1,13 +0,0 @@
-management.install.InstallTest.test_install
-management.rebalancetests.IncrementalRebalanceInTests.test_medium_load
-management.rebalancetests.RebalanceTestsWithMutationLoadTests.test_medium_load
-management.rebalancetests.IncrementalRebalanceInWithParallelLoad.test_medium_load
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_50_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_50_percent
-management.rebalancetests.IncrementalRebalanceOut.test_rebalance_out_medium_load
-management.failovertests.FailoverTests.test_failover_stop_membase_1_replica_10_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_2_replica_10_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_3_replica_10_percent
-management.failovertests.FailoverTests.test_failover_normal_1_replica_10_percent
-management.failovertests.FailoverTests.test_failover_normal_2_replica_10_percent
-management.failovertests.FailoverTests.test_failover_normal_3_replica_10_percent
18 conf/py-multiple-small-load-ubuntu.conf
@@ -1,18 +0,0 @@
-management.install.InstallTest.test_install
-management.version_check.VerifyVersionTest
-management.rebalancetests.IncrementalRebalanceInTests.test_small_load
-management.rebalancetests.RebalanceTestsWithMutationLoadTests.test_small_load
-management.rebalancetests.IncrementalRebalanceInWithParallelLoad.test_small_load
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_0_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_10_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_0_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_10_percent
-management.rebalancetests.IncrementalRebalanceOut.test_rebalance_out_medium_load
-management.failovertests.FailoverTests.test_failover_stop_membase_1_replica_1_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_2_replica_1_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_3_replica_1_percent
-management.failovertests.FailoverTests.test_failover_normal_1_replica_1_percent
-management.failovertests.FailoverTests.test_failover_normal_2_replica_1_percent
-management.failovertests.FailoverTests.test_failover_normal_3_replica_1_percent
60 conf/py-multiple-small-load.conf
@@ -1,31 +1,29 @@
-management.install.InstallTest.test_install
-management.version_check.VerifyVersionTest
-management.rebalancetests.IncrementalRebalanceInTests.test_small_load_2_replica
-management.rebalancetests.IncrementalRebalanceInTests.test_small_load_3_replica
-management.rebalancetests.IncrementalRebalanceInTests.test_small_load
-management.rebalancetests.RebalanceTestsWithMutationLoadTests.test_small_load
-management.rebalancetests.IncrementalRebalanceInWithParallelLoad.test_small_load
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_0_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_10_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_0_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_10_percent
-management.replication.replicationtests.ReplicationTests.test_replication_3_replica_0_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_3_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_3_replica_10_percent
-management.rebalancetests.IncrementalRebalanceOut.test_rebalance_out_medium_load
-management.failovertests.FailoverTests.test_failover_stop_membase_1_replica_1_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_2_replica_1_percent
-management.failovertests.FailoverTests.test_failover_stop_membase_3_replica_1_percent
-management.failovertests.FailoverTests.test_failover_normal_1_replica_1_percent
-management.failovertests.FailoverTests.test_failover_normal_2_replica_1_percent
-management.failovertests.FailoverTests.test_failover_normal_3_replica_1_percent
-management.failovertests.FailoverTests.test_failover_firewall_1_replica_1_percent
-management.failovertests.FailoverTests.test_failover_firewall_1_replica_10_percent
-management.failovertests.FailoverTests.test_failover_firewall_2_replica_1_percent
-management.failovertests.FailoverTests.test_failover_firewall_3_replica_1_percent
-management.bucketops.multinode.create.createtests.CreateMembaseBucketsTests.test_max_buckets
-management.bucketops.multinode.create.createtests.CreateMembaseBucketsTests.test_ten_buckets
-management.bucketops.multinode.create.createtests.CreateMembaseBucketsTests.test_twenty_buckets
-management.bucketops.multinode.create.createtests.CreateMembaseBucketsTests.test_five_buckets_small_load
+rebalancetests.IncrementalRebalanceInTests.test_small_load_2_replica
+rebalancetests.IncrementalRebalanceInTests.test_small_load_3_replica
+rebalancetests.IncrementalRebalanceInTests.test_small_load
+rebalancetests.RebalanceTestsWithMutationLoadTests.test_small_load
+rebalancetests.IncrementalRebalanceInWithParallelLoad.test_small_load
+replicationtests.ReplicationTests.test_replication_1_replica_0_1_percent
+replicationtests.ReplicationTests.test_replication_1_replica_1_percent
+replicationtests.ReplicationTests.test_replication_1_replica_10_percent
+replicationtests.ReplicationTests.test_replication_2_replica_0_1_percent
+replicationtests.ReplicationTests.test_replication_2_replica_1_percent
+replicationtests.ReplicationTests.test_replication_2_replica_10_percent
+replicationtests.ReplicationTests.test_replication_3_replica_0_1_percent
+replicationtests.ReplicationTests.test_replication_3_replica_1_percent
+replicationtests.ReplicationTests.test_replication_3_replica_10_percent
+rebalancetests.IncrementalRebalanceOut.test_rebalance_out_medium_load
+failovertests.FailoverTests.test_failover_stop_membase_1_replica_1_percent
+failovertests.FailoverTests.test_failover_stop_membase_2_replica_1_percent
+failovertests.FailoverTests.test_failover_stop_membase_3_replica_1_percent
+failovertests.FailoverTests.test_failover_normal_1_replica_1_percent
+failovertests.FailoverTests.test_failover_normal_2_replica_1_percent
+failovertests.FailoverTests.test_failover_normal_3_replica_1_percent
+failovertests.FailoverTests.test_failover_firewall_1_replica_1_percent
+failovertests.FailoverTests.test_failover_firewall_1_replica_10_percent
+failovertests.FailoverTests.test_failover_firewall_2_replica_1_percent
+failovertests.FailoverTests.test_failover_firewall_3_replica_1_percent
+bucketops.multinode.create.createtests.CreateMembaseBucketsTests.test_max_buckets
+bucketops.multinode.create.createtests.CreateMembaseBucketsTests.test_ten_buckets
+bucketops.multinode.create.createtests.CreateMembaseBucketsTests.test_twenty_buckets
+bucketops.multinode.create.createtests.CreateMembaseBucketsTests.test_five_buckets_small_load
2  conf/py-pre-17-all.conf
@@ -81,7 +81,7 @@ memcachedops.memcapable.GetlTests.getl_expired_item
management.install.InstallTest.test_reset
memcachedops.memcapable.GetlTests.getl_minus_one
management.install.InstallTest.test_reset
-memcachedops.memcapable.GetlTests.getl_sixty
+memcachedops.memcapable.GetlTests.getl_zero
management.install.InstallTest.test_reset
memcachedops.memcapable.GetlTests.getl_five
management.install.InstallTest.test_reset
17 conf/py-smoke.conf
@@ -1,17 +0,0 @@
-management.install.InstallTest.test_install
-management.version_check.VerifyVersionTest
-management.rebalancetests.IncrementalRebalanceInTests.test_medium_load
-management.rebalancetests.IncrementalRebalanceOut.test_rebalance_out_medium_load
-management.rebalancetests.IncrementalRebalanceInWithParallelLoad.test_small_load
-management.addnodes.addnodestests.AddNodesTests.test_add_1_node_with_bucket
-management.addnodes.addnodestests.AddNodesTests.test_add_all_node_no_buckets
-management.replication.replicationtests.ReplicationTests.test_replication_1_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_replication_2_replica_1_percent
-management.replication.replicationtests.ReplicationTests.test_failover_1_replica_1_percent
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_default_on_11211
-management.bucketops.singlenode.create.createtests.CreateMembaseBucketsTests.test_non_default
-memcachedops.set_get_test.SimpleSetGetMembaseBucketNonDefaultDedicatedPort.test_set_get_small_keys
-memcachedops.set_get_test.SimpleSetGetMembaseBucketNonDefaultDedicatedPort.test_set_get_large_keys
-memcachedops.set_get_test.SimpleSetGetMembaseBucketNonDefaultPost11211.test_set_get_small_keys
-memcachedops.set_get_test.SimpleSetGetMembaseBucketNonDefaultPost11211.test_set_get_large_keys
-memcachedops.expirytests.ExpiryTests.test1
14 lib/builds/BeautifulSoup.py
@@ -151,7 +151,7 @@ def insert(self, position, newChild):
newChild.parent = self
previousChild = None
- if position == 0:
+ if not position:
newChild.previousSibling = None
newChild.previous = self
else:
@@ -468,7 +468,7 @@ def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
- elif tag.find('__') != 0:
+ elif tag.find('__'):
return self.find(tag)
def __eq__(self, other):
@@ -928,7 +928,7 @@ def __getattr__(self, methodName):
if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
or methodName.find('do_') == 0:
return SGMLParser.__getattr__(self, methodName)
- elif methodName.find('__') != 0:
+ elif methodName.find('__'):
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
@@ -980,7 +980,7 @@ def endData(self, containerClass=NavigableString):
currentData = ' '
self.currentData = []
if self.parseOnlyThese and len(self.tagStack) <= 1 and \
- (not self.parseOnlyThese.text or \
+ (not self.parseOnlyThese.text or
not self.parseOnlyThese.search(currentData)):
return
o = containerClass(currentData)
@@ -1140,7 +1140,7 @@ def handle_entityref(self, ref):
characters."""
data = None
if self.convertEntities == self.HTML_ENTITIES or \
- (self.convertEntities == self.XML_ENTITIES and \
+ (self.convertEntities == self.XML_ENTITIES and
self.XML_ENTITY_LIST.get(ref)):
try:
data = unichr(name2codepoint[ref])
@@ -1664,8 +1664,8 @@ def _ebcdic_to_ascii(self, s):
90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
250,251,252,253,254,255)
import string
- c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
- ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
+ c.EBCDIC_TO_ASCII_MAP = string.maketrans(
+ ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(c.EBCDIC_TO_ASCII_MAP)
MS_CHARS = { '\x80' : ('euro', '20AC'),
17 lib/builds/build_query.py
@@ -52,6 +52,13 @@ def parse_builds(self):
#parse build page and create build object
pass
+ def find_build(self,builds,product,type,arch,version):
+ for build in builds:
+ if build.product_version.find(version) != -1 and product == build.product\
+ and build.architecture_type == arch and type == build.deliverable_type:
+ return build
+ return None
+
def find_membase_build(self, builds, product, deliverable_type, os_architecture, build_version):
for build in builds:
if build.product_version.find(build_version) != -1 and product == build.product\
@@ -75,6 +82,16 @@ def sort_builds_by_version(self, builds):
return sorted(membase_builds,
key=lambda membase_build: membase_build.build_number, reverse=True)
+ def sort_builds_by_time(self, builds):
+ membase_builds = list()
+ for build in builds:
+ if build.product == 'membase-server-enterprise':
+ membase_builds.append(build)
+
+ return sorted(membase_builds,
+ key=lambda membase_build: membase_build.time, reverse=True)
+
+
def get_latest_builds(self):
return self._get_and_parse_builds('http://builds.hq.northscale.net/latestbuilds')
8 lib/cli_interface.py
@@ -74,7 +74,7 @@ def rebalance(self):
cmd = " rebalance " + self.acting_server_args
return self.execute_command(cmd)
- def rebalance_stop():
+ def rebalance_stop(self):
cmd = " reblance-stop " + self.acting_server_args
return self.execute_command(cmd)
@@ -116,7 +116,7 @@ def bucket_delete(self, bucket_name):
cmd = " bucket-delete " + self.acting_server_args + " --bucket=%s" % (bucket_name)
return self.execute_command(cmd)
- def bucket_flush():
+ def bucket_flush(self):
return "I don't work yet :-("
def execute_command(self, cmd):
@@ -128,7 +128,7 @@ def execute_command(self, cmd):
def execute_local(self, cmd):
rtn = ""
process = subprocess.Popen(cmd ,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
- stdoutdata,stderrdata=process.communicate(None)
+ stdoutdata,stderrdata=process.communicate()
rtn += stdoutdata
return rtn
@@ -138,6 +138,6 @@ def execute_ssh(self, cmd):
process = subprocess.Popen("ssh root@%s \"%s\"" % (self.server,cmd),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
else:
process = subprocess.Popen("ssh -i %s root@%s \"%s\"" % (self.sshkey, self.server, cmd),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
- stdoutdata,stderrdata=process.communicate(None)
+ stdoutdata,stderrdata=process.communicate()
rtn += stdoutdata
return rtn
59 lib/load_runner.py
@@ -2,8 +2,12 @@
import time
import fractions
import uuid
+import logger
+from TestInput import TestInputServer
import mc_bin_client
+from membase.api.rest_client import RestConnection
+from memcached.helper.data_helper import VBucketAwareMemcached
class FakeMemcachedClient(object):
@@ -26,6 +30,7 @@ def __init__(self, load_info, server_index):
threading.Thread.__init__(self)
self.daemon = True
+ self.log = logger.Logger()
# thread state info
self.stopped = False
self.paused = True
@@ -70,6 +75,7 @@ def __init__(self, load_info, server_index):
self.max_operation_rate = int(load_info['operation_info'].get('operation_rate', 0) / threads)
self.uuid = uuid.uuid4()
+ self.name = str(self.uuid) + self.name
# limit info
# all but time needs to be divided equally amongst threads
@@ -77,12 +83,21 @@ def __init__(self, load_info, server_index):
self.limit_operations = int(load_info['limit_info'].get('operations', 0) / threads)
self.limit_time = int(load_info['limit_info'].get('time', 0))
self.limit_size = int(load_info['limit_info'].get('size', 0) / threads)
-
+ self.poxi = self._poxi()
# connect
- self.server = mc_bin_client.MemcachedClient(self.server_ip, self.server_port)
- if self.bucket_name or self.bucket_password:
- self.server.sasl_auth_plain(self.bucket_name,self.bucket_password)
+ #self.server should be vbucketaware memcached
+# self.server = mc_bin_client.MemcachedClient(self.server_ip, self.server_port)
+# if self.bucket_name or self.bucket_password:
+# self.server.sasl_auth_plain(self.bucket_name,self.bucket_password)
+
+ def _poxi(self):
+ tServer = TestInputServer()
+ tServer.ip = self.server_ip
+ tServer.rest_username = "Administrator"
+ tServer.rest_password = "password"
+ rest = RestConnection(tServer)
+ return VBucketAwareMemcached(rest, self.bucket_name)
def run(self):
while True:
@@ -98,12 +113,16 @@ def run(self):
# stop thread if we hit a limit (first limit we hit ends the thread)
if self.limit_items and self.items > self.limit_items:
+ self.log.info("items count limit reached")
return
if self.limit_operations and self.operations > self.limit_operations:
+ self.log.info("operations limit reached")
return
if self.limit_time and self.time > self.limit_time:
+ self.log.info("time limit reached")
return
if self.limit_size and self.size > self.limit_size:
+ self.log.info("size limit reached")
return
# rate limit if we need to
@@ -116,7 +135,7 @@ def run(self):
key = self.name + '_' + `self.get_mutation_key()`
try:
# print `self.mutation_index` + " : " + `self.get_mutation_key()`
- self.server.set(key, 0, 0, self.get_data())
+ self.poxi.memcached(key).set(key, 0, 0, self.get_data())
self.operations += 1
self.backoff -= 1
@@ -130,7 +149,7 @@ def run(self):
# TODO: verify that this works, we may need to take the second to max index
# update the size of all data (values, not keys) that is in the system
# this can be either from new keys or overwriting old keys
- prev_indexes = get_mutation_indexes(self.get_mutation_key())
+ prev_indexes = self.get_mutation_indexes(self.get_mutation_key())
prev_size = 0
if prev_indexes:
prev_size = self.get_data_size(max(prev_indexes))
@@ -138,6 +157,8 @@ def run(self):
self.mutation_index += 1
except mc_bin_client.MemcachedError as e:
+ self.poxi.done()
+ self.poxi = self._poxi()
if self.backoff < 0:
self.backoff = 0
if self.backoff > 30:
@@ -153,7 +174,7 @@ def run(self):
elif operation == 'get':
key = self.name + '_' + `self.get_get_key()`
try:
- vdata = self.server.get(key)
+ vdata = self.poxi.memcached(key).get(key)
self.operations += 1
self.backoff -= 1
data = vdata[2]
@@ -162,19 +183,17 @@ def run(self):
if data != data_expected:
self.value_failures += 1
raise
- except:
+ except Exception as e:
print e
-# print self.server.db
-# print "create: " + `self.create`
-# print "nocreate: " + `self.nocreate`
-# print "get_index: " + `self.get_index`
-# print "get_key: " + `self.get_get_key()`
-# print "mutation_index_max: " + `self.mutation_index_max`
-# print "mutation_indexes: " + `self.get_mutation_indexes(self.get_get_key())`
-# print "getting data for mutation index: " + `max(self.get_mutation_indexes(self.get_get_key()))`
-# print "got: \'" + data + "\'"
-# print "expected: \'" + data_expected + "\'"
-# raise ValueError
+ print "create: " + `self.create`
+ print "nocreate: " + `self.nocreate`
+ print "get_index: " + `self.get_index`
+ print "get_key: " + `self.get_get_key()`
+ print "mutation_index_max: " + `self.mutation_index_max`
+ print "mutation_indexes: " + `self.get_mutation_indexes(self.get_get_key())`
+ print "getting data for mutation index: " + `max(self.get_mutation_indexes(self.get_get_key()))`
+ print "got: \'" + data + "\'"
+ raise ValueError
self.get_index += 1
except mc_bin_client.MemcachedError as e:
if self.backoff < 0:
@@ -303,7 +322,7 @@ def query(self):
t.join(0)
self.threads = [t for t in self.threads if t.isAlive()]
- if len(self.threads) == 0:
+ if not len(self.threads):
self.stopped = True
if self.stopped:
37 lib/logger.py
@@ -1,5 +1,6 @@
import logging
from logging.handlers import RotatingFileHandler
+import os
import uuid
global _logger
@@ -32,7 +33,11 @@ def start_logger(name):
formatter = logging.Formatter("[%(asctime)s] - [%(module)s] [%(thread)d] - %(levelname)s - %(message)s")
max_size = 20 * 1024 * 1024 #max size is 50 megabytes
- fileHandler = RotatingFileHandler('{0}.log'.format(name), backupCount=2,maxBytes=max_size)
+
+ filename = "{0}.log".format(name)
+ if "TEMP-FOLDER" in os.environ:
+ filename = "{0}/{1}.log".format(os.environ["TEMP-FOLDER"], name)
+ fileHandler = RotatingFileHandler(filename, backupCount=2, maxBytes=max_size)
# add formatter to ch
consoleHandler.setFormatter(formatter)
@@ -42,5 +47,31 @@ def start_logger(name):
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler)
Logger._logger = logger
- print 'start logging to {0}.log'.format(name)
- return Logger._logger
+ print 'start logging to {0}.log'.format(filename)
+ return Logger._logger
+
+
+def new_logger(name):
+ logger = logging.getLogger(name)
+ logger.setLevel(logging.INFO)
+
+ # create console handler and set level to debug
+ consoleHandler = logging.StreamHandler()
+ consoleHandler.setLevel(logging.DEBUG)
+
+ # create formatter
+ formatter = logging.Formatter("[%(asctime)s] - [%(module)s] [%(thread)d] - %(levelname)s - %(message)s")
+
+ max_size = 20 * 1024 * 1024 #max size is 50 megabytes
+
+ filename = "{0}.log".format(name)
+ fileHandler = RotatingFileHandler(filename, backupCount=2, maxBytes=max_size)
+
+ # add formatter to ch
+ consoleHandler.setFormatter(formatter)
+ fileHandler.setFormatter(formatter)
+
+ # add ch to logger
+ logger.addHandler(consoleHandler)
+ logger.addHandler(fileHandler)
+ return logger
23 lib/mc_bin_client.py
@@ -128,8 +128,11 @@ def decr(self, key, amt=1, init=0, exp=0):
"""Decrement or create the named counter."""
return self.__incrdecr(memcacheConstants.CMD_DECR, key, amt, init, exp)
- def set(self, key, exp, flags, val):
- self.vbucketId = crc32.crc32_hash(key) & 1023
+ def set(self, key, exp, flags, val, vbucket=-1):
+ if vbucket == -1:
+ self.vbucketId = crc32.crc32_hash(key) & 1023
+ else:
+ self.vbucketId = vbucket
"""Set a value in the memcached server."""
return self._mutate(memcacheConstants.CMD_SET, key, exp, flags, 0, val)
@@ -152,10 +155,14 @@ def __parseGet(self, data, klen=0):
flags=struct.unpack(memcacheConstants.GET_RES_FMT, data[-1][:4])[0]
return flags, data[1], data[-1][4 + klen:]
- def get(self, key):
+ def get(self, key, vbucket=-1):
"""Get the value for a given key within the memcached server."""
- self.vbucketId = crc32.crc32_hash(key) & 1023
+ if vbucket == -1:
+ self.vbucketId = crc32.crc32_hash(key) & 1023
+ else:
+ self.vbucketId = vbucket
parts=self._doCmd(memcacheConstants.CMD_GET, key, '')
+
return self.__parseGet(parts)
def send_get(self, key):
@@ -307,7 +314,9 @@ def noop(self):
"""Send a noop command."""
return self._doCmd(memcacheConstants.CMD_NOOP, '', '')
- def delete(self, key, cas=0):
+ def delete(self, key, cas=0, vbucket=-1):
+ if vbucket == -1:
+ self.vbucketId = crc32.crc32_hash(key) & 1023
"""Delete the value for a given key within the memcached server."""
return self._doCmd(memcacheConstants.CMD_DELETE, key, '', '', cas)
@@ -414,8 +423,8 @@ def restore_file(self, filename):
def restore_complete(self):
"""Notify the server that we're done restoring."""
- return self._doCmd(memcacheConstants.CMD_RESTORE_COMPLETE, '', '', '', 0)
+ return self._doCmd(memcacheConstants.CMD_RESTORE_COMPLETE, '', '')
def deregister_tap_client(self, tap_name):
"""Deregister the TAP client with a given name."""
- return self._doCmd(memcacheConstants.CMD_DEREGISTER_TAP_CLIENT, tap_name, '', '', 0)
+ return self._doCmd(memcacheConstants.CMD_DEREGISTER_TAP_CLIENT, tap_name, '')
12 lib/mc_bin_server.py
@@ -108,7 +108,7 @@ def __init__(self):
+ string.digits, 32))
def __lookup(self, key):
- rv=self.storage.get(key, None)
+ rv=self.storage.get(key)
if rv:
now=time.time()
if now >= rv[1]:
@@ -146,7 +146,7 @@ def handle_getq(self, cmd, hdrs, key, cas, data):
def __handle_unconditional_set(self, cmd, hdrs, key, data):
exp=hdrs[1]
# If it's going to expire soon, tell it to wait a while.
- if exp == 0:
+ if not exp:
exp=float(2 ** 31)
self.storage[key]=(hdrs[0], time.time() + exp, data)
print "Stored", self.storage[key], "in", key
@@ -157,7 +157,7 @@ def __handle_unconditional_set(self, cmd, hdrs, key, data):
def __mutation(self, cmd, hdrs, key, data, multiplier):
amount, initial, expiration=hdrs
rv=self._error(memcacheConstants.ERR_NOT_FOUND, 'Not found')
- val=self.storage.get(key, None)
+ val=self.storage.get(key)
print "Mutating %s, hdrs=%s, val=%s %s" % (key, `hdrs`, `val`,
multiplier)
if val:
@@ -168,7 +168,7 @@ def __mutation(self, cmd, hdrs, key, data, multiplier):
if expiration != memcacheConstants.INCRDECR_SPECIAL:
self.storage[key]=(0, time.time() + expiration, initial)
rv=0, id(self.storage[key]), str(initial)
- if rv[0] == 0:
+ if not rv[0]:
rv = rv[0], rv[1], struct.pack(
memcacheConstants.INCRDECR_RES_FMT, long(rv[2]))
print "Returning", rv
@@ -231,7 +231,7 @@ def handle_version(self, cmd, hdrs, key, cas, data):
return 0, 0, "Python test memcached server %s" % VERSION
def _withCAS(self, key, cas, f):
- val=self.storage.get(key, None)
+ val=self.storage.get(key)
if cas == 0 or (val and cas == id(val)):
rv=f(val)
elif val:
@@ -361,7 +361,7 @@ def handle_close(self):
class MemcachedServer(asyncore.dispatcher):
"""A memcached server."""
- def __init__(self, backend, handler, port=11211):
+ def __init__(self, backend, handler, port=11210):
asyncore.dispatcher.__init__(self)
self.handler=handler
7 lib/membase/api/exception.py
@@ -6,6 +6,7 @@ class MembaseHttpExceptionTypes(object):
NODE_ALREADY_JOINED = 1002
NODE_CANT_ADD_TO_ITSELF=1003
BUCKET_CREATION_ERROR = 1004
+ STATS_UNAVAILABLE = 1005
#base exception class for membase apis
class MembaseHttpException(Exception):
@@ -46,6 +47,12 @@ def __init__(self,ip = '',bucket_name = ''):
self._message = 'unable to create bucket {0} on the host @ {1}'\
.format(bucket_name,ip)
+class StatsUnavailableException(MembaseHttpException):
+ def __init__(self):
+ self.type = MembaseHttpExceptionTypes.STATS_UNAVAILABLE
+ self._message = 'unable to get stats'
+
+
class ServerUnavailableException(MembaseHttpException):
def __init__(self,ip = ''):
self.parameters = dict()
21 lib/membase/api/httplib2/__init__.py
@@ -426,6 +426,9 @@ def __init__(self, credentials, host, request_uri, headers, response, content, h
self.host = host
self.credentials = credentials
self.http = http
+ self.response = response
+ self.headers = headers
+ self.content = content
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
@@ -468,7 +471,7 @@ class DigestAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
- challenge = _parse_www_authenticate(response, 'www-authenticate')
+ challenge = _parse_www_authenticate(response)
self.challenge = challenge['digest']
qop = self.challenge.get('qop')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
@@ -508,7 +511,7 @@ def request(self, method, request_uri, headers, content, cnonce=None):
def response(self, response, content):
if not response.has_key('authentication-info'):
- challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
+ challenge = _parse_www_authenticate(response).get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
@@ -528,7 +531,7 @@ class HmacDigestAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
- challenge = _parse_www_authenticate(response, 'www-authenticate')
+ challenge = _parse_www_authenticate(response)
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
@@ -583,7 +586,7 @@ def request(self, method, request_uri, headers, content):
)
def response(self, response, content):
- challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
+ challenge = _parse_www_authenticate(response).get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
@@ -620,7 +623,7 @@ def __init__(self, credentials, host, request_uri, headers, response, content, h
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
- challenge = _parse_www_authenticate(response, 'www-authenticate')
+ challenge = _parse_www_authenticate(response)
service = challenge['googlelogin'].get('service', 'xapi')
# Bloggger actually returns the service in the challenge
# For the rest we guess based on the URI
@@ -858,7 +861,7 @@ def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
- challenges = _parse_www_authenticate(response, 'www-authenticate')
+ challenges = _parse_www_authenticate(response)
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
@@ -889,7 +892,7 @@ def _conn_request(self, conn, request_uri, method, body, headers):
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except httplib.HTTPException, e:
- if i == 0:
+ if not i:
conn.close()
conn.connect()
continue
@@ -901,7 +904,7 @@ def _conn_request(self, conn, request_uri, method, body, headers):
if method != "HEAD":
content = _decompressContent(response, content)
- break;
+ break
return (response, content)
@@ -1062,7 +1065,7 @@ def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAU
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
- (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers=headers,
+ (response, new_content) = self.request(info['-x-permanent-redirect-url'], headers=headers,
redirections=redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
109 lib/membase/api/rest_client.py
@@ -106,13 +106,15 @@ def __init__(self, ip, username='Administrator', password='password'):
self.username = username
self.password = password
self.baseUrl = "http://{0}:8091/".format(self.ip)
+ self.port = 8091
def __init__(self, serverInfo):
#throw some error here if the ip is null ?
self.ip = serverInfo.ip
self.username = serverInfo.rest_username
self.password = serverInfo.rest_password
- self.baseUrl = "http://{0}:8091/".format(self.ip)
+ self.port = serverInfo.port
+ self.baseUrl = "http://{0}:{1}/".format(self.ip,self.port)
#authorization mut be a base64 string of username:password
@@ -190,12 +192,12 @@ def init_cluster_memoryQuota(self, username='Administrator',
#can't add the node to itself ( TODO )
#server already added
#returns otpNode
- def add_node(self, user='', password='', remoteIp='' ):
+ def add_node(self, user='', password='', remoteIp='', port='8091' ):
otpNode = None
log.info('adding remote node : {0} to this cluster @ : {1}'\
.format(remoteIp, self.ip))
api = self.baseUrl + 'controller/addNode'
- params = urllib.urlencode({'hostname': remoteIp,
+ params = urllib.urlencode({'hostname': "{0}:{1}".format(remoteIp, port),
'user': user,
'password': password})
try:
@@ -262,11 +264,14 @@ def fail_over(self, otpNode=None ):
params = urllib.urlencode({'otpNode': otpNode})
response, content = httplib2.Http().request(api, 'POST', params,
headers=self._create_headers())
+ log.info("failover response : {0}".format(response))
if response['status'] == '400':
log.error('fail_over error : {0}'.format(content))
+ return False
elif response['status'] == '200':
log.info('fail_over successful')
- return True
+ return True
+ return False
except socket.error:
raise ServerUnavailableException(ip=self.ip)
except httplib2.ServerNotFoundError:
@@ -343,7 +348,7 @@ def _rebalance_progress(self):
percentage = -1
api = self.baseUrl + "pools/default/rebalanceProgress"
try:
- response, content = httplib2.Http().request(api, 'GET',
+ response, content = httplib2.Http().request(api,
headers=self._create_headers())
#if status is 200 then it was a success otherwise it was a failure
if response['status'] == '400':
@@ -359,7 +364,7 @@ def _rebalance_progress(self):
log.error('{0} - rebalance failed'.format(parsed))
elif parsed['status'] == 'running':
for key in parsed:
- if key.find('ns_1') >= 0:
+ if key.find('@') >= 0:
ns_1_dictionary = parsed[key]
percentage = ns_1_dictionary['progress'] * 100
log.info('rebalance percentage : {0} %' .format(percentage))
@@ -371,6 +376,8 @@ def _rebalance_progress(self):
log.error(content)
log.error(response)
percentage = -100
+ if percentage == -1:
+ print response, content
return percentage
except socket.error:
raise ServerUnavailableException(ip=self.ip)
@@ -382,7 +389,7 @@ def _rebalance_progress(self):
def rebalance_statuses(self):
api = self.baseUrl + 'pools/rebalanceStatuses'
try:
- response, content = httplib2.Http().request(api, 'GET', headers=self._create_headers())
+ response, content = httplib2.Http().request(api, headers=self._create_headers())
#if status is 200 then it was a success otherwise it was a failure
if response['status'] == '400':
#extract the error
@@ -415,7 +422,7 @@ def get_nodes_self(self):
node = None
api = self.baseUrl + 'nodes/self'
try:
- response, content = httplib2.Http().request(api, 'GET', headers=self._create_headers())
+ response, content = httplib2.Http().request(api, headers=self._create_headers())
#if status is 200 then it was a success otherwise it was a failure
if response['status'] == '400':
#extract the error
@@ -433,7 +440,7 @@ def node_statuses(self):
nodes = []
api = self.baseUrl + 'nodeStatuses'
try:
- response, content = httplib2.Http().request(api, 'GET', headers=self._create_headers())
+ response, content = httplib2.Http().request(api, headers=self._create_headers())
#if status is 200 then it was a success otherwise it was a failure
if response['status'] == '400':
#extract the error
@@ -448,6 +455,7 @@ def node_statuses(self):
status=value['status'])
if node.ip == '127.0.0.1':
node.ip = self.ip
+ node.port = int(key[key.rfind(":") + 1:])
node.replication = value['replication']
nodes.append(node)
#let's also populate the membase_version_info
@@ -478,7 +486,7 @@ def cluster_status(self):
def get_pools_info(self):
api = self.baseUrl + 'pools'
try:
- response, content = httplib2.Http().request(api, 'GET', headers=self._create_headers())
+ response, content = httplib2.Http().request(api, headers=self._create_headers())
if response['status'] == '400':
log.error('get_pools error {0}'.format(content))
elif response['status'] == '200':
@@ -494,7 +502,7 @@ def get_pools(self):
version = None
api = self.baseUrl + 'pools'
try:
- response, content = httplib2.Http().request(api, 'GET', headers=self._create_headers())
+ response, content = httplib2.Http().request(api, headers=self._create_headers())
if response['status'] == '400':
log.error('get_pools error {0}'.format(content))
elif response['status'] == '200':
@@ -511,7 +519,7 @@ def get_buckets(self):
buckets = []
api = '{0}{1}'.format(self.baseUrl, 'pools/default/buckets/')
try:
- response, content = httplib2.Http().request(api, 'GET', headers=self._create_headers())
+ response, content = httplib2.Http().request(api, headers=self._create_headers())
if response['status'] == '400':
log.error('get_buckets error {0}'.format(content))
elif response['status'] == '200':
@@ -534,7 +542,7 @@ def get_bucket_stats_for_node(self, bucket='default', node_ip=None):
api = "{0}{1}{2}{3}{4}{5}".format(self.baseUrl, 'pools/default/buckets/',
bucket, "/nodes/", node_ip, ":8091/stats")
try:
- response, content = httplib2.Http().request(api, 'GET', headers=self._create_headers())
+ response, content = httplib2.Http().request(api, headers=self._create_headers())
if response['status'] == '400':
log.error('get_bucket error {0}'.format(content))
elif response['status'] == '200':
@@ -554,10 +562,37 @@ def get_bucket_stats_for_node(self, bucket='default', node_ip=None):
except httplib2.ServerNotFoundError:
raise ServerUnavailableException(ip=self.ip)
+ def get_nodes(self):
+ nodes = []
+ api = self.baseUrl + 'pools/default'
+ try:
+ response, content = httplib2.Http().request(api, headers=self._create_headers())
+ #if status is 200 then it was a success otherwise it was a failure
+ if response['status'] == '400':
+ #extract the error
+ log.error('unable to retrieve pools/default')
+ elif response['status'] == '200':
+ parsed = json.loads(content)
+ if "nodes" in parsed:
+ for json_node in parsed["nodes"]:
+ node = RestParser().parse_get_nodes_response(json_node)
+ node.rest_username = self.username
+ node.rest_password = self.password
+ node.port = self.port
+ if node.ip == "127.0.0.1":
+ node.ip = self.ip
+ nodes.append(node)
+ except socket.error:
+ raise ServerUnavailableException(ip=self.ip)
+ except httplib2.ServerNotFoundError:
+ raise ServerUnavailableException(ip=self.ip)
+ return nodes
+
+
def get_bucket_stats(self, bucket='default'):
api = "{0}{1}{2}{3}".format(self.baseUrl, 'pools/default/buckets/', bucket, "/stats")
try:
- response, content = httplib2.Http().request(api, 'GET', headers=self._create_headers())
+ response, content = httplib2.Http().request(api, headers=self._create_headers())
if response['status'] == '400':
log.error('get_bucket error {0}'.format(content))
elif response['status'] == '200':
@@ -584,7 +619,7 @@ def get_bucket(self, bucket='default'):
bucketInfo = None
api = '{0}{1}{2}'.format(self.baseUrl, 'pools/default/buckets/', bucket)
try:
- response, content = httplib2.Http().request(api, 'GET', headers=self._create_headers())
+ response, content = httplib2.Http().request(api, headers=self._create_headers())
if response['status'] == '400':
log.error('get_bucket error {0}'.format(content))
elif response['status'] == '200':
@@ -647,7 +682,7 @@ def create_bucket(self, bucket='',
'authType': authType,
'saslPassword': saslPassword,
'replicaNumber': replicaNumber,
- 'proxyPort': 11211,
+ 'proxyPort': self.get_nodes_self().moxi,
'bucketType': bucketType})
try:
@@ -777,6 +812,7 @@ def __init__(self, id='', status=''):
self.id = id
self.ip = ''
self.replication = ''
+ self.port = 8091
#extract ns ip from the otpNode string
#its normally ns_1@10.20.30.40
if id.find('@') >= 0:
@@ -815,12 +851,16 @@ def __init__(self):
class Bucket(object):
def __init__(self):
self.name = ''
+ self.port = 11211
self.type = ''
self.nodes = None
self.stats = None
self.servers = []
self.vbuckets = []
self.forward_map = []
+ self.numReplicas = 0
+ self.saslPassword = ""
+ self.authType = ""
class Node(object):
@@ -839,6 +879,12 @@ def __init__(self):
self.availableStorage = []
self.storage = []
self.memoryQuota = 0
+ self.moxi = 11211
+ self.memcached = 11210
+ self.id = ""
+ self.ip = ""
+ self.rest_username = ""
+ self.rest_password = ""
class NodePort(object):
@@ -878,6 +924,11 @@ def parse_get_nodes_response(self, parsed):
node.clusterCompatibility = parsed['clusterCompatibility']
node.version = parsed['version']
node.os = parsed['os']
+ if "otpNode" in parsed:
+ node.id = parsed["otpNode"]
+ if parsed["otpNode"].find('@') >= 0:
+ node.ip = node.id[node.id.index('@') + 1:]
+
# memoryQuota
if 'memoryQuota' in parsed:
node.memoryQuota = parsed['memoryQuota']
@@ -907,6 +958,14 @@ def parse_get_nodes_response(self, parsed):
dataStorage.state = dict['state']
dataStorage.type = key
node.storage.append(dataStorage)
+
+ # ports":{"proxy":11211,"direct":11210}
+ if "ports" in parsed:
+ ports = parsed["ports"]
+ if "proxy" in ports:
+ node.moxi = ports["proxy"]
+ if "direct" in ports:
+ node.memcached = ports["direct"]
return node
def parse_get_bucket_response(self, response):
@@ -917,11 +976,16 @@ def parse_get_bucket_json(self, parsed):
bucket = Bucket()
bucket.name = parsed['name']
bucket.type = parsed['bucketType']
+ bucket.port = parsed['proxyPort']
+ bucket.authType = parsed["authType"]
+ bucket.saslPassword = parsed["saslPassword"]
bucket.nodes = list()
if 'vBucketServerMap' in parsed:
vBucketServerMap = parsed['vBucketServerMap']
serverList = vBucketServerMap['serverList']
bucket.servers.extend(serverList)
+ if "numReplicas" in vBucketServerMap:
+ bucket.numReplicas = vBucketServerMap["numReplicas"]
#vBucketMapForward
if 'vBucketMapForward' in vBucketServerMap:
#let's gather the forward map
@@ -980,6 +1044,17 @@ def parse_get_bucket_json(self, parsed):
node.clusterCompatibility = nodeDictionary['clusterCompatibility']
node.version = nodeDictionary['version']
node.os = nodeDictionary['os']
- # todo : node.ports
+ if "ports" in nodeDictionary:
+ ports = nodeDictionary["ports"]
+ if "proxy" in ports:
+ node.moxi = ports["proxy"]
+ if "direct" in ports:
+ node.memcached = ports["direct"]
+ if "hostname" in nodeDictionary:
+ value = str(nodeDictionary["hostname"])
+ node.ip = value[:value.rfind(":")]
+ node.port = int(value[value.rfind(":") + 1:])
+ if "otpNode" in nodeDictionary:
+ node.id = nodeDictionary["otpNode"]
bucket.nodes.append(node)
return bucket
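Taken together, these rest_client.py changes let tests discover ports from pools/default instead of hardcoding 11211/11210. A rough usage sketch; the server stub and its field names are assumptions modeled on the constructor above, and 10.1.2.3 is a placeholder:

    from membase.api.rest_client import RestConnection

    class ServerStub(object):
        # hypothetical stand-in for the framework's server info object
        ip = "10.1.2.3"
        port = 8091
        rest_username = "Administrator"
        rest_password = "password"

    rest = RestConnection(ServerStub())
    for node in rest.get_nodes():
        # moxi/memcached ports are parsed from pools/default, not assumed
        print node.ip, node.port, node.moxi, node.memcached

    bucket = rest.get_bucket("default")
    # proxy port, auth type and replica count come from the bucket json
    print bucket.port, bucket.authType, bucket.numReplicas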
191 lib/membase/helper/bucket_helper.py
@@ -14,6 +14,56 @@
class BucketOperationHelper():
#this function will assert
+
+ @staticmethod
+ def base_bucket_ratio(servers):
+ ratio = 1.0
+ #check if ip is same for all servers
+ ip = servers[0].ip
+ dev_environment = True
+ for server in servers:
+ if server.ip != ip:
+ dev_environment = False
+ if dev_environment:
+ ratio = 2.0/3.0 * 1/len(servers)
+ else:
+ ratio = 2.0/3.0
+ return ratio
+
+ @staticmethod
+ def create_multiple_buckets(server, replica, bucket_ram_ratio=(2.0 / 3.0), howmany=3):
+ success = True
+ log = logger.Logger.get_logger()
+ rest = RestConnection(server)
+ info = rest.get_nodes_self()
+ if info.mcdMemoryReserved < 450.0:
+ log.error("at least need 450MB mcdMemoryReserved")
+ success = False
+ else:
+ available_ram = info.mcdMemoryReserved * bucket_ram_ratio
+ if available_ram / howmany > 100:
+ bucket_ram = int(available_ram / howmany)
+ else:
+ bucket_ram = 100
+ #choose a port that is not taken by this ns server
+ port = info.memcached
+# type = ["membase" for i in range(0, howmany)]
+ for i in range(0,howmany):
+ name = "bucket-{0}".format(i)
+ rest.create_bucket(bucket=name,
+ ramQuotaMB=bucket_ram,
+ replicaNumber=replica,
+ authType="sasl",
+ saslPassword="password")
+ port += 1
+ msg = "create_bucket succeeded but bucket \"{0}\" does not exist"
+ bucket_created = BucketOperationHelper.wait_for_bucket_creation(name, rest)
+ if not bucket_created:
+ log.error(msg.format(name))
+ success = False
+ break
+ return success
+
@staticmethod
def create_default_buckets(servers,number_of_replicas=1,assert_on_test = None):
log = logger.Logger.get_logger()
@@ -31,23 +81,30 @@ def create_default_buckets(servers,number_of_replicas=1,assert_on_test = None):
assert_on_test.fail(msg=msg)
@staticmethod
- def create_bucket(serverInfo, name='default', replica=1, port=11210, test_case=None, bucket_ram=-1):
+ def create_bucket(serverInfo, name='default', replica=1, port=11210, test_case=None, bucket_ram=-1, password=""):
log = logger.Logger.get_logger()
rest = RestConnection(serverInfo)
if bucket_ram < 0:
info = rest.get_nodes_self()
bucket_ram = info.mcdMemoryReserved * 2 / 3
+ if password:
+ authType = "sasl"
+ else:
+ authType = "none"
+
rest.create_bucket(bucket=name,
ramQuotaMB=bucket_ram,
replicaNumber=replica,
- proxyPort=port)
- msg = 'create_bucket succeeded but bucket "default" does not exist'
+ proxyPort=port,
+ authType=authType,
+ saslPassword=password)
+ msg = 'create_bucket succeeded but bucket "{0}" does not exist'
bucket_created = BucketOperationHelper.wait_for_bucket_creation(name, rest)
if not bucket_created:
log.error(msg.format(name))
if test_case:
- test_case.fail(msg=msg)
+ test_case.fail(msg=msg.format(name))
return bucket_created
@@ -71,8 +128,8 @@ def delete_all_buckets_or_assert(servers, test_case):
msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(bucket.name)
test_case.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(bucket.name, rest, 200)
, msg=msg)
- log.info('sleeping for 10 seconds because we want to :)')
- time.sleep(10)
+# log.info('sleeping for 10 seconds because we want to :)')
+# time.sleep(10)
@staticmethod
def delete_bucket_or_assert(serverInfo, bucket = 'default', test_case = None):
@@ -87,8 +144,8 @@ def delete_bucket_or_assert(serverInfo, bucket = 'default', test_case = None):
if test_case:
test_case.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(bucket, rest, 200), msg=msg)
- log.info('sleeping for 10 seconds because we want to :)')
- time.sleep(10)
+# log.info('sleeping for 10 seconds because we want to :)')
+# time.sleep(10)
#TODO: TRY TO USE MEMCACHED TO VERIFY BUCKET DELETION BECAUSE
# BUCKET DELETION IS A SYNC CALL W.R.T MEMCACHED
@@ -127,8 +184,8 @@ def wait_for_bucket_creation(bucket,
@staticmethod
def wait_for_memcached(node, bucket):
log = logger.Logger.get_logger()
- msg = "waiting for memcached bucket : {0} in {1}:{2} to accept set ops"
- log.info(msg.format(bucket["name"], node.ip, bucket["port"]))
+ msg = "waiting for memcached bucket : {0} in {1} to accept set ops"
+ log.info(msg.format(bucket, node.ip))
start_time = time.time()
end_time = start_time + 300
client = None
@@ -141,11 +198,10 @@ def wait_for_memcached(node, bucket):
while time.time() < end_time and counter < 1024:
try:
if not client:
- client = MemcachedClientHelper.memcached_client(node, bucket)
+ client = MemcachedClientHelper.direct_client(node, bucket)
for vBucketId in keys:
if not keys[vBucketId]["inserted"]:
client.set(keys[vBucketId]['key'], 0, 0, str(uuid.uuid4()))
- client.get(keys[vBucketId]['key'])
keys[vBucketId]["inserted"] = True
counter += 1
except mc_bin_client.MemcachedError as error:
@@ -165,102 +221,11 @@ def wait_for_memcached(node, bucket):
return counter == 1024
@staticmethod
- def wait_till_memcached_is_ready_or_assert(servers,
- bucket_port,
- test,
- bucket_name = 'default',
- bucket_password='password'):
- log = logger.Logger.get_logger()
-
- for serverInfo in servers:
- msg = "waiting for memcached bucket : {0} in {1}:{2} to accept set ops"
- log.info(msg.format(bucket_name, serverInfo.ip, bucket_port))
- start_time = time.time()
- end_time = start_time + 300
- client = None
-
- # build up a list of 1024 keys, 1 per vbucket
- keys = {}
- while len(keys) < 1024:
- if time.time() > end_time:
- test.fail('memcached not ready for {0} after waiting for 5 minutes'.format(serverInfo.ip))
- key = str(uuid.uuid4())
- vBucketId = crc32.crc32_hash(key) & 1023 # or & 0x3FF
- keys[vBucketId] = key
-
- # wait for connect to work
- while not client:
- if time.time() > end_time:
- test.fail('memcached not ready for {0} after waiting for 5 minutes'.format(serverInfo.ip))
- try:
- client = MemcachedClientHelper.create_memcached_client(serverInfo.ip,
- bucket_name,
- bucket_port,
- bucket_password)
- except:
- client = None
-
- # wait for all vbuckets to be ready
- for i in range(1024):
- while keys[i]:
- if time.time() > end_time:
- test.fail('memcached not ready for {0} after waiting for 5 minutes'.format(serverInfo.ip))
- try:
- client.vbucketId = i
- client.set(keys[i], 0, 0, keys[i])
- keys[i] = ''
- except mc_bin_client.MemcachedError as error:
- msg = "memcached not ready yet .. (memcachedError : {0}) - unable to push key : {1} to vbucket : {2}"
- log.error(msg.format(error.status, key, vBucketId))
- time.sleep(3)
- except Exception as ex:
- log.error("general error : {0} while setting key ".format(ex))
- time.sleep(3)
- # problem with the connection, try to reconnect
- client = None
- while not client:
- if time.time() > end_time:
- test.fail('memcached not ready for {0} after waiting for 5 minutes'.format(serverInfo.ip))
- try:
- client = MemcachedClientHelper.create_memcached_client(serverInfo.ip,
- bucket_name,
- bucket_port,
- bucket_password)
- except:
- client = None
- time.sleep(3)
-
- flushed = False
- while not flushed:
- if time.time() > end_time:
- test.fail('memcached not ready for {0} after waiting for 5 minutes'.format(serverInfo.ip))
- try:
- client.flush()
- flushed = True
- except:
- time.sleep(3)
-
- time.sleep(10)
- stats_reset = False
- while not stats_reset:
- if time.time() > end_time:
- test.fail('memcached not ready for {0} after waiting for 5 minutes'.format(serverInfo.ip))
- try:
- client.stats('reset')
- stats_reset = True
- except:
- time.sleep(3)
- client.close()
-
- log.info("inserted {0} keys to all {1} vBuckets".format(len(keys),1024))
-
-
- @staticmethod
- def verify_data(ip, keys, value_equal_to_key, verify_flags, port, test, debug=False):
+ def verify_data(server, keys, value_equal_to_key, verify_flags, test, debug=False, bucket="default"):
log = logger.Logger.get_logger()
log_error_count = 0
#verify all the keys
- client = mc_bin_client.MemcachedClient(ip, port)
+ client = MemcachedClientHelper.direct_client(server, bucket)
#populate key
index = 0
all_verified = True
@@ -293,10 +258,10 @@ def verify_data(ip, keys, value_equal_to_key, verify_flags, port, test, debug=Fa
return all_verified
@staticmethod
- def keys_dont_exist(keys,ip,port,test):
+ def keys_dont_exist(server, keys, bucket):
log = logger.Logger.get_logger()
#verify all the keys
- client = mc_bin_client.MemcachedClient(ip, port)
+ client = MemcachedClientHelper.direct_client(server, bucket)
#populate key
for key in keys:
try:
@@ -313,11 +278,11 @@ def keys_dont_exist(keys,ip,port,test):
return True
@staticmethod
- def keys_exist_or_assert(keys,ip,name,port,password,test):
+ def keys_exist_or_assert(keys, server, name, test):
#we should try out at least three times
log = logger.Logger.get_logger()
#verify all the keys
- client = MemcachedClientHelper.create_memcached_client(ip,name,port,password)
+ client = MemcachedClientHelper.direct_client(server, name)
#populate key
retry = 1
@@ -356,15 +321,13 @@ def keys_exist_or_assert(keys,ip,name,port,password,test):
@staticmethod
- def load_data_or_assert(serverInfo,
+ def load_some_data(serverInfo,
fill_ram_percentage=10.0,
- bucket_name = 'default',
- port=11211,
- test = None):
+ bucket_name = 'default'):
log = logger.Logger.get_logger()
if fill_ram_percentage <= 0.0:
fill_ram_percentage = 5.0
- client = mc_bin_client.MemcachedClient(serverInfo.ip, port)
+ client = MemcachedClientHelper.direct_client(serverInfo, bucket_name)
#populate key
rest = RestConnection(serverInfo)
testuuid = uuid.uuid4()
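A sketch of driving the reworked helper; master stands in for a server object like the stub shown earlier, and the assertions are illustrative only:

    from membase.helper.bucket_helper import BucketOperationHelper

    # three sasl buckets named bucket-0..bucket-2, sized from mcdMemoryReserved
    ok = BucketOperationHelper.create_multiple_buckets(master, replica=1, howmany=3)
    assert ok, "bucket creation did not complete"

    # a non-sasl bucket; passing password="secret" switches authType to sasl
    BucketOperationHelper.create_bucket(master, name="default", replica=1)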
20 lib/membase/helper/cluster_helper.py
@@ -33,6 +33,8 @@ def add_all_nodes_or_assert(master,all_servers,rest_settings,test_case):
def wait_for_ns_servers_or_assert(servers,testcase):
for server in servers:
rest = RestConnection(server)
+ log = logger.Logger.get_logger()
+ log.info("waiting for ns_server @ {0}:{1}".format(server.ip, server.port))
testcase.assertTrue(RestHelper(rest).is_ns_server_running(),
"ns_server is not running in {0}".format(server.ip))
@@ -48,7 +50,7 @@ def cleanup_cluster(servers):
toBeEjectedNodes = []
for node in nodes:
allNodes.append(node.id)
- if node.id.find(master.ip) < 0 and node.id.find('127.0.0.1') < 0:
+ if "{0}:{1}".format(node.ip,node.port) != "{0}:{1}".format(master.ip,master.port):
toBeEjectedNodes.append(node.id)
#let's rebalance to remove all the nodes from the master
#this is not the master , let's remove it
@@ -57,18 +59,4 @@ def cleanup_cluster(servers):
log.info("rebalancing all nodes in order to remove nodes")
helper = RestHelper(rest)
removed = helper.remove_nodes(knownNodes=allNodes,ejectedNodes=toBeEjectedNodes)
- log.info("removed all the nodes from cluster associated with {0} ? {1}".format(master.ip, removed))
-
- @staticmethod
- def rebalance_params_for_declustering(master,all_nodes):
- log = logger.Logger.get_logger()
- otpNodeIds = [node.id for node in all_nodes]
- knownNodes = []
- knownNodes.extend(otpNodeIds)
- if 'ns_1@' + master.ip in otpNodeIds:
- otpNodeIds.remove('ns_1@' + master.ip)
- if 'ns_1@127.0.0.1' in otpNodeIds:
- otpNodeIds.remove('ns_1@127.0.0.1')
- ejectedNodes = otpNodeIds
- log.info('ejectedNodes : {0} , knownNodes : {1}'.format(ejectedNodes,knownNodes))
- return knownNodes,ejectedNodes
+ log.info("removed all the nodes from cluster associated with {0} ? {1}".format(master.ip, removed))
100 lib/membase/helper/rebalance_helper.py
@@ -1,5 +1,6 @@
import time
import logger
+from membase.api.exception import StatsUnavailableException
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from memcached.helper.data_helper import MemcachedClientHelper
@@ -8,8 +9,6 @@
log = logger.Logger.get_logger()
class RebalanceHelper():
-
-
@staticmethod
#bucket is a json object that contains name,port,password
def wait_for_stats(master, bucket, stat_key, stat_value, timeout_in_seconds=120):
@@ -18,7 +17,7 @@ def wait_for_stats(master, bucket, stat_key, stat_value, timeout_in_seconds=120)
verified = False
while (time.time() - start) <= timeout_in_seconds:
rest = RestConnection(master)
- stats = rest.get_bucket_stats(bucket['name'])
+ stats = rest.get_bucket_stats(bucket)
if stats and stat_key in stats and stats[stat_key] == stat_value:
log.info("{0} : {1}".format(stat_key, stats[stat_key]))
verified = True
@@ -32,33 +31,37 @@ def wait_for_stats(master, bucket, stat_key, stat_value, timeout_in_seconds=120)
@staticmethod
def wait_till_total_numbers_match(master,
- servers,
bucket,
- port,
- replica_factor,
- timeout_in_seconds=120,
- password = 'password'):
+ timeout_in_seconds=120):
+
log.info('waiting for sum_of_curr_items == total_items....')
start = time.time()
verified = False
while (time.time() - start) <= timeout_in_seconds:
- if RebalanceHelper.verify_items_count(master, servers, bucket, replica_factor):
+ try:
+ if RebalanceHelper.verify_items_count(master,bucket):
+ verified = True
+ break
+ else:
+ time.sleep(2)
+ except StatsUnavailableException:
+ log.info("unable to retrieve stats for any node. returning true")
verified = True
break
- else:
- time.sleep(2)
rest = RestConnection(master)
- RebalanceHelper.print_taps_from_all_nodes(rest,bucket,port,password)
+ RebalanceHelper.print_taps_from_all_nodes(rest, bucket)
return verified
@staticmethod
#TODO: add password and port
- def print_taps_from_all_nodes(rest, bucket='default', port=11210, password='password'):
+ def print_taps_from_all_nodes(rest, bucket='default'):
+ # ports come from rest.get_nodes() instead of being passed in
+
log = logger.Logger.get_logger()
- nodes_for_stats = rest.node_statuses()
+ nodes_for_stats = rest.get_nodes()
for node_for_stat in nodes_for_stats:
try:
- client = MemcachedClientHelper.create_memcached_client(node_for_stat.ip, bucket, port,password)
+ client = MemcachedClientHelper.direct_client(node_for_stat, bucket)
log.info("getting tap stats.. for {0}".format(node_for_stat.ip))
tap_stats = client.stats('tap')
if tap_stats:
@@ -84,20 +87,29 @@ def log_interesting_taps(node, tap_stats, logger):
@staticmethod
- def verify_items_count(master,servers,bucket,replica_factor):
- #print out vb_pending_num,vb_active_num,vb_replica_num as well
+ def verify_items_count(master,bucket):
+ #get the #of buckets from rest
rest = RestConnection(master)
+ bucket_info = rest.get_bucket(bucket)
+ replica_factor = bucket_info.numReplicas
+ #print out vb_pending_num,vb_active_num,vb_replica_num as well
master_stats = rest.get_bucket_stats(bucket)
vbucket_active_sum = 0
vbucket_replica_sum = 0
vbucket_pending_sum = 0
all_server_stats = []
- for server in servers:
+ stats_received = 0
+ nodes = rest.get_nodes()
+ for server in nodes:
#get the stats
server_stats = rest.get_bucket_stats_for_node(bucket, server.ip)
if not server_stats:
log.info("unable to get stats from {0}".format(server.ip))
- all_server_stats.append((server,server_stats))
+ else:
+ stats_received += 1
+ all_server_stats.append((server, server_stats))
+ if not stats_received:
+ raise StatsUnavailableException()
sum = 0
for server, single_stats in all_server_stats:
if not single_stats or "curr_items" not in single_stats:
@@ -123,27 +135,27 @@ def verify_items_count(master,servers,bucket,replica_factor):
@staticmethod
- def verify_maps(vbucket_map_before,vbucket_map_after):
+ def verify_maps(vbucket_map_before, vbucket_map_after):
#for each bucket check the replicas
- for i in range(0,len(vbucket_map_before)):
+ for i in range(0, len(vbucket_map_before)):
if not vbucket_map_before[i].master == vbucket_map_after[i].master:
log.error(
- 'vbucket[{0}].master mismatch {1} vs {2}'.format(i,vbucket_map_before[i].master,vbucket_map_after[i].master))
+ 'vbucket[{0}].master mismatch {1} vs {2}'.format(i, vbucket_map_before[i].master,
+ vbucket_map_after[i].master))
return False
- for j in range(0,len(vbucket_map_before[i].replica)):
+ for j in range(0, len(vbucket_map_before[i].replica)):
if not (vbucket_map_before[i].replica[j]) == (vbucket_map_after[i].replica[j]):
- log.error('vbucket[{0}].replica[{1} mismatch {2} vs {3}'.format(i,j,
- vbucket_map_before[i].replica[j],vbucket_map_after[i].replica[j] ))
+ log.error('vbucket[{0}].replica[{1} mismatch {2} vs {3}'.format(i, j,
+ vbucket_map_before[i].replica[j],
+ vbucket_map_after[i].replica[j]))
return False
return True
@staticmethod
- def delete_all_buckets_or_assert(ips,test_case):
+ def delete_all_buckets_or_assert(ips, test_case):
log.info('deleting existing buckets on {0}'.format(ips))
for ip in ips:
- rest = RestConnection(ip=ip,
- username='Administrator',
- password='password')
+ rest = RestConnection(ip=ip)
buckets = rest.get_buckets()
for bucket in buckets:
print bucket.name
@@ -151,7 +163,7 @@ def delete_all_buckets_or_assert(ips,test_case):
log.info('deleted bucket : {0}'.format(bucket.name))
msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(bucket.name)
test_case.assertTrue(BucketOperationHelper.wait_for_bucket_deletion(bucket.name, rest, 200)
- , msg=msg)
+ , msg=msg)
@staticmethod
def wait_for_bucket_deletion(bucket,
@@ -181,20 +193,14 @@ def wait_for_bucket_creation(bucket,
time.sleep(2)
return False
- # in this method
-# @staticmethod
-# def add_node_and_rebalance(rest,node_ip):
-# pass
- #read the current nodes
- # if the node_ip already added then just
- #silently return
- #if its not added then let try to add this and then rebalance
- #we should alo try to get the bucket information from
- #rest api instead of passing it to the fucntions
-
-class VBucketHeartbeat(threading.Thread):
-
- #this class will print out the current state of vbucket transfer
- #during a rebalance operation or replication or failover
- vbucket = {'id':0,'dead':'10.1.2.4','pending':'10.1.2.5','active':'10.1.2.6'}
- #get
+ # in this method
+
+ # @staticmethod
+ # def add_node_and_rebalance(rest,node_ip):
+ # pass
+ #read the current nodes
+ # if the node_ip is already added then just
+ #silently return
+ #if it's not added then try to add it and then rebalance
+ #we should also try to get the bucket information from
+ #rest api instead of passing it to the functions
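With the replica factor and per-node stats now pulled over REST, callers pass only the master and the bucket name. A hedged usage sketch, master again being a hypothetical server object:

    from membase.helper.rebalance_helper import RebalanceHelper

    # polls until the sum of curr_items across nodes matches the bucket total,
    # reading numReplicas from the bucket's vBucketServerMap via REST
    matched = RebalanceHelper.wait_till_total_numbers_match(master, "default",
                                                            timeout_in_seconds=300)
    assert matched, "item counts did not converge"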
0  pytests/management/upgrade/__init__.py → lib/membase/helper/stats.py
File renamed without changes
2  lib/membase_install.py
@@ -65,6 +65,6 @@ def status_membase(server):
def ssh(host,cmd,key=None):
rtn=""
process = subprocess.Popen("ssh -i ~/.ssh/mikew_key.pem root@%s \"%s\"" % (host,cmd),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
- stdoutdata,stderrdata=process.communicate(None)
+ stdoutdata,stderrdata=process.communicate()
rtn += stdoutdata
return rtn
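communicate(None) and communicate() are the same call, since the input argument defaults to None; the change is purely cosmetic. For reference:

    import subprocess

    p = subprocess.Popen("echo hello", shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()  # identical to p.communicate(None)
    print out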
155 lib/memcached/helper/data_helper.py
@@ -13,6 +13,7 @@
class MemcachedClientHelperExcetion(Exception):
def __init__(self, errorcode, message):
+ Exception.__init__(self, errorcode, message)
self._message = message
self.errorcode = errorcode
self._args = (errorcode, message)
@@ -24,7 +25,6 @@ class MemcachedClientHelper(object):
@staticmethod
def create_threads(servers=None,
name='default',
- port=11211,
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
@@ -75,10 +75,7 @@ def create_threads(servers=None,
#choose one of the servers random
thread = WorkerThread(serverInfo=MemcachedClientHelper.random_pick(servers),
name=name,
- port=port,
- password='password',
values_list=list,
- ignore_how_many_errors=5000,
override_vBucketId=override_vBucketId,
write_only=write_only,
moxi=moxi,
@@ -91,7 +88,6 @@ def create_threads(servers=None,
@staticmethod
def create_threads_for_load_bucket(serverInfo=None,
name='default',
- port=11211,
ram_load_ratio=-1,
number_of_items=-1,
value_size_distribution=None,
@@ -139,10 +135,7 @@ def create_threads_for_load_bucket(serverInfo=None,
for i in range