#!/usr/bin/env ruby
if ARGV.length < 6
print("runtest TEST_NAME PKG_RPM RAM_QUOTA REPLICA_COUNT" \
" NUM_ITEMS NUM_VBUCKETS [VAL_SIZE] [CONCURRENCY] [JOIN_URL] [key=val, ...]\n")
exit(-1)
end
# ------------------------------------
$test_name = ARGV[0]
$top_user = "root"
$top_patt = "beam.smp|memcached|moxi|couchjs"
load 'util.rb'
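# util.rb (same repo) is expected to provide the helpers used throughout this
# script: step(), run(), killall(), proc_stats_start()/proc_stats_end(), and
# the $out_file path that captured responses are appended to.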
# ------------------------------------
$pkg = ARGV[1]
$pkg_base = $pkg.split('/')[-1].split('-')[0] # Ex: 'couchbase'
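# $cli_kind is a [client-kind, port] pair: the memcached-compatible port 11211
# for couchbase/membase, or the CouchDB REST port 5984 for couchbase-single.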
$cli_kind = [$pkg_base, 11211]
$cli_kind = ['couchdb', 5984] if $pkg.include?('couchbase-single')
$ram_quota = ARGV[2]
$replica_count = ARGV[3]
$num_items = ARGV[4]
$num_vbuckets = ARGV[5].to_i
$val_size = ARGV[6] || "1000"
$concurrency = (ARGV[7] || "10").to_i
$join_url = ARGV[8]
$opts = { 'target' => '127.0.0.1' }
(ARGV[9] || "").split(',').each {|x|
key, val = x.strip.split('=')
$opts[key] = val
}
$json = ($opts['json'] || '1').to_i
$target = $opts['target']
$target_mc = $opts['target']
$extra_sleep = $opts['extra_sleep']
index = true
index = false if $pkg_base == 'membase' or $json != 1
index = false if $opts['index'] == 'n'
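# $du_path points at the server's data directory; the stats helpers in util.rb
# presumably sample it (e.g. via du) to report on-disk size per step.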
$du_path = "/opt/#{$pkg_base}/var"
# ------------------------------------
step("preparing... #{$test_name}",
nil, nil, nil,
"Cleanup from previous runs.")
run "sudo killall -9 memcached || true"
['couchbase', 'membase'].each do |x|
if $opts['install'] != 'n'
run "sudo /etc/init.d/#{x}-server stop || true"
run "sudo rpm -e #{x}-server || true"
run "sudo rm -rf /opt/#{x}/"
end
end
# ------------------------------------
step("installing...",
nil, nil, nil,
<<-'EOD')
Install the software, then run the after-install hacks
to override the default vbucket config and nodelay setting,
allow erlang functions, increase os_process_limits, etc.
See: https://github.com/couchbaselabs/rampup/blob/master/after-install
EOD
if $opts['install'] != 'n'
run "sudo rpm -i #{$pkg}"
sleep 5
sleep $extra_sleep.to_i if $extra_sleep
end
if $pkg_base != 'membase'
run "rm -f after-install"
run "wget https://raw.github.com/couchbaselabs/rampup/master/after-install"
run "chmod a+x after-install"
run "sudo ./after-install #{$num_vbuckets}"
end
# ------------------------------------
step("configuring...",
nil, nil, nil,
<<-'EOD')
Create the default bucket with the target RAM quota,
and potentially join a cluster for multi-node tests.
Also create the first design doc, for the 'last' (high-fan-out) index.
EOD
if $join_url and $join_url.length > 0 and $join_url != "none"
sleep(3)
my_ip = `/sbin/ifconfig eth0|grep inet|awk {'print $2'}|cut -d":" -f2|head -n 1`.strip
run "/opt/#{$pkg_base}/bin/membase server-add -c #{$join_url} \
-u Administrator -p password \
--server-add=#{my_ip}"
sleep(3)
run "/opt/#{$pkg_base}/bin/membase rebalance -c #{$join_url} \
-u Administrator -p password"
else
sleep(3)
run "/opt/#{$pkg_base}/bin/membase cluster-init -c #{$target} \
--cluster-init-username=Administrator \
--cluster-init-password=password"
sleep(3)
run "/opt/#{$pkg_base}/bin/membase bucket-create -c #{$target} \
-u Administrator -p password \
--bucket=default --bucket-type=membase --bucket-password= \
--bucket-ramsize=#{$ram_quota} --bucket-replica=#{$replica_count}"
end
sleep 20
sleep $extra_sleep.to_i if $extra_sleep
if $cli_kind[0] == 'couchdb' and $num_vbuckets > 1
(0..($num_vbuckets - 1)).each do |partition|
run "curl -vX PUT http://#{$target}:5984/default%2F#{partition}"
end
end
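# When targeting couchdb with more than one vbucket (set-view mode), the
# per-partition databases default/<n> created above hold the data, and design
# docs go into a default/master database; otherwise a single 'default'
# database holds both.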
def load_design_doc(kind)
if $cli_kind[0] == 'couchdb' and $num_vbuckets > 1
run "curl -vX PUT http://#{$target}:5984/default%2Fmaster"
run "curl -vX PUT http://#{$target}:5984/default%2Fmaster/_design/rampup-#{kind} -d @index/rampup-#{kind}.json"
else
run "curl -vX PUT http://#{$target}:5984/default"
run "curl -vX PUT http://#{$target}:5984/default/_design/rampup-#{kind} -d @index/rampup-#{kind}.json"
end
sleep 1
end
load_design_doc('last') if $pkg_base != 'membase'
# ------------------------------------
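# Optional client-side moxi: start a local proxy on port 11511 that streams the
# default bucket's vbucket configuration from the cluster, so clients talk to
# 127.0.0.1 instead of the remote target.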
if $opts['client-side-moxi'] == 'y'
$target_mc = "127.0.0.1"
$cli_kind[1] = 11511
moxi = "/opt/#{$pkg_base}/bin/moxi" \
" -Z port_listen=#{$cli_kind[1]}," \
"default_bucket_name=default,downstream_max=1024,downstream_conn_max=4," \
"connect_max_errors=5,connect_retry_interval=30000,connect_timeout=400," \
"auth_timeout=100,cycle=200," \
"downstream_conn_queue_timeout=200,downstream_timeout=5000,wait_queue_timeout=200" \
" -z url=http://#{$target}:8091/pools/default/bucketsStreaming/default" \
" -p 0 -O stderr -u `whoami` &"
system(moxi)
sleep(1)
end
bulk_load_batch = 100     # Batch size for REST bulk-docs loading (couchbase-single path).
used_memcachetest = false # Set below when memcachetest does the loading; later read/concurrency steps are then skipped.
step("loading-docs...",
nil, nil, $num_items,
<<-EOD)
Load all documents with #{$concurrency} concurrent clients.
The load pathway is either moxi, mongos, or REST (for couchbase-single),
depending on the target server. REST-based loading uses bulk-docs.
EOD
proc_stats_start()
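# Two load paths: the plain-memcached memcachetest binary when requested (and
# values are not JSON), otherwise forked rampup-client loaders.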
if $opts['memcachetest'] == 'y' and $json == 0 and ['couchbase', 'membase'].include?($cli_kind[0])
run("/opt/#{$pkg_base}/bin/memcachetest" \
" -h #{$target_mc}:#{$cli_kind[1]}" \
" -t #{$concurrency}" \
" -i #{$num_items}" \
" -m #{$val_size.to_i}" \
" -M #{$val_size.to_i + 1}" \
" -c 0")
used_memcachetest = true
else
child_items = $num_items.to_i / $concurrency
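# Fork one loader per client. Each child appears to get a contiguous slice of
# the key space: num_items=child_items*(x-1) looks like the child's starting
# offset, and max_creates caps it at child_items new documents.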
pids = (1..$concurrency).map do |x|
fork do
run("./rampup-client #{$cli_kind[0]} #{$target_mc}:#{$cli_kind[1]}" \
" ratio_sets=1.0 ratio_creates=1.0" \
" min_value_size=#{$val_size}" \
" max_items=#{$num_items}" \
" num_items=#{child_items * (x - 1)}" \
" max_creates=#{child_items} exit_after_creates=1" \
" num_vbuckets=#{$num_vbuckets}" \
" bulk_load_batch=#{bulk_load_batch}" \
" json=#{$json}")
exit
end
end
pids.map {|pid| [pid, Process.wait2(pid)] }
end
proc_stats_end()
killall("rampup-client")
# ------------------------------------
step("loading-persisted...",
nil, nil, nil,
<<-'EOD')
Right after the loading-docs step, measure the time for
the persistence queue to drain, by watching ep_queue_size / ep_flusher_todo.
EOD
def wait_until_queue_drained()
i = 0
while i < 100000
# Fetch ep_queue_size and ep_flusher_todo; keep only the value tokens
# (those without ':') that are still non-zero.
w = `/opt/#{$pkg_base}/bin/mbstats #{$target}:11210 all | egrep "ep_queue_size|ep_flusher_todo"`
w = w.strip.split(/[\s\n\t]+/).select {|x| !x.include?(':') and x != '0' }
break if w.empty? # Both stats are zero: the persistence queue has drained.
i = i + 1
sleep 0.1
end
end
proc_stats_start()
wait_until_queue_drained()
proc_stats_end()
# ------------------------------------
unless used_memcachetest
step("reading-docs...",
"./rampup-client #{$cli_kind[0]} #{$target_mc}:#{$cli_kind[1]}" \
" ratio_sets=0.0" \
" min_value_size=#{$val_size}" \
" num_items=#{$num_items}" \
" num_vbuckets=#{$num_vbuckets}" \
" max_ops=10000" \
" json=#{$json}",
nil, 10000,
<<-'EOD')
Serially read 10,000 documents.
EOD
end
# ------------------------------------
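# capture_response(name) builds a per-step output path (derived from util.rb's
# $out_file); it is passed to do-couch-view so query responses can be captured
# for later inspection.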
def capture_response(name)
return $out_file + "_#{name}.res"
end
if index
num_views = 10000
step("pre-index-build-quiescing...",
nil, nil, nil,
<<-'EOD')
Before doing the first index build step, let everything quiesce
by sleeping 5 seconds. We shouldn't need this, but some
dev build of couchbase 2.0.0r needed it.
EOD
sleep 5
# ------------------------------------
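# index_cmd(name) returns the curl command that triggers the initial build of
# view 'name', plus the access path to use afterwards: plain '_view' normally,
# or '_set_view' (with an explicit partition count) in set-view mode.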
def index_cmd(name)
index_build_cmd = "curl -v http://#{$target}:5984/default/_design/rampup-#{name}/_view/#{name}?limit=10"
index_access = "_view"
if $cli_kind[0] == 'couchdb' and $num_vbuckets > 1
index_build_cmd = "curl -v \"http://#{$target}:5984/default/_design/rampup-#{name}/_set_view/#{name}?limit=10\&partitions=#{$num_vbuckets}\""
index_access = "_set_view"
end
return index_build_cmd, index_access
end
# ------------------------------------
index_build_cmd, index_access = index_cmd("last")
step("index-building...",
index_build_cmd,
nil, nil,
<<-'EOD')
First index build, of the last (high-fan-out) index.
The high-fan-out index is like an index on a user's country.
The 'last' index has no reduce function.
EOD
print("index_build_cmd: #{index_build_cmd}\n")
# ------------------------------------
step("index-accessing...",
"./do-couch-view #{$target} #{num_views} last gte #{index_access} #{capture_response("index-accessing")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
Access the last (high-fan-out) index 10,000 times, serially,
via a greater-than-or-equal query, limit 10 results per query.
Against couch, that means a startkey but no endkey.
EOD
step("index-accessing2...",
"./do-couch-view #{$target} #{num_views} last gte #{index_access} #{capture_response("index-accessing2")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
Access the last (high-fan-out) index 10,000 times, serially,
via a greater-than-or-equal query, limit 10 results per query, but do it again
a second time to see if system caching has an effect.
EOD
step("index-eq-accessing...",
"./do-couch-view #{$target} #{num_views} last eq #{index_access} #{capture_response("index-eq-accessing")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
Access the last (high-fan-out) index 10,000 times, serially,
via an equal-to query, limit 10 results per query. Against couch,
that means the startkey is the same as the endkey.
EOD
step("index-eq-accessing2...",
"./do-couch-view #{$target} #{num_views} last eq #{index_access} #{capture_response("index-eq-accessing2")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
Access the last (high-fan-out) index 10,000 times, serially,
via an equal-to query, limit 10 results per query, but do it again
a second time to see if system caching has an effect.
EOD
# ------------------------------------
step("index-mid-design...",
nil, nil, nil,
<<-'EOD')
Define another index via a new design document:
the 'mid' (low-fan-out) index is like a secondary index on
user's email address. This 'mid' index has no reduce function.
EOD
load_design_doc('mid')
# ------------------------------------
index_build_cmd, index_access = index_cmd("mid")
step("index-mid-building...",
index_build_cmd,
nil, nil,
<<-'EOD')
Build the 'mid' index.
EOD
# ------------------------------------
step("index-mid-accessing...",
"./do-couch-view #{$target} #{num_views} mid gte #{index_access} #{capture_response("index-mid-accessing")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
Access the mid (low-fan-out) index 10,000 times, serially,
via a greater-than-or-equal query, limit 10 results per query.
Against couch, that means a startkey but no endkey.
EOD
step("index-mid-accessing2...",
"./do-couch-view #{$target} #{num_views} mid gte #{index_access} #{capture_response("index-mid-accessing2")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
Access the mid (low-fan-out) index 10,000 times, serially,
via a greater-than-or-equal query, limit 10 results per query,
a second time to see if system caching has an effect.
EOD
step("index-mid-eq-accessing...",
"./do-couch-view #{$target} #{num_views} mid eq #{index_access} #{capture_response("index-mid-eq-accessing")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
Access the mid (low-fan-out) index 10,000 times, serially,
via an equal-to query, limit 10 results per query.
Against couch, that means the startkey is the same as the endkey.
EOD
step("index-mid-eq-accessing2...",
"./do-couch-view #{$target} #{num_views} mid eq #{index_access} #{capture_response("index-mid-eq-accessing2")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
Access the mid (low-fan-out) index 10,000 times, serially,
via an equal-to query, limit 10 results per query,
a second time.
EOD
# ------------------------------------
step("index-lastCount-design...",
nil, nil, nil,
<<-'EOD')
Define another index via a new design document:
the 'lastCount' (high-fan-out) index is like the 'last' index
but also includes a _count reducer.
EOD
load_design_doc('lastCount')
# ------------------------------------
index_build_cmd, index_access = index_cmd("lastCount")
step("index-lastCount-building...",
index_build_cmd,
nil, nil,
<<-'EOD')
Build the 'lastCount' index.
EOD
# ------------------------------------
step("index-lastCount-accessing...",
"./do-couch-view #{$target} #{num_views} lastCount gte #{index_access} #{capture_response("index-lastCount-accessing")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
Access the lastCount index 10,000 times, serially,
via a greater-than-or-equal query, limit 10 results per query.
Against couch, that means a startkey but no endkey.
EOD
# ------------------------------------
step("index-lastCount-group-accessing...",
"./do-couch-view #{$target} #{num_views} lastCount gte #{index_access},group=true #{capture_response("index-lastCount-group-accessing")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
Access the lastCount index 10,000 times, serially,
for grouped results, stale=ok,
via a greater-than-or-equal query, limit 10 results per query.
Against couch, that means a startkey but no endkey.
EOD
# ------------------------------------
if $cli_kind[0] == 'couchdb' and $num_vbuckets > 1
step("disabling-partition...",
"curl -vX PUT http://#{$target}:5984/default/_design/rampup-last/_set_view/disable_partition/0",
nil, nil,
<<-'EOD')
Disable partition 0 to simulate a moved vbucket.
EOD
["last", "lastCount"].each do |view|
index_build_cmd, index_access = index_cmd(view)
step("disabled-partition-index-#{view}-accessing...",
"./do-couch-view #{$target} #{num_views} last gte #{index_access} #{capture_response("disabled-partition-#{view}-accessing")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
While partition 0 is disabled, access the view 10,000 times, serially,
via a greater-than-or-equal query, limit 10 results per query.
EOD
end
step("disabled-partition-index-lastCount-group-accessing...",
"./do-couch-view #{$target} #{num_views} lastCount gte #{index_access},group=true #{capture_response("disabled-partition-lastCount-group-accessing")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
While partition 0 is disabled, access the lastCount index 10,000 times, serially,
for grouped results, stale=ok, via a greater-than-or-equal query, limit 10 results per query.
Against couch, that means a startkey but no endkey.
EOD
step("enabling-partition...",
"curl -vX PUT http://#{$target}:5984/default/_design/rampup-last/_set_view/enable_partition/0",
nil, nil,
<<-'EOD')
Re-enable partition 0 so that all partitions are active.
EOD
["last", "lastCount"].each do |view|
index_build_cmd, index_access = index_cmd(view)
step("enabled-partition-index-#{view}-accessing...",
"./do-couch-view #{$target} #{num_views} last gte #{index_access} #{capture_response("enabled-partition-#{view}-accessing")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
With partition 0 re-enabled, access the view 10,000 times, serially,
via a greater-than-or-equal query, limit 10 results per query.
EOD
end
step("enabled-partition-index-lastCount-group-accessing...",
"./do-couch-view #{$target} #{num_views} lastCount gte #{index_access},group=true #{capture_response("enabled-partition-lastCount-group-accessing")} >> #{$out_file}",
nil, num_views,
<<-'EOD')
While partition 0 is re-enabled, access the lastCount index 10,000 times, serially,
for grouped results, stale=ok, via a greater-than-or-equal query, limit 10 results per query.
Against couch, that means a startkey but no endkey.
EOD
end
# ------------------------------------
step("index-mid-erlang-design...",
nil, nil, nil,
<<-'EOD')
Define another index via a new design document,
similar to the 'mid' index, but the 'mid-erlang' index
has a map function written in erlang instead of javascript.
EOD
load_design_doc('mid-erlang')
# ------------------------------------
index_build_cmd, index_access = index_cmd("mid-erlang")
step("index-mid-erlang-building...",
index_build_cmd,
nil, nil,
<<-'EOD')
Build the 'mid-erlang' index.
EOD
end
# ------------------------------------
unless used_memcachetest
ops_per_concurrent_client = 100000
step("concurrent-usage...",
nil, nil, $concurrency * ops_per_concurrent_client,
<<-EOD)
Start #{$concurrency} concurrent clients, each of which performs
100000 sequential ops (via port 11211 or equivalent).
10% of the ops will be sets.
90% of the ops will be gets.
95% of the sets will hit a hot item subset.
95% of the gets will hit a hot item subset.
The hot item subset is 20% of all the items.
Items are JSON docs.
Views are defined at this point, but are not accessed.
EOD
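# Remember when concurrent-usage started; the concurrent-usage-total step below
# resets $time_prev to this so its reported elapsed time covers loading plus
# draining together.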
time_start_of_concurrent_usage = Time.now
proc_stats_start()
pids = (1..$concurrency).map do |x|
fork do
run("./rampup-client #{$cli_kind[0]} #{$target_mc}:#{$cli_kind[1]}" \
" min_value_size=#{$val_size}" \
" num_items=#{$num_items}" \
" ratio_sets=0.1" \
" ratio_creates=0.0" \
" ratio_misses=0.05" \
" ratio_hot=0.2" \
" ratio_hot_sets=0.2" \
" ratio_hot_gets=0.2" \
" num_vbuckets=#{$num_vbuckets}" \
" max_ops=#{ops_per_concurrent_client}" \
" json=#{$json}")
exit
end
end
pids.map {|pid| [pid, Process.wait2(pid)] }
proc_stats_end()
killall("rampup-client")
# ------------------------------------
step("concurrent-usage-draining...",
nil, nil, nil,
<<-'EOD')
Right after the concurrent-usage step, wait until the
persistence queues have drained to zero.
EOD
wait_until_queue_drained()
# ------------------------------------
step("concurrent-usage-total...",
nil, nil, nil,
<<-'EOD')
Measures the time for the queue to drain to zero,
since the start of the previous concurrent-usage step.
So, this equals elapsed time for concurrent-usage +
elapsed time for concurrent-usage-draining.
EOD
$time_prev = time_start_of_concurrent_usage
# ------------------------------------
step("concurrent-sets...",
nil, nil, $concurrency * ops_per_concurrent_client,
<<-EOD)
Start #{$concurrency} concurrent clients, each of which performs
100000 sequential ops (via port 11211 or equivalent).
100% of the ops will be sets.
95% of the sets will hit a hot item subset.
The hot item subset is 20% of all the items.
Items are JSON docs.
Views are defined at this point, but are not accessed.
EOD
time_start_of_concurrent_sets = Time.now
proc_stats_start()
pids = (1..$concurrency).map do |x|
fork do
run("./rampup-client #{$cli_kind[0]} #{$target_mc}:#{$cli_kind[1]}" \
" min_value_size=#{$val_size}" \
" num_items=#{$num_items}" \
" ratio_sets=1.0" \
" ratio_creates=0.0" \
" ratio_misses=0.05" \
" ratio_hot=0.2" \
" ratio_hot_sets=0.2" \
" num_vbuckets=#{$num_vbuckets}" \
" max_ops=#{ops_per_concurrent_client}" \
" json=#{$json}")
exit
end
end
pids.map {|pid| [pid, Process.wait2(pid)] }
proc_stats_end()
killall("rampup-client")
# ------------------------------------
step("concurrent-sets-draining...",
nil, nil, nil,
<<-'EOD')
Right after the concurrent-sets step, wait until the
persistence queues have drained to zero.
EOD
wait_until_queue_drained()
# ------------------------------------
step("concurrent-sets-total...",
nil, nil, nil,
<<-'EOD')
Measures the time for the queue to drain to zero,
since the start of the previous concurrent-sets step.
So, this equals elapsed time for concurrent-sets +
elapsed time for concurrent-sets-draining.
EOD
$time_prev = time_start_of_concurrent_sets
end
# ------------------------------------
if index
step("index-mutating...",
nil, nil, nil,
<<-'EOD')
Start one client that serially mutates all items once;
against couchbase, that's just blind sets. Concurrently,
serially query the 'last' (high-fan-out) index 10,000 times,
using a greater-than-or-equal query, limit 10 rows per query
and stop this step (the concurrent mutations) when all 10,000
sequential view accesses are done.
EOD
killall("rampup-client")
# Concurrently do some item mutations.
#
pid = fork do
run("./rampup-client #{$cli_kind[0]} #{$target_mc}:#{$cli_kind[1]}" \
" min_value_size=#{$val_size}" \
" ratio_sets=1.0" \
" ratio_creates=0.0" \
" num_items=#{$num_items}" \
" num_vbuckets=#{$num_vbuckets}" \
" max_ops=#{$num_items}" \
" json=#{$json}")
exit
end
proc_stats_start()
run("./do-couch-view #{$target} #{num_views} last gte #{index_access} #{capture_response("index-mutating")} >> #{$out_file}")
proc_stats_end()
killall("rampup-client")
# ------------------------------------
step("index-mutating-draining...",
nil, nil, nil,
<<-'EOD')
Right after the index-mutating step,
wait until persistence queues have drained.
EOD
wait_until_queue_drained()
# ------------------------------------
step("index-mutating-lastCount...",
nil, nil, nil,
<<-'EOD')
Start one client that serially mutates all items once;
against couchbase, that's just blind sets. Concurrently,
serially query the 'lastCount' (high-fan-out with _count) index 10,000 times,
using a greater-than-or-equal query, limit 10 rows per query
and stop this step (the concurrent mutations) when all 10,000
sequential view accesses are done.
EOD
killall("rampup-client")
# Concurrently do some item mutations.
#
pid = fork do
run("./rampup-client #{$cli_kind[0]} #{$target_mc}:#{$cli_kind[1]}" \
" min_value_size=#{$val_size}" \
" ratio_sets=1.0" \
" ratio_creates=0.0" \
" num_items=#{$num_items}" \
" num_vbuckets=#{$num_vbuckets}" \
" max_ops=#{$num_items}" \
" json=#{$json}")
exit
end
proc_stats_start()
run("./do-couch-view #{$target} #{num_views} last gte #{index_access} #{capture_response("index-mutating")} >> #{$out_file}")
proc_stats_end()
killall("rampup-client")
# ------------------------------------
step("index-mutating-lastCount-draining...",
nil, nil, nil,
<<-'EOD')
Right after the index-mutating-lastCount step,
wait until persistence queues have drained.
EOD
wait_until_queue_drained()
end
# ------------------------------------
step("stopping...",
"sudo /etc/init.d/#{$pkg_base}-server stop",
nil, nil,
<<-'EOD')
Stop the server.
EOD
# ------------------------------------
step("stopping-quiesce...",
nil, nil, nil,
<<-'EOD')
After stopping the server, sleep 5 seconds.
EOD
sleep 5
# ------------------------------------
step("restarting...",
"sudo /etc/init.d/#{$pkg_base}-server start",
nil, nil,
<<-'EOD')
Restart the server.
EOD
# ------------------------------------
step("warming...",
nil, nil, nil,
<<-'EOD')
Measure the warmup time after the restart step, via ep_warmup_time.
EOD
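# Poll ep_warmup_thread until it reports "complete", then read ep_warmup_time
# (reported in microseconds) and convert it to seconds in w.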
w = 0
if File.exists?("/opt/#{$pkg_base}/bin/mbstats")
proc_stats_start()
i = 0
while i < 100000
sleep 2
w = `/opt/#{$pkg_base}/bin/mbstats #{$target}:11210 all | grep "ep_warmup_thread:"`
w = w.split(' ')[-1]
break if w == "complete"
i = i + 1
end
proc_stats_end()
w = `/opt/#{$pkg_base}/bin/mbstats #{$target}:11210 all | grep "ep_warmup_time:"`
w = (w.split(' ')[-1] || "0").to_i / 1000000 # ep_warmup_time is in microseconds.
end
# ------------------------------------
if index and not ($cli_kind[0] == 'couchdb' and $num_vbuckets > 1)
step("index-rebuilding...",
nil, w, nil,
<<-'EOD')
Measure the _compact time on the 'last' (high-fan-out) design document.
EOD
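# Kick off compaction of the rampup-last view group, then poll the design
# document's _info until compact_running goes false.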
run('curl -H "Content-Type: application/json"' +
" -X POST http://#{$target}:5984/default/_compact/rampup-last")
proc_stats_start()
i = 0
while i < 100000
h = `curl -X GET http://#{$target}:5984/default/_design/rampup-last/_info`
break if h.include?('"compact_running":false')
sleep 1
i = i + 1
end
proc_stats_end()
end
# ------------------------------------
if File.exists?("/opt/#{$pkg_base}/bin/membase")
# ------------------------------------
step("bucket-delete...",
"/opt/#{$pkg_base}/bin/membase bucket-delete -c #{$target} \
-u Administrator -p password \
--bucket=default",
nil, nil,
<<-'EOD')
Delete the default bucket.
EOD
sleep(2)
# ------------------------------------
step("bucket-create...",
"/opt/#{$pkg_base}/bin/membase bucket-create -c #{$target} \
-u Administrator -p password \
--bucket=default --bucket-type=membase --bucket-password= \
--bucket-ramsize=#{$ram_quota} --bucket-replica=#{$replica_count}",
nil, nil,
<<-'EOD')
After the default bucket was deleted, create it again with the same config,
so it should be an empty bucket.
EOD
sleep(20)
# ------------------------------------
clog_num_items = 100000
if $opts['memcachetest'] == 'y' and $json == 0 and ['couchbase', 'membase'].include?($cli_kind[0])
clog_num_items = $num_items.to_i
end
step("clogging...", nil, nil, clog_num_items,
<<-'EOD')
After the bucket was re-created (and is now an empty bucket), turn off
persistence draining (via mbflushctl stop). Then load items
so the server queues get clogged up with mutations.
EOD
proc_stats_start()
run("/opt/#{$pkg_base}/bin/mbflushctl #{$target}:11210 stop")
if $opts['memcachetest'] == 'y' and $json == 0 and ['couchbase', 'membase'].include?($cli_kind[0])
run("/opt/#{$pkg_base}/bin/memcachetest" \
" -h #{$target_mc}:#{$cli_kind[1]}" \
" -t #{$concurrency}" \
" -i #{$num_items}" \
" -m #{$val_size.to_i}" \
" -M #{$val_size.to_i + 1}" \
" -c 0")
else
run("./rampup-client #{$cli_kind[0]} #{$target}:#{$cli_kind[1]}" \
" ratio_sets=1.0 ratio_creates=1.0" \
" min_value_size=#{$val_size}" \
" max_creates=#{clog_num_items} exit_after_creates=1" \
" bulk_load_batch=#{bulk_load_batch}" \
" json=#{$json}")
end
proc_stats_end()
# ------------------------------------
step("unclogging...", nil, nil, clog_num_items,
<<-'EOD')
After the clogging step, turn persistence draining back on,
and wait until the persistence queue drains to zero.
EOD
run("/opt/#{$pkg_base}/bin/mbflushctl #{$target}:11210 start")
proc_stats_start()
wait_until_queue_drained()
proc_stats_end()
end
# ------------------------------------
step("done...",
"sudo /etc/init.d/#{$pkg_base}-server stop",
nil, nil,
<<-'EOD')
Stop the server and kill processes.
EOD
run("killall -9 heart")
run("killall -9 couchdb")
run("killall -9 beam.smp")
run("killall -9 epmd")
run("killall -9 moxi")