Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

cosbench.py: do not compose "nodes" using user@host #189

Merged
merged 4 commits into from Aug 13, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
25 changes: 12 additions & 13 deletions benchmark/cosbench.py
Expand Up @@ -27,7 +27,6 @@ def __init__(self, cluster, config):
self.containers = config["containers_max"]
self.objects = config["objects_max"]
self.mode = config["mode"]
self.user = settings.cluster.get('user')
self.rgw = settings.cluster.get('rgws').keys()[0]
self.radosgw_admin_cmd = settings.cluster.get('radosgw-admin_cmd', '/usr/bin/radosgw-admin')
self.use_existing = settings.cluster.get('use_existing')
Expand All @@ -52,13 +51,13 @@ def prerun_check(self):
if "username" in cosconf and "password" in cosconf and "url" in cosconf:
if not self.use_existing or self.is_teuthology:
user, subuser = cosconf["username"].split(':')
stdout, stderr = common.pdsh("%s@%s" % (self.user, self.rgw),"radosgw-admin user create --uid='%s' --display-name='%s'" % (user, user)).communicate()
stdout, stderr = common.pdsh("%s@%s" % (self.user, self.rgw),"radosgw-admin subuser create --uid=%s --subuser=%s --access=full" % (user, cosconf["username"])).communicate()
stdout, stderr = common.pdsh("%s@%s" % (self.user, self.rgw),"radosgw-admin key create --uid=%s --subuser=%s --key-type=swift" % (user, cosconf["username"])).communicate()
stdout, stderr = common.pdsh("%s@%s" % (self.user, self.rgw),"radosgw-admin user modify --uid=%s --max-buckets=100000" % (user)).communicate()
stdout, stderr = common.pdsh("%s@%s" % (self.user, self.rgw),"radosgw-admin subuser modify --uid=%s --subuser=%s --secret=%s --key-type=swift" % (user, cosconf["username"], cosconf["password"])).communicate()
stdout, stderr = common.pdsh(self.rgw, "radosgw-admin user create --uid='%s' --display-name='%s'" % (user, user)).communicate()
stdout, stderr = common.pdsh(self.rgw, "radosgw-admin subuser create --uid=%s --subuser=%s --access=full" % (user, cosconf["username"])).communicate()
stdout, stderr = common.pdsh(self.rgw, "radosgw-admin key create --uid=%s --subuser=%s --key-type=swift" % (user, cosconf["username"])).communicate()
stdout, stderr = common.pdsh(self.rgw, "radosgw-admin user modify --uid=%s --max-buckets=100000" % (user)).communicate()
stdout, stderr = common.pdsh(self.rgw, "radosgw-admin subuser modify --uid=%s --subuser=%s --secret=%s --key-type=swift" % (user, cosconf["username"], cosconf["password"])).communicate()

stdout, stderr = common.pdsh("%s@%s" % (self.user, self.config["controller"]),"curl -D - -H 'X-Auth-User: %s' -H 'X-Auth-Key: %s' %s" % (cosconf["username"], cosconf["password"], cosconf["url"])).communicate()
stdout, stderr = common.pdsh(self.config["controller"], "curl -D - -H 'X-Auth-User: %s' -H 'X-Auth-Key: %s' %s" % (cosconf["username"], cosconf["password"], cosconf["url"])).communicate()

else:
logger.error("Auth Configuration in Yaml file is not in correct format")
Expand All @@ -72,7 +71,7 @@ def prerun_check(self):
#3. check if container and obj created
target_name = "%s-%s-%s" % (self.config["obj_size"], self.config["mode"], self.config["objects_max"])
container_count = 0
stdout, stderr = common.pdsh("%s@%s" % (self.user, self.rgw),"swift -A %s -U %s -K %s list" % (cosconf["url"], cosconf["username"], cosconf["password"])).communicate()
stdout, stderr = common.pdsh(self.rgw, "swift -A %s -U %s -K %s list" % (cosconf["url"], cosconf["username"], cosconf["password"])).communicate()
if stderr != "":
self.container_prepared = False
return
Expand Down Expand Up @@ -231,7 +230,7 @@ def run(self):
except KeyboardInterrupt:
logger.warning("accept keyboard interrupt, cancel this run")
conf = self.config
stdout, stderr = common.pdsh("%s@%s" % (self.user, conf["controller"]),'sh %s/cli.sh cancel %s' % (conf["cosbench_dir"], self.runid)).communicate()
stdout, stderr = common.pdsh(conf["controller"],'sh %s/cli.sh cancel %s' % (conf["cosbench_dir"], self.runid)).communicate()
logger.info("%s", stdout)

self.check_workload_status()
Expand All @@ -249,7 +248,7 @@ def check_workload_status(self):
except:
wait = False
while wait:
stdout, stderr = common.pdsh("%s@%s" % (self.user, self.config["controller"]),"sh %s/cli.sh info | grep %s | awk '{print $8}'" % (self.config["cosbench_dir"], self.runid)).communicate()
stdout, stderr = common.pdsh(self.config["controller"], "sh %s/cli.sh info | grep %s | awk '{print $8}'" % (self.config["cosbench_dir"], self.runid)).communicate()
if stderr:
logger.info("Cosbench Deamon is not running on %s", self.config["controller"])
return False
Expand All @@ -260,7 +259,7 @@ def check_workload_status(self):
except:
wait = False
time.sleep(1)
stdout, stderr = common.pdsh("%s@%s" % (self.user, self.config["controller"]),"sh %s/cli.sh info " % (self.config["cosbench_dir"])).communicate()
stdout, stderr = common.pdsh(self.config["controller"], "sh %s/cli.sh info " % (self.config["cosbench_dir"])).communicate()
logger.debug(stdout)
time.sleep(15)
return True
Expand All @@ -269,7 +268,7 @@ def check_cosbench_res_dir(self):
#check res dir
check_time = 0
while True:
stdout, stderr = common.pdsh("%s@%s" % (self.user, self.config["controller"]), "find %s/archive -maxdepth 1 -name '%s-*'" % (self.config["cosbench_dir"], self.runid)).communicate()
stdout, stderr = common.pdsh(self.config["controller"], "find %s/archive -maxdepth 1 -name '%s-*'" % (self.config["cosbench_dir"], self.runid)).communicate()
if stdout:
return True
if check_time == 3000:
Expand All @@ -279,7 +278,7 @@ def check_cosbench_res_dir(self):

def _run(self):
conf = self.config
stdout, stderr = common.pdsh("%s@%s" % (self.user, conf["controller"]),'sh %s/cli.sh submit %s/%s.xml' % (conf["cosbench_dir"], conf["cosbench_xml_dir"], conf["xml_name"])).communicate()
stdout, stderr = common.pdsh(conf["controller"], 'sh %s/cli.sh submit %s/%s.xml' % (conf["cosbench_dir"], conf["cosbench_xml_dir"], conf["xml_name"])).communicate()
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In the constructor we still do:

    self.user = settings.cluster.get('user')

which might be unnecessary after the changes. The user key is also present in some example files (like example/wip-cosbench/cosbench_ex.yaml).

In the ceph repo we use this setting (user: 'ubuntu') in:

qa/suites/rados/perf/workloads/cosbench_64K_write.yaml
qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml
qa/suites/perf-basic/workloads/cosbench_64K_write.yaml

Copy link
Contributor Author

@tchaikov tchaikov Aug 13, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@rzarzynski removed from Cosbench.

user: 'ubuntu' is used, but I am not sure what you are suggesting. What I can tell is that it is still needed in the cosbench tests, as we might want to use it when deploying the Ceph cluster if it is not a use_existing cluster or a local run.

m = re.findall('Accepted with ID:\s*(\w+)', stdout )
if not m:
logger.error("cosbench start failing with error: %s", stderr)
Expand Down
2 changes: 1 addition & 1 deletion cluster/ceph.py
Expand Up @@ -120,7 +120,7 @@ def __init__(self, config):
self.tmp_conf = '%s/ceph.conf' % self.tmp_dir
# If using an existing cluster, default to /etc/ceph/ceph.conf
if self.use_existing:
self.tmp_conf = self.config.get('conf_file', '/etc/ceph/ceph.conf')
self.tmp_conf = self.config.get('conf_file')

self.osd_valgrind = config.get('osd_valgrind', None)
self.mon_valgrind = config.get('mon_valgrind', None)
Expand Down
4 changes: 2 additions & 2 deletions common.py
Expand Up @@ -102,7 +102,7 @@ def pdsh(nodes, command, continue_if_error=True):
if local_node:
return sh(local_node, command, continue_if_error=continue_if_error)
else:
args = ['pdsh', '-f', str(len(expanded_node_list(nodes))), '-R', 'ssh', '-w', nodes, command]
args = ['pdsh', '-f', str(len(expanded_node_list(nodes))), '-R', 'ssh', '-w', nodes, join_nostr(command)]
# -S means pdsh fails if any host fails
if not continue_if_error: args.insert(1, '-S')
return CheckedPopen(args,continue_if_error=continue_if_error)
Expand Down Expand Up @@ -132,7 +132,7 @@ def rpdcp(nodes, flags, remotefile, localdir):
args = ['rpdcp', '-f', '10', '-R', 'ssh', '-w', nodes]
if flags:
args += [flags]
return CheckedPopen(args + [remotefile, localfile],
return CheckedPopen(args + [remotefile, localdir],
continue_if_error=False)


Expand Down
29 changes: 20 additions & 9 deletions settings.py
Expand Up @@ -43,13 +43,19 @@ def initialize(ctx):
if not benchmarks:
shutdown('No benchmarks section found in config file, bailing.')

# set the archive_dir from the commandline if present
if ctx.archive:
cluster['archive_dir'] = ctx.archive
if 'archive_dir' not in cluster:
shutdown('No archive dir has been set.')

_handle_monitoring_legacy()

# store cbt configuration in the archive directory
cbt_results = os.path.join(ctx.archive, 'results')
cbt_results = os.path.join(cluster['archive_dir'], 'results')
config_file = os.path.join(cbt_results, 'cbt_config.yaml')
if not os.path.exists(ctx.archive):
os.makedirs(ctx.archive)
if not os.path.exists(cluster['archive_dir']):
os.makedirs(cluster['archive_dir'])
if not os.path.exists(cbt_results):
os.makedirs(cbt_results)
if not os.path.exists(config_file):
Expand All @@ -61,14 +67,19 @@ def initialize(ctx):
if 'tmp_dir' not in cluster:
cluster['tmp_dir'] = '/tmp/cbt.%s' % os.getpid()

# set the ceph.conf file from the commandline, yaml, or default
# set the ceph.conf file from the commandline if present
if ctx.conf:
cluster['conf_file'] = ctx.conf
elif 'conf_file' not in cluster:
cluster['conf_file'] = "%s/ceph.conf" % (cluster.get('conf_file'),)

if ctx.archive:
cluster['archive_dir'] = ctx.archive
# If no conf file is set, default to /etc/ceph/ceph.conf
# FIXME: We shouldn't have cluster specific defaults in settings.
# Eventually make a base class with specific cluster implementations.
if 'conf_file' not in cluster:
cluster['conf_file'] = '/etc/ceph/ceph.conf'
try:
f = open(cluster['conf_file'])
f.close()
except IOError, e:
shutdown('Was not able to access conf file: %s' % cluster['conf_file'])

def host_info(host):
ret = {}
Expand Down