mimic: tests: do not take ceph.conf.template from ceph/teuthology.git #30841

Merged
2 commits merged on Oct 23, 2019
Changes from 1 commit
91 changes: 91 additions & 0 deletions qa/tasks/ceph.conf.template
@@ -0,0 +1,91 @@
[global]
chdir = ""
pid file = /var/run/ceph/$cluster-$name.pid
auth supported = cephx

filestore xattr use omap = true

mon clock drift allowed = 1.000

osd crush chooseleaf type = 0
auth debug = true

ms die on old message = true

mon pg warn min per osd = 1
mon pg warn max per osd = 10000 # <= luminous
mon max pg per osd = 10000 # >= luminous
mon pg warn max object skew = 0

osd pool default size = 2

mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false

osd pool default erasure code profile = "plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd crush-failure-domain=osd"

osd default data pool replay window = 5

mon allow pool delete = true

mon cluster log file level = debug
debug asserts on shutdown = true

[osd]
osd journal size = 100

osd scrub load threshold = 5.0
osd scrub max interval = 600

osd recover clone overlap = true
osd recovery max chunk = 1048576

osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true

osd open classes on start = true
osd debug pg log writeout = true

osd deep scrub update digest min age = 30

osd map max advance = 10

journal zero on create = true

filestore ondisk finisher threads = 3
filestore apply finisher threads = 3

bdev debug aio = true
osd debug misdirected ops = true

[mgr]
debug ms = 1
debug mgr = 20
debug mon = 20
debug auth = 20
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10

[mon]
debug ms = 1
debug mon = 20
debug paxos = 20
debug auth = 20
mon data avail warn = 5
mon mgr mkfs grace = 120
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10

[client]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
log file = /var/log/ceph/$cluster-$name.$pid.log
admin socket = /var/run/ceph/$cluster-$name.$pid.asok
118 changes: 116 additions & 2 deletions qa/tasks/ceph.py
@@ -6,13 +6,15 @@
from cStringIO import StringIO

import argparse
import configobj
import contextlib
import errno
import logging
import os
import json
import time
import gevent
import re
import socket

from paramiko import SSHException
@@ -381,6 +383,114 @@ def cephfs_setup(ctx, config):
    yield


def get_mons(roles, ips):
    """
    Get monitors and their associated addresses
    """
    mons = {}
    ports = {}
    mon_id = 0
    is_mon = teuthology.is_type('mon')
    for idx, roles in enumerate(roles):
        for role in roles:
            if not is_mon(role):
                continue
            if ips[idx] not in ports:
                ports[ips[idx]] = 6789
            else:
                ports[ips[idx]] += 1
            addr = '{ip}:{port}'.format(
                ip=ips[idx],
                port=ports[ips[idx]],
            )
            mon_id += 1
            mons[role] = addr
    assert mons
    return mons

def skeleton_config(ctx, roles, ips, mons, cluster='ceph'):
    """
    Returns a ConfigObj that is prefilled with a skeleton config.

    Use conf[section][key]=value or conf.merge to change it.

    Use conf.write to write it out, override .filename first if you want.
    """
    path = os.path.join(os.path.dirname(__file__), 'ceph.conf.template')
    t = open(path, 'r')
    skconf = t.read().format(testdir=teuthology.get_testdir(ctx))
    conf = configobj.ConfigObj(StringIO(skconf), file_error=True)
    mon_hosts = []
    for role, addr in mons.iteritems():
        mon_cluster, _, _ = teuthology.split_role(role)
        if mon_cluster != cluster:
            continue
        name = teuthology.ceph_role(role)
        conf.setdefault(name, {})
        mon_hosts.append(addr)
    conf.setdefault('global', {})
    conf['global']['mon host'] = ','.join(mon_hosts)
    # set up standby mds's
    is_mds = teuthology.is_type('mds', cluster)
    for roles_subset in roles:
        for role in roles_subset:
            if is_mds(role):
                name = teuthology.ceph_role(role)
                conf.setdefault(name, {})
                if '-s-' in name:
                    standby_mds = name[name.find('-s-') + 3:]
                    conf[name]['mds standby for name'] = standby_mds
    return conf
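
As an aside, here is a minimal sketch of how the ConfigObj returned above might be used, following the docstring; this is illustrative only and not part of the diff, and the option values and output path are made up:

# Hypothetical usage of the object returned by skeleton_config().
conf = skeleton_config(ctx, roles=roles, ips=ips, mons=mons, cluster='ceph')
conf['global']['debug ms'] = '1'           # set a single option
conf.merge({'osd': {'debug osd': '20'}})   # or merge in a whole dict at once
conf.filename = '/tmp/ceph.conf'           # override .filename before writing
conf.write()                               # serialize back out as an ini file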

def create_simple_monmap(ctx, remote, conf, mons,
                         path=None,
                         mon_bind_addrvec=False):
    """
    Writes a simple monmap based on current ceph.conf into path, or
    <testdir>/monmap by default.

    Assumes ceph_conf is up to date.

    Assumes mon sections are named "mon.*", with the dot.

    :return the FSID (as a string) of the newly created monmap
    """

    addresses = list(mons.iteritems())
    assert addresses, "There are no monitors in config!"
    log.debug('Ceph mon addresses: %s', addresses)

    testdir = teuthology.get_testdir(ctx)
    args = [
        'adjust-ulimits',
        'ceph-coverage',
        '{tdir}/archive/coverage'.format(tdir=testdir),
        'monmaptool',
        '--create',
        '--clobber',
    ]
    for (name, addr) in addresses:
        n = name[4:]
Review comment: I think the name could be cluster1.mon.a, so hard-coding the prefix length to 4 would be incorrect. Need teuthology.split_role(name)?
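
A rough sketch of what the suggested change could look like (an assumption, not code from this PR): derive the mon id with teuthology.split_role(), already used in skeleton_config() above, instead of slicing off a fixed-length prefix, so a role like cluster1.mon.a is also handled:

# Hypothetical replacement for n = name[4:] inside the loop above.
# split_role() yields (cluster, type, id), e.g. ('cluster1', 'mon', 'a').
_, _, mon_id = teuthology.split_role(name)
if mon_bind_addrvec:
    args.extend(('--addv', mon_id, addr))
else:
    args.extend(('--add', mon_id, addr))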

        if mon_bind_addrvec:
            args.extend(('--addv', n, addr))
        else:
            args.extend(('--add', n, addr))
    if not path:
        path = '{tdir}/monmap'.format(tdir=testdir)
    args.extend([
        '--print',
        path
    ])

    r = remote.run(
        args=args,
        stdout=StringIO()
    )
    monmap_output = r.stdout.getvalue()
    fsid = re.search("generated fsid (.+)$",
                     monmap_output, re.MULTILINE).group(1)
    return fsid

@contextlib.contextmanager
def cluster(ctx, config):
"""
@@ -480,7 +590,10 @@ def cluster(ctx, config):
    roles = [role_list for (remote, role_list) in remotes_and_roles]
    ips = [host for (host, port) in
           (remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)]
    conf = teuthology.skeleton_config(ctx, roles=roles, ips=ips, cluster=cluster_name)
    mons = get_mons(roles, ips)
Review comment: I believe this is breaking the RBD mirroring test cases, since support for multiple clusters seems to have been dropped here.

Author reply: yikes! fixing
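
For illustration, a rough sketch of one way get_mons() could stay cluster-aware (an assumption about a possible fix, not code from this PR); it leans on the cluster argument that teuthology.is_type() already accepts, as seen in skeleton_config() above:

# Hypothetical cluster-aware variant; relies on the same `teuthology` import
# as the surrounding module and would be called once per cluster.
def get_mons(roles, ips, cluster_name='ceph'):
    mons = {}
    ports = {}
    is_mon = teuthology.is_type('mon', cluster_name)  # only this cluster's mons
    for idx, node_roles in enumerate(roles):
        for role in node_roles:
            if not is_mon(role):
                continue
            if ips[idx] not in ports:
                ports[ips[idx]] = 6789
            else:
                ports[ips[idx]] += 1
            mons[role] = '{ip}:{port}'.format(ip=ips[idx],
                                              port=ports[ips[idx]])
    assert mons
    return mons

# e.g. in cluster():
mons = get_mons(roles, ips, cluster_name=cluster_name)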

    conf = skeleton_config(
        ctx, roles=roles, ips=ips, mons=mons, cluster=cluster_name,
    )
    for remote, roles_to_journals in remote_to_roles_to_journals.iteritems():
        for role, journal in roles_to_journals.iteritems():
            name = teuthology.ceph_role(role)
@@ -544,10 +657,11 @@ def cluster(ctx, config):
    (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
    monmap_path = '{tdir}/{cluster}.monmap'.format(tdir=testdir,
                                                   cluster=cluster_name)
    fsid = teuthology.create_simple_monmap(
    fsid = create_simple_monmap(
        ctx,
        remote=mon0_remote,
        conf=conf,
        mons=mons,
        path=monmap_path,
    )
    if not 'global' in conf: