diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh
index 1cbbe0076cb43d..e1355dc2c02473 100755
--- a/qa/workunits/cephtool/test.sh
+++ b/qa/workunits/cephtool/test.sh
@@ -315,10 +315,8 @@ function test_tiering()
 
   # make sure we can't clobber snapshot state
   ceph osd pool create snap_base 2
   ceph osd pool create snap_cache 2
-  rbd -p snap_cache create foo --size 10
-  rbd -p snap_cache snap create foo --snap snap1
-  rbd -p snap_cache snap rm foo --snap snap1
-  expect_false ceph osd tier add snap_base snap_cache --force-nonempty
+  ceph osd pool mksnap snap_cache snapname
+  expect_false ceph osd tier add snap_base snap_cache
   ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it
   ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it
diff --git a/qa/workunits/mon/crush_ops.sh b/qa/workunits/mon/crush_ops.sh
index 80950032fb2f97..adb3162038cb6f 100755
--- a/qa/workunits/mon/crush_ops.sh
+++ b/qa/workunits/mon/crush_ops.sh
@@ -63,6 +63,12 @@ ceph osd tree | grep -c host1 | grep -q 0
 
 expect_false ceph osd crush rm bar   # not empty
 ceph osd crush unlink host2
+
+# reference foo and bar with a rule
+ceph osd crush rule create-simple foo-rule foo host firstn
+expect_false ceph osd crush rm foo
+ceph osd crush rule rm foo-rule
+
 ceph osd crush rm bar
 ceph osd crush rm foo
 ceph osd crush rm osd.$o2 host2
diff --git a/src/common/config_opts.h b/src/common/config_opts.h
index aee900d4bb03fe..85a602221f2f57 100644
--- a/src/common/config_opts.h
+++ b/src/common/config_opts.h
@@ -263,6 +263,10 @@ OPTION(mon_sync_provider_kill_at, OPT_INT, 0)  // kill the sync provider at a sp
 OPTION(mon_sync_requester_kill_at, OPT_INT, 0)  // kill the sync requester at a specific point in the work flow
 OPTION(mon_force_quorum_join, OPT_BOOL, false) // force monitor to join quorum even if it has been previously removed from the map
 OPTION(mon_keyvaluedb, OPT_STR, "leveldb")   // type of keyvaluedb backend
+
+// UNSAFE -- TESTING ONLY! Allows addition of a cache tier with preexisting snaps
+OPTION(mon_debug_unsafe_allow_tier_with_nonempty_snaps, OPT_BOOL, false)
+
 OPTION(paxos_stash_full_interval, OPT_INT, 25)   // how often (in commits) to stash a full copy of the PaxosService state
 OPTION(paxos_max_join_drift, OPT_INT, 10) // max paxos iterations before we must first sync the monitor stores
 OPTION(paxos_propose_interval, OPT_DOUBLE, 1.0)  // gather updates for this long before proposing a map update
@@ -564,16 +568,22 @@ OPTION(osd_recover_clone_overlap_limit, OPT_INT, 10)
 OPTION(osd_backfill_scan_min, OPT_INT, 64)
 OPTION(osd_backfill_scan_max, OPT_INT, 512)
 OPTION(osd_op_thread_timeout, OPT_INT, 15)
+OPTION(osd_op_thread_suicide_timeout, OPT_INT, 150)
 OPTION(osd_recovery_thread_timeout, OPT_INT, 30)
+OPTION(osd_recovery_thread_suicide_timeout, OPT_INT, 300)
 OPTION(osd_snap_trim_thread_timeout, OPT_INT, 60*60*1)
+OPTION(osd_snap_trim_thread_suicide_timeout, OPT_INT, 60*60*10)
 OPTION(osd_snap_trim_sleep, OPT_FLOAT, 0)
 OPTION(osd_scrub_thread_timeout, OPT_INT, 60)
+OPTION(osd_scrub_thread_suicide_timeout, OPT_INT, 60)
 OPTION(osd_scrub_finalize_thread_timeout, OPT_INT, 60*10)
 OPTION(osd_scrub_invalid_stats, OPT_BOOL, true)
 OPTION(osd_remove_thread_timeout, OPT_INT, 60*60)
+OPTION(osd_remove_thread_suicide_timeout, OPT_INT, 10*60*60)
 OPTION(osd_command_thread_timeout, OPT_INT, 10*60)
 OPTION(osd_age, OPT_FLOAT, .8)
 OPTION(osd_age_time, OPT_INT, 0)
+OPTION(osd_command_thread_suicide_timeout, OPT_INT, 15*60)
 OPTION(osd_heartbeat_addr, OPT_ADDR, entity_addr_t())
 OPTION(osd_heartbeat_interval, OPT_INT, 6)       // (seconds) how often we ping peers
 OPTION(osd_heartbeat_grace, OPT_INT, 20)         // (seconds) how long before we decide a peer has failed
diff --git a/src/crush/CrushWrapper.cc b/src/crush/CrushWrapper.cc
index 497c2196958a2f..ec3414b3dda600 100644
--- a/src/crush/CrushWrapper.cc
+++ b/src/crush/CrushWrapper.cc
@@ -192,6 +192,9 @@ bool CrushWrapper::_maybe_remove_last_instance(CephContext *cct, int item, bool
   if (_search_item_exists(item)) {
     return false;
   }
+  if (item < 0 && _bucket_is_in_use(cct, item)) {
+    return false;
+  }
 
   if (item < 0 && !unlink_only) {
     crush_bucket *t = get_bucket(item);
@@ -219,6 +222,9 @@
                     << " items, not empty" << dendl;
       return -ENOTEMPTY;
     }
+    if (_bucket_is_in_use(cct, item)) {
+      return -EBUSY;
+    }
   }
 
   for (int i = 0; i < crush->max_buckets; i++) {
@@ -258,6 +264,22 @@
   return false;
 }
 
+bool CrushWrapper::_bucket_is_in_use(CephContext *cct, int item)
+{
+  for (unsigned i = 0; i < crush->max_rules; ++i) {
+    crush_rule *r = crush->rules[i];
+    if (!r)
+      continue;
+    for (unsigned j = 0; j < r->len; ++j) {
+      if (r->steps[j].op == CRUSH_RULE_TAKE &&
+          r->steps[j].arg1 == item) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 int CrushWrapper::_remove_item_under(CephContext *cct, int item, int ancestor, bool unlink_only)
 {
   ldout(cct, 5) << "_remove_item_under " << item << " under " << ancestor
@@ -293,6 +315,11 @@
 {
   ldout(cct, 5) << "remove_item_under " << item << " under " << ancestor
                 << (unlink_only ? " unlink_only":"") << dendl;
+
+  if (!unlink_only && _bucket_is_in_use(cct, item)) {
+    return -EBUSY;
+  }
+
   int ret = _remove_item_under(cct, item, ancestor, unlink_only);
   if (ret < 0)
     return ret;
" unlink_only":"") << dendl; + + if (!unlink_only && _bucket_is_in_use(cct, item)) { + return -EBUSY; + } + int ret = _remove_item_under(cct, item, ancestor, unlink_only); if (ret < 0) return ret; diff --git a/src/crush/CrushWrapper.h b/src/crush/CrushWrapper.h index cfafab067dee29..43acc48f49e508 100644 --- a/src/crush/CrushWrapper.h +++ b/src/crush/CrushWrapper.h @@ -587,6 +587,7 @@ class CrushWrapper { private: bool _maybe_remove_last_instance(CephContext *cct, int id, bool unlink_only); int _remove_item_under(CephContext *cct, int id, int ancestor, bool unlink_only); + bool _bucket_is_in_use(CephContext *cct, int id); public: int remove_item_under(CephContext *cct, int id, int ancestor, bool unlink_only); diff --git a/src/crush/mapper.c b/src/crush/mapper.c index 251ab4af1f8a3e..916790d74672d8 100644 --- a/src/crush/mapper.c +++ b/src/crush/mapper.c @@ -839,8 +839,15 @@ int crush_do_rule(const struct crush_map *map, switch (curstep->op) { case CRUSH_RULE_TAKE: - w[0] = curstep->arg1; - wsize = 1; + if ((curstep->arg1 >= 0 && + curstep->arg1 < map->max_devices) || + (-1-curstep->arg1 < map->max_buckets && + map->buckets[-1-curstep->arg1])) { + w[0] = curstep->arg1; + wsize = 1; + } else { + dprintk(" bad take value %d\n", curstep->arg1); + } break; case CRUSH_RULE_SET_CHOOSE_TRIES: diff --git a/src/include/krbd.h b/src/include/krbd.h index 702a76dbc6acf2..d7e868c438d2d9 100644 --- a/src/include/krbd.h +++ b/src/include/krbd.h @@ -13,14 +13,15 @@ #ifndef CEPH_KRBD_H #define CEPH_KRBD_H +#include "rados/librados.h" + #ifdef __cplusplus extern "C" { #endif struct krbd_ctx; -struct CephContext; -int krbd_create_from_context(struct CephContext *cct, struct krbd_ctx **pctx); +int krbd_create_from_context(rados_config_t cct, struct krbd_ctx **pctx); void krbd_destroy(struct krbd_ctx *ctx); int krbd_map(struct krbd_ctx *ctx, const char *pool, const char *image, @@ -38,7 +39,7 @@ namespace ceph { class Formatter; } -int krbd_showmapped(struct krbd_ctx *ctx, Formatter *f); +int krbd_showmapped(struct krbd_ctx *ctx, ceph::Formatter *f); #endif /* __cplusplus */ diff --git a/src/krbd.cc b/src/krbd.cc index 04f40047542984..3fb64fd67e631c 100644 --- a/src/krbd.cc +++ b/src/krbd.cc @@ -34,6 +34,7 @@ #include "common/TextTable.h" #include "include/assert.h" #include "include/stringify.h" +#include "include/krbd.h" #include "mon/MonMap.h" #include @@ -582,12 +583,12 @@ int dump_images(struct krbd_ctx *ctx, Formatter *f) return r; } -extern "C" int krbd_create_from_context(struct CephContext *cct, +extern "C" int krbd_create_from_context(rados_config_t cct, struct krbd_ctx **pctx) { struct krbd_ctx *ctx = new struct krbd_ctx(); - ctx->cct = cct; + ctx->cct = reinterpret_cast(cct); ctx->udev = udev_new(); if (!ctx->udev) { delete ctx; diff --git a/src/librbd/TaskFinisher.h b/src/librbd/TaskFinisher.h index 14dcd3011fd0ca..4942a38bbdc211 100644 --- a/src/librbd/TaskFinisher.h +++ b/src/librbd/TaskFinisher.h @@ -35,6 +35,7 @@ class TaskFinisher { delete m_safe_timer; } + m_finisher->wait_for_empty(); m_finisher->stop(); delete m_finisher; } diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc index 2857e585994917..436035649077ad 100644 --- a/src/mon/OSDMonitor.cc +++ b/src/mon/OSDMonitor.cc @@ -6121,7 +6121,9 @@ bool OSDMonitor::prepare_command_impl(MMonCommand *m, err = -ENOTSUP; goto reply; } - if (!tp->removed_snaps.empty() || !tp->snaps.empty()) { + if ((!tp->removed_snaps.empty() || !tp->snaps.empty()) && + ((force_nonempty != "--force-nonempty") || + 
diff --git a/src/include/krbd.h b/src/include/krbd.h
index 702a76dbc6acf2..d7e868c438d2d9 100644
--- a/src/include/krbd.h
+++ b/src/include/krbd.h
@@ -13,14 +13,15 @@
 #ifndef CEPH_KRBD_H
 #define CEPH_KRBD_H
 
+#include "rados/librados.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 struct krbd_ctx;
-struct CephContext;
 
-int krbd_create_from_context(struct CephContext *cct, struct krbd_ctx **pctx);
+int krbd_create_from_context(rados_config_t cct, struct krbd_ctx **pctx);
 void krbd_destroy(struct krbd_ctx *ctx);
 
 int krbd_map(struct krbd_ctx *ctx, const char *pool, const char *image,
@@ -38,7 +39,7 @@
 namespace ceph {
   class Formatter;
 }
 
-int krbd_showmapped(struct krbd_ctx *ctx, Formatter *f);
+int krbd_showmapped(struct krbd_ctx *ctx, ceph::Formatter *f);
 
 #endif /* __cplusplus */
diff --git a/src/krbd.cc b/src/krbd.cc
index 04f40047542984..3fb64fd67e631c 100644
--- a/src/krbd.cc
+++ b/src/krbd.cc
@@ -34,6 +34,7 @@
 #include "common/TextTable.h"
 #include "include/assert.h"
 #include "include/stringify.h"
+#include "include/krbd.h"
 #include "mon/MonMap.h"
 
 #include
@@ -582,12 +583,12 @@ int dump_images(struct krbd_ctx *ctx, Formatter *f)
   return r;
 }
 
-extern "C" int krbd_create_from_context(struct CephContext *cct,
+extern "C" int krbd_create_from_context(rados_config_t cct,
                                         struct krbd_ctx **pctx)
 {
   struct krbd_ctx *ctx = new struct krbd_ctx();
 
-  ctx->cct = cct;
+  ctx->cct = reinterpret_cast<CephContext *>(cct);
   ctx->udev = udev_new();
   if (!ctx->udev) {
     delete ctx;
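
With the header change above, C callers no longer need the C++-only CephContext type; any rados_config_t will do, and a connected cluster handle can supply one via librados' rados_cct(). A sketch of the resulting call pattern (the wrapper name is hypothetical; fsx below uses this API directly):

    #include "include/rados/librados.h"
    #include "include/krbd.h"

    /* Hypothetical helper, error handling trimmed: rados_cct() hands back
     * the cluster's config handle, which krbd now accepts directly. */
    static int krbd_ctx_from_cluster(rados_t cluster, struct krbd_ctx **pctx)
    {
        return krbd_create_from_context(rados_cct(cluster), pctx);
    }

The context created this way is released with krbd_destroy() as before.
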
diff --git a/src/librbd/TaskFinisher.h b/src/librbd/TaskFinisher.h
index 14dcd3011fd0ca..4942a38bbdc211 100644
--- a/src/librbd/TaskFinisher.h
+++ b/src/librbd/TaskFinisher.h
@@ -35,6 +35,7 @@ class TaskFinisher {
       delete m_safe_timer;
     }
 
+    m_finisher->wait_for_empty();
     m_finisher->stop();
     delete m_finisher;
   }
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index 2857e585994917..436035649077ad 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -6121,7 +6121,9 @@ bool OSDMonitor::prepare_command_impl(MMonCommand *m,
       err = -ENOTSUP;
       goto reply;
     }
-    if (!tp->removed_snaps.empty() || !tp->snaps.empty()) {
+    if ((!tp->removed_snaps.empty() || !tp->snaps.empty()) &&
+        ((force_nonempty != "--force-nonempty") ||
+         (!g_conf->mon_debug_unsafe_allow_tier_with_nonempty_snaps))) {
       ss << "tier pool '" << tierpoolstr << "' has snapshot state; it cannot be added as a tier without breaking the pool";
       err = -ENOTEMPTY;
       goto reply;
diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index c126c3b105d0df..7dbcfc5ae60306 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -1543,9 +1543,17 @@ OSD::OSD(CephContext *cct_, ObjectStore *store_,
   op_tracker(cct, cct->_conf->osd_enable_op_tracker,
              cct->_conf->osd_num_op_tracker_shard),
   test_ops_hook(NULL),
-  op_shardedwq(cct->_conf->osd_op_num_shards, this,
-               cct->_conf->osd_op_thread_timeout, &osd_op_tp),
-  peering_wq(this, cct->_conf->osd_op_thread_timeout, &osd_tp),
+  op_shardedwq(
+    cct->_conf->osd_op_num_shards,
+    this,
+    cct->_conf->osd_op_thread_timeout,
+    cct->_conf->osd_op_thread_suicide_timeout,
+    &osd_op_tp),
+  peering_wq(
+    this,
+    cct->_conf->osd_op_thread_timeout,
+    cct->_conf->osd_op_thread_suicide_timeout,
+    &osd_tp),
   map_lock("OSD::map_lock"),
   pg_map_lock("OSD::pg_map_lock"),
   debug_drop_pg_create_probability(cct->_conf->osd_debug_drop_pg_create_probability),
@@ -1557,14 +1565,38 @@ OSD::OSD(CephContext *cct_, ObjectStore *store_,
   pg_stat_queue_lock("OSD::pg_stat_queue_lock"),
   osd_stat_updated(false),
   pg_stat_tid(0), pg_stat_tid_flushed(0),
-  command_wq(this, cct->_conf->osd_command_thread_timeout, &command_tp),
+  command_wq(
+    this,
+    cct->_conf->osd_command_thread_timeout,
+    cct->_conf->osd_command_thread_suicide_timeout,
+    &command_tp),
   recovery_ops_active(0),
-  recovery_wq(this, cct->_conf->osd_recovery_thread_timeout, &recovery_tp),
+  recovery_wq(
+    this,
+    cct->_conf->osd_recovery_thread_timeout,
+    cct->_conf->osd_recovery_thread_suicide_timeout,
+    &recovery_tp),
   replay_queue_lock("OSD::replay_queue_lock"),
-  snap_trim_wq(this, cct->_conf->osd_snap_trim_thread_timeout, &disk_tp),
-  scrub_wq(this, cct->_conf->osd_scrub_thread_timeout, &disk_tp),
-  rep_scrub_wq(this, cct->_conf->osd_scrub_thread_timeout, &disk_tp),
-  remove_wq(store, cct->_conf->osd_remove_thread_timeout, &disk_tp),
+  snap_trim_wq(
+    this,
+    cct->_conf->osd_snap_trim_thread_timeout,
+    cct->_conf->osd_snap_trim_thread_suicide_timeout,
+    &disk_tp),
+  scrub_wq(
+    this,
+    cct->_conf->osd_scrub_thread_timeout,
+    cct->_conf->osd_scrub_thread_suicide_timeout,
+    &disk_tp),
+  rep_scrub_wq(
+    this,
+    cct->_conf->osd_scrub_thread_timeout,
+    cct->_conf->osd_scrub_thread_suicide_timeout,
+    &disk_tp),
+  remove_wq(
+    store,
+    cct->_conf->osd_remove_thread_timeout,
+    cct->_conf->osd_remove_thread_suicide_timeout,
+    &disk_tp),
   service(this)
 {
   monc->set_messenger(client_messenger);
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index c8e5b8fbe58083..f5021ef159bb4e 100644
--- a/src/osd/OSD.h
+++ b/src/osd/OSD.h
@@ -1452,8 +1452,8 @@ class OSD : public Dispatcher,
     uint32_t num_shards;
 
   public:
-    ShardedOpWQ(uint32_t pnum_shards, OSD *o, time_t ti, ShardedThreadPool* tp):
-      ShardedThreadPool::ShardedWQ < pair <PGRef, OpRequestRef> >(ti, ti*10, tp),
+    ShardedOpWQ(uint32_t pnum_shards, OSD *o, time_t ti, time_t si, ShardedThreadPool* tp):
+      ShardedThreadPool::ShardedWQ < pair <PGRef, OpRequestRef> >(ti, si, tp),
       osd(o), num_shards(pnum_shards) {
       for(uint32_t i = 0; i < num_shards; i++) {
         char lock_name[32] = {0};
@@ -1563,9 +1563,9 @@ class OSD : public Dispatcher,
     list<PG*> peering_queue;
     OSD *osd;
     set<PG*> in_use;
-    PeeringWQ(OSD *o, time_t ti, ThreadPool *tp)
+    PeeringWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
       : ThreadPool::BatchWorkQueue<PG>(
-        "OSD::PeeringWQ", ti, ti*10, tp), osd(o) {}
+        "OSD::PeeringWQ", ti, si, tp), osd(o) {}
 
     void _dequeue(PG *pg) {
       for (list<PG*>::iterator i = peering_queue.begin();
@@ -1944,8 +1944,8 @@ class OSD : public Dispatcher,
     list<Command*> command_queue;
     struct CommandWQ : public ThreadPool::WorkQueue<Command> {
       OSD *osd;
-      CommandWQ(OSD *o, time_t ti, ThreadPool *tp)
-        : ThreadPool::WorkQueue<Command>("OSD::CommandWQ", ti, 0, tp), osd(o) {}
+      CommandWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
+        : ThreadPool::WorkQueue<Command>("OSD::CommandWQ", ti, si, tp), osd(o) {}
 
       bool _empty() {
         return osd->command_queue.empty();
@@ -1998,8 +1998,8 @@ class OSD : public Dispatcher,
 
   struct RecoveryWQ : public ThreadPool::WorkQueue<PG> {
     OSD *osd;
-    RecoveryWQ(OSD *o, time_t ti, ThreadPool *tp)
-      : ThreadPool::WorkQueue<PG>("OSD::RecoveryWQ", ti, ti*10, tp), osd(o) {}
+    RecoveryWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
+      : ThreadPool::WorkQueue<PG>("OSD::RecoveryWQ", ti, si, tp), osd(o) {}
 
     bool _empty() {
       return osd->recovery_queue.empty();
@@ -2056,8 +2056,8 @@ class OSD : public Dispatcher,
 
   struct SnapTrimWQ : public ThreadPool::WorkQueue<PG> {
     OSD *osd;
-    SnapTrimWQ(OSD *o, time_t ti, ThreadPool *tp)
-      : ThreadPool::WorkQueue<PG>("OSD::SnapTrimWQ", ti, 0, tp), osd(o) {}
+    SnapTrimWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
+      : ThreadPool::WorkQueue<PG>("OSD::SnapTrimWQ", ti, si, tp), osd(o) {}
 
     bool _empty() {
       return osd->snap_trim_queue.empty();
@@ -2102,8 +2102,8 @@ class OSD : public Dispatcher,
 
   struct ScrubWQ : public ThreadPool::WorkQueue<PG> {
     OSD *osd;
-    ScrubWQ(OSD *o, time_t ti, ThreadPool *tp)
-      : ThreadPool::WorkQueue<PG>("OSD::ScrubWQ", ti, 0, tp), osd(o) {}
+    ScrubWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
+      : ThreadPool::WorkQueue<PG>("OSD::ScrubWQ", ti, si, tp), osd(o) {}
 
     bool _empty() {
       return osd->scrub_queue.empty();
@@ -2149,8 +2149,8 @@ class OSD : public Dispatcher,
     list<MOSDRepScrub*> rep_scrub_queue;
 
   public:
-    RepScrubWQ(OSD *o, time_t ti, ThreadPool *tp)
-      : ThreadPool::WorkQueue<MOSDRepScrub>("OSD::RepScrubWQ", ti, 0, tp), osd(o) {}
+    RepScrubWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
+      : ThreadPool::WorkQueue<MOSDRepScrub>("OSD::RepScrubWQ", ti, si, tp), osd(o) {}
 
     bool _empty() {
       return rep_scrub_queue.empty();
@@ -2202,9 +2202,9 @@ class OSD : public Dispatcher,
     public ThreadPool::WorkQueueVal<pair<PGRef, DeletingStateRef> > {
     ObjectStore *&store;
     list<pair<PGRef, DeletingStateRef> > remove_queue;
-    RemoveWQ(ObjectStore *&o, time_t ti, ThreadPool *tp)
+    RemoveWQ(ObjectStore *&o, time_t ti, time_t si, ThreadPool *tp)
       : ThreadPool::WorkQueueVal<pair<PGRef, DeletingStateRef> >(
-        "OSD::RemoveWQ", ti, 0, tp),
+        "OSD::RemoveWQ", ti, si, tp),
         store(o) {}
 
     bool _empty() {
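
Background for the *_suicide_timeout values threaded through above: each work queue already had a heartbeat timeout after which a stuck worker is merely reported unhealthy; the suicide timeout is a second, harder threshold past which the daemon aborts rather than limp along. Previously the suicide value was hardcoded per queue (ti*10, or 0 meaning disabled); now both thresholds come from config. A rough sketch of the two-threshold behavior (a simplified illustration, not Ceph's actual HeartbeatMap):

    #include <cassert>
    #include <ctime>

    struct WatchdogSketch {
      time_t grace;          // e.g. osd_op_thread_timeout
      time_t suicide_grace;  // e.g. osd_op_thread_suicide_timeout (0 = off)

      // Reports false once a worker has been busy longer than `grace`;
      // aborts the whole process past `suicide_grace`.
      bool healthy(time_t busy_for) const {
        if (suicide_grace > 0 && busy_for > suicide_grace)
          assert(0 == "worker exceeded suicide timeout");
        return busy_for <= grace;
      }
    };
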
diff --git a/src/osdc/ObjectCacher.cc b/src/osdc/ObjectCacher.cc
index 75174a68ff290c..95f4b8f504b826 100644
--- a/src/osdc/ObjectCacher.cc
+++ b/src/osdc/ObjectCacher.cc
@@ -1144,6 +1144,7 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
            ++bh_it) {
         uint64_t rx_bytes = static_cast<uint64_t>(
           stat_rx + bh_it->second->length());
+        bytes_not_in_cache += bh_it->second->length();
         if (!waitfor_read.empty() || rx_bytes > max_size) {
           // cache is full with concurrent reads -- wait for rx's to complete
           // to constrain memory growth (especially during copy-ups)
@@ -1165,7 +1166,6 @@
           bh_it->second->waitfor_read[bh_it->first].push_back(
             new C_RetryRead(this, rd, oset, onfinish) );
         }
-        bytes_not_in_cache += bh_it->second->length();
         success = false;
       }
 
diff --git a/src/pybind/rbd.py b/src/pybind/rbd.py
index 334e4c750103c6..35bf5c7613a674 100644
--- a/src/pybind/rbd.py
+++ b/src/pybind/rbd.py
@@ -466,8 +466,12 @@ def parent_info(self):
            pool = create_string_buffer(size)
            name = create_string_buffer(size)
            snapname = create_string_buffer(size)
-           ret = self.librbd.rbd_get_parent_info(self.image, pool, len(pool),
-                                                 name, len(name), snapname, len(snapname))
+           ret = self.librbd.rbd_get_parent_info(self.image, byref(pool),
+                                                 c_size_t(size),
+                                                 byref(name),
+                                                 c_size_t(size),
+                                                 byref(snapname),
+                                                 c_size_t(size))
            if ret == -errno.ERANGE:
                size *= 2
 
diff --git a/src/test/Makefile-client.am b/src/test/Makefile-client.am
index e83a13107b996f..ca5ce803ea679c 100644
--- a/src/test/Makefile-client.am
+++ b/src/test/Makefile-client.am
@@ -333,12 +333,11 @@ ceph_test_librbd_LDADD += $(LIBRBD_TP)
 endif
 
 if LINUX
-# Force use of C++ linker with dummy.cc - LIBKRBD is a C++ library
-ceph_test_librbd_fsx_SOURCES = test/librbd/fsx.c common/dummy.cc
+ceph_test_librbd_fsx_SOURCES = test/librbd/fsx.cc
 ceph_test_librbd_fsx_LDADD = \
 	$(LIBKRBD) $(LIBRBD) $(LIBRADOS) \
 	$(CRYPTO_LIBS) $(PTHREAD_LIBS) -luuid
-ceph_test_librbd_fsx_CFLAGS = ${AM_CFLAGS}
+ceph_test_librbd_fsx_CXXFLAGS = $(UNITTEST_CXXFLAGS)
 bin_DEBUGPROGRAMS += ceph_test_librbd_fsx
 endif
 endif # WITH_RBD
diff --git a/src/test/librados_test_stub/LibradosTestStub.cc b/src/test/librados_test_stub/LibradosTestStub.cc
index f9cf32bb0797f6..f7f597c6f31d2b 100644
--- a/src/test/librados_test_stub/LibradosTestStub.cc
+++ b/src/test/librados_test_stub/LibradosTestStub.cc
@@ -15,6 +15,7 @@
 #include "test/librados_test_stub/TestMemRadosClient.h"
 #include "objclass/objclass.h"
 #include
+#include <boost/shared_ptr.hpp>
 #include
 #include
 #include
@@ -22,29 +23,39 @@
 
 #define dout_subsys ceph_subsys_rados
 
+namespace {
+
+static void DeallocateRadosClient(librados::TestRadosClient* client)
+{
+  client->put();
+}
+
+} // anonymous namespace
+
+
 static librados::TestClassHandler *get_class_handler() {
-  static librados::TestClassHandler *s_class_handler = NULL;
-  if (s_class_handler == NULL) {
-    s_class_handler = new librados::TestClassHandler();
+  static boost::shared_ptr<librados::TestClassHandler> s_class_handler;
+  if (!s_class_handler) {
+    s_class_handler.reset(new librados::TestClassHandler());
     s_class_handler->open_all_classes();
   }
-  return s_class_handler;
+  return s_class_handler.get();
 }
 
 static librados::TestRadosClient *get_rados_client() {
   // TODO: use factory to allow tests to swap out impl
-  static librados::TestRadosClient *s_rados_client = NULL;
-  if (s_rados_client == NULL) {
+  static boost::shared_ptr<librados::TestRadosClient> s_rados_client;
+  if (!s_rados_client) {
     CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
     CephContext *cct = common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0);
     cct->_conf->parse_env();
     cct->_conf->apply_changes(NULL);
-    g_ceph_context = cct;
-    s_rados_client = new librados::TestMemRadosClient(cct);
+    s_rados_client.reset(new librados::TestMemRadosClient(cct),
+                         &DeallocateRadosClient);
    cct->put();
   }
   s_rados_client->get();
-  return s_rados_client;
+  return s_rados_client.get();
 }
 
 static void do_out_buffer(bufferlist& outbl, char **outbuf, size_t *outbuflen) {
diff --git a/src/test/librados_test_stub/TestWatchNotify.cc b/src/test/librados_test_stub/TestWatchNotify.cc
index 0a68c3cc424866..6fd77483bf27f4 100644
--- a/src/test/librados_test_stub/TestWatchNotify.cc
+++ b/src/test/librados_test_stub/TestWatchNotify.cc
@@ -12,12 +12,14 @@ namespace librados {
 TestWatchNotify::TestWatchNotify(CephContext *cct)
   : m_cct(cct), m_finisher(new Finisher(cct)), m_handle(), m_notify_id(),
     m_file_watcher_lock("librados::TestWatchNotify::m_file_watcher_lock") {
+  m_cct->get();
   m_finisher->start();
 }
 
 TestWatchNotify::~TestWatchNotify() {
   m_finisher->stop();
   delete m_finisher;
+  m_cct->put();
 }
 
 TestWatchNotify::NotifyHandle::NotifyHandle()
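
The stub changes above replace leaked file-scope singletons with function-local boost::shared_ptrs so everything is torn down at process exit (which is what lets the new valgrind run further below come back clean); the custom deleter bridges shared_ptr ownership to the client's intrusive get()/put() refcount. The pattern in isolation (illustrative types, not the stub's real classes):

    #include <boost/shared_ptr.hpp>

    // Stand-in for TestRadosClient's intrusive refcounting.
    struct RefCounted {
      int nref;
      RefCounted() : nref(1) {}
      void get() { ++nref; }
      void put() { if (--nref == 0) delete this; }
    };

    static void drop_ref(RefCounted *p) { p->put(); }

    static RefCounted *get_singleton() {
      // Function-local shared_ptr: destroyed at exit, dropping the base
      // reference through the deleter instead of leaking it.
      static boost::shared_ptr<RefCounted> s_instance;
      if (!s_instance)
        s_instance.reset(new RefCounted(), &drop_ref);
      return s_instance.get();
    }
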
diff --git a/src/test/librbd/fsx.c b/src/test/librbd/fsx.cc
similarity index 98%
rename from src/test/librbd/fsx.c
rename to src/test/librbd/fsx.cc
index e2ad45c4bb3f64..c5ed1e60a8ca9f 100644
--- a/src/test/librbd/fsx.c
+++ b/src/test/librbd/fsx.cc
@@ -42,6 +42,7 @@
 #include "include/krbd.h"
 #include "include/rados/librados.h"
 #include "include/rbd/librbd.h"
+#include "common/ceph_crypto.h"
 
 #define NUMPRINTCOLUMNS 32	/* # columns of data to print on each line */
 
@@ -196,7 +197,7 @@ warn(const char * fmt, ...) {
 #define BUF_SIZE 1024
 
 void
-prt(char *fmt, ...)
+prt(const char *fmt, ...)
 {
    va_list args;
    char buffer[BUF_SIZE];
@@ -210,13 +211,13 @@ prt(char *fmt, ...)
 }
 
 void
-prterr(char *prefix)
+prterr(const char *prefix)
 {
    prt("%s%s%s\n", prefix, prefix ? ": " : "", strerror(errno));
 }
 
 void
-prterrcode(char *prefix, int code)
+prterrcode(const char *prefix, int code)
 {
    prt("%s%s%s\n", prefix, prefix ? ": " : "", strerror(-code));
 }
@@ -264,8 +265,8 @@ struct rbd_ctx {
 struct rbd_operations {
    int (*open)(const char *name, struct rbd_ctx *ctx);
    int (*close)(struct rbd_ctx *ctx);
-   ssize_t (*read)(struct rbd_ctx *ctx, uint64_t off, size_t len, void *buf);
-   ssize_t (*write)(struct rbd_ctx *ctx, uint64_t off, size_t len, void *buf);
+   ssize_t (*read)(struct rbd_ctx *ctx, uint64_t off, size_t len, char *buf);
+   ssize_t (*write)(struct rbd_ctx *ctx, uint64_t off, size_t len, const char *buf);
    int (*flush)(struct rbd_ctx *ctx);
    int (*discard)(struct rbd_ctx *ctx, uint64_t off, uint64_t len);
    int (*get_size)(struct rbd_ctx *ctx, uint64_t *size);
@@ -362,7 +363,7 @@ librbd_verify_object_map(struct rbd_ctx *ctx)
 }
 
 ssize_t
-librbd_read(struct rbd_ctx *ctx, uint64_t off, size_t len, void *buf)
+librbd_read(struct rbd_ctx *ctx, uint64_t off, size_t len, char *buf)
 {
    ssize_t n;
 
@@ -374,7 +375,7 @@ librbd_read(struct rbd_ctx *ctx, uint64_t off, size_t len, void *buf)
 }
 
 ssize_t
-librbd_write(struct rbd_ctx *ctx, uint64_t off, size_t len, void *buf)
+librbd_write(struct rbd_ctx *ctx, uint64_t off, size_t len, const char *buf)
 {
    ssize_t n;
    int ret;
@@ -525,16 +526,16 @@ librbd_flatten(struct rbd_ctx *ctx)
 }
 
 const struct rbd_operations librbd_operations = {
-   .open = librbd_open,
-   .close = librbd_close,
-   .read = librbd_read,
-   .write = librbd_write,
-   .flush = librbd_flush,
-   .discard = librbd_discard,
-   .get_size = librbd_get_size,
-   .resize = librbd_resize,
-   .clone = librbd_clone,
-   .flatten = librbd_flatten,
+   librbd_open,
+   librbd_close,
+   librbd_read,
+   librbd_write,
+   librbd_flush,
+   librbd_discard,
+   librbd_get_size,
+   librbd_resize,
+   librbd_clone,
+   librbd_flatten
 };
 
 int
@@ -595,7 +596,7 @@ krbd_close(struct rbd_ctx *ctx)
 }
 
 ssize_t
-krbd_read(struct rbd_ctx *ctx, uint64_t off, size_t len, void *buf)
+krbd_read(struct rbd_ctx *ctx, uint64_t off, size_t len, char *buf)
 {
    ssize_t n;
 
@@ -610,7 +611,7 @@ krbd_read(struct rbd_ctx *ctx, uint64_t off, size_t len, void *buf)
 }
 
 ssize_t
-krbd_write(struct rbd_ctx *ctx, uint64_t off, size_t len, void *buf)
+krbd_write(struct rbd_ctx *ctx, uint64_t off, size_t len, const char *buf)
 {
    ssize_t n;
 
@@ -738,16 +739,16 @@ krbd_flatten(struct rbd_ctx *ctx)
 }
 
 const struct rbd_operations krbd_operations = {
-   .open = krbd_open,
-   .close = krbd_close,
-   .read = krbd_read,
-   .write = krbd_write,
-   .flush = krbd_flush,
-   .discard = krbd_discard,
-   .get_size = krbd_get_size,
-   .resize = krbd_resize,
-   .clone = krbd_clone,
-   .flatten = krbd_flatten,
+   krbd_open,
+   krbd_close,
+   krbd_read,
+   krbd_write,
+   krbd_flush,
+   krbd_discard,
+   krbd_get_size,
+   krbd_resize,
+   krbd_clone,
+   krbd_flatten,
 };
 
 struct rbd_ctx ctx = RBD_CTX_INIT;
@@ -793,7 +794,7 @@ logdump(void)
 {
    int	i, count, down;
    struct log_entry	*lp;
-   char *falloc_type[3] = {"PAST_EOF", "EXTENDING", "INTERIOR"};
+   const char *falloc_type[3] = {"PAST_EOF", "EXTENDING", "INTERIOR"};
 
    prt("LOG DUMP (%d total operations):\n", logcount);
    if (logcount < LOGSIZE) {
@@ -1744,8 +1745,7 @@ test(void)
 
 
 void
-cleanup(sig)
-   int	sig;
+cleanup(int sig)
 {
    if (sig)
       prt("signal %d\n", sig);
@@ -2312,6 +2312,7 @@ main(int argc, char **argv)
    krbd_destroy(krbd);
    rados_shutdown(cluster);
+   ceph::crypto::shutdown();
 
    free(original_buf);
    free(good_buf);
    free(temp_buf);
diff --git a/src/test/librbd/test_internal.cc b/src/test/librbd/test_internal.cc
index 0721ed2dd7ecc2..93daeadbdf477c 100644
--- a/src/test/librbd/test_internal.cc
+++ b/src/test/librbd/test_internal.cc
@@ -256,6 +256,9 @@ TEST_F(TestInternal, AioWriteRequestsLock) {
   ASSERT_EQ(0, librbd::is_exclusive_lock_owner(ictx, &is_owner));
   ASSERT_FALSE(is_owner);
   ASSERT_FALSE(c->is_complete());
+
+  unlock_image();
+  ASSERT_EQ(0, c->wait_for_complete());
   c->put();
 }
 
@@ -276,6 +279,9 @@ TEST_F(TestInternal, AioDiscardRequestsLock) {
   ASSERT_EQ(0, librbd::is_exclusive_lock_owner(ictx, &is_owner));
   ASSERT_FALSE(is_owner);
   ASSERT_FALSE(c->is_complete());
+
+  unlock_image();
+  ASSERT_EQ(0, c->wait_for_complete());
   c->put();
 }
 
diff --git a/src/test/librbd/test_librbd.cc b/src/test/librbd/test_librbd.cc
index 3ae9112e76ef18..2f2082660dd1d1 100644
--- a/src/test/librbd/test_librbd.cc
+++ b/src/test/librbd/test_librbd.cc
@@ -137,10 +137,12 @@ class TestLibRBD : public ::testing::Test {
     _unique_pool_names.clear();
     _image_number = 0;
     ASSERT_EQ("", connect_cluster(&_cluster));
+    ASSERT_EQ("", connect_cluster_pp(_rados));
   }
 
   static void TearDownTestCase() {
     rados_shutdown(_cluster);
+    _rados.wait_for_latest_osdmap();
     _pool_names.insert(_pool_names.end(), _unique_pool_names.begin(),
                        _unique_pool_names.end());
     for (size_t i = 1; i < _pool_names.size(); ++i) {
@@ -173,16 +175,17 @@
   }
 
   std::string create_pool(bool unique = false) {
+    librados::Rados rados;
     std::string pool_name;
     if (unique) {
      pool_name = get_temp_pool_name();
-      EXPECT_EQ("", create_one_pool_pp(pool_name, _rados));
+      EXPECT_EQ("", create_one_pool_pp(pool_name, rados));
       _unique_pool_names.push_back(pool_name);
     } else if (m_pool_number < _pool_names.size()) {
       pool_name = _pool_names[m_pool_number];
     } else {
       pool_name = get_temp_pool_name();
-      EXPECT_EQ("", create_one_pool_pp(pool_name, _rados));
+      EXPECT_EQ("", create_one_pool_pp(pool_name, rados));
       _pool_names.push_back(pool_name);
     }
     ++m_pool_number;
@@ -1122,6 +1125,7 @@ TEST_F(TestLibRBD, TestIOPPWithIOHint)
     ASSERT_EQ(0, rbd.open(ioctx, image, name.c_str(), NULL));
 
     char test_data[TEST_IO_SIZE + 1];
+    test_data[TEST_IO_SIZE] = '\0';
     int i;
 
     for (i = 0; i < TEST_IO_SIZE; ++i) {
@@ -1954,14 +1958,15 @@ TEST_F(TestLibRBD, FlushAioPP)
     for (i = 0; i < TEST_IO_SIZE; ++i) {
       test_data[i] = (char) (rand() % (126 - 33) + 33);
     }
+    test_data[TEST_IO_SIZE] = '\0';
 
     librbd::RBD::AioCompletion *write_comps[num_aios];
+    ceph::bufferlist bls[num_aios];
     for (i = 0; i < num_aios; ++i) {
-      ceph::bufferlist bl;
-      bl.append(test_data, strlen(test_data));
+      bls[i].append(test_data, strlen(test_data));
       write_comps[i] = new librbd::RBD::AioCompletion(NULL, NULL);
       uint64_t offset = rand() % (size - TEST_IO_SIZE);
-      ASSERT_EQ(0, image.aio_write(offset, TEST_IO_SIZE, bl,
+      ASSERT_EQ(0, image.aio_write(offset, TEST_IO_SIZE, bls[i],
                                    write_comps[i]));
     }
 
@@ -1970,12 +1975,12 @@ TEST_F(TestLibRBD, FlushAioPP)
     librbd::RBD::AioCompletion *flush_comp = new librbd::RBD::AioCompletion(NULL, NULL);
     ASSERT_EQ(0, image.aio_flush(flush_comp));
     ASSERT_EQ(0, flush_comp->wait_for_complete());
     ASSERT_EQ(1, flush_comp->is_complete());
-    delete flush_comp;
+    flush_comp->release();
 
     for (i = 0; i < num_aios; ++i) {
      librbd::RBD::AioCompletion *comp = write_comps[i];
      ASSERT_EQ(1, comp->is_complete());
-     delete comp;
+     comp->release();
    }
    ASSERT_PASSED(validate_object_map, image);
  }
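
The delete-to-release() swap in FlushAioPP matters because librbd::RBD::AioCompletion is reference-counted: librbd may still hold its own reference when the caller is finished, so the caller must drop only its reference rather than free the object. The corrected lifecycle, roughly (the wrapper function is illustrative; the API calls are real librbd):

    #include "include/rbd/librbd.hpp"

    // release() drops the caller's reference; delete would free memory
    // that librbd may still be using internally.
    int write_and_wait(librbd::Image &image, uint64_t off,
                       ceph::bufferlist &bl) {
      librbd::RBD::AioCompletion *c =
        new librbd::RBD::AioCompletion(NULL, NULL);
      int r = image.aio_write(off, bl.length(), bl, c);
      if (r == 0)
        r = c->wait_for_complete();
      c->release();  // not delete
      return r;
    }
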
diff --git a/src/test/librbd/test_main.cc b/src/test/librbd/test_main.cc
index 4b72eb464dece1..52c1c46a226297 100644
--- a/src/test/librbd/test_main.cc
+++ b/src/test/librbd/test_main.cc
@@ -3,6 +3,7 @@
 
 #include "gtest/gtest.h"
 #include "common/ceph_argparse.h"
+#include "common/ceph_crypto.h"
 #include "global/global_context.h"
 #include "global/global_init.h"
 #include
@@ -25,5 +26,8 @@ int main(int argc, char **argv)
   global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY, 0);
   common_init_finish(g_ceph_context);
 
-  return RUN_ALL_TESTS();
+  int r = RUN_ALL_TESTS();
+  g_ceph_context->put();
+  ceph::crypto::shutdown();
+  return r;
 }
diff --git a/src/test/run-rbd-valgrind-unit-tests.sh b/src/test/run-rbd-valgrind-unit-tests.sh
new file mode 100755
index 00000000000000..da18333be9d209
--- /dev/null
+++ b/src/test/run-rbd-valgrind-unit-tests.sh
@@ -0,0 +1,11 @@
+#!/bin/bash -ex
+
+# this should be run from the src directory in the ceph.git
+
+CEPH_SRC=$(pwd)
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$CEPH_SRC/.libs"
+PATH="$CEPH_SRC:$PATH"
+
+RBD_FEATURES=13 valgrind --tool=memcheck --leak-check=full --suppressions=valgrind.supp unittest_librbd
+
+echo OK
diff --git a/src/valgrind.supp b/src/valgrind.supp
index c46970e023a99d..2158207a1e19a2 100644
--- a/src/valgrind.supp
+++ b/src/valgrind.supp
@@ -186,3 +186,15 @@
    fun:main
 }
 
+# lttng-ust
+{
+   <lttng-ust>
+   Memcheck:Leak
+   fun:calloc
+   fun:_dl_allocate_tls
+   fun:pthread_create@@GLIBC_2.2.5
+   obj:/usr/*lib*/liblttng-ust.*
+   fun:call_init.part.0
+   fun:_dl_init
+   obj:*
+}