test/objectstore/store_test: kill ExcessiveFragmentation test case.
This test case was introduced by ceph#18494
to verify allocation failure handling while gifting during bluefs rebalance.
Now it looks outdated as there is no periodic gifting any more.

Fixes: https://tracker.ceph.com/issues/45788

Signed-off-by: Igor Fedotov <ifedotov@suse.com>
ifed01 committed Jun 9, 2020
1 parent 07f9d8b commit b852703
Showing 1 changed file with 0 additions and 100 deletions.
100 changes: 0 additions & 100 deletions src/test/objectstore/store_test.cc
@@ -7597,106 +7597,6 @@ TEST_P(StoreTestSpecificAUSize, SmallWriteOnShardedExtents) {
}
}

TEST_P(StoreTestSpecificAUSize, ExcessiveFragmentation) {
if (string(GetParam()) != "bluestore")
return;

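// limit the main device to 2 GiB; the test assumes the default 1 MiB
// bluefs_alloc_size, which is checked below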
SetVal(g_conf(), "bluestore_block_size",
stringify((uint64_t)2048 * 1024 * 1024).c_str());

ASSERT_EQ(g_conf().get_val<Option::size_t>("bluefs_alloc_size"),
1024 * 1024U);

size_t block_size = 0x10000;
StartDeferred(block_size);

int r;
coll_t cid;
ghobject_t hoid1(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
ghobject_t hoid2(hobject_t(sobject_t("Object 2", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);

{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
// create 2x400MB objects in a way that their pextents are interleaved
ObjectStore::Transaction t;
bufferlist bl;

bl.append(std::string(block_size * 4, 'a')); // 256KB
uint64_t offs = 0;
while(offs < (uint64_t)400 * 1024 * 1024) {
t.write(cid, hoid1, offs, bl.length(), bl, 0);
t.write(cid, hoid2, offs, bl.length(), bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
offs += bl.length();
if( (offs % (100 * 1024 * 1024)) == 0) {
std::cout<<"written " << offs << std::endl;
}
}
}
std::cout<<"written 800MB"<<std::endl;
{
// Partially overwrite both objects (112MB each), leaving the freed space
// fragmented while the still unfragmented space at the end stays occupied.
// So we'll have enough free space overall, but it'll lack long enough
// (e.g. 1MB) contiguous pextents.
ObjectStore::Transaction t;
bufferlist bl;

bl.append(std::string(block_size * 4, 'a'));
uint64_t offs = 0;
while(offs < 112 * 1024 * 1024) {
t.write(cid, hoid1, offs, bl.length(), bl, 0);
t.write(cid, hoid2, offs, bl.length(), bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
// this will produce high fragmentation if original allocations
// were contiguous
offs += bl.length();
if( (offs % (10 * 1024 * 1024)) == 0) {
std::cout<<"written " << offs << std::endl;
}
}
}
{
// remove one of the objects, producing a lot of free space
// and hence triggering bluefs rebalance,
// which should fail as there are no long enough pextents.
ObjectStore::Transaction t;
t.remove(cid, hoid2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}

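// sleep for several balance intervals so the periodic bluefs rebalance
// gets a chance to run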
auto to_sleep = 5 *
(int)g_conf().get_val<double>("bluestore_bluefs_balance_interval");
std::cout<<"sleeping... " << std::endl;
sleep(to_sleep);

{
// touch another object to trigger rebalance
ObjectStore::Transaction t;
t.touch(cid, hoid1);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid1);
t.remove(cid, hoid2);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}

#endif //#if defined(WITH_BLUESTORE)

TEST_P(StoreTest, KVDBHistogramTest) {