Skip to content

Commit

Permalink
Merge 5c78c43 into b4c1c19
Browse files Browse the repository at this point in the history
  • Loading branch information
LanderlYoung committed Dec 3, 2023
2 parents b4c1c19 + 5c78c43 commit 39bfe8f
Show file tree
Hide file tree
Showing 6 changed files with 58 additions and 26 deletions.
2 changes: 1 addition & 1 deletion .gitignore
Expand Up @@ -43,7 +43,7 @@ cmake-build-*
.vscode
.vs
/compile_commands.json
/.cache/clangd
.cache

# js
node_modules
Expand Down
8 changes: 4 additions & 4 deletions backend/Lua/LuaEngine.cc
Expand Up @@ -206,19 +206,19 @@ size_t LuaEngine::globalIdCounter() {
}

/**
 * Look up a global value by a script-side string key.
 *
 * @param key  handle to a Lua string; key.val_ is presumably a stack
 *             index/reference into this engine's lua_State — TODO confirm.
 * @return the global bound to that name (delegates to the const char*
 *         overload).
 */
Local<Value> LuaEngine::get(const Local<String>& key) {
  // Use this engine's own lua_State member rather than the thread-current
  // one; the scraped diff retained both the removed currentLua() line and
  // the added lua_ line — keep the post-merge form only.
  auto lua = lua_;
  auto keyString = lua_tostring(lua, key.val_);
  return get(keyString);
}

void LuaEngine::set(const Local<String>& key, const Local<Value>& value) {
auto lua = lua_backend::currentLua();
auto lua = lua_;
auto keyString = lua_tostring(lua, key.val_);
set(keyString, value);
}

Local<Value> LuaEngine::get(const char* key) {
auto lua = lua_backend::currentLua();
auto lua = lua_;

lua_backend::luaEnsureStack(lua, 1);
lua_getglobal(lua, key);
Expand All @@ -227,7 +227,7 @@ Local<Value> LuaEngine::get(const char* key) {
}

void LuaEngine::set(const char* key, const Local<Value>& value) {
auto lua = lua_backend::currentLua();
auto lua = lua_;

lua_backend::luaStackScope(lua, [lua, key, &value]() {
lua_backend::luaEnsureStack(lua, 2);
Expand Down
1 change: 1 addition & 0 deletions backend/Lua/LuaScope.cc
Expand Up @@ -16,6 +16,7 @@
*/

#include "LuaScope.hpp"
#include "LuaReference.hpp"
#include "trait/TraitScope.h"

namespace script::lua_backend {
Expand Down
11 changes: 10 additions & 1 deletion src/utils/MessageQueue.cc
Expand Up @@ -234,7 +234,16 @@ int32_t MessageQueue::postMessage(Message* msg, int64_t delayNanos) {

std::deque<Message*>::const_iterator MessageQueue::findInsertPositionLocked(
std::chrono::nanoseconds dueTime, int32_t priority) const {
auto it = queue_.begin();
if (queue_.empty()) {
return queue_.end();
}

// search backwords, since add to queue-end is the most common case
auto it = queue_.end() - 1;
while (it != queue_.begin() && (*it)->dueTime >= dueTime) {
--it;
}

// search by due-time
while (it != queue_.end() && (*it)->dueTime < dueTime) {
++it;
Expand Down
2 changes: 1 addition & 1 deletion src/utils/MessageQueue.h
Expand Up @@ -350,7 +350,7 @@ class MessageQueue {
* \endcode
*
*/
template <class Rep = int, class Period = std::milli>
template <class Rep = int64_t, class Period = std::milli>
int32_t postMessage(const Message& message,
std::chrono::duration<Rep, Period> delay = std::chrono::milliseconds(0)) {
using std::chrono::duration_cast;
Expand Down
60 changes: 41 additions & 19 deletions test/src/ThreadPoolTest.cc
Expand Up @@ -17,10 +17,10 @@

#include <array>
#include <atomic>
#include <iomanip>
#include "test.h"

namespace script::utils::test {

static void handleMessage(Message& msg) {
auto* i = static_cast<std::atomic_int64_t*>(msg.ptr0);
(*i)++;
Expand Down Expand Up @@ -74,19 +74,21 @@ TEST(ThreadPool, MultiThreadRun) {
EXPECT_EQ(max * kProducerCount, i->load());
}

TEST(ThreadPool, Benchmark) {
static constexpr auto kEnableMultiThreadTest = true;

template <size_t kProducerThreads, size_t kWorkerThreads>
void runThreadpoolBenchmark() {
using std::chrono::duration;
using std::chrono::duration_cast;
using std::chrono::milliseconds;
using std::chrono::nanoseconds;
using std::chrono::steady_clock;
using std::chrono_literals::operator""ms;

constexpr auto kEnable = false;
constexpr auto kRunTime = 5000ms;
constexpr auto kWorkerThreads = 4;
constexpr auto kProducerThreads = 4;
constexpr auto kRunTimeMs = 1000ms;

// simple benchmark
if (!kEnable) return;
if (!kEnableMultiThreadTest) return;

auto start = steady_clock::now();

Expand All @@ -97,30 +99,50 @@ TEST(ThreadPool, Benchmark) {
nullptr);
stopMsg.ptr0 = &tp;

tp.postMessage(stopMsg, kRunTime);
tp.postMessage(stopMsg, kRunTimeMs);

for (int j = 0; j < kProducerThreads; ++j) {
std::thread([&]() {
std::array<std::unique_ptr<std::thread>, kProducerThreads> p;
for (auto& t : p) {
t = std::make_unique<std::thread>([&]() {
while (true) {
Message msg(handleMessage, nullptr);
msg.ptr0 = i.get();
try {
tp.postMessage(msg);
} catch (std::runtime_error&) {
if (tp.postMessage(msg) == 0) {
break;
}
}
}).detach();
});
}

tp.awaitTermination();
for (auto& t : p) {
t->join();
}

// run time should be close to kRunTime
auto runTimeMillis = duration_cast<milliseconds>((steady_clock::now() - start)).count();
const auto runTimeMillis = duration_cast<milliseconds>((steady_clock::now() - start)).count();

std::cout << "run time:" << runTimeMillis << "ms, " << i->load() << "ops"
<< " [" << i->load() * 1000 / static_cast<double>(runTimeMillis) << " ops/S]"
<< std::endl;
const auto opsPerSecond = i->load() * 1000 / runTimeMillis;
const auto nanosencodsPerOp = duration_cast<duration<double, std::nano>>(
duration<double, std::milli>(static_cast<double>(runTimeMillis) / i->load()));

std::cout << kProducerThreads << "-producers " << kWorkerThreads << "-workers "
<< "time:" << runTimeMillis << "ms, " << std::setw(9) << i->load() << " ops"
<< " [" << std::setw(9) << opsPerSecond << " ops/s]"
<< " [" << std::setw(9) << nanosencodsPerOp.count() << " ns/op]" << std::endl;
}

} // namespace script::utils::test
TEST(ThreadPool, Benchmark_1p_1w) { runThreadpoolBenchmark<1, 1>(); }

TEST(ThreadPool, Benchmark_1p_2w) { runThreadpoolBenchmark<1, 2>(); }

TEST(ThreadPool, Benchmark_1p_4w) { runThreadpoolBenchmark<1, 4>(); }

TEST(ThreadPool, Benchmark_2p_1w) { runThreadpoolBenchmark<2, 1>(); }

TEST(ThreadPool, Benchmark_4p_1w) { runThreadpoolBenchmark<4, 1>(); }

TEST(ThreadPool, Benchmark_2p_2w) { runThreadpoolBenchmark<2, 2>(); }

TEST(ThreadPool, Benchmark_4p_4w) { runThreadpoolBenchmark<4, 4>(); }
} // namespace script::utils::test

0 comments on commit 39bfe8f

Please sign in to comment.