Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -215,14 +215,12 @@ object VeloxConfig extends ConfigRegistry {
.bytesConf(ByteUnit.BYTE)
.createWithDefaultString("32MB")

val COLUMNAR_VELOX_ASYNC_TIMEOUT =
val COLUMNAR_VELOX_ASYNC_TIMEOUT_ON_TASK_STOPPING =
buildStaticConf("spark.gluten.sql.columnar.backend.velox.asyncTimeoutOnTaskStopping")
.doc(
"Timeout for asynchronous execution when task is being stopped in Velox backend. " +
"It's recommended to set to a number larger than network connection timeout that the " +
"possible async tasks are relying on.")
.doc("Timeout in milliseconds when waiting for runtime-scoped async work to finish during" +
" teardown.")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefault(30000)
.createWithDefault(30000L)

val COLUMNAR_VELOX_SPLIT_PRELOAD_PER_DRIVER =
buildConf("spark.gluten.sql.columnar.backend.velox.SplitPreloadPerDriver")
Expand Down
58 changes: 0 additions & 58 deletions cpp/core/jni/JniCommon.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,6 @@
#include <arrow/ipc/writer.h>
#include <execinfo.h>
#include <jni.h>
#include <thread>

#include <folly/executors/thread_factory/ThreadFactory.h>

#include "compute/ProtobufUtils.h"
#include "compute/Runtime.h"
Expand Down Expand Up @@ -165,11 +162,6 @@ class JniCommonState {

jmethodID runtimeAwareCtxHandle();

JavaVM* vm() const {
assertInitialized();
return vm_;
}

private:
void initialize(JNIEnv* env);

Expand All @@ -187,56 +179,6 @@ inline JniCommonState* getJniCommonState() {
return &jniCommonState;
}

/// A folly::ThreadFactory for spill thread pools. Attaches each thread to the
/// JVM as a daemon at creation and calls DetachCurrentThread inside the thread
/// function body — after all work completes but before any pthread_key destructor
/// fires — to prevent unbounded JavaThread accumulation.
///
/// INVARIANT: threads created by this factory must never call libhdfs. libhdfs
/// registers hdfsThreadDestructor via pthread_key on first HDFS call; that
/// destructor calls DetachCurrentThread at actual thread exit. Calling it
/// earlier (inside the thread body) would invalidate libhdfs's cached JNIEnv*,
/// causing SIGSEGV on the next HDFS call.
///
/// REQUIRES: JniCommonState::ensureInitialized() must have been called before
/// constructing this factory (i.e. after JNI_OnLoad completes).
class JniAwareThreadFactory : public folly::ThreadFactory {
public:
// Captures the JavaVM* once at construction; getJniCommonState()->vm()
// asserts that JNI initialization already happened.
JniAwareThreadFactory() : vm_(getJniCommonState()->vm()) {}

// Creates a thread that runs `func` while attached to the JVM as a daemon.
// Attach/detach is balanced per-thread: we only detach if GetEnv reported
// the thread as detached and our attach attempt succeeded.
std::thread newThread(folly::Func&& func) override {
return std::thread([vm = vm_, f = std::move(func)]() mutable {
JNIEnv* env = nullptr;
// JNI_EDETACHED means this thread is not yet known to the JVM, so the
// attach (and the matching detach below) is our responsibility.
bool weAttached = (vm->GetEnv(reinterpret_cast<void**>(&env), jniVersion) == JNI_EDETACHED);
if (weAttached) {
if (vm->AttachCurrentThreadAsDaemon(reinterpret_cast<void**>(&env), nullptr) != JNI_OK) {
LOG(WARNING) << "JniAwareThreadFactory: failed to attach thread to JVM";
weAttached = false;
}
}
// RAII guard: ensures DetachCurrentThread is called even if f() throws.
struct DetachGuard {
JavaVM* vm;
bool active;
~DetachGuard() {
if (active) {
vm->DetachCurrentThread();
}
}
} guard{vm, weAttached};
f();
});
}

// folly::ThreadFactory interface: this factory assigns no name prefix.
const std::string& getNamePrefix() const override {
static const std::string kEmpty;
return kEmpty;
}

private:
JavaVM* vm_;
};

Runtime* getRuntime(JNIEnv* env, jobject runtimeAware);

// Safe version of JNI {Get|Release}<PrimitiveType>ArrayElements routines.
Expand Down
67 changes: 34 additions & 33 deletions cpp/velox/compute/VeloxBackend.cc
Original file line number Diff line number Diff line change
Expand Up @@ -68,9 +68,6 @@ DECLARE_bool(velox_ssd_odirect);
DECLARE_bool(velox_memory_pool_capacity_transfer_across_tasks);
DECLARE_int32(cache_prefetch_min_pct);

DECLARE_int32(gluten_velox_async_timeout_on_task_stopping);
DEFINE_int32(gluten_velox_async_timeout_on_task_stopping, 30000, "Async timout when task is being stopped");

using namespace facebook;

namespace gluten {
Expand Down Expand Up @@ -146,14 +143,10 @@ void VeloxBackend::init(
// Set velox_memory_use_hugepages.
FLAGS_velox_memory_use_hugepages = backendConf_->get<bool>(kMemoryUseHugePages, kMemoryUseHugePagesDefault);

// Async timeout.
FLAGS_gluten_velox_async_timeout_on_task_stopping =
backendConf_->get<int32_t>(kVeloxAsyncTimeoutOnTaskStopping, kVeloxAsyncTimeoutOnTaskStoppingDefault);

// Set cache_prefetch_min_pct default as 0 to force all loads are prefetched in DirectBufferInput.
FLAGS_cache_prefetch_min_pct = backendConf_->get<int>(kCachePrefetchMinPct, 0);

auto hiveConf = createHiveConnectorConfig(backendConf_);
hiveConnectorConfig_ = createHiveConnectorConfig(backendConf_);

// Setup and register.
velox::filesystems::registerLocalFileSystem();
Expand All @@ -169,7 +162,7 @@ void VeloxBackend::init(
#endif
#ifdef ENABLE_ABFS
velox::filesystems::registerAbfsFileSystem();
velox::filesystems::registerAzureClientProvider(*hiveConf);
velox::filesystems::registerAzureClientProvider(*hiveConnectorConfig_);
#endif

#ifdef GLUTEN_ENABLE_GPU
Expand All @@ -190,8 +183,20 @@ void VeloxBackend::init(
}
#endif

const auto spillThreadNum = backendConf_->get<uint32_t>(kSpillThreadNum, kSpillThreadNumDefaultValue);
if (spillThreadNum > 0) {
spillExecutor_ = std::make_unique<folly::CPUThreadPoolExecutor>(spillThreadNum);
}
auto ioThreads = backendConf_->get<int32_t>(kVeloxIOThreads, kVeloxIOThreadsDefault);
GLUTEN_CHECK(
ioThreads >= 0,
kVeloxIOThreads + " was set to negative number " + std::to_string(ioThreads) + ", this should not happen.");
if (ioThreads > 0) {
ioExecutor_ = std::make_unique<folly::CPUThreadPoolExecutor>(
ioThreads, std::make_unique<folly::UnboundedBlockingQueue<folly::CPUThreadPoolExecutor::CPUTask>>());
}

initJolFilesystem();
initConnector(hiveConf);

velox::dwio::common::registerFileSinks();
velox::parquet::registerParquetReaderFactory();
Expand Down Expand Up @@ -312,33 +317,26 @@ void VeloxBackend::initCache() {
}
}

void VeloxBackend::initConnector(const std::shared_ptr<velox::config::ConfigBase>& hiveConf) {
auto ioThreads = backendConf_->get<int32_t>(kVeloxIOThreads, kVeloxIOThreadsDefault);
GLUTEN_CHECK(
ioThreads >= 0,
kVeloxIOThreads + " was set to negative number " + std::to_string(ioThreads) + ", this should not happen.");
if (ioThreads > 0) {
ioExecutor_ = std::make_unique<folly::CPUThreadPoolExecutor>(
ioThreads, std::make_unique<folly::UnboundedBlockingQueue<folly::CPUThreadPoolExecutor::CPUTask>>());
}
velox::connector::registerConnector(
std::make_shared<velox::connector::hive::HiveConnector>(kHiveConnectorId, hiveConf, ioExecutor_.get()));
/// Builds a new Hive connector bound to this backend's shared Hive connector
/// configuration (hiveConnectorConfig_).
/// @param connectorId identifier the connector is created under.
/// @param ioExecutor executor handed to the connector for IO work; ownership
///        stays with the caller (may be nullptr per HiveConnector's contract —
///        TODO confirm against Velox docs).
/// @return shared ownership of the freshly constructed connector.
std::shared_ptr<facebook::velox::connector::Connector> VeloxBackend::createHiveConnector(
    const std::string& connectorId,
    folly::Executor* ioExecutor) const {
  auto connector =
      std::make_shared<velox::connector::hive::HiveConnector>(connectorId, hiveConnectorConfig_, ioExecutor);
  return connector;
}

// Register value-stream connector for runtime iterator-based inputs
auto valueStreamDynamicFilterEnabled =
backendConf_->get<bool>(kValueStreamDynamicFilterEnabled, kValueStreamDynamicFilterEnabledDefault);
velox::connector::registerConnector(
std::make_shared<ValueStreamConnector>(kIteratorConnectorId, hiveConf, valueStreamDynamicFilterEnabled));
/// Builds a new value-stream connector (runtime iterator-based inputs) that
/// reuses the backend's Hive connector configuration.
/// @param connectorId identifier the connector is created under.
/// @param dynamicFilterEnabled whether dynamic filtering is enabled for the
///        value-stream connector.
/// @return shared ownership of the freshly constructed connector.
std::shared_ptr<facebook::velox::connector::Connector> VeloxBackend::createValueStreamConnector(
    const std::string& connectorId,
    bool dynamicFilterEnabled) const {
  auto connector = std::make_shared<ValueStreamConnector>(connectorId, hiveConnectorConfig_, dynamicFilterEnabled);
  return connector;
}

#ifdef GLUTEN_ENABLE_GPU
if (backendConf_->get<bool>(kCudfEnableTableScan, kCudfEnableTableScanDefault) &&
backendConf_->get<bool>(kCudfEnabled, kCudfEnabledDefault)) {
facebook::velox::cudf_velox::connector::hive::CudfHiveConnectorFactory factory;
auto hiveConnector = factory.newConnector(kCudfHiveConnectorId, hiveConf, ioExecutor_.get());
facebook::velox::connector::registerConnector(hiveConnector);
}
#endif
// Builds a cuDF-backed Hive connector (GPU table scan path) via the cuDF
// connector factory, reusing the backend's shared Hive connector
// configuration. `ioExecutor` is passed through to the factory; ownership
// stays with the caller.
std::shared_ptr<facebook::velox::connector::Connector> VeloxBackend::createCudfHiveConnector(
const std::string& connectorId,
folly::Executor* ioExecutor) const {
facebook::velox::cudf_velox::connector::hive::CudfHiveConnectorFactory factory;
return factory.newConnector(connectorId, hiveConnectorConfig_, ioExecutor);
}
#endif

void VeloxBackend::initUdf() {
auto got = backendConf_->get<std::string>(kVeloxUdfLibraryPaths, "");
Expand Down Expand Up @@ -378,7 +376,10 @@ void VeloxBackend::tearDown() {
// Destruct IOThreadPoolExecutor will join all threads.
// On threads exit, thread local variables can be constructed with referencing global variables.
// So, we need to destruct IOThreadPoolExecutor and stop the threads before global variables get destructed.
executor_.reset();
spillExecutor_.reset();
ioExecutor_.reset();
ssdCacheExecutor_.reset();
globalMemoryManager_.reset();

// dump cache stats on exit if enabled
Expand Down
29 changes: 27 additions & 2 deletions cpp/velox/compute/VeloxBackend.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
#include "velox/common/caching/AsyncDataCache.h"
#include "velox/common/config/Config.h"
#include "velox/common/memory/MmapAllocator.h"
#include "velox/connectors/Connector.h"

#include "jni/JniHashTable.h"
#include "memory/VeloxMemoryManager.h"
Expand Down Expand Up @@ -58,9 +59,31 @@ class VeloxBackend {
}

folly::Executor* executor() const {
return executor_.get();
}

folly::Executor* spillExecutor() const {
return spillExecutor_.get();
}

folly::Executor* ioExecutor() const {
return ioExecutor_.get();
}

std::shared_ptr<facebook::velox::connector::Connector> createHiveConnector(
const std::string& connectorId,
folly::Executor* ioExecutor) const;

std::shared_ptr<facebook::velox::connector::Connector> createValueStreamConnector(
const std::string& connectorId,
bool dynamicFilterEnabled) const;

#ifdef GLUTEN_ENABLE_GPU
std::shared_ptr<facebook::velox::connector::Connector> createCudfHiveConnector(
const std::string& connectorId,
folly::Executor* ioExecutor) const;
#endif

void tearDown();

private:
Expand All @@ -72,7 +95,6 @@ class VeloxBackend {

void init(std::unique_ptr<AllocationListener> listener, const std::unordered_map<std::string, std::string>& conf);
void initCache();
void initConnector(const std::shared_ptr<facebook::velox::config::ConfigBase>& hiveConf);
void initUdf();
std::unique_ptr<facebook::velox::cache::SsdCache> initSsdCache(uint64_t ssdSize);

Expand All @@ -89,9 +111,12 @@ class VeloxBackend {
// Instance of AsyncDataCache used for all large allocations.
std::shared_ptr<facebook::velox::cache::AsyncDataCache> asyncDataCache_;

std::unique_ptr<folly::Executor> ssdCacheExecutor_;
std::unique_ptr<folly::Executor> executor_;
std::unique_ptr<folly::Executor> spillExecutor_;
std::unique_ptr<folly::Executor> ioExecutor_;
std::unique_ptr<folly::Executor> ssdCacheExecutor_;
std::shared_ptr<facebook::velox::memory::MmapAllocator> cacheAllocator_;
std::shared_ptr<facebook::velox::config::ConfigBase> hiveConnectorConfig_;

std::string cachePathPrefix_;
std::string cacheFilePrefix_;
Expand Down
33 changes: 33 additions & 0 deletions cpp/velox/compute/VeloxConnectorIds.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include <string>

namespace gluten {

// Aggregate carrying the connector identifiers a plan conversion should use,
// plus flags recording which of those connectors have been registered.
// NOTE(review): field semantics inferred from names and from the
// kHiveConnectorId / kIteratorConnectorId / kCudfHiveConnectorId usage in
// VeloxBackend — confirm against the consuming converter code.
struct VeloxConnectorIds {
std::string hive; // id of the Hive connector
std::string iterator; // id of the value-stream (iterator input) connector
std::string cudfHive; // id of the cuDF GPU Hive connector
bool hiveRegistered{false}; // true once the Hive connector is registered
bool iteratorRegistered{false}; // true once the value-stream connector is registered
bool cudfHiveRegistered{false}; // true once the cuDF Hive connector is registered
};

} // namespace gluten
10 changes: 9 additions & 1 deletion cpp/velox/compute/VeloxPlanConverter.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,12 +30,20 @@ VeloxPlanConverter::VeloxPlanConverter(
velox::memory::MemoryPool* veloxPool,
const facebook::velox::config::ConfigBase* veloxCfg,
const std::vector<std::shared_ptr<ResultIterator>>& rowVectors,
VeloxConnectorIds connectorIds,
const std::optional<std::string> writeFilesTempPath,
const std::optional<std::string> writeFileName,
bool validationMode)
: validationMode_(validationMode),
veloxCfg_(veloxCfg),
substraitVeloxPlanConverter_(veloxPool, veloxCfg, rowVectors, writeFilesTempPath, writeFileName, validationMode) {
substraitVeloxPlanConverter_(
veloxPool,
veloxCfg,
rowVectors,
std::move(connectorIds),
writeFilesTempPath,
writeFileName,
validationMode) {
VELOX_USER_CHECK_NOT_NULL(veloxCfg_);
}

Expand Down
2 changes: 2 additions & 0 deletions cpp/velox/compute/VeloxPlanConverter.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
#include <velox/core/PlanNode.h>
#include <velox/exec/Split.h>

#include "compute/VeloxConnectorIds.h"
#include "substrait/SubstraitToVeloxPlan.h"
#include "substrait/plan.pb.h"

Expand All @@ -33,6 +34,7 @@ class VeloxPlanConverter {
facebook::velox::memory::MemoryPool* veloxPool,
const facebook::velox::config::ConfigBase* veloxCfg,
const std::vector<std::shared_ptr<ResultIterator>>& rowVectors,
VeloxConnectorIds connectorIds,
const std::optional<std::string> writeFilesTempPath = std::nullopt,
const std::optional<std::string> writeFileName = std::nullopt,
bool validationMode = false);
Expand Down
Loading
Loading