Merged
4 changes: 2 additions & 2 deletions xllm/core/framework/eplb/eplb_policy.cpp
@@ -76,7 +76,7 @@ std::pair<torch::Tensor, std::vector<bool>> EplbPolicy::rebalance_experts(
torch::Tensor EplbPolicy::compute_balanced_pack(
const torch::Tensor& expert_loads) {
// Parameter Validation
-  TORCH_CHECK(expert_loads.dim() == 1, "expert_loads must be 1D tensor");
+  CHECK_EQ(expert_loads.dim(), 1) << "expert_loads must be 1D tensor";
const int64_t num_experts = expert_loads.size(0);

// Generate Redundant Experts
@@ -139,7 +139,7 @@ std::pair<torch::Tensor, torch::Tensor> EplbPolicy::update_origin_weights(
torch::Tensor expert_loads,
int32_t redundancy_experts) {
// Parameter Validation
-  TORCH_CHECK(expert_loads.dim() == 1, "expert_loads must be 1D tensor");
+  CHECK_EQ(expert_loads.dim(), 1) << "expert_loads must be 1D tensor";
const int64_t num_experts = expert_loads.size(0);

// Initialize Data Structures
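Worth noting: the two macros are not drop-in equivalents. TORCH_CHECK throws a catchable c10::Error, while glog's CHECK_EQ logs at FATAL severity and aborts the process. A minimal standalone sketch of the difference (assumes libtorch and glog are available; the tensor and messages are illustrative, not taken from this PR):

```cpp
#include <glog/logging.h>
#include <torch/torch.h>

int main(int, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  torch::Tensor expert_loads = torch::rand({4, 2});  // deliberately 2-D

  // TORCH_CHECK throws c10::Error, so the caller can recover:
  try {
    TORCH_CHECK(expert_loads.dim() == 1, "expert_loads must be 1D tensor");
  } catch (const c10::Error& e) {
    LOG(WARNING) << "recovered from TORCH_CHECK failure: " << e.what();
  }

  // CHECK_EQ logs FATAL and aborts the process; there is nothing to catch:
  CHECK_EQ(expert_loads.dim(), 1) << "expert_loads must be 1D tensor";
  return 0;  // never reached when the check above fails
}
```

If any caller relies on catching c10::Error from these paths, the migration turns that recoverable failure into a process abort, which seems to be the intent for invariant violations like these.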
2 changes: 1 addition & 1 deletion xllm/core/framework/parallel_state/npu_process_group.cpp
@@ -69,7 +69,7 @@ HcclDataType to_hccl_data_type(const torch::Tensor& input) {
case at::kBFloat16:
return HCCL_DATA_TYPE_BFP16;
default:
TORCH_CHECK(false, "Unconvertible HCCL type ", type);
LOG(FATAL) << "Unconvertible HCCL type: " << type;
}
}

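A side effect that matters in this default: branch: glog annotates its fatal log path as non-returning, so (with GCC and Clang at least) the converter compiles without a placeholder return after the switch. A sketch of the same pattern under that assumption; the enum and mapping below are illustrative placeholders, not the real HCCL types:

```cpp
#include <glog/logging.h>

enum class WireDtype { kFloat, kHalf, kBFloat16 };

// Hypothetical converter mirroring the shape of to_hccl_data_type.
int to_wire_type(WireDtype type) {
  switch (type) {
    case WireDtype::kFloat:
      return 0;
    case WireDtype::kHalf:
      return 1;
    case WireDtype::kBFloat16:
      return 2;
    default:
      // LOG(FATAL) aborts, so no return is needed on this path.
      LOG(FATAL) << "Unconvertible type: " << static_cast<int>(type);
  }
}
```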
4 changes: 2 additions & 2 deletions xllm/core/layers/common/tests/tests_utils.cpp
@@ -249,10 +249,10 @@ torch::Tensor seeded_tensor(const std::string& key,
out_cpu = map_mod_span(int64_t{});
break;
default:
TORCH_CHECK(false, "Unsupported integer dtype: ", dtype);
LOG(FATAL) << "Unsupported integer dtype: " << dtype;
}
} else {
TORCH_CHECK(false, "Unsupported dtype for seeded_tensor");
LOG(FATAL) << "Unsupported dtype for seeded_tensor";
}

// Shape & device
4 changes: 2 additions & 2 deletions xllm/core/layers/mlu/deepseek_v2_attention.cpp
@@ -43,8 +43,8 @@ DeepseekV2AttentionImpl::DeepseekV2AttentionImpl(
int64_t max_position_embeddings = args.max_position_embeddings();

qk_head_dim_ = qk_nope_head_dim_ + qk_rope_head_dim_;
-  TORCH_CHECK(num_heads % tp_size == 0,
-              "num_heads must be divisible by tensor parallel size");
+  CHECK_EQ(num_heads % tp_size, 0)
+      << "num_heads must be divisible by tensor parallel size";
num_local_heads_ = num_heads / tp_size;
float scaling = std::pow(qk_head_dim_, -0.5f);

7 changes: 2 additions & 5 deletions xllm/core/layers/npu/npu_deepseek_v2_decoder_layer_impl.h
@@ -80,11 +80,8 @@ class ExpertBuffer {
} else {
auto validate_shape = [](const torch::Tensor& t,
const std::vector<int64_t>& expected) {
-        TORCH_CHECK(t.sizes() == expected,
-                    "Shape mismatch. Expected ",
-                    expected,
-                    " got ",
-                    t.sizes());
+        CHECK_EQ(t.sizes(), expected)
+            << "Shape mismatch. Expected " << expected << " got " << t.sizes();
};

validate_shape(gateup_weight, gateup_weight_shape);
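CHECK_EQ is not limited to scalars: it accepts any operand pair that supports operator== and, for composing the failure message, operator<<. The validate_shape change above presumably leans on libtorch supplying both for c10::IntArrayRef versus std::vector<int64_t>. A self-contained sketch with a hypothetical custom type:

```cpp
#include <glog/logging.h>

#include <ostream>

struct Shape {  // hypothetical stand-in for a sizes() result
  int rows;
  int cols;
};

bool operator==(const Shape& a, const Shape& b) {
  return a.rows == b.rows && a.cols == b.cols;
}

// glog streams both operands into the failure message, so the
// compared type must also be printable.
std::ostream& operator<<(std::ostream& os, const Shape& s) {
  return os << "[" << s.rows << ", " << s.cols << "]";
}

void validate_shape(const Shape& got, const Shape& expected) {
  CHECK_EQ(got, expected) << "Shape mismatch";
}
```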
2 changes: 1 addition & 1 deletion xllm/models/dit/dit.h
@@ -592,7 +592,7 @@ inline torch::Tensor get_timestep_embedding(const torch::Tensor& timesteps,
float downscale_freq_shift = 1.0f,
float scale = 1.0f,
int64_t max_period = 10000) {
-  TORCH_CHECK(timesteps.dim() == 1, "Timesteps should be a 1d-array");
+  CHECK_EQ(timesteps.dim(), 1) << "Timesteps should be a 1d-array";
int64_t half_dim = embedding_dim / 2;
// -ln(max_period) * [0, 1, ..., half_dim-1] / (half_dim -
// downscale_freq_shift
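For context, get_timestep_embedding computes the standard sinusoidal timestep embedding, matching the truncated comment above (exponent = -ln(max_period) * [0, ..., half_dim-1] / (half_dim - downscale_freq_shift)). A hedged libtorch sketch of the core computation, assuming the reference algorithm; xllm's handling of odd embedding_dim and sin/cos ordering may differ:

```cpp
#include <glog/logging.h>
#include <torch/torch.h>

#include <cmath>

// Sketch of sinusoidal timestep embeddings (reference algorithm).
torch::Tensor timestep_embedding_sketch(const torch::Tensor& timesteps,
                                        int64_t embedding_dim,
                                        float downscale_freq_shift = 1.0f,
                                        float scale = 1.0f,
                                        int64_t max_period = 10000) {
  CHECK_EQ(timesteps.dim(), 1) << "Timesteps should be a 1d-array";
  const int64_t half_dim = embedding_dim / 2;
  // exponent = -ln(max_period) * [0, ..., half_dim-1] / (half_dim - shift)
  auto exponent = -std::log(static_cast<float>(max_period)) *
                  torch::arange(half_dim, torch::kFloat32) /
                  (half_dim - downscale_freq_shift);
  auto emb = torch::exp(exponent);  // [half_dim] frequencies
  emb = timesteps.unsqueeze(1).to(torch::kFloat32) * emb.unsqueeze(0);
  return torch::cat({torch::sin(scale * emb), torch::cos(scale * emb)},
                    /*dim=*/-1);  // [T, 2 * half_dim]
}
```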
2 changes: 1 addition & 1 deletion xllm/models/dit/pipeline_flux_base.h
@@ -85,7 +85,7 @@ torch::Tensor get_1d_rotary_pos_embed(
float ntk_factor = 1.0,
bool repeat_interleave_real = true,
torch::Dtype freqs_dtype = torch::kFloat32) {
-  TORCH_CHECK(dim % 2 == 0, "Dimension must be even");
+  CHECK_EQ(dim % 2, 0) << "Dimension must be even";

torch::Tensor pos_tensor = pos;
if (pos.dim() == 0) {
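get_1d_rotary_pos_embed shares its name and signature with the diffusers helper, where the even-dim check exists because one frequency drives each (real, imag) channel pair. A speculative sketch of that reference computation, using the ntk_factor and repeat_interleave_real parameters from the signature above; xllm's actual variant may differ:

```cpp
#include <glog/logging.h>
#include <torch/torch.h>

#include <utility>

// Sketch: cos/sin tables for 1-D rotary embeddings (reference-style).
std::pair<torch::Tensor, torch::Tensor> rotary_embed_sketch(
    int64_t dim,
    const torch::Tensor& pos,  // [S] 1-D positions
    float theta = 10000.0f,
    float ntk_factor = 1.0f,
    bool repeat_interleave_real = true) {
  CHECK_EQ(dim % 2, 0) << "Dimension must be even";
  const float scaled_theta = theta * ntk_factor;  // NTK-aware base
  auto freqs = torch::pow(scaled_theta,
                          torch::arange(0, dim, 2, torch::kFloat32) /
                              static_cast<float>(dim))
                   .reciprocal();                              // [dim/2]
  auto angles = torch::outer(pos.to(torch::kFloat32), freqs);  // [S, dim/2]
  auto cos_emb = angles.cos();
  auto sin_emb = angles.sin();
  if (repeat_interleave_real) {
    // Duplicate each frequency so the tables line up with
    // interleaved (real, imag) channel pairs: [S, dim].
    cos_emb = cos_emb.repeat_interleave(2, /*dim=*/1);
    sin_emb = sin_emb.repeat_interleave(2, /*dim=*/1);
  }
  return {cos_emb, sin_emb};
}
```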
6 changes: 3 additions & 3 deletions xllm/models/vlm/minicpmv.h
@@ -306,7 +306,7 @@ torch::Tensor get_1d_sincos_pos_embed_from_grid(int embed_dim,
std::pair<int, int> version = {
2,
0}) {
-  TORCH_CHECK(embed_dim % 2 == 0, "embed_dim must be even");
+  CHECK_EQ(embed_dim % 2, 0) << "embed_dim must be even";

// compute omega
auto omega = torch::arange(embed_dim / 2, torch::kFloat32);
@@ -332,7 +332,7 @@ torch::Tensor get_2d_sincos_pos_embed_from_grid(int embed_dim,
std::pair<int, int> version = {
2,
0}) {
-  TORCH_CHECK(embed_dim % 2 == 0, "embed_dim must be even");
+  CHECK_EQ(embed_dim % 2, 0) << "embed_dim must be even";

auto emb_h =
get_1d_sincos_pos_embed_from_grid(embed_dim / 2, grid[0], version);
@@ -382,7 +382,7 @@ class Resampler2_5Impl : public BaseResamplerImpl {
}

torch::Tensor forward(torch::Tensor x, torch::Tensor tgt_sizes) {
-    TORCH_CHECK(x.size(0) == tgt_sizes.size(0), "Batch size mismatch!");
+    CHECK_EQ(x.size(0), tgt_sizes.size(0)) << "Batch size mismatch!";

int64_t batch_size = x.size(0);
auto device = x.device();
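Both sincos helpers follow the standard ViT/MAE recipe: the 1-D version pairs every position with sin and cos at embed_dim/2 frequencies, and the 2-D version concatenates a 1-D embedding of the row grid with one of the column grid, which is why each touched check requires an even embed_dim. A hedged sketch of that reference recipe (the minicpmv version parameter is omitted):

```cpp
#include <glog/logging.h>
#include <torch/torch.h>

// Sketch: 1-D sin/cos positional embedding (MAE-style).
torch::Tensor sincos_1d_sketch(int64_t embed_dim, const torch::Tensor& pos) {
  CHECK_EQ(embed_dim % 2, 0) << "embed_dim must be even";
  auto omega = torch::arange(embed_dim / 2, torch::kFloat32);
  omega = torch::pow(10000.0f, omega / (embed_dim / 2.0)).reciprocal();
  auto out = torch::outer(pos.reshape(-1).to(torch::kFloat32), omega);
  return torch::cat({out.sin(), out.cos()}, /*dim=*/1);  // [N, embed_dim]
}

// Sketch: 2-D embedding = concat(1-D over rows, 1-D over cols).
torch::Tensor sincos_2d_sketch(int64_t embed_dim,
                               const torch::Tensor& grid_h,
                               const torch::Tensor& grid_w) {
  CHECK_EQ(embed_dim % 2, 0) << "embed_dim must be even";
  auto emb_h = sincos_1d_sketch(embed_dim / 2, grid_h);
  auto emb_w = sincos_1d_sketch(embed_dim / 2, grid_w);
  return torch::cat({emb_h, emb_w}, /*dim=*/1);  // [N, embed_dim]
}
```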