Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[python-package] Expose ObjectiveFunction class #6586

Open
wants to merge 18 commits into
base: master
Choose a base branch
from
Prev Previous commit
Next Next commit
Fix tests
  • Loading branch information
Atanas Dimitrov committed Sep 2, 2024
commit 847a34c0aad44baf1be08a12599bfdec5e0c623e
2 changes: 1 addition & 1 deletion include/LightGBM/objective_function.h
Original file line number Diff line number Diff line change
@@ -70,7 +70,7 @@ class ObjectiveFunction {
virtual void ConvertOutputs(const int num_data, const double* inputs, double* outputs) const {
#pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
for (int i = 0; i < num_data; i ++) {
-      ConvertOutput(inputs + i, outputs + i);
+      ConvertOutput(&inputs[i], &outputs[i]);
}
}

7 changes: 7 additions & 0 deletions python-package/lightgbm/basic.py
Original file line number Diff line number Diff line change
@@ -5386,7 +5386,14 @@ def get_gradients(self, y_pred: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
if self.num_data is None or self.num_class is None:
raise ValueError("ObjectiveFunction was not created properly")

if y_pred.shape[0] != self.num_data:
raise ValueError("Gradients cannot be computed as the number of predictions is wrong")

if self.num_class != 1 and (y_pred.ndim != 2 or y_pred.shape[1] != self.num_class):
raise ValueError("Multiclass gradient computation should be called with the correct shape")

data_shape = self.num_data * self.num_class
y_pred = np.asfortranarray(y_pred)
grad = np.empty(dtype=np.float32, shape=data_shape)
hess = np.empty(dtype=np.float32, shape=data_shape)

4 changes: 2 additions & 2 deletions src/objective/multiclass_objective.hpp
Original file line number Diff line number Diff line change
@@ -132,7 +132,7 @@ class MulticlassSoftmax: public ObjectiveFunction {
void ConvertOutputs(const int num_data, const double* inputs, double* outputs) const override {
#pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
for (int i = 0; i < num_data; i += num_class_) {
-      ConvertOutput(inputs + i, outputs + i);
+      ConvertOutput(&inputs[i], &outputs[i]);
}
}

@@ -246,7 +246,7 @@ class MulticlassOVA: public ObjectiveFunction {
void ConvertOutputs(const int num_data, const double* inputs, double* outputs) const override {
#pragma omp parallel for num_threads(OMP_NUM_THREADS()) schedule(static)
for (int i = 0; i < num_data; i += num_class_) {
-      ConvertOutput(inputs + i, outputs + i);
+      ConvertOutput(&inputs[i], &outputs[i]);
}
}

2 changes: 1 addition & 1 deletion tests/python_package_test/test_engine.py
Original file line number Diff line number Diff line change
@@ -4450,7 +4450,7 @@ def loss(y_pred, dtrain):
np.testing.assert_allclose(booster_exposed.predict(X), booster.predict(X, raw_score=True))
np.testing.assert_allclose(booster_exposed.predict(X), booster_custom.predict(X))

-    y_pred = np.zeros_like(booster.predict(X, raw_score=True))
+    y_pred = booster.predict(X, raw_score=True)
np.testing.assert_allclose(builtin_loss(y_pred, lgb_train), custom_objective(y_pred, lgb_train))

np.testing.assert_allclose(builtin_convert_scores(booster_exposed.predict(X)), booster.predict(X))
Loading
Oops, something went wrong.