Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
66 commits
Select commit Hold shift + click to select a range
6f88f67
Don't add extra slacks (artificial variables) for ranged rows
chris-maes Jun 11, 2025
94307c9
Merge branch 'branch-25.05' into less_artificals
rgsl888prabhu Jul 17, 2025
c0f5886
Add callbacks to send root solution from original problem
hlinsen Nov 3, 2025
3cb93e7
Fix reduced costs and crossover call
hlinsen Nov 5, 2025
3eb950b
Pass correct vstatus vector
hlinsen Nov 5, 2025
4a1cf94
Debugging dual inf norm assertion
hlinsen Nov 5, 2025
3229b0c
Implement conversion of ge constraints to le
hlinsen Nov 5, 2025
49c97ba
Lower tolerance and run dual phase 2 validation
hlinsen Nov 6, 2025
d333846
Wip save
hlinsen Nov 7, 2025
0351a83
Pull changes from chris less artificial PR.
hlinsen Nov 7, 2025
00a89fe
Remove prints
hlinsen Nov 8, 2025
ed71447
Merge branch 'main' of github.com:NVIDIA/cuopt into concurrent-root-s…
hlinsen Nov 8, 2025
bfe9be9
Remove prints
hlinsen Nov 8, 2025
2a9c994
Debugging successive calls with vstatus
hlinsen Nov 11, 2025
32c6edb
Fix precision issues
hlinsen Nov 12, 2025
4764d51
Save debugging state
hlinsen Nov 12, 2025
b6aad5d
Remove unnecessary original problem argument
hlinsen Nov 12, 2025
1c0d139
Call concurrent mode from mip root solve
hlinsen Nov 12, 2025
a198738
Use atomic flag to send root solution
hlinsen Nov 13, 2025
031b7de
Use atomic flag to send root solution
hlinsen Nov 13, 2025
55de62d
Fix merge conflicts
hlinsen Nov 13, 2025
63d8b87
Merge branch 'concurrent-root-solve' of github.com:hlinsen/cuopt into…
hlinsen Nov 13, 2025
68361a5
Disable print
hlinsen Nov 13, 2025
dcca90b
Set root solution in rins
hlinsen Nov 13, 2025
4a10885
Only use concurrent root solve for main bb thread
hlinsen Nov 13, 2025
32d984a
Merge branch 'main' of github.com:NVIDIA/cuopt into concurrent-root-s…
hlinsen Nov 14, 2025
94182cc
Merge branch 'main' of github.com:NVIDIA/cuopt into concurrent-root-s…
hlinsen Nov 14, 2025
bc002c4
Merge branch 'concurrent-root-solve' of github.com:hlinsen/cuopt into…
hlinsen Nov 14, 2025
26ccae5
Fix asserts
hlinsen Nov 14, 2025
093cbcb
Disable rins logs
hlinsen Nov 14, 2025
ca7c9d3
Recompute dual solution after primal push
hlinsen Nov 17, 2025
19615e4
Fix merge conflicts
hlinsen Nov 30, 2025
5741a17
Fix merge conflicts
hlinsen Dec 1, 2025
8b7b6a6
Fix default value for num cpu thread in benchmark script
hlinsen Dec 1, 2025
558bb10
Implement crossover as stopping criteria
hlinsen Dec 2, 2025
62f33b2
Wip debugging
hlinsen Dec 2, 2025
359ddff
Disable dual simplex in concurrent mode inside mip
hlinsen Dec 2, 2025
dfc3102
Disable prints
hlinsen Dec 2, 2025
b4f2aaa
Revert phase 2 tol change
hlinsen Dec 2, 2025
a18b852
Wip debugging
hlinsen Dec 2, 2025
0158c41
Check for nullptr
hlinsen Dec 2, 2025
628d2af
Fixing issues in future termination and halt
hlinsen Dec 2, 2025
951719c
Fix issue in crossover status
hlinsen Dec 2, 2025
b819417
Add ability to halt dual simplex
hlinsen Dec 2, 2025
27db29a
Use local halting mechanism in bb
hlinsen Dec 3, 2025
eb50dda
Revert concurrent solve and add new inside_mip setting instead of arg
hlinsen Dec 3, 2025
f89562e
Add back dual res inf norm assert
hlinsen Dec 3, 2025
3310d5c
Merge branch 'release/25.12' of github.com:NVIDIA/cuopt into concurre…
hlinsen Dec 3, 2025
8772731
Fix compile error
hlinsen Dec 3, 2025
d33bcc6
Fix bug incorrectly setting crossover root sol
hlinsen Dec 3, 2025
a6770c1
Revert test
hlinsen Dec 3, 2025
deab57a
Handle remaining crossover status
hlinsen Dec 3, 2025
5fe20f9
Merge branch 'release/25.12' of github.com:NVIDIA/cuopt into concurre…
hlinsen Dec 3, 2025
b5a7b39
Fix merge conflicts
hlinsen Dec 4, 2025
3ff3150
Add mg option for mip root solve
hlinsen Dec 4, 2025
86dd5c2
Address review comments
hlinsen Dec 4, 2025
99e4688
Merge branch 'release/25.12' of github.com:NVIDIA/cuopt into concurre…
hlinsen Dec 4, 2025
ceb5862
Add more prints
hlinsen Dec 4, 2025
98c92f2
Wait for future in every case
hlinsen Dec 5, 2025
65e655b
Disable pdlp logs
hlinsen Dec 6, 2025
606ccaf
Add small sleep
hlinsen Dec 6, 2025
140a07f
Merge branch 'release/25.12' of github.com:NVIDIA/cuopt into concurre…
hlinsen Dec 6, 2025
29ba82a
Fix merge conflicts
hlinsen Dec 8, 2025
b960154
Address review comments
hlinsen Dec 8, 2025
a8125c7
Disable crossover logs
hlinsen Dec 9, 2025
cc0b3b8
Encapsulate in a function
hlinsen Dec 9, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion benchmarks/linear_programming/run_mps_files.sh
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,7 @@ OUTPUT_DIR=${OUTPUT_DIR:-.}
RELAXATION=${RELAXATION:-false}
MIP_HEURISTICS_ONLY=${MIP_HEURISTICS_ONLY:-false}
WRITE_LOG_FILE=${WRITE_LOG_FILE:-false}
NUM_CPU_THREADS=${NUM_CPU_THREADS:-1}
NUM_CPU_THREADS=${NUM_CPU_THREADS:--1}
BATCH_NUM=${BATCH_NUM:-0}
N_BATCHES=${N_BATCHES:-1}
LOG_TO_CONSOLE=${LOG_TO_CONSOLE:-true}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ class mip_solver_settings_t {
f_t time_limit = std::numeric_limits<f_t>::infinity();
bool heuristics_only = false;
i_t num_cpu_threads = -1; // -1 means use default number of threads in branch and bound
i_t num_gpus = 1;
bool log_to_console = true;
std::string log_file;
std::string sol_file;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -210,8 +210,9 @@ class pdlp_solver_settings_t {
bool dual_postsolve{true};
int num_gpus{1};
method_t method{method_t::Concurrent};
bool inside_mip{false};
// For concurrent termination
volatile int* concurrent_halt;
volatile int* concurrent_halt{nullptr};
static constexpr f_t minimal_absolute_tolerance = 1.0e-12;

private:
Expand Down
105 changes: 96 additions & 9 deletions cpp/src/dual_simplex/branch_and_bound.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,21 +9,22 @@
#include <algorithm>
#include <dual_simplex/bounds_strengthening.hpp>
#include <dual_simplex/branch_and_bound.hpp>
#include <dual_simplex/crossover.hpp>
#include <dual_simplex/initial_basis.hpp>
#include <dual_simplex/logger.hpp>
#include <dual_simplex/mip_node.hpp>
#include <dual_simplex/phase2.hpp>
#include <dual_simplex/presolve.hpp>
#include <dual_simplex/pseudo_costs.hpp>
#include <dual_simplex/random.hpp>
#include <dual_simplex/solve.hpp>
#include <dual_simplex/tic_toc.hpp>
#include <dual_simplex/user_problem.hpp>

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <deque>
#include <future>
#include <limits>
#include <optional>
#include <string>
Expand Down Expand Up @@ -217,6 +218,7 @@ branch_and_bound_t<i_t, f_t>::branch_and_bound_t(
original_lp_(user_problem.handle_ptr, 1, 1, 1),
incumbent_(1),
root_relax_soln_(1, 1),
root_crossover_soln_(1, 1),
pc_(1),
solver_status_(mip_exploration_status_t::UNSET)
{
Expand Down Expand Up @@ -1203,6 +1205,81 @@ void branch_and_bound_t<i_t, f_t>::diving_thread(const csr_matrix_t<i_t, f_t>& A
}
}

template <typename i_t, typename f_t>
lp_status_t branch_and_bound_t<i_t, f_t>::solve_root_relaxation(
  simplex_solver_settings_t<i_t, f_t> const& lp_settings)
{
  // Solve the root LP relaxation concurrently: dual simplex runs on a worker
  // thread while this thread waits for a root solution published by PDLP via
  // set_root_relaxation_solution(). Whichever method finishes first supplies
  // root_relax_soln_ / root_vstatus_. Returns the LP status of the winner.
  std::future<lp_status_t> root_status_future =
    std::async(std::launch::async,
               &solve_linear_program_advanced<i_t, f_t>,
               std::ref(original_lp_),
               exploration_stats_.start_time,
               std::ref(lp_settings),
               std::ref(root_relax_soln_),
               std::ref(root_vstatus_),
               std::ref(edge_norms_));

  // Block until either PDLP publishes a candidate root solution or dual
  // simplex signals completion through the concurrent-halt flag.
  // NOTE(review): the halt flag is a volatile int, which provides neither
  // atomicity nor memory ordering -- consider std::atomic<int>.
  while (!root_crossover_solution_set_.load(std::memory_order_acquire) &&
         *get_root_concurrent_halt() == 0) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }

  if (!root_crossover_solution_set_.load(std::memory_order_acquire)) {
    // Dual simplex finished first: take its solution and status.
    return root_status_future.get();
  }

  // Crush the PDLP root solution onto the converted problem (which may carry
  // extra slack/artificial columns) so crossover can operate on it.
  std::vector<f_t> crushed_root_x;
  crush_primal_solution(
    original_problem_, original_lp_, root_crossover_soln_.x, new_slacks_, crushed_root_x);
  std::vector<f_t> crushed_root_y;
  std::vector<f_t> crushed_root_z;
  crush_dual_solution(original_problem_,
                      original_lp_,
                      new_slacks_,
                      root_crossover_soln_.y,
                      root_crossover_soln_.z,
                      crushed_root_y,
                      crushed_root_z);

  root_crossover_soln_.x = std::move(crushed_root_x);
  root_crossover_soln_.y = std::move(crushed_root_y);
  root_crossover_soln_.z = std::move(crushed_root_z);

  // Run crossover on the crushed solution to obtain a basic solution.
  auto root_crossover_settings            = settings_;
  root_crossover_settings.log.log         = false;
  root_crossover_settings.concurrent_halt = get_root_concurrent_halt();
  const crossover_status_t crossover_status = crossover(original_lp_,
                                                        root_crossover_settings,
                                                        root_crossover_soln_,
                                                        exploration_stats_.start_time,
                                                        root_crossover_soln_,
                                                        crossover_vstatus_);

  if (crossover_status == crossover_status_t::OPTIMAL) {
    settings_.log.printf("Crossover status: %d\n", static_cast<int>(crossover_status));
    // Crossover reached optimality: ask dual simplex to stop and join its
    // thread before overwriting the root solution it may still be writing.
    set_root_concurrent_halt(1);
    (void)root_status_future.get();  // status discarded; crossover result wins
    root_relax_soln_ = root_crossover_soln_;
    root_vstatus_    = crossover_vstatus_;
    return lp_status_t::OPTIMAL;
  }

  // Crossover did not reach optimality: fall back to the dual simplex result.
  return root_status_future.get();
}

template <typename i_t, typename f_t>
mip_status_t branch_and_bound_t<i_t, f_t>::solve(mip_solution_t<i_t, f_t>& solution)
{
Expand Down Expand Up @@ -1233,14 +1310,24 @@ mip_status_t branch_and_bound_t<i_t, f_t>::solve(mip_solution_t<i_t, f_t>& solut
root_relax_soln_.resize(original_lp_.num_rows, original_lp_.num_cols);

settings_.log.printf("Solving LP root relaxation\n");
simplex_solver_settings_t lp_settings = settings_;
lp_settings.inside_mip = 1;
lp_status_t root_status = solve_linear_program_advanced(original_lp_,
exploration_stats_.start_time,
lp_settings,
root_relax_soln_,
root_vstatus_,
edge_norms_);

lp_status_t root_status;
simplex_solver_settings_t lp_settings = settings_;
lp_settings.inside_mip = 1;
lp_settings.concurrent_halt = get_root_concurrent_halt();
// RINS/SUBMIP path
if (!enable_concurrent_lp_root_solve()) {
root_status = solve_linear_program_advanced(original_lp_,
exploration_stats_.start_time,
lp_settings,
root_relax_soln_,
root_vstatus_,
edge_norms_);

} else {
root_status = solve_root_relaxation(lp_settings);
}

exploration_stats_.total_lp_iters = root_relax_soln_.iterations;
exploration_stats_.total_lp_solve_time = toc(exploration_stats_.start_time);

Expand Down
31 changes: 31 additions & 0 deletions cpp/src/dual_simplex/branch_and_bound.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@
#include <dual_simplex/pseudo_costs.hpp>
#include <dual_simplex/simplex_solver_settings.hpp>
#include <dual_simplex/solution.hpp>
#include <dual_simplex/solve.hpp>
#include <dual_simplex/types.hpp>
#include <utilities/macros.cuh>
#include <utilities/omp_helpers.hpp>

#include <omp.h>
Expand Down Expand Up @@ -78,9 +80,29 @@ class branch_and_bound_t {
// Set an initial guess based on the user_problem. This should be called before solve.
void set_initial_guess(const std::vector<f_t>& user_guess) { guess_ = user_guess; }

// Set the root relaxation solution found by PDLP. Intended to be called from
// a different thread than the one running solve(); publication is handed off
// through the atomic flag stored at the end.
// @param primal         primal solution x of the root relaxation
// @param dual           dual solution y
// @param reduced_costs  reduced costs z
// @param objective      objective value of the relaxation
// @param user_objective objective value in the user's original formulation
// @param iterations     iteration count reported by the LP solver
void set_root_relaxation_solution(const std::vector<f_t>& primal,
const std::vector<f_t>& dual,
const std::vector<f_t>& reduced_costs,
f_t objective,
f_t user_objective,
i_t iterations)
{
root_crossover_soln_.x = primal;
root_crossover_soln_.y = dual;
root_crossover_soln_.z = reduced_costs;
root_objective_ = objective;
root_crossover_soln_.objective = objective;
root_crossover_soln_.user_objective = user_objective;
root_crossover_soln_.iterations = iterations;
// Release store: makes all writes above visible to the thread that observes
// this flag with an acquire load (see the wait loop in solve_root_relaxation).
root_crossover_solution_set_.store(true, std::memory_order_release);
}

// Set a solution based on the user problem during the course of the solve
void set_new_solution(const std::vector<f_t>& solution);

void set_concurrent_lp_root_solve(bool enable) { enable_concurrent_lp_root_solve_ = enable; }
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Ensure thread-safe access to enable_concurrent_lp_root_solve_ flag.

The enable_concurrent_lp_root_solve_ member (line 170) is a plain bool accessed via setter (line 103) and getter (line 114) without synchronization. If set_concurrent_lp_root_solve() could be called concurrently with enable_concurrent_lp_root_solve() or solve(), this creates a data race (undefined behavior).

Option 1 (Preferred): Make it atomic:

-  bool enable_concurrent_lp_root_solve_{false};
+  std::atomic<bool> enable_concurrent_lp_root_solve_{false};

Option 2: Document single-threaded initialization requirement:

+  // Must be called before solve() in single-threaded context
   void set_concurrent_lp_root_solve(bool enable) { enable_concurrent_lp_root_solve_ = enable; }

As per coding guidelines: "Prevent thread-unsafe use of global and static variables; use proper mutex/synchronization in server code accessing shared solver state."

Also applies to: 114-114, 170-170


// Repair a low-quality solution from the heuristics.
bool repair_solution(const std::vector<f_t>& leaf_edge_norms,
const std::vector<f_t>& potential_solution,
Expand All @@ -90,6 +112,10 @@ class branch_and_bound_t {
f_t get_upper_bound();
f_t get_lower_bound();
i_t get_heap_size();
// Whether the concurrent (dual simplex + crossover) root LP solve is enabled.
bool enable_concurrent_lp_root_solve() const { return enable_concurrent_lp_root_solve_; }
// Pointer to the halt flag shared with the LP solvers; nonzero requests a stop.
// NOTE(review): volatile provides neither atomicity nor memory ordering for
// cross-thread signalling -- std::atomic<int> is the correct type; confirm
// all readers/writers before migrating.
volatile int* get_root_concurrent_halt() { return &root_concurrent_halt_; }
void set_root_concurrent_halt(int value) { root_concurrent_halt_ = value; }
Comment on lines +116 to +117
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Replace volatile int with std::atomic for thread synchronization.

The root_concurrent_halt_ member (line 171) uses volatile int, which is incorrect for thread synchronization in C++. volatile only prevents compiler optimizations but provides no atomicity or memory ordering guarantees. This can lead to data races, lost writes, and non-deterministic behavior in concurrent code.

The pointer returned by get_root_concurrent_halt() (line 115) is passed to other components (e.g., line 1242 in the .cpp file) and dereferenced in multi-threaded contexts, making this a critical issue.

Fix: Replace with std::atomic<int> and update all access patterns:

- volatile int* get_root_concurrent_halt() { return &root_concurrent_halt_; }
- void set_root_concurrent_halt(int value) { root_concurrent_halt_ = value; }
+ std::atomic<int>* get_root_concurrent_halt() { return &root_concurrent_halt_; }
+ void set_root_concurrent_halt(int value) { root_concurrent_halt_.store(value, std::memory_order_release); }
- volatile int root_concurrent_halt_{0};
+ std::atomic<int> root_concurrent_halt_{0};

Then update all usages to use .load(std::memory_order_acquire) for reads and .store(val, std::memory_order_release) for writes.

As per coding guidelines: "Prevent thread-unsafe use of global and static variables; use proper mutex/synchronization in server code accessing shared solver state" and "Ensure race conditions are absent in multi-threaded server implementations."

Also applies to: 171-171

🤖 Prompt for AI Agents
In cpp/src/dual_simplex/branch_and_bound.hpp around lines 115-116 (and member at
line 171), replace the volatile int root_concurrent_halt_ with std::atomic<int>
(include <atomic>), change the getter to expose a pointer or reference to
std::atomic<int> (e.g., std::atomic<int>* or std::atomic<int>&) instead of
volatile int*, and update the setter to use .store(value,
std::memory_order_release); then update all call sites that read/write this
variable to use .load(std::memory_order_acquire) for reads and .store(...,
std::memory_order_release) for writes to ensure proper atomicity and memory
ordering.

lp_status_t solve_root_relaxation(simplex_solver_settings_t<i_t, f_t> const& lp_settings);

// The main entry routine. Returns the solver status and populates solution with the incumbent.
mip_status_t solve(mip_solution_t<i_t, f_t>& solution);
Expand Down Expand Up @@ -137,9 +163,14 @@ class branch_and_bound_t {

// Variables for the root node in the search tree.
std::vector<variable_status_t> root_vstatus_;
std::vector<variable_status_t> crossover_vstatus_;
f_t root_objective_;
lp_solution_t<i_t, f_t> root_relax_soln_;
lp_solution_t<i_t, f_t> root_crossover_soln_;
std::vector<f_t> edge_norms_;
std::atomic<bool> root_crossover_solution_set_{false};
bool enable_concurrent_lp_root_solve_{false};
volatile int root_concurrent_halt_{0};

// Pseudocosts
pseudo_costs_t<i_t, f_t> pc_;
Expand Down
6 changes: 5 additions & 1 deletion cpp/src/dual_simplex/crossover.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1204,6 +1204,7 @@ crossover_status_t crossover(const lp_problem_t<i_t, f_t>& lp,
lp, settings, start_time, solution, ft, basic_list, nonbasic_list, superbasic_list, vstatus);
if (primal_push_status < 0) { return return_to_status(primal_push_status); }
print_crossover_info(lp, settings, vstatus, solution, "Primal push complete");
compute_dual_solution_from_basis(lp, ft, basic_list, nonbasic_list, solution.y, solution.z);
} else {
settings.log.printf("No primal push needed. No superbasic variables\n");
}
Expand Down Expand Up @@ -1386,7 +1387,10 @@ crossover_status_t crossover(const lp_problem_t<i_t, f_t>& lp,
crossover_status_t status = crossover_status_t::NUMERICAL_ISSUES;
if (dual_feasible) { status = crossover_status_t::DUAL_FEASIBLE; }
if (primal_feasible) { status = crossover_status_t::PRIMAL_FEASIBLE; }
if (primal_feasible && dual_feasible) { status = crossover_status_t::OPTIMAL; }
if (primal_feasible && dual_feasible) {
status = crossover_status_t::OPTIMAL;
if (settings.concurrent_halt != nullptr) { *settings.concurrent_halt = 1; }
}
return status;
}

Expand Down
4 changes: 4 additions & 0 deletions cpp/src/dual_simplex/phase2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2981,6 +2981,10 @@ dual::status_t dual_phase2_with_advanced_basis(i_t phase,
100.0 * dense_delta_z / (sparse_delta_z + dense_delta_z));
ft.print_stats();
}
if (settings.inside_mip && settings.concurrent_halt != nullptr) {
settings.log.debug("Setting concurrent halt in Dual Simplex Phase 2\n");
*settings.concurrent_halt = 1;
}
}
return status;
}
Expand Down
59 changes: 44 additions & 15 deletions cpp/src/dual_simplex/presolve.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -508,23 +508,31 @@ i_t find_dependent_rows(lp_problem_t<i_t, f_t>& problem,

template <typename i_t, typename f_t>
i_t add_artifical_variables(lp_problem_t<i_t, f_t>& problem,
std::vector<i_t>& equality_rows,
const std::vector<i_t>& range_rows,
const std::vector<i_t>& equality_rows,
std::vector<i_t>& new_slacks)
{
const i_t n = problem.num_cols;
const i_t m = problem.num_rows;
const i_t num_cols = n + equality_rows.size();
const i_t nnz = problem.A.col_start[n] + equality_rows.size();
const i_t n = problem.num_cols;
const i_t m = problem.num_rows;
const i_t num_artificial_vars = equality_rows.size() - range_rows.size();
const i_t num_cols = n + num_artificial_vars;
i_t nnz = problem.A.col_start[n] + num_artificial_vars;
problem.A.col_start.resize(num_cols + 1);
problem.A.i.resize(nnz);
problem.A.x.resize(nnz);
problem.lower.resize(num_cols);
problem.upper.resize(num_cols);
problem.objective.resize(num_cols);

std::vector<bool> is_range_row(problem.num_rows, false);
for (i_t i : range_rows) {
is_range_row[i] = true;
}

i_t p = problem.A.col_start[n];
i_t j = n;
for (i_t i : equality_rows) {
if (is_range_row[i]) { continue; }
// Add an artifical variable z to the equation a_i^T x == b
// This now becomes a_i^T x + z == b, 0 <= z =< 0
problem.A.col_start[j] = p;
Expand All @@ -541,7 +549,7 @@ i_t add_artifical_variables(lp_problem_t<i_t, f_t>& problem,
assert(j == num_cols);
assert(p == nnz);
constexpr bool verbose = false;
if (verbose) { printf("Added %d artificial variables\n", num_cols - n); }
if (verbose) { printf("Added %d artificial variables\n", num_artificial_vars); }
problem.A.n = num_cols;
problem.num_cols = num_cols;
return 0;
Expand Down Expand Up @@ -800,7 +808,9 @@ void convert_user_problem(const user_problem_t<i_t, f_t>& user_problem,
}

// Add artifical variables
if (!settings.barrier_presolve) { add_artifical_variables(problem, equality_rows, new_slacks); }
if (!settings.barrier_presolve) {
add_artifical_variables(problem, user_problem.range_rows, equality_rows, new_slacks);
}
}

template <typename i_t, typename f_t>
Expand Down Expand Up @@ -1227,13 +1237,13 @@ void crush_primal_solution_with_slack(const user_problem_t<i_t, f_t>& user_probl
}

template <typename i_t, typename f_t>
void crush_dual_solution(const user_problem_t<i_t, f_t>& user_problem,
const lp_problem_t<i_t, f_t>& problem,
const std::vector<i_t>& new_slacks,
const std::vector<f_t>& user_y,
const std::vector<f_t>& user_z,
std::vector<f_t>& y,
std::vector<f_t>& z)
f_t crush_dual_solution(const user_problem_t<i_t, f_t>& user_problem,
const lp_problem_t<i_t, f_t>& problem,
const std::vector<i_t>& new_slacks,
const std::vector<f_t>& user_y,
const std::vector<f_t>& user_z,
std::vector<f_t>& y,
std::vector<f_t>& z)
{
y.resize(problem.num_rows);
for (i_t i = 0; i < user_problem.num_rows; i++) {
Expand All @@ -1244,6 +1254,12 @@ void crush_dual_solution(const user_problem_t<i_t, f_t>& user_problem,
z[j] = user_z[j];
}

std::vector<bool> is_range_row(problem.num_rows, false);
for (i_t i = 0; i < user_problem.range_rows.size(); i++) {
is_range_row[user_problem.range_rows[i]] = true;
}
assert(user_problem.num_rows == problem.num_rows);

for (i_t j : new_slacks) {
const i_t col_start = problem.A.col_start[j];
const i_t col_end = problem.A.col_start[j + 1];
Expand All @@ -1255,7 +1271,11 @@ void crush_dual_solution(const user_problem_t<i_t, f_t>& user_problem,
// e_i^T y + z_j = c_j = 0
// y_i + z_j = 0
// z_j = - y_i;
z[j] = -y[i];
if (is_range_row[i]) {
z[j] = y[i];
} else {
z[j] = -y[i];
}
}

// A^T y + z = c or A^T y + z - c = 0
Expand Down Expand Up @@ -1292,6 +1312,7 @@ void crush_dual_solution(const user_problem_t<i_t, f_t>& user_problem,
}
const f_t dual_res_inf = vector_norm_inf<i_t, f_t>(dual_residual);
assert(dual_res_inf < 1e-6);
return dual_res_inf;
}

template <typename i_t, typename f_t>
Expand Down Expand Up @@ -1511,6 +1532,14 @@ template void crush_primal_solution<int, double>(const user_problem_t<int, doubl
const std::vector<int>& new_slacks,
std::vector<double>& solution);

template double crush_dual_solution<int, double>(const user_problem_t<int, double>& user_problem,
const lp_problem_t<int, double>& problem,
const std::vector<int>& new_slacks,
const std::vector<double>& user_y,
const std::vector<double>& user_z,
std::vector<double>& y,
std::vector<double>& z);

template void uncrush_primal_solution<int, double>(const user_problem_t<int, double>& user_problem,
const lp_problem_t<int, double>& problem,
const std::vector<double>& solution,
Expand Down
Loading