Merge branch 'master' into release
convexbrain committed Jan 28, 2019
2 parents 7417a2c + 01437f1 commit af9fb16
Showing 11 changed files with 348 additions and 263 deletions.
2 changes: 1 addition & 1 deletion solver_rust/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "totsu"
version = "0.2.0"
version = "0.3.0"
authors = ["convexbrain <convexbrain@gmail.com>"]
edition = "2018"

14 changes: 7 additions & 7 deletions solver_rust/README.md
@@ -7,7 +7,7 @@ This crate for Rust provides a basic **primal-dual interior-point method** solve
## Target problem

A common target problem is continuous scalar **convex optimization** such as
- LP, QP and QCQP. SOCP and SDP can also be handled with elaboration.
+ LP, QP and QCQP. SOCP and SDP can also be handled with a certain effort.

## Algorithm and design concepts

@@ -56,17 +56,17 @@ let vec_h = Mat::new_vec(m).set_iter(&[
let mat_a = Mat::new(p, n);
let vec_b = Mat::new_vec(p);

- let pdipm = PDIPM::new();
- let rslt = pdipm.solve_qp(&mut std::io::sink(),
- &mat_p, &vec_q,
- &mat_g, &vec_h,
- &mat_a, &vec_b).unwrap();
+ let param = PDIPMParam::default();
+ let rslt = PDIPM::new().solve_qp(&param, &mut std::io::sink(),
+ &mat_p, &vec_q,
+ &mat_g, &vec_h,
+ &mat_a, &vec_b).unwrap();

let exp = Mat::new_vec(n).set_iter(&[
2., 0.
]);
println!("rslt = {}", rslt);
- assert!((&rslt - exp).norm_p2() < pdipm.eps);
+ assert!((&rslt - exp).norm_p2() < param.eps);
```

You can find other test examples of pre-defined solvers in `lib.rs`.
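As a migration note for this release: solver settings now live in a separate `PDIPMParam` struct passed to every `solve_*` call, instead of being fields on `PDIPM` itself. Below is a minimal sketch of overriding the tolerance, assuming the problem matrices (`mat_p`, `vec_q`, `mat_g`, `vec_h`, `mat_a`, `vec_b`) from the example above are in scope; `eps` and `margin` are the only `PDIPMParam` fields visible in this commit, so everything else is left at its default.

```rust
// Sketch only: tuning the solver through PDIPMParam, using the same
// struct-update syntax as the SDP test later in this commit.
let param = PDIPMParam {
    eps: 1e-6, // tolerance the examples compare the result against
    .. Default::default()
};
let rslt = PDIPM::new().solve_qp(&param, &mut std::io::sink(),
                                 &mat_p, &vec_q,
                                 &mat_g, &vec_h,
                                 &mat_a, &vec_b).unwrap();
println!("rslt = {}", rslt);
```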
60 changes: 31 additions & 29 deletions solver_rust/src/lib.rs
@@ -8,7 +8,7 @@ This crate for Rust provides a basic **primal-dual interior-point method** solve
# Target problem
A common target problem is continuous scalar **convex optimization** such as
- LP, QP and QCQP. SOCP and SDP can also be handled with elaboration.
+ LP, QP and QCQP. SOCP and SDP can also be handled with a certain effort.
More specifically,
\\[
\\begin{array}{ll}
@@ -70,17 +70,17 @@ let vec_h = Mat::new_vec(m).set_iter(&[
let mat_a = Mat::new(p, n);
let vec_b = Mat::new_vec(p);
- let pdipm = PDIPM::new();
- let rslt = pdipm.solve_qp(&mut std::io::sink(),
- &mat_p, &vec_q,
- &mat_g, &vec_h,
- &mat_a, &vec_b).unwrap();
+ let param = PDIPMParam::default();
+ let rslt = PDIPM::new().solve_qp(&param, &mut std::io::sink(),
+ &mat_p, &vec_q,
+ &mat_g, &vec_h,
+ &mat_a, &vec_b).unwrap();
let exp = Mat::new_vec(n).set_iter(&[
2., 0.
]);
println!("rslt = {}", rslt);
- assert!((&rslt - exp).norm_p2() < pdipm.eps);
+ assert!((&rslt - exp).norm_p2() < param.eps);
```
You can find other test examples of pre-defined solvers in [`lib.rs`](../src/totsu/lib.rs.html).
@@ -98,7 +98,7 @@ pub mod sdp;
/// Prelude
pub mod prelude {
pub use crate::mat::{Mat, FP};
- pub use crate::pdipm::PDIPM;
+ pub use crate::pdipm::{PDIPM, PDIPMParam};
}

/// Pre-defined solvers
@@ -146,16 +146,16 @@ mod tests {
let mat_a = Mat::new(p, n);
let vec_b = Mat::new_vec(p);

- let pdipm = PDIPM::new();
- let rslt = pdipm.solve_qcqp(&mut std::io::sink(),
- &mat_p, &vec_q, &scl_r,
- &mat_a, &vec_b).unwrap();
+ let param = PDIPMParam::default();
+ let rslt = PDIPM::new().solve_qcqp(&param, &mut std::io::sink(),
+ &mat_p, &vec_q, &scl_r,
+ &mat_a, &vec_b).unwrap();

let exp = Mat::new_vec(n).set_iter(&[
5., 4.
]);
println!("rslt = {}", rslt);
- assert!((&rslt - exp).norm_p2() < pdipm.eps);
+ assert!((&rslt - exp).norm_p2() < param.eps);
}

#[test]
@@ -181,17 +181,17 @@ mod tests {
let mat_a = Mat::new(p, n);
let vec_b = Mat::new_vec(p);

- let pdipm = PDIPM::new();
- let rslt = pdipm.solve_socp(&mut std::io::sink(),
- &vec_f,
- &mat_g, &vec_h, &vec_c, &scl_d,
- &mat_a, &vec_b).unwrap();
+ let param = PDIPMParam::default();
+ let rslt = PDIPM::new().solve_socp(&param, &mut std::io::sink(),
+ &vec_f,
+ &mat_g, &vec_h, &vec_c, &scl_d,
+ &mat_a, &vec_b).unwrap();

let exp = Mat::new_vec(n).set_iter(&[
-1., -1.
]);
println!("rslt = {}", rslt);
- assert!((&rslt - exp).norm_p2() < pdipm.eps);
+ assert!((&rslt - exp).norm_p2() < param.eps);
}

#[test]
@@ -217,11 +217,11 @@ mod tests {
let mat_a = Mat::new(p, n);
let vec_b = Mat::new_vec(p);

- let pdipm = PDIPM::new();
- let _rslt = pdipm.solve_lp(&mut std::io::sink(),
- &vec_c,
- &mat_g, &vec_h,
- &mat_a, &vec_b).unwrap_err();
+ let param = PDIPMParam::default();
+ let _rslt = PDIPM::new().solve_lp(&param, &mut std::io::sink(),
+ &vec_c,
+ &mat_g, &vec_h,
+ &mat_a, &vec_b).unwrap_err();
}

#[test]
@@ -252,11 +252,13 @@ mod tests {
let mat_a = Mat::new(p, n);
let vec_b = Mat::new_vec(p);

- let mut pdipm = PDIPM::new();
- pdipm.eps = 1e-4; // solve_sdp() is not so accurate
- let rslt = pdipm.solve_sdp(&mut std::io::sink(),
- &vec_c, &mat_f,
- &mat_a, &vec_b).unwrap();
+ let param = PDIPMParam {
+ eps: 1e-4, // solve_sdp() is not so accurate
+ .. Default::default()
+ };
+ let rslt = PDIPM::new().solve_sdp(&param, &mut std::io::sink(),
+ &vec_c, &mat_f,
+ &mat_a, &vec_b).unwrap();

let exp = Mat::new_vec(n).set_iter(&[
3., 4.
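A side effect of the prelude change earlier in this file (`pub use crate::pdipm::{PDIPM, PDIPMParam};`) is that downstream code picks up the new parameter struct through the same glob import it already uses. A small sketch, assuming only what this commit shows (the crate name `totsu` from Cargo.toml and the `eps`/`margin` fields referenced in lp.rs):

```rust
use totsu::prelude::*; // Mat, FP, PDIPM and, as of this change, PDIPMParam

fn main() {
    let param = PDIPMParam::default();
    // Both fields below are referenced elsewhere in this commit.
    println!("eps = {}, margin = {}", param.eps, param.margin);
}
```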
34 changes: 19 additions & 15 deletions solver_rust/src/lp.rs
@@ -34,32 +34,32 @@ use std::io::Write;
///
/// In the following, \\( d \\) does not appear since it does not matter.
pub trait LP {
- fn solve_lp<L>(&self, log: &mut L,
+ fn solve_lp<L>(&mut self, param: &PDIPMParam, log: &mut L,
vec_c: &Mat,
mat_g: &Mat, vec_h: &Mat,
mat_a: &Mat, vec_b: &Mat)
- -> Result<Mat, &'static str>
+ -> Result<Mat, String>
where L: Write;
}

fn check_param(vec_c: &Mat,
mat_g: &Mat, vec_h: &Mat,
mat_a: &Mat, vec_b: &Mat)
- -> Result<(usize, usize, usize), &'static str>
+ -> Result<(usize, usize, usize), String>
{
let (n, _) = vec_c.size();
let (m, _) = mat_g.size();
let (p, _) = mat_a.size();

if n == 0 {return Err("vec_c: 0 rows");}
if n == 0 {return Err("vec_c: 0 rows".into());}
// m = 0 means NO inequality constraints
// p = 0 means NO equality constraints

if vec_c.size() != (n, 1) {return Err("vec_c: size mismatch");}
if mat_g.size() != (m, n) {return Err("mat_g: size mismatch");}
if vec_h.size() != (m, 1) {return Err("vec_h: size mismatch");}
if mat_a.size() != (p, n) {return Err("mat_a: size mismatch");}
if vec_b.size() != (p, 1) {return Err("vec_b: size mismatch");}
if vec_c.size() != (n, 1) {return Err(format!("vec_c: size {:?} must be {:?}", vec_c.size(), (n, 1)));}
if mat_g.size() != (m, n) {return Err(format!("mat_g: size {:?} must be {:?}", mat_g.size(), (m, n)));}
if vec_h.size() != (m, 1) {return Err(format!("vec_h: size {:?} must be {:?}", vec_h.size(), (m, 1)));}
if mat_a.size() != (p, n) {return Err(format!("mat_a: size {:?} must be {:?}", mat_a.size(), (p, n)));}
if vec_b.size() != (p, 1) {return Err(format!("vec_b: size {:?} must be {:?}", vec_b.size(), (p, 1)));}

Ok((n, m, p))
}
@@ -69,17 +69,18 @@ impl LP for PDIPM
/// Runs the solver with given parameters.
///
/// Returns `Ok` with optimal \\(x\\) or `Err` with message string.
+ /// * `param` is solver parameters.
/// * `log` outputs solver progress.
/// * `vec_c` is \\(c\\).
/// * `mat_g` is \\(G\\).
/// * `vec_h` is \\(h\\).
/// * `mat_a` is \\(A\\).
/// * `vec_b` is \\(b\\).
- fn solve_lp<L>(&self, log: &mut L,
+ fn solve_lp<L>(&mut self, param: &PDIPMParam, log: &mut L,
vec_c: &Mat,
mat_g: &Mat, vec_h: &Mat,
mat_a: &Mat, vec_b: &Mat)
- -> Result<Mat, &'static str>
+ -> Result<Mat, String>
where L: Write
{
// ----- parameter check
@@ -89,7 +90,7 @@ impl LP for PDIPM
// ----- initial value of a slack variable

let s = -vec_h.min().unwrap_or(0.);
- let mut margin = self.margin;
+ let mut margin = param.margin;
let mut s_initial = s + margin;
while s_initial <= s {
margin *= 2.;
@@ -98,8 +99,8 @@

// ----- start to solve

- let rslt = self.solve(n + 1, m, p + 1, // '+ 1' is for a slack variable
- log,
+ let rslt = self.solve(param, log,
+ n + 1, m, p + 1, // '+ 1' is for a slack variable
|_, df_o| {
df_o.rows_mut(0 .. n).assign(&vec_c);
// for a slack variable
@@ -123,19 +124,22 @@
ddf_i.assign_all(0.);
},
|a, b| {
+ a.assign_all(0.);
+ b.assign_all(0.);
a.slice_mut(0 .. p, 0 .. n).assign(mat_a);
b.rows_mut(0 .. p).assign(vec_b);
// for a slack variable
a[(p, n)] = 1.;
},
|mut x| {
+ x.assign_all(0.);
x[(n, 0)] = s_initial;
}
);

match rslt {
Ok(y) => Ok(y.rows(0 .. n).clone_sz()),
- Err(s) => Err(s)
+ Err(s) => Err(s.into())
}
}
}
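Since the error type above changed from `&'static str` to `String`, callers now receive an owned message that can carry the size details formatted in `check_param`. A hedged sketch of a caller that reports the message instead of unwrapping, assuming the problem matrices are set up as in the LP test earlier in this commit and that the `LP` trait from this `lp` module is in scope:

```rust
// Sketch: handling the owned String error returned by the reworked solve_lp.
let param = PDIPMParam::default();
match PDIPM::new().solve_lp(&param, &mut std::io::sink(),
                            &vec_c,
                            &mat_g, &vec_h,
                            &mat_a, &vec_b) {
    Ok(x) => println!("optimal x = {}", x),
    // e.g. one of the size-mismatch messages built by check_param
    Err(msg) => eprintln!("solve_lp failed: {}", msg),
}
```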
19 changes: 19 additions & 0 deletions solver_rust/src/mat.rs
@@ -483,6 +483,25 @@ impl<V: View> MatGen<V>

sum
}
+ /// Returns inner product.
+ pub fn prod<V2: View>(&self, rhs: &MatGen<V2>) -> FP
+ {
+ let (l_nrows, l_ncols) = self.size();
+ let (r_nrows, r_ncols) = rhs.size();
+
+ assert_eq!(l_nrows, r_nrows);
+ assert_eq!(l_ncols, r_ncols);
+
+ let mut sum = 0.;
+
+ for c in 0 .. l_ncols {
+ for r in 0 .. l_nrows {
+ sum += self[(r, c)] * rhs[(r, c)];
+ }
+ }
+
+ sum
+ }
//
/// Finds maximum value.
pub fn max(&self) -> Option<FP>
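For orientation, the `prod` method added above is a plain element-wise inner product over two equally-sized matrices (the `assert_eq!` calls turn a shape mismatch into a panic). A small usage sketch on column vectors, using only constructors that already appear in this diff:

```rust
// Sketch: inner product of two length-3 column vectors via the new prod().
let a = Mat::new_vec(3).set_iter(&[1., 2., 3.]);
let b = Mat::new_vec(3).set_iter(&[4., 5., 6.]);
let d = a.prod(&b); // 1*4 + 2*5 + 3*6 = 32
assert!((d - 32.).abs() < 1e-12);
```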
2 changes: 1 addition & 1 deletion solver_rust/src/matsvd.rs
@@ -45,7 +45,7 @@ impl MatSVD
{
let a = self.u.col(c1).norm_p2sq();
let b = self.u.col(c2).norm_p2sq();
- let d = (self.u.col(c1).t() * self.u.col(c2))[(0, 0)];
+ let d = self.u.col(c1).prod(&self.u.col(c2));

if d * d <= TOL_CNV2 * a * b {
true