Commit

changed size to shape
Steven Diamond committed Nov 25, 2016
1 parent 5cf2d5b commit f8b8e7e
Showing 75 changed files with 631 additions and 586 deletions.
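
Annotation (not part of the diff): this commit renames the (rows, cols) dimensions of an expression from size to shape throughout the codebase. A minimal usage sketch of the effect, assuming the public Expression API mirrors the internal rename shown below; the variable and its dimensions are illustrative:

import cvxpy as cvx

x = cvx.Variable(3, 2)   # a 3 x 2 variable
expr = x + 1             # the scalar is promoted to the variable's dimensions
print(expr.shape)        # (3, 2) -- spelled expr.size before this commit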
1 change: 1 addition & 0 deletions cvxpy/__init__.py
@@ -25,6 +25,7 @@
 from cvxpy.problems.problem import Problem
 from cvxpy.problems.objective import Maximize, Minimize
 from cvxpy.problems.solvers.utilities import installed_solvers
+import cvxpy.interface.scipy_wrapper
 from cvxpy.error import SolverError
 from cvxpy.settings import (CVXOPT, GLPK, GLPK_MI, CBC, JULIA_OPT,
                             ECOS, ECOS_BB, SCS, GUROBI, ELEMENTAL, MOSEK, LS,
16 changes: 8 additions & 8 deletions cvxpy/atoms/affine/add_expr.py
@@ -38,10 +38,10 @@ def __init__(self, arg_groups):
         for group in arg_groups:
             self.args += self.expand_args(group)

-    def size_from_args(self):
-        """Returns the (row, col) size of the expression.
+    def shape_from_args(self):
+        """Returns the (row, col) shape of the expression.
         """
-        return u.shape.sum_shapes([arg.size for arg in self.args])
+        return u.shape.sum_shapes([arg.shape for arg in self.args])

     def expand_args(self, expr):
         """Helper function to extract the arguments from an AddExpression.
@@ -83,15 +83,15 @@ def copy(self, args=None):
         return copy

     @staticmethod
-    def graph_implementation(arg_objs, size, data=None):
+    def graph_implementation(arg_objs, shape, data=None):
         """Sum the linear expressions.
         Parameters
         ----------
         arg_objs : list
             LinExpr for each argument.
-        size : tuple
-            The size of the resulting expression.
+        shape : tuple
+            The shape of the resulting expression.
         data :
             Additional data required by the atom.
@@ -101,6 +101,6 @@ def graph_implementation(arg_objs, size, data=None):
             (LinOp for objective, list of constraints)
         """
         for i, arg in enumerate(arg_objs):
-            if arg.size != size:
-                arg_objs[i] = lu.promote(arg, size)
+            if arg.shape != shape:
+                arg_objs[i] = lu.promote(arg, shape)
         return (lu.sum_expr(arg_objs), [])
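
Annotation (not part of the diff): graph_implementation promotes any argument whose shape differs from the target shape before summing, which is how scalar terms join matrix sums. A rough NumPy analogue of that promotion, with illustrative data only:

import numpy as np

common_shape = (3, 2)
args = [np.arange(6.0).reshape(common_shape), np.array([[10.0]])]
promoted = [a if a.shape == common_shape else np.broadcast_to(a, common_shape)
            for a in args]
total = sum(promoted)
print(total.shape)   # (3, 2)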
12 changes: 6 additions & 6 deletions cvxpy/atoms/affine/affine_atom.py
@@ -83,31 +83,31 @@ def _grad(self, values):
             if arg.is_constant():
                 fake_args += [Constant(arg.value).canonical_form[0]]
             else:
-                fake_args += [lu.create_var(arg.size, idx)]
+                fake_args += [lu.create_var(arg.shape, idx)]
                 var_offsets[idx] = offset
-                offset += arg.size[0]*arg.size[1]
-        fake_expr, _ = self.graph_implementation(fake_args, self.size,
+                offset += arg.shape[0]*arg.shape[1]
+        fake_expr, _ = self.graph_implementation(fake_args, self.shape,
                                                  self.get_data())
         # Get the matrix representation of the function.
         V, I, J, _ = canonInterface.get_problem_matrix(
             [lu.create_eq(fake_expr)],
             var_offsets,
             None
         )
-        shape = (offset, self.size[0]*self.size[1])
+        shape = (offset, self.shape[0]*self.shape[1])
         stacked_grad = sp.coo_matrix((V, (J, I)), shape=shape).tocsc()
         # Break up into per argument matrices.
         grad_list = []
         start = 0
         for arg in self.args:
             if arg.is_constant():
-                grad_shape = (arg.size[0]*arg.size[1], shape[1])
+                grad_shape = (arg.shape[0]*arg.shape[1], shape[1])
                 if grad_shape == (1, 1):
                     grad_list += [0]
                 else:
                     grad_list += [sp.coo_matrix(grad_shape, dtype='float64')]
             else:
-                stop = start + arg.size[0]*arg.size[1]
+                stop = start + arg.shape[0]*arg.shape[1]
                 grad_list += [stacked_grad[start:stop, :]]
                 start = stop
         return grad_list
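
Annotation (not part of the diff): _grad assembles one tall sparse matrix with a row per stacked variable entry and a column per output entry, then slices it row-wise into per-argument blocks. A self-contained scipy sketch of that stack-then-slice pattern, with made-up data:

import numpy as np
import scipy.sparse as sp

# Two arguments with 4 and 2 entries feeding a scalar output.
V = np.array([1.0, 2.0, 3.0])          # nonzero values
I = np.array([0, 0, 0])                # output entry indices
J = np.array([0, 3, 4])                # stacked variable entry indices
stacked_grad = sp.coo_matrix((V, (J, I)), shape=(6, 1)).tocsc()

start, grad_list = 0, []
for n_entries in [4, 2]:
    grad_list.append(stacked_grad[start:start + n_entries, :])
    start += n_entries
print([g.shape for g in grad_list])    # [(4, 1), (2, 1)]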
44 changes: 22 additions & 22 deletions cvxpy/atoms/affine/binary_operators.py
@@ -57,10 +57,10 @@ class MulExpression(BinaryOperator):
     OP_NAME = "*"
     OP_FUNC = op.mul

-    def size_from_args(self):
-        """Returns the (row, col) size of the expression.
+    def shape_from_args(self):
+        """Returns the (row, col) shape of the expression.
         """
-        return u.shape.mul_shapes(self.args[0].size, self.args[1].size)
+        return u.shape.mul_shapes(self.args[0].shape, self.args[1].shape)

     def is_incr(self, idx):
         """Is the composition non-decreasing in argument idx?
@@ -75,18 +75,18 @@ def is_decr(self, idx):
     def validate_arguments(self):
         """Validates the dimensions.
         """
-        u.shape.mul_shapes(self.args[0].size, self.args[1].size)
+        u.shape.mul_shapes(self.args[0].shape, self.args[1].shape)

     @staticmethod
-    def graph_implementation(arg_objs, size, data=None):
+    def graph_implementation(arg_objs, shape, data=None):
         """Multiply the linear expressions.
         Parameters
         ----------
         arg_objs : list
             LinExpr for each argument.
-        size : tuple
-            The size of the resulting expression.
+        shape : tuple
+            The shape of the resulting expression.
         data :
             Additional data required by the atom.
@@ -96,10 +96,10 @@ def graph_implementation(arg_objs, size, data=None):
             (LinOp for objective, list of constraints)
         """
         # Promote the right hand side to a diagonal matrix if necessary.
-        if size[1] != 1 and arg_objs[1].size == (1, 1):
-            arg = lu.promote(arg_objs[1], (size[1], 1))
+        if shape[1] != 1 and arg_objs[1].shape == (1, 1):
+            arg = lu.promote(arg_objs[1], (shape[1], 1))
             arg_objs[1] = lu.diag_vec(arg)
-        return (lu.mul_expr(arg_objs[0], arg_objs[1], size), [])
+        return (lu.mul_expr(arg_objs[0], arg_objs[1], shape), [])


 class RMulExpression(MulExpression):
@@ -117,15 +117,15 @@ def is_decr(self, idx):
         return self.args[1].is_negative()

     @staticmethod
-    def graph_implementation(arg_objs, size, data=None):
+    def graph_implementation(arg_objs, shape, data=None):
         """Multiply the linear expressions.
         Parameters
         ----------
         arg_objs : list
             LinExpr for each argument.
-        size : tuple
-            The size of the resulting expression.
+        shape : tuple
+            The shape of the resulting expression.
         data :
             Additional data required by the atom.
@@ -135,10 +135,10 @@ def graph_implementation(arg_objs, size, data=None):
             (LinOp for objective, list of constraints)
         """
         # Promote the left hand side to a diagonal matrix if necessary.
-        if size[0] != 1 and arg_objs[0].size == (1, 1):
-            arg = lu.promote(arg_objs[0], (size[0], 1))
+        if shape[0] != 1 and arg_objs[0].shape == (1, 1):
+            arg = lu.promote(arg_objs[0], (shape[0], 1))
             arg_objs[0] = lu.diag_vec(arg)
-        return (lu.rmul_expr(arg_objs[0], arg_objs[1], size), [])
+        return (lu.rmul_expr(arg_objs[0], arg_objs[1], shape), [])


 class DivExpression(BinaryOperator):
@@ -148,10 +148,10 @@ class DivExpression(BinaryOperator):
     def is_quadratic(self):
         return self.args[0].is_quadratic() and self.args[1].is_constant()

-    def size_from_args(self):
-        """Returns the (row, col) size of the expression.
+    def shape_from_args(self):
+        """Returns the (row, col) shape of the expression.
         """
-        return self.args[0].size
+        return self.args[0].shape

     def is_incr(self, idx):
         """Is the composition non-decreasing in argument idx?
@@ -164,15 +164,15 @@ def is_decr(self, idx):
         return self.args[1].is_negative()

     @staticmethod
-    def graph_implementation(arg_objs, size, data=None):
+    def graph_implementation(arg_objs, shape, data=None):
         """Multiply the linear expressions.
         Parameters
         ----------
         arg_objs : list
             LinExpr for each argument.
-        size : tuple
-            The size of the resulting expression.
+        shape : tuple
+            The shape of the resulting expression.
         data :
             Additional data required by the atom.
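
Annotation (not part of the diff): when one side of a multiplication is scalar but the result is a matrix, the scalar is promoted to a vector and placed on a diagonal so the product has the intended shape. A NumPy check of that equivalence, with illustrative values:

import numpy as np

A = np.arange(6.0).reshape(3, 2)
c = 5.0
diag_rhs = np.diag(np.full(2, c))    # scalar promoted to a length-2 vector, then diagonalized
assert np.allclose(A @ diag_rhs, c * A)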
14 changes: 7 additions & 7 deletions cvxpy/atoms/affine/conv.py
@@ -48,11 +48,11 @@ def validate_arguments(self):
         if not self.args[0].is_constant():
             raise ValueError("The first argument to conv must be constant.")

-    def size_from_args(self):
+    def shape_from_args(self):
         """The sum of the argument dimensions - 1.
         """
-        lh_length = self.args[0].size[0]
-        rh_length = self.args[1].size[0]
+        lh_length = self.args[0].shape[0]
+        rh_length = self.args[1].shape[0]
         return (lh_length + rh_length - 1, 1)

     def sign_from_args(self):
@@ -71,15 +71,15 @@ def is_decr(self, idx):
         return self.args[0].is_negative()

     @staticmethod
-    def graph_implementation(arg_objs, size, data=None):
+    def graph_implementation(arg_objs, shape, data=None):
         """Convolve two vectors.
         Parameters
         ----------
         arg_objs : list
             LinExpr for each argument.
-        size : tuple
-            The size of the resulting expression.
+        shape : tuple
+            The shape of the resulting expression.
         data :
             Additional data required by the atom.
@@ -88,4 +88,4 @@ def graph_implementation(arg_objs, size, data=None):
         tuple
             (LinOp for objective, list of constraints)
         """
-        return (lu.conv(arg_objs[0], arg_objs[1], size), [])
+        return (lu.conv(arg_objs[0], arg_objs[1], shape), [])
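
Annotation (not part of the diff): shape_from_args returns (m + n - 1, 1) because a full 1-D convolution of length-m and length-n vectors has m + n - 1 entries. A quick NumPy check:

import numpy as np

lh, rh = np.ones(4), np.ones(3)
out = np.convolve(lh, rh)            # full convolution
assert out.shape == (4 + 3 - 1,)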
20 changes: 10 additions & 10 deletions cvxpy/atoms/affine/cumsum.py
@@ -81,10 +81,10 @@ def numeric(self, values):
         """
         return np.cumsum(values[0], axis=self.axis)

-    def size_from_args(self):
+    def shape_from_args(self):
         """The same as the input.
         """
-        return self.args[0].size
+        return self.args[0].shape

     def _grad(self, values):
         """Gives the (sub/super)gradient of the atom w.r.t. each argument.
@@ -103,23 +103,23 @@ def _grad(self, values):
         for i in range(dim):
             for j in range(i+1):
                 mat[i, j] = 1
-        var = Variable(*self.args[0].size)
+        var = Variable(*self.args[0].shape)
         if self.axis == 0:
             grad = MulExpression(mat, var)._grad(values)[1]
         else:
             grad = RMulExpression(var, mat.T)._grad(values)[0]
         return [grad]

     @staticmethod
-    def graph_implementation(arg_objs, size, data=None):
+    def graph_implementation(arg_objs, shape, data=None):
         """Cumulative sum via difference matrix.
         Parameters
         ----------
         arg_objs : list
             LinExpr for each argument.
-        size : tuple
-            The size of the resulting expression.
+        shape : tuple
+            The shape of the resulting expression.
         data :
             Additional data required by the atom.
@@ -130,13 +130,13 @@ def graph_implementation(arg_objs, size, data=None):
         """
         # Implicit O(n) definition:
         # X = Y[:1,:] - Y[1:,:]
-        Y = lu.create_var(size)
+        Y = lu.create_var(shape)
         axis = data[0]
-        dim = size[axis]
+        dim = shape[axis]
         diff_mat = get_diff_mat(dim, axis)
         diff_mat = lu.create_const(diff_mat, (dim, dim), sparse=True)
         if axis == 0:
-            diff = lu.mul_expr(diff_mat, Y, size)
+            diff = lu.mul_expr(diff_mat, Y, shape)
         else:
-            diff = lu.rmul_expr(Y, diff_mat, size)
+            diff = lu.rmul_expr(Y, diff_mat, shape)
         return (Y, [lu.create_eq(arg_objs[0], diff)])
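
Annotation (not part of the diff): the _grad code fills a lower-triangular matrix of ones because a cumulative sum is the linear map given by that matrix; graph_implementation encodes the inverse relation through a difference matrix instead. A NumPy check of the triangular-matrix view:

import numpy as np

n = 5
x = np.random.randn(n)
L = np.tril(np.ones((n, n)))         # the matrix built entry by entry in _grad
assert np.allclose(L @ x, np.cumsum(x))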
26 changes: 13 additions & 13 deletions cvxpy/atoms/affine/diag.py
@@ -39,13 +39,13 @@ def diag(expr):
     """
     expr = AffAtom.cast_to_const(expr)
     if expr.is_vector():
-        if expr.size[1] == 1:
+        if expr.shape[1] == 1:
             return diag_vec(expr)
         # Convert a row vector to a column vector.
         else:
-            expr = reshape(expr, expr.size[1], 1)
+            expr = reshape(expr, expr.shape[1], 1)
             return diag_vec(expr)
-    elif expr.size[0] == expr.size[1]:
+    elif expr.shape[0] == expr.shape[1]:
         return diag_mat(expr)
     else:
         raise ValueError("Argument to diag must be a vector or square matrix.")
@@ -66,22 +66,22 @@ def numeric(self, values):
         value = intf.from_2D_to_1D(values[0])
         return np.diag(value)

-    def size_from_args(self):
+    def shape_from_args(self):
         """A square matrix.
         """
-        rows, _ = self.args[0].size
+        rows, _ = self.args[0].shape
         return (rows, rows)

     @staticmethod
-    def graph_implementation(arg_objs, size, data=None):
+    def graph_implementation(arg_objs, shape, data=None):
         """Convolve two vectors.
         Parameters
         ----------
         arg_objs : list
             LinExpr for each argument.
-        size : tuple
-            The size of the resulting expression.
+        shape : tuple
+            The shape of the resulting expression.
         data :
             Additional data required by the atom.
@@ -110,22 +110,22 @@ def numeric(self, values):
             v = v.A[0]
         return v

-    def size_from_args(self):
+    def shape_from_args(self):
         """A column vector.
         """
-        rows, _ = self.args[0].size
+        rows, _ = self.args[0].shape
         return (rows, 1)

     @staticmethod
-    def graph_implementation(arg_objs, size, data=None):
+    def graph_implementation(arg_objs, shape, data=None):
         """Extracts the diagonal of a matrix.
         Parameters
         ----------
         arg_objs : list
             LinExpr for each argument.
-        size : tuple
-            The size of the resulting expression.
+        shape : tuple
+            The shape of the resulting expression.
         data :
             Additional data required by the atom.
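
Annotation (not part of the diff): diag dispatches on its argument much like numpy.diag does -- a vector yields a square diagonal matrix (diag_vec), a square matrix yields its diagonal (diag_mat, returned here as a column vector). A NumPy illustration:

import numpy as np

v = np.array([1.0, 2.0, 3.0])
M = np.diag(v)                 # vector -> 3x3 diagonal matrix
d = np.diag(M)                 # square matrix -> its diagonal
assert M.shape == (3, 3) and np.allclose(d, v)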
2 changes: 1 addition & 1 deletion cvxpy/atoms/affine/diff.py
@@ -20,7 +20,7 @@ def diff(x, k=1, axis=0):
     x = Expression.cast_to_const(x)
     if axis == 1:
         x = x.T
-    m, n = x.size
+    m, n = x.shape
     if k < 0 or k >= m:
         raise ValueError('Must have k >= 0 and X must have < k elements along axis')
Expand Down
