Remove old pythons and leverage @ operator #679

Merged: 9 commits, Dec 26, 2021
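The main change replaces numpy.dot calls with the @ matrix-multiplication operator (PEP 465), which requires Python 3.5 or newer; dropping Python 3.6 from the CI matrix is consistent with it having reached end of life. A minimal sketch (not part of the diff) of the equivalence the conversion relies on:

import numpy as np

A = np.array([[1.0, 2.0], [3.0, 4.0]])
x = np.array([[1.0], [0.0]])

# For 2-D arrays, A @ x is the same matrix product as np.dot(A, x)
assert np.allclose(A @ x, np.dot(A, x))

# Chained products read closer to the underlying linear algebra
assert np.allclose(A @ A @ x, np.dot(A, np.dot(A, x)))

One behavioral difference to keep in mind: unlike np.dot, the @ operator rejects scalar operands, which is why the scalar-R branches in mateqn.py below multiply by 1/R with * instead of using @.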
2 changes: 1 addition & 1 deletion .github/workflows/python-package-conda.yml
@@ -10,7 +10,7 @@ jobs:
strategy:
max-parallel: 5
matrix:
- python-version: [3.6, 3.9]
+ python-version: [3.7, 3.9]
slycot: ["", "conda"]
array-and-matrix: [0]
include:
112 changes: 0 additions & 112 deletions .travis.yml

This file was deleted.

16 changes: 8 additions & 8 deletions control/canonical.py
@@ -8,7 +8,7 @@

import numpy as np

- from numpy import zeros, zeros_like, shape, poly, iscomplex, vstack, hstack, dot, \
+ from numpy import zeros, zeros_like, shape, poly, iscomplex, vstack, hstack, \
transpose, empty, finfo, float64
from numpy.linalg import solve, matrix_rank, eig

@@ -149,7 +149,7 @@ def observable_form(xsys):
raise ValueError("Transformation matrix singular to working precision.")

# Finally, compute the output matrix
- zsys.B = Tzx.dot(xsys.B)
+ zsys.B = Tzx @ xsys.B

return zsys, Tzx

@@ -189,13 +189,13 @@ def rsolve(M, y):

# Update the system matrices
if not inverse:
- zsys.A = rsolve(T, dot(T, zsys.A)) / timescale
- zsys.B = dot(T, zsys.B) / timescale
+ zsys.A = rsolve(T, T @ zsys.A) / timescale
+ zsys.B = T @ zsys.B / timescale
zsys.C = rsolve(T, zsys.C)
else:
- zsys.A = solve(T, zsys.A).dot(T) / timescale
+ zsys.A = solve(T, zsys.A) @ T / timescale
zsys.B = solve(T, zsys.B) / timescale
- zsys.C = zsys.C.dot(T)
+ zsys.C = zsys.C @ T

return zsys

@@ -405,8 +405,8 @@ def bdschur(a, condmax=None, sort=None):
permidx = np.hstack([blkidxs[i] for i in sortidx])
rperm = np.eye(amodal.shape[0])[permidx]

- tmodal = tmodal.dot(rperm)
- amodal = rperm.dot(amodal).dot(rperm.T)
+ tmodal = tmodal @ rperm
+ amodal = rperm @ amodal @ rperm.T
blksizes = blksizes[sortidx]

elif sort is None:
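In the canonical.py hunk above that computes zsys.A = rsolve(T, T @ zsys.A), the helper rsolve forms T A T^{-1} without ever building an explicit inverse. The body of rsolve is not shown in this diff; the sketch below assumes the usual transposed-solve implementation of right division and is only illustrative:

import numpy as np
from numpy.linalg import solve, inv

def rsolve(M, y):
    # Right division y @ inv(M), computed via a transposed solve
    # (assumed implementation; the actual helper body is not in the hunk)
    return solve(M.T, y.T).T

T = np.array([[2.0, 1.0], [0.0, 3.0]])
A = np.array([[0.0, 1.0], [-2.0, -3.0]])

# Matches zsys.A = rsolve(T, T @ zsys.A): a similarity transform T A T^{-1}
assert np.allclose(rsolve(T, T @ A), T @ A @ inv(T))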
1 change: 0 additions & 1 deletion control/delay.py
@@ -42,7 +42,6 @@
#
# $Id$

- from __future__ import division

__all__ = ['pade']

3 changes: 1 addition & 2 deletions control/flatsys/flatsys.py
@@ -462,8 +462,7 @@ def traj_const(null_coeffs):
for type, fun, lb, ub in traj_constraints:
if type == sp.optimize.LinearConstraint:
# `fun` is A matrix associated with polytope...
- values.append(
-     np.dot(fun, np.hstack([states, inputs])))
+ values.append(fun @ np.hstack([states, inputs]))
elif type == sp.optimize.NonlinearConstraint:
values.append(fun(states, inputs))
else:
12 changes: 6 additions & 6 deletions control/flatsys/linflat.py
@@ -110,7 +110,7 @@ def __init__(self, linsys, inputs=None, outputs=None, states=None,

# Compute the flat output variable z = C x
Cfz = np.zeros(np.shape(linsys.C)); Cfz[0, 0] = 1
- self.Cf = np.dot(Cfz, Tr)
+ self.Cf = Cfz @ Tr

# Compute the flat flag from the state (and input)
def forward(self, x, u):
@@ -122,11 +122,11 @@ def forward(self, x, u):
x = np.reshape(x, (-1, 1))
u = np.reshape(u, (1, -1))
zflag = [np.zeros(self.nstates + 1)]
- zflag[0][0] = np.dot(self.Cf, x)
+ zflag[0][0] = self.Cf @ x
H = self.Cf # initial state transformation
for i in range(1, self.nstates + 1):
- zflag[0][i] = np.dot(H, np.dot(self.A, x) + np.dot(self.B, u))
- H = np.dot(H, self.A) # derivative for next iteration
+ zflag[0][i] = H @ (self.A @ x + self.B @ u)
+ H = H @ self.A # derivative for next iteration
return zflag

# Compute state and input from flat flag
@@ -137,6 +137,6 @@ def reverse(self, zflag):

"""
z = zflag[0][0:-1]
- x = np.dot(self.Tinv, z)
- u = zflag[0][-1] - np.dot(self.F, z)
+ x = self.Tinv @ z
+ u = zflag[0][-1] - self.F @ z
return np.reshape(x, self.nstates), np.reshape(u, self.ninputs)
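The forward hunk above builds the flat flag of a linear system by repeated multiplication with A: z = Cf x, and each further derivative is Cf A^(i-1) (A x + B u). A small worked example of the same recursion, using a hypothetical double integrator that is not part of the PR:

import numpy as np

# Double integrator xdot = A x + B u with flat output z = x[0]
A = np.array([[0.0, 1.0], [0.0, 0.0]])
B = np.array([[0.0], [1.0]])
Cf = np.array([[1.0, 0.0]])

x = np.array([[1.0], [2.0]])   # position 1, velocity 2
u = np.array([[3.0]])          # acceleration 3

H = Cf
zflag = [(Cf @ x).item()]
for _ in range(2):
    zflag.append((H @ (A @ x + B @ u)).item())
    H = H @ A                  # next derivative uses Cf A^i
# zflag == [1.0, 2.0, 3.0]: z, zdot, zddot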
17 changes: 6 additions & 11 deletions control/frdata.py
@@ -35,7 +35,6 @@
# Author: M.M. (Rene) van Paassen (using xferfcn.py as basis)
# Date: 02 Oct 12

- from __future__ import division

"""
Frequency response data representation and functions.
@@ -48,7 +47,7 @@
from warnings import warn
import numpy as np
from numpy import angle, array, empty, ones, \
- real, imag, absolute, eye, linalg, where, dot, sort
+ real, imag, absolute, eye, linalg, where, sort
from scipy.interpolate import splprep, splev
from .lti import LTI, _process_frequency_response
from . import config
@@ -302,7 +301,7 @@ def __mul__(self, other):
fresp = empty((outputs, inputs, len(self.omega)),
dtype=self.fresp.dtype)
for i in range(len(self.omega)):
- fresp[:, :, i] = dot(self.fresp[:, :, i], other.fresp[:, :, i])
+ fresp[:, :, i] = self.fresp[:, :, i] @ other.fresp[:, :, i]
return FRD(fresp, self.omega,
smooth=(self.ifunc is not None) and
(other.ifunc is not None))
@@ -330,7 +329,7 @@ def __rmul__(self, other):
fresp = empty((outputs, inputs, len(self.omega)),
dtype=self.fresp.dtype)
for i in range(len(self.omega)):
- fresp[:, :, i] = dot(other.fresp[:, :, i], self.fresp[:, :, i])
+ fresp[:, :, i] = other.fresp[:, :, i] @ self.fresp[:, :, i]
return FRD(fresp, self.omega,
smooth=(self.ifunc is not None) and
(other.ifunc is not None))
@@ -543,13 +542,9 @@ def feedback(self, other=1, sign=-1):
# TODO: is there a reason to use linalg.solve instead of linalg.inv?
# https://github.com/python-control/python-control/pull/314#discussion_r294075154
for k, w in enumerate(other.omega):
- fresp[:, :, k] = np.dot(
-     self.fresp[:, :, k],
-     linalg.solve(
-         eye(self.ninputs)
-         + np.dot(other.fresp[:, :, k], self.fresp[:, :, k]),
-         eye(self.ninputs))
-     )
+ fresp[:, :, k] = self.fresp[:, :, k] @ linalg.solve(
+     eye(self.ninputs) + other.fresp[:, :, k] @ self.fresp[:, :, k],
+     eye(self.ninputs))

return FRD(fresp, other.omega, smooth=(self.ifunc is not None))

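The feedback hunk above evaluates the closed loop frequency by frequency as G (I + H G)^{-1}, using linalg.solve against an identity matrix rather than an explicit inverse, which is generally better conditioned. A standalone sketch of that single-frequency computation; the names G and H and the random data are illustrative, not from the code:

import numpy as np
from numpy import eye, linalg

rng = np.random.default_rng(0)
n = 2
G = rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n))
H = rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n))

# Same expression as the new feedback code, at one frequency point
closed = G @ linalg.solve(eye(n) + H @ G, eye(n))
assert np.allclose(closed, G @ linalg.inv(eye(n) + H @ G))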
12 changes: 6 additions & 6 deletions control/iosys.py
@@ -843,14 +843,14 @@ def _update_params(self, params={}, warning=True):

def _rhs(self, t, x, u):
# Convert input to column vector and then change output to 1D array
- xdot = np.dot(self.A, np.reshape(x, (-1, 1))) \
-     + np.dot(self.B, np.reshape(u, (-1, 1)))
+ xdot = self.A @ np.reshape(x, (-1, 1)) \
+     + self.B @ np.reshape(u, (-1, 1))
return np.array(xdot).reshape((-1,))

def _out(self, t, x, u):
# Convert input to column vector and then change output to 1D array
- y = np.dot(self.C, np.reshape(x, (-1, 1))) \
-     + np.dot(self.D, np.reshape(u, (-1, 1)))
+ y = self.C @ np.reshape(x, (-1, 1)) \
+     + self.D @ np.reshape(u, (-1, 1))
return np.array(y).reshape((-1,))


@@ -1197,7 +1197,7 @@ def _out(self, t, x, u):
ulist, ylist = self._compute_static_io(t, x, u)

# Make the full set of subsystem outputs to system output
- return np.dot(self.output_map, ylist)
+ return self.output_map @ ylist

def _compute_static_io(self, t, x, u):
# Figure out the total number of inputs and outputs
@@ -1239,7 +1239,7 @@ def _compute_static_io(self, t, x, u):
output_index += sys.noutputs

# Compute inputs based on connection map
- new_ulist = np.dot(self.connect_map, ylist[:noutputs]) \
+ new_ulist = self.connect_map @ ylist[:noutputs] \
+ np.dot(self.input_map, u)

# Check to see if any of the inputs changed
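The _rhs and _out hunks above keep the existing reshape pattern: the 1-D state and input are turned into column vectors, pushed through A x + B u (or C x + D u), and flattened back to 1-D. A toy sketch of that pattern with made-up matrices, not taken from the library:

import numpy as np

A = np.array([[0.0, 1.0], [-1.0, -2.0]])
B = np.array([[0.0], [1.0]])

def rhs(x, u):
    # Column vectors in, flat 1-D array out, mirroring the _rhs hunk
    xdot = A @ np.reshape(x, (-1, 1)) + B @ np.reshape(u, (-1, 1))
    return np.array(xdot).reshape((-1,))

print(rhs([1.0, 0.0], [2.0]))   # -> [0. 1.]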
3 changes: 0 additions & 3 deletions control/margins.py
@@ -9,9 +9,6 @@
margins.margin
"""

- # Python 3 compatibility (needs to go here)
- from __future__ import print_function

"""Copyright (c) 2011 by California Institute of Technology
All rights reserved.

30 changes: 15 additions & 15 deletions control/mateqn.py
@@ -38,7 +38,7 @@

import warnings

- from numpy import shape, size, asarray, copy, zeros, eye, dot, \
+ from numpy import shape, size, asarray, copy, zeros, eye, \
finfo, inexact, atleast_2d
from scipy.linalg import eigvals, solve_discrete_are, solve
from .exception import ControlSlycot, ControlArgument
@@ -496,9 +496,9 @@ def care(A, B, Q, R=None, S=None, E=None, stabilizing=True):

# Calculate the gain matrix G
if size(R_b) == 1:
- G = dot(dot(1/(R_ba), asarray(B_ba).T), X)
+ G = 1/(R_ba) * asarray(B_ba).T @ X
else:
- G = dot(solve(R_ba, asarray(B_ba).T), X)
+ G = solve(R_ba, asarray(B_ba).T) @ X

# Return the solution X, the closed-loop eigenvalues L and
# the gain matrix G
@@ -567,9 +567,9 @@ def care(A, B, Q, R=None, S=None, E=None, stabilizing=True):

# Calculate the gain matrix G
if size(R_b) == 1:
- G = dot(1/(R_b), dot(asarray(B_b).T, dot(X, E_b)) + asarray(S_b).T)
+ G = 1/(R_b) * (asarray(B_b).T @ X @ E_b + asarray(S_b).T)
else:
- G = solve(R_b, dot(asarray(B_b).T, dot(X, E_b)) + asarray(S_b).T)
+ G = solve(R_b, asarray(B_b).T @ X @ E_b + asarray(S_b).T)

# Return the solution X, the closed-loop eigenvalues L and
# the gain matrix G
@@ -629,8 +629,8 @@ def dare(A, B, Q, R, S=None, E=None, stabilizing=True):
Rmat = _ssmatrix(R)
Qmat = _ssmatrix(Q)
X = solve_discrete_are(A, B, Qmat, Rmat)
- G = solve(B.T.dot(X).dot(B) + Rmat, B.T.dot(X).dot(A))
- L = eigvals(A - B.dot(G))
+ G = solve(B.T @ X @ B + Rmat, B.T @ X @ A)
+ L = eigvals(A - B @ G)
return _ssmatrix(X), L, _ssmatrix(G)


@@ -718,11 +718,11 @@ def dare_old(A, B, Q, R, S=None, E=None, stabilizing=True):

# Calculate the gain matrix G
if size(R_b) == 1:
- G = dot(1/(dot(asarray(B_ba).T, dot(X, B_ba)) + R_ba),
-     dot(asarray(B_ba).T, dot(X, A_ba)))
+ G = (1/(asarray(B_ba).T @ X @ B_ba + R_ba) *
+     asarray(B_ba).T @ X @ A_ba)
else:
- G = solve(dot(asarray(B_ba).T, dot(X, B_ba)) + R_ba,
-     dot(asarray(B_ba).T, dot(X, A_ba)))
+ G = solve(asarray(B_ba).T @ X @ B_ba + R_ba,
+     asarray(B_ba).T @ X @ A_ba)

# Return the solution X, the closed-loop eigenvalues L and
# the gain matrix G
Expand Down Expand Up @@ -791,11 +791,11 @@ def dare_old(A, B, Q, R, S=None, E=None, stabilizing=True):

# Calculate the gain matrix G
if size(R_b) == 1:
- G = dot(1/(dot(asarray(B_b).T, dot(X, B_b)) + R_b),
-     dot(asarray(B_b).T, dot(X, A_b)) + asarray(S_b).T)
+ G = (1/(asarray(B_b).T @ X @ B_b + R_b) *
+     (asarray(B_b).T @ X @ A_b + asarray(S_b).T))
else:
- G = solve(dot(asarray(B_b).T, dot(X, B_b)) + R_b,
-     dot(asarray(B_b).T, dot(X, A_b)) + asarray(S_b).T)
+ G = solve(asarray(B_b).T @ X @ B_b + R_b,
+     asarray(B_b).T @ X @ A_b + asarray(S_b).T)

# Return the solution X, the closed-loop eigenvalues L and
# the gain matrix G
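The dare() hunk above computes the discrete-time gain as G = (B' X B + R)^{-1} B' X A and the closed-loop eigenvalues as eig(A - B G). A short self-contained check of those two lines against scipy; the system matrices are illustrative, not taken from the PR:

import numpy as np
from scipy.linalg import solve_discrete_are, solve, eigvals

A = np.array([[1.0, 1.0], [0.0, 1.0]])
B = np.array([[0.0], [1.0]])
Q = np.eye(2)
R = np.array([[1.0]])

X = solve_discrete_are(A, B, Q, R)
G = solve(B.T @ X @ B + R, B.T @ X @ A)   # same expression as the dare() hunk
L = eigvals(A - B @ G)
assert np.all(np.abs(L) < 1)              # stabilizing solution: |eig| < 1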