sysid.py
from optistack import *
from casadi import *
import numpy as np
# In this example, we fit a nonlinear model to measurements
#
# This example uses more advanced constructs than the vdp* examples:
# Since the number of control intervals is potentially very large here,
# we use memory-efficient Map and MapAccum, in combination with
# codegeneration.
#
# We will be working with a 2-norm objective:
# || y_measured - y_simulated ||_2^2
#
# This form is well-suited for the Gauss-Newton Hessian approximation.
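#
# For a residual vector e(x), Gauss-Newton approximates the Hessian of
# ||e(x)||_2^2 by 2*J'*J with J = de/dx, which is cheap to form and positive
# semidefinite; handing the solver the residual vector (rather than the
# scalar sum of squares) is what makes this approximation possible.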
########### SETTINGS #####################
N = 10000 # Number of samples
fs = 610.1 # Sampling frequency [Hz]
param_truth = np.array([5.625e-6,2.3e-4,1,4.69])
param_guess = np.array([5,2,1,5])
scale = np.array([1e-6,1e-4,1,1])
############ MODELING #####################
y = MX.sym('y')
dy = MX.sym('dy')
u = MX.sym('u')
states = vertcat(y,dy)
controls = u
M = optivar()
c = optivar()
k = optivar()
k_NL = optivar()
params = vertcat(M,c,k,k_NL)
rhs = vertcat(dy, (u-k_NL*y**3-k*y-c*dy)/M)
# Form an ode function
ode = Function('ode',[states,controls,params],[rhs])
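# Illustrative sanity check (not part of the original script): the ODE
# right-hand side can be evaluated numerically at an arbitrary point.
print(ode(DM([0.1, 0.0]), 0.0, DM(param_truth)))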
############ Creating a simulator ##########
N_steps_per_sample = 10
dt = 1/fs/N_steps_per_sample
# Build an integrator for this system: Runge Kutta 4 integrator
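# One sub-step of the classical fourth-order Runge-Kutta scheme:
#   x+ = x + dt/6*(k1 + 2*k2 + 2*k3 + k4)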
k1 = ode(states, controls, params)
k2 = ode(states + dt/2.0*k1, controls, params)
k3 = ode(states + dt/2.0*k2, controls, params)
k4 = ode(states + dt*k3, controls, params)
states_final = states + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
# Create a function that simulates one step propagation in a sample
one_step = Function('one_step',[states, controls, params],[states_final])
X = states
for i in range(N_steps_per_sample):
X = one_step(X, controls, params)
# Create a function that simulates all step propagation on a sample
one_sample = Function('one_sample',[states, controls, params], [X])
# speedup trick: expand into scalar operations
one_sample = one_sample.expand()
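# expand() converts the MX function into an equivalent SX function built from
# scalar operations, which typically evaluates faster at the cost of memory.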
############ Simulating the system ##########
all_samples = one_sample.mapaccum('all_samples', N)
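# mapaccum chains N calls of one_sample into a single Function: the state
# output of call k is fed back as the state input of call k+1, the other
# inputs are read column by column, and all N intermediate states are
# returned stacked horizontally (a 2 x N matrix) without unrolling the graph.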
# Choose an excitation signal
u_data = 0.1*np.random.rand(N,1)
x0 = DM([0,0])
X_measured = all_samples(x0, u_data, repmat(param_truth, 1, N))
y_data = X_measured[0,:].T
############ Identifying the simulated system: single shooting strategy ##########
# Note, it is in general a good idea to scale your decision variables such
# that they are in the order of ~0.1..100
X_symbolic = all_samples(x0, u_data, repmat(params*scale, 1, N))
e = y_data-X_symbolic[0,:].T
M.setInit(param_guess[0])
c.setInit(param_guess[1])
k.setInit(param_guess[2])
k_NL.setInit(param_guess[3])
options = {}
options["codegen"] = True
print('Single shooting...')
# Hand in a vector objective -> interpreted as 2-norm
# such that Gauss-Newton can be performed
optisolve(e, [], options)
# Recover the identified parameters in physical units (undo the scaling)
print(optival(M)*1e-6)
print(optival(c)*1e-4)
print(optival(k))
print(optival(k_NL))
print(sqrt(np.mean(optival(e)**2)))
assert sqrt(np.mean(optival(e)**2)) < 1e-10
############ Identifying the simulated system: multiple shooting strategy ##########
print('Multiple shooting...')
X = optivar(2, N)
params_scale = vertcat(1e-6*M,c*1e-4,k,k_NL)
# Mapped evaluation: apply one_sample to all N intervals at once
Xn = one_sample.map(N)(X, u_data.T, repmat(params_scale, 1, N))
# gap-closing constraints
gaps = Xn[:,:-1]-X[:,1:]
g = gaps == 0
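# Driving the gaps to zero makes the state simulated over interval i end
# exactly where the decision variable for interval i+1 starts, so the
# intervals join into one continuous trajectory at the solution.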
e = (y_data-Xn[0,:].T).T
M.setInit(5)
c.setInit(2.3)
k.setInit(1)
k_NL.setInit(4)
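# Initialize the state trajectory from the data: measured positions, and
# velocities approximated by finite differences of the measurements.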
x0 = horzcat(y_data, vertcat(np.diff(y_data.T).T*fs,0))
X.setInit(x0.T)
options = {}
options["codegen"] = True
# Hand in a vector objective -> interpreted as 2-norm
# such that Gauss-Newton can be performed
optisolve(e, [g], options)
# Recover the identified parameters in physical units (undo the scaling)
print(optival(M)*1e-6)
print(optival(c)*1e-4)
print(optival(k))
print(optival(k_NL))
print(sqrt(np.mean(optival(e)**2)))  # root-mean-square of the residual
assert sqrt(np.mean(optival(e)**2)) < 1e-10