# Uncertainty quantification of an MLP surrogate of a WARP3D model:
# compare full-grid gPC, sparse-grid gPC and Monte Carlo estimates of the
# mean and standard deviation of the output under five lognormal inputs.

import numpy as np
import chaospy as cp
import pickle

from sklearn.neural_network import MLPRegressor  # type of the pickled surrogate model


def normalize(X, X_train):
    # standardize each feature column using the mean/std of the training set
    nX = np.copy(X)
    for i in range(0, np.shape(X)[1]):
        m_i = np.mean(X_train[:, i])
        s_i = np.std(X_train[:, i])
        nX[:, i] = (X[:, i] - m_i) / s_i
    return nX


def ml_warp3d(gamma, a_0, b_0, D, sig_0):
    # evaluate the MLP surrogate for one parameter set; the five uncertain
    # inputs are passed in, the remaining material parameters are fixed
    E = 165000
    nu = 0.309
    tau_y = 40
    tau_v = 12
    n = 12
    # gamma = float(sys.argv[1])
    teta_0 = 800
    D_eta = 1.2e-09
    eta_0 = 1000000
    # a_0 = float(sys.argv[2])
    # b_0 = float(sys.argv[3])
    # D = float(sys.argv[4])
    # sig_0 = float(sys.argv[5])
    Fn_NI = 20000

    train_features = np.genfromtxt("data_2000_reduced.csv", delimiter=',')
    X_train = train_features[:, :-1]

    features = np.array([[E, nu, tau_y, tau_v, n, gamma, teta_0, D_eta, eta_0,
                          a_0, b_0, D, sig_0, Fn_NI]])
    X = features[0:1, :]
    nX = normalize(X, X_train)

    # load the model from disk
    MLP_model = pickle.load(open('finalized_model_4.sav', 'rb'))
    y = MLP_model.predict(nX)

    return y[0]  # predict returns a length-1 array


if __name__ == '__main__':
    # physical-space mean and standard deviation of the five uncertain inputs
    gamma_mu = 9.55E-08
    gamma_sig = 2.89E-09
    a0_mu = 0.0005
    a0_sig = 0.000274241
    b0_mu = 0.05
    b0_sig = 0.027424138
    D_mu = 1.00E-15
    D_sig = 8.08E-16
    sig0_mu = 525.
    sig0_sig = 274.2413779

    # moment matching: convert each (mean, std) pair to the parameters of the
    # underlying Gaussian of a lognormal distribution
    gamma_del = gamma_sig/gamma_mu
    gamma_mu_g = np.log(gamma_mu) + np.log(1/np.sqrt(gamma_del**2 + 1))
    gamma_sig_g = np.sqrt(np.log(gamma_del**2 + 1))

    a0_del = a0_sig/a0_mu
    a0_mu_g = np.log(a0_mu) + np.log(1/np.sqrt(a0_del**2 + 1))
    a0_sig_g = np.sqrt(np.log(a0_del**2 + 1))

    b0_del = b0_sig/b0_mu
    b0_mu_g = np.log(b0_mu) + np.log(1/np.sqrt(b0_del**2 + 1))
    b0_sig_g = np.sqrt(np.log(b0_del**2 + 1))

    D_del = D_sig/D_mu
    D_mu_g = np.log(D_mu) + np.log(1/np.sqrt(D_del**2 + 1))
    D_sig_g = np.sqrt(np.log(D_del**2 + 1))

    sig0_del = sig0_sig/sig0_mu
    sig0_mu_g = np.log(sig0_mu) + np.log(1/np.sqrt(sig0_del**2 + 1))
    sig0_sig_g = np.sqrt(np.log(sig0_del**2 + 1))

    # create the marginal lognormal distributions
    distr_gamma = cp.LogNormal(gamma_mu_g, gamma_sig_g)
    distr_a0 = cp.LogNormal(a0_mu_g, a0_sig_g)
    distr_b0 = cp.LogNormal(b0_mu_g, b0_sig_g)
    distr_D = cp.LogNormal(D_mu_g, D_sig_g)
    distr_sig0 = cp.LogNormal(sig0_mu_g, sig0_sig_g)

    # create the multivariate distribution
    distr_5D = cp.J(distr_gamma, distr_a0, distr_b0, distr_D, distr_sig0)
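    # Optional sanity check (not required for the analysis below): for a
    # lognormal variable, mean = exp(mu_g + sig_g**2/2) and
    # variance = (exp(sig_g**2) - 1) * exp(2*mu_g + sig_g**2), so the
    # moment-matched parameters above should reproduce the prescribed
    # physical-space statistics up to floating-point error.
    assert np.isclose(np.exp(gamma_mu_g + 0.5*gamma_sig_g**2), gamma_mu)
    assert np.isclose(np.sqrt((np.exp(gamma_sig_g**2) - 1.0)
                              * np.exp(2*gamma_mu_g + gamma_sig_g**2)), gamma_sig)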
    # quadrature order and polynomial degree per dimension
    quad_deg_1D = 3
    poly_deg_1D = 2

    # create the orthogonal polynomials
    P = cp.orth_ttr(poly_deg_1D, distr_5D)

    #################### full grid computations #####################
    # get the non-sparse (tensor-grid) quadrature nodes and weights
    nodes_full, weights_full = cp.generate_quadrature(quad_deg_1D, distr_5D,
                                                      rule='gaussian', sparse=False)

    # create vector to save the solution
    T2M = np.zeros(len(nodes_full.T))

    # evaluate the surrogate at each full-grid quadrature node;
    # each node is a vector with 5 components
    for j, node in enumerate(nodes_full.T):
        gamma = node[0]
        a_0 = node[1]
        b_0 = node[2]
        D = node[3]
        sig_0 = node[4]
        T2M[j] = ml_warp3d(gamma, a_0, b_0, D, sig_0)

    # obtain the gPC approximation
    T2M_approx = cp.fit_quadrature(P, nodes_full, weights_full, T2M)

    # compute statistics
    mean_full = cp.E(T2M_approx, distr_5D)
    var_full = cp.Var(T2M_approx, distr_5D)
    ##################################################################

    #################### sparse grid computations ####################
    # get the sparse quadrature nodes and weights
    nodes_sparse, weights_sparse = cp.generate_quadrature(quad_deg_1D, distr_5D,
                                                          rule='gaussian', sparse=True)

    # create vector to save the solution
    T2M_sparse = np.zeros(len(nodes_sparse.T))

    # evaluate the surrogate at each sparse-grid quadrature node;
    # each node is a vector with 5 components
    for j, node in enumerate(nodes_sparse.T):
        gamma = node[0]
        a_0 = node[1]
        b_0 = node[2]
        D = node[3]
        sig_0 = node[4]
        T2M_sparse[j] = ml_warp3d(gamma, a_0, b_0, D, sig_0)

    # obtain the gPC approximation
    T2M_sparse_approx = cp.fit_quadrature(P, nodes_sparse, weights_sparse, T2M_sparse)

    # compute statistics
    mean_sparse = cp.E(T2M_sparse_approx, distr_5D)
    var_sparse = cp.Var(T2M_sparse_approx, distr_5D)
    ##################################################################

    #################### Monte Carlo simulation ######################
    nodes_mc = distr_5D.sample(size=10000)

    # create vector to save the solution
    T2M_mc = np.zeros(len(nodes_mc.T))

    # evaluate the surrogate at each Monte Carlo sample;
    # each sample is a vector with 5 components
    for j, node in enumerate(nodes_mc.T):
        gamma = node[0]
        a_0 = node[1]
        b_0 = node[2]
        D = node[3]
        sig_0 = node[4]
        T2M_mc[j] = ml_warp3d(gamma, a_0, b_0, D, sig_0)

    # compute statistics
    mean_mc = np.mean(T2M_mc)
    var_mc = np.var(T2M_mc)
    ##################################################################

    print('Result:')
    print("Grid \t| #Points \t| Mean \t\t\t| Std ")
    print("-----------------------------------------------------------------")
    print("Full \t|", len(nodes_full.T), "\t\t|", "{a:1.12f}".format(a=mean_full),
          '\t|', "{a:1.12f}".format(a=np.sqrt(var_full)))
    print("Sparse \t|", len(nodes_sparse.T), "\t\t|", "{a:1.12f}".format(a=mean_sparse),
          '\t|', "{a:1.12f}".format(a=np.sqrt(var_sparse)))
    print("MC \t|", len(nodes_mc.T), "\t\t|", "{a:1.12f}".format(a=mean_mc),
          '\t|', "{a:1.12f}".format(a=np.sqrt(var_mc)))
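    # Optional extension: first-order and total Sobol indices of the full-grid
    # gPC surrogate, showing how much of the output variance each input drives.
    # This is a sketch that assumes the installed chaospy version provides the
    # Sens_m / Sens_t helpers alongside the E and Var functions used above.
    sobol_first = cp.Sens_m(T2M_approx, distr_5D)  # main effects only, one index per input
    sobol_total = cp.Sens_t(T2M_approx, distr_5D)  # includes interaction effects
    print("\nSobol indices (full grid):")
    for name, s1, st in zip(['gamma', 'a_0', 'b_0', 'D', 'sig_0'], sobol_first, sobol_total):
        print("{:>6s} \t| first: {:1.4f} \t| total: {:1.4f}".format(name, s1, st))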