-
Notifications
You must be signed in to change notification settings - Fork 0
/
GreedyNAR.m
54 lines (46 loc) · 1.67 KB
/
GreedyNAR.m
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
function [yPred, model, absId_use] = GreedyNAR(xtr,Ytr,xte,kernel,ntr, option)
% GreedyNAR - Greedy nonlinear autoregressive (NAR) multi-fidelity GP model.
%
% Trains a chain of core GPs (one per fidelity). Fidelity 1 is trained on
% the raw inputs; each higher fidelity augments its inputs with the
% previous fidelity's predictions/outputs, restricted to the training
% indices selected at the previous fidelity.
%
% logg: v01 uses no low-fidelity groundtruth data
%
% Inputs:
%   xtr    - [N_train x dim_x] matrix, input parameters
%   Ytr    - [1 x N_fidelity] cell, each element contains the corresponding
%            output to xtr and has to be a [N_train x dim_y] matrix. Note
%            that not all data would be used for training.
%   xte    - [N_test x dim_x] matrix, testing inputs
%   kernel - kernel specification, passed through to the core GP
%            (cigp_sequen) unchanged
%   ntr    - [1 x N_fidelity] array, indicating how many training points are
%            provided for each fidelity. The first fidelity uses the first N
%            training samples; the rest is based on a greedy selection.
%   option - (optional) struct forwarded to the core GP, with fields:
%            .step         - step of increasing the training data
%            .initial_size - initial size of training data
%
% Outputs:
%   yPred     - predictions for xte at the highest fidelity
%   model     - model info (fields: submodels, absId_use)
%   absId_use - [1 x N_fidelity] cell, absolute indices (into xtr rows) of
%               the training data actually used at each fidelity
%
% Author: Wei Xing
% email address: wayne.xingle@gmail.com
% Last revision: 21-May-2020

%% initial
if nargin < 6
    option.step = 1;            % step of increasing the training data
    option.initial_size = 2;    % initial size of training data
end
coreGp_func = @cigp_sequen;     % core (greedy) GP used at every fidelity
nFidelity = length(Ytr);

% Preallocate instead of growing cell arrays inside the loop.
Model = cell(1, nFidelity);
absId_use = cell(1, nFidelity);

%% main
% Fidelity 1: train on xtr directly. The core GP reports which of its rows
% it selected via .id_use; map those back to absolute row indices of xtr.
absId_use_0 = 1:1:size(xtr,1);
f = 1;
Model{f} = coreGp_func(xtr, Ytr{f}, xte, kernel, ntr(f), option);
absId_use{f} = absId_use_0(Model{f}.id_use);

% Fidelities 2..F: NAR-style input augmentation. Test inputs are augmented
% with the previous model's test predictions; training inputs with the
% previous fidelity's training outputs, restricted to the rows selected at
% fidelity f-1.
for f = 2:nFidelity
    xte_f = [xte, Model{f-1}.yTe_pred];
    xtr_f = [xtr(absId_use{f-1},:), Ytr{f-1}(absId_use{f-1},:)];
    Model{f} = coreGp_func(xtr_f, Ytr{f}(absId_use{f-1},:), xte_f, kernel, ntr(f), option);
    % id_use is relative to xtr_f's rows; translate to absolute indices.
    absId_use{f} = absId_use{f-1}(Model{f}.id_use);
end

yPred = Model{nFidelity}.yTe_pred;
model.submodels = Model;
model.absId_use = absId_use;
end