v1.6.3 #19

Merged: 12 commits, Jun 30, 2023
7 changes: 3 additions & 4 deletions DESCRIPTION
@@ -1,11 +1,10 @@
Package: DSWE
Title: Data Science for Wind Energy
Version: 1.6.1
Version: 1.6.3
Authors@R: c(
person(given = "Nitesh", family = "Kumar", role = c("aut"), email = "nitesh.kumar@tamu.edu"),
person(given = "Abhinav", family = "Prakash", role = c("aut"), email = "abhinavp@tamu.edu"),
person(given = "Abhinav", family = "Prakash", role = c("aut"), email = "abhinavp@aggienetwork.com"),
person(given = "Yu", family = "Ding", role = c("aut","cre"), email = "yuding@tamu.edu"),
person(given = "Rui", family = "Tuo", role = c("ctb","cph"), email = "ruituo@tamu.edu"),
person(given = "Effi", family = "Latiffianti", role = c("ctb","cph"), email = "latiffianti@tamu.edu"))
Description: Data science methods used in wind energy applications.
Current functionalities include creating a multi-dimensional power curve model,
@@ -26,7 +25,7 @@ URL: https://github.com/TAMU-AML/DSWE-Package, https://aml.engr.tamu.edu/book-ds
BugReports: https://github.com/TAMU-AML/DSWE-Package/issues
Encoding: UTF-8
LazyData: true
RoxygenNote: 7.1.2
RoxygenNote: 7.2.3
LinkingTo:
Rcpp (>= 1.0.4.6) ,
RcppArmadillo (>= 0.9.870.2.0)
2 changes: 1 addition & 1 deletion NAMESPACE
@@ -37,4 +37,4 @@ importFrom(stats,predict)
importFrom(stats,sd)
importFrom(stats,var)
importFrom(utils,write.table)
useDynLib(DSWE)
useDynLib(DSWE, .registration = TRUE)
16 changes: 13 additions & 3 deletions R/AMKGapSubroutines.R
@@ -22,11 +22,21 @@

get.dpill = function (cov, y)
{
bw <- KernSmooth::dpill(cov, y)
bw <- try(
expr = KernSmooth::dpill(cov, y), silent = TRUE
)
if (inherits(bw, "try-error")){
bw = sd(cov)
}
if (is.nan(bw)) {
par <- 0.06
while (is.nan(bw)) {
bw <- KernSmooth::dpill(cov, y, proptrun = par)
bw <- try(
expr = KernSmooth::dpill(cov, y, proptrun = par), silent = TRUE
)
if (inherits(bw, "try-error")){
bw = sd(cov)
}
par <- par + 0.01
}
}
@@ -172,7 +182,7 @@ bw.gap = function (y, x, id.dir = NA, id.adp = id.dir) {
0 & cutpt[[p]][length(cutpt[[p]])] >= 360)
cov.sub[which(cov.sub < cutpt[[p]][2])] <- cov.sub[which(cov.sub <
cutpt[[p]][2])] + 360
KernSmooth::dpill(cov.sub, y[id.bin])
get.dpill(cov.sub, y[id.bin])
})
})
list(bw.fix = bw.fix, bw.adp = bw.adp, id.adp = id.adp,
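A minimal standalone sketch of the fallback pattern introduced in get.dpill() above (illustrative only, not package code; the degenerate data is an assumption). KernSmooth::dpill() can error or return NaN on near-constant covariates; the try() wrapper catches the error and falls back to the sample standard deviation. The sketch condenses the package logic: the package version additionally retries dpill() with an increasing proptrun before giving up on a NaN result.

# Illustrative sketch of the fallback in get.dpill(); the data below is synthetic.
set.seed(1)
cov <- rep(270, 100) + rnorm(100, sd = 1e-8)   # nearly constant covariate
y   <- rnorm(100)

bw <- try(KernSmooth::dpill(cov, y), silent = TRUE)
if (inherits(bw, "try-error") || is.nan(bw)) {
  bw <- sd(cov)                                # fallback bandwidth, as in get.dpill()
}
bw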
9 changes: 8 additions & 1 deletion R/AMKSubroutines.R
@@ -129,7 +129,14 @@ calculateWeights = function(trainX,testpoint,bandwidth,nMultiCov,fixedCov,cirCov
computeBandwidth = function(trainY,trainX,cirCov){
bandwidth = rep(0,ncol(trainX))
for (i in 1:ncol(trainX)){
bandwidth[i] = KernSmooth::dpill(trainX[,i],trainY)
bw = try(
expr = KernSmooth::dpill(trainX[,i],trainY), silent=TRUE
)
if (inherits(bw, "try-error")) {
bandwidth[i] = sd(trainX[,i])
} else {
bandwidth[i] = bw
}
}
if(all(!is.na(cirCov))){
for (i in cirCov){
2 changes: 1 addition & 1 deletion R/CovMatchSubroutines.R
@@ -21,7 +21,7 @@
# SOFTWARE.

#' @importFrom matrixStats colMins colMaxs colSds
#' @useDynLib DSWE
#' @useDynLib DSWE, .registration = TRUE
#' @importFrom Rcpp sourceCpp

CovMatch.Mult = function(dname, cov, wgt, cov.circ){
2 changes: 2 additions & 0 deletions R/KnnPCFit.R
@@ -46,7 +46,9 @@
#' knn_model = KnnPCFit(data, xCol, yCol, subsetSelection)
#'
#' @export
#' @useDynLib DSWE, .registration = TRUE
#' @importFrom FNN knn.reg knnx.index
#'
KnnPCFit = function(data, xCol, yCol, subsetSelection = FALSE){

if(!is.matrix(data) & !is.data.frame(data)){
16 changes: 8 additions & 8 deletions R/RcppExports.R
@@ -2,34 +2,34 @@
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

computeWeightedY <- function(X, y, params) {
.Call('_DSWE_computeWeightedY', PACKAGE = 'DSWE', X, y, params)
.Call(`_DSWE_computeWeightedY`, X, y, params)
}

predictGP <- function(X, weightedY, Xnew, params) {
.Call('_DSWE_predictGP', PACKAGE = 'DSWE', X, weightedY, Xnew, params)
.Call(`_DSWE_predictGP`, X, weightedY, Xnew, params)
}

computeLogLikGP_ <- function(X, y, params) {
.Call('_DSWE_computeLogLikGP_', PACKAGE = 'DSWE', X, y, params)
.Call(`_DSWE_computeLogLikGP_`, X, y, params)
}

computeLogLikGradGP_ <- function(X, y, params) {
.Call('_DSWE_computeLogLikGradGP_', PACKAGE = 'DSWE', X, y, params)
.Call(`_DSWE_computeLogLikGradGP_`, X, y, params)
}

computeLogLikGradGPZeroMean_ <- function(X, y, params) {
.Call('_DSWE_computeLogLikGradGPZeroMean_', PACKAGE = 'DSWE', X, y, params)
.Call(`_DSWE_computeLogLikGradGPZeroMean_`, X, y, params)
}

computeDiffCov_ <- function(X1, y1, X2, y2, XT, theta, sigma_f, sigma_n, beta) {
.Call('_DSWE_computeDiffCov_', PACKAGE = 'DSWE', X1, y1, X2, y2, XT, theta, sigma_f, sigma_n, beta)
.Call(`_DSWE_computeDiffCov_`, X1, y1, X2, y2, XT, theta, sigma_f, sigma_n, beta)
}

computeConfBand_ <- function(diffCovMat, confLevel) {
.Call('_DSWE_computeConfBand_', PACKAGE = 'DSWE', diffCovMat, confLevel)
.Call(`_DSWE_computeConfBand_`, diffCovMat, confLevel)
}

matchcov <- function(ref, obj, thres, circ_pos, flag) {
.Call('_DSWE_matchcov', PACKAGE = 'DSWE', ref, obj, thres, circ_pos, flag)
.Call(`_DSWE_matchcov`, ref, obj, thres, circ_pos, flag)
}
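With the native symbols registered through useDynLib(DSWE, .registration = TRUE), the backtick-quoted routine names above are resolved against the package's registration table, so the PACKAGE = 'DSWE' argument is no longer needed. A quick, illustrative way to confirm the routines are registered once the package is installed and loaded (not part of the package sources):

# Illustrative check of native-symbol registration.
library(DSWE)
routines <- getDLLRegisteredRoutines("DSWE")
names(routines$.Call)   # expected to include "_DSWE_computeWeightedY", "_DSWE_predictGP", ...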

2 changes: 1 addition & 1 deletion R/funGPSubroutines.R
@@ -20,7 +20,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

#' @useDynLib DSWE
#' @useDynLib DSWE, .registration = TRUE
#' @importFrom Rcpp sourceCpp
#'
estimateParameters= function(datalist, covCols, yCol, opt_method, limitMemory, optimSize, rngSeed){
10 changes: 8 additions & 2 deletions R/tempGP.R
@@ -1,6 +1,6 @@
# MIT License
#
# Copyright (c) 2020 Abhinav Prakash, Rui Tuo, and Yu Ding
# Copyright (c) 2020-2022 Abhinav Prakash and Yu Ding
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -25,6 +25,7 @@
#' @param trainX A matrix with each column corresponding to one input variable.
#' @param trainY A vector with each element corresponding to the output at the corresponding row of \code{trainX}.
#' @param trainT A vector for time indices of the data points. By default, the function assigns natural numbers starting from 1 as the time indices.
#' @param max_thinning_number An integer specifying the max lag to compute the thinning number. If the PACF does not become insignificant till \code{max_thinning_number}, then \code{max_thinning_number} is used for thinning.
#' @param fast_computation A Boolean that specifies whether to do exact inference or fast approximation. Default is \code{TRUE}.
#' @param limit_memory An integer or \code{NULL}. The integer is used to sample training points during prediction to limit the total memory requirement. Setting the value to \code{NULL} would result in no sampling, that is, full training data is used for prediction. Default value is \code{5000}.
#' @param optim_control A list parameters passed to the Adam optimizer when \code{fast_computation} is set to \code{TRUE}. The default values have been tested rigorously and tend to strike a balance between accuracy and speed. \itemize{
@@ -82,6 +83,7 @@
tempGP = function(trainX, trainY, trainT = NULL,
fast_computation = TRUE,
limit_memory = 5000L,
max_thinning_number = 20L,
optim_control = list(batch_size = 100L,
learn_rate = 0.05,
max_iter = 5000L,
@@ -131,6 +133,10 @@ tempGP = function(trainX, trainY, trainT = NULL,
stop('trainX, trainY, and trainT must have the same number of data points.')
}

if (!inherits(max_thinning_number, "integer")){
stop('max_thinning_number must be an integer.')
}

trainX = trainX[order(trainT),,drop = FALSE]
trainY = trainY[order(trainT)]
trainT = trainT[order(trainT)]
Expand All @@ -145,7 +151,7 @@ tempGP = function(trainX, trainY, trainT = NULL,
limit_memory = as.integer(limit_memory)
}

thinningNumber = computeThinningNumber(trainX)
thinningNumber = computeThinningNumber(trainX, max_thinning_number)

if (thinningNumber > 0){
thinnedBins = createThinnedBins(trainX,trainY,thinningNumber)
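A minimal usage sketch of the new max_thinning_number argument added above (illustrative; the synthetic data and the value 10L are assumptions, not taken from the package examples). The class check in tempGP() requires an integer, so the value is passed as 10L:

# Illustrative only: synthetic data to exercise the new argument.
library(DSWE)
set.seed(42)
trainX <- matrix(runif(1000), ncol = 2)                  # 500 points, 2 input variables
trainY <- sin(2 * pi * trainX[, 1]) + 0.1 * rnorm(500)   # toy response
trainT <- 1:500                                          # time indices

# Cap the PACF lag search at 10 when computing the thinning number.
model <- tempGP(trainX, trainY, trainT, max_thinning_number = 10L)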
16 changes: 12 additions & 4 deletions R/tempGPSubroutines.R
@@ -1,6 +1,6 @@
# MIT License
#
# Copyright (c) 2020 Abhinav Prakash, Rui Tuo, and Yu Ding
# Copyright (c) 2020-2022 Abhinav Prakash and Yu Ding
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -20,9 +20,17 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

computeThinningNumber = function(trainX){
thinningNumber = max(apply(trainX,2,function(col)
min(which(c(1,abs(stats::pacf(col, plot = FALSE)$acf[,1,1])) <= (2/sqrt(nrow(trainX)))))))
#' @useDynLib DSWE, .registration = TRUE

computeThinningNumber = function(trainX, max_thinning_number){
thinning_vec = rep(max_thinning_number, ncol(trainX))
for (col_idx in c(1:length(thinning_vec))){
col_thinning_vec = which(c(1,abs(stats::pacf(trainX[,col_idx], plot = FALSE, lag.max = max_thinning_number)$acf[,1,1])) <= (2/sqrt(nrow(trainX))))
if (length(col_thinning_vec) != 0){
thinning_vec[col_idx] = min(col_thinning_vec)
}
}
thinningNumber = max(thinning_vec)
return(thinningNumber)
}
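For reference, an illustrative standalone version of the per-column PACF rule used in computeThinningNumber() above (not package code; the AR(1) series is an assumption). It mirrors the index logic: a leading 1 stands in for lag 0, so the selected index equals the first insignificant lag plus one, capped at max_thinning_number; the package then takes the maximum of this value across all columns of trainX.

# Illustration of the PACF-based thinning rule for a single column.
set.seed(1)
n <- 1000
x <- as.numeric(arima.sim(model = list(ar = 0.7), n = n))   # toy autocorrelated column
max_thinning_number <- 20

pacf_vals <- c(1, abs(stats::pacf(x, plot = FALSE, lag.max = max_thinning_number)$acf[, 1, 1]))
insignificant <- which(pacf_vals <= 2 / sqrt(n))             # ~95% significance band
thinning_number <- if (length(insignificant) > 0) min(insignificant) else max_thinning_number
thinning_number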
