Changes for Julia v0.6
dmbates committed Jan 3, 2018
1 parent 787e938 commit 14dd880
Showing 12 changed files with 75 additions and 66 deletions.
10 changes: 7 additions & 3 deletions .travis.yml
@@ -1,11 +1,15 @@
language: julia
os:
- linux
- osx
julia:
- 0.5
- 0.6
- nightly
notifications:
email: false
matrix:
allow_failures:
- julia: nightly
before_install:
- if [ `uname` = "Linux" ]; then
sudo apt-get install gfortran -y;
@@ -14,6 +18,6 @@ before_install:
fi
script:
- if [[ -a .git/shallow ]]; then git fetch --unshallow; fi
- if [ $TRAVIS_JULIA_VERSION = "nightly" ]; then julia --check-bounds=yes -e 'Pkg.clone(pwd()); Pkg.test("Lasso"; coverage=true)'; else julia -e 'Pkg.clone(pwd()); Pkg.test("Lasso")'; fi
- if [ $TRAVIS_JULIA_VERSION = "0.6" ]; then julia --check-bounds=yes -e 'Pkg.clone(pwd()); Pkg.test("Lasso"; coverage=true)'; else julia -e 'Pkg.clone(pwd()); Pkg.test("Lasso")'; fi
after_success:
- if [ $TRAVIS_JULIA_VERSION = "nightly" ]; then julia -e 'cd(Pkg.dir("Lasso")); Pkg.add("Coverage"); using Coverage; Coveralls.submit(Coveralls.process_folder())'; fi
- if [ $TRAVIS_JULIA_VERSION = "0.6" ]; then julia -e 'cd(Pkg.dir("Lasso")); Pkg.add("Coverage"); using Coverage; Coveralls.submit(Coveralls.process_folder())'; fi
4 changes: 2 additions & 2 deletions REQUIRE
@@ -1,6 +1,6 @@
julia 0.5
julia 0.6
StatsBase 0.8
Distributions
GLM 0.6.1
GLM 0.10.1
Reexport
MLBase
6 changes: 3 additions & 3 deletions perf/perf.jl
@@ -15,14 +15,14 @@ function makeXY(ρ, nsamples, nfeatures)
(X, y)
end

type GLMNetOp{Dist,Naive} end
mutable struct GLMNetOp{Dist,Naive} end
calc{Dist,Naive}(::GLMNetOp{Dist,Naive}, X, y) = glmnet(X, y, Dist(), naivealgorithm=Naive)
calc(::GLMNetOp{Binomial}, X, y) = glmnet(X, y, Binomial())

type LassoOp{Dist,Naive} end
mutable struct LassoOp{Dist,Naive} end
calc{Dist,Naive}(::LassoOp{Dist,Naive}, X, y) = fit(LassoPath, X, y, Dist(), naivealgorithm=Naive, criterion=:coef)

type LassoBenchmark{Op} <: Proc end
mutable struct LassoBenchmark{Op} <: Proc end
Base.length(p::LassoBenchmark, n) = 0
Base.isvalid(p::LassoBenchmark, n) = true
Base.start(p::LassoBenchmark, n) = (gc(); inputs[n])
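These perf-script changes show the core 0.6 migration applied throughout this commit: the 0.5 keyword `type` becomes `mutable struct`, `immutable` becomes `struct`, and method type parameters move from `f{T}(...)` into a trailing `where` clause. A minimal sketch of the new syntax, using a hypothetical type rather than package code:

# Julia 0.6; under 0.5 this was written `type Accumulator{T<:Real} ... end`
mutable struct Accumulator{T<:Real}
    total::T                    # mutable struct: fields may be reassigned
end

# 0.5 spelling: add!{T<:Real}(a::Accumulator{T}, x::T) = ...
add!(a::Accumulator{T}, x::T) where {T<:Real} = (a.total += x; a)

a = Accumulator(0.0)            # default outer constructor infers T = Float64
add!(a, 1.5).total              # 1.5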
28 changes: 14 additions & 14 deletions src/FusedLasso.jl
@@ -8,47 +8,47 @@ export FusedLasso
# L0-Segmentation. Journal of Computational and Graphical Statistics,
# 22(2), 246–260. doi:10.1080/10618600.2012.681238

immutable NormalCoefs{T}
struct NormalCoefs{T}
lin::T
quad::T

NormalCoefs(lin::Real) = new(lin, 0)
NormalCoefs{T}(lin::S) where {T,S <: Real} = new(T(lin), zero(T))
NormalCoefs(lin::Real, quad::Real) = new(lin, quad)
end
+{T}(a::NormalCoefs{T}, b::NormalCoefs{T}) = NormalCoefs{T}(a.lin+b.lin, a.quad+b.quad)
-{T}(a::NormalCoefs{T}, b::NormalCoefs{T}) = NormalCoefs{T}(a.lin-b.lin, a.quad-b.quad)
+{T}(a::NormalCoefs{T}, b::Real) = NormalCoefs{T}(a.lin+b, a.quad)
-{T}(a::NormalCoefs{T}, b::Real) = NormalCoefs{T}(a.lin-b, a.quad)
*{T}(a::Real, b::NormalCoefs{T}) = NormalCoefs{T}(a*b.lin, a*b.quad)
+(a::NormalCoefs{T}, b::NormalCoefs{T}) where {T} = NormalCoefs{T}(a.lin+b.lin, a.quad+b.quad)
-(a::NormalCoefs{T}, b::NormalCoefs{T}) where {T} = NormalCoefs{T}(a.lin-b.lin, a.quad-b.quad)
+(a::NormalCoefs{T}, b::Real) where {T} = NormalCoefs{T}(a.lin+b, a.quad)
-(a::NormalCoefs{T}, b::Real) where {T} = NormalCoefs{T}(a.lin-b, a.quad)
*(a::Real, b::NormalCoefs{T}) where {T} = NormalCoefs{T}(a*b.lin, a*b.quad)

# Implements Algorithm 2 lines 8 and 19
solveforbtilde{T}(a::NormalCoefs{T}, lhs::Real) = (lhs - a.lin)/(2 * a.quad)

# These are marginally faster than computing btilde explicitly because
# they avoid division
btilde_lt{T}(a::NormalCoefs{T}, lhs::Real, x::Real) = lhs - a.lin > 2 * a.quad * x
btilde_gt{T}(a::NormalCoefs{T}, lhs::Real, x::Real) = lhs - a.lin < 2 * a.quad * x
btilde_lt(a::NormalCoefs{T}, lhs::Real, x::Real) where {T} = lhs - a.lin > 2 * a.quad * x
btilde_gt(a::NormalCoefs{T}, lhs::Real, x::Real) where {T} = lhs - a.lin < 2 * a.quad * x

immutable Knot{T,S}
struct Knot{T,S}
pos::T
coefs::S
sign::Int8
end

immutable FusedLasso{T,S} <: RegressionModel
struct FusedLasso{T,S} <: RegressionModel
β::Vector{T} # Coefficients
knots::Vector{Knot{T,S}} # Active knots
bp::Matrix{T} # Backpointers
end

function StatsBase.fit{T}(::Type{FusedLasso}, y::AbstractVector{T}, λ::Real; dofit::Bool=true)
function StatsBase.fit(::Type{FusedLasso}, y::AbstractVector{T}, λ::Real; dofit::Bool=true) where T
S = NormalCoefs{T}
flsa = FusedLasso{T,S}(Array(T, length(y)), Array(Knot{T,S}, 2), Array(T, 2, length(y)-1))
flsa = FusedLasso{T,S}(Vector{T}(length(y)), Vector{Knot{T,S}}(2), Matrix{T}(2, length(y)-1))
dofit && fit!(flsa, y, λ)
flsa
end

function StatsBase.fit!{T,S}(flsa::FusedLasso{T,S}, y::AbstractVector{T}, λ::Real)
function StatsBase.fit!(flsa::FusedLasso{T,S}, y::AbstractVector{T}, λ::Real) where {T,S}
β = flsa.β
knots = flsa.knots
bp = flsa.bp
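The `NormalCoefs` constructor above also illustrates the new inner-constructor rule: in 0.6 an inner constructor no longer inherits the enclosing type's parameters implicitly, so every parameter is spelled out, as in `NormalCoefs{T}(lin::S) where {T,S <: Real}`. A stripped-down sketch of the same pattern with a hypothetical type:

struct Interval{T<:Real}
    lo::T
    hi::T
    # 0.5 spelling: Interval(lo::Real, hi::Real) = new(lo, hi)
    Interval{T}(lo::Real, hi::Real) where {T<:Real} =
        lo <= hi ? new(T(lo), T(hi)) : throw(ArgumentError("lo > hi"))
end

Interval{Float64}(1, 2)   # Interval{Float64}(1.0, 2.0)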
21 changes: 11 additions & 10 deletions src/Lasso.jl
@@ -23,12 +23,12 @@ export RegularizationPath, LassoPath, GammaLassoPath, NaiveCoordinateDescent,

## HELPERS FOR SPARSE COEFFICIENTS

immutable SparseCoefficients{T} <: AbstractVector{T}
struct SparseCoefficients{T} <: AbstractVector{T}
coef::Vector{T} # Individual coefficient values
coef2predictor::Vector{Int} # Mapping from indices in coef to indices in original X
predictor2coef::Vector{Int} # Mapping from indices in original X to indices in coef

SparseCoefficients(n::Int) = new(T[], Int[], zeros(Int, n))
SparseCoefficients{T}(n::Int) where {T} = new(T[], Int[], zeros(Int, n))
end

function Base.A_mul_B!{T}(out::Vector, X::Matrix, coef::SparseCoefficients{T})
@@ -120,7 +120,7 @@ function addcoefs!(coefs::SparseMatrixCSC, newcoef::SparseCoefficients, i::Int)
end

## COEFFICIENT ITERATION IN SEQUENTIAL OR RANDOM ORDER
immutable RandomCoefficientIterator
struct RandomCoefficientIterator
rng::MersenneTwister
rg::Base.Random.RangeGeneratorInt{Int,UInt}
coeforder::Vector{Int}
Expand All @@ -129,7 +129,8 @@ const RANDOMIZE_DEFAULT = true

RandomCoefficientIterator() =
RandomCoefficientIterator(MersenneTwister(1337), Base.Random.RangeGenerator(1:2), Int[])
typealias CoefficientIterator Union{UnitRange{Int},RandomCoefficientIterator}

const CoefficientIterator = Union{UnitRange{Int},RandomCoefficientIterator}

# Iterate over coefficients in random order
function Base.start(x::RandomCoefficientIterator)
Expand All @@ -151,10 +152,10 @@ function addcoef(x::RandomCoefficientIterator, icoef::Int)
end
addcoef(x::UnitRange{Int}, icoef::Int) = 1:length(x)+1

abstract RegularizationPath{S<:Union{LinearModel,GeneralizedLinearModel},T} <: RegressionModel
abstract type RegularizationPath{S<:Union{LinearModel,GeneralizedLinearModel},T} <: RegressionModel end
## LASSO PATH

type LassoPath{S<:Union{LinearModel,GeneralizedLinearModel},T} <: RegularizationPath{S,T}
mutable struct LassoPath{S<:Union{LinearModel,GeneralizedLinearModel},T} <: RegularizationPath{S,T}
m::S
nulldev::T # null deviance
nullb0::T # intercept of null model, if one was fit
Expand All @@ -166,7 +167,7 @@ type LassoPath{S<:Union{LinearModel,GeneralizedLinearModel},T} <: Regularization
b0::Vector{T} # model intercepts
niter::Int # number of coordinate descent iterations

LassoPath(m, nulldev::T, nullb0::T, λ::Vector{T}, autoλ::Bool, Xnorm::Vector{T}) =
LassoPath{S,T}(m, nulldev::T, nullb0::T, λ::Vector{T}, autoλ::Bool, Xnorm::Vector{T}) where {S,T} =
new(m, nulldev, nullb0, λ, autoλ, Xnorm)
end

@@ -231,7 +232,7 @@ function build_model{T}(X::AbstractMatrix{T}, y::FPVector, d::Normal, l::Identit
if λ == nothing
# Find max λ
if intercept
muscratch = Array(T, length(mu))
muscratch = Vector{T}(length(mu))
@simd for i = 1:length(mu)
@inbounds muscratch[i] = (mu[i] - nullb0)*wts[i]
end
@@ -430,7 +431,7 @@ GLM.linkfun{M<:LinearModel}(path::RegularizationPath{M}) = IdentityLink()
GLM.linkfun{V<:FPVector,D<:UnivariateDistribution,L<:Link,L2<:GLM.LinPred}(path::RegularizationPath{GeneralizedLinearModel{GlmResp{V,D,L},L2}}) = L()

## Prediction function for GLMs
function StatsBase.predict{T<:AbstractFloat}(path::RegularizationPath, newX::AbstractMatrix{T}; offset::FPVector=Array(T,0), select=:all)
function StatsBase.predict(path::RegularizationPath, newX::AbstractMatrix{T}; offset::FPVector=T[], select=:all) where {T<:AbstractFloat}
# add an intercept to newX if the model has one
if hasintercept(path)
newX = [ones(eltype(newX),size(newX,1),1) newX]
@@ -472,7 +473,7 @@ deviance at each segment of the path for (potentially new) data X and y
select=:all or :AICc like in coef()
"""
function StatsBase.deviance{T<:AbstractFloat,V<:FPVector}(path::RegularizationPath, X::AbstractMatrix{T}, y::V;
offset::FPVector=Array(T,0), select=:all,
offset::FPVector=T[], select=:all,
wts::FPVector=ones(T, length(y)))
μ = predict(path, X; offset=offset, select=select)
deviance(path, y, μ, wts)
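Three more 0.6 renamings run through this file: `typealias A B` becomes a plain constant binding `const A = B`, bare `abstract` declarations take the block form `abstract type ... end`, and uninitialized arrays are allocated as `Vector{T}(n)`/`Matrix{T}(m, n)` (or the empty literal `T[]`) instead of `Array(T, dims...)`. Roughly, as a sketch assuming Julia 0.6:

# 0.5: typealias IntOrRange Union{Int,UnitRange{Int}}
const IntOrRange = Union{Int,UnitRange{Int}}

# 0.5: abstract AbstractSolver
abstract type AbstractSolver end

buf = Vector{Float64}(10)   # uninitialized 10-element buffer; 0.5: Array(Float64, 10)
off = Float64[]             # empty Vector{Float64}; 0.5: Array(Float64, 0)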
14 changes: 7 additions & 7 deletions src/TrendFiltering.jl
@@ -8,13 +8,13 @@ export TrendFilter
# Preprint arXiv:1406.2082. Retrieved from
# http://arxiv.org/abs/1406.2082

immutable DifferenceMatrix{T} <: AbstractMatrix{T}
struct DifferenceMatrix{T} <: AbstractMatrix{T}
k::Int
n::Int
b::Vector{T} # Coefficients for A_mul_B!
si::Vector{T} # State for A_mul_B!/At_mul_B!

function DifferenceMatrix(k, n)
function DifferenceMatrix{T}(k, n) where T
n >= 2*k+2 || throw(ArgumentError("signal must have length >= 2*order+2"))
b = T[ifelse(isodd(i), -1, 1)*binomial(k+1, i) for i = 0:k+1]
new(k, n, b, zeros(T, k+1))
@@ -93,9 +93,9 @@ function computeDtD(c, n)
end
end
filt!(sides, c, [one(eltype(c))], sides)
colptr = Array(Int, n+1)
rowval = Array(Int, (k+2)*(n-k-1)+(k+1)*n)
nzval = Array(Float64, (k+2)*(n-k-1)+(k+1)*n)
colptr = Vector{Int}(n+1)
rowval = Vector{Int}((k+2)*(n-k-1)+(k+1)*n)
nzval = Vector{Float64}((k+2)*(n-k-1)+(k+1)*n)
idx = 1
for i = 1:k+1
colptr[i] = idx
@@ -138,7 +138,7 @@ end
# Soft threshold
S(z, γ) = abs(z) <= γ ? zero(z) : ifelse(z > 0, z - γ, z + γ)

type TrendFilter{T,S,VT}
mutable struct TrendFilter{T,S,VT}
Dkp1::DifferenceMatrix{T} # D(k+1)
Dk::DifferenceMatrix{T} # D(k)
DktDk::SparseMatrixCSC{T,Int} # Dk'Dk
@@ -186,7 +186,7 @@ function StatsBase.fit!{T}(tf::TrendFilter{T}, y::AbstractVector{T}, λ::Real; n
# Check for convergence
A_mul_B!(Dkp1β, Dkp1, β)
oldobj = obj
obj = sumsqdiff(y, β)/2 + λ*sumabs(Dkp1β)
obj = sumsqdiff(y, β)/2 + λ*sum(abs, Dkp1β)
abs(oldobj - obj) < abs(obj * tol) && break

# Eq. 12 (update α)
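Alongside the constructor updates, note `sumabs(Dkp1β)` becoming `sum(abs, Dkp1β)`: 0.6 deprecates the specialized `sumabs`/`sumabs2` reducers in favor of the general `sum(f, itr)` form, which applies `f` elementwise during the reduction without allocating an intermediate array. For example:

x = [-1.0, 2.0, -3.0]
sum(abs, x)     # 6.0;  replaces sumabs(x)
sum(abs2, x)    # 14.0; replaces sumabs2(x)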
33 changes: 17 additions & 16 deletions src/coordinate_descent.jl
@@ -18,8 +18,9 @@ function P{T}(α::T, β::SparseCoefficients{T}, ω::Vector{T})
x
end

abstract CoordinateDescent{T,Intercept,M<:AbstractMatrix} <: LinPred
type NaiveCoordinateDescent{T,Intercept,M<:AbstractMatrix,S<:CoefficientIterator,W<:Union{Vector,Void}} <: CoordinateDescent{T,Intercept,M}
abstract type CoordinateDescent{T,Intercept,M<:AbstractMatrix} <: LinPred end

mutable struct NaiveCoordinateDescent{T,Intercept,M<:AbstractMatrix,S<:CoefficientIterator,W<:Union{Vector,Void}} <: CoordinateDescent{T,Intercept,M}
X::M # original design matrix
μy::T # mean of y at current weights
μX::Vector{T} # mean of X at current weights (in predictor order)
Expand All @@ -37,9 +38,9 @@ type NaiveCoordinateDescent{T,Intercept,M<:AbstractMatrix,S<:CoefficientIterator
tol::T # tolerance
ω::W # coefficient-specific penalty weights

NaiveCoordinateDescent(X::M, α::Real, maxncoef::Int, tol::Real, coefitr::S, ω::Union{Vector{T},Void}) =
new(X, zero(T), zeros(T, size(X, 2)), zeros(T, maxncoef), Array(T, size(X, 1)), zero(T),
Array(T, size(X, 1)), Array(T, size(X, 1)), convert(T, NaN), coefitr, convert(T, NaN),
NaiveCoordinateDescent{T,Intercept,M,S,W}(X::M, α::Real, maxncoef::Int, tol::Real, coefitr::S, ω::Union{Vector{T},Void}) where {T,Intercept,M,S,W} =
new(X, zero(T), zeros(T, size(X, 2)), zeros(T, maxncoef), Vector{T}(size(X, 1)), zero(T),
Vector{T}(size(X, 1)), Vector{T}(size(X, 1)), convert(T, NaN), coefitr, convert(T, NaN),
α, typemax(Int), maxncoef, tol, ω)
end

@@ -82,7 +83,7 @@ function computeXssq{T,Intercept}(cd::NaiveCoordinateDescent{T,Intercept}, ipred
ssq
end

function computeXssq{T,Intercept,M<:SparseMatrixCSC}(cd::NaiveCoordinateDescent{T,Intercept,M}, ipred::Int)
function computeXssq(cd::NaiveCoordinateDescent{T,Intercept,M}, ipred::Int) where {T,Intercept,M<:SparseMatrixCSC}
@extractfields cd X weights
@extractfields X rowval nzval colptr
μ = Intercept ? cd.μX[ipred] : zero(T)
Expand All @@ -98,8 +99,8 @@ end

# Updates CoordinateDescent object with (possibly) new y vector and
# weights
function update!{T,Intercept}(cd::NaiveCoordinateDescent{T,Intercept}, coef::SparseCoefficients{T},
y::Vector{T}, wt::Vector{T})
function update!(cd::NaiveCoordinateDescent{T,Intercept}, coef::SparseCoefficients{T},
y::Vector{T}, wt::Vector{T}) where {T,Intercept}
@extractfields cd residuals X Xssq weights oldy
copy!(weights, wt)
weightsum = cd.weightsum = sum(weights)
@@ -148,8 +149,8 @@ end
# changed coefficient is non-zero instead of updating all of them. In
# the dense case, we need to update all coefficients anyway, so this
# strategy is unneeded.
residualoffset{T}(cd::NaiveCoordinateDescent{T}) = zero(T)
residualoffset{T,M<:SparseMatrixCSC}(cd::NaiveCoordinateDescent{T,true,M}) = cd.residualoffset
residualoffset(cd::NaiveCoordinateDescent{T}) where {T} = zero(T)
residualoffset(cd::NaiveCoordinateDescent{T,true,M}) where {T,M<:SparseMatrixCSC} = cd.residualoffset

# Compute the gradient term (first term of RHS of eq. 8)
@inline function compute_grad{T}(::NaiveCoordinateDescent{T}, X::AbstractMatrix{T},
@@ -297,7 +298,7 @@ function linpred!{T}(mu::Vector{T}, cd::NaiveCoordinateDescent{T}, coef::SparseC
mu
end

type CovarianceCoordinateDescent{T,Intercept,M<:AbstractMatrix,S<:CoefficientIterator,W<:Union{Vector,Void}} <: CoordinateDescent{T,Intercept,M}
mutable struct CovarianceCoordinateDescent{T,Intercept,M<:AbstractMatrix,S<:CoefficientIterator,W<:Union{Vector,Void}} <: CoordinateDescent{T,Intercept,M}
X::M # original design matrix
μy::T # mean of y at current weights
μX::Vector{T} # mean of X at current weights
Expand All @@ -316,10 +317,10 @@ type CovarianceCoordinateDescent{T,Intercept,M<:AbstractMatrix,S<:CoefficientIte
tol::T # tolerance
ω::W # coefficient-specific penalty weights

function CovarianceCoordinateDescent(X::M, α::Real, maxncoef::Int, tol::Real, coefiter::S, ω::Union{Vector{T},Void})
new(X, zero(T), zeros(T, size(X, 2)), convert(T, NaN), Array(T, size(X, 2)),
Array(T, size(X, 2)), Array(T, maxncoef, size(X, 2)), Array(T, size(X, 1)),
Array(T, size(X, 1)), convert(T, NaN), coefiter, convert(T, NaN), α,
function CovarianceCoordinateDescent{T,Intercept,M,S,W}(X::M, α::Real, maxncoef::Int, tol::Real, coefiter::S, ω::Union{Vector{T},Void}) where {T,Intercept,M,S,W}
new(X, zero(T), zeros(T, size(X, 2)), convert(T, NaN), Vector{T}(size(X, 2)),
Vector{T}(size(X, 2)), Matrix{T}(maxncoef, size(X, 2)), Vector{T}(size(X, 1)),
Vector{T}(size(X, 1)), convert(T, NaN), coefiter, convert(T, NaN), α,
typemax(Int), maxncoef, tol, ω)
end
end
@@ -667,7 +668,7 @@ function StatsBase.fit!{S<:GeneralizedLinearModel,T}(path::RegularizationPath{S,
dev_ratio = convert(T, NaN)
dev = convert(T, NaN)
b0 = zero(T)
scratchmu = Array(T, size(X, 1))
scratchmu = Vector{T}(size(X, 1))
objold = convert(T, Inf)

if autoλ
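A subtlety in the two coordinate-descent constructors above: parameters such as `Intercept` and `W` never appear in the argument list, so under 0.5 the inner constructor picked them up implicitly, while the 0.6 form must name every parameter on the constructor and in the `where` clause, and callers supply them explicitly. A stripped-down sketch with a hypothetical type:

mutable struct Descent{T,Intercept}     # Intercept is a Bool-valued type parameter
    buf::Vector{T}
    Descent{T,Intercept}(n::Int) where {T,Intercept} = new(Vector{T}(n))
end

d = Descent{Float64,true}(5)            # both parameters given at the call site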
2 changes: 1 addition & 1 deletion src/cross_validation.jl
@@ -42,7 +42,7 @@ function cross_validate_path{T<:AbstractFloat,V<:FPVector}(path::RegularizationP
X::AbstractMatrix{T}, y::V; # potentially new data
gen=Kfold(length(y),10), # folds generator (see MLBase)
select=:CVmin, # :CVmin or :CV1se
offset::FPVector=Array(T,0),
offset::FPVector=T[],
fitargs...)
@extractfields path m λ
n,p = size(X)
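For context on the unchanged `gen=Kfold(length(y),10)` default: MLBase's `Kfold(n, k)` iterates `k` training-index subsets of `1:n`, with each fold's held-out set being the complement. A sketch of my understanding of the MLBase API, not package code:

using MLBase

for train_inds in Kfold(9, 3)
    @assert length(train_inds) == 6   # the other 3 indices form the held-out fold
end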
4 changes: 2 additions & 2 deletions src/gammalasso.jl
@@ -5,7 +5,7 @@

## GAMMA LASSO PATH

type GammaLassoPath{S<:Union{LinearModel,GeneralizedLinearModel},T} <: RegularizationPath{S,T}
mutable struct GammaLassoPath{S<:Union{LinearModel,GeneralizedLinearModel},T} <: RegularizationPath{S,T}
m::S
nulldev::T # null deviance
nullb0::T # intercept of null model, if one was fit
Expand All @@ -18,7 +18,7 @@ type GammaLassoPath{S<:Union{LinearModel,GeneralizedLinearModel},T} <: Regulariz
b0::Vector{T} # model intercepts
niter::Int # number of coordinate descent iterations

GammaLassoPath(m, nulldev::T, nullb0::T, λ::Vector{T}, autoλ::Bool, γ::Vector{T}, Xnorm::Vector{T}) =
GammaLassoPath{S,T}(m, nulldev::T, nullb0::T, λ::Vector{T}, autoλ::Bool, γ::Vector{T}, Xnorm::Vector{T}) where {S,T} =
new(m, nulldev, nullb0, λ, autoλ, γ, Xnorm)
end

1 change: 1 addition & 0 deletions test/REQUIRE
@@ -1,3 +1,4 @@
GLMNet
FactCheck
DataFrames
CSV
10 changes: 6 additions & 4 deletions test/gammalasso.jl
@@ -1,7 +1,7 @@
# Comparing with Matt Taddy's gamlr.R
# To rebuild the test cases source(gammalasso.R)
using Lasso
using GLM, FactCheck, DataFrames
using CSV, GLM, FactCheck, DataFrames

# often path length is different because of different stopping rules...
function issimilarhead(a::AbstractVector,b::AbstractVector;rtol=1e-4)
Expand All @@ -21,15 +21,17 @@ srand(243214)
facts("GammaLassoPath") do
for (family, dist, link) in (("gaussian", Normal(), IdentityLink()), ("binomial", Binomial(), LogitLink()), ("poisson", Poisson(), LogLink()))
context(family) do
data = readcsv(joinpath(datapath,"gamlr.$family.data.csv"))
data = readcsv(joinpath(datapath,"gamlr.$family.data.csv"), header=false)
y = convert(Vector{Float64},data[:,1])
X = convert(Matrix{Float64},data[:,2:end])
(n,p) = size(X)
for γ in [0 2 10]
fitname = "gamma"
# get gamlr.R params and estimates
params = readtable(joinpath(datapath,"gamlr.$family.$fitname.params.csv"))
fittable = readtable(joinpath(datapath,"gamlr.$family.$fitname.fit.csv"))
params = CSV.read(joinpath(datapath,"gamlr.$family.$fitname.params.csv"))
names!(params, Symbol.(replace.(string.(names(params)), '.', '_'))) # CSV.read does not convert '.' in column names
fittable = CSV.read(joinpath(datapath,"gamlr.$family.$fitname.fit.csv"))
names!(fittable, Symbol.(replace.(string.(names(fittable)), '.', '_')))
gcoefs = convert(Matrix{Float64},readcsv(joinpath(datapath,"gamlr.$family.$fitname.coefs.csv")))
family = params[1,:fit_family]
γ=params[1,:fit_gamma]
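The test changes swap the deprecated `DataFrames.readtable` for `CSV.read`; as the added comment notes, CSV.read of this vintage keeps `.` in column names (e.g. `fit.family`) rather than munging it to `_`, so the tests normalize the names themselves. A sketch of that normalization with hypothetical column names:

using DataFrames

df = DataFrame(Any[["gaussian"], [2.0]], [Symbol("fit.family"), Symbol("fit.gamma")])
names!(df, Symbol.(replace.(string.(names(df)), '.', '_')))
names(df)   # Symbol[:fit_family, :fit_gamma]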
