From 1e246cb6abf826e940d6d972bdf36190f9999575 Mon Sep 17 00:00:00 2001 From: Deniz Yuret Date: Tue, 29 Sep 2015 17:06:45 -0700 Subject: [PATCH] KUnet->Knet --- LICENSE.md | 2 +- examples/adding.jl | 6 +++--- examples/linreg.jl | 2 +- examples/lstm.jl | 2 +- examples/mnist2d.jl | 2 +- examples/mnist4d.jl | 4 ++-- examples/mnistpixels.jl | 4 ++-- examples/mnistsparse.jl | 2 +- examples/predict.jl | 4 ++-- examples/rnnlm.jl | 2 +- examples/s2c.jl | 4 ++-- examples/train.jl | 4 ++-- examples/tutorial.jl | 4 ++-- src/{KUnet.jl => Knet.jl} | 2 +- src/nettest.jl | 14 +++++++------- src/util/gpu.jl | 4 ++-- src/util/linalg.jl | 2 +- test/cusparse.jl | 2 +- test/isapprox.jl | 2 +- test/rnntest.jl | 6 +++--- test/runtests.jl | 2 +- test/testcolops.jl | 2 +- test/testconvert.jl | 2 +- test/testdense.jl | 2 +- test/testkperceptron.jl | 24 ++++++++++++------------ test/testlayers.jl | 12 ++++++------ test/testlinalg.jl | 2 +- test/testloss.jl | 4 ++-- test/testperceptron.jl | 18 +++++++++--------- 29 files changed, 71 insertions(+), 71 deletions(-) rename src/{KUnet.jl => Knet.jl} (99%) diff --git a/LICENSE.md b/LICENSE.md index 418ebb7fb..19731106d 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,4 +1,4 @@ -The KUnet.jl package is licensed under the MIT "Expat" License: +The Knet.jl package is licensed under the MIT "Expat" License: > Copyright (c) 2015: Deniz Yuret. > diff --git a/examples/adding.jl b/examples/adding.jl index 56e293b52..b3bac8318 100644 --- a/examples/adding.jl +++ b/examples/adding.jl @@ -4,7 +4,7 @@ # Usage: julia adding.jl [opts], use --help for a full list of opts. using ArgParse -using KUnet +using Knet import Base: start, next, done include("irnn.jl") include("lstm.jl") @@ -244,8 +244,8 @@ main() # this way forw calculation by the last three layers are wasted # but this way we are testing the general input/output -# import KUnet: forw, back, ninputs, param, similar!, gpu, initforw, initback, setparam!, update, loss, axpy! # push, pop, get1 -# import KUnet: backprop, train, predict, nz +# import Knet: forw, back, ninputs, param, similar!, gpu, initforw, initback, setparam!, update, loss, axpy! # push, pop, get1 +# import Knet: backprop, train, predict, nz # include("../src/rnn.jl") # setparam!(net1; nesterov=0.01) diff --git a/examples/linreg.jl b/examples/linreg.jl index 60c7f3ed6..1ad5f95e1 100644 --- a/examples/linreg.jl +++ b/examples/linreg.jl @@ -1,4 +1,4 @@ -using KUnet +using Knet # Simple linear regression. diff --git a/examples/lstm.jl b/examples/lstm.jl index 387310545..5fd36a9c4 100644 --- a/examples/lstm.jl +++ b/examples/lstm.jl @@ -29,7 +29,7 @@ end ### DEAD CODE -# using KUnet +# using Knet # function LSTM(n; fbias=0, dropout=0) # Net( diff --git a/examples/mnist2d.jl b/examples/mnist2d.jl index 6024b806e..80c19f0db 100644 --- a/examples/mnist2d.jl +++ b/examples/mnist2d.jl @@ -1,7 +1,7 @@ # Handwritten digit recognition problem from http://yann.lecun.com/exdb/mnist. 
using Base.Test -using KUnet +using Knet isdefined(:MNIST) || include("mnist.jl") include("mlp.jl") setseed(42) diff --git a/examples/mnist4d.jl b/examples/mnist4d.jl index 120447c15..03ee68f90 100644 --- a/examples/mnist4d.jl +++ b/examples/mnist4d.jl @@ -2,8 +2,8 @@ # 4-D convolution test using Base.Test -using KUnet -using KUnet: params, isapprox2 +using Knet +using Knet: params, isapprox2 isdefined(:MNIST) || include("mnist.jl") setseed(42) nbatch=100 diff --git a/examples/mnistpixels.jl b/examples/mnistpixels.jl index 5fb4ee674..78ac4a2cd 100644 --- a/examples/mnistpixels.jl +++ b/examples/mnistpixels.jl @@ -2,8 +2,8 @@ # N., & Hinton, G. E. (2015). A Simple Way to Initialize Recurrent # Networks of Rectified Linear Units. arXiv preprint arXiv:1504.00941. -using KUnet -using KUnet: nextidx +using Knet +using Knet: nextidx import Base: start, next, done using ArgParse include("irnn.jl") diff --git a/examples/mnistsparse.jl b/examples/mnistsparse.jl index edf16223d..8bdfb39b4 100644 --- a/examples/mnistsparse.jl +++ b/examples/mnistsparse.jl @@ -2,7 +2,7 @@ # Testing sparse arrays. using Base.Test -using KUnet +using Knet include("mlp.jl") isdefined(:MNIST) || include("mnist.jl") diff --git a/examples/predict.jl b/examples/predict.jl index 9d5560f5b..61f85efb2 100644 --- a/examples/predict.jl +++ b/examples/predict.jl @@ -1,6 +1,6 @@ using HDF5 using ArgParse -using KUnet +using Knet function main() s = ArgParseSettings() @@ -23,7 +23,7 @@ function main() default = 128 end args = parse_args(s) - KUnet.gpu(!args["nogpu"]) + Knet.gpu(!args["nogpu"]) args["nogpu"] && blas_set_num_threads(20) x = h5read(args["x"], "/data") net = map(l->Op(l), split(args["net"],',')) diff --git a/examples/rnnlm.jl b/examples/rnnlm.jl index 066fd17e3..de91cd8c9 100644 --- a/examples/rnnlm.jl +++ b/examples/rnnlm.jl @@ -5,7 +5,7 @@ # Usage: julia rnnlm.jl ptb.train.txt ptb.valid.txt ptb.test.txt # Type julia rnnlm.jl --help for more options -using KUnet, ArgParse +using Knet, ArgParse import Base: start, next, done include("lstm.jl") diff --git a/examples/s2c.jl b/examples/s2c.jl index cfcfb96dd..32ea54b7c 100644 --- a/examples/s2c.jl +++ b/examples/s2c.jl @@ -1,7 +1,7 @@ # S2C: sequence to class model -import KUnet: params, forw, back -using KUnet: initback +import Knet: params, forw, back +using Knet: initback immutable S2C <: Model; net1; net2; params; S2C(a,b)=new(a,b,vcat(params(a),params(b))) diff --git a/examples/train.jl b/examples/train.jl index 24bd00f0e..38099795d 100644 --- a/examples/train.jl +++ b/examples/train.jl @@ -4,7 +4,7 @@ using HDF5 using ArgParse -using KUnet +using Knet function parse_commandline() s = ArgParseSettings() @@ -79,7 +79,7 @@ end function main() args = parse_commandline() - KUnet.gpu(!args["nogpu"]) + Knet.gpu(!args["nogpu"]) args["nogpu"] && blas_set_num_threads(20) x = h5read(args["x"], "/data"); y = h5read(args["y"], "/data"); diff --git a/examples/tutorial.jl b/examples/tutorial.jl index fdc355167..e3c1e0ccd 100644 --- a/examples/tutorial.jl +++ b/examples/tutorial.jl @@ -1,5 +1,5 @@ -require(Pkg.dir("KUnet/test/mnist.jl")) -using KUnet +require(Pkg.dir("Knet/test/mnist.jl")) +using Knet using MNIST: xtrn, ytrn, xtst, ytst net = [Mmul(64), Bias(), Relu(), diff --git a/src/KUnet.jl b/src/Knet.jl similarity index 99% rename from src/KUnet.jl rename to src/Knet.jl index 4031c7ad5..fe1ea022d 100644 --- a/src/KUnet.jl +++ b/src/Knet.jl @@ -1,4 +1,4 @@ -module KUnet +module Knet using Compat # Print date, expression and elapsed time after execution diff --git 
a/src/nettest.jl b/src/nettest.jl index 05f9fff22..e0f130737 100644 --- a/src/nettest.jl +++ b/src/nettest.jl @@ -1,6 +1,6 @@ using CUDArt, CUSPARSE, CUDNN, CUBLAS -#using KUnet, -#import KUnet: forw, back, loss, ninputs, overwrites, back_reads_x, back_reads_y, gpu, axpb!, @gpu, issimilar, mul2! +#using Knet, +#import Knet: forw, back, loss, ninputs, overwrites, back_reads_x, back_reads_y, gpu, axpb!, @gpu, issimilar, mul2! #using Base.LinAlg: axpy!, scale! include("util/gpu.jl") @@ -155,7 +155,7 @@ end # # (6,Par) 8a00 1e00 (10,) # # (7,Add,3,5) 8800* 1c00+ (10,5) # # (8,Add,6,7) 8800* 1c00+ (10,5) -# # (9,KUnet.Relu,8) 8c00 1c00+ (10,5) tosave +# # (9,Knet.Relu,8) 8c00 1c00+ (10,5) tosave # # (10,Par) 8e00 1a00 (10,3) # # (11,Dot,10,1) 8800* 1200* (10,5) # # (12,Par) 9000 1600 (10,10) @@ -163,7 +163,7 @@ end # # (14,Par) 9400 1400 (10,) # # (15,Add,11,13) 8200+ 1200* (10,5) # # (16,Add,14,15) 8200+ 1200* (10,5) -# # (17,KUnet.Relu,16) 9600 1200* (10,5) tosave +# # (17,Knet.Relu,16) 9600 1200* (10,5) tosave # # (18,Par) 9800 1000 (10,3) # # (19,Dot,18,1) 8200+ 0800! (10,5) # # (20,Par) 9a00 0c00 (10,10) @@ -171,7 +171,7 @@ end # # (22,Par) 9e00 0a00 (10,) # # (23,Add,19,21) 8800* 0800! (10,5) # # (24,Add,22,23) 8800* 0800! (10,5) -# # (25,KUnet.Relu,24) a000 0800! (10,5) tosave +# # (25,Knet.Relu,24) a000 0800! (10,5) tosave # # (26,Par) a200 0600 (10,3) # # (27,Dot,26,1) 8800* fe00@ (10,5) # # (28,Par) a400 0200 (10,10) @@ -179,11 +179,11 @@ end # # (30,Par) a800 0000 (10,) # # (31,Add,27,29) 8200+ fe00@ (10,5) # # (32,Add,30,31) 8200+ fe00@ (10,5) -# # (33,KUnet.Relu,32) aa00 fe00@ (10,5) tosave +# # (33,Knet.Relu,32) aa00 fe00@ (10,5) tosave # # (34,Mul,9,33) 8200+ fc00 (10,5) # # (35,Mul,17,36) 8800* fa00 (10,5) # # (36,Add,34,35) ac00 f800 (10,5) tosave,toincr,tozero,tmp=f400 -# # (37,KUnet.Tanh,36) ae00 f600 (10,5) tosave +# # (37,Knet.Tanh,36) ae00 f600 (10,5) tosave # # (38,Mul,37,25) b000 f200 (10,5) tosave,toincr,tozero,tmp=f400 # # ops: 38 diff --git a/src/util/gpu.jl b/src/util/gpu.jl index 7569ef91d..067b91a47 100644 --- a/src/util/gpu.jl +++ b/src/util/gpu.jl @@ -2,7 +2,7 @@ # loaded, not whether it is used. The user can control gpu use by # using the gpu() function. GPU = true -lpath = [Pkg.dir("KUnet/src")] +lpath = [Pkg.dir("Knet/src")] for l in ("libkunet", "libcuda", "libcudart", "libcublas", "libcudnn") isempty(Libdl.find_library([l], lpath)) && (warn("Cannot find $l");GPU=false) end @@ -35,7 +35,7 @@ macro gpu(_ex); if GPU; esc(_ex); end; end GPU && CUDArt.init!([CUDArt.CuModule(),], [CUDArt.device(),]) # Additional cuda code -const libkunet = Libdl.find_library(["libkunet"], [Pkg.dir("KUnet/src")]) +const libkunet = Libdl.find_library(["libkunet"], [Pkg.dir("Knet/src")]) # For debugging function gpumem() diff --git a/src/util/linalg.jl b/src/util/linalg.jl index b0aaad2b3..f219deb80 100644 --- a/src/util/linalg.jl +++ b/src/util/linalg.jl @@ -12,7 +12,7 @@ scale!{T}(a,x::CudaArray{T})=(scal!(length(x),convert(T,a),x,1); x) vecnorm(x::CudaArray)=nrm2(x) ### MMUL -# This is not a complete implementation. The goal is to support KUnet +# This is not a complete implementation. The goal is to support Knet # operations for sparse/dense matrices on cpu/gpu. 
The operations needed: # # mmul forw: A_mul_B!(y, w, x) A_mul_Bs!(y, w, x): cpu/gpu: kudense, array, sparse diff --git a/test/cusparse.jl b/test/cusparse.jl index 7bdecd4db..7cd9c40b5 100644 --- a/test/cusparse.jl +++ b/test/cusparse.jl @@ -1,4 +1,4 @@ -using CUDArt,CUBLAS,CUSPARSE,KUnet,Base.Test +using CUDArt,CUBLAS,CUSPARSE,Knet,Base.Test include("isapprox.jl") csc2csr{T}(x::SparseMatrixCSC{T})=CudaSparseMatrixCSR{T}(CudaArray(convert(Vector{Cint},x.colptr)), CudaArray(convert(Vector{Cint},x.rowval)), CudaArray(x.nzval), (x.n,x.m), convert(Cint,length(x.nzval)), device()) diff --git a/test/isapprox.jl b/test/isapprox.jl index 0261a302f..eaed284c8 100644 --- a/test/isapprox.jl +++ b/test/isapprox.jl @@ -1,4 +1,4 @@ -using CUDArt, KUnet +using CUDArt, Knet import Base: isapprox function isapprox(x, y; diff --git a/test/rnntest.jl b/test/rnntest.jl index 4eb4a7d88..6e20becfe 100644 --- a/test/rnntest.jl +++ b/test/rnntest.jl @@ -1,8 +1,8 @@ using Base.Test using CUDArt -using KUnet +using Knet -# import KUnet: forw, back, ninputs, param, similar!, gpu, initforw, initback, push, pop, get1 +# import Knet: forw, back, ninputs, param, similar!, gpu, initforw, initback, push, pop, get1 # include("../src/net.jl") include("isapprox.jl") @@ -39,7 +39,7 @@ r = rnn inputs = Any[y] trn = true seq = false -using KUnet: push, pop, dbg, forw, back, get1, initbatch +using Knet: push, pop, dbg, forw, back, get1, initbatch initbatch(r, inputs...; trn=trn, seq=seq, a...) for i = 1:ninputs(r) diff --git a/test/runtests.jl b/test/runtests.jl index 89b700970..328fd527b 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,5 +1,5 @@ using Compat -using KUnet +using Knet @date include("testdense.jl") @date include("testsparse.jl") @date include("testconvert.jl") diff --git a/test/testcolops.jl b/test/testcolops.jl index 1b433501b..42bbaa2f4 100644 --- a/test/testcolops.jl +++ b/test/testcolops.jl @@ -1,4 +1,4 @@ -using KUnet, CUDArt, Base.Test +using Knet, CUDArt, Base.Test iseq02(a,b)=(convert(Array,a)==convert(Array,b)) diff --git a/test/testconvert.jl b/test/testconvert.jl index dd447ec78..1ee73424f 100644 --- a/test/testconvert.jl +++ b/test/testconvert.jl @@ -1,4 +1,4 @@ -using KUnet, CUDArt, Base.Test +using Knet, CUDArt, Base.Test a=rand(3,5) diff --git a/test/testdense.jl b/test/testdense.jl index f8a7fc713..c101fb909 100644 --- a/test/testdense.jl +++ b/test/testdense.jl @@ -1,6 +1,6 @@ using Base.Test using CUDArt -using KUnet +using Knet for N in 1:5 for A in (CudaArray, Array) diff --git a/test/testkperceptron.jl b/test/testkperceptron.jl index ea9171cc7..67323b5d8 100644 --- a/test/testkperceptron.jl +++ b/test/testkperceptron.jl @@ -1,7 +1,7 @@ using Base.Test using CUDArt -using KUnet -include(Pkg.dir("KUnet/test/mnist.jl")) +using Knet +include(Pkg.dir("Knet/test/mnist.jl")) sparse32{T}(a::Array{T})=convert(SparseMatrixCSC{T,Int32}, a) xtrn = MNIST.xtrn @@ -39,7 +39,7 @@ for ker in ( ) # loc == :gpu && ker[1] == :perceptron && continue println("\n$ker, $prc, $fmt, $loc") - KUnet.gpu(loc == :gpu) + Knet.gpu(loc == :gpu) for p in (:xtrn, :xtst, :ytrn, :ytst); @eval $p=copy(MNIST.$p); end prc == :double && (for p in (:xtrn, :xtst, :ytrn, :ytst); @eval $p=convert(Array{Float64},$p); end) fmt == :sparse && (for p in (:xtrn, :xtst); @eval $p=sparse32($p); end) @@ -48,7 +48,7 @@ for ker in ( net = (ker[1] == :perceptron ? 
Op[Mmul(nc;average=true,init=initzero), PercLoss()] : - Op[KPerceptron(nc, KUnet.(ker[1]), ker[2])]) + Op[KPerceptron(nc, Knet.(ker[1]), ker[2])]) gc(); @date train(net, xtrn, ytrn; iters=niter,batch=nbatch) @date a = accuracy(ytst, predict(net, xtst)) s = isdefined(net[1],:s) ? size(net[1].s) : 0 @@ -91,7 +91,7 @@ end # for i=1:2 # ftrn = sparse(MNIST.xtrn) # ftst = sparse(MNIST.xtst) -# @show fnet = Op[KPerceptron(10, KUnet.klinear0, [0f0])] +# @show fnet = Op[KPerceptron(10, Knet.klinear0, [0f0])] # gc() # @date train(fnet, ftrn, ytrn; iters=niter,batch=nbatch) # gc() @@ -105,7 +105,7 @@ end # for i=1:2 # ltrn = sparse(MNIST.xtrn) # ltst = sparse(MNIST.xtst) -# @show lnet = Op[KPerceptron(10, KUnet.klinear, [0f0])] +# @show lnet = Op[KPerceptron(10, Knet.klinear, [0f0])] # gc() # @date train(lnet, ltrn, ytrn; iters=niter,batch=nbatch) # gc() @@ -119,7 +119,7 @@ end # for i=1:2 # qtrn = sparse(MNIST.xtrn) # qtst = sparse(MNIST.xtst) -# @show qnet = Op[KPerceptron(10, KUnet.kgauss, [g0])] +# @show qnet = Op[KPerceptron(10, Knet.kgauss, [g0])] # @date train(qnet, qtrn, ytrn; iters=niter,batch=nbatch) # @time println((i, size(qnet[1].s), # accuracy(ytst, predict(qnet, qtst)), @@ -131,7 +131,7 @@ end # for i=1:2 # qtrn = sparse(MNIST.xtrn) # qtst = sparse(MNIST.xtst) -# @show qnet = Op[KPerceptron(10, KUnet.kgauss0, [g0])] +# @show qnet = Op[KPerceptron(10, Knet.kgauss0, [g0])] # @date train(qnet, qtrn, ytrn; iters=niter,batch=nbatch) # @time println((i, size(qnet[1].s), # accuracy(ytst, predict(qnet, qtst)), @@ -143,7 +143,7 @@ end # for i=1:2 # ktrn = sparse(MNIST.xtrn) # ktst = sparse(MNIST.xtst) -# @show knet = Op[KPerceptron(10, KUnet.kpoly, [c0,d0])] +# @show knet = Op[KPerceptron(10, Knet.kpoly, [c0,d0])] # @date train(knet, ktrn, ytrn; iters=niter,batch=nbatch) # @time println((i, size(knet[1].s), # accuracy(ytst, predict(knet, ktst)), @@ -155,7 +155,7 @@ end # for i=1:2 # ktrn = sparse(MNIST.xtrn) # ktst = sparse(MNIST.xtst) -# @show knet = Op[KPerceptron(10, KUnet.kpoly, [c0,d0])] +# @show knet = Op[KPerceptron(10, Knet.kpoly, [c0,d0])] # @date train(knet, ktrn, ytrn; iters=niter,batch=nbatch) # @time println((i, size(knet[1].s), # accuracy(ytst, predict(knet, ktst)), @@ -220,7 +220,7 @@ end # if false; info("KPerceptron+kgauss") # qtrn = sparse(MNIST.xtrn) # qtst = sparse(MNIST.xtst) -# @show qnet = Op[KPerceptron(10, KUnet.kgauss, [g0])] +# @show qnet = Op[KPerceptron(10, Knet.kgauss, [g0])] # for i=1:1 # @date train(qnet, qtrn, ytrn; iters=niter,batch=nbatch) # @time println((i, size(qnet[1].s), @@ -245,7 +245,7 @@ end # end; end # if false; info("Poly+PercLoss+GPU+dense") # mmul, hcat, ctranspose do not work -# KUnet.gpu(true) +# Knet.gpu(true) # cnet = Op[Poly(c=c0,d=d0,w=CudaArray(w0)), PercLoss()] # for i=1:1 # @time train(cnet, xtrn, ytrn; iters=niter,batch=nbatch) diff --git a/test/testlayers.jl b/test/testlayers.jl index 1560aa086..ee63b8305 100644 --- a/test/testlayers.jl +++ b/test/testlayers.jl @@ -1,10 +1,10 @@ using Compat -using KUnet +using Knet using Base.Test using Base.Test: Success, Failure, Error import Base.Test: default_handler include("isapprox.jl") -if KUnet.GPU +if Knet.GPU eval(Expr(:using,:CUDArt)) eval(Expr(:using,:CUDArt,:ContiguousArray)) else @@ -159,8 +159,8 @@ end function filetest(net1) isa(net1[1], Pool) && (warn("Pooling layers cannot be saved to file yet"); return true) - KUnet.savenet("/tmp/kunet.test", net1) - net2 = KUnet.loadnet("/tmp/kunet.test") + Knet.savenet("/tmp/kunet.test", net1) + net2 = Knet.loadnet("/tmp/kunet.test") return 
all(map(iseq03, net1, net2)) end @@ -226,7 +226,7 @@ end function main(layers) global net0, x0, z0 - KUnet.gpu(false) + Knet.gpu(false) for F in (Float32,Float64) for D in 1:5 S = tuple(rand(1:20,D)...) @@ -235,7 +235,7 @@ function main(layers) net==nothing && continue # combination not supported net0, x0, z0 = net, x, z @show (F, S, L) - KUnet.GPU && (@test gputest(net, x, z)) + Knet.GPU && (@test gputest(net, x, z)) gradtest(net, x, z) @test filetest(net) end diff --git a/test/testlinalg.jl b/test/testlinalg.jl index 1486246a4..9ccd46821 100644 --- a/test/testlinalg.jl +++ b/test/testlinalg.jl @@ -1,4 +1,4 @@ -using KUnet, CUDArt, CUSPARSE, Base.Test +using Knet, CUDArt, CUSPARSE, Base.Test include("isapprox.jl") # function A_mul_B!{T}(C::KUdense{CudaArray,T,2}, A::CudaArray{T,2}, B::CudaSparseMatrixCSC{T}) diff --git a/test/testloss.jl b/test/testloss.jl index 8216334af..5de1b2e64 100644 --- a/test/testloss.jl +++ b/test/testloss.jl @@ -1,5 +1,5 @@ -using KUnet, CUDArt, CUSPARSE -using KUnet: loss, back +using Knet, CUDArt, CUSPARSE +using Knet: loss, back m = 10 n = 1000 diff --git a/test/testperceptron.jl b/test/testperceptron.jl index 678e5bb6c..fa64b95e4 100644 --- a/test/testperceptron.jl +++ b/test/testperceptron.jl @@ -1,6 +1,6 @@ -using CUDArt, KUnet, Base.Test +using CUDArt, Knet, Base.Test include("isapprox.jl") -@time include(Pkg.dir("KUnet/test/mnist.jl")) +@time include(Pkg.dir("Knet/test/mnist.jl")) ytype(X::DataType)= (X <: KUsparse{Array} ? KUdense{Array} : @@ -52,11 +52,11 @@ end # for X # DEAD CODE: -# using KUnet -# using KUnet: accuracy -# @time require(Pkg.dir("KUnet/test/mnist.jl")) +# using Knet +# using Knet: accuracy +# @time require(Pkg.dir("Knet/test/mnist.jl")) -# KUnet.gpu(false) +# Knet.gpu(false) # xtrn = float32(255*MNIST.xtrn) # xtst = float32(255*MNIST.xtst) # ytrn = float32(MNIST.ytrn) @@ -97,8 +97,8 @@ end # for X # ./(a::CudaArray{Float32,2}, ::Int64)=a # -(a::CudaArray{Float32,2}, b::CudaArray{Float32,2})=a -# # KUnet.atype(CudaArray) -# KUnet.gpu(true) +# # Knet.atype(CudaArray) +# Knet.gpu(true) # cnet = Op[Perceptron(10)] # @time for i=1:5 # train(cnet, xtrn, ytrn; iters=100) @@ -106,7 +106,7 @@ end # for X # accuracy(ytst, predict(cnet, xtst)), # accuracy(ytrn, predict(cnet, xtrn)))) # end -# # KUnet.atype(Array) +# # Knet.atype(Array) # end # if false # # Similar stuff needs to be defined for sparse cuda arrays. \ No newline at end of file
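
The hunks above amount to a mechanical rename of the KUnet module to Knet across the license, examples, sources, and tests. As a rough sketch only, and not part of this commit, a rename of this kind could be reproduced from the repository root with a short Julia script like the following; the file filters and the post-0.7 string functions (read, occursin, replace) are assumptions, not details taken from the patch, since the 2015 code base targeted an older Julia.

    # Hypothetical sketch: redo the KUnet -> Knet rename from the repository root.
    mv("src/KUnet.jl", "src/Knet.jl")              # mirrors the git rename of the module file
    for (root, dirs, files) in walkdir(".")
        occursin(".git", root) && continue         # leave git metadata alone
        for f in files
            (endswith(f, ".jl") || endswith(f, ".md")) || continue
            path = joinpath(root, f)
            txt = read(path, String)
            occursin("KUnet", txt) || continue     # touch only files that mention the old name
            write(path, replace(txt, "KUnet" => "Knet"))
        end
    end

Running such a script turns every `using KUnet` into `using Knet` and every `KUnet.foo` qualifier into `Knet.foo`, which is the change the patch records file by file.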