KUnet->Knet
denizyuret committed Sep 30, 2015
1 parent f3f7b44 commit 1e246cb
Showing 29 changed files with 71 additions and 71 deletions.
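The rename is mechanical: every reference to the KUnet module becomes Knet, with no API changes. As a rough sketch of what a downstream script looks like after this commit (the script itself is illustrative, not part of the diff; the calls shown are taken from the files changed below):

    # Before this commit a user script would start with:
    #   using KUnet
    #   KUnet.gpu(true)
    # After this commit only the module name changes:
    using Knet
    Knet.gpu(true)                            # qualified calls are renamed the same way
    include(Pkg.dir("Knet/test/mnist.jl"))    # package paths move from KUnet/ to Knet/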
2 changes: 1 addition & 1 deletion LICENSE.md
@@ -1,4 +1,4 @@
-The KUnet.jl package is licensed under the MIT "Expat" License:
+The Knet.jl package is licensed under the MIT "Expat" License:

> Copyright (c) 2015: Deniz Yuret.
>
6 changes: 3 additions & 3 deletions examples/adding.jl
@@ -4,7 +4,7 @@
# Usage: julia adding.jl [opts], use --help for a full list of opts.

using ArgParse
-using KUnet
+using Knet
import Base: start, next, done
include("irnn.jl")
include("lstm.jl")
@@ -244,8 +244,8 @@ main()
# this way forw calculation by the last three layers are wasted
# but this way we are testing the general input/output

-# import KUnet: forw, back, ninputs, param, similar!, gpu, initforw, initback, setparam!, update, loss, axpy! # push, pop, get1
-# import KUnet: backprop, train, predict, nz
+# import Knet: forw, back, ninputs, param, similar!, gpu, initforw, initback, setparam!, update, loss, axpy! # push, pop, get1
+# import Knet: backprop, train, predict, nz
# include("../src/rnn.jl")

# setparam!(net1; nesterov=0.01)
2 changes: 1 addition & 1 deletion examples/linreg.jl
@@ -1,4 +1,4 @@
-using KUnet
+using Knet

# Simple linear regression.

2 changes: 1 addition & 1 deletion examples/lstm.jl
@@ -29,7 +29,7 @@ end

### DEAD CODE

-# using KUnet
+# using Knet

# function LSTM(n; fbias=0, dropout=0)
# Net(
2 changes: 1 addition & 1 deletion examples/mnist2d.jl
@@ -1,7 +1,7 @@
# Handwritten digit recognition problem from http://yann.lecun.com/exdb/mnist.

using Base.Test
-using KUnet
+using Knet
isdefined(:MNIST) || include("mnist.jl")
include("mlp.jl")
setseed(42)
4 changes: 2 additions & 2 deletions examples/mnist4d.jl
@@ -2,8 +2,8 @@
# 4-D convolution test

using Base.Test
-using KUnet
-using KUnet: params, isapprox2
+using Knet
+using Knet: params, isapprox2
isdefined(:MNIST) || include("mnist.jl")
setseed(42)
nbatch=100
4 changes: 2 additions & 2 deletions examples/mnistpixels.jl
@@ -2,8 +2,8 @@
# N., & Hinton, G. E. (2015). A Simple Way to Initialize Recurrent
# Networks of Rectified Linear Units. arXiv preprint arXiv:1504.00941.

-using KUnet
-using KUnet: nextidx
+using Knet
+using Knet: nextidx
import Base: start, next, done
using ArgParse
include("irnn.jl")
2 changes: 1 addition & 1 deletion examples/mnistsparse.jl
@@ -2,7 +2,7 @@
# Testing sparse arrays.

using Base.Test
-using KUnet
+using Knet
include("mlp.jl")

isdefined(:MNIST) || include("mnist.jl")
4 changes: 2 additions & 2 deletions examples/predict.jl
@@ -1,6 +1,6 @@
using HDF5
using ArgParse
-using KUnet
+using Knet

function main()
s = ArgParseSettings()
@@ -23,7 +23,7 @@ function main()
default = 128
end
args = parse_args(s)
-KUnet.gpu(!args["nogpu"])
+Knet.gpu(!args["nogpu"])
args["nogpu"] && blas_set_num_threads(20)
x = h5read(args["x"], "/data")
net = map(l->Op(l), split(args["net"],','))
2 changes: 1 addition & 1 deletion examples/rnnlm.jl
@@ -5,7 +5,7 @@
# Usage: julia rnnlm.jl ptb.train.txt ptb.valid.txt ptb.test.txt
# Type julia rnnlm.jl --help for more options

-using KUnet, ArgParse
+using Knet, ArgParse
import Base: start, next, done
include("lstm.jl")

4 changes: 2 additions & 2 deletions examples/s2c.jl
@@ -1,7 +1,7 @@
# S2C: sequence to class model

-import KUnet: params, forw, back
-using KUnet: initback
+import Knet: params, forw, back
+using Knet: initback

immutable S2C <: Model; net1; net2; params;
S2C(a,b)=new(a,b,vcat(params(a),params(b)))
4 changes: 2 additions & 2 deletions examples/train.jl
@@ -4,7 +4,7 @@

using HDF5
using ArgParse
-using KUnet
+using Knet

function parse_commandline()
s = ArgParseSettings()
@@ -79,7 +79,7 @@ end

function main()
args = parse_commandline()
-KUnet.gpu(!args["nogpu"])
+Knet.gpu(!args["nogpu"])
args["nogpu"] && blas_set_num_threads(20)
x = h5read(args["x"], "/data");
y = h5read(args["y"], "/data");
4 changes: 2 additions & 2 deletions examples/tutorial.jl
@@ -1,5 +1,5 @@
-require(Pkg.dir("KUnet/test/mnist.jl"))
-using KUnet
+require(Pkg.dir("Knet/test/mnist.jl"))
+using Knet
using MNIST: xtrn, ytrn, xtst, ytst

net = [Mmul(64), Bias(), Relu(),
2 changes: 1 addition & 1 deletion src/KUnet.jl → src/Knet.jl
@@ -1,4 +1,4 @@
-module KUnet
+module Knet
using Compat

# Print date, expression and elapsed time after execution
14 changes: 7 additions & 7 deletions src/nettest.jl
@@ -1,6 +1,6 @@
using CUDArt, CUSPARSE, CUDNN, CUBLAS
-#using KUnet,
-#import KUnet: forw, back, loss, ninputs, overwrites, back_reads_x, back_reads_y, gpu, axpb!, @gpu, issimilar, mul2!
+#using Knet,
+#import Knet: forw, back, loss, ninputs, overwrites, back_reads_x, back_reads_y, gpu, axpb!, @gpu, issimilar, mul2!
#using Base.LinAlg: axpy!, scale!

include("util/gpu.jl")
@@ -155,35 +155,35 @@ end
# # (6,Par) 8a00 1e00 (10,)
# # (7,Add,3,5) 8800* 1c00+ (10,5)
# # (8,Add,6,7) 8800* 1c00+ (10,5)
-# # (9,KUnet.Relu,8) 8c00 1c00+ (10,5) tosave
+# # (9,Knet.Relu,8) 8c00 1c00+ (10,5) tosave
# # (10,Par) 8e00 1a00 (10,3)
# # (11,Dot,10,1) 8800* 1200* (10,5)
# # (12,Par) 9000 1600 (10,10)
# # (13,Dot,12,38) 8200+ 1200* (10,5)
# # (14,Par) 9400 1400 (10,)
# # (15,Add,11,13) 8200+ 1200* (10,5)
# # (16,Add,14,15) 8200+ 1200* (10,5)
-# # (17,KUnet.Relu,16) 9600 1200* (10,5) tosave
+# # (17,Knet.Relu,16) 9600 1200* (10,5) tosave
# # (18,Par) 9800 1000 (10,3)
# # (19,Dot,18,1) 8200+ 0800! (10,5)
# # (20,Par) 9a00 0c00 (10,10)
# # (21,Dot,20,38) 8800* 0800! (10,5)
# # (22,Par) 9e00 0a00 (10,)
# # (23,Add,19,21) 8800* 0800! (10,5)
# # (24,Add,22,23) 8800* 0800! (10,5)
-# # (25,KUnet.Relu,24) a000 0800! (10,5) tosave
+# # (25,Knet.Relu,24) a000 0800! (10,5) tosave
# # (26,Par) a200 0600 (10,3)
# # (27,Dot,26,1) 8800* fe00@ (10,5)
# # (28,Par) a400 0200 (10,10)
# # (29,Dot,28,38) 8200+ fe00@ (10,5)
# # (30,Par) a800 0000 (10,)
# # (31,Add,27,29) 8200+ fe00@ (10,5)
# # (32,Add,30,31) 8200+ fe00@ (10,5)
-# # (33,KUnet.Relu,32) aa00 fe00@ (10,5) tosave
+# # (33,Knet.Relu,32) aa00 fe00@ (10,5) tosave
# # (34,Mul,9,33) 8200+ fc00 (10,5)
# # (35,Mul,17,36) 8800* fa00 (10,5)
# # (36,Add,34,35) ac00 f800 (10,5) tosave,toincr,tozero,tmp=f400
-# # (37,KUnet.Tanh,36) ae00 f600 (10,5) tosave
+# # (37,Knet.Tanh,36) ae00 f600 (10,5) tosave
# # (38,Mul,37,25) b000 f200 (10,5) tosave,toincr,tozero,tmp=f400

# # ops: 38
4 changes: 2 additions & 2 deletions src/util/gpu.jl
@@ -2,7 +2,7 @@
# loaded, not whether it is used. The user can control gpu use by
# using the gpu() function.
GPU = true
-lpath = [Pkg.dir("KUnet/src")]
+lpath = [Pkg.dir("Knet/src")]
for l in ("libkunet", "libcuda", "libcudart", "libcublas", "libcudnn")
isempty(Libdl.find_library([l], lpath)) && (warn("Cannot find $l");GPU=false)
end
@@ -35,7 +35,7 @@ macro gpu(_ex); if GPU; esc(_ex); end; end
GPU && CUDArt.init!([CUDArt.CuModule(),], [CUDArt.device(),])

# Additional cuda code
-const libkunet = Libdl.find_library(["libkunet"], [Pkg.dir("KUnet/src")])
+const libkunet = Libdl.find_library(["libkunet"], [Pkg.dir("Knet/src")])

# For debugging
function gpumem()
2 changes: 1 addition & 1 deletion src/util/linalg.jl
@@ -12,7 +12,7 @@ scale!{T}(a,x::CudaArray{T})=(scal!(length(x),convert(T,a),x,1); x)
vecnorm(x::CudaArray)=nrm2(x)

### MMUL
-# This is not a complete implementation. The goal is to support KUnet
+# This is not a complete implementation. The goal is to support Knet
# operations for sparse/dense matrices on cpu/gpu. The operations needed:
#
# mmul forw: A_mul_B!(y, w, x) A_mul_Bs!(y, w, x): cpu/gpu: kudense, array, sparse
2 changes: 1 addition & 1 deletion test/cusparse.jl
@@ -1,4 +1,4 @@
-using CUDArt,CUBLAS,CUSPARSE,KUnet,Base.Test
+using CUDArt,CUBLAS,CUSPARSE,Knet,Base.Test
include("isapprox.jl")

csc2csr{T}(x::SparseMatrixCSC{T})=CudaSparseMatrixCSR{T}(CudaArray(convert(Vector{Cint},x.colptr)), CudaArray(convert(Vector{Cint},x.rowval)), CudaArray(x.nzval), (x.n,x.m), convert(Cint,length(x.nzval)), device())
2 changes: 1 addition & 1 deletion test/isapprox.jl
@@ -1,4 +1,4 @@
-using CUDArt, KUnet
+using CUDArt, Knet
import Base: isapprox

function isapprox(x, y;
6 changes: 3 additions & 3 deletions test/rnntest.jl
@@ -1,8 +1,8 @@
using Base.Test
using CUDArt
-using KUnet
+using Knet

-# import KUnet: forw, back, ninputs, param, similar!, gpu, initforw, initback, push, pop, get1
+# import Knet: forw, back, ninputs, param, similar!, gpu, initforw, initback, push, pop, get1
# include("../src/net.jl")

include("isapprox.jl")
@@ -39,7 +39,7 @@ r = rnn
inputs = Any[y]
trn = true
seq = false
-using KUnet: push, pop, dbg, forw, back, get1, initbatch
+using Knet: push, pop, dbg, forw, back, get1, initbatch

initbatch(r, inputs...; trn=trn, seq=seq, a...)
for i = 1:ninputs(r)
2 changes: 1 addition & 1 deletion test/runtests.jl
@@ -1,5 +1,5 @@
using Compat
-using KUnet
+using Knet
@date include("testdense.jl")
@date include("testsparse.jl")
@date include("testconvert.jl")
2 changes: 1 addition & 1 deletion test/testcolops.jl
@@ -1,4 +1,4 @@
-using KUnet, CUDArt, Base.Test
+using Knet, CUDArt, Base.Test

iseq02(a,b)=(convert(Array,a)==convert(Array,b))

2 changes: 1 addition & 1 deletion test/testconvert.jl
@@ -1,4 +1,4 @@
-using KUnet, CUDArt, Base.Test
+using Knet, CUDArt, Base.Test

a=rand(3,5)

2 changes: 1 addition & 1 deletion test/testdense.jl
@@ -1,6 +1,6 @@
using Base.Test
using CUDArt
-using KUnet
+using Knet

for N in 1:5
for A in (CudaArray, Array)
24 changes: 12 additions & 12 deletions test/testkperceptron.jl
@@ -1,7 +1,7 @@
using Base.Test
using CUDArt
-using KUnet
-include(Pkg.dir("KUnet/test/mnist.jl"))
+using Knet
+include(Pkg.dir("Knet/test/mnist.jl"))
sparse32{T}(a::Array{T})=convert(SparseMatrixCSC{T,Int32}, a)

xtrn = MNIST.xtrn
@@ -39,7 +39,7 @@ for ker in (
)
# loc == :gpu && ker[1] == :perceptron && continue
println("\n$ker, $prc, $fmt, $loc")
-KUnet.gpu(loc == :gpu)
+Knet.gpu(loc == :gpu)
for p in (:xtrn, :xtst, :ytrn, :ytst); @eval $p=copy(MNIST.$p); end
prc == :double && (for p in (:xtrn, :xtst, :ytrn, :ytst); @eval $p=convert(Array{Float64},$p); end)
fmt == :sparse && (for p in (:xtrn, :xtst); @eval $p=sparse32($p); end)
@@ -48,7 +48,7 @@ for ker in (

net = (ker[1] == :perceptron ?
Op[Mmul(nc;average=true,init=initzero), PercLoss()] :
-Op[KPerceptron(nc, KUnet.(ker[1]), ker[2])])
+Op[KPerceptron(nc, Knet.(ker[1]), ker[2])])
gc(); @date train(net, xtrn, ytrn; iters=niter,batch=nbatch)
@date a = accuracy(ytst, predict(net, xtst))
s = isdefined(net[1],:s) ? size(net[1].s) : 0
@@ -91,7 +91,7 @@ end
# for i=1:2
# ftrn = sparse(MNIST.xtrn)
# ftst = sparse(MNIST.xtst)
-# @show fnet = Op[KPerceptron(10, KUnet.klinear0, [0f0])]
+# @show fnet = Op[KPerceptron(10, Knet.klinear0, [0f0])]
# gc()
# @date train(fnet, ftrn, ytrn; iters=niter,batch=nbatch)
# gc()
@@ -105,7 +105,7 @@ end
# for i=1:2
# ltrn = sparse(MNIST.xtrn)
# ltst = sparse(MNIST.xtst)
-# @show lnet = Op[KPerceptron(10, KUnet.klinear, [0f0])]
+# @show lnet = Op[KPerceptron(10, Knet.klinear, [0f0])]
# gc()
# @date train(lnet, ltrn, ytrn; iters=niter,batch=nbatch)
# gc()
@@ -119,7 +119,7 @@ end
# for i=1:2
# qtrn = sparse(MNIST.xtrn)
# qtst = sparse(MNIST.xtst)
-# @show qnet = Op[KPerceptron(10, KUnet.kgauss, [g0])]
+# @show qnet = Op[KPerceptron(10, Knet.kgauss, [g0])]
# @date train(qnet, qtrn, ytrn; iters=niter,batch=nbatch)
# @time println((i, size(qnet[1].s),
# accuracy(ytst, predict(qnet, qtst)),
@@ -131,7 +131,7 @@ end
# for i=1:2
# qtrn = sparse(MNIST.xtrn)
# qtst = sparse(MNIST.xtst)
-# @show qnet = Op[KPerceptron(10, KUnet.kgauss0, [g0])]
+# @show qnet = Op[KPerceptron(10, Knet.kgauss0, [g0])]
# @date train(qnet, qtrn, ytrn; iters=niter,batch=nbatch)
# @time println((i, size(qnet[1].s),
# accuracy(ytst, predict(qnet, qtst)),
@@ -143,7 +143,7 @@ end
# for i=1:2
# ktrn = sparse(MNIST.xtrn)
# ktst = sparse(MNIST.xtst)
-# @show knet = Op[KPerceptron(10, KUnet.kpoly, [c0,d0])]
+# @show knet = Op[KPerceptron(10, Knet.kpoly, [c0,d0])]
# @date train(knet, ktrn, ytrn; iters=niter,batch=nbatch)
# @time println((i, size(knet[1].s),
# accuracy(ytst, predict(knet, ktst)),
@@ -155,7 +155,7 @@ end
# for i=1:2
# ktrn = sparse(MNIST.xtrn)
# ktst = sparse(MNIST.xtst)
-# @show knet = Op[KPerceptron(10, KUnet.kpoly, [c0,d0])]
+# @show knet = Op[KPerceptron(10, Knet.kpoly, [c0,d0])]
# @date train(knet, ktrn, ytrn; iters=niter,batch=nbatch)
# @time println((i, size(knet[1].s),
# accuracy(ytst, predict(knet, ktst)),
@@ -220,7 +220,7 @@ end
# if false; info("KPerceptron+kgauss")
# qtrn = sparse(MNIST.xtrn)
# qtst = sparse(MNIST.xtst)
-# @show qnet = Op[KPerceptron(10, KUnet.kgauss, [g0])]
+# @show qnet = Op[KPerceptron(10, Knet.kgauss, [g0])]
# for i=1:1
# @date train(qnet, qtrn, ytrn; iters=niter,batch=nbatch)
# @time println((i, size(qnet[1].s),
@@ -245,7 +245,7 @@ end
# end; end

# if false; info("Poly+PercLoss+GPU+dense") # mmul, hcat, ctranspose do not work
-# KUnet.gpu(true)
+# Knet.gpu(true)
# cnet = Op[Poly(c=c0,d=d0,w=CudaArray(w0)), PercLoss()]
# for i=1:1
# @time train(cnet, xtrn, ytrn; iters=niter,batch=nbatch)
