
Commit

Merge 9da0592 into 8ea74b8
chewxy committed Dec 30, 2020
2 parents 8ea74b8 + 9da0592 commit 8a58f6b
Showing 37 changed files with 2,417 additions and 133 deletions.
2 changes: 1 addition & 1 deletion .github/FUNDING.yml
@@ -1,6 +1,6 @@
# These are supported funding model platforms

-github: [chewxy] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+github: [chewxy, owulveryck, dcu] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
4 changes: 2 additions & 2 deletions .github/workflows/go.yml
@@ -36,8 +36,8 @@ jobs:
uses: actions/checkout@v2
- name: Run tests
run: |
-go test -v -race
-go test -race -tags=${{ matrix.tags }}
+go test -v -race -timeout 20m
+go test -race -timeout 20m -tags=${{ matrix.tags }}
coverage:
strategy:
66 changes: 42 additions & 24 deletions example_concurrent_training_test.go
@@ -14,8 +14,8 @@ const (
// cols = 53

// We'll use a nice even sized batch size, instead of weird prime numbers
-rows = 700000
-cols = 50
+rows = 30000
+cols = 5
batchSize = 100
epochs = 10
)
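For a rough sense of scale: 30000 rows at a batch size of 100 works out to 30000 / 100 = 300 batches per epoch, down from 700000 / 100 = 7000 with the old constants, which presumably helps these examples finish within the 20-minute test timeout added to the CI workflow above.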
@@ -97,7 +97,7 @@ func trainEpoch(bs []batch, ts []*concurrentTrainer, threads int) {
wg.Wait()
close(costChan)

-solver := NewVanillaSolver(WithLearnRate(1), WithBatchSize(batchSize))
+solver := NewVanillaSolver(WithLearnRate(0.01), WithBatchSize(batchSize))
for cost := range costChan {
// y := cost.Nodes[1].Value()
// yG, _ := cost.Nodes[1].Grad()
@@ -123,6 +123,21 @@ func prep() (x, y Value, bs []batch) {
xV := tensor.New(tensor.WithShape(rows, cols), tensor.WithBacking(tensor.Range(Float64, 0, cols*rows)))
yV := tensor.New(tensor.WithShape(rows), tensor.WithBacking(tensor.Range(Float64, 0, rows)))

+// prep the data: y = ΣnX, where n = col ID, x ∈ X = colID / 100
+xData := xV.Data().([]float64)
+yData := yV.Data().([]float64)
+for r := 0; r < rows; r++ {
+var sum float64
+for c := 0; c < cols; c++ {
+idx := r*cols + c
+fc := float64(c)
+v := fc * fc / 100
+xData[idx] = v
+sum += v
+}
+yData[r] = sum
+}

// batch the examples up into their respective batchSize
for i := 0; i < rows; i += batchSize {
xVS, _ := xV.Slice(S(i, i+batchSize))
@@ -134,7 +149,7 @@
}

func concurrentTraining(xV, yV Value, bs []batch, es int) {
-threads := runtime.GOMAXPROCS(-1) - 1 // reserve one thread for the CPU locked thread
+threads := runtime.GOMAXPROCS(-1) // reserve one thread for the CPU locked thread

ts := make([]*concurrentTrainer, threads)
for chunk := 0; chunk < threads; chunk++ {
@@ -160,11 +175,12 @@ func nonConcurrentTraining(xV, yV Value, es int) {

Let(x, xV)
Let(y, yV)
-solver := NewVanillaSolver(WithLearnRate(1), WithBatchSize(batchSize))
+solver := NewVanillaSolver(WithLearnRate(0.01), WithBatchSize(batchSize))
for i := 0; i < es; i++ {
vm.RunAll()
solver.Step([]ValueGrad{x, y})
vm.Reset()
+runtime.GC()
}
}

@@ -175,21 +191,22 @@ func Example_concurrentTraining() {
fmt.Printf("x:\n%1.1v", xV)
fmt.Printf("y:\n%1.1v", yV)

-// Outputx:
+// Output:
+// x:
-// ⎡ 6 7 8 9 ... 5e+01 5e+01 5e+01 5e+01
-// ⎢7e+01 7e+01 7e+01 7e+01 ... 1e+02 1e+02 1e+02 1e+02
-// ⎢1e+02 1e+02 1e+02 1e+02 ... 2e+02 2e+02 2e+02 2e+02
-// ⎢2e+02 2e+02 2e+02 2e+02 ... 2e+02 2e+02 2e+02 2e+02
+// ⎡-0.0003 0.01 0.04 0.09 0.2
+// ⎢-0.0003 0.01 0.04 0.09 0.2
+// ⎢-0.0003 0.01 0.04 0.09 0.2
+// ⎢-0.0003 0.01 0.04 0.09 0.2
// .
// .
// .
-// ⎢4e+07 4e+07 4e+07 4e+07 ... 4e+07 4e+07 4e+07 4e+07
-// ⎢4e+07 4e+07 4e+07 4e+07 ... 4e+07 4e+07 4e+07 4e+07
-// ⎢4e+07 4e+07 4e+07 4e+07 ... 4e+07 4e+07 4e+07 4e+07
-// ⎣4e+07 4e+07 4e+07 4e+07 ... 4e+07 4e+07 4e+07 4e+07
+// ⎢-0.0003 0.01 0.04 0.09 0.2
+// ⎢-0.0003 0.01 0.04 0.09 0.2
+// ⎢-0.0003 0.01 0.04 0.09 0.2
+// ⎣-0.0003 0.01 0.04 0.09 0.2
// y:
-// [-1e+02 -4e+02 -7e+02 -9e+02 ... -2e+08 -2e+08 -2e+08 -2e+08]
+// [0.3 0.3 0.3 0.3 ... 0.3 0.3 0.3 0.3]

}

func Example_nonConcurrentTraining() {
Expand All @@ -201,17 +218,18 @@ func Example_nonConcurrentTraining() {

//Output:
+// x:
-// ⎡ 6 7 8 9 ... 5e+01 5e+01 5e+01 5e+01
-// ⎢7e+01 7e+01 7e+01 7e+01 ... 1e+02 1e+02 1e+02 1e+02
-// ⎢1e+02 1e+02 1e+02 1e+02 ... 2e+02 2e+02 2e+02 2e+02
-// ⎢2e+02 2e+02 2e+02 2e+02 ... 2e+02 2e+02 2e+02 2e+02
+// ⎡-0.0003 0.01 0.04 0.09 0.2
+// ⎢-0.0003 0.01 0.04 0.09 0.2
+// ⎢-0.0003 0.01 0.04 0.09 0.2
+// ⎢-0.0003 0.01 0.04 0.09 0.2
// .
// .
// .
-// ⎢4e+07 4e+07 4e+07 4e+07 ... 4e+07 4e+07 4e+07 4e+07
-// ⎢4e+07 4e+07 4e+07 4e+07 ... 4e+07 4e+07 4e+07 4e+07
-// ⎢4e+07 4e+07 4e+07 4e+07 ... 4e+07 4e+07 4e+07 4e+07
-// ⎣4e+07 4e+07 4e+07 4e+07 ... 4e+07 4e+07 4e+07 4e+07
+// ⎢-0.0003 0.01 0.04 0.09 0.2
+// ⎢-0.0003 0.01 0.04 0.09 0.2
+// ⎢-0.0003 0.01 0.04 0.09 0.2
+// ⎣-0.0003 0.01 0.04 0.09 0.2
// y:
-// [-1e+02 -4e+02 -7e+02 -9e+02 ... -2e+08 -2e+08 -2e+08 -2e+08]
+// [0.3 0.3 0.3 0.3 ... 0.3 0.3 0.3 0.3]

}
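The new prep() code above fills the data with a rule that is easy to check by hand: every column c of x holds c*c/100, and each y value is the row sum of those entries. Below is a minimal standalone sketch of that rule (the 4×5 size and the variable names are illustrative only, not part of this package):

package main

import "fmt"

func main() {
	// Same generation rule as the example's prep():
	// x[r][c] = c*c/100 for every row; y[r] is the sum over c.
	const rows, cols = 4, 5 // tiny stand-ins for the example's 30000×5

	x := make([][]float64, rows)
	y := make([]float64, rows)
	for r := 0; r < rows; r++ {
		x[r] = make([]float64, cols)
		var sum float64
		for c := 0; c < cols; c++ {
			v := float64(c) * float64(c) / 100
			x[r][c] = v
			sum += v
		}
		y[r] = sum
	}

	fmt.Println(x[0])                 // [0 0.01 0.04 0.09 0.16]
	fmt.Printf("y[0] = %.1f\n", y[0]) // y[0] = 0.3
}

Those values line up with the 0.01 0.04 0.09 and 0.3 figures in the updated expected output; the first column presumably drifts away from 0 in the example because the solver steps x as well as y.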
50 changes: 50 additions & 0 deletions example_tensordot_test.go
@@ -0,0 +1,50 @@
package gorgonia

import (
"fmt"
)

/*
func ExampleTensordot_scalar() {
// Scalars
g := NewGraph()
a := NewScalar(g, Float64, WithValue(2.0), WithName("a"))
b := NewScalar(g, Float64, WithValue(21.0), WithName("b"))
c, err := Tensordot([]int{0}, []int{0}, a, b)
if err != nil {
fmt.Printf("Cannot call Tensordot. Error: %v\n", err)
return
}
vm := NewTapeMachine(g)
if err := vm.RunAll(); err != nil {
fmt.Printf("Cannot perform scalars. Error %v\n", err)
}
fmt.Printf("c: %v (%v) of %v", c.Value(), c.Value().Dtype(), c.Value().Shape())
// Output:
//...
}
*/
func ExampleTensordot_vectors() {
g := NewGraph()
a := NewVector(g, Float64, WithName("a"), WithShape(2), WithInit(RangedFrom(2)))
b := NewVector(g, Float64, WithName("b"), WithShape(2), WithInit(RangedFrom(21)))

c, err := Tensordot([]int{0}, []int{0}, a, b)
if err != nil {
fmt.Printf("Cannot call Tensordot. Error: %v\n", err)
return
}

vm := NewTapeMachine(g)
if err := vm.RunAll(); err != nil {
fmt.Printf("Cannot perform tensordot on vectors. Error %v\n", err)
}
fmt.Printf("a %v b %v ", a.Value(), b.Value())
fmt.Printf("c: %v (%v) of %v", c.Value(), c.Type(), c.Value().Shape())

// Output:
// a [2 3] b [21 22] c: [108] (float64) of (1)

}
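As a quick cross-check on the expected output: contracting two vectors over their single shared axis is just the dot product, 2·21 + 3·22 = 42 + 66 = 108, and the contraction leaves a one-element result, which is why the shape is reported as (1).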
5 changes: 5 additions & 0 deletions examples/charRNN/go.mod
@@ -1,3 +1,8 @@
module main

go 1.15

+require (
+gorgonia.org/gorgonia v0.9.15
+gorgonia.org/tensor v0.9.15
+)
