package engine
import (
"github.com/mumax/3/cuda"
"github.com/mumax/3/data"
"log"
"path"
"strings"
)
// Slicer is implemented by quantities whose data can be retrieved as a
// data.Slice for output (see SaveAs).
type Slicer interface {
	// Slice returns the quantity's data, which may live on GPU or CPU.
	// When recycle is true, the caller must return the slice to the
	// recycle pool (cuda.Recycle) after use.
	Slice() (q *data.Slice, recycle bool)
	NComp() int       // number of components
	Name() string     // quantity name, stored in output metadata
	Unit() string     // physical unit, stored in output metadata
	Mesh() *data.Mesh // mesh the data is discretized on
}
// SaveAs saves the quantity under the given file name (transparent async I/O).
// A relative name is resolved inside the output directory OD, and a ".dump"
// extension is appended when none is present. The data is copied to the CPU
// if needed and handed off to the asynchronous writer; SaveAs itself does not
// block on disk I/O.
func SaveAs(q Slicer, fname string) {
	if !path.IsAbs(fname) && !strings.HasPrefix(fname, OD) {
		fname = path.Clean(OD + "/" + fname)
	}
	if path.Ext(fname) == "" {
		fname += ".dump"
	}
	// Fixed typo: local was previously misspelled "recylce".
	buffer, recycle := q.Slice()
	if recycle {
		defer cuda.Recycle(buffer)
	}
	info := data.Meta{Time: Time, Name: q.Name(), Unit: q.Unit()}
	AsyncSave(fname, assureCPU(buffer), info)
}
// AsyncSave queues the slice for asynchronous saving to fname.
// The slice must reside on the CPU and must not be written to after this
// call, since the disk writer may read it at any later time.
func AsyncSave(fname string, s *data.Slice, info data.Meta) {
	initQue()
	task := saveTask{fname: fname, output: s, info: info}
	saveQue <- task
}
// assureCPU returns s unchanged when it is already CPU-accessible,
// otherwise a freshly allocated host copy of it.
func assureCPU(s *data.Slice) *data.Slice {
	if s.CPUAccess() {
		return s
	}
	// Idiom fix: no else after a terminating return.
	return s.HostCopy()
}
var (
	saveQue chan saveTask    // passes save requests from AsyncSave to runSaver (lazily created by initQue)
	done    = make(chan bool) // signaled by runSaver once saveQue has been closed and fully flushed
	nOutBuf int               // number of output buffers actually in use (<= maxOutputQueLen); NOTE(review): not referenced in this file — confirm it is used elsewhere in the package
)

const maxOutputQueLen = 16 // number of outputs that can be queued for asynchronous I/O.
// initQue lazily creates the save queue and starts the disk-writer goroutine.
// The channel is buffered with maxOutputQueLen so that output bursts can be
// queued without blocking the producer, matching the documented intent of the
// queue ("the rather big queue allows big output bursts to be concurrent");
// previously the channel was created unbuffered, making every send block
// until the writer picked it up.
// NOTE(review): not safe for concurrent first use — callers appear to run on
// a single goroutine; confirm, or guard with sync.Once.
func initQue() {
	if saveQue == nil {
		saveQue = make(chan saveTask, maxOutputQueLen)
		go runSaver()
	}
}
// saveTask describes one queued output: the destination file name,
// the CPU-resident data to write, and its metadata.
type saveTask struct {
	fname  string
	output *data.Slice
	info   data.Meta
}
// runSaver is the disk-writer goroutine: it drains the save queue, flushing
// each queued output to disk in arrival order. Once saveQue has been closed
// and emptied, it signals completion on done.
func runSaver() {
	for {
		task, ok := <-saveQue
		if !ok {
			break
		}
		data.MustWriteFile(task.fname, task.output, task.info)
	}
	done <- true
}
// drainOutput is the finalizer called upon program exit. It closes the save
// queue and blocks until all queued asynchronous output has been written to
// disk. A no-op when no output was ever queued.
func drainOutput() {
	if saveQue == nil {
		return
	}
	log.Println("flushing output")
	close(saveQue)
	<-done
}