/
data.go
155 lines (130 loc) · 3.37 KB
/
data.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
package core
import (
"bufio"
"crypto/md5"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/orca-zhang/ecache"
)
// DataAdapter abstracts the backend used to persist and fetch chunked
// object data. DefaultDataAdapter below is the local-filesystem
// implementation.
type DataAdapter interface {
	// SetOptions applies runtime options (e.g. Sync, see Write).
	SetOptions(opt Options)
	// Close shuts the adapter down; see DefaultDataAdapter.Close, which
	// waits for pending asynchronous writes.
	Close()
	// Write stores chunk sn of object dataID in bucket bktID.
	Write(c Ctx, bktID, dataID int64, sn int, buf []byte) error
	// Read returns the whole chunk sn of object dataID.
	Read(c Ctx, bktID, dataID int64, sn int) ([]byte, error)
	// ReadBytes returns size bytes starting at offset within chunk sn;
	// size == -1 means "through the end of the chunk".
	ReadBytes(c Ctx, bktID, dataID int64, sn, offset, size int) ([]byte, error)
}
// interval is both the TTL of entries in the async-write queue and the
// period of the background eviction loop started in init.
const interval = time.Second

// queue holds in-flight AsyncHandles keyed by file path until they are
// flushed and closed by the eviction hook registered in init.
// NOTE(review): the arguments (16, 1024) are presumably shard count and
// per-shard capacity — confirm against the ecache documentation.
var queue = ecache.NewLRUCache(16, 1024, interval)
func init() {
	// Eviction hook: when an entry leaves the queue, flush and close its
	// AsyncHandle. The action/status checks distinguish eviction-by-PUT
	// and explicit deletion from other events.
	// NOTE(review): the exact meaning of status (<= 0 on PUT, == 1 on DEL)
	// comes from ecache's Inspect contract — confirm against the library.
	queue.Inspect(func(action int, key string, iface *interface{}, bytes []byte, status int) {
		// evicted / updated / deleted
		if (action == ecache.PUT && status <= 0) || (action == ecache.DEL && status == 1) {
			(*iface).(*AsyncHandle).Close()
		}
	})
	// Background sweeper: ecache evicts lazily, so expired entries are
	// collected here every interval and deleted, which triggers the
	// Inspect hook above (closing the handles). This goroutine has no
	// stop signal; it lives for the life of the process.
	go func() {
		// manually evict expired items
		for {
			now := time.Now().UnixNano()
			// Collect keys first, then delete, to avoid mutating the
			// cache while walking it.
			keys := []string{}
			queue.Walk(func(key string, iface *interface{}, bytes []byte, expireAt int64) bool {
				if expireAt < now {
					keys = append(keys, key)
				}
				return true
			})
			for _, k := range keys {
				queue.Del(k)
			}
			time.Sleep(interval)
		}
	}()
}
// HasInflight reports whether any asynchronous write handles are still
// parked in the queue (i.e. not yet flushed and closed). The walk stops
// at the first entry found by returning false from the callback.
func HasInflight() (b bool) {
	queue.Walk(func(key string, iface *interface{}, bytes []byte, expireAt int64) bool {
		b = true
		return false
	})
	return
}
// AsyncHandle pairs an open data file with a buffered writer so that
// flushing and closing can be deferred off the write path (see
// DefaultDataAdapter.Write and the eviction hook in init).
type AsyncHandle struct {
	F *os.File      // underlying data file
	B *bufio.Writer // buffers writes destined for F
}

// Close flushes any buffered bytes and closes the file. Errors from
// Flush and Close are discarded — the eviction hook that calls this has
// no way to report them.
func (ah AsyncHandle) Close() {
	ah.B.Flush()
	ah.F.Close()
}
// DefaultDataAdapter is the local-filesystem implementation of
// DataAdapter. Files are laid out under ORCAS_DATA by toFilePath.
type DefaultDataAdapter struct {
	opt Options // runtime options; opt.Sync selects synchronous writes
}
// SetOptions stores the runtime options used by subsequent Write calls.
func (dda *DefaultDataAdapter) SetOptions(opt Options) {
	dda.opt = opt
}
// Close blocks until every queued asynchronous handle has been flushed
// and closed (the queue drains via expiry, see init), polling every
// 100ms.
func (dda *DefaultDataAdapter) Close() {
	for {
		if !HasInflight() {
			return
		}
		time.Sleep(100 * time.Millisecond)
	}
}
// toFilePath builds the on-disk location of one data chunk:
//
//	<path>/<bktID>/<hash[21:24]>/<hash[8:24]>/<dataID>_<sn>
//
// where hash is the uppercase hex MD5 of the "<dataID>_<sn>" file name.
// The two hash-derived directory levels fan files out so no single
// directory grows unboundedly.
// (Fix: parameter renamed bcktID -> bktID for consistency with the rest
// of the file; unexported function, all calls are positional.)
func toFilePath(path string, bktID, dataID int64, sn int) string {
	fileName := fmt.Sprintf("%d_%d", dataID, sn)
	hash := fmt.Sprintf("%X", md5.Sum([]byte(fileName)))
	return filepath.Join(path, fmt.Sprint(bktID), hash[21:24], hash[8:24], fileName)
}
// Write persists one chunk (bktID, dataID, sn) under ORCAS_DATA.
//
// In synchronous mode (opt.Sync) the data is flushed and the file closed
// before returning. Otherwise the open handle is parked in the LRU queue
// and flushed+closed by the eviction hook registered in init (at most
// ~interval later).
//
// Returns ERR_OPEN_FILE when the file cannot be created — including when
// it already exists, because of O_EXCL — or the write/flush error.
func (dda *DefaultDataAdapter) Write(c Ctx, bktID, dataID int64, sn int, buf []byte) error {
	path := toFilePath(ORCAS_DATA, bktID, dataID, sn)
	// No need to check MkdirAll: if it failed, OpenFile below fails too.
	os.MkdirAll(filepath.Dir(path), 0766)
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
	if err != nil {
		return ERR_OPEN_FILE
	}
	ah := &AsyncHandle{F: f, B: bufio.NewWriter(f)}
	if _, err = ah.B.Write(buf); err != nil {
		// Fix: don't keep/enqueue the handle after a failed write.
		ah.Close()
		return err
	}
	if dda.opt.Sync {
		// Fix: surface the flush error instead of discarding it via
		// ah.Close() (whose Flush result is dropped).
		err = ah.B.Flush()
		ah.F.Close()
		return err
	}
	// Fix: the original also ran `go ah.B.Flush()` here, which races with
	// the eviction hook calling ah.Close() on the same bufio.Writer
	// (bufio.Writer is not goroutine-safe). The queue's eviction hook is
	// now solely responsible for flushing and closing.
	queue.Put(path, ah)
	return nil
}
// Read loads the entire chunk (bktID, dataID, sn) from local disk and
// returns its contents, or the underlying read error.
func (dda *DefaultDataAdapter) Read(c Ctx, bktID, dataID int64, sn int) ([]byte, error) {
	chunkPath := toFilePath(ORCAS_DATA, bktID, dataID, sn)
	return ioutil.ReadFile(chunkPath)
}
// ReadBytes reads size bytes starting at offset within chunk
// (bktID, dataID, sn). offset == 0 with size == -1 reads the whole chunk
// via Read; size == -1 with offset > 0 reads from offset to EOF. When the
// chunk ends before size bytes are available, the bytes actually read are
// returned without error (partial-at-EOF contract of the original code).
func (dda *DefaultDataAdapter) ReadBytes(c Ctx, bktID, dataID int64, sn, offset, size int) ([]byte, error) {
	if offset == 0 && size == -1 {
		return dda.Read(c, bktID, dataID, sn)
	}
	f, err := os.Open(toFilePath(ORCAS_DATA, bktID, dataID, sn))
	if err != nil {
		return nil, ERR_OPEN_FILE
	}
	defer f.Close()
	if offset > 0 {
		// Fix: the Seek error was silently ignored.
		if _, err := f.Seek(int64(offset), io.SeekStart); err != nil {
			return nil, ERR_READ_FILE
		}
	}
	var buf []byte
	if size == -1 {
		fi, err := f.Stat()
		if err != nil || fi.Size() < int64(offset) {
			return nil, ERR_READ_FILE
		}
		buf = make([]byte, fi.Size()-int64(offset))
	} else {
		buf = make([]byte, size)
	}
	// Fix: the original issued a single bufio Read, which may legally
	// return fewer bytes than len(buf) mid-file — and in the size == -1
	// branch that short read was returned full-length with a zero-filled
	// tail. io.ReadFull keeps reading until the buffer is full or EOF.
	n, err := io.ReadFull(f, buf)
	if err == io.ErrUnexpectedEOF {
		// Chunk ended early: hand back what was read.
		return buf[:n], nil
	}
	if err != nil {
		// Includes io.EOF (nothing at all readable at offset), which the
		// original also reported as ERR_READ_FILE.
		return nil, ERR_READ_FILE
	}
	return buf, nil
}