forked from nakabonne/tstorage
/
disk_partition.go
172 lines (153 loc) · 4.96 KB
/
disk_partition.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
package tstorage
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"time"
"github.com/bingoohuang/tstorage/internal/syscall"
)
const (
	// dataFileName is the file inside a partition directory that holds the
	// encoded data points; it is memory-mapped by openDiskPartition.
	dataFileName = "data"
	// metaFileName is the file inside a partition directory that holds the
	// partition metadata (see the meta struct).
	metaFileName = "meta.json"
)
// errInvalidPartition indicates a directory that does not contain a meta
// file and therefore cannot be opened as a disk partition.
var errInvalidPartition = errors.New("invalid partition")
// A disk partition implements a partition that uses local disk as a storage.
// It mainly has two files, data file and meta file.
// The data file is memory-mapped and read only; no need to lock at all.
type diskPartition struct {
	logger Logger
	// file descriptor of data file
	f *os.File
	// directory holding the data and meta files for this partition
	dirPath string
	// memory-mapped file backed by f
	mappedFile []byte
	// decoded contents of the meta file, loaded once at open time
	meta meta
	// duration to store data
	retention time.Duration
}
// meta is a mapper for a meta file, which is put for each partition.
// Note that the CreatedAt is surely timestamped by tstorage but Min/Max Timestamps are likely to do by other process.
type meta struct {
	// time at which this partition was flushed to disk
	CreatedAt time.Time `json:"createdAt"`
	// per-metric index into the memory-mapped data file, keyed by the
	// marshaled metric name (see marshalMetricName)
	Metrics map[string]diskMetric `json:"metrics"`
	// smallest timestamp across all data points in the partition
	MinTimestamp int64 `json:"minTimestamp"`
	// largest timestamp across all data points in the partition
	MaxTimestamp int64 `json:"maxTimestamp"`
	// total number of data points in the partition
	NumDataPoints int `json:"numDataPoints"`
}
// diskMetric holds meta data to access actual data from the memory-mapped file.
type diskMetric struct {
	// marshaled metric name this entry describes
	Name string `json:"name"`
	// byte offset of this metric's encoded series within the data file
	Offset int64 `json:"offset"`
	// smallest timestamp among this metric's data points
	MinTimestamp int64 `json:"minTimestamp"`
	// largest timestamp among this metric's data points
	MaxTimestamp int64 `json:"maxTimestamp"`
	// number of encoded data points for this metric
	NumDataPoints int64 `json:"numDataPoints"`
}
// openDiskPartition first maps the data file into memory with memory-mapping.
// It returns errInvalidPartition if dirPath has no meta file, and
// ErrNoDataPoints if the data file is empty. On success the returned
// partition retains the open data-file descriptor for its lifetime.
func openDiskPartition(dirPath string, retention time.Duration, unmarshaler MetaUnmarshaler, logger Logger) (partition, error) {
	if dirPath == "" {
		return nil, fmt.Errorf("dir path is required")
	}
	metaFilePath := filepath.Join(dirPath, metaFileName)
	if _, err := os.Stat(metaFilePath); errors.Is(err, os.ErrNotExist) {
		return nil, errInvalidPartition
	} else if err != nil {
		// BUG FIX: stat errors other than ErrNotExist (e.g. permission
		// denied) were previously ignored and the open proceeded anyway.
		return nil, fmt.Errorf("failed to stat metadata file: %w", err)
	}
	// Map data to the memory
	dataPath := filepath.Join(dirPath, dataFileName)
	f, err := os.Open(dataPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read data file: %w", err)
	}
	// BUG FIX: the original deferred f.Close(), so the struct below kept a
	// closed *os.File in its f field. Keep f open on success (it backs the
	// mapping and is stored for later use) and close it only on error paths.
	info, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, fmt.Errorf("failed to fetch file info: %w", err)
	}
	if info.Size() == 0 {
		f.Close()
		return nil, ErrNoDataPoints
	}
	mapped, err := syscall.Mmap(int(f.Fd()), int(info.Size()))
	if err != nil {
		f.Close()
		return nil, fmt.Errorf("failed to perform mmap: %w", err)
	}
	// Read metadata to the heap
	m := meta{}
	mf, err := os.ReadFile(metaFilePath)
	if err != nil {
		f.Close()
		return nil, fmt.Errorf("failed to read metadata: %w", err)
	}
	if err := unmarshaler(mf, &m); err != nil {
		f.Close()
		return nil, fmt.Errorf("failed to decode metadata: %w", err)
	}
	return &diskPartition{
		dirPath:    dirPath,
		meta:       m,
		f:          f,
		mappedFile: mapped,
		retention:  retention,
		logger:     logger,
	}, nil
}
// insertRows always fails: a disk partition is immutable and rejects writes.
func (d *diskPartition) insertRows(_ []Row) ([]Row, error) {
	err := fmt.Errorf("can't insert rows into disk partition")
	return nil, err
}
// selectDataPoints decodes and returns the data points of the given metric
// whose timestamps fall within [start, end). It reads from the partition's
// memory-mapped data file via the offset recorded in its metadata.
func (d *diskPartition) selectDataPoints(metric string, labels []Label, start, end int64) ([]*DataPoint, error) {
	if d.needClean(nil) {
		return nil, fmt.Errorf("this partition is needClean: %w", ErrNoDataPoints)
	}
	name := marshalMetricName(metric, labels)
	dm, found := d.meta.Metrics[name]
	if !found {
		return nil, ErrNoDataPoints
	}
	reader := bytes.NewReader(d.mappedFile)
	if _, err := reader.Seek(dm.Offset, io.SeekStart); err != nil {
		return nil, fmt.Errorf("failed to seek: %w", err)
	}
	dec, err := newSeriesDecoder(reader)
	if err != nil {
		return nil, fmt.Errorf("failed to generate decoder for metric %q in %q: %w", name, d.dirPath, err)
	}
	// TODO: Divide fixed-length chunks when flushing, and index them.
	selected := make([]*DataPoint, 0, dm.NumDataPoints)
	for n := int64(0); n < dm.NumDataPoints; n++ {
		p := &DataPoint{}
		if err := dec.decodePoint(p); err != nil {
			return nil, fmt.Errorf("failed to decode point of metric %q in %q: %w", name, d.dirPath, err)
		}
		switch {
		case p.Timestamp < start:
			// Before the window: keep decoding, points are stored in order.
			continue
		case p.Timestamp >= end:
			// Past the window: nothing later can match, stop early.
			return selected, nil
		default:
			selected = append(selected, p)
		}
	}
	return selected, nil
}
// minTimestamp returns the smallest timestamp in the partition, per its metadata.
func (d *diskPartition) minTimestamp() int64 { return d.meta.MinTimestamp }
// maxTimestamp returns the largest timestamp in the partition, per its metadata.
func (d *diskPartition) maxTimestamp() int64 { return d.meta.MaxTimestamp }
// size returns the total number of data points recorded in the metadata.
func (d *diskPartition) size() int { return d.meta.NumDataPoints }
// active reports whether the partition accepts writes.
// Disk partition is immutable, so this is always false.
func (d *diskPartition) active() bool { return false }
// clean removes the partition's directory and everything inside it.
func (d *diskPartition) clean() error {
	err := os.RemoveAll(d.dirPath)
	if err == nil {
		return nil
	}
	return fmt.Errorf("failed to remove all files inside the partition (%d~%d): %w", d.minTimestamp(), d.maxTimestamp(), err)
}
// needClean reports whether this partition should be removed.
// When a positive recycle budget is supplied, the partition is unconditionally
// marked for cleaning and its on-disk size is deducted from the budget;
// otherwise the decision falls back to the age-based retention policy.
func (d *diskPartition) needClean(recycleDirSize *int64) bool {
	if recycleDirSize != nil && *recycleDirSize > 0 {
		if dirSize, err := DirSize(d.dirPath); err != nil {
			d.logger.Printf("E! read dir size %s: %v", d.dirPath, err)
		} else {
			*recycleDirSize -= dirSize
		}
		// NOTE(review): returns true even when DirSize fails, in which case the
		// budget is NOT decremented — confirm this best-effort behavior is intended.
		return true
	}
	// Age-based retention: clean once the partition is older than d.retention.
	diff := time.Since(d.meta.CreatedAt)
	return diff > d.retention
}