/
ipc.go
178 lines (152 loc) · 5.31 KB
/
ipc.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ipc
import (
"io"
"github.com/apache/arrow/go/v11/arrow"
"github.com/apache/arrow/go/v11/arrow/arrio"
"github.com/apache/arrow/go/v11/arrow/internal/flatbuf"
"github.com/apache/arrow/go/v11/arrow/memory"
)
const (
	// Sentinel errors shared across the ipc package readers and writers.
	errNotArrowFile             = errString("arrow/ipc: not an Arrow file")
	errInconsistentFileMetadata = errString("arrow/ipc: file is smaller than indicated metadata size")
	errInconsistentSchema       = errString("arrow/ipc: tried to write record batch with different schema")
	errMaxRecursion             = errString("arrow/ipc: max recursion depth reached")
	errBigArray                 = errString("arrow/ipc: array larger than 2^31-1 in length")

	// Alignment constants (in bytes) used when laying out IPC payloads.
	kArrowAlignment    = 64 // buffers are padded to 64b boundaries (for SIMD)
	kTensorAlignment   = 64 // tensors are padded to 64b boundaries
	kArrowIPCAlignment = 8  // align on 8b boundaries in IPC
)
var (
	// paddingBytes is a block of zero bytes copied from when padding
	// output up to kArrowAlignment.
	paddingBytes [kArrowAlignment]byte
	kEOS                = [8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0, 0, 0, 0} // end of stream message
	kIPCContToken uint32 = 0xFFFFFFFF                                 // 32b continuation indicator for FlatBuffers 8b alignment
)
// paddedLength rounds nbytes up to the nearest multiple of alignment,
// returning nbytes unchanged when it is already aligned.
func paddedLength(nbytes int64, alignment int32) int64 {
	align := int64(alignment)
	// Overshoot by align-1 and then strip the remainder; for truncated
	// (Go) division this is identical to ((nbytes+align-1)/align)*align.
	overshoot := nbytes + align - 1
	return overshoot - overshoot%align
}
// errString is a string-backed error implementation that lets the
// package declare its sentinel errors as untyped constants.
type errString string

// Error implements the error interface, returning the message verbatim.
func (s errString) Error() string {
	return string(s)
}
// ReadAtSeeker combines sequential reading, seeking, and positional
// (random-access) reading in a single interface; os.File and
// bytes.Reader both satisfy it.
type ReadAtSeeker interface {
	io.Reader
	io.Seeker
	io.ReaderAt
}
// config collects the settings applied by the functional Options used
// when opening or creating Arrow IPC files and streams.
type config struct {
	alloc  memory.Allocator // allocator used while building records
	schema *arrow.Schema    // schema to use for reading or writing (see WithSchema)
	footer struct {
		offset int64 // Arrow footer position in bytes (see WithFooterOffset)
	}
	codec              flatbuf.CompressionType // body-buffer compression codec; -1 means uncompressed
	compressNP         int                     // goroutines for concurrent compression; <= 1 compresses serially
	ensureNativeEndian bool                    // byte-swap endian-sensitive buffers on read when needed
	noAutoSchema       bool                    // delay reading the schema until the first call to Next
	emitDictDeltas     bool                    // emit dictionary deltas when writing
}
// newConfig builds a config from the package defaults (Go allocator,
// no compression, native-endian enforcement enabled) and then applies
// the supplied options in order.
func newConfig(opts ...Option) *config {
	cfg := new(config)
	cfg.alloc = memory.NewGoAllocator()
	cfg.codec = -1 // uncompressed
	cfg.ensureNativeEndian = true
	for _, apply := range opts {
		apply(cfg)
	}
	return cfg
}
// Option is a functional option to configure opening or creating Arrow files
// and streams. Options are applied in the order given to newConfig.
type Option func(*config)
// WithFooterOffset specifies the Arrow footer position in bytes.
func WithFooterOffset(offset int64) Option {
	return func(c *config) { c.footer.offset = offset }
}
// WithAllocator specifies the Arrow memory allocator used while building records.
func WithAllocator(mem memory.Allocator) Option {
	return func(c *config) { c.alloc = mem }
}
// WithSchema specifies the Arrow schema to be used for reading or writing.
func WithSchema(schema *arrow.Schema) Option {
	return func(c *config) { c.schema = schema }
}
// WithLZ4 tells the writer to compress the data buffers with LZ4 Frame
// compression before writing. Readers require >= Arrow 1.0.0 to decompress.
func WithLZ4() Option {
	return func(c *config) { c.codec = flatbuf.CompressionTypeLZ4_FRAME }
}
// WithZstd tells the writer to compress the data buffers with ZSTD
// compression before writing. Readers require >= Arrow 1.0.0 to decompress.
func WithZstd() Option {
	return func(c *config) { c.codec = flatbuf.CompressionTypeZSTD }
}
// WithCompressConcurrency sets how many goroutines to spin up for
// concurrent compression of the body buffers when writing compressed IPC
// records. Any n <= 1 disables the goroutine parallelization and
// compresses serially. The default is 0.
func WithCompressConcurrency(n int) Option {
	return func(c *config) { c.compressNP = n }
}
// WithEnsureNativeEndian controls whether buffers holding
// endian-sensitive data are automatically byte-swapped when the schema's
// endianness differs from the platform-native endianness. This covers all
// numeric, temporal, and decimal types, as well as the offset buffers of
// variable-sized binary and list-like types.
//
// Only ipc.Reader objects consult this setting; writers ignore it. It
// defaults to true.
func WithEnsureNativeEndian(v bool) Option {
	return func(c *config) { c.ensureNativeEndian = v }
}
// WithDelayReadSchema alters the ipc.Reader behavior to delay attempting
// to read the schema from the stream until the first call to Next instead
// of immediately attempting to read a schema from the stream when created.
func WithDelayReadSchema(v bool) Option {
	return func(cfg *config) {
		cfg.noAutoSchema = v
	}
}
// WithDictionaryDeltas specifies whether or not to emit dictionary deltas.
func WithDictionaryDeltas(v bool) Option {
	return func(c *config) { c.emitDictDeltas = v }
}
var (
	// Compile-time assertions that the stream and file readers/writers
	// satisfy the arrio interfaces.
	_ arrio.Reader = (*Reader)(nil)
	_ arrio.Writer = (*Writer)(nil)

	_ arrio.Reader = (*FileReader)(nil)
	_ arrio.Writer = (*FileWriter)(nil)

	_ arrio.ReaderAt = (*FileReader)(nil)
)