forked from jorgecarleitao/arrow2
-
Notifications
You must be signed in to change notification settings - Fork 0
/
stream.rs
148 lines (118 loc) · 3.59 KB
/
stream.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
use std::io::Cursor;
use arrow2::array::Array;
use arrow2::chunk::Chunk;
use arrow2::datatypes::Schema;
use arrow2::error::Result;
use arrow2::io::ipc::read::read_stream_metadata;
use arrow2::io::ipc::read::StreamReader;
use arrow2::io::ipc::write::{StreamWriter, WriteOptions};
use arrow2::io::ipc::IpcField;
use crate::io::ipc::common::read_arrow_stream;
use crate::io::ipc::common::read_gzip_json;
/// Serializes `batches` into the Arrow IPC *streaming* format and returns the
/// raw bytes: the schema message, one record-batch message per chunk, and the
/// end-of-stream marker emitted by `finish`.
///
/// Compression is disabled so round-trip comparisons exercise the plain
/// encoding only.
fn write_(
    schema: &Schema,
    ipc_fields: Option<Vec<IpcField>>,
    batches: &[Chunk<Box<dyn Array>>],
) -> Vec<u8> {
    // Encode into an in-memory buffer owned by this function.
    let mut buffer = Vec::new();
    let options = WriteOptions { compression: None };
    let mut writer = StreamWriter::new(&mut buffer, options);

    writer.start(schema, ipc_fields).unwrap();
    for chunk in batches.iter() {
        writer.write(chunk, None).unwrap();
    }
    writer.finish().unwrap();

    buffer
}
/// Round-trip check for one integration fixture: read the generated Arrow
/// stream, re-serialize it with our `StreamWriter`, read the result back, and
/// compare schema, IPC fields and record batches against the gzipped-JSON
/// golden data from the Arrow integration suite.
fn test_file(version: &str, file_name: &str) {
    let (schema, ipc_fields, chunks) = read_arrow_stream(version, file_name, None);

    // Re-encode with our writer, then position a cursor at the start so the
    // stream can be read back. Metadata must be consumed before constructing
    // the reader.
    let mut cursor = Cursor::new(write_(&schema, Some(ipc_fields), &chunks));
    let metadata = read_stream_metadata(&mut cursor).unwrap();
    let reader = StreamReader::new(cursor, metadata, None);

    let read_schema = reader.metadata().schema.clone();
    let read_ipc_fields = reader.metadata().ipc_schema.fields.clone();

    // Golden output produced by the official Arrow integration suite.
    let (expected_schema, expected_ipc_fields, expected_chunks) =
        read_gzip_json(version, file_name).unwrap();

    assert_eq!(read_schema, expected_schema);
    assert_eq!(read_ipc_fields, expected_ipc_fields);

    // Each reader item is a `Result` over a stream state; the inner `unwrap`
    // extracts the decoded chunk (the stream is fully buffered, so no item
    // should be in a "waiting" state).
    let read_chunks = reader
        .map(|item| item.map(|state| state.unwrap()))
        .collect::<Result<Vec<_>>>()
        .unwrap();
    assert_eq!(read_chunks, expected_chunks);
}
// Round-trip tests over the Arrow integration-suite fixtures. Each test
// re-serializes one generated file with our stream writer and verifies the
// result against the golden JSON via `test_file`. The fixture set mirrors the
// official Arrow "generated_*" files for format version 1.0.0 (little-endian)
// plus one legacy 0.17.1 case.
#[test]
fn write_100_primitive() {
    test_file("1.0.0-littleendian", "generated_primitive");
}
#[test]
fn write_100_datetime() {
    test_file("1.0.0-littleendian", "generated_datetime");
}
#[test]
fn write_100_dictionary_unsigned() {
    test_file("1.0.0-littleendian", "generated_dictionary_unsigned");
}
#[test]
fn write_100_dictionary() {
    test_file("1.0.0-littleendian", "generated_dictionary");
}
#[test]
fn write_100_interval() {
    test_file("1.0.0-littleendian", "generated_interval");
}
#[test]
fn write_100_large_batch() {
    // this takes too long for unit-tests. It has been passing...
    //test_file("1.0.0-littleendian", "generated_large_batch");
}
#[test]
fn write_100_nested() {
    test_file("1.0.0-littleendian", "generated_nested");
}
#[test]
fn write_100_nested_large_offsets() {
    test_file("1.0.0-littleendian", "generated_nested_large_offsets");
}
#[test]
fn write_100_null_trivial() {
    test_file("1.0.0-littleendian", "generated_null_trivial");
}
#[test]
fn write_100_null() {
    test_file("1.0.0-littleendian", "generated_null");
}
#[test]
fn write_100_primitive_large_offsets() {
    test_file("1.0.0-littleendian", "generated_primitive_large_offsets");
}
#[test]
fn write_100_union() {
    test_file("1.0.0-littleendian", "generated_union");
}
// Legacy metadata version: ensures the writer round-trips data originally
// produced by Arrow 0.17.1.
#[test]
fn write_generated_017_union() {
    test_file("0.17.1", "generated_union");
}
// NOTE(review): disabled test kept for reference — presumably unsupported or
// too slow; confirm before re-enabling.
//#[test]
//fn write_100_recursive_nested() {
//test_file("1.0.0-littleendian", "generated_recursive_nested");
//}
// Edge cases: a stream with a schema but zero record batches, and a batch
// with zero rows.
#[test]
fn write_100_primitive_no_batches() {
    test_file("1.0.0-littleendian", "generated_primitive_no_batches");
}
#[test]
fn write_100_primitive_zerolength() {
    test_file("1.0.0-littleendian", "generated_primitive_zerolength");
}
#[test]
fn write_100_custom_metadata() {
    test_file("1.0.0-littleendian", "generated_custom_metadata");
}
#[test]
fn write_100_decimal() {
    test_file("1.0.0-littleendian", "generated_decimal");
}