-
Notifications
You must be signed in to change notification settings - Fork 14
/
mattorch.c
329 lines (280 loc) · 10.5 KB
/
mattorch.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
/*
+ This is a wrapper for matlab std I/O functions
+ Supported Types (LOAD):
mxCELL_CLASS
mxSTRUCT_CLASS
mxLOGICAL_CLASS
mxCHAR_CLASS Y
mxDOUBLE_CLASS Y
mxSINGLE_CLASS Y
mxINT8_CLASS Y
mxUINT8_CLASS Y
mxINT16_CLASS Y
mxUINT16_CLASS Y (casts to INT16)
mxINT32_CLASS Y
mxUINT32_CLASS Y (casts to INT32)
mxINT64_CLASS
mxUINT64_CLASS
mxFUNCTION_CLASS
+ Supported Types (SAVE):
mxCELL_CLASS
mxSTRUCT_CLASS
mxLOGICAL_CLASS
mxCHAR_CLASS
mxDOUBLE_CLASS Y
mxSINGLE_CLASS
mxINT8_CLASS
mxUINT8_CLASS
mxINT16_CLASS
mxUINT16_CLASS
mxINT32_CLASS
mxUINT32_CLASS
mxINT64_CLASS
mxUINT64_CLASS
mxFUNCTION_CLASS
-
*/
// To load this lib in LUA:
// require 'libmatlab'
#include <luaT.h>
#include <TH/TH.h>
#include "mat.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
// Loader: libmattorch.load(path) -> table
// Reads every variable from the .mat file at 'path' and returns a Lua
// table mapping variable names to torch tensors. MATLAB arrays are
// column-major and torch tensors row-major, so dimensions are reversed
// on the way in. Unsupported MATLAB classes map to an explanatory
// string instead of a tensor.
static int load_l(lua_State *L) {
  // get args
  const char *path = lua_tostring(L,1);

  // open file
  MATFile *file = matOpen(path, "r");
  // bug fix: original passed 'file' (NULL here) to the %s format, not 'path'
  if (file == NULL) THError("Error opening file %s", path);

  // create table to hold loaded variables
  lua_newtable(L);  // vars = {}
  int vars = lua_gettop(L);

  // extract each var
  while (1) {  // '1' rather than 'true': <stdbool.h> is not included in this file
    // get var+name
    const char *name;
    mxArray *pa = matGetNextVariable(file, &name);
    if (pa == NULL) break;

    // get dimensions
    mwSize ndims = mxGetNumberOfDimensions(pa);
    const mwSize *dims = mxGetDimensions(pa);

    // infer size and stride (reversed dimension order, see header comment)
    int k;
    THLongStorage *size = THLongStorage_newWithSize(ndims);
    THLongStorage *stride = THLongStorage_newWithSize(ndims);
    for (k=0; k<ndims; k++) {
      THLongStorage_set(size, ndims-k-1, dims[k]);
      if (k > 0)
        THLongStorage_set(stride, ndims-k-1, dims[k-1]*THLongStorage_get(stride,ndims-k));
      else
        THLongStorage_set(stride, ndims-k-1, 1);
    }

    // depending on type, create equivalent Lua/torch data structure
    // (hoisted: original re-queried mxGetClassID up to a dozen times per var)
    mxClassID cls = mxGetClassID(pa);
    if (cls == mxDOUBLE_CLASS) {
      THDoubleTensor *tensor = THDoubleTensor_newWithSize(size, stride);
      memcpy((void *)(THDoubleTensor_data(tensor)),
             (void *)(mxGetPr(pa)), THDoubleTensor_nElement(tensor) * sizeof(double));
      lua_pushstring(L, name);
      luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.DoubleTensor"));
      lua_rawset(L, vars);
    } else if (cls == mxSINGLE_CLASS) {
      THFloatTensor *tensor = THFloatTensor_newWithSize(size, stride);
      memcpy((void *)(THFloatTensor_data(tensor)),
             (void *)(mxGetPr(pa)), THFloatTensor_nElement(tensor) * sizeof(float));
      lua_pushstring(L, name);
      luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.FloatTensor"));
      lua_rawset(L, vars);
    } else if (cls == mxINT32_CLASS || cls == mxUINT32_CLASS) {
      // UINT32 is cast to INT32 (see header comment)
      THIntTensor *tensor = THIntTensor_newWithSize(size, stride);
      memcpy((void *)(THIntTensor_data(tensor)),
             (void *)(mxGetPr(pa)), THIntTensor_nElement(tensor) * sizeof(int));
      lua_pushstring(L, name);
      luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.IntTensor"));
      lua_rawset(L, vars);
    } else if (cls == mxINT16_CLASS || cls == mxUINT16_CLASS) {
      // UINT16 is cast to INT16 (see header comment)
      THShortTensor *tensor = THShortTensor_newWithSize(size, stride);
      memcpy((void *)(THShortTensor_data(tensor)),
             (void *)(mxGetPr(pa)), THShortTensor_nElement(tensor) * sizeof(short));
      lua_pushstring(L, name);
      luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.ShortTensor"));
      lua_rawset(L, vars);
    } else if (cls == mxINT8_CLASS || cls == mxCHAR_CLASS) {
      THCharTensor *tensor = THCharTensor_newWithSize(size, stride);
      memcpy((void *)(THCharTensor_data(tensor)),
             (void *)(mxGetPr(pa)), THCharTensor_nElement(tensor) * sizeof(char));
      lua_pushstring(L, name);
      luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.CharTensor"));
      lua_rawset(L, vars);
    } else if (cls == mxUINT8_CLASS || cls == mxLOGICAL_CLASS) {
      THByteTensor *tensor = THByteTensor_newWithSize(size, stride);
      memcpy((void *)(THByteTensor_data(tensor)),
             (void *)(mxGetPr(pa)), THByteTensor_nElement(tensor) * sizeof(char));
      lua_pushstring(L, name);
      luaT_pushudata(L, tensor, luaT_checktypename2id(L, "torch.ByteTensor"));
      lua_rawset(L, vars);
    } else {
      // unsupported class: store a descriptive string under the var's name
      lua_pushstring(L, name);
      if (cls == mxCELL_CLASS) {
        lua_pushstring(L, "unsupported type: mxCELL_CLASS");
      } else if (cls == mxSTRUCT_CLASS) {
        lua_pushstring(L, "unsupported type: mxSTRUCT_CLASS");
      } else if (cls == mxINT64_CLASS) {
        lua_pushstring(L, "unsupported type: mxINT64_CLASS");
      } else if (cls == mxUINT64_CLASS) {
        lua_pushstring(L, "unsupported type: mxUINT64_CLASS");
      } else if (cls == mxFUNCTION_CLASS) {
        lua_pushstring(L, "unsupported type: mxFUNCTION_CLASS");
      } else {
        lua_pushstring(L, "unknown type");
      }
      lua_rawset(L, vars);
    }
    // leak fix: THTensor_newWithSize copies the values, so the storages
    // must be freed here (original leaked both every iteration)
    THLongStorage_free(size);
    THLongStorage_free(stride);
    mxDestroyArray(pa);
  }

  // cleanup
  matClose(file);

  // return table 'vars'
  return 1;
}
// Save single tensor: libmattorch.saveTensor(path, tensor)
// Writes the given torch.DoubleTensor into a new .mat file at 'path',
// as a single variable named 'x'. Dimensions are reversed (torch is
// row-major, MATLAB column-major).
static int save_tensor_l(lua_State *L) {
  // open file for output
  const char *path = lua_tostring(L,1);
  MATFile *file = matOpen(path, "w");
  // robustness fix: original never checked for a failed open
  if (file == NULL) THError("Error creating file %s", path);

  // load tensor; the contiguous copy guarantees a flat memcpy-able layout
  THDoubleTensor *tensor = (THDoubleTensor *)luaT_checkudata(L, 2, luaT_checktypename2id(L, "torch.DoubleTensor"));
  THDoubleTensor *tensorc = THDoubleTensor_newContiguous(tensor);

  // infer size (reversed dimension order)
  int k;
  mwSize size[8];
  const long ndims = tensorc->nDimension;
  if (ndims > 8) THError("tensors with more than 8 dimensions are not supported");
  for (k=0; k<ndims; k++) {
    size[k] = tensorc->size[ndims-k-1];
  }

  // create matlab array
  mxArray *pm = mxCreateNumericArray(ndims, size, mxDOUBLE_CLASS, mxREAL);

  // copy tensor
  // bug fix: copy from 'tensorc' (the contiguous copy), not 'tensor' —
  // the original read the raw (possibly non-contiguous) tensor's data
  memcpy((void *)(mxGetPr(pm)),
         (void *)(THDoubleTensor_data(tensorc)),
         THDoubleTensor_nElement(tensorc) * sizeof(double));

  // save it, in a dummy var named 'x'
  const char *name = "x";
  matPutVariable(file, name, pm);

  // done (leak fix: original never destroyed 'pm')
  mxDestroyArray(pm);
  THDoubleTensor_free(tensorc);
  matClose(file);
  return 0;
}
// Save table of tensors: libmattorch.saveTable(path, table)
// Writes every entry of a Lua table {name = DoubleTensor, ...} into a
// new .mat file, one MATLAB variable per table key. Keys are assumed to
// be strings (lua_tostring on a non-string key would mutate it and
// break lua_next — callers pass string-keyed tables).
static int save_table_l(lua_State *L) {
  // open file for output
  const char *path = lua_tostring(L,1);
  MATFile *file = matOpen(path, "w");
  // robustness fix: original never checked for a failed open
  if (file == NULL) THError("Error creating file %s", path);

  // table is in the stack at index 2 (2nd var)
  lua_pushnil(L);  // first key
  while (lua_next(L, 2) != 0) {
    // uses 'key' (at index -2) and 'value' (at index -1)
    const char *name = lua_tostring(L,-2);
    THDoubleTensor *tensor = (THDoubleTensor *)luaT_checkudata(L, -1, luaT_checktypename2id(L, "torch.DoubleTensor"));
    THDoubleTensor *tensorc = THDoubleTensor_newContiguous(tensor);

    // infer size (reversed: torch row-major, MATLAB column-major),
    // consistently from the contiguous copy we actually read
    int k;
    mwSize size[8];
    const long ndims = tensorc->nDimension;
    if (ndims > 8) THError("tensors with more than 8 dimensions are not supported");
    for (k=0; k<ndims; k++) {
      size[k] = tensorc->size[ndims-k-1];
    }

    // create matlab array and copy tensor into it
    mxArray *pm = mxCreateNumericArray(ndims, size, mxDOUBLE_CLASS, mxREAL);
    memcpy((void *)(mxGetPr(pm)),
           (void *)(THDoubleTensor_data(tensorc)),
           THDoubleTensor_nElement(tensorc) * sizeof(double));

    // store it; matPutVariable copies the data, so the mxArray can be
    // destroyed immediately (overflow fix: original buffered pointers in
    // a fixed 1024-slot malloc'd array and wrote past it for larger tables)
    matPutVariable(file, name, pm);
    mxDestroyArray(pm);

    // cleanup
    THDoubleTensor_free(tensorc);

    // removes 'value'; keeps 'key' for next iteration
    lua_pop(L, 1);
  }

  // cleanup
  matClose(file);
  return 0;
}
// Write a 1D or 2D torch.DoubleTensor to a torch.File in ascii-friendly
// layout: one THFile_writeDoubleRaw call per row (2D) or per element (1D).
// Tensors with more than 2 dimensions raise an error.
static int save_tensor_ascii_l(lua_State *L)
{
  // get file descriptor
  THFile *file = luaT_checkudata(L, 1, luaT_checktypename2id(L, "torch.File"));

  // get tensor; a contiguous copy gives a flat data pointer to walk
  THDoubleTensor *src = (THDoubleTensor *)luaT_checkudata(L, 2, luaT_checktypename2id(L, "torch.DoubleTensor"));
  THDoubleTensor *contig = THDoubleTensor_newContiguous(src);
  double *data = THDoubleTensor_data(contig);

  // only 1d/2d supported
  const long ndims = contig->nDimension;
  if (ndims > 2) {
    THError("matlab ascii only supports 1d or 2d tensors");
  }

  // doubles per write: a full row in 2D, a single value in 1D
  const long chunk = (ndims == 2) ? contig->size[1] : 1;

  // write row by row
  int row;
  for (row = 0; row < contig->size[0]; row++) {
    THFile_writeDoubleRaw(file, data, chunk);
    data += chunk;
  }

  // cleanup
  THDoubleTensor_free(contig);
  return 0;
}
// Register functions in LUA
// Module dispatch table: maps the Lua-visible function names to their
// C implementations above. Terminated by the {NULL, NULL} sentinel
// required by luaL_openlib.
static const struct luaL_reg matlab [] = {
  {"load", load_l},                        // load(path) -> table of tensors
  {"saveTensor", save_tensor_l},           // saveTensor(path, tensor), saved as var 'x'
  {"saveTable", save_table_l},             // saveTable(path, {name=tensor,...})
  {"saveTensorAscii", save_tensor_ascii_l},// saveTensorAscii(torch.File, tensor)
  {NULL, NULL}  /* sentinel */
};
// Library entry point invoked by Lua's require 'libmattorch':
// registers the dispatch table under the global name 'libmattorch'
// and returns the module table on the stack.
int luaopen_libmattorch (lua_State *L) {
  luaL_openlib(L, "libmattorch", matlab, 0);
  return 1;
}