forked from cmu-db/peloton
-
Notifications
You must be signed in to change notification settings - Fork 0
/
tf_session_entity_input.cpp
93 lines (85 loc) · 3.19 KB
/
tf_session_entity_input.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
//===----------------------------------------------------------------------===//
//
// Peloton
//
// tf_session_entity_input.cpp
//
// Identification: src/brain/util/tf_session_entity/tf_session_entity_input.cpp
//
// Copyright (c) 2015-2018, Carnegie Mellon University Database Group
//
//===----------------------------------------------------------------------===//
#include "brain/util/tf_session_entity/tf_session_entity_input.h"
namespace peloton {
namespace brain {
// Scalar input: wraps a single value in a 0-dimensional tensor bound to the
// placeholder named `op`.
// @param input value copied into the tensor
// @param op    name of the TF placeholder this input feeds
TFSEIN_TEMPLATE_ARGUMENTS
TFSEIN_TYPE::TfSessionEntityInput(const InputType &input,
                                  const std::string &op) {
  this->placeholder_name_ = op;
  this->DetermineDataType();
  // Rank-0 (scalar) tensor: nullptr dims with num_dims == 0.
  this->tensor_ =
      TF_AllocateTensor(this->data_type_, nullptr, 0, sizeof(InputType));
  auto buff = (InputType *)TF_TensorData(this->tensor_);
  // Copy straight from the caller's value; the previous mutable local copy
  // (`input_for_tf`) was unnecessary — memcpy takes a const source.
  PELOTON_MEMCPY(buff, &input, sizeof(InputType));
}
// 1d vector input: copies the vector's elements into a rank-1 tensor bound
// to the placeholder named `op`.
// @param input elements copied into the tensor
// @param op    name of the TF placeholder this input feeds
TFSEIN_TEMPLATE_ARGUMENTS
TFSEIN_TYPE::TfSessionEntityInput(const std::vector<InputType> &input,
                                  const std::string &op) {
  this->placeholder_name_ = op;
  this->DetermineDataType();
  const int64_t num_elems = static_cast<int64_t>(input.size());
  int64_t shape[] = {num_elems};
  const size_t num_bytes = num_elems * sizeof(InputType);
  // Allocate a rank-1 tensor and copy the vector's contents into it.
  this->tensor_ = TF_AllocateTensor(this->data_type_, shape, 1, num_bytes);
  auto dest = (InputType *)TF_TensorData(this->tensor_);
  PELOTON_MEMCPY(dest, input.data(), num_bytes);
}
// 2d vector input: flattens a row-major matrix into a rank-2 tensor bound
// to the placeholder named `op`. All rows are assumed to have the same
// length as input[0] — TODO confirm callers guarantee this (ragged rows
// would under/overfill the tensor).
// @param input rows copied contiguously into the tensor
// @param op    name of the TF placeholder this input feeds
TFSEIN_TEMPLATE_ARGUMENTS
TFSEIN_TYPE::TfSessionEntityInput(
    const std::vector<std::vector<InputType>> &input, const std::string &op) {
  this->placeholder_name_ = op;
  this->DetermineDataType();
  // Guard: input[0] on an empty outer vector was undefined behavior.
  int64_t dims[] = {static_cast<int64_t>(input.size()),
                    input.empty() ? 0 : static_cast<int64_t>(input[0].size())};
  this->tensor_ = TF_AllocateTensor(this->data_type_, dims, 2,
                                    dims[0] * dims[1] * sizeof(InputType));
  // Copy each row directly into the tensor buffer. The previous code used
  // Flatten(), which returned the .data() pointer of a function-local
  // vector and therefore dangled (use-after-free) by the time it was read.
  auto buff = (InputType *)TF_TensorData(this->tensor_);
  size_t offset = 0;
  for (const auto &row : input) {
    PELOTON_MEMCPY(buff + offset, row.data(), row.size() * sizeof(InputType));
    offset += row.size();
  }
}
// Raw flattened input: wraps a pre-flattened buffer in a tensor of the
// given shape, bound to the placeholder named `op`. The buffer must hold
// at least the product of `dims` elements.
// @param input pointer to the flattened source data (copied, not owned)
// @param dims  extent of each tensor dimension
// @param op    name of the TF placeholder this input feeds
TFSEIN_TEMPLATE_ARGUMENTS
TFSEIN_TYPE::TfSessionEntityInput(InputType *input,
                                  const std::vector<int64_t> &dims,
                                  const std::string &op) {
  this->placeholder_name_ = op;
  this->DetermineDataType();
  // Total element count is the product of all dimension extents.
  int64_t total = 1;
  for (const int64_t extent : dims) {
    total *= extent;
  }
  const size_t num_bytes = total * sizeof(InputType);
  this->tensor_ =
      TF_AllocateTensor(this->data_type_, dims.data(), dims.size(), num_bytes);
  auto dest = (InputType *)TF_TensorData(this->tensor_);
  PELOTON_MEMCPY(dest, input, num_bytes);
}
// Flattens 2d inputs into a single contiguous row-major array.
// NOTE(review): the previous implementation returned the .data() pointer of
// a function-local vector, which dangles as soon as the function returns
// (use-after-free for every caller). The return type fixes the interface to
// a raw pointer, so this now returns a heap-allocated array that the CALLER
// OWNS and must delete[].
// Also fixed: the inner loop iterated with `float elem`, silently narrowing
// any non-float InputType.
// @param elems rows to concatenate, in order
// @return heap-allocated array of all elements (caller must delete[])
TFSEIN_TEMPLATE_ARGUMENTS
InputType *TFSEIN_TYPE::Flatten(
    const std::vector<std::vector<InputType>> &elems) {
  size_t total = 0;
  for (const auto &row : elems) {
    total += row.size();
  }
  auto *flattened = new InputType[total];
  size_t idx = 0;
  for (const auto &row : elems) {
    for (const InputType &elem : row) {
      flattened[idx++] = elem;
    }
  }
  return flattened;
}
// Explicit template Initialization
// Only the float specialization is emitted into this translation unit; add
// further explicit instantiations here if other element types are needed.
template class TfSessionEntityInput<float>;
} // namespace brain
} // namespace peloton