Auto Gradient Computation Framework (Pytorch Mock Version)
Example usage for each component is shown below.
- Tensor node.
- Destructible node when flushing memory recursively by default.
#include <Tensor/Tensor.hpp>
Tensor<double>& my_tensor1 = *new Tensor<double>(); // default value 0
Tensor<double>& my_tensor2 = *new Tensor<double>(3.2); // value only with default name as ""
Tensor<double>& my_tensor3 = *new Tensor<double>(3.2, "Data"); // value only and custom name
// ADD, SUBTRACT, MULTIPLY operations will automatically generate Dynamic Graph
Tensor<double>& result = my_tensor1 + my_tensor2 - my_tensor3 * my_tensor4; // (my_tensor4 declared like the tensors above)
result.backward(); // EASY. check each tensor's .grad element for gradient value
Tensor<double>& head_node_of_graph = result; // e.g. bind to the head (final output) node of the graph
head_node_of_graph.flush(); // will flush all child node memories
- Inherits Tensor
- Not Destructible node when flushing memory recursively by default. (Used for weights)
#include <Tensor/Tensor.hpp>
Variable<double>& my_tensor1 = *new Variable<double>(); // default value 0
Variable<double>& my_tensor2 = *new Variable<double>(3.2); // value only with default name as ""
Variable<double>& my_tensor3 = *new Variable<double>(3.2, "Weights"); // value and custom name
- Contains weight tensors in a map
- Tensors can be retrieved by std::string key
#include <WeightMap.hpp>
#include <Tensor/Tensor.hpp>
// Initialize weights
Variable<double>& weight1 = *new Variable<double>(1.1, "W1");
Variable<double>& weight2 = *new Variable<double>(1.2, "W2");
Variable<double>& weight3 = *new Variable<double>(1.3, "W3");
// Initialize Weight Map
std::vector<Tensor<double>*> weight_list = {&weight1, &weight2, &weight3}; // first make vector with type Tensor<T>* (take addresses of the references)
WeightMap<double>& weightMap = *new WeightMap<double>(weight_list); // Initialize by passing in weight list
Tensor<double>& weight_i_want = weightMap.getTensor("W1"); // use key std::string to retrieve reference
- Takes in WeightMap Pointer and Learning Rate
#include <Optimizer.hpp>
#include <WeightMap.hpp>
#include <Tensor/Tensor.hpp>
WeightMap<double>& weightMap = *new WeightMap<double>(weight_list); // Initialize by passing in weight list
double learning_rate = 0.0001;
Optimizer<double> optim(&weightMap, learning_rate);
optim.step(); // EASY.
- Loss Instance for Loss Calculation
#include <Loss.hpp>
Loss<double> lossModule;
Tensor<double>& model_output = /* output tensor produced by your model's forward pass */;
Tensor<double>& target = /* ground-truth target tensor */;
Tensor<double>& loss = lossModule.forward(model_output, target); // pass in model_output and target to forward method