Merge pull request #145 from marty1885/apichange
Relax indexing requirements
marty1885 committed May 29, 2020
2 parents a61d58b + e24bd53 commit 323efc9
Showing 12 changed files with 123 additions and 31 deletions.
2 changes: 1 addition & 1 deletion Etaler/Algorithms/SpatialPooler.hpp
@@ -40,7 +40,7 @@ struct ETALER_EXPORT SpatialPooler
size_t activeThreshold() const { return active_threshold_; }

void setGlobalDensity(float d) { global_density_ = d; }
size_t globalDensity() const { return global_density_; }
float globalDensity() const { return global_density_; }

void setBoostingFactor(float f) { boost_factor_ = f; }
float boostFactor() const { return boost_factor_; }
5 changes: 5 additions & 0 deletions Etaler/Backends/CPUBackend.cpp
@@ -88,6 +88,11 @@ inline void dispatch2d(DType t1, DType t2, Func f)
});
}

CPUBuffer::~CPUBuffer()
{
std::visit([](auto& ptr){delete [] ptr;}, storage_);
}

namespace et::detail
{
template <typename PermType>
2 changes: 1 addition & 1 deletion Etaler/Backends/CPUBackend.hpp
@@ -39,7 +39,7 @@ struct ETALER_EXPORT CPUBuffer : public BufferImpl
memcpy(ptr, src_ptr, shape.volume()*dtypeToSize(dtype));
}

virtual ~CPUBuffer() {std::visit([](auto& ptr){delete [] ptr;}, storage_);}
virtual ~CPUBuffer();

virtual void* data() const override;

9 changes: 9 additions & 0 deletions Etaler/Core/Backend.hpp
@@ -73,3 +73,12 @@ struct ETALER_EXPORT Backend : public std::enable_shared_from_this<Backend>

}

namespace cling
{

inline std::string printValue(const et::Backend* backend)
{
return "<Etaler backend on " + backend->name() + ">";
}

}
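
For context: cling picks up pretty-printing for a type by finding a `printValue` overload in namespace `cling`, so the overload added here is what an interactive ROOT/cling session would use when an `et::Backend*` is evaluated at the prompt. Below is a minimal sketch of calling it directly; the `demo` helper is illustrative and the printed text depends on what `Backend::name()` returns.

```cpp
#include <Etaler/Core/Backend.hpp>
#include <iostream>

// Hypothetical helper: prints the same string cling would show for this
// pointer, e.g. "<Etaler backend on CPU>" (exact name depends on the backend).
void demo(const et::Backend* backend)
{
    std::cout << cling::printValue(backend) << '\n';
}
```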
9 changes: 6 additions & 3 deletions Etaler/Core/Error.cpp
@@ -11,19 +11,22 @@ static bool g_enable_trace_on_exception = true;

ETALER_EXPORT void et::enableTraceOnException(bool enable)
{
g_enable_trace_on_exception = true;
g_enable_trace_on_exception = enable;
}

ETALER_EXPORT bool et::getEnableTraceOnException()
{
return g_enable_trace_on_exception;
}

std::string et::genStackTrace()
std::string et::genStackTrace(size_t skip)
{
#ifndef BACKWARD_SYSTEM_UNKNOWN
std::stringstream ss;
StackTrace st;
// Skip at least the function calls we don't want
// 1. unwind 2. load_here, 3. genStackTrace
st.skip_n_firsts(3+skip);
st.load_here(32);
Printer p;
p.color_mode = ColorMode::never;
@@ -44,5 +44,5 @@ ETALER_EXPORT EtError::EtError(const std::string &msg)
: msg_(msg)
{
if(getEnableTraceOnException())
msg_ += genStackTrace();
msg_ += "\n"+genStackTrace(1); // Skip the EtError ctor
}
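
A side note on the new `skip` parameter: it drops that many extra frames on top of the three that are always skipped (unwind, `load_here`, and `genStackTrace` itself); the `EtError` constructor passes 1 so its own frame stays out of the trace. A minimal sketch of the same convention in a hypothetical helper, not part of the commit:

```cpp
#include <Etaler/Core/Error.hpp>
#include <iostream>
#include <string>

// Hypothetical logging helper (not in Etaler): passing skip = 1 keeps
// logWithTrace() itself out of the reported stack trace, mirroring what
// the EtError constructor does above.
void logWithTrace(const std::string& what)
{
    std::cout << what << '\n' << et::genStackTrace(1) << std::endl;
}
```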
2 changes: 1 addition & 1 deletion Etaler/Core/Error.hpp
@@ -12,7 +12,7 @@
namespace et
{

std::string genStackTrace();
std::string genStackTrace(size_t skip = 0);

class EtError : public std::exception
{
20 changes: 12 additions & 8 deletions Etaler/Core/Tensor.cpp
@@ -175,7 +175,7 @@ Tensor Tensor::view(const IndexList& rgs) const
if(ranges.size() > dimentions())
throw EtError("Cannot view a tensor of " + std::to_string(dimentions()) + " with " + std::to_string(ranges.size()) + " dimentions");

// Fill in the blacks where dimensions are not specified
// Fill in the blanks where dimensions are not specified
while(ranges.size() != dimentions())
ranges.push_back(et::all());

@@ -195,12 +195,12 @@ Tensor Tensor::view(const IndexList& rgs) const

assert(viewed_strides.size() == dimentions());

// Compute the new shape and stride. Most of the code here exists for check for out-of-bounds access
// Compute the new shape and stride. Most of the code here exists to check for out-of-bounds access
offset.reserve(dimentions());
result_shape.reserve(dimentions());
for(size_t i=0;i<dimentions();i++) { std::visit([&](auto index_range) { // <- make the code neater
const auto& r = index_range;
intmax_t dim_size = shape()[i];
const intmax_t dim_size = shape()[i];

// Try to resolve the indexing details
auto [start, stop, step, keep_dim] = [&r, dim_size]() -> std::tuple<intmax_t, intmax_t, intmax_t, bool> {
@@ -212,17 +212,21 @@ Tensor Tensor::view(const IndexList& rgs) const

intmax_t real_start = resolve_index(start, dim_size);
intmax_t real_stop = resolve_index(stop, dim_size);

// Attempt to fix out-of-bounds indices (the same way NumPy and PyTorch work)
if(real_stop > dim_size)
real_stop = dim_size;
else if(real_stop < 0)
real_stop = 0;
intmax_t size = (std::abs(real_stop - real_start) - 1) / std::abs(step) + 1;

// Indexing validations
if(is_index_valid(stop, dim_size+1) == false)
throw EtError("Stopping index " + std::to_string(stop) + " is out of range in dimension " + std::to_string(i));
if(step == 0)
throw EtError("Error: Step size is zero in dimension " + std::to_string(i));
throw EtError("Step size is zero in dimension " + std::to_string(i));
if(is_index_valid(start, dim_size) == false)
throw EtError("Starting index " + std::to_string(start) + " is out of range in dimension " + std::to_string(i));
throw EtError("Index " + std::to_string(start) + " is out of range for dimension " + std::to_string(i) + " with size " + std::to_string(dim_size));
if((real_stop - real_start) * step < 0)
throw EtError("Step is going in the wrong direction. Will cause infinate loop");
throw EtError("Step is going in the wrong direction in dimension " + std::to_string(i));

viewed_strides[i] *= step;

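
The net effect of this hunk is that a stop index past the end of a dimension is clamped instead of rejected, matching NumPy/PyTorch slicing, while zero steps and out-of-range start indices still throw. A sketch of the relaxed behaviour, modelled on the updated test further down; the resulting shape is my reading of the clamping logic, not something stated in the diff:

```cpp
#include <Etaler/Etaler.hpp>
#include <cassert>
using namespace et;

void relaxedViewExample()
{
    Tensor t = ones({4, 4});
    // Threw before this commit; the stop index is now clamped to 4,
    // the same way numpy.ones((4, 4))[:100] behaves.
    Tensor v = t.view({range(100)});
    assert(v.shape() == Shape({4, 4}));
}
```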
4 changes: 2 additions & 2 deletions Etaler/Core/Tensor.hpp
@@ -172,9 +172,9 @@ struct ETALER_EXPORT Tensor

Tensor swapaxis(size_t axis1, size_t axis2) const
{
if(axis1 < dimentions())
if(axis1 >= dimentions())
throw EtError("Axis " + std::to_string(axis1) + " is out of range.");
if(axis2 < dimentions())
if(axis2 >= dimentions())
throw EtError("Axis " + std::to_string(axis2) + " is out of range.");
Shape stride = pimpl_->stride();
Shape s = shape();
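
The old comparisons here were inverted, so `swapaxis` threw precisely when the axes were valid; with the fix, only out-of-range axes throw. A sketch under that assumption, not taken from the commit; the swapped shape is my reading of what `swapaxis` should produce:

```cpp
#include <Etaler/Etaler.hpp>
#include <Etaler/Core/Error.hpp>
#include <cassert>
using namespace et;

void swapaxisExample()
{
    Tensor t = ones({3, 4});
    Tensor s = t.swapaxis(0, 1);          // valid axes; threw before the fix
    assert(s.shape() == Shape({4, 3}));

    bool threw = false;
    try { t.swapaxis(0, 5); }             // axis 5 is out of range for a 2D tensor
    catch (const EtError&) { threw = true; }
    assert(threw);
}
```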
11 changes: 6 additions & 5 deletions Etaler/Core/TensorImpl.hpp
@@ -91,13 +91,14 @@ bool checkProperty(const TensorImpl* x, const T& value)
}

template <typename T>
void requireProperty(const TensorImpl* x, const T value, const std::string& line, const std::string& v_name)
void requireProperty(const TensorImpl* x, const T& value, const std::string_view line, const std::string_view v_name)
{
if(checkProperty(x, value) == true)
return;

//Otherwise assertion failed
const std::string msg = line + " Tensor property requirment not match. Expecting " + v_name;
// Otherwise assertion failed
// Workaround string_view limitations
const std::string msg = (std::string(line) + " Tensor property requirment not match. Expecting ").append(v_name);
if constexpr(std::is_base_of_v<Backend, std::remove_pointer_t<std::decay_t<T>>>)
throw EtError(msg + ".backend() == " + value->name());
else if constexpr(std::is_same_v<T, DType>)
@@ -115,13 +116,13 @@ void requireProperty(const TensorImpl* x, const T value, const std::string& line
}

template <typename ... Args>
bool checkProperties(const TensorImpl* x, Args... args)
bool checkProperties(const TensorImpl* x, const Args& ... args)
{
return (checkProperty(x, args) && ...);
}

template <typename ... Args>
void requirePropertiesInternal(const TensorImpl* x, const std::string& line, const std::string& v_name, Args... args)
void requirePropertiesInternal(const TensorImpl* x, const std::string_view line, const std::string_view v_name, const Args& ... args)
{
(requireProperty(x, args, line, v_name), ...);
}
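
`requireProperties` throws an `EtError` describing the expectation that failed; this hunk only changes the parameters to `string_view` and const references. A sketch of guarding a hypothetical kernel's input with the properties that appear in the tests below (`DType`, `Shape`, `IsPlain`); passing several properties in one call, and the include path, are my assumptions based on the variadic `requirePropertiesInternal` shown above:

```cpp
#include <Etaler/Etaler.hpp>
#include <Etaler/Core/TensorImpl.hpp>
using namespace et;

// Hypothetical kernel guard: the input must be a plainly stored Int32
// tensor of shape {4}, otherwise an EtError (with a stack trace) is thrown.
void myKernel(const Tensor& x)
{
    requireProperties(x.pimpl(), DType::Int32, Shape{4}, IsPlain());
    // ... the actual computation would follow here ...
}
```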
10 changes: 5 additions & 5 deletions Etaler/Etaler.hpp
@@ -1,8 +1,8 @@
#pragma once

//Includes common headers
#include "Core/Shape.hpp"
#include "Core/DType.hpp"
#include "Core/Backend.hpp"
#include "Core/Tensor.hpp"
#include "Core/DefaultBackend.hpp"
#include <Etaler/Core/Shape.hpp>
#include <Etaler/Core/DType.hpp>
#include <Etaler/Core/Backend.hpp>
#include <Etaler/Core/Tensor.hpp>
#include <Etaler/Core/DefaultBackend.hpp>
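
Switching the umbrella header to `<Etaler/...>` includes lets it resolve the same way from the source tree and from an installed copy. A minimal downstream consumer as an illustration; the compile line is an assumption and paths will differ per system:

```cpp
// example.cpp -- e.g. g++ -std=c++17 example.cpp -lEtaler
#include <Etaler/Etaler.hpp>

int main()
{
    et::Tensor t = et::ones({2, 2});
    return t.size() == 4 ? 0 : 1;
}
```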
23 changes: 23 additions & 0 deletions docs/source/UsingWithClingROOT.md
@@ -74,6 +74,29 @@ root [0] gSystem->Load("/usr/local/lib/libEtaler.so");
root [1] #include <Etaler/Etaler.hpp>
```
Or use a macro to load Etaler conveniently
```c++
// /usr/share/root/macros/load_etaler.C
#include <Etaler/Etaler.hpp>
#include <Etaler/Algorithms/SpatialPooler.hpp>
#include <Etaler/Algorithms/TemporalMemory.hpp>
#include <Etaler/Encoders/Scalar.hpp>
#include <Etaler/Encoders/GridCell1d.hpp>
#include <Etaler/Encoders/GridCell2d.hpp> // Add or remove headers as you want
#pragma cling load("/usr/local/lib/libEtaler.so") // Replace this with your path to Etaler
using namespace et;
void load_etaler(){}
```

Then call the macro in ROOT to load all of Etaler.

```cpp
> root
root [0] .x load_etaler.C
root [1] // Etaler is fully loaded
```

## Using Etaler under an interactive C++ shell

After loading the library, you can use it as you would normally. (ROOT imports the `std` namespace by default.)
57 changes: 52 additions & 5 deletions tests/common_tests.cpp
@@ -186,6 +186,9 @@ TEST_CASE("Testing Tensor", "[Tensor]")
CHECK_NOTHROW(requireProperties(ones(Shape{1}, DType::Int32).pimpl(), IsPlain()));
CHECK_NOTHROW(requireProperties(ones(Shape{1}, DType::Int32).view({0}).pimpl(), IsPlain()));
CHECK_THROWS(requireProperties(ones(Shape{4,4}, DType::Int32).view({range(2), range(2)}).pimpl(), IsPlain()));

CHECK_NOTHROW(requireProperties(ones({Shape{4, 4}}).pimpl(), Shape{4, 4}));
CHECK_THROWS(requireProperties(ones({Shape{4, 4}}).pimpl(), Shape{4}));
}

SECTION("Views") {
@@ -216,7 +219,9 @@ TEST_CASE("Testing Tensor", "[Tensor]")
CHECK_THROWS(t.view({0,0,0,0,0}));
CHECK_THROWS(t.view({300}));
CHECK_THROWS(t.view({0, 300}));
CHECK_THROWS(t.view({range(100)}));

// NumPy and PyTorch allow np.ones((4, 4))[:100]
CHECK_NOTHROW(t.view({range(100)}));

Tensor q = t.view({2,2});
CHECK(q.size() == 1);
@@ -361,24 +366,39 @@ TEST_CASE("Testing Tensor", "[Tensor]")
}

SECTION("iterator") {
// Reference: http://www.cplusplus.com/reference/iterator/RandomAccessIterator/
Tensor t = ones({3, 4});
Tensor q = zeros({3, 4});
REQUIRE(t.shape() == Shape{3, 4});
STATIC_REQUIRE(std::is_same_v<Tensor::iterator::value_type, Tensor>);

// Tensor::iterator should be random,
// Reference: http://www.cplusplus.com/reference/iterator/RandomAccessIterator/
// Tensor::iterator should be a random access iterator
STATIC_REQUIRE(std::is_same_v<std::iterator_traits<Tensor::iterator>::iterator_category, std::random_access_iterator_tag>);

//Is default-constructible, copy-constructible, copy-assignable and destructible
STATIC_REQUIRE(std::is_default_constructible_v<Tensor::iterator>);
STATIC_REQUIRE(std::is_copy_constructible_v<Tensor::iterator>);
STATIC_REQUIRE(std::is_copy_assignable_v<Tensor::iterator>);
STATIC_REQUIRE(std::is_destructible_v<Tensor::iterator>);


//Can be compared for equivalence using the equality/inequality operators
CHECK(t.begin() != t.end());
CHECK(t.begin() == t.begin());

//Can be dereferenced as an rvalue
CHECK((*t.begin()).shape() == Shape{4});
CHECK(t.begin()->shape() == Shape{4});

//Can be dereferenced as an lvalue
CHECK((*t.begin() = zeros({4})).isSame(zeros({4})));
CHECK((*t.begin() = ones({4})).isSame(ones({4}))); // Try a second time

//Supports the arithmetic operators + and - between an iterator and an integer value, or subtracting an iterator from another.
CHECK(t.end() - t.begin() == t.shape()[0]);
CHECK(t.begin()[2].isSame(*t.back()) == true);
CHECK(t.begin() + t.shape()[0] == t.end());
CHECK(t.end() - t.shape()[0] == t.begin());

//Can be incremented
auto it1 = t.begin(), it2 = t.begin();
it1++;
++it2;
Expand All @@ -387,9 +407,27 @@ TEST_CASE("Testing Tensor", "[Tensor]")
it2--;
CHECK(it1 == it2);

//Can be compared with inequality relational operators
CHECK(t.begin() < t.end());
CHECK(t.end() > t.begin());
CHECK(t.begin() <= t.begin());
CHECK(t.begin() >= t.begin());

//Supports compound assignment operations
it1 = t.begin();
it1 += 3;
CHECK(it1 == t.end());
it2 = t.end();
it2 -= 3;
CHECK(it2 == t.begin());

//Supports the offset dereference operator
CHECK(t.begin()[2].isSame(*t.back()) == true);

swap(*t.begin(), *q.begin());
CHECK(t[{0}].isSame(zeros({4})));

// Other misc checks I came up with
int num_iteration = 0;
for(auto s : t) {
CHECK(s.shape() == Shape({4}));
@@ -818,6 +856,15 @@ TEST_CASE("brodcast")
b = ones({7});
CHECK_THROWS(a+b);
}

SECTION("Manual brodcast") {
a = ones({2, 4});
b = ones({1, 1, 4});
auto [x, y] = a.brodcast(b);
CHECK(x.shape() == Shape({1, 2, 4}));
CHECK(y.shape() == Shape({1, 2, 4}));
CHECK(x.iscontiguous() == false);
}
}

TEST_CASE("SDRClassifer")
