
Commit bfc4c94

Switched to xtensor 0.9.0

1 parent 7e132bc

File tree: 9 files changed (+71 -45 lines)

.appveyor.yml (+1 -1)

@@ -23,7 +23,7 @@ install:
   - conda update -q conda
   - conda info -a
   - conda install gtest cmake -c conda-forge
-  - conda install xtensor==0.8.3 pytest numpy pybind11==2.1.1 -c conda-forge
+  - conda install xtensor==0.9.0 pytest numpy pybind11==2.1.1 -c conda-forge
   - "set PYTHONHOME=%MINICONDA%"
   - cmake -G "NMake Makefiles" -D CMAKE_INSTALL_PREFIX=%MINICONDA%\\Library -D BUILD_TESTS=ON -D PYTHON_EXECUTABLE=%MINICONDA%\\python.exe .
   - nmake test_xtensor_python

.travis.yml (+1 -1)

@@ -94,7 +94,7 @@ install:
   - conda update -q conda
   # Useful for debugging any issues with conda
   - conda info -a
-  - conda install xtensor==0.8.3 pytest numpy pybind11==2.1.1 -c conda-forge
+  - conda install xtensor==0.9.0 pytest numpy pybind11==2.1.1 -c conda-forge
   - cd test
   - conda env create -f ./test-environment.yml
   - source activate test-xtensor-python

README.md (+1 -1)

@@ -30,7 +30,7 @@ conda install -c conda-forge xtensor-python
 
 | `xtensor-python` | `xtensor` | `pybind11` |
 |------------------|-----------|------------|
-| master           | ^0.8.1    | ^2.1.0     |
+| master           | ^0.9.0    | ^2.1.0     |
 | 0.9.4            | ^0.8.1    | ^2.1.0     |
 | 0.9.3            | ^0.8.1    | ^2.1.0     |
 | 0.9.2            | ^0.8.1    | ^2.1.0     |
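Anyone building against master after this commit needs the matching pin; it is the same one the CI scripts above switch to:

    conda install xtensor==0.9.0 pybind11==2.1.1 -c conda-forge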

include/xtensor-python/pyarray.hpp (+11 -11)

@@ -145,8 +145,8 @@ namespace xt
         pyarray(pybind11::handle h, pybind11::object::stolen_t);
         pyarray(const pybind11::object& o);
 
-        explicit pyarray(const shape_type& shape, layout l = layout::row_major);
-        explicit pyarray(const shape_type& shape, const_reference value, layout l = layout::row_major);
+        explicit pyarray(const shape_type& shape, layout_type l = layout_type::row_major);
+        explicit pyarray(const shape_type& shape, const_reference value, layout_type l = layout_type::row_major);
         explicit pyarray(const shape_type& shape, const strides_type& strides, const_reference value);
         explicit pyarray(const shape_type& shape, const strides_type& strides);
 
@@ -238,47 +238,47 @@ namespace xt
     inline pyarray<T>::pyarray(const value_type& t)
         : base_type()
     {
-        base_type::reshape(xt::shape<shape_type>(t), layout::row_major);
+        base_type::reshape(xt::shape<shape_type>(t), layout_type::row_major);
         nested_copy(m_data.begin(), t);
     }
 
     template <class T>
     inline pyarray<T>::pyarray(nested_initializer_list_t<T, 1> t)
         : base_type()
     {
-        base_type::reshape(xt::shape<shape_type>(t), layout::row_major);
+        base_type::reshape(xt::shape<shape_type>(t), layout_type::row_major);
         nested_copy(m_data.begin(), t);
     }
 
     template <class T>
     inline pyarray<T>::pyarray(nested_initializer_list_t<T, 2> t)
         : base_type()
     {
-        base_type::reshape(xt::shape<shape_type>(t), layout::row_major);
+        base_type::reshape(xt::shape<shape_type>(t), layout_type::row_major);
         nested_copy(m_data.begin(), t);
     }
 
     template <class T>
     inline pyarray<T>::pyarray(nested_initializer_list_t<T, 3> t)
         : base_type()
     {
-        base_type::reshape(xt::shape<shape_type>(t), layout::row_major);
+        base_type::reshape(xt::shape<shape_type>(t), layout_type::row_major);
         nested_copy(m_data.begin(), t);
     }
 
     template <class T>
     inline pyarray<T>::pyarray(nested_initializer_list_t<T, 4> t)
         : base_type()
     {
-        base_type::reshape(xt::shape<shape_type>(t), layout::row_major);
+        base_type::reshape(xt::shape<shape_type>(t), layout_type::row_major);
         nested_copy(m_data.begin(), t);
     }
 
     template <class T>
     inline pyarray<T>::pyarray(nested_initializer_list_t<T, 5> t)
         : base_type()
     {
-        base_type::reshape(xt::shape<shape_type>(t), layout::row_major);
+        base_type::reshape(xt::shape<shape_type>(t), layout_type::row_major);
         nested_copy(m_data.begin(), t);
     }
 
@@ -310,7 +310,7 @@ namespace xt
      * @param l the layout of the pyarray
      */
     template <class T>
-    inline pyarray<T>::pyarray(const shape_type& shape, layout l)
+    inline pyarray<T>::pyarray(const shape_type& shape, layout_type l)
         : base_type()
     {
         strides_type strides(shape.size());
@@ -326,7 +326,7 @@ namespace xt
      * @param l the layout of the pyarray
      */
     template <class T>
-    inline pyarray<T>::pyarray(const shape_type& shape, const_reference value, layout l)
+    inline pyarray<T>::pyarray(const shape_type& shape, const_reference value, layout_type l)
         : base_type()
     {
         strides_type strides(shape.size());
@@ -415,7 +415,7 @@ namespace xt
     {
         shape_type shape = forward_sequence<shape_type>(e.derived_cast().shape());
         strides_type strides = make_sequence<strides_type>(shape.size(), size_type(0));
-        compute_strides(shape, layout::row_major, strides);
+        compute_strides(shape, layout_type::row_major, strides);
         init_array(shape, strides);
         semantic_base::assign(e);
     }
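The rename is mechanical but source-breaking for callers that spelled out the enum: in xtensor 0.9.0, xt::layout became xt::layout_type. A minimal sketch of the new spelling (the shape values are hypothetical, and any pyarray use assumes a running Python interpreter with numpy imported, as xtensor-python requires):

    #include "xtensor-python/pyarray.hpp"

    xt::pyarray<double> make_filled()
    {
        xt::pyarray<double>::shape_type shape = {2, 3};
        // old spelling no longer compiles: xt::layout::column_major
        xt::pyarray<double> a(shape, xt::layout_type::column_major);        // uninitialized
        return xt::pyarray<double>(shape, 1.5, xt::layout_type::row_major); // value-filled
    }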

include/xtensor-python/pycontainer.hpp (+23 -3)

@@ -80,10 +80,15 @@ namespace xt
         using broadcast_iterator = typename iterable_base::broadcast_iterator;
         using const_broadcast_iterator = typename iterable_base::broadcast_iterator;
 
+        static constexpr layout_type static_layout = layout_type::dynamic;
+        static constexpr bool contiguous_layout = false;
+
         void reshape(const shape_type& shape);
-        void reshape(const shape_type& shape, layout l);
+        void reshape(const shape_type& shape, layout_type l);
         void reshape(const shape_type& shape, const strides_type& strides);
 
+        layout_type layout() const;
+
         using base_type::operator();
         using base_type::operator[];
         using base_type::begin;
@@ -213,7 +218,7 @@ namespace xt
     {
         if (shape.size() != this->dimension() || !std::equal(shape.begin(), shape.end(), this->shape().begin()))
         {
-            reshape(shape, layout::row_major);
+            reshape(shape, layout_type::row_major);
         }
     }
 
@@ -223,7 +228,7 @@ namespace xt
      * @param l the new layout
      */
     template <class D>
-    inline void pycontainer<D>::reshape(const shape_type& shape, layout l)
+    inline void pycontainer<D>::reshape(const shape_type& shape, layout_type l)
     {
         strides_type strides = make_sequence<strides_type>(shape.size(), size_type(1));
         compute_strides(shape, l, strides);
@@ -242,6 +247,21 @@ namespace xt
         *static_cast<derived_type*>(this) = std::move(tmp);
     }
 
+    /**
+     * Return the layout_type of the container
+     * @return layout_type of the container
+     */
+    template <class D>
+    inline layout_type pycontainer<D>::layout() const
+    {
+        if (PyArray_CHKFLAGS(python_array(), NPY_ARRAY_C_CONTIGUOUS))
+            return layout_type::row_major;
+        else if (PyArray_CHKFLAGS(python_array(), NPY_ARRAY_F_CONTIGUOUS))
+            return layout_type::column_major;
+        else
+            return layout_type::dynamic;
+    }
+
     /**
      * Import the numpy Python module.
      */
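Because a pycontainer wraps a buffer owned by NumPy, its layout is a runtime property rather than a compile-time one: static_layout is dynamic, and the new layout() accessor classifies the buffer from the NumPy contiguity flags. Note the check order: a 1-D array carries both flags and is therefore reported as row_major. A short sketch of what callers can rely on (the function name is hypothetical):

    #include "xtensor-python/pyarray.hpp"

    // np.ones((2, 3))        -> row_major    (NPY_ARRAY_C_CONTIGUOUS)
    // np.asfortranarray(x)   -> column_major (NPY_ARRAY_F_CONTIGUOUS)
    // strided view x[:, ::2] -> dynamic      (neither flag set)
    bool is_row_major(const xt::pyarray<double>& a)
    {
        return a.layout() == xt::layout_type::row_major;
    }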

include/xtensor-python/pytensor.hpp (+8 -8)

@@ -123,8 +123,8 @@ namespace xt
         pytensor(pybind11::handle h, pybind11::object::stolen_t);
         pytensor(const pybind11::object& o);
 
-        explicit pytensor(const shape_type& shape, layout l = layout::row_major);
-        explicit pytensor(const shape_type& shape, const_reference value, layout l = layout::row_major);
+        explicit pytensor(const shape_type& shape, layout_type l = layout_type::row_major);
+        explicit pytensor(const shape_type& shape, const_reference value, layout_type l = layout_type::row_major);
         explicit pytensor(const shape_type& shape, const strides_type& strides, const_reference value);
         explicit pytensor(const shape_type& shape, const strides_type& strides);
 
@@ -197,7 +197,7 @@ namespace xt
     inline pytensor<T, N>::pytensor(nested_initializer_list_t<T, N> t)
         : base_type()
     {
-        base_type::reshape(xt::shape<shape_type>(t), layout::row_major);
+        base_type::reshape(xt::shape<shape_type>(t), layout_type::row_major);
         nested_copy(m_data.begin(), t);
     }
 
@@ -226,10 +226,10 @@ namespace xt
      * Allocates an uninitialized pytensor with the specified shape and
      * layout.
      * @param shape the shape of the pytensor
-     * @param l the layout of the pytensor
+     * @param l the layout_type of the pytensor
      */
     template <class T, std::size_t N>
-    inline pytensor<T, N>::pytensor(const shape_type& shape, layout l)
+    inline pytensor<T, N>::pytensor(const shape_type& shape, layout_type l)
     {
         compute_strides(shape, l, m_strides);
         init_tensor(shape, m_strides);
@@ -240,12 +240,12 @@ namespace xt
      * are initialized to the specified value.
      * @param shape the shape of the pytensor
      * @param value the value of the elements
-     * @param l the layout of the pytensor
+     * @param l the layout_type of the pytensor
      */
     template <class T, std::size_t N>
     inline pytensor<T, N>::pytensor(const shape_type& shape,
                                     const_reference value,
-                                    layout l)
+                                    layout_type l)
     {
         compute_strides(shape, l, m_strides);
         init_tensor(shape, m_strides);
@@ -322,7 +322,7 @@ namespace xt
     {
         shape_type shape = forward_sequence<shape_type>(e.derived_cast().shape());
         strides_type strides = make_sequence<strides_type>(N, size_type(0));
-        compute_strides(shape, layout::row_major, strides);
+        compute_strides(shape, layout_type::row_major, strides);
         init_tensor(shape, strides);
         semantic_base::assign(e);
     }
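The fixed-rank container gets the same mechanical rename; the rank parameter N is untouched. A brief sketch with hypothetical values (same interpreter caveat as for pyarray):

    #include "xtensor-python/pytensor.hpp"

    xt::pytensor<double, 2> make_tensor()
    {
        // shape_type is a fixed-size sequence for pytensor
        xt::pytensor<double, 2>::shape_type shape = {2, 3};
        // strides are computed for the requested layout, elements set to 0.0
        return xt::pytensor<double, 2>(shape, 0.0, xt::layout_type::column_major);
    }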

test/test_common.hpp (+16 -14)

@@ -162,14 +162,14 @@ namespace xt
         {
             SCOPED_TRACE("row_major reshape");
             row_major_result<C> rm;
-            vec.reshape(rm.m_shape, layout::row_major);
+            vec.reshape(rm.m_shape, layout_type::row_major);
             compare_shape(vec, rm);
         }
 
         {
             SCOPED_TRACE("column_major reshape");
             column_major_result<C> cm;
-            vec.reshape(cm.m_shape, layout::column_major);
+            vec.reshape(cm.m_shape, layout_type::column_major);
             compare_shape(vec, cm);
         }
 
@@ -183,12 +183,14 @@ namespace xt
         {
             SCOPED_TRACE("unit_shape reshape");
             unit_shape_result<C> usr;
-            vec.reshape(usr.m_shape, layout::row_major);
+            vec.reshape(usr.m_shape, layout_type::row_major);
             compare_shape(vec, usr);
         }
     }
 
-    template <class V, class C = std::vector<std::size_t>>
+    // TODO: add back when in place transpose methods have been added
+    // into xtensor
+    /*template <class V, class C = std::vector<std::size_t>>
     void test_transpose(V& vec)
     {
         using inner_shape_type = typename V::inner_shape_type;
@@ -280,7 +282,7 @@ namespace xt
             EXPECT_THROW(vec.transpose({1, 2}, check_policy::full()), transpose_error);
             EXPECT_THROW(vec.transpose({3, 0, 1}, check_policy::full()), transpose_error);
         }
-    }
+    }*/
 
     template <class V1, class V2>
     void assign_array(V1& dst, const V2& src)
@@ -313,7 +315,7 @@ namespace xt
         {
             SCOPED_TRACE("row_major access");
             row_major_result<C> rm;
-            vec.reshape(rm.m_shape, layout::row_major);
+            vec.reshape(rm.m_shape, layout_type::row_major);
             assign_array(vec, rm.m_assigner);
             EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), rm.m_data.cbegin()));
             EXPECT_EQ(vec(2, 1, 0), vec(2, 1));
@@ -324,7 +326,7 @@ namespace xt
         {
             SCOPED_TRACE("column_major access");
             column_major_result<C> cm;
-            vec.reshape(cm.m_shape, layout::column_major);
+            vec.reshape(cm.m_shape, layout_type::column_major);
             assign_array(vec, cm.m_assigner);
             EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), cm.m_data.cbegin()));
             EXPECT_EQ(vec(2, 1, 0), vec(2, 1));
@@ -346,7 +348,7 @@ namespace xt
         {
             SCOPED_TRACE("unit_shape access");
             unit_shape_result<C> usr;
-            vec.reshape(usr.m_shape, layout::row_major);
+            vec.reshape(usr.m_shape, layout_type::row_major);
             assign_array(vec, usr.m_assigner);
             EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), usr.m_data.cbegin()));
             EXPECT_EQ(vec(2, 0, 0), vec(2, 0));
@@ -382,7 +384,7 @@ namespace xt
         {
             SCOPED_TRACE("row_major access");
             row_major_result<C> rm;
-            vec.reshape(rm.m_shape, layout::row_major);
+            vec.reshape(rm.m_shape, layout_type::row_major);
             indexed_assign_array(vec, rm.m_assigner);
             EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), rm.m_data.cbegin()));
             EXPECT_EQ(vec(2, 1, 0), vec[index1]);
@@ -392,7 +394,7 @@ namespace xt
         {
             SCOPED_TRACE("column_major access");
             column_major_result<C> cm;
-            vec.reshape(cm.m_shape, layout::column_major);
+            vec.reshape(cm.m_shape, layout_type::column_major);
             indexed_assign_array(vec, cm.m_assigner);
             EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), cm.m_data.cbegin()));
             EXPECT_EQ(vec(2, 1, 0), vec[index1]);
@@ -412,7 +414,7 @@ namespace xt
         {
             SCOPED_TRACE("unit_shape access");
             unit_shape_result<C> usr;
-            vec.reshape(usr.m_shape, layout::row_major);
+            vec.reshape(usr.m_shape, layout_type::row_major);
             indexed_assign_array(vec, usr.m_assigner);
             EXPECT_TRUE(std::equal(vec.data().cbegin(), vec.data().cend(), usr.m_data.cbegin()));
             xindex id1 = { 2, 0 };
@@ -488,15 +490,15 @@ namespace xt
         {
             SCOPED_TRACE("row_major storage iterator");
             row_major_result<C> rm;
-            vec.reshape(rm.m_shape, layout::row_major);
+            vec.reshape(rm.m_shape, layout_type::row_major);
             std::copy(rm.data().cbegin(), rm.data().cend(), vec.begin());
             EXPECT_TRUE(std::equal(rm.data().cbegin(), rm.data().cend(), vec.data().cbegin()));
         }
 
         {
             SCOPED_TRACE("column_major storage iterator");
             column_major_result<C> cm;
-            vec.reshape(cm.m_shape, layout::column_major);
+            vec.reshape(cm.m_shape, layout_type::column_major);
             std::copy(cm.data().cbegin(), cm.data().cend(), vec.begin());
             EXPECT_TRUE(std::equal(cm.data().cbegin(), cm.data().cend(), vec.data().cbegin()));
         }
@@ -512,7 +514,7 @@ namespace xt
         {
             SCOPED_TRACE("unit_shape storage iterator");
             unit_shape_result<C> usr;
-            vec.reshape(usr.m_shape, layout::row_major);
+            vec.reshape(usr.m_shape, layout_type::row_major);
             std::copy(usr.data().cbegin(), usr.data().cend(), vec.begin());
             EXPECT_TRUE(std::equal(usr.data().cbegin(), usr.data().cend(), vec.data().cbegin()));
         }

test/test_pyarray.cpp (+4 -2)

@@ -37,13 +37,15 @@ namespace xt
             row_major_result<> rm;
             pyarray<int> ra(rm.m_shape);
             compare_shape(ra, rm);
+            EXPECT_EQ(layout_type::row_major, ra.layout());
         }
 
         {
             SCOPED_TRACE("column_major constructor");
             column_major_result<> cm;
-            pyarray<int> ca(cm.m_shape, layout::column_major);
+            pyarray<int> ca(cm.m_shape, layout_type::column_major);
             compare_shape(ca, cm);
+            EXPECT_EQ(layout_type::column_major, ca.layout());
         }
     }
 
@@ -70,7 +72,7 @@ namespace xt
             SCOPED_TRACE("column_major valued constructor");
             column_major_result<> cm;
             int value = 2;
-            pyarray<int> ca(cm.m_shape, value, layout::column_major);
+            pyarray<int> ca(cm.m_shape, value, layout_type::column_major);
             compare_shape(ca, cm);
             std::vector<int> vec(ca.size(), value);
             EXPECT_TRUE(std::equal(vec.cbegin(), vec.cend(), ca.data().cbegin()));
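The two new EXPECT_EQ checks tie the constructor's layout argument to the runtime layout() accessor added in pycontainer.hpp. One edge case the hunk does not exercise, sketched here in the same gtest style (hypothetical, not part of the commit): a 1-D array is both C- and F-contiguous, and since the accessor tests the C flag first it reports row_major.

    {
        SCOPED_TRACE("1-D layout classification");
        pyarray<int>::shape_type shape = {4};
        pyarray<int> pa(shape);
        // both contiguity flags are set; the C flag is checked first
        EXPECT_EQ(layout_type::row_major, pa.layout());
    }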
