/
soa_container.hpp
1270 lines (1174 loc) · 51.2 KB
/
soa_container.hpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#pragma once
#include "backtrace_utils.h"
#include "neuron/container/data_handle.hpp"
#include "neuron/container/generic_data_handle.hpp"
#include "neuron/container/soa_identifier.hpp"
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <functional>
#include <limits>
#include <memory>
#include <stdexcept>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
namespace neuron::container {
namespace detail {
// True iff T appears somewhere in the pack Ts... (fold over std::is_same_v;
// an empty pack yields false).
template <typename T, typename... Ts>
inline constexpr bool type_in_pack_v = (std::is_same_v<T, Ts> || ...);
// https://stackoverflow.com/a/67106430
// True iff no type appears more than once in the pack: the head type T is
// compared against every later type, then the check recurses on the tail.
template <typename T, typename... Types>
inline constexpr bool are_types_unique_v =
    (!std::is_same_v<T, Types> && ...) && are_types_unique_v<Types...>;
// Base case: a pack containing a single type is trivially unique.
template <typename T>
inline constexpr bool are_types_unique_v<T> = true;
// https://stackoverflow.com/a/18063608
// Zero-based position of T inside a std::tuple of types, found by peeling off
// the head type until T sits at the front. Instantiating this with a tuple
// that does not contain T is a compile error (the primary template is
// undefined), which index_of_type_v below guards against.
template <typename T, typename Tuple>
struct index_of_type_helper;
// Base case: T is the first tuple element -> index 0.
template <typename T, typename... Ts>
struct index_of_type_helper<T, std::tuple<T, Ts...>>
    : std::integral_constant<std::size_t, 0> {};
// Recursive case: skip the head U and add one to the index within the tail.
template <typename T, typename U, typename... Ts>
struct index_of_type_helper<T, std::tuple<U, Ts...>>
    : std::integral_constant<std::size_t,
                             1 + index_of_type_helper<T, std::tuple<Ts...>>::value> {};
// Zero-based position of T inside the pack Ts..., with friendlier diagnostics
// than instantiating index_of_type_helper directly: the static_asserts fire
// first, and the helper is only instantiated when both preconditions hold.
template <typename T, typename... Ts>
inline constexpr std::size_t index_of_type_v = []() {
    constexpr bool Ts_are_unique = are_types_unique_v<Ts...>;
    constexpr bool T_is_in_Ts = type_in_pack_v<T, Ts...>;
    static_assert(Ts_are_unique,
                  "index_of_type_v<T, Ts...> assumes there are no duplicates in Ts...");
    static_assert(T_is_in_Ts, "index_of_type_v<T, Ts...> assumes that T occurs in Ts...");
    // make the error message better by avoiding instantiating index_of_type_helper if the
    // assertions fail
    if constexpr (Ts_are_unique && T_is_in_Ts) {
        return index_of_type_helper<T, std::tuple<Ts...>>::value;
    } else {
        return std::numeric_limits<std::size_t>::max();  // unreachable without hitting
                                                         // static_assert
    }
}();
// Detect if a type T has a non-static member function called default_value
// (SFINAE on the expression std::declval<T>().default_value()). Used by
// field_data::set_active below to fill newly-enabled storage with defaults.
template <typename T, typename = void>
inline constexpr bool has_default_value_v = false;
template <typename T>
inline constexpr bool
    has_default_value_v<T, std::void_t<decltype(std::declval<T>().default_value())>> = true;
// Get the array dimension for a given field within a given tag, or 1 if the array_dimension
// function is not defined in the tag type.
//
// Dispatch works by overload resolution on the second argument: callers pass
// nullptr (for tags whose array_dimension takes no index) or an int field
// index. The std::nullptr_t overload is an exact match and wins whenever
// t.array_dimension() is well-formed; the variadic overload has the lowest
// possible rank and so is only chosen as a fallback.
template <typename T>
auto get_array_dimension(T const& t, std::nullptr_t /* higher precedence than the fallback case */)
    -> decltype(t.array_dimension(), 0) {
    return t.array_dimension();
}
// Per-field overload for tags whose array_dimension takes a field index.
template <typename T>
auto get_array_dimension(T const& t, int i) -> decltype(t.array_dimension(i), 0) {
    return t.array_dimension(i);
}
// Fallback: the tag defines no matching array_dimension -> dimension 1.
template <typename T>
int get_array_dimension(T const&, ...) {
    return 1;
}
// Detect if a type T has a non-static member function called num_variables().
// Primary template: assume the member is absent.
template <typename T, typename = void>
struct has_num_variables: std::false_type {};
// Chosen via SFINAE when std::declval<T>().num_variables() is well-formed.
template <typename T>
struct has_num_variables<T, decltype(void(std::declval<T>().num_variables()))>
    : std::true_type {};
// Convenience variable template mirroring the trait.
template <typename T>
inline constexpr bool has_num_variables_v = has_num_variables<T>::value;
// Get the value of a static member variable called optional, or false if it doesn't exist.
template <typename T, typename = void>
struct optional: std::false_type {};
template <typename T>
struct optional<T, std::void_t<decltype(T::optional)>> {
constexpr static bool value = T::optional;
};
template <typename T>
inline constexpr bool optional_v = optional<T>::value;
// How the storage for a given tag type is implemented; derived below from the
// presence of num_variables() and the optional flag on the tag type.
enum struct FieldImplementation {
    AlwaysSingle,    // field always exists -> std::vector<T>
    OptionalSingle,  // field exists 0 or 1 time -> std::vector<T> that might be skipped
    RuntimeVariable  // field is duplicated a number of times that is set at runtime ->
                     // std::vector<std::vector<T>>
};
// num_variables() present -> RuntimeVariable; otherwise T::optional == true
// -> OptionalSingle; otherwise AlwaysSingle.
template <typename Tag>
inline constexpr FieldImplementation field_impl_v =
    (has_num_variables_v<Tag> ? FieldImplementation::RuntimeVariable
                              : (optional_v<Tag> ? FieldImplementation::OptionalSingle
                                                 : FieldImplementation::AlwaysSingle));
// Get a name for a given field within a given tag.
// Preferred overload: chosen when the tag provides name(field_index).
template <typename Tag>
auto get_name_impl(Tag const& tag, int field_index, std::nullptr_t)
    -> decltype(static_cast<void>(tag.name(field_index)), std::string()) {
    return tag.name(field_index);
}
// Fallback: demangle the tag's type name, append "#<field_index>" when a
// non-negative index was given, and strip a leading "neuron::container::"
// for brevity.
template <typename Tag>
std::string get_name_impl(Tag const& tag, int field_index, ...) {
    auto ret = cxx_demangle(typeid(Tag).name());
    if (field_index >= 0) {
        ret.append(1, '#');
        ret.append(std::to_string(field_index));
    }
    constexpr std::string_view prefix{"neuron::container::"};
    if (std::string_view{ret}.substr(0, prefix.size()) == prefix) {
        ret.erase(0, prefix.size());
    }
    return ret;
}
/**
 * @brief Get the nicest available name for the field_index-th instance of Tag.
 *
 * This should elegantly handle field_index == -1 (=> the tag doesn't have num_variables()) and
 * field_index being out of range.
 */
template <typename Tag>
auto get_name(Tag const& tag, int field_index) {
    if constexpr (has_num_variables_v<Tag>) {
        if (field_index >= 0 && field_index < tag.num_variables()) {
            // use tag.name(field_index) if it's available, otherwise fall back
            return get_name_impl(tag, field_index, nullptr);
        }
    }
    // no num_variables() or invalid field_index, use the fallback
    return get_name_impl(tag, field_index, 1 /* does not match nullptr */);
}
// Tag for the implicit index column that every soa<...> container carries; the
// stored type is the non-owning row identifier. This is the tag passed to the
// callable when soa::for_all_vectors visits m_indices.
struct index_column_tag {
    using type = non_owning_identifier_without_container;
};
/**
 * @brief Verify that @p range contains each of 0 .. size-1 exactly once.
 * @throws std::runtime_error if the range has the wrong size, contains a
 *         value outside [0, size), or contains a repeated value.
 */
template <typename Rng>
void check_permutation_vector(Rng const& range, std::size_t size) {
    if (range.size() != size) {
        throw std::runtime_error("invalid permutation vector: wrong size");
    }
    // One flag per target position; a valid permutation sets each exactly once.
    std::vector<bool> visited(size, false);
    for (auto const value: range) {
        auto const in_range = value >= 0 && value < size;
        if (!in_range) {
            throw std::runtime_error("invalid permutation vector: value out of range");
        }
        if (visited[value]) {
            throw std::runtime_error("invalid permutation vector: repeated value " +
                                     std::to_string(value));
        }
        visited[value] = true;
    }
}
// Passed to for_all_vectors to say whether the callable may reallocate the
// storage vectors; if Yes, cached .data() pointers are refreshed afterwards.
enum struct may_cause_reallocation { Yes, No };
// Global sink for small bookkeeping arrays that must outlive their container;
// see defer_delete below.
extern std::vector<void*>* defer_delete_storage;
/**
 * @brief Storage for safe deletion of soa<...> containers.
 *
 * This is intended to prevent deleting an instance of a soa<...>-based container from invalidating
 * any existing data handles, by keeping certain (small) values alive. Deleting these containers is
 * probably not common (e.g. deleting a mechanism type), and only small bookkeeping-related values
 * have to be kept alive. Generally defer_delete_storage is non-null for the lifetime of the top
 * level Model structure, and the Model destructor deallocates (using delete[]) the pointers that
 * are stored inside defer_delete_storage.
 */
template <typename T>
void defer_delete(std::unique_ptr<T[]> data) {
    static_assert(std::is_trivially_destructible_v<T>, "defer_delete does not call destructors");
    // If there is nowhere to defer to (defer_delete_storage is null), the
    // array is freed immediately when `data` goes out of scope.
    if (data && defer_delete_storage) {
        defer_delete_storage->push_back(data.release());
    }
}
/**
 * @brief Storage manager for a tag type with a single (possibly optional) field.
 *
 * Covers FieldImplementation::AlwaysSingle (field always present) and
 * FieldImplementation::OptionalSingle (field present zero or one times; can be
 * toggled at runtime via set_active()). Tag types that define num_variables()
 * are handled by the RuntimeVariable specialisation below.
 */
template <typename Tag, FieldImplementation impl>
struct field_data {
    static_assert(impl == FieldImplementation::AlwaysSingle ||
                  impl == FieldImplementation::OptionalSingle);
    using data_type = typename Tag::type;
    static_assert(!has_num_variables_v<Tag>);
    /**
     * @brief Construct from a tag instance.
     *
     * The second argument to get_array_dimension must be nullptr so that the
     * overload returning tag.array_dimension() is selected when that member
     * exists. A one-argument call would only match the variadic fallback
     * (an ellipsis matches zero arguments) and always yield 1, silently
     * ignoring the tag's real array dimension.
     */
    field_data(Tag tag)
        : m_tag{std::move(tag)}
        , m_array_dim{get_array_dimension(m_tag, nullptr)} {
        if constexpr (impl == FieldImplementation::AlwaysSingle) {
            // Always-present fields allocate the one-element cache of .data()
            // pointers immediately; for optional fields a null m_data_ptr
            // encodes "inactive" (see set_active()).
            m_data_ptr = std::make_unique<data_type*[]>(1);
        }
    }
    ~field_data() {
        // An unknown number of data_handle<T> in the wild may be holding references to m_data_ptr
        defer_delete(std::move(m_data_ptr));
    }
    /**
     * @brief Return a reference to the tag instance.
     */
    Tag const& tag() const {
        return m_tag;
    }
    /**
     * @brief Invoke the given callable on the (single) storage vector.
     * @tparam might_reallocate Might the callable reallocate the vector?
     * @param callable Invoked as callable(tag, vector, field_index, array_dim);
     *        field_index is -1 because there is only one field.
     *
     * If might_reallocate is Yes, the cached .data() pointer is refreshed after
     * the callable returns. Inactive optional fields are skipped entirely.
     */
    template <may_cause_reallocation might_reallocate, typename Callable>
    void for_all_vectors(Callable const& callable) {
        if constexpr (impl == FieldImplementation::OptionalSingle) {
            if (!m_data_ptr) {
                // inactive, optional field
                return;
            }
        }
        callable(m_tag, m_storage, -1, m_array_dim);
        if constexpr (might_reallocate == may_cause_reallocation::Yes) {
            m_data_ptr[0] = m_storage.data();
        }
    }
    /**
     * @brief Const overload: cannot reallocate, so no pointer refresh needed.
     */
    template <typename Callable>
    void for_all_vectors(Callable const& callable) const {
        if constexpr (impl == FieldImplementation::OptionalSingle) {
            if (!m_data_ptr) {
                // inactive, optional field
                return;
            }
        }
        callable(m_tag, m_storage, -1, m_array_dim);
    }
    /**
     * @brief Query whether an optional field is currently enabled.
     */
    [[nodiscard]] bool active() const {
        static_assert(impl == FieldImplementation::OptionalSingle);
        return bool{m_data_ptr};
    }
    /**
     * @brief Enable or disable an optional field.
     * @param enable New state of the field; no-op if unchanged.
     * @param size   Current number of rows in the container, used to size the
     *               storage (size * array_dim elements) when enabling.
     */
    void set_active(bool enable, std::size_t size) {
        static_assert(impl == FieldImplementation::OptionalSingle);
        if (enable == active()) {
            return;
        }
        if (enable) {
            // make sure the storage is allocated + the right size + full of default values
            assert(m_storage.empty());  // it should be starting off empty
            if constexpr (has_default_value_v<Tag>) {
                m_storage.resize(size * m_array_dim, m_tag.default_value());
            } else {
                m_storage.resize(size * m_array_dim);
            }
            m_data_ptr = std::make_unique<data_type*[]>(1);
            m_data_ptr[0] = m_storage.data();
        } else {
            // clear + free the storage
            m_storage.clear();
            m_storage.shrink_to_fit();
            // data handles may be holding pointers to m_data_ptr (which is the reason for the
            // deferred deletion); signal to them that they are no longer valid by writing nullptr
            // here
            m_data_ptr[0] = nullptr;
            defer_delete(std::move(m_data_ptr));
        }
    }
    /**
     * @brief Pointer to the one-element array of cached .data() pointers.
     */
    [[nodiscard]] data_type* const* data_ptrs() const {
        return m_data_ptr.get();
    }
    /**
     * @brief Pointer to the one-element array of array dimensions.
     */
    [[nodiscard]] int const* array_dims() const {
        return &m_array_dim;
    }
    /**
     * @brief Prefix sum over a single element is just that element.
     */
    [[nodiscard]] int const* array_dim_prefix_sums() const {
        return &m_array_dim;
    }

  private:
    /**
     * @brief Tag type instance.
     *
     * An instance of @c soa contains an instance of @c field_data for each tag type in its @c
     * Tags... pack. The instance of the tag type contains the metadata about the field it
     * represents, and @c field_data adds the actual data for that field. For example, with @c Tag =
     * @c Node::field::Voltage, which represents the voltage in a given Node, @c m_tag is just an
     * empty type that defines the @c data_type and default value of voltages.
     */
    Tag m_tag;
    /**
     * @brief Storage for the data associated with @c Tag.
     *
     * This is one of the "large" data arrays holding the model data. Because this specialisation of
     * @c field_data is for @c Tag types that @b don't have @c num_variables() members, such as @c
     * Node::field::Voltage, there is exactly one vector per instance of @c field_data. Because the
     * fields in @c Node::storage all have array dimension 1, in that case the size of this vector
     * is the number of Node instances in the program.
     */
    std::vector<data_type> m_storage;
    /**
     * @brief Storage where we maintain an up-to-date cache of @c m_storage.data().
     * @invariant @c m_storage.data() is equal to @c m_data_ptr.
     * @see field_data<Tag, true>::m_data_ptrs %for the motivation.
     *
     * This is declared as an array (of size 1) to simplify the implementation of defer_delete.
     * For FieldImplementation::OptionalSingle then whether or not this is null encodes whether
     * or not the field is active. For FieldImplementation::AlwaysSingle it is never null.
     */
    std::unique_ptr<data_type*[]> m_data_ptr;
    /**
     * @brief Array dimension of the data associated with @c Tag.
     * @invariant @c m_array_dim is equal to @c m_tag.array_dimension(), if that function exists,
     * or 1.
     * @see field_data<Tag, true>::m_array_dims %for the motivation.
     */
    int m_array_dim;
};
/**
 * @brief Storage manager for a tag type that implements num_variables().
 *
 * An illustrative example is that this is responsible for the storage associated with floating
 * point mechanism data, where the number of fields is set at runtime via num_variables.
 *
 * As well as owning the actual storage containers, this type maintains two spans of values that
 * can be used by other types, in particular neuron::cache::MechanismRange:
 * - array_dims() returns a pointer to the first element of a num_variables()-sized range holding
 *   the array dimensions of the variables.
 * - array_dim_prefix_sums() returns a pointer to the first element of a num_variables()-sized
 *   range holding the prefix sum over the array dimensions (i.e. if array_dims() returns [1, 2, 1]
 *   then array_dim_prefix_sums() returns [1, 3, 4]).
 * - data_ptrs() returns a pointer to the first element of a num_variables()-sized range holding
 *   pointers to the start of the storage associated with each variable (i.e. the result of calling
 *   data() on the underlying vector).
 *
 * This is a helper type for use by neuron::container::soa and it should not be used directly.
 */
template <typename Tag>
struct field_data<Tag, FieldImplementation::RuntimeVariable> {
    using data_type = typename Tag::type;
    static_assert(has_num_variables_v<Tag>);
    /**
     * @brief Construct from a tag instance.
     *
     * Allocates num_variables() storage vectors and fills the cached
     * data-pointer, array-dimension and prefix-sum arrays.
     */
    field_data(Tag tag)
        : m_tag{std::move(tag)}
        , m_storage{m_tag.num_variables()}
        , m_data_ptrs{std::make_unique<data_type*[]>(m_tag.num_variables())} {
        update_data_ptr_storage();
        auto const num = m_tag.num_variables();
        m_array_dims.reserve(num);
        m_array_dim_prefix_sums.reserve(num);
        for (auto i = 0; i < m_tag.num_variables(); ++i) {
            m_array_dims.push_back(get_array_dimension(m_tag, i));
            // running total: prefix sum entry i = sum of dims 0..i
            m_array_dim_prefix_sums.push_back(
                (m_array_dim_prefix_sums.empty() ? 0 : m_array_dim_prefix_sums.back()) +
                m_array_dims.back());
        }
    }
    ~field_data() {
        // An unknown number of data_handle<T> in the wild may be holding references to m_data_ptrs
        defer_delete(std::move(m_data_ptrs));
    }
    /**
     * @brief Return a reference to the tag instance.
     */
    Tag const& tag() const {
        return m_tag;
    }
    /**
     * @brief Return a pointer to an array of array dimensions for this tag.
     *
     * This avoids indirection via the tag type instances. Because array dimensions are not
     * permitted to change, this is guaranteed to remain valid as long as the underlying soa<...>
     * container does. This is mainly intended for use in neuron::cache::MechanismRange and friends.
     */
    [[nodiscard]] int const* array_dims() const {
        return m_array_dims.data();
    }
    /**
     * @brief Return a pointer to an array of the prefix sum of array dimensions for this tag.
     */
    [[nodiscard]] int const* array_dim_prefix_sums() const {
        return m_array_dim_prefix_sums.data();
    }
    /**
     * @brief Validate a (field_index, array_index) pair for this tag.
     * @return The array dimension of the given field.
     * @throws std::runtime_error if either index is out of range.
     */
    [[nodiscard]] int check_array_dim(int field_index, int array_index) const {
        assert(field_index >= 0);
        assert(array_index >= 0);
        if (auto const num_fields = m_tag.num_variables(); field_index >= num_fields) {
            throw std::runtime_error(get_name(m_tag, field_index) + "/" +
                                     std::to_string(num_fields) + ": out of range");
        }
        auto const array_dim = m_array_dims[field_index];
        if (array_index >= array_dim) {
            throw std::runtime_error(get_name(m_tag, field_index) + ": index " +
                                     std::to_string(array_index) + " out of range");
        }
        return array_dim;
    }
    /**
     * @brief Return a pointer to an array of data pointers for this tag.
     *
     * This array is guaranteed to be kept up to date when the actual storage is re-allocated.
     * This is mainly intended for use in neuron::cache::MechanismRange and friends.
     */
    [[nodiscard]] data_type* const* data_ptrs() const {
        return m_data_ptrs.get();
    }
    /**
     * @brief Invoke the given callable for each vector.
     *
     * @tparam might_reallocate Might the callable cause reallocation of the vector it is given?
     * @param callable Callable to invoke, as callable(tag, vector, field_index, array_dim).
     *
     * If might_reallocate is Yes, the cached .data() pointers are refreshed afterwards.
     */
    template <may_cause_reallocation might_reallocate, typename Callable>
    void for_all_vectors(Callable const& callable) {
        for (auto i = 0; i < m_storage.size(); ++i) {
            callable(m_tag, m_storage[i], i, m_array_dims[i]);
        }
        if constexpr (might_reallocate == may_cause_reallocation::Yes) {
            update_data_ptr_storage();
        }
    }
    /**
     * @brief Const overload: cannot reallocate, so no pointer refresh needed.
     */
    template <typename Callable>
    void for_all_vectors(Callable const& callable) const {
        for (auto i = 0; i < m_storage.size(); ++i) {
            callable(m_tag, m_storage[i], i, m_array_dims[i]);
        }
    }
    // TODO actually use this
    // TODO use array_dim_prefix_sums
    /**
     * @brief Translate a flat legacy index into a (field, array_index) pair.
     *
     * Walks the fields accumulating array dimensions until the legacy index
     * falls inside the current field's span.
     * @throws std::runtime_error if legacy_index is past the last field.
     */
    [[nodiscard]] field_index translate_legacy_index(int legacy_index) const {
        int total{};
        auto const num_fields = m_tag.num_variables();
        for (auto field = 0; field < num_fields; ++field) {
            auto const array_dim = m_array_dims[field];
            if (legacy_index < total + array_dim) {
                auto const array_index = legacy_index - total;
                return {field, array_index};
            }
            total += array_dim;
        }
        throw std::runtime_error("could not translate legacy index " +
                                 std::to_string(legacy_index));
    }

  private:
    // Refresh the cached .data() pointer for every storage vector.
    void update_data_ptr_storage() {
        std::transform(m_storage.begin(), m_storage.end(), m_data_ptrs.get(), [](auto& vec) {
            return vec.data();
        });
    }
    /**
     * @brief Tag type instance.
     *
     * An instance of @c soa contains an instance of @c field_data for each tag type in its @c
     * Tags... pack. The instance of the tag type contains the metadata about the field it
     * represents, and @c field_data adds the actual data for that field. For example, with @c Tag =
     * @c Mechanism::field::FloatingPoint, which represents RANGE variables in mechanisms, @c m_tag
     * holds the names and array dimensions of the RANGE variables.
     */
    Tag m_tag;
    /**
     * @brief Storage for the data associated with @c Tag.
     *
     * These are the "large" data arrays holding the model data. Because this specialisation of @c
     * field_data is for @c Tag types that @b do have @c num_variables() members, such as @c
     * Mechanism::field::FloatingPoint, there is an outer vector with this dimension.
     *
     * @invariant @c m_storage.size() is equal to @c m_tag.num_variables()
     *
     * For Mechanism data, this size is equal to the number of RANGE variables, while
     * @c m_storage[i].size() is (assuming an array dimension of 1) the number of instances (in this
     * case of the given Mechanism type) that exist in the program.
     */
    std::vector<std::vector<data_type>> m_storage;
    /**
     * @brief Storage where we maintain an up-to-date cache of .data() pointers from m_storage.
     * @invariant @c m_data_ptrs contains @c m_storage.size() elements
     * @invariant @c m_storage[i].data() is equal to @c m_data_ptrs[i] for all @c i.
     *
     * This is useful because it allows @c data_handle<T> to store something like @c T** instead of
     * having to store something like @c std::vector<T>*, which avoids hardcoding unnecessary
     * details about the allocator and so on, and allows @c cache::MechanismRange to similarly have
     * a C-like interface. Because @c data_handle<T> remembers references to this, we cannot free
     * it when the container is destroyed (e.g. when a mechanism type is deleted).
     */
    std::unique_ptr<data_type*[]> m_data_ptrs;
    /**
     * @brief Array dimensions of the data associated with @c Tag.
     * @invariant @c m_storage.size() is equal to @c m_array_dims.size()
     * @invariant @c m_array_dims[i] is equal to @c m_tag.array_dimension(i), if that function
     * exists, or otherwise 1, for all @c i
     *
     * Similar to @c m_data_ptrs, this allows the array dimensions to be communicated simply across
     * a C-like interface.
     */
    std::vector<int> m_array_dims;
    /**
     * @brief Prefix sum over array dimensions for the data associated with @c Tag.
     * @invariant @c m_storage.size() is equal to @c m_array_dim_prefix_sums.size()
     * @invariant @c m_array_dim_prefix_sums[i] is equal to the sum of @c m_array_dims[0] .. @c
     * m_array_dims[i] for all @c i.
     * @todo This could be used to more efficiently convert legacy indices.
     *
     * This is mainly useful for logic that aids the transition from AoS to SoAoS format in NEURON.
     * For example, the size of the old @c _p vectors in NEURON was @c
     * m_array_dim_prefix_sums.back(), the sum over all array dimensions, which is generally larger
     * than @c m_tag.num_variables().
     */
    std::vector<int> m_array_dim_prefix_sums;
};
// Concrete implementation of utils::storage_info that simply returns the
// values stored in its public members, which are filled in directly by
// whoever creates the object.
struct storage_info_impl: utils::storage_info {
    std::string_view container() const override {
        return m_container;
    }
    std::string_view field() const override {
        return m_field;
    }
    std::size_t size() const override {
        return m_size;
    }
    std::string m_container{}, m_field{};
    std::size_t m_size{};
};
} // namespace detail
/** @brief Token whose lifetime manages frozen/sorted state of a container.
 *
 * Created by soa::get_sorted_token(); the container's frozen reference count
 * is decremented when an engaged token is destroyed. Move-only, so exactly
 * one token releases each count.
 */
template <typename Container>
struct state_token {
    constexpr state_token() = default;
    // Moving leaves `other` disengaged (null m_container) so its destructor
    // becomes a no-op.
    constexpr state_token(state_token&& other)
        : m_container{std::exchange(other.m_container, nullptr)} {}
    constexpr state_token(state_token const&) = delete;
    // NOTE(review): move-assignment does not release the count held by the
    // current *this before overwriting m_container, so assigning onto an
    // engaged token leaks one frozen count -- confirm this is intentional.
    constexpr state_token& operator=(state_token&& other) {
        m_container = std::exchange(other.m_container, nullptr);
        return *this;
    }
    constexpr state_token& operator=(state_token const&) = delete;
    ~state_token() {
        if (m_container) {
            m_container->decrease_frozen_count();
        }
    }

  private:
    template <typename, typename...>
    friend struct soa;
    // Only soa<...> (a friend) can create an engaged token.
    constexpr state_token(Container& container)
        : m_container{&container} {}
    Container* m_container{};
};
/**
* @brief Utility for generating SOA data structures.
* @headerfile neuron/container/soa_container.hpp
* @tparam Storage Name of the actual storage type derived from soa<...>.
* @tparam Tags Parameter pack of tag types that define the columns
* included in the container. Types may not be repeated.
*
* This CRTP base class is used to implement the ~global SOA storage structs
* that hold (so far) Node and Mechanism data. Ownership of rows in these
* structs is managed via instances of the owning identifier type @ref
* neuron::container::owning_identifier instantiated with Storage, and
* non-owning reference to rows in the data structures are managed via instances
* of the @ref neuron::container::non_owning_identifier template instantiated
* with Storage. These identifiers are typically wrapped in a
* data-structure-specific (i.e. Node- or Mechanism-specific) interface type
* that provides data-structure-specific accessors and methods to obtain actual
* data values and more generic handle types such as @ref
* neuron::container::data_handle<T> and @ref
* neuron::container::generic_data_handle.
*/
template <typename Storage, typename... Tags>
struct soa {
/**
 * @brief Construct with default-constructed tag type instances.
 *
 * Delegates to the tag-instance constructor below.
 */
soa()
    : soa(Tags{}...) {}
/**
 * @brief Construct with specific tag instances.
 *
 * This is useful if the tag types are not empty, for example if the number
 * of times a column is duplicated is a runtime value. The tag instances are
 * moved into the per-field storage managers in m_data.
 */
soa(Tags... tag_instances)
    : m_data{std::move(tag_instances)...} {}
// Handles store raw pointers into this container's storage, so the container's
// address must remain stable; all copy/move operations are therefore deleted.
/**
 * @brief @ref soa is not movable
 *
 * This is to make it harder to accidentally invalidate pointers-to-storage
 * in handles.
 */
soa(soa&&) = delete;
/**
 * @brief @ref soa is not copiable
 *
 * This is partly to make it harder to accidentally invalidate
 * pointers-to-storage in handles, and partly because it could be very
 * expensive so it might be better to be more explicit.
 */
soa(soa const&) = delete;
/**
 * @brief @ref soa is not move assignable
 *
 * For the same reason it isn't movable.
 */
soa& operator=(soa&&) = delete;
/**
 * @brief @ref soa is not copy assignable
 *
 * For the same reasons it isn't copy constructible.
 */
soa& operator=(soa const&) = delete;
/**
* @brief Get the size of the container.
*/
[[nodiscard]] std::size_t size() const {
// Check our various std::vector members are still the same size as each
// other. This check could be omitted in release builds...
auto const check_size = m_indices.size();
for_all_vectors(
[check_size](auto const& tag, auto const& vec, int field_index, int array_dim) {
auto const size = vec.size();
assert(size % array_dim == 0);
assert(size / array_dim == check_size);
});
return check_size;
}
/**
 * @brief Test if the container has no rows.
 *
 * Asserts (debug builds only) that every column agrees with the index
 * column about emptiness.
 */
[[nodiscard]] bool empty() const {
    bool const no_rows = m_indices.empty();
    for_all_vectors([no_rows](auto const& /* tag */, auto const& vec, int, int) {
        assert(vec.empty() == no_rows);
    });
    return no_rows;
}
private:
/**
 * @brief Remove the @f$i^{\text{th}}@f$ row from the container.
 *
 * This is currently implemented by swapping the last element into position
 * @f$i@f$ (if those are not the same element) and reducing the size by one.
 * Iterators to the last element and the deleted element will be
 * invalidated.
 *
 * Throws if the container is frozen; otherwise flags it as unsorted. Each
 * column stores array_dim values per row, so row i occupies the half-open
 * range [i*array_dim, (i+1)*array_dim) of each vector.
 */
void erase(std::size_t i) {
    if (m_frozen_count) {
        throw_error("erase() called on a frozen structure");
    }
    mark_as_unsorted_impl<true>();
    auto const old_size = size();
    assert(i < old_size);
    if (i != old_size - 1) {
        // Swap ranges of size array_dim at logical positions `i` and `old_size - 1` in each
        // vector
        for_all_vectors<detail::may_cause_reallocation::No>(
            [i](auto const& tag, auto& vec, int field_index, int array_dim) {
                ::std::swap_ranges(::std::next(vec.begin(), i * array_dim),
                                   ::std::next(vec.begin(), (i + 1) * array_dim),
                                   ::std::prev(vec.end(), array_dim));
            });
        // Tell the new entry at `i` that its index is `i` now.
        m_indices[i].set_current_row(i);
    }
    // Shrinking a vector never reallocates, so cached .data() pointers stay
    // valid (hence may_cause_reallocation::No).
    for_all_vectors<detail::may_cause_reallocation::No>(
        [new_size = old_size - 1](auto const& tag, auto& vec, int field_index, int array_dim) {
            vec.resize(new_size * array_dim);
        });
}
// state_token calls the private decrease_frozen_count(); owning_identifier
// needs access to private row-management members such as erase().
friend struct state_token<Storage>;
friend struct owning_identifier<Storage>;
static_assert(detail::are_types_unique_v<Tags...>, "All tag types should be unique");
// Position of Tag within Tags..., used to index the m_data tuple.
template <typename Tag>
static constexpr std::size_t tag_index_v = detail::index_of_type_v<Tag, Tags...>;
/**
 * @brief Apply the given function to non-const versions of all vectors.
 *
 * @tparam might_reallocate Might the callable trigger reallocation of the vectors?
 * @param callable Callable to invoke on each vector, as
 *        callable(tag, vector, field_index, array_dim); the index column is
 *        visited first, with field_index == -1 and array_dim == 1.
 *
 * If might_allocate is true then the "cached" values of .data() for each vector will be
 * updated.
 */
template <detail::may_cause_reallocation might_reallocate, typename Callable>
void for_all_vectors(Callable const& callable) {
    // might_reallocate is not relevant for m_indices because we do not expose the location of
    // its storage, so it doesn't matter whether or not this triggers reallocation
    callable(detail::index_column_tag{}, m_indices, -1, 1);
    (std::get<tag_index_v<Tags>>(m_data).template for_all_vectors<might_reallocate>(callable),
     ...);
}
/**
 * @brief Apply the given function to const-qualified versions of all vectors.
 *
 * Because of the const qualification this cannot cause reallocation and trigger updates of
 * pointers inside m_data, so no might_reallocate parameter is needed. The
 * visiting order matches the non-const overload: index column first.
 */
template <typename Callable>
void for_all_vectors(Callable const& callable) const {
    callable(detail::index_column_tag{}, m_indices, -1, 1);
    (std::get<tag_index_v<Tags>>(m_data).for_all_vectors(callable), ...);
}
/**
 * @brief Flag that the storage is no longer frozen.
 *
 * This is called from the destructor of state_token.
 */
void decrease_frozen_count() {
    // Must balance the increment performed in get_sorted_token().
    assert(m_frozen_count);
    --m_frozen_count;
}
public:
/**
 * @brief Return type of get_sorted_token()
 */
using sorted_token_type = state_token<Storage>;
/**
 * @brief Mark the container as sorted and return a token guaranteeing that.
 *
 * It is user-defined precisely what "sorted" means, but the soa<...> class
 * makes some guarantees:
 * - if the container is frozen, no pointers to elements in the underlying
 *   storage will be invalidated -- attempts to do so will throw or abort.
 * - if the container is not frozen, it will remain flagged as sorted until
 *   a potentially-pointer-invalidating operation (insertion, deletion,
 *   permutation) occurs, or mark_as_unsorted() is called.
 *
 * The container will be frozen for the lifetime of the token returned from
 * this function, and therefore also sorted for at least that time. This
 * token has the semantics of a unique_ptr, i.e. it cannot be copied but
 * can be moved, and destroying a moved-from token has no effect.
 *
 * The tokens returned by this function are reference counted; the
 * container will be frozen for as long as any token is alive.
 *
 * Note that "frozen" refers to the storage layout, not to the stored value,
 * meaning that values inside a frozen container can still be modified --
 * "frozen" is not "runtime const".
 *
 * @todo A future extension could be to preserve the sorted flag until
 * pointers are actually, not potentially, invalidated.
 */
[[nodiscard]] sorted_token_type get_sorted_token() {
    // Increment the reference count, marking the container as frozen.
    ++m_frozen_count;
    // Mark the container as sorted
    m_sorted = true;
    // Return a token that calls decrease_frozen_count() at the end of its lifetime
    return sorted_token_type{static_cast<Storage&>(*this)};
}
/**
 * @brief Tell the container it is no longer sorted.
 *
 * The meaning of being sorted is externally defined, and it is possible
 * that some external change to an input of the (external) algorithm
 * defining the sort order can mean that the data are no longer considered
 * sorted, even if nothing has actually changed inside this container.
 */
void mark_as_unsorted() {
    // <false> distinguishes this public entry point from internal callers
    // such as erase(), which pass <true>; see mark_as_unsorted_impl
    // (defined elsewhere in this file) for the exact semantics.
    mark_as_unsorted_impl<false>();
}
    /**
     * @brief Set the callback that is invoked when the container becomes unsorted.
     *
     * This is invoked by mark_as_unsorted() and when a container operation
     * (insertion, permutation, deletion) causes the container to transition
     * from being sorted to being unsorted.
     *
     * @param unsorted_callback Callable to invoke; passing an empty
     *        std::function clears any previously-set callback (an empty
     *        callback is never invoked).
     */
    void set_unsorted_callback(std::function<void()> unsorted_callback) {
        m_unsorted_callback = std::move(unsorted_callback);
    }
    /**
     * @brief Query if the underlying vectors are still "sorted".
     *
     * See the documentation of get_sorted_token() for an explanation of what
     * this means.
     *
     * @return The current value of the sorted flag; this is a cheap read and
     *         performs no validation of the actual data order.
     */
    [[nodiscard]] bool is_sorted() const {
        return m_sorted;
    }
    /** @brief Permute the SOA-format data using an arbitrary vector.
     *
     *  @param permutation Reverse-permutation vector of length size(); per
     *         the boost::algorithm::apply_reverse_permutation semantics used
     *         below, the row initially at index i ends up at index
     *         permutation[i]. Taken by value because it is consumed
     *         (scrambled) while the permutation is applied.
     *  @throws If the container is frozen, or if `permutation` is not a valid
     *          permutation of [0, size()) (see check_permutation_vector).
     */
    template <typename Range>
    void apply_reverse_permutation(Range permutation) {
        // Check that the given vector is a valid permutation of length size().
        std::size_t const my_size{size()};
        detail::check_permutation_vector(permutation, my_size);
        // Applying a permutation in general invalidates indices, so it is forbidden if the
        // structure is frozen, and it leaves the structure unsorted.
        if (m_frozen_count) {
            throw_error("apply_reverse_permutation() called on a frozen structure");
        }
        mark_as_unsorted_impl<true>();
        // Now we apply the reverse permutation in `permutation` to all of the columns in the
        // container. This is the algorithm from boost::algorithm::apply_reverse_permutation.
        // Each cycle of the permutation is chased in place: rows are swapped until position
        // i holds its final value (i.e. permutation[i] == i), and `permutation` itself is
        // updated to record the progress -- hence it must be a mutable copy.
        for (std::size_t i = 0; i < my_size; ++i) {
            while (i != permutation[i]) {
                using ::std::swap;
                auto const next = permutation[i];
                for_all_vectors<detail::may_cause_reallocation::No>(
                    [i, next](auto const& tag, auto& vec, auto field_index, auto array_dim) {
                        // swap the i-th and next-th array_dim-sized sub-ranges of vec
                        ::std::swap_ranges(::std::next(vec.begin(), i * array_dim),
                                           ::std::next(vec.begin(), (i + 1) * array_dim),
                                           ::std::next(vec.begin(), next * array_dim));
                    });
                swap(permutation[i], permutation[next]);
            }
        }
        // update the indices in the container: every identifier's stored row
        // number must match the row where its data now lives
        for (auto i = 0ul; i < my_size; ++i) {
            m_indices[i].set_current_row(i);
        }
    }
private:
    /**
     * @brief Shared implementation of mark_as_unsorted() and the implicit
     *        unsorted transitions triggered by container modifications.
     * @tparam internal true when called from container operations (insert,
     *         permute, ...), false when called from the public
     *         mark_as_unsorted().
     * @throws If the container is frozen.
     */
    template <bool internal>
    void mark_as_unsorted_impl() {
        if (m_frozen_count) {
            // Currently you can only obtain a frozen container by calling
            // get_sorted_token(), which explicitly guarantees that the
            // container will remain sorted for the lifetime of the returned
            // token.
            throw_error("mark_as_unsorted() called on a frozen structure");
        }
        // Only execute the callback if we're transitioning from sorted to
        // unsorted, or if this was an explicit mark_as_unsorted() call
        // (internal == false).
        bool const execute_callback{m_sorted || !internal};
        m_sorted = false;
        if (execute_callback && m_unsorted_callback) {
            m_unsorted_callback();
        }
    }
    /**
     * @brief Create a new entry and return an identifier that owns it.
     *
     * Calling this method increases size() by one. Destroying (modulo move
     * operations) the returned identifier, which has the semantics of a
     * unique_ptr, decreases size() by one.
     *
     * Note that this has different semantics to standard library container
     * methods such as emplace_back(), push_back(), insert() and so on. Because
     * the returned identifier manages the lifetime of the newly-created entry,
     * discarding the return value will cause the new entry to immediately be
     * deleted.
     *
     * This is a low-level call that is useful for the implementation of the
     * owning_identifier template. The returned owning identifier is typically
     * wrapped inside an owning handle type that adds data-structure-specific
     * methods (e.g. v(), v_handle() for a Node).
     *
     * @throws If the container is frozen.
     */
    [[nodiscard]] owning_identifier<Storage> acquire_owning_identifier() {
        if (m_frozen_count) {
            throw_error("acquire_owning_identifier() called on a frozen structure");
        }
        // The .insert() calls we are about to make can trigger
        // reallocation and, therefore, invalidation of pointers. At present,
        // "sorted" is defined to mean that pointers have not been invalidated.
        // There are two reasonable changes that could be made here:
        // - possibly for release builds, we could only mark unsorted if a
        //   reallocation *actually* happens
        // - "sorted" could be defined to mean that indices have not been
        //   invalidated -- adding a new entry to the end of the container
        //   never invalidates indices
        mark_as_unsorted_impl<true>();
        // Append to all of the vectors
        auto const old_size = size();
        for_all_vectors<detail::may_cause_reallocation::Yes>(
            [](auto const& tag, auto& vec, auto field_index, auto array_dim) {
                using Tag = ::std::decay_t<decltype(tag)>;
                if constexpr (detail::has_default_value_v<Tag>) {
                    // Fields with a default value get array_dim copies of it.
                    vec.insert(vec.end(), array_dim, tag.default_value());
                } else {
                    // Otherwise append array_dim value-initialised elements.
                    vec.insert(vec.end(), array_dim, {});
                }
            });
        // Important that this comes after the m_frozen_count check
        owning_identifier<Storage> index{static_cast<Storage&>(*this), old_size};
        // Update the pointer-to-row-number in m_indices so it refers to the
        // same thing as index
        m_indices.back() = static_cast<non_owning_identifier_without_container>(index);
        return index;
    }
public:
/**
* @brief Get a non-owning identifier to the offset-th entry.
*/
[[nodiscard]] non_owning_identifier<Storage> at(std::size_t offset) const {
return {const_cast<Storage*>(static_cast<Storage const*>(this)), m_indices[offset]};
}
/**
* @brief Get the instance of the given tag type.
* @tparam Tag The tag type, which must be a member of the @c Tags... pack.
* @return Const reference to the given tag type instance.
*
* For example, if this is called on the @c Node::storage then @c Tag would be something like @c
* Node::field::Area, @c Node::field::RHS or @c Node::field::Voltage, which are empty types that
* serve to define the default values and types of those quantities.
*
* At the time of writing the other possibility is that this is called on an instance of @c
* Mechanism::storage, in which case @c Tag must (currently) be @c
* Mechanism::field::FloatingPoint. This stores the names and array dimensions of the RANGE
* variables in the mechanism (MOD file), which are only known at runtime.
*/
template <typename Tag>
[[nodiscard]] constexpr Tag const& get_tag() const {
return std::get<tag_index_v<Tag>>(m_data).tag();
}
    /** @brief True iff @c Tag is one of the @c Tags... of this container. */
    template <typename Tag>
    static constexpr bool has_tag_v = detail::type_in_pack_v<Tag, Tags...>;
/**
* @brief Get the offset-th element of the column named by Tag.
*
* Because this is returning a single value, it is permitted even in
* read-only mode. The container being in read only mode means that
* operations that would invalidate iterators/pointers are forbidden, not
* that actual data values cannot change.
*/
template <typename Tag>
[[nodiscard]] typename Tag::type& get(std::size_t offset) {
static_assert(has_tag_v<Tag>);
static_assert(!detail::has_num_variables_v<Tag>);
auto& field_data = std::get<tag_index_v<Tag>>(m_data);
if constexpr (detail::field_impl_v<Tag> == detail::FieldImplementation::OptionalSingle) {
if (!field_data.active()) {
throw_error("get(offset) called for a disabled optional field");
}
}
return field_data.data_ptrs()[0][offset];
}
/**
* @brief Get the offset-th element of the column named by Tag.
*/
template <typename Tag>
[[nodiscard]] typename Tag::type const& get(std::size_t offset) const {
static_assert(has_tag_v<Tag>);
static_assert(!detail::has_num_variables_v<Tag>);
auto const& field_data = std::get<tag_index_v<Tag>>(m_data);
if constexpr (detail::field_impl_v<Tag> == detail::FieldImplementation::OptionalSingle) {
if (!field_data.active()) {
throw_error("get(offset) const called for a disabled optional field");
}
}
return field_data.data_ptrs()[0][offset];
}