-
Notifications
You must be signed in to change notification settings - Fork 435
/
Mesh.cpp
1417 lines (1232 loc) · 58.5 KB
/
Mesh.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
This file is part of Magnum.
Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019,
2020, 2021, 2022, 2023 Vladimír Vondruš <mosra@centrum.cz>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
#include "Mesh.h"
#include <Corrade/Containers/GrowableArray.h>
#include <Corrade/Containers/StridedArrayView.h>
#ifndef MAGNUM_TARGET_WEBGL
#include <Corrade/Containers/String.h>
#endif
#include <Corrade/Utility/Algorithms.h>
#include <Corrade/Utility/Debug.h>
#include "Magnum/Mesh.h"
#include "Magnum/GL/Buffer.h"
#include "Magnum/GL/Context.h"
#include "Magnum/GL/Extensions.h"
#ifndef MAGNUM_TARGET_GLES
#include "Magnum/GL/TransformFeedback.h"
#endif
#include "Magnum/GL/Implementation/BufferState.h"
#ifndef MAGNUM_TARGET_WEBGL
#include "Magnum/GL/Implementation/DebugState.h"
#endif
#include "Magnum/GL/Implementation/MeshState.h"
#include "Magnum/GL/Implementation/State.h"
#ifdef MAGNUM_BUILD_DEPRECATED
#include "Magnum/GL/AbstractShaderProgram.h"
#endif
namespace Magnum { namespace GL {
namespace {

/* Maps the generic Magnum::MeshPrimitive enum (whose values are 1-based) to
   GL primitive values. Entries with all bits set mark generic primitives that
   have no GL counterpart -- meshPrimitive() asserts on those, and
   hasMeshPrimitive() reports them as unsupported. */
constexpr MeshPrimitive PrimitiveMapping[]{
    MeshPrimitive::Points,
    MeshPrimitive::Lines,
    MeshPrimitive::LineLoop,
    MeshPrimitive::LineStrip,
    MeshPrimitive::Triangles,
    MeshPrimitive::TriangleStrip,
    MeshPrimitive::TriangleFan,
    MeshPrimitive(~UnsignedInt{}), /* Instances */
    MeshPrimitive(~UnsignedInt{}), /* Faces */
    MeshPrimitive(~UnsignedInt{}), /* Edges */
    MeshPrimitive(~UnsignedInt{}) /* Meshlets */
};

/* Maps the generic Magnum::MeshIndexType enum (1-based) to GL index types.
   All generic values have a GL equivalent, so no sentinel entries here. */
constexpr MeshIndexType IndexTypeMapping[]{
    MeshIndexType::UnsignedByte,
    MeshIndexType::UnsignedShort,
    MeshIndexType::UnsignedInt
};

}
/* Returns true if the generic primitive has a GL equivalent. Implementation-
   specific primitive values are passed through unchanged and thus always
   treated as present. */
bool hasMeshPrimitive(const Magnum::MeshPrimitive primitive) {
    if(isMeshPrimitiveImplementationSpecific(primitive))
        return true;
    /* Generic values are 1-based, shift down by one for the table lookup. The
       message used to say "GL::hasPrimitive()", which is not the name of this
       function -- fixed to match, consistently with GL::meshPrimitive() and
       GL::meshIndexType(). */
    CORRADE_ASSERT(UnsignedInt(primitive) - 1 < Containers::arraySize(PrimitiveMapping),
        "GL::hasMeshPrimitive(): invalid primitive" << primitive, {});
    /* All-bits-set entries in the table mark primitives without a GL
       counterpart */
    return UnsignedInt(PrimitiveMapping[UnsignedInt(primitive) - 1]) != ~UnsignedInt{};
}
/* Converts a generic Magnum::MeshPrimitive to the GL-specific enum.
   Implementation-specific values are unwrapped as-is; generic values go
   through the mapping table and assert if no GL equivalent exists. */
MeshPrimitive meshPrimitive(const Magnum::MeshPrimitive primitive) {
    if(isMeshPrimitiveImplementationSpecific(primitive))
        return meshPrimitiveUnwrap<GL::MeshPrimitive>(primitive);

    /* Generic primitive values are 1-based indices into the table */
    const UnsignedInt index = UnsignedInt(primitive) - 1;
    CORRADE_ASSERT(index < Containers::arraySize(PrimitiveMapping),
        "GL::meshPrimitive(): invalid primitive" << primitive, {});

    /* An all-bits-set entry means the primitive exists in the generic enum
       but not in GL */
    const MeshPrimitive mapped = PrimitiveMapping[index];
    CORRADE_ASSERT(mapped != MeshPrimitive(~UnsignedInt{}),
        "GL::meshPrimitive(): unsupported primitive" << primitive, {});
    return mapped;
}
/* Converts a generic Magnum::MeshIndexType to the GL-specific enum.
   Implementation-specific values carry the GL value directly and are just
   unwrapped. */
MeshIndexType meshIndexType(const Magnum::MeshIndexType type) {
    if(isMeshIndexTypeImplementationSpecific(type))
        return meshIndexTypeUnwrap<GL::MeshIndexType>(type);

    /* Generic type values are 1-based indices into the mapping table */
    const UnsignedInt index = UnsignedInt(type) - 1;
    CORRADE_ASSERT(index < Containers::arraySize(IndexTypeMapping),
        "GL::meshIndexType(): invalid type" << type, {});
    return IndexTypeMapping[index];
}
/* Size of a single index of given GL type, in bytes. Asserts on values
   outside of the enum. */
UnsignedInt meshIndexTypeSize(const MeshIndexType type) {
    switch(type) {
        case MeshIndexType::UnsignedByte:
            return 1;
        case MeshIndexType::UnsignedShort:
            return 2;
        case MeshIndexType::UnsignedInt:
            return 4;
    }

    /* The switch above handles every enum value and returns directly */
    CORRADE_ASSERT_UNREACHABLE("GL::meshIndexTypeSize(): invalid type" << type, {});
}
#ifndef DOXYGEN_GENERATING_OUTPUT
/* Debug output for GL::MeshPrimitive -- prints e.g.
   "GL::MeshPrimitive::Triangles" for known values */
Debug& operator<<(Debug& debug, const MeshPrimitive value) {
    debug << "GL::MeshPrimitive" << Debug::nospace;

    switch(value) {
        /* LCOV_EXCL_START */
        #define _c(value) case MeshPrimitive::value: return debug << "::" #value;
        _c(Points)
        _c(Lines)
        _c(LineLoop)
        _c(LineStrip)
        /* Adjacency / patch primitives exist only on desktop GL and ES3+ */
        #if !defined(MAGNUM_TARGET_GLES2) && !defined(MAGNUM_TARGET_WEBGL)
        _c(LineStripAdjacency)
        _c(LinesAdjacency)
        #endif
        _c(Triangles)
        _c(TriangleStrip)
        _c(TriangleFan)
        #if !defined(MAGNUM_TARGET_GLES2) && !defined(MAGNUM_TARGET_WEBGL)
        _c(TrianglesAdjacency)
        _c(TriangleStripAdjacency)
        _c(Patches)
        #endif
        #undef _c
        /* LCOV_EXCL_STOP */
    }

    /* Values not matching any case (e.g. wrapped implementation-specific
       enums) fall through and get printed as a hex number in parentheses */
    return debug << "(" << Debug::nospace << Debug::hex << GLenum(value) << Debug::nospace << ")";
}
/* Debug output for GL::MeshIndexType -- prints e.g.
   "GL::MeshIndexType::UnsignedShort" for known values */
Debug& operator<<(Debug& debug, const MeshIndexType value) {
    debug << "GL::MeshIndexType" << Debug::nospace;

    switch(value) {
        /* LCOV_EXCL_START */
        #define _c(value) case MeshIndexType::value: return debug << "::" #value;
        _c(UnsignedByte)
        _c(UnsignedShort)
        _c(UnsignedInt)
        #undef _c
        /* LCOV_EXCL_STOP */
    }

    /* Unknown values are printed as a hex number in parentheses */
    return debug << "(" << Debug::nospace << Debug::hex << GLenum(value) << Debug::nospace << ")";
}
#endif
/* Compact per-attribute state record, bit-packed to keep the per-attribute
   footprint small. Stored in Mesh::_attributes only when VAOs are
   unavailable; otherwise used as a temporary while setting up the VAO. The
   only code change against the original is removal of a stray semicolon
   after the offset() member function body (an empty declaration). */
struct Mesh::AttributeLayout {
    /* Records attribute layout with a non-owning Buffer reference. Used as a
       temporary data holder when VAOs are used, saved to the _attributes
       member when not. If a Buffer instance needs to be owned, it's
       subsequently moved in (usually with ObjectFlag::DeleteOnDestruction
       set). */
    explicit AttributeLayout(const Buffer& buffer, GLuint location, GLint size, GLenum type, DynamicAttribute::Kind kind, GLintptr offset, GLsizei stride, GLuint divisor) noexcept: buffer{Buffer::wrap(buffer.id())}, location{UnsignedByte(location)},
        /* Have to use () instead of {}, otherwise GCC 4.8 complains that
           parameter kind is set but not used */
        kindSize(UnsignedByte(kind)), type{UnsignedShort(type)}, divisor{divisor}, offsetStride{(UnsignedLong(offset) << 16)|stride}
    {
        /* Verify the values actually fit into the packed representation
           documented at the members below */
        CORRADE_INTERNAL_ASSERT(location < 256 && type < 65536 && UnsignedLong(offset) < (1ull << 48) && stride < 65536);
        #ifndef MAGNUM_TARGET_GLES
        if(size == GL_BGRA) {
            /* GL_BGRA is encoded as size 0 in the packed field; the |= 0 is
               a deliberate no-op kept for symmetry with the branch below */
            kindSize |= 0 << 2;
        } else
        #endif
        {
            CORRADE_INTERNAL_ASSERT(size >= 1 && size <= 4);
            kindSize |= size << 2;
        }
    }

    /* Takes ownership of a Buffer instance. Abuses the _attributes storage in
       cases where VAOs are used. That wastes a bit of space as only 8 out of
       the 24 bytes is actually used, but that should be okay as there's likely
       only very few buffers (compared to attributes, which can be quite
       many). */
    explicit AttributeLayout(Buffer&& buffer): buffer{Utility::move(buffer)}, location{}, kindSize{}, type{}, divisor{}, offsetStride{} {}

    /* Move-only, since the buffer member may own a GL object */
    AttributeLayout(AttributeLayout&&) noexcept = default;
    AttributeLayout(const AttributeLayout&) noexcept = delete;
    AttributeLayout& operator=(AttributeLayout&&) noexcept = default;
    AttributeLayout& operator=(const AttributeLayout&) noexcept = delete;

    /* Decodes the low 2 bits of kindSize */
    DynamicAttribute::Kind kind() const {
        return DynamicAttribute::Kind(kindSize & 0x03);
    }

    /* Decodes the component count from the upper bits of kindSize; 0 stands
       for GL_BGRA on desktop GL */
    GLint size() const {
        const GLint size = kindSize >> 2;
        #ifndef MAGNUM_TARGET_GLES
        if(!size) return GL_BGRA;
        #endif
        return size;
    }

    /* Upper 48 bits of offsetStride */
    GLintptr offset() const {
        return offsetStride >> 16;
    }

    /* Lower 16 bits of offsetStride */
    GLsizei stride() const {
        return offsetStride & 0xffff;
    }

    /* Packing to just 20 bytes would be possible with unwrapping the buffer,
       keeping just the ID from it putting the 2-bit ObjectFlags into the
       remaining free bits in `kindSize`, at the cost of extra logic that would
       be needed to properly destruct it if it's owned. Then, on 32-bit WebGL
       we don't need the offset to be more than 32 bits and the stride can be
       just 1 byte, leaving us with just 17 bytes. The last byte could be then
       stolen from the `divisor`, for example. Not doing that as I don't feel
       it's necessary to optimize that much, additionally the AttributeLayout
       instances are only stored if VAOs are disabled, which is a rare
       scenario. */

    /* 4 bytes +
       2 bits: if unwrapped (for flags, the TargetHint is always Array) */
    Buffer buffer;
    /* 4 bits: GPUs have usually max 8 or 16 locations */
    UnsignedByte location;
    /* 2 bits for a kind +
       3 bits for size: kind is just 4 values, size is 1, 2, 3, 4 components
       or GL_BGRA, which is treated as 0 */
    UnsignedByte kindSize;
    /* 2 bytes: the type values are all just 16-bit */
    UnsignedShort type;
    /* 4 bytes: not sure what's the limit on this, but looks like it can be a
       full 32 bit range, same as vertex / element count (unlike in
       Vulkan, where it's often either just 0 or 1) */
    GLuint divisor;
    /* 6 bytes offset +
       2 byte stride: offset has to be more than 32 bits to work with buffers
       larger than 4 GB, but 48 bits (256 TB?) could be enough. Max
       stride is usually 2048, it's just 256 on WebGL so 16 bits for
       it should be enough. */
    UnsignedLong offsetStride;
};
/* Largest allowed vertex attribute stride. Returns a spec-defined constant on
   WebGL, queries (and caches) the GL limit where the query exists, and
   reports "no limit" (all bits set) otherwise. */
UnsignedInt Mesh::maxVertexAttributeStride() {
    #ifdef MAGNUM_TARGET_WEBGL
    /* Defined for WebGL 1 and for the new vertexAttribIPointer in WebGL 2 too:
       https://www.khronos.org/registry/webgl/specs/latest/1.0/index.html#5.14.10
       https://www.khronos.org/registry/webgl/specs/latest/2.0/#3.7.8
    */
    return 255;
    #else
    /* The GL_MAX_VERTEX_ATTRIB_STRIDE query is gated on GL 4.4 / ES 3.1 here;
       on older versions (and unconditionally on ES2, where neither branch of
       the #if applies) return the full 32-bit range */
    #ifndef MAGNUM_TARGET_GLES
    if(!Context::current().isVersionSupported(Version::GL440))
    #elif !defined(MAGNUM_TARGET_GLES2)
    if(!Context::current().isVersionSupported(Version::GLES310))
    #endif
    {
        return 0xffffffffu;
    }
    #ifndef MAGNUM_TARGET_GLES2
    /* Cached in the context state; zero means not queried yet */
    GLint& value = Context::current().state().mesh.maxVertexAttributeStride;
    /* Get the value, if not already cached */
    if(value == 0)
        glGetIntegerv(GL_MAX_VERTEX_ATTRIB_STRIDE, &value);
    return value;
    #endif
    #endif
}
#ifndef MAGNUM_TARGET_GLES2
/* Largest supported element (index) value. Return type is narrower on WebGL,
   where only the 32-bit glGetIntegerv getter exists. */
#ifndef MAGNUM_TARGET_WEBGL
Long Mesh::maxElementIndex()
#else
Int Mesh::maxElementIndex()
#endif
{
    /* On desktop GL the GL_MAX_ELEMENT_INDEX query comes from ES3
       compatibility; without the extension assume the full 32-bit range */
    #ifndef MAGNUM_TARGET_GLES
    if(!Context::current().isExtensionSupported<Extensions::ARB::ES3_compatibility>())
        return 0xFFFFFFFFl;
    #endif
    /* Cached in the context state; zero means not queried yet */
    #ifndef MAGNUM_TARGET_WEBGL
    GLint64& value =
    #else
    GLint& value =
    #endif
        Context::current().state().mesh.maxElementIndex;
    /* Get the value, if not already cached */
    if(value == 0) {
        #ifndef MAGNUM_TARGET_WEBGL
        glGetInteger64v(GL_MAX_ELEMENT_INDEX, &value);
        #else
        glGetIntegerv(GL_MAX_ELEMENT_INDEX, &value);
        #endif
    }
    return value;
}
/* Recommended maximum index count, queried once and cached in the context
   state (zero meaning not queried yet) */
Int Mesh::maxElementsIndices() {
    GLint& cached = Context::current().state().mesh.maxElementsIndices;
    if(!cached)
        glGetIntegerv(GL_MAX_ELEMENTS_INDICES, &cached);
    return cached;
}
/* Recommended maximum vertex count, queried once and cached in the context
   state (zero meaning not queried yet) */
Int Mesh::maxElementsVertices() {
    GLint& cached = Context::current().state().mesh.maxElementsVertices;
    if(!cached)
        glGetIntegerv(GL_MAX_ELEMENTS_VERTICES, &cached);
    return cached;
}
#endif
/* Creates a mesh with given primitive. Actual GL object creation is
   delegated to the context-specific implementation. */
Mesh::Mesh(const MeshPrimitive primitive): _primitive{primitive}, _flags{ObjectFlag::DeleteOnDestruction} {
    Context::current().state().mesh.createImplementation(*this);
}
Mesh::Mesh(NoCreateT) noexcept: _id{0}, _primitive{MeshPrimitive::Triangles}, _flags{ObjectFlag::DeleteOnDestruction} {}
Mesh::~Mesh() {
    /* Moved out or not deleting on destruction, nothing to do */
    if(!_id || !(_flags & ObjectFlag::DeleteOnDestruction))
        return;

    /* Remove current vao from the state -- the tracker would otherwise keep a
       stale ID after the object is deleted */
    GLuint& current = Context::current().state().mesh.currentVAO;
    if(current == _id) current = 0;

    /* Actual deletion is delegated to the context-specific implementation */
    Context::current().state().mesh.destroyImplementation(*this);
}
/* Move construction -- copies all scalar state, moves the owning buffer /
   attribute members and zeroes out the source ID so its destructor doesn't
   delete the GL object this instance now owns */
Mesh::Mesh(Mesh&& other) noexcept: _id(other._id), _primitive(other._primitive), _flags{other._flags}, _countSet{other._countSet}, _count(other._count), _baseVertex{other._baseVertex}, _instanceCount{other._instanceCount},
    #ifndef MAGNUM_TARGET_GLES
    _baseInstance{other._baseInstance},
    #endif
    #ifndef MAGNUM_TARGET_GLES2
    _indexStart(other._indexStart), _indexEnd(other._indexEnd),
    #endif
    _indexType(other._indexType), _indexBufferOffset(other._indexBufferOffset), _indexOffset(other._indexOffset), _indexBuffer{Utility::move(other._indexBuffer)},
    _attributes{Utility::move(other._attributes)}
{
    /* The moved-from destructor then bails out early on the zero ID */
    other._id = 0;
}
/* Move assignment -- swaps complete state with the other instance, so the
   original GL object (if any) gets destroyed by the moved-from instance's
   destructor */
Mesh& Mesh::operator=(Mesh&& other) noexcept {
    using Utility::swap;
    swap(_id, other._id);
    swap(_flags, other._flags);
    swap(_primitive, other._primitive);
    swap(_countSet, other._countSet);
    swap(_count, other._count);
    swap(_baseVertex, other._baseVertex);
    swap(_instanceCount, other._instanceCount);
    #ifndef MAGNUM_TARGET_GLES
    swap(_baseInstance, other._baseInstance);
    #endif
    #ifndef MAGNUM_TARGET_GLES2
    swap(_indexStart, other._indexStart);
    swap(_indexEnd, other._indexEnd);
    #endif
    swap(_indexBufferOffset, other._indexBufferOffset);
    swap(_indexType, other._indexType);
    swap(_indexOffset, other._indexOffset);
    swap(_indexBuffer, other._indexBuffer);
    swap(_attributes, other._attributes);
    return *this;
}
Mesh::Mesh(const GLuint id, const MeshPrimitive primitive, const ObjectFlags flags): _id{id}, _primitive{primitive}, _flags{flags} {}
/* Ensures the GL object actually exists (not just a reserved name), needed
   before ID-based calls such as glObjectLabel() */
inline void Mesh::createIfNotAlready() {
    /* If VAO extension is not available, the following is always true */
    if(_flags & ObjectFlag::Created) return;

    /* glGen*() does not create the object, just reserves the name. Some
       commands (such as glObjectLabel()) operate with IDs directly and they
       require the object to be created. Binding the VAO finally creates it.
       Also all EXT DSA functions implicitly create it. */
    bindVAO();

    /* Sanity check that the bind above indeed marked the object as created */
    CORRADE_INTERNAL_ASSERT(_flags & ObjectFlag::Created);
}
#ifndef MAGNUM_TARGET_WEBGL
/* Queries the debug label of the VAO. The object has to exist first, since
   the label APIs operate on IDs directly. ES2 uses the KHR_debug suffixed
   enum value. */
Containers::String Mesh::label() {
    createIfNotAlready();
    #ifndef MAGNUM_TARGET_GLES2
    return Context::current().state().debug.getLabelImplementation(GL_VERTEX_ARRAY, _id);
    #else
    return Context::current().state().debug.getLabelImplementation(GL_VERTEX_ARRAY_KHR, _id);
    #endif
}
/* Sets the debug label of the VAO. The object has to exist first, since the
   label APIs operate on IDs directly. ES2 uses the KHR_debug suffixed enum
   value. */
Mesh& Mesh::setLabel(const Containers::StringView label) {
    createIfNotAlready();
    #ifndef MAGNUM_TARGET_GLES2
    Context::current().state().debug.labelImplementation(GL_VERTEX_ARRAY, _id, label);
    #else
    Context::current().state().debug.labelImplementation(GL_VERTEX_ARRAY_KHR, _id, label);
    #endif
    return *this;
}
#endif
/* Index type of the mesh; only valid after setIndexBuffer() was called */
MeshIndexType Mesh::indexType() const {
    CORRADE_ASSERT(_indexBuffer.id(), "GL::Mesh::indexType(): mesh is not indexed", {});
    return _indexType;
}
#ifdef MAGNUM_BUILD_DEPRECATED
/* Deprecated convenience accessor, forwards to meshIndexTypeSize(); only
   valid after setIndexBuffer() was called */
UnsignedInt Mesh::indexTypeSize() const {
    CORRADE_ASSERT(_indexBuffer.id(), "GL::Mesh::indexTypeSize(): mesh is not indexed", {});
    return meshIndexTypeSize(_indexType);
}
#endif
/* Sets the index offset used for subsequent draws; meaningless (and thus
   asserted against) on a non-indexed mesh */
Mesh& Mesh::setIndexOffset(Int offset) {
    CORRADE_ASSERT(_indexBuffer.id(),
        "GL::Mesh::setIndexOffset(): mesh is not indexed", *this);
    _indexOffset = offset;
    return *this;
}
/* Adds an instanced vertex buffer with a dynamic attribute. Multi-vector
   attributes (attribute.vectors() > 1, e.g. matrices) occupy consecutive
   locations, each registered separately with the offset advanced by the
   vector stride. */
Mesh& Mesh::addVertexBufferInstanced(Buffer& buffer, const UnsignedInt divisor, const GLintptr offset, const GLsizei stride, const DynamicAttribute& attribute) {
    for(UnsignedInt i = 0; i != attribute.vectors(); ++i)
        attributePointerInternal(AttributeLayout{buffer,
            attribute.location() + i,
            GLint(attribute.components()),
            GLenum(attribute.dataType()),
            attribute.kind(),
            GLintptr(offset + i*attribute.vectorStride()),
            stride,
            divisor});
    return *this;
}
/* Sets the index buffer, taking over its ownership. The start/end range is
   stored only on ES3+/desktop (presumably for range draws -- the members
   don't exist on ES2, where the arguments are just consumed). */
Mesh& Mesh::setIndexBuffer(Buffer&& buffer, GLintptr offset, MeshIndexType type, UnsignedInt start, UnsignedInt end) {
    CORRADE_ASSERT(buffer.id(),
        "GL::Mesh::setIndexBuffer(): empty or moved-out Buffer instance was passed", *this);
    /* WebGL-only check that the buffer was meant to be an element array --
       see the assert message */
    #ifdef MAGNUM_TARGET_WEBGL
    CORRADE_ASSERT(buffer.targetHint() == Buffer::TargetHint::ElementArray,
        "GL::Mesh::setIndexBuffer(): the buffer has unexpected target hint, expected" << Buffer::TargetHint::ElementArray << "but got" << buffer.targetHint(), *this);
    #endif

    /* It's IMPORTANT to do this *before* the _indexBuffer is set, since the
       bindVAO() function called from here is resetting element buffer state
       tracker to _indexBuffer.id(). */
    Context::current().state().mesh.bindIndexBufferImplementation(*this, buffer);

    _indexBuffer = Utility::move(buffer);
    _indexBufferOffset = offset;
    _indexType = type;
    #ifndef MAGNUM_TARGET_GLES2
    _indexStart = start;
    _indexEnd = end;
    #else
    static_cast<void>(start);
    static_cast<void>(end);
    #endif
    return *this;
}
/* Non-owning variant -- delegates to the r-value overload with a wrapper
   around the same GL object, leaving ownership with the caller */
Mesh& Mesh::setIndexBuffer(Buffer& buffer, const GLintptr offset, const MeshIndexType type, const UnsignedInt start, const UnsignedInt end) {
    return setIndexBuffer(Buffer::wrap(buffer.id(), buffer.targetHint()), offset, type, start, end);
}
/* Non-instanced multi-draw backend. Index offsets are byte offsets into the
   index buffer, passed to GL as pointers -- hence 64-bit on 64-bit
   platforms. An empty vertexOffsets view on an indexed mesh means no base
   vertex is used. */
void Mesh::drawInternal(const Containers::ArrayView<const UnsignedInt>& counts, const Containers::ArrayView<const UnsignedInt>& vertexOffsets,
    #ifdef CORRADE_TARGET_32BIT
    const Containers::ArrayView<const UnsignedInt>& indexOffsets
    #else
    const Containers::ArrayView<const UnsignedLong>& indexOffsets
    #endif
) {
    /* Not asserting for _instanceCount == 1, as this is *not* taken from the
       original mesh, the counts/vertexOffsets/indexOffsets completely describe
       the range being drawn */

    const Implementation::MeshState& state = Context::current().state().mesh;

    state.bindImplementation(*this);

    /* Non-indexed meshes */
    if(!_indexBuffer.id()) {
        CORRADE_ASSERT(vertexOffsets.size() == counts.size(),
            "GL::AbstractShaderProgram::draw(): expected" << counts.size() << "vertex offset items but got" << vertexOffsets.size(), );
        /* Direct GL call on desktop, pluggable implementation on ES */
        #ifndef MAGNUM_TARGET_GLES
        glMultiDrawArrays
        #else
        state.multiDrawArraysImplementation
        #endif
            (GLenum(_primitive), reinterpret_cast<const GLint*>(vertexOffsets.data()), reinterpret_cast<const GLsizei*>(counts.data()), counts.size());

    /* Indexed meshes */
    } else {
        CORRADE_ASSERT(indexOffsets.size() == counts.size(),
            "GL::AbstractShaderProgram::draw(): expected" << counts.size() << "index offset items but got" << indexOffsets.size(), );

        /* Indexed meshes */
        if(vertexOffsets.isEmpty()) {
            #ifndef MAGNUM_TARGET_GLES
            glMultiDrawElements
            #else
            state.multiDrawElementsImplementation
            #endif
                (GLenum(_primitive), reinterpret_cast<const GLsizei*>(counts.data()), GLenum(_indexType), reinterpret_cast<const void* const*>(indexOffsets.data()), counts.size());

        /* Indexed meshes with base vertex */
        } else {
            CORRADE_ASSERT(vertexOffsets.size() == counts.size(),
                "GL::AbstractShaderProgram::draw(): expected" << counts.size() << "vertex offset items but got" << vertexOffsets.size(), );
            /* Base vertex variant isn't available on WebGL 1 at all */
            #if !(defined(MAGNUM_TARGET_WEBGL) && defined(MAGNUM_TARGET_GLES2))
            #ifndef MAGNUM_TARGET_GLES
            glMultiDrawElementsBaseVertex
            #else
            state.multiDrawElementsBaseVertexImplementation
            #endif
                (GLenum(_primitive), reinterpret_cast<const GLsizei*>(counts.data()), GLenum(_indexType), reinterpret_cast<const void* const*>(indexOffsets.data()), counts.size(), reinterpret_cast<const GLint*>(vertexOffsets.data()));
            #else
            CORRADE_ASSERT_UNREACHABLE("GL::AbstractShaderProgram::draw(): indexed mesh multi-draw with base vertex specification possible only since WebGL 2.0", );
            #endif
        }
    }

    state.unbindImplementation(*this);
}
#ifdef MAGNUM_TARGET_GLES
/* Instanced multi-draw backend, ES-only. An empty vertexOffsets /
   instanceOffsets view means the corresponding base vertex / base instance
   is not used. */
void Mesh::drawInternal(const Containers::ArrayView<const UnsignedInt>& counts, const Containers::ArrayView<const UnsignedInt>& instanceCounts, const Containers::ArrayView<const UnsignedInt>& vertexOffsets,
    #ifdef CORRADE_TARGET_32BIT
    const Containers::ArrayView<const UnsignedInt>& indexOffsets
    #else
    const Containers::ArrayView<const UnsignedLong>& indexOffsets
    #endif
    #ifndef MAGNUM_TARGET_GLES2
    , const Containers::ArrayView<const UnsignedInt>& instanceOffsets
    #endif
) {
    const Implementation::MeshState& state = Context::current().state().mesh;

    state.bindImplementation(*this);

    CORRADE_ASSERT(instanceCounts.size() == counts.size(),
        "GL::AbstractShaderProgram::draw(): expected" << counts.size() << "instance count items but got" << instanceCounts.size(), );

    /* Non-indexed instanced meshes */
    if(!_indexBuffer.id()) {
        CORRADE_ASSERT(vertexOffsets.size() == counts.size(),
            "GL::AbstractShaderProgram::draw(): expected" << counts.size() << "vertex offset items but got" << vertexOffsets.size(), );

        /* Non-indexed instanced meshes */
        #ifndef MAGNUM_TARGET_GLES2
        if(instanceOffsets.isEmpty())
        #endif
        {
            state.multiDrawArraysInstancedImplementation(GLenum(_primitive), reinterpret_cast<const GLint*>(vertexOffsets.data()), reinterpret_cast<const GLsizei*>(counts.data()), reinterpret_cast<const GLsizei*>(instanceCounts.data()), counts.size());
        }
        /* Non-indexed instanced meshes with base instance */
        #ifndef MAGNUM_TARGET_GLES2
        else {
            CORRADE_ASSERT(instanceOffsets.size() == counts.size(),
                "GL::AbstractShaderProgram::draw(): expected" << counts.size() << "instance offset items but got" << instanceOffsets.size(), );
            state.multiDrawArraysInstancedBaseInstanceImplementation(GLenum(_primitive), reinterpret_cast<const GLint*>(vertexOffsets.data()), reinterpret_cast<const GLsizei*>(counts.data()), reinterpret_cast<const GLsizei*>(instanceCounts.data()),
                reinterpret_cast<const GLuint*>(instanceOffsets.data()), counts.size());
        }
        #endif

    /* Indexed meshes */
    } else {
        CORRADE_ASSERT(indexOffsets.size() == counts.size(),
            "GL::AbstractShaderProgram::draw(): expected" << counts.size() << "index offset items but got" << indexOffsets.size(), );

        /* Indexed meshes */
        if(vertexOffsets.isEmpty()
            #ifndef MAGNUM_TARGET_GLES2
            && instanceOffsets.isEmpty()
            #endif
        ) {
            state.multiDrawElementsInstancedImplementation(GLenum(_primitive), reinterpret_cast<const GLsizei*>(counts.data()), GLenum(_indexType), reinterpret_cast<const void* const*>(indexOffsets.data()), reinterpret_cast<const GLsizei*>(instanceCounts.data()), counts.size());

        /* Indexed meshes with base vertex / base instance. According to the
           extension spec both have to be present, not just one. */
        } else {
            CORRADE_ASSERT(vertexOffsets.size() == counts.size(),
                "GL::AbstractShaderProgram::draw(): expected" << counts.size() << "vertex offset items but got" << vertexOffsets.size(), );
            #ifndef MAGNUM_TARGET_GLES2
            CORRADE_ASSERT(instanceOffsets.size() == counts.size(),
                "GL::AbstractShaderProgram::draw(): expected" << counts.size() << "instance offset items but got" << instanceOffsets.size(), );
            #endif
            #ifndef MAGNUM_TARGET_GLES2
            state.multiDrawElementsInstancedBaseVertexBaseInstanceImplementation(GLenum(_primitive), reinterpret_cast<const GLsizei*>(counts.data()), GLenum(_indexType), reinterpret_cast<const void* const*>(indexOffsets.data()), reinterpret_cast<const GLsizei*>(instanceCounts.data()), reinterpret_cast<const GLint*>(vertexOffsets.data()),
                reinterpret_cast<const GLuint*>(instanceOffsets.data()), counts.size());
            #else
            CORRADE_ASSERT_UNREACHABLE("GL::AbstractShaderProgram::draw(): instanced indexed mesh multi-draw with base vertex specification possible only since OpenGL ES 3.0 and WebGL 2.0", );
            #endif
        }
    }

    state.unbindImplementation(*this);
}
#endif
/* Strided-view frontend for the non-instanced multi-draw. Makes the data
   contiguous (and on 64-bit widens the 32-bit index offsets to 64 bits)
   before calling drawInternal(). */
void Mesh::drawInternalStrided(const Containers::StridedArrayView1D<const UnsignedInt>& counts, const Containers::StridedArrayView1D<const UnsignedInt>& vertexOffsets, const Containers::StridedArrayView1D<const UnsignedInt>& indexOffsets) {
    #ifdef CORRADE_TARGET_32BIT
    /* If all views are contiguous and we're on 32-bit, call the implementation
       directly */
    if(counts.isContiguous() && vertexOffsets.isContiguous() && indexOffsets.isContiguous())
        return drawInternal(counts.asContiguous(), vertexOffsets.asContiguous(), indexOffsets.asContiguous());
    #endif

    /* Otherwise allocate contiguous copies. While it's possible that some
       views could have been contigous already and some not, such scenario is
       unlikely to make a practical sense, so we'll allocate & copy always. */
    Containers::ArrayView<UnsignedInt> countsContiguous;
    Containers::ArrayView<UnsignedInt> vertexOffsetsContiguous;
    #ifdef CORRADE_TARGET_32BIT
    Containers::ArrayView<UnsignedInt>
    #else
    Containers::ArrayView<UnsignedLong>
    #endif
        indexOffsetsContiguous;
    Containers::ArrayTuple data{
        {NoInit, counts.size(), countsContiguous},
        {NoInit, vertexOffsets.size(), vertexOffsetsContiguous},
        /* On 64-bit we'll be filling just the lower 32 bits so zero-init the
           array. On 32-bit we'll overwrite it completely, so NoInit is fine. */
        {
            #ifdef CORRADE_TARGET_32BIT
            NoInit
            #else
            ValueInit
            #endif
            , indexOffsets.size(), indexOffsetsContiguous}
    };
    Utility::copy(counts, countsContiguous);
    Utility::copy(vertexOffsets, vertexOffsetsContiguous);
    Utility::copy(indexOffsets,
        #ifdef CORRADE_TARGET_32BIT
        indexOffsetsContiguous
        #else
        /* Write to the lower 32 bits of the index offsets, which is the
           leftmost bits on Little-Endian and rightmost on Big-Endian. On LE it
           could be just Containers::arrayCast<const UnsignedInt>(indexOffsets)
           but to minimize a chance of error on BE platforms that are hard to
           test on, the same code is used for both. */
        Containers::arrayCast<2, UnsignedInt>(stridedArrayView(indexOffsetsContiguous)).transposed<0, 1>()[
            #ifndef CORRADE_TARGET_BIG_ENDIAN
            0
            #else
            1
            #endif
        ]
        #endif
    );
    drawInternal(countsContiguous, vertexOffsetsContiguous, indexOffsetsContiguous);
}
#ifndef CORRADE_TARGET_32BIT
/* 64-bit-offset overload, compiled only on 64-bit platforms. Calls the
   backend directly when everything is contiguous, otherwise reinterprets the
   64-bit offsets as their lower 32-bit halves and delegates to the 32-bit
   overload above, which allocates the contiguous copy. */
void Mesh::drawInternalStrided(const Containers::StridedArrayView1D<const UnsignedInt>& counts, const Containers::StridedArrayView1D<const UnsignedInt>& vertexOffsets, const Containers::StridedArrayView1D<const UnsignedLong>& indexOffsets) {
    /* If all views are contiguous, call the implementation directly */
    if(counts.isContiguous() && vertexOffsets.isContiguous() && indexOffsets.isContiguous())
        return drawInternal(counts.asContiguous(), vertexOffsets.asContiguous(), indexOffsets.asContiguous());

    /* Otherwise delegate into the 32-bit variant, which will allocate a
       contiguous copy */
    drawInternalStrided(counts, vertexOffsets,
        /* Get the lower 32 bits of the index offsets, which is the leftmost
           bits on Little-Endian and rightmost on Big-Endian. On LE it could be
           just Containers::arrayCast<const UnsignedInt>(indexOffsets) but to
           minimize a chance of error on BE platforms that are hard to test on,
           the same code is used for both. */
        Containers::arrayCast<2, const UnsignedInt>(indexOffsets).transposed<0, 1>()[
            #ifndef CORRADE_TARGET_BIG_ENDIAN
            0
            #else
            1
            #endif
        ]
    );
}
#endif
#ifdef MAGNUM_TARGET_GLES
void Mesh::drawInternalStrided(const Containers::StridedArrayView1D<const UnsignedInt>& counts, const Containers::StridedArrayView1D<const UnsignedInt>& instanceCounts, const Containers::StridedArrayView1D<const UnsignedInt>& vertexOffsets, const Containers::StridedArrayView1D<const UnsignedInt>& indexOffsets
#ifndef MAGNUM_TARGET_GLES2
, const Containers::StridedArrayView1D<const UnsignedInt>& instanceOffsets
#endif
) {
#ifdef CORRADE_TARGET_32BIT
/* If all views are contiguous, the mesh specifies either both base vertex
and base instance or neither and we're on 32-bit, call the
implementation directly */
if(counts.isContiguous() && instanceCounts.isContiguous() && vertexOffsets.isContiguous() && indexOffsets.isContiguous()
#ifndef MAGNUM_TARGET_GLES2
&& instanceOffsets.isContiguous() && (!_indexBuffer.id() || vertexOffsets.size() == instanceOffsets.size())
#endif
)
return drawInternal(counts.asContiguous(), instanceCounts.asContiguous(), vertexOffsets.asContiguous(), indexOffsets.asContiguous()
#ifndef MAGNUM_TARGET_GLES2
, instanceOffsets.asContiguous()
#endif
);
#endif
/* Expected vertex offset and instance offset count. If the mesh is
indexed, they either have to be both used or both empty. */
std::size_t expectedVertexOffsetCount = vertexOffsets.size();
#ifndef MAGNUM_TARGET_GLES2
std::size_t expectedInstanceOffsetCount = instanceOffsets.size();
if(_indexBuffer.id()) {
/* Use counts.size() instead of <the-other>OffsetCount to avoid hitting
a wrong assert in case the vertex/instance count doesn't match */
if(expectedVertexOffsetCount && !expectedInstanceOffsetCount)
expectedInstanceOffsetCount = counts.size();
else if(expectedInstanceOffsetCount && !expectedVertexOffsetCount)
expectedVertexOffsetCount = counts.size();
}
#endif
/* Otherwise allocate contiguous copies. While it's possible that some
views could have been contigous already and some not, such scenario is
unlikely to make a practical sense, so we'll allocate & copy always. */
Containers::ArrayView<UnsignedInt> countsContiguous;
Containers::ArrayView<UnsignedInt> instanceCountsContiguous;
Containers::ArrayView<UnsignedInt> vertexOffsetsContiguous;
#ifdef CORRADE_TARGET_32BIT
Containers::ArrayView<UnsignedInt>
#else
Containers::ArrayView<UnsignedLong>
#endif
indexOffsetsContiguous;
#ifndef MAGNUM_TARGET_GLES2
Containers::ArrayView<UnsignedInt> instanceOffsetsContiguous;
#endif
Containers::ArrayTuple data{
{NoInit, counts.size(), countsContiguous},
{NoInit, instanceCounts.size(), instanceCountsContiguous},
/* Zero init vertex offsets if we don't have them */
vertexOffsets.size() ?
Containers::ArrayTuple::Item{NoInit, expectedVertexOffsetCount, vertexOffsetsContiguous} :
Containers::ArrayTuple::Item{ValueInit, expectedVertexOffsetCount, vertexOffsetsContiguous},
/* On 64-bit we'll be filling just the lower 32 bits so zero-init the
array. On 32-bit we'll overwrite it completely, so NoInit is fine. */
{
#ifdef CORRADE_TARGET_32BIT
NoInit
#else
ValueInit
#endif
, indexOffsets.size(), indexOffsetsContiguous},
#ifndef MAGNUM_TARGET_GLES2
/* Zero init instance offsets if we don't have them */
instanceOffsets.size() ?
Containers::ArrayTuple::Item{NoInit, expectedInstanceOffsetCount, instanceOffsetsContiguous} :
Containers::ArrayTuple::Item{ValueInit, expectedInstanceOffsetCount, instanceOffsetsContiguous},
#endif
};
Utility::copy(counts, countsContiguous);
Utility::copy(instanceCounts, instanceCountsContiguous);
/* Copy vertex offsets only if we have them and leave them zero-init'd
otherwise */
if(vertexOffsets.size())
Utility::copy(vertexOffsets, vertexOffsetsContiguous);
Utility::copy(indexOffsets,
#ifdef CORRADE_TARGET_32BIT
indexOffsetsContiguous
#else
/* Write to the lower 32 bits of the index offsets, which is the
leftmost bits on Little-Endian and rightmost on Big-Endian. On LE it
could be just Containers::arrayCast<const UnsignedInt>(indexOffsets)
but to minimize a chance of error on BE platforms that are hard to
test on, the same code is used for both. */
Containers::arrayCast<2, UnsignedInt>(stridedArrayView(indexOffsetsContiguous)).transposed<0, 1>()[
#ifndef CORRADE_TARGET_BIG_ENDIAN
0
#else
1
#endif
]
#endif
);
#ifndef MAGNUM_TARGET_GLES2
/* Copy instance offsets only if we have them and leave them zero-init'd
otherwise */
if(instanceOffsets.size())
Utility::copy(instanceOffsets, instanceOffsetsContiguous);
#endif
drawInternal(countsContiguous, instanceCountsContiguous, vertexOffsetsContiguous, indexOffsetsContiguous
#ifndef MAGNUM_TARGET_GLES2
, instanceOffsetsContiguous
#endif
);
}
#ifndef CORRADE_TARGET_32BIT
void Mesh::drawInternalStrided(const Containers::StridedArrayView1D<const UnsignedInt>& counts, const Containers::StridedArrayView1D<const UnsignedInt>& instanceCounts, const Containers::StridedArrayView1D<const UnsignedInt>& vertexOffsets, const Containers::StridedArrayView1D<const UnsignedLong>& indexOffsets
    #ifndef MAGNUM_TARGET_GLES2
    , const Containers::StridedArrayView1D<const UnsignedInt>& instanceOffsets
    #endif
) {
    /* The fast path is usable only when every view is contiguous and, where
       base instances exist, an indexed mesh specifies either both base vertex
       and base instance arrays or neither of them */
    bool fastPath = counts.isContiguous() && instanceCounts.isContiguous() && vertexOffsets.isContiguous() && indexOffsets.isContiguous();
    #ifndef MAGNUM_TARGET_GLES2
    fastPath = fastPath && instanceOffsets.isContiguous() && (!_indexBuffer.id() || vertexOffsets.size() == instanceOffsets.size());
    #endif
    if(fastPath) {
        drawInternal(counts.asContiguous(), instanceCounts.asContiguous(), vertexOffsets.asContiguous(), indexOffsets.asContiguous()
            #ifndef MAGNUM_TARGET_GLES2
            , instanceOffsets.asContiguous()
            #endif
            );
        return;
    }

    /* Slow path. View each 64-bit index offset as a pair of 32-bit halves and
       keep the half holding the lower 32 bits -- the first one on
       Little-Endian, the second on Big-Endian. On LE this could be just
       Containers::arrayCast<const UnsignedInt>(indexOffsets), but to minimize
       a chance of error on BE platforms that are hard to test on, the same
       expression is used for both. */
    const Containers::StridedArrayView1D<const UnsignedInt> indexOffsetsLow32 = Containers::arrayCast<2, const UnsignedInt>(indexOffsets).transposed<0, 1>()[
        #ifndef CORRADE_TARGET_BIG_ENDIAN
        0
        #else
        1
        #endif
        ];
    /* Delegate to the 32-bit overload, which allocates contiguous copies of
       all views and then calls drawInternal() */
    drawInternalStrided(counts, instanceCounts, vertexOffsets, indexOffsetsLow32
        #ifndef MAGNUM_TARGET_GLES2
        , instanceOffsets
        #endif
        );
}
#endif
#endif
#ifndef MAGNUM_TARGET_GLES2
void Mesh::drawInternal(Int count, Int baseVertex, Int instanceCount, UnsignedInt baseInstance, GLintptr indexOffset, Int indexStart, Int indexEnd)
#else
void Mesh::drawInternal(Int count, Int baseVertex, Int instanceCount, GLintptr indexOffset)
#endif
{
const Implementation::MeshState& state = Context::current().state().mesh;
const UnsignedInt indexByteOffset = _indexBuffer.id() ?
_indexBufferOffset + indexOffset*meshIndexTypeSize(_indexType) :
0;
state.bindImplementation(*this);
/* Non-instanced mesh */
if(instanceCount == 1
#ifdef MAGNUM_TARGET_GLES
/* See the "angle-instanced-attributes-always-draw-instanced"
workaround */
&& !_instanced
#endif
) {
/* Non-indexed mesh */
if(!_indexBuffer.id()) {
glDrawArrays(GLenum(_primitive), baseVertex, count);
/* Indexed mesh with base vertex */
} else if(baseVertex) {
#if !(defined(MAGNUM_TARGET_WEBGL) && defined(MAGNUM_TARGET_GLES2))
#ifndef MAGNUM_TARGET_GLES2
/* Indexed mesh with specified range */
if(indexEnd) {
#ifndef MAGNUM_TARGET_GLES
glDrawRangeElementsBaseVertex
#else
state.drawRangeElementsBaseVertexImplementation
#endif
(GLenum(_primitive), indexStart, indexEnd, count, GLenum(_indexType), reinterpret_cast<GLvoid*>(indexByteOffset), baseVertex);
/* Indexed mesh */
} else
#endif
{
#ifndef MAGNUM_TARGET_GLES
glDrawElementsBaseVertex
#else
state.drawElementsBaseVertexImplementation
#endif
(GLenum(_primitive), count, GLenum(_indexType), reinterpret_cast<GLvoid*>(indexByteOffset), baseVertex);
}
#else
CORRADE_ASSERT_UNREACHABLE("GL::AbstractShaderProgram::draw(): indexed mesh draw with base vertex specification possible only since WebGL 2.0", );
#endif
/* Indexed mesh */
} else {
#ifndef MAGNUM_TARGET_GLES2
/* Indexed mesh with specified range */
if(indexEnd) {
glDrawRangeElements(GLenum(_primitive), indexStart, indexEnd, count, GLenum(_indexType), reinterpret_cast<GLvoid*>(indexByteOffset));
/* Indexed mesh */
} else
#elif !defined(MAGNUM_TARGET_GLES2)
static_cast<void>(indexStart);
static_cast<void>(indexEnd);
#endif
{
glDrawElements(GLenum(_primitive), count, GLenum(_indexType), reinterpret_cast<GLvoid*>(indexByteOffset));
}
}
/* Instanced mesh */
} else {
/* Non-indexed mesh */
if(!_indexBuffer.id()) {
#ifndef MAGNUM_TARGET_GLES2
/* Non-indexed mesh with base instance */
if(baseInstance) {
#ifndef MAGNUM_TARGET_GLES
glDrawArraysInstancedBaseInstance
#else
state.drawArraysInstancedBaseInstanceImplementation
#endif
(GLenum(_primitive), baseVertex, count, instanceCount, baseInstance);
/* Non-indexed mesh */
} else
#endif
{
#ifndef MAGNUM_TARGET_GLES2
glDrawArraysInstanced
#else
state.drawArraysInstancedImplementation
#endif
(GLenum(_primitive), baseVertex, count, instanceCount);
}
/* Indexed mesh with base vertex */
} else if(baseVertex) {
#ifndef MAGNUM_TARGET_GLES2
/* Indexed mesh with base vertex and base instance */
if(baseInstance) {
#ifndef MAGNUM_TARGET_GLES
glDrawElementsInstancedBaseVertexBaseInstance
#else
state.drawElementsInstancedBaseVertexBaseInstanceImplementation
#endif
(GLenum(_primitive), count, GLenum(_indexType), reinterpret_cast<GLvoid*>(indexByteOffset), instanceCount, baseVertex, baseInstance);
/* Indexed mesh with base vertex */
} else {
#ifndef MAGNUM_TARGET_GLES
glDrawElementsInstancedBaseVertex
#else
state.drawElementsInstancedBaseVertexImplementation
#endif
(GLenum(_primitive), count, GLenum(_indexType), reinterpret_cast<GLvoid*>(indexByteOffset), instanceCount, baseVertex);
}
#else
CORRADE_ASSERT_UNREACHABLE("GL::AbstractShaderProgram::draw(): instanced indexed mesh draw with base vertex specification possible only since OpenGL ES 3.0", );
#endif
/* Indexed mesh */
} else {
#ifndef MAGNUM_TARGET_GLES2
/* Indexed mesh with base instance */
if(baseInstance) {
#ifndef MAGNUM_TARGET_GLES
glDrawElementsInstancedBaseInstance
#else
state.drawElementsInstancedBaseInstanceImplementation
#endif
(GLenum(_primitive), count, GLenum(_indexType), reinterpret_cast<GLvoid*>(indexByteOffset), instanceCount, baseInstance);