/*
* Copyright 2015-2021 Arm Limited
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* At your option, you may choose to accept this material under either:
* 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
* 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
*/
#include "spirv_cpp.hpp"
#include "spirv_cross_util.hpp"
#include "spirv_glsl.hpp"
#include "spirv_hlsl.hpp"
#include "spirv_msl.hpp"
#include "spirv_parser.hpp"
#include "spirv_reflect.hpp"
#include <algorithm>
#include <cstdio>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <stdexcept>
#include <unordered_map>
#include <unordered_set>
#ifdef _WIN32
#include <io.h>
#include <fcntl.h>
#endif
#ifdef HAVE_SPIRV_CROSS_GIT_VERSION
#include "gitversion.h"
#endif
using namespace spv;
using namespace SPIRV_CROSS_NAMESPACE;
using namespace std;
#ifdef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS
static inline void THROW(const char *str)
{
fprintf(stderr, "SPIRV-Cross will abort: %s\n", str);
fflush(stderr);
abort();
}
#else
#define THROW(x) throw runtime_error(x)
#endif
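// Lightweight command-line parsing helpers.
// CLICallbacks maps a flag string (e.g. "--msl") to a handler; arguments that
// do not start with '-' are routed to default_handler instead.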
struct CLIParser;
struct CLICallbacks
{
void add(const char *cli, const function<void(CLIParser &)> &func)
{
callbacks[cli] = func;
}
unordered_map<string, function<void(CLIParser &)>> callbacks;
function<void()> error_handler;
function<void(const char *)> default_handler;
};
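// Walks argc/argv, dispatching each recognized flag to its registered callback.
// Callbacks pull their own operands through next_uint(), next_string(), etc.
// Illustrative registration (a sketch of the pattern; the actual wiring happens
// further down in this file):
//   cbs.add("--version", [&args](CLIParser &parser) {
//       args.version = parser.next_uint();
//       args.set_version = true;
//   });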
struct CLIParser
{
CLIParser(CLICallbacks cbs_, int argc_, char *argv_[])
: cbs(std::move(cbs_))
, argc(argc_)
, argv(argv_)
{
}
bool parse()
{
#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS
try
#endif
{
while (argc && !ended_state)
{
const char *next = *argv++;
argc--;
if (*next != '-' && cbs.default_handler)
{
cbs.default_handler(next);
}
else
{
auto itr = cbs.callbacks.find(next);
if (itr == ::end(cbs.callbacks))
{
THROW("Invalid argument");
}
itr->second(*this);
}
}
return true;
}
#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS
catch (...)
{
if (cbs.error_handler)
{
cbs.error_handler();
}
return false;
}
#endif
}
void end()
{
ended_state = true;
}
uint32_t next_uint()
{
if (!argc)
{
THROW("Tried to parse uint, but nothing left in arguments");
}
uint64_t val = stoul(*argv);
if (val > numeric_limits<uint32_t>::max())
{
THROW("next_uint() out of range");
}
argc--;
argv++;
return uint32_t(val);
}
uint32_t next_hex_uint()
{
if (!argc)
{
THROW("Tried to parse uint, but nothing left in arguments");
}
uint64_t val = stoul(*argv, nullptr, 16);
if (val > numeric_limits<uint32_t>::max())
{
THROW("next_uint() out of range");
}
argc--;
argv++;
return uint32_t(val);
}
double next_double()
{
if (!argc)
{
THROW("Tried to parse double, but nothing left in arguments");
}
double val = stod(*argv);
argc--;
argv++;
return val;
}
// Return a string only if it's not prefixed with `--`, otherwise return the default value
const char *next_value_string(const char *default_value)
{
if (!argc)
{
return default_value;
}
if (0 == strncmp("--", *argv, 2))
{
return default_value;
}
return next_string();
}
const char *next_string()
{
if (!argc)
{
THROW("Tried to parse string, but nothing left in arguments");
}
const char *ret = *argv;
argc--;
argv++;
return ret;
}
CLICallbacks cbs;
int argc;
char **argv;
bool ended_state = false;
};
#if defined(__clang__) || defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#elif defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 4996)
#endif
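// Reads a SPIR-V word stream from stdin, switching stdin to binary mode on Windows first.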
static vector<uint32_t> read_spirv_file_stdin()
{
#ifdef _WIN32
setmode(fileno(stdin), O_BINARY);
#endif
vector<uint32_t> buffer;
uint32_t tmp[256];
size_t ret;
while ((ret = fread(tmp, sizeof(uint32_t), 256, stdin)))
buffer.insert(buffer.end(), tmp, tmp + ret);
return buffer;
}
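// Loads a SPIR-V binary from the given path, or from stdin when the path is "-".
// Returns an empty vector on failure.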
static vector<uint32_t> read_spirv_file(const char *path)
{
if (path[0] == '-' && path[1] == '\0')
return read_spirv_file_stdin();
FILE *file = fopen(path, "rb");
if (!file)
{
fprintf(stderr, "Failed to open SPIR-V file: %s\n", path);
return {};
}
fseek(file, 0, SEEK_END);
long len = ftell(file) / sizeof(uint32_t);
rewind(file);
vector<uint32_t> spirv(len);
if (fread(spirv.data(), sizeof(uint32_t), len, file) != size_t(len))
spirv.clear();
fclose(file);
return spirv;
}
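// Writes the generated source string to the given path. Returns false if the file cannot be opened.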
static bool write_string_to_file(const char *path, const char *string)
{
FILE *file = fopen(path, "w");
if (!file)
{
fprintf(stderr, "Failed to write file: %s\n", path);
return false;
}
fprintf(file, "%s", string);
fclose(file);
return true;
}
#if defined(__clang__) || defined(__GNUC__)
#pragma GCC diagnostic pop
#elif defined(_MSC_VER)
#pragma warning(pop)
#endif
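// Dumps the shader's builtin inputs or outputs (Position, PointSize, ClipDistance, ...),
// along with their types and whether each builtin is actively used.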
static void print_resources(const Compiler &compiler, spv::StorageClass storage,
const SmallVector<BuiltInResource> &resources)
{
fprintf(stderr, "%s\n", storage == StorageClassInput ? "builtin inputs" : "builtin outputs");
fprintf(stderr, "=============\n\n");
for (auto &res : resources)
{
bool active = compiler.has_active_builtin(res.builtin, storage);
const char *basetype = "?";
auto &type = compiler.get_type(res.value_type_id);
switch (type.basetype)
{
case SPIRType::Float: basetype = "float"; break;
case SPIRType::Int: basetype = "int"; break;
case SPIRType::UInt: basetype = "uint"; break;
default: break;
}
uint32_t array_size = 0;
bool array_size_literal = false;
if (!type.array.empty())
{
array_size = type.array.front();
array_size_literal = type.array_size_literal.front();
}
string type_str = basetype;
if (type.vecsize > 1)
type_str += std::to_string(type.vecsize);
if (array_size)
{
if (array_size_literal)
type_str += join("[", array_size, "]");
else
type_str += join("[", array_size, " (spec constant ID)]");
}
string builtin_str;
switch (res.builtin)
{
case spv::BuiltInPosition: builtin_str = "Position"; break;
case spv::BuiltInPointSize: builtin_str = "PointSize"; break;
case spv::BuiltInCullDistance: builtin_str = "CullDistance"; break;
case spv::BuiltInClipDistance: builtin_str = "ClipDistance"; break;
case spv::BuiltInTessLevelInner: builtin_str = "TessLevelInner"; break;
case spv::BuiltInTessLevelOuter: builtin_str = "TessLevelOuter"; break;
default: builtin_str = string("builtin #") + to_string(res.builtin);
}
fprintf(stderr, "Builtin %s (%s) (active: %s).\n", builtin_str.c_str(), type_str.c_str(), active ? "yes" : "no");
}
fprintf(stderr, "=============\n\n");
}
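// Dumps one category of shader resources (UBOs, SSBOs, images, ...), printing bindings,
// decorations, block sizes and HLSL counter buffers where applicable.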
static void print_resources(const Compiler &compiler, const char *tag, const SmallVector<Resource> &resources)
{
fprintf(stderr, "%s\n", tag);
fprintf(stderr, "=============\n\n");
bool print_ssbo = !strcmp(tag, "ssbos");
for (auto &res : resources)
{
auto &type = compiler.get_type(res.type_id);
if (print_ssbo && compiler.buffer_is_hlsl_counter_buffer(res.id))
continue;
// If we don't have a name, use the fallback for the type instead of the variable
// for SSBOs and UBOs since those are the only meaningful names to use externally.
// Push constant blocks are still accessed by name and not block name, even though they are technically Blocks.
bool is_push_constant = compiler.get_storage_class(res.id) == StorageClassPushConstant;
bool is_block = compiler.get_decoration_bitset(type.self).get(DecorationBlock) ||
compiler.get_decoration_bitset(type.self).get(DecorationBufferBlock);
bool is_sized_block = is_block && (compiler.get_storage_class(res.id) == StorageClassUniform ||
compiler.get_storage_class(res.id) == StorageClassUniformConstant);
ID fallback_id = !is_push_constant && is_block ? ID(res.base_type_id) : ID(res.id);
uint32_t block_size = 0;
uint32_t runtime_array_stride = 0;
if (is_sized_block)
{
auto &base_type = compiler.get_type(res.base_type_id);
block_size = uint32_t(compiler.get_declared_struct_size(base_type));
runtime_array_stride = uint32_t(compiler.get_declared_struct_size_runtime_array(base_type, 1) -
compiler.get_declared_struct_size_runtime_array(base_type, 0));
}
Bitset mask;
if (print_ssbo)
mask = compiler.get_buffer_block_flags(res.id);
else
mask = compiler.get_decoration_bitset(res.id);
string array;
for (auto arr : type.array)
array = join("[", arr ? convert_to_string(arr) : "", "]") + array;
fprintf(stderr, " ID %03u : %s%s", uint32_t(res.id),
!res.name.empty() ? res.name.c_str() : compiler.get_fallback_name(fallback_id).c_str(), array.c_str());
if (mask.get(DecorationLocation))
fprintf(stderr, " (Location : %u)", compiler.get_decoration(res.id, DecorationLocation));
if (mask.get(DecorationDescriptorSet))
fprintf(stderr, " (Set : %u)", compiler.get_decoration(res.id, DecorationDescriptorSet));
if (mask.get(DecorationBinding))
fprintf(stderr, " (Binding : %u)", compiler.get_decoration(res.id, DecorationBinding));
if (static_cast<const CompilerGLSL &>(compiler).variable_is_depth_or_compare(res.id))
fprintf(stderr, " (comparison)");
if (mask.get(DecorationInputAttachmentIndex))
fprintf(stderr, " (Attachment : %u)", compiler.get_decoration(res.id, DecorationInputAttachmentIndex));
if (mask.get(DecorationNonReadable))
fprintf(stderr, " writeonly");
if (mask.get(DecorationNonWritable))
fprintf(stderr, " readonly");
if (mask.get(DecorationRestrict))
fprintf(stderr, " restrict");
if (mask.get(DecorationCoherent))
fprintf(stderr, " coherent");
if (mask.get(DecorationVolatile))
fprintf(stderr, " volatile");
if (is_sized_block)
{
fprintf(stderr, " (BlockSize : %u bytes)", block_size);
if (runtime_array_stride)
fprintf(stderr, " (Unsized array stride: %u bytes)", runtime_array_stride);
}
uint32_t counter_id = 0;
if (print_ssbo && compiler.buffer_get_hlsl_counter_buffer(res.id, counter_id))
fprintf(stderr, " (HLSL counter buffer ID: %u)", counter_id);
fprintf(stderr, "\n");
}
fprintf(stderr, "=============\n\n");
}
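// Maps a SPIR-V execution model to a short human-readable stage name.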
static const char *execution_model_to_str(spv::ExecutionModel model)
{
switch (model)
{
case spv::ExecutionModelVertex:
return "vertex";
case spv::ExecutionModelTessellationControl:
return "tessellation control";
case ExecutionModelTessellationEvaluation:
return "tessellation evaluation";
case ExecutionModelGeometry:
return "geometry";
case ExecutionModelFragment:
return "fragment";
case ExecutionModelGLCompute:
return "compute";
case ExecutionModelRayGenerationNV:
return "raygenNV";
case ExecutionModelIntersectionNV:
return "intersectionNV";
case ExecutionModelCallableNV:
return "callableNV";
case ExecutionModelAnyHitNV:
return "anyhitNV";
case ExecutionModelClosestHitNV:
return "closesthitNV";
case ExecutionModelMissNV:
return "missNV";
default:
return "???";
}
}
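// Top-level reflection dump: entry points, execution modes, then every resource category.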
static void print_resources(const Compiler &compiler, const ShaderResources &res)
{
auto &modes = compiler.get_execution_mode_bitset();
fprintf(stderr, "Entry points:\n");
auto entry_points = compiler.get_entry_points_and_stages();
for (auto &e : entry_points)
fprintf(stderr, " %s (%s)\n", e.name.c_str(), execution_model_to_str(e.execution_model));
fprintf(stderr, "\n");
fprintf(stderr, "Execution modes:\n");
modes.for_each_bit([&](uint32_t i) {
auto mode = static_cast<ExecutionMode>(i);
uint32_t arg0 = compiler.get_execution_mode_argument(mode, 0);
uint32_t arg1 = compiler.get_execution_mode_argument(mode, 1);
uint32_t arg2 = compiler.get_execution_mode_argument(mode, 2);
switch (static_cast<ExecutionMode>(i))
{
case ExecutionModeInvocations:
fprintf(stderr, " Invocations: %u\n", arg0);
break;
case ExecutionModeLocalSize:
fprintf(stderr, " LocalSize: (%u, %u, %u)\n", arg0, arg1, arg2);
break;
case ExecutionModeOutputVertices:
fprintf(stderr, " OutputVertices: %u\n", arg0);
break;
#define CHECK_MODE(m) \
case ExecutionMode##m: \
fprintf(stderr, " %s\n", #m); \
break
CHECK_MODE(SpacingEqual);
CHECK_MODE(SpacingFractionalEven);
CHECK_MODE(SpacingFractionalOdd);
CHECK_MODE(VertexOrderCw);
CHECK_MODE(VertexOrderCcw);
CHECK_MODE(PixelCenterInteger);
CHECK_MODE(OriginUpperLeft);
CHECK_MODE(OriginLowerLeft);
CHECK_MODE(EarlyFragmentTests);
CHECK_MODE(PointMode);
CHECK_MODE(Xfb);
CHECK_MODE(DepthReplacing);
CHECK_MODE(DepthGreater);
CHECK_MODE(DepthLess);
CHECK_MODE(DepthUnchanged);
CHECK_MODE(LocalSizeHint);
CHECK_MODE(InputPoints);
CHECK_MODE(InputLines);
CHECK_MODE(InputLinesAdjacency);
CHECK_MODE(Triangles);
CHECK_MODE(InputTrianglesAdjacency);
CHECK_MODE(Quads);
CHECK_MODE(Isolines);
CHECK_MODE(OutputPoints);
CHECK_MODE(OutputLineStrip);
CHECK_MODE(OutputTriangleStrip);
CHECK_MODE(VecTypeHint);
CHECK_MODE(ContractionOff);
default:
break;
}
});
fprintf(stderr, "\n");
print_resources(compiler, "subpass inputs", res.subpass_inputs);
print_resources(compiler, "inputs", res.stage_inputs);
print_resources(compiler, "outputs", res.stage_outputs);
print_resources(compiler, "textures", res.sampled_images);
print_resources(compiler, "separate images", res.separate_images);
print_resources(compiler, "separate samplers", res.separate_samplers);
print_resources(compiler, "images", res.storage_images);
print_resources(compiler, "ssbos", res.storage_buffers);
print_resources(compiler, "ubos", res.uniform_buffers);
print_resources(compiler, "push", res.push_constant_buffers);
print_resources(compiler, "counters", res.atomic_counters);
print_resources(compiler, "acceleration structures", res.acceleration_structures);
print_resources(compiler, "record buffers", res.shader_record_buffers);
print_resources(compiler, spv::StorageClassInput, res.builtin_inputs);
print_resources(compiler, spv::StorageClassOutput, res.builtin_outputs);
}
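// Lists the actively used members of each push constant block with their offsets and ranges.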
static void print_push_constant_resources(const Compiler &compiler, const SmallVector<Resource> &res)
{
for (auto &block : res)
{
auto ranges = compiler.get_active_buffer_ranges(block.id);
fprintf(stderr, "Active members in buffer: %s\n",
!block.name.empty() ? block.name.c_str() : compiler.get_fallback_name(block.id).c_str());
fprintf(stderr, "==================\n\n");
for (auto &range : ranges)
{
const auto &name = compiler.get_member_name(block.base_type_id, range.index);
fprintf(stderr, "Member #%3u (%s): Offset: %4u, Range: %4u\n", range.index,
!name.empty() ? name.c_str() : compiler.get_fallback_member_name(range.index).c_str(),
unsigned(range.offset), unsigned(range.range));
}
fprintf(stderr, "==================\n\n");
}
}
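// Lists all specialization constants with their SPIR-V IDs and SpecId decorations.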
static void print_spec_constants(const Compiler &compiler)
{
auto spec_constants = compiler.get_specialization_constants();
fprintf(stderr, "Specialization constants\n");
fprintf(stderr, "==================\n\n");
for (auto &c : spec_constants)
fprintf(stderr, "ID: %u, Spec ID: %u\n", uint32_t(c.id), c.constant_id);
fprintf(stderr, "==================\n\n");
}
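// Lists the capabilities and extensions declared by the SPIR-V module.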
static void print_capabilities_and_extensions(const Compiler &compiler)
{
fprintf(stderr, "Capabilities\n");
fprintf(stderr, "============\n");
for (auto &capability : compiler.get_declared_capabilities())
fprintf(stderr, "Capability: %u\n", static_cast<unsigned>(capability));
fprintf(stderr, "============\n\n");
fprintf(stderr, "Extensions\n");
fprintf(stderr, "============\n");
for (auto &ext : compiler.get_declared_extensions())
fprintf(stderr, "Extension: %s\n", ext.c_str());
fprintf(stderr, "============\n\n");
}
struct PLSArg
{
PlsFormat format;
string name;
};
struct Remap
{
string src_name;
string dst_name;
unsigned components;
};
struct VariableTypeRemap
{
string variable_name;
string new_variable_type;
};
struct InterfaceVariableRename
{
StorageClass storageClass;
uint32_t location;
string variable_name;
};
struct HLSLVertexAttributeRemapNamed
{
std::string name;
std::string semantic;
};
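// Every option parsed from the command line. The defaults below define the tool's default behavior.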
struct CLIArguments
{
const char *input = nullptr;
const char *output = nullptr;
const char *cpp_interface_name = nullptr;
uint32_t version = 0;
uint32_t shader_model = 0;
uint32_t msl_version = 0;
bool es = false;
bool set_version = false;
bool set_shader_model = false;
bool set_msl_version = false;
bool set_es = false;
bool dump_resources = false;
bool force_temporary = false;
bool flatten_ubo = false;
bool fixup = false;
bool yflip = false;
bool sso = false;
bool support_nonzero_baseinstance = true;
bool msl_capture_output_to_buffer = false;
bool msl_swizzle_texture_samples = false;
bool msl_ios = false;
bool msl_pad_fragment_output = false;
bool msl_domain_lower_left = false;
bool msl_argument_buffers = false;
uint32_t msl_argument_buffers_tier = 0; // Tier 1
bool msl_texture_buffer_native = false;
bool msl_framebuffer_fetch = false;
bool msl_invariant_float_math = false;
bool msl_emulate_cube_array = false;
bool msl_multiview = false;
bool msl_multiview_layered_rendering = true;
bool msl_view_index_from_device_index = false;
bool msl_dispatch_base = false;
bool msl_decoration_binding = false;
bool msl_force_active_argument_buffer_resources = false;
bool msl_force_native_arrays = false;
bool msl_enable_frag_depth_builtin = true;
bool msl_enable_frag_stencil_ref_builtin = true;
uint32_t msl_enable_frag_output_mask = 0xffffffff;
bool msl_enable_clip_distance_user_varying = true;
bool msl_raw_buffer_tese_input = false;
bool msl_multi_patch_workgroup = false;
bool msl_vertex_for_tessellation = false;
uint32_t msl_additional_fixed_sample_mask = 0xffffffff;
bool msl_arrayed_subpass_input = false;
uint32_t msl_r32ui_linear_texture_alignment = 4;
uint32_t msl_r32ui_alignment_constant_id = 65535;
bool msl_texture_1d_as_2d = false;
bool msl_ios_use_simdgroup_functions = false;
bool msl_emulate_subgroups = false;
uint32_t msl_fixed_subgroup_size = 0;
bool msl_force_sample_rate_shading = false;
bool msl_manual_helper_invocation_updates = true;
bool msl_check_discarded_frag_stores = false;
bool msl_sample_dref_lod_array_as_grad = false;
bool msl_runtime_array_rich_descriptor = false;
bool msl_replace_recursive_inputs = false;
bool msl_readwrite_texture_fences = true;
bool msl_agx_manual_cube_grad_fixup = false;
const char *msl_combined_sampler_suffix = nullptr;
bool glsl_emit_push_constant_as_ubo = false;
bool glsl_emit_ubo_as_plain_uniforms = false;
bool glsl_force_flattened_io_blocks = false;
uint32_t glsl_ovr_multiview_view_count = 0;
SmallVector<pair<uint32_t, uint32_t>> glsl_ext_framebuffer_fetch;
bool glsl_ext_framebuffer_fetch_noncoherent = false;
bool vulkan_glsl_disable_ext_samplerless_texture_functions = false;
bool emit_line_directives = false;
bool enable_storage_image_qualifier_deduction = true;
bool force_zero_initialized_variables = false;
bool relax_nan_checks = false;
uint32_t force_recompile_max_debug_iterations = 3;
SmallVector<uint32_t> msl_discrete_descriptor_sets;
SmallVector<uint32_t> msl_device_argument_buffers;
SmallVector<pair<uint32_t, uint32_t>> msl_dynamic_buffers;
SmallVector<pair<uint32_t, uint32_t>> msl_inline_uniform_blocks;
SmallVector<MSLShaderInterfaceVariable> msl_shader_inputs;
SmallVector<MSLShaderInterfaceVariable> msl_shader_outputs;
SmallVector<PLSArg> pls_in;
SmallVector<PLSArg> pls_out;
SmallVector<Remap> remaps;
SmallVector<string> extensions;
SmallVector<VariableTypeRemap> variable_type_remaps;
SmallVector<InterfaceVariableRename> interface_variable_renames;
SmallVector<HLSLVertexAttributeRemap> hlsl_attr_remap;
SmallVector<HLSLVertexAttributeRemapNamed> hlsl_attr_remap_named;
SmallVector<std::pair<uint32_t, uint32_t>> masked_stage_outputs;
SmallVector<BuiltIn> masked_stage_builtins;
string entry;
string entry_stage;
struct Rename
{
string old_name;
string new_name;
ExecutionModel execution_model;
};
SmallVector<Rename> entry_point_rename;
uint32_t iterations = 1;
bool cpp = false;
string reflect;
bool msl = false;
bool hlsl = false;
bool hlsl_compat = false;
bool hlsl_support_nonzero_base = false;
bool hlsl_base_vertex_index_explicit_binding = false;
uint32_t hlsl_base_vertex_index_register_index = 0;
uint32_t hlsl_base_vertex_index_register_space = 0;
bool hlsl_force_storage_buffer_as_uav = false;
bool hlsl_nonwritable_uav_texture_as_srv = false;
bool hlsl_enable_16bit_types = false;
bool hlsl_flatten_matrix_vertex_input_semantics = false;
bool hlsl_preserve_structured_buffers = false;
HLSLBindingFlags hlsl_binding_flags = 0;
bool vulkan_semantics = false;
bool flatten_multidimensional_arrays = false;
bool use_420pack_extension = true;
bool remove_unused = false;
bool combined_samplers_inherit_bindings = false;
};
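// Prints the git revision baked in at build time, if available.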
static void print_version()
{
#ifdef HAVE_SPIRV_CROSS_GIT_VERSION
fprintf(stderr, "%s\n", SPIRV_CROSS_GIT_REVISION);
#else
fprintf(stderr, "Git revision unknown. Build with CMake to create timestamp and revision info.\n");
#endif
}
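// Illustrative invocations (a sketch; consult the full help text emitted below for the
// authoritative flag list):
//   spirv-cross shader.spv --output shader.glsl --version 450
//   spirv-cross shader.spv -V --output shader.vk.glsl
//   spirv-cross shader.spv --msl --msl-version 20100 --output shader.metal
//   spirv-cross shader.spv --hlsl --shader-model 50 --output shader.hlsl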
static void print_help_backend()
{
// clang-format off
fprintf(stderr, "\nSelect backend:\n"
"\tBy default, OpenGL-style GLSL is the target, with #version and GLSL/ESSL information inherited from the SPIR-V module if present.\n"
"\t[--vulkan-semantics] or [-V]:\n\t\tEmit Vulkan GLSL instead of plain GLSL. Makes use of Vulkan-only features to match SPIR-V.\n"
"\t[--msl]:\n\t\tEmit Metal Shading Language (MSL).\n"
"\t[--hlsl]:\n\t\tEmit HLSL.\n"
"\t[--reflect]:\n\t\tEmit JSON reflection.\n"
"\t[--cpp]:\n\t\tDEPRECATED. Emits C++ code.\n"
);
// clang-format on
}
static void print_help_glsl()
{
// clang-format off
fprintf(stderr, "\nGLSL options:\n"
"\t[--es]:\n\t\tForce ESSL.\n"
"\t[--no-es]:\n\t\tForce desktop GLSL.\n"
"\t[--version <GLSL version>]:\n\t\tE.g. --version 450 will emit '#version 450' in shader.\n"
"\t\tCode generation will depend on the version used.\n"
"\t[--flatten-ubo]:\n\t\tEmit UBOs as plain uniform arrays which are suitable for use with glUniform4*v().\n"
"\t\tThis can be an optimization on GL implementations where this is faster or works around buggy driver implementations.\n"
"\t\tE.g.: uniform MyUBO { vec4 a; float b, c, d, e; }; will be emitted as uniform vec4 MyUBO[2];\n"
"\t\tCaveat: You cannot mix and match floating-point and integer in the same UBO with this option.\n"
"\t\tLegacy GLSL/ESSL (where this flattening makes sense) does not support bit-casting, which would have been the obvious workaround.\n"
"\t[--extension ext]:\n\t\tAdd #extension string of your choosing to GLSL output.\n"
"\t\tUseful if you use variable name remapping to something that requires an extension unknown to SPIRV-Cross.\n"
"\t[--remove-unused-variables]:\n\t\tDo not emit interface variables which are not statically accessed by the shader.\n"
"\t[--separate-shader-objects]:\n\t\tRedeclare gl_PerVertex blocks to be suitable for desktop GL separate shader objects.\n"
"\t[--glsl-emit-push-constant-as-ubo]:\n\t\tInstead of a plain uniform of struct for push constants, emit a UBO block instead.\n"
"\t[--glsl-emit-ubo-as-plain-uniforms]:\n\t\tInstead of emitting UBOs, emit them as plain uniform structs.\n"
"\t[--glsl-remap-ext-framebuffer-fetch input-attachment color-location]:\n\t\tRemaps an input attachment to use GL_EXT_shader_framebuffer_fetch.\n"
"\t\tgl_LastFragData[location] is read from. The attachment to read from must be declared as an output in the shader.\n"
"\t[--glsl-ext-framebuffer-fetch-noncoherent]:\n\t\tUses noncoherent qualifier for framebuffer fetch.\n"
"\t[--vulkan-glsl-disable-ext-samplerless-texture-functions]:\n\t\tDo not allow use of GL_EXT_samperless_texture_functions, even in Vulkan GLSL.\n"
"\t\tUse of texelFetch and similar might have to create dummy samplers to work around it.\n"
"\t[--combined-samplers-inherit-bindings]:\n\t\tInherit binding information from the textures when building combined image samplers from separate textures and samplers.\n"
"\t[--no-support-nonzero-baseinstance]:\n\t\tWhen using gl_InstanceIndex with desktop GL,\n"
"\t\tassume that base instance is always 0, and do not attempt to fix up gl_InstanceID to match Vulkan semantics.\n"
"\t[--pls-in format input-name]:\n\t\tRemaps a subpass input with name into a GL_EXT_pixel_local_storage input.\n"
"\t\tEntry in PLS block is ordered where first --pls-in marks the first entry. Can be called multiple times.\n"
"\t\tFormats allowed: r11f_g11f_b10f, r32f, rg16f, rg16, rgb10_a2, rgba8, rgba8i, rgba8ui, rg16i, rgb10_a2ui, rg16ui, r32ui.\n"
"\t\tRequires ESSL.\n"
"\t[--pls-out format output-name]:\n\t\tRemaps a color output with name into a GL_EXT_pixel_local_storage output.\n"
"\t\tEntry in PLS block is ordered where first --pls-output marks the first entry. Can be called multiple times.\n"
"\t\tFormats allowed: r11f_g11f_b10f, r32f, rg16f, rg16, rgb10_a2, rgba8, rgba8i, rgba8ui, rg16i, rgb10_a2ui, rg16ui, r32ui.\n"
"\t\tRequires ESSL.\n"
"\t[--remap source_name target_name components]:\n\t\tRemaps a variable to a different name with N components.\n"
"\t\tMain use case is to remap a subpass input to gl_LastFragDepthARM.\n"
"\t\tE.g.:\n"
"\t\tuniform subpassInput uDepth;\n"
"\t\t--remap uDepth gl_LastFragDepthARM 1 --extension GL_ARM_shader_framebuffer_fetch_depth_stencil\n"
"\t[--no-420pack-extension]:\n\t\tDo not make use of GL_ARB_shading_language_420pack in older GL targets to support layout(binding).\n"
"\t[--remap-variable-type <variable_name> <new_variable_type>]:\n\t\tRemaps a variable type based on name.\n"
"\t\tPrimary use case is supporting external samplers in ESSL for video rendering on Android where you could remap a texture to a YUV one.\n"
"\t[--glsl-force-flattened-io-blocks]:\n\t\tAlways flatten I/O blocks and structs.\n"
"\t[--glsl-ovr-multiview-view-count count]:\n\t\tIn GL_OVR_multiview2, specify layout(num_views).\n"
);
// clang-format on
}
static void print_help_hlsl()
{
// clang-format off
fprintf(stderr, "\nHLSL options:\n"
"\t[--shader-model]:\n\t\tEnables a specific shader model, e.g. --shader-model 50 for SM 5.0.\n"
"\t[--flatten-ubo]:\n\t\tEmit UBOs as plain uniform arrays.\n"
"\t\tE.g.: uniform MyUBO { vec4 a; float b, c, d, e; }; will be emitted as uniform float4 MyUBO[2];\n"
"\t\tCaveat: You cannot mix and match floating-point and integer in the same UBO with this option.\n"
"\t[--hlsl-enable-compat]:\n\t\tAllow point size and point coord to be used, even if they won't work as expected.\n"
"\t\tPointSize is ignored, and PointCoord returns (0.5, 0.5).\n"
"\t[--hlsl-support-nonzero-basevertex-baseinstance]:\n\t\tSupport base vertex and base instance by emitting a special cbuffer declared as:\n"
"\t\tcbuffer SPIRV_Cross_VertexInfo { int SPIRV_Cross_BaseVertex; int SPIRV_Cross_BaseInstance; };\n"
"\t[--hlsl-basevertex-baseinstance-binding <register index> <register space>]:\n\t\tAssign a fixed binding to SPIRV_Cross_VertexInfo.\n"
"\t[--hlsl-auto-binding (push, cbv, srv, uav, sampler, all)]\n"
"\t\tDo not emit any : register(#) bindings for specific resource types, and rely on HLSL compiler to assign something.\n"
"\t[--hlsl-force-storage-buffer-as-uav]:\n\t\tAlways emit SSBOs as UAVs, even when marked as read-only.\n"
"\t\tNormally, SSBOs marked with NonWritable will be emitted as SRVs.\n"
"\t[--hlsl-nonwritable-uav-texture-as-srv]:\n\t\tEmit NonWritable storage images as SRV textures instead of UAV.\n"
"\t\tUsing this option messes with the type system. SPIRV-Cross cannot guarantee that this will work.\n"
"\t\tOne major problem area with this feature is function arguments, where we won't know if we're seeing a UAV or SRV.\n"
"\t\tShader must ensure that read/write state is consistent at all call sites.\n"
"\t[--set-hlsl-vertex-input-semantic <location> <semantic>]:\n\t\tEmits a specific vertex input semantic for a given location.\n"
"\t\tOtherwise, TEXCOORD# is used as semantics, where # is location.\n"
"\t[--set-hlsl-named-vertex-input-semantic <name> <semantic>]:\n\t\tEmits a specific vertex input semantic for a given name.\n"
"\t\tOpName reflection information must be intact.\n"
"\t[--hlsl-enable-16bit-types]:\n\t\tEnables native use of half/int16_t/uint16_t and ByteAddressBuffer interaction with these types. Requires SM 6.2.\n"
"\t[--hlsl-flatten-matrix-vertex-input-semantics]:\n\t\tEmits matrix vertex inputs with input semantics as if they were independent vectors, e.g. TEXCOORD{2,3,4} rather than matrix form TEXCOORD2_{0,1,2}.\n"
"\t[--hlsl-preserve-structured-buffers]:\n\t\tEmit SturucturedBuffer<T> rather than ByteAddressBuffer. Requires UserTypeGOOGLE to be emitted. Intended for DXC roundtrips.\n"
);
// clang-format on
}
static void print_help_msl()
{
// clang-format off
fprintf(stderr, "\nMSL options:\n"
"\t[--msl-version <MMmmpp>]:\n\t\tUses a specific MSL version, e.g. --msl-version 20100 for MSL 2.1.\n"
"\t[--msl-capture-output]:\n\t\tWrites geometry varyings to a buffer instead of as stage-outputs.\n"
"\t[--msl-swizzle-texture-samples]:\n\t\tWorks around lack of support for VkImageView component swizzles.\n"
"\t\tThis has a massive impact on performance and bloat. Do not use this unless you are absolutely forced to.\n"
"\t\tTo use this feature, the API side must pass down swizzle buffers.\n"
"\t\tShould only be used by translation layers as a last resort.\n"
"\t\tRecent Metal versions do not require this workaround.\n"
"\t[--msl-ios]:\n\t\tTarget iOS Metal instead of macOS Metal.\n"
"\t[--msl-pad-fragment-output]:\n\t\tAlways emit color outputs as 4-component variables.\n"
"\t\tIn Metal, the fragment shader must emit at least as many components as the render target format.\n"
"\t[--msl-domain-lower-left]:\n\t\tUse a lower-left tessellation domain.\n"
"\t[--msl-argument-buffers]:\n\t\tEmit Metal argument buffers instead of discrete resource bindings.\n"
"\t\tRequires MSL 2.0 to be enabled.\n"
"\t[--msl-argument-buffer-tier]:\n\t\tWhen using Metal argument buffers, indicate the Metal argument buffer tier level supported by the Metal platform.\n"
"\t\tUses same values as Metal MTLArgumentBuffersTier enumeration (0 = Tier1, 1 = Tier2).\n"
"\t\tNOTE: Setting this value no longer enables msl-argument-buffers implicitly.\n"
"\t[--msl-runtime-array-rich-descriptor]:\n\t\tWhen declaring a runtime array of SSBOs, declare an array of {ptr, len} pairs to support OpArrayLength.\n"
"\t[--msl-replace-recursive-inputs]:\n\t\tWorks around a Metal 3.1 regression bug, which causes an infinite recursion crash during Metal's analysis of an entry point input structure that itself contains internal recursion.\n"
"\t[--msl-texture-buffer-native]:\n\t\tEnable native support for texel buffers. Otherwise, it is emulated as a normal texture.\n"
"\t[--msl-framebuffer-fetch]:\n\t\tImplement subpass inputs with frame buffer fetch.\n"
"\t\tEmits [[color(N)]] inputs in fragment stage.\n"
"\t\tRequires an Apple GPU.\n"
"\t[--msl-emulate-cube-array]:\n\t\tEmulate cube arrays with 2D array and manual math.\n"
"\t[--msl-discrete-descriptor-set <index>]:\n\t\tWhen using argument buffers, forces a specific descriptor set to be implemented without argument buffers.\n"
"\t\tUseful for implementing push descriptors in emulation layers.\n"
"\t\tCan be used multiple times for each descriptor set in question.\n"
"\t[--msl-device-argument-buffer <descriptor set index>]:\n\t\tUse device address space to hold indirect argument buffers instead of constant.\n"
"\t\tComes up when trying to support argument buffers which are larger than 64 KiB.\n"
"\t[--msl-multiview]:\n\t\tEnable SPV_KHR_multiview emulation.\n"
"\t[--msl-multiview-no-layered-rendering]:\n\t\tDon't set [[render_target_array_index]] in multiview shaders.\n"
"\t\tUseful for devices which don't support layered rendering. Only effective when --msl-multiview is enabled.\n"
"\t[--msl-view-index-from-device-index]:\n\t\tTreat the view index as the device index instead.\n"
"\t\tFor multi-GPU rendering.\n"
"\t[--msl-dispatch-base]:\n\t\tAdd support for vkCmdDispatchBase() or similar APIs.\n"
"\t\tOffsets the workgroup ID based on a buffer.\n"
"\t[--msl-dynamic-buffer <set index> <binding>]:\n\t\tMarks a buffer as having dynamic offset.\n"
"\t\tThe offset is applied in the shader with pointer arithmetic.\n"
"\t\tUseful for argument buffers where it is non-trivial to apply dynamic offset otherwise.\n"
"\t[--msl-inline-uniform-block <set index> <binding>]:\n\t\tIn argument buffers, mark an UBO as being an inline uniform block which is embedded into the argument buffer itself.\n"
"\t[--msl-decoration-binding]:\n\t\tUse SPIR-V bindings directly as MSL bindings.\n"
"\t\tThis does not work in the general case as there is no descriptor set support, and combined image samplers are split up.\n"
"\t\tHowever, if the shader author knows of binding limitations, this option will avoid the need for reflection on Metal side.\n"
"\t[--msl-force-active-argument-buffer-resources]:\n\t\tAlways emit resources which are part of argument buffers.\n"
"\t\tThis makes sure that similar shaders with same resource declarations can share the argument buffer as declaring an argument buffer implies an ABI.\n"
"\t[--msl-force-native-arrays]:\n\t\tRather than implementing array types as a templated value type ala std::array<T>, use plain, native arrays.\n"
"\t\tThis will lead to worse code-gen, but can work around driver bugs on certain driver revisions of certain Intel-based Macbooks where template arrays break.\n"
"\t[--msl-disable-frag-depth-builtin]:\n\t\tDisables FragDepth output. Useful if pipeline does not enable depth, as pipeline creation might otherwise fail.\n"
"\t[--msl-disable-frag-stencil-ref-builtin]:\n\t\tDisable FragStencilRef output. Useful if pipeline does not enable stencil output, as pipeline creation might otherwise fail.\n"
"\t[--msl-enable-frag-output-mask <mask>]:\n\t\tOnly selectively enable fragment outputs. Useful if pipeline does not enable fragment output for certain locations, as pipeline creation might otherwise fail.\n"
"\t[--msl-no-clip-distance-user-varying]:\n\t\tDo not emit user varyings to emulate gl_ClipDistance in fragment shaders.\n"
"\t[--msl-add-shader-input <index> <format> <size> <rate>]:\n\t\tSpecify the format of the shader input at <index>.\n"
"\t\t<format> can be 'any32', 'any16', 'u16', 'u8', or 'other', to indicate a 32-bit opaque value, 16-bit opaque value, 16-bit unsigned integer, 8-bit unsigned integer, "
"or other-typed variable. <size> is the vector length of the variable, which must be greater than or equal to that declared in the shader. <rate> can be 'vertex', "
"'primitive', or 'patch' to indicate a per-vertex, per-primitive, or per-patch variable.\n"
"\t\tUseful if shader stage interfaces don't match up, as pipeline creation might otherwise fail.\n"
"\t[--msl-add-shader-output <index> <format> <size> <rate>]:\n\t\tSpecify the format of the shader output at <index>.\n"
"\t\t<format> can be 'any32', 'any16', 'u16', 'u8', or 'other', to indicate a 32-bit opaque value, 16-bit opaque value, 16-bit unsigned integer, 8-bit unsigned integer, "
"or other-typed variable. <size> is the vector length of the variable, which must be greater than or equal to that declared in the shader. <rate> can be 'vertex', "
"'primitive', or 'patch' to indicate a per-vertex, per-primitive, or per-patch variable.\n"
"\t\tUseful if shader stage interfaces don't match up, as pipeline creation might otherwise fail.\n"
"\t[--msl-shader-input <index> <format> <size>]:\n\t\tSpecify the format of the shader input at <index>.\n"
"\t\t<format> can be 'any32', 'any16', 'u16', 'u8', or 'other', to indicate a 32-bit opaque value, 16-bit opaque value, 16-bit unsigned integer, 8-bit unsigned integer, "
"or other-typed variable. <size> is the vector length of the variable, which must be greater than or equal to that declared in the shader."
"\t\tEquivalent to --msl-add-shader-input with a rate of 'vertex'.\n"
"\t[--msl-shader-output <index> <format> <size>]:\n\t\tSpecify the format of the shader output at <index>.\n"
"\t\t<format> can be 'any32', 'any16', 'u16', 'u8', or 'other', to indicate a 32-bit opaque value, 16-bit opaque value, 16-bit unsigned integer, 8-bit unsigned integer, "
"or other-typed variable. <size> is the vector length of the variable, which must be greater than or equal to that declared in the shader."
"\t\tEquivalent to --msl-add-shader-output with a rate of 'vertex'.\n"
"\t[--msl-raw-buffer-tese-input]:\n\t\tUse raw buffers for tessellation evaluation input.\n"
"\t\tThis allows the use of nested structures and arrays.\n"
"\t\tIn a future version of SPIRV-Cross, this will become the default.\n"
"\t[--msl-multi-patch-workgroup]:\n\t\tUse the new style of tessellation control processing, where multiple patches are processed per workgroup.\n"
"\t\tThis should increase throughput by ensuring all the GPU's SIMD lanes are occupied, but it is not compatible with the old style.\n"
"\t\tIn addition, this style also passes input variables in buffers directly instead of using vertex attribute processing.\n"
"\t\tIn a future version of SPIRV-Cross, this will become the default.\n"
"\t[--msl-vertex-for-tessellation]:\n\t\tWhen handling a vertex shader, marks it as one that will be used with a new-style tessellation control shader.\n"
"\t\tThe vertex shader is output to MSL as a compute kernel which outputs vertices to the buffer in the order they are received, rather than in index order as with --msl-capture-output normally.\n"
"\t[--msl-additional-fixed-sample-mask <mask>]:\n"
"\t\tSet an additional fixed sample mask. If the shader outputs a sample mask, then the final sample mask will be a bitwise AND of the two.\n"
"\t[--msl-arrayed-subpass-input]:\n\t\tAssume that images of dimension SubpassData have multiple layers. Layered input attachments are accessed relative to BuiltInLayer.\n"
"\t\tThis option has no effect if multiview is also enabled.\n"
"\t[--msl-r32ui-linear-texture-align <alignment>]:\n\t\tThe required alignment of linear textures of format MTLPixelFormatR32Uint.\n"
"\t\tThis is used to align the row stride for atomic accesses to such images.\n"
"\t[--msl-r32ui-linear-texture-align-constant-id <id>]:\n\t\tThe function constant ID to use for the linear texture alignment.\n"
"\t\tOn MSL 1.2 or later, you can override the alignment by setting this function constant.\n"
"\t[--msl-texture-1d-as-2d]:\n\t\tEmit Image variables of dimension Dim1D as texture2d.\n"
"\t\tIn Metal, 1D textures do not support all features that 2D textures do. Use this option if your code relies on these features.\n"
"\t[--msl-ios-use-simdgroup-functions]:\n\t\tUse simd_*() functions for subgroup ops instead of quad_*().\n"
"\t\tRecent Apple GPUs support SIMD-groups larger than a quad. Use this option to take advantage of this support.\n"
"\t[--msl-emulate-subgroups]:\n\t\tAssume subgroups of size 1.\n"
"\t\tIntended for Vulkan Portability implementations where Metal support for SIMD-groups is insufficient for true subgroups.\n"
"\t[--msl-fixed-subgroup-size <size>]:\n\t\tAssign a constant <size> to the SubgroupSize builtin.\n"
"\t\tIntended for Vulkan Portability implementations where VK_EXT_subgroup_size_control is not supported or disabled.\n"
"\t\tIf 0, assume variable subgroup size as actually exposed by Metal.\n"
"\t[--msl-force-sample-rate-shading]:\n\t\tForce fragment shaders to run per sample.\n"
"\t\tThis adds a [[sample_id]] parameter if none is already present.\n"
"\t[--msl-no-manual-helper-invocation-updates]:\n\t\tDo not manually update the HelperInvocation builtin when a fragment is discarded.\n"
"\t\tSome Metal devices have a bug where simd_is_helper_thread() does not return true\n"
"\t\tafter the fragment is discarded. This behavior is required by Vulkan and SPIR-V, however.\n"
"\t[--msl-check-discarded-frag-stores]:\n\t\tAdd additional checks to resource stores in a fragment shader.\n"
"\t\tSome Metal devices have a bug where stores to resources from a fragment shader\n"
"\t\tcontinue to execute, even when the fragment is discarded. These checks\n"
"\t\tprevent these stores from executing.\n"
"\t[--msl-sample-dref-lod-array-as-grad]:\n\t\tUse a gradient instead of a level argument.\n"
"\t\tSome Metal devices have a bug where the level() argument to\n"
"\t\tdepth2d_array<T>::sample_compare() in a fragment shader is biased by some\n"
"\t\tunknown amount. This prevents the bias from being added.\n"
"\t[--msl-no-readwrite-texture-fences]:\n\t\tDo not insert fences before each read of a\n"
"\t\tread_write texture. MSL does not guarantee coherence between writes and later reads\n"
"\t\tof read_write textures. If you don't rely on this, you can disable this for a\n"
"\t\tpossible performance improvement.\n"
"\t[--msl-agx-manual-cube-grad-fixup]:\n\t\tManually transform cube texture gradients.\n"
"\t\tAll released Apple Silicon GPUs to date ignore one of the three partial derivatives\n"
"\t\tbased on the selected major axis, and expect the remaining derivatives to be\n"
"\t\tpartially transformed. This fixup gives correct results on Apple Silicon.\n"
"\t[--msl-combined-sampler-suffix <suffix>]:\n\t\tUses a custom suffix for combined samplers.\n");
// clang-format on
}
static void print_help_common()
{
// clang-format off
fprintf(stderr, "\nCommon options:\n"
"\t[--entry name]:\n\t\tUse a specific entry point. By default, the first entry point in the module is used.\n"
"\t[--stage <stage (vert, frag, geom, tesc, tese, comp)>]:\n\t\tForces use of a certain shader stage.\n"
"\t\tCan disambiguate the entry point if more than one entry point exists with same name, but different stage.\n"
"\t[--emit-line-directives]:\n\t\tIf SPIR-V has OpLine directives, aim to emit those accurately in output code as well.\n"
"\t[--rename-entry-point <old> <new> <stage>]:\n\t\tRenames an entry point from what is declared in SPIR-V to code output.\n"
"\t\tMostly relevant for HLSL or MSL.\n"
"\t[--rename-interface-variable <in|out> <location> <new_variable_name>]:\n\t\tRename an interface variable based on location decoration.\n"
"\t[--force-zero-initialized-variables]:\n\t\tForces temporary variables to be initialized to zero.\n"
"\t\tCan be useful in environments where compilers do not allow potentially uninitialized variables.\n"
"\t\tThis usually comes up with Phi temporaries.\n"
"\t[--fixup-clipspace]:\n\t\tFixup Z clip-space at the end of a vertex shader. The behavior is backend-dependent.\n"
"\t\tGLSL: Rewrites [0, w] Z range (D3D/Metal/Vulkan) to GL-style [-w, w].\n"
"\t\tHLSL/MSL: Rewrites [-w, w] Z range (GL) to D3D/Metal/Vulkan-style [0, w].\n"
"\t[--flip-vert-y]:\n\t\tInverts gl_Position.y (or equivalent) at the end of a vertex shader. This is equivalent to using negative viewport height.\n"
"\t[--mask-stage-output-location <location> <component>]:\n"
"\t\tIf a stage output variable with matching location and component is active, optimize away the variable if applicable.\n"
"\t[--mask-stage-output-builtin <Position|PointSize|ClipDistance|CullDistance>]:\n"
"\t\tIf a stage output variable with matching builtin is active, "
"optimize away the variable if it can affect cross-stage linking correctness.\n"
"\t[--relax-nan-checks]:\n\t\tRelax NaN checks for N{Clamp,Min,Max} and ordered vs. unordered compare instructions.\n"
);
// clang-format on