forked from nvpro-samples/gl_occlusion_culling
/
scan.comp.glsl
295 lines (224 loc) · 7.07 KB
/
scan.comp.glsl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
#version 430
/**/
#define TASK_SUM 0
#define TASK_OFFSETS 1
#define TASK_COMBINE 2
#ifndef TASK
#define TASK TASK_SUM
#endif
#define THREADBLOCK_SIZE 512
#define BATCH_SIZE (THREADBLOCK_SIZE*4)
uniform uint numElements;
///////////////////////////////////////////////////////
// based on CUDA Sample "scan.cu"
layout (local_size_x = THREADBLOCK_SIZE) in;
#if TASK != TASK_COMBINE
uint threadIdx = gl_LocalInvocationID.x;
#extension GL_NV_shader_thread_group : enable
#extension GL_NV_shader_thread_shuffle : enable
#if GL_NV_shader_thread_group
#define USESHUFFLE
#define LOG2_WARP_SIZE 5U
#define WARP_SIZE (1U << LOG2_WARP_SIZE)
// Almost the same as naive scan1Inclusive but doesn't need barriers
// nor shared memory
// and works only for size <= WARP_SIZE
#if GL_NV_shader_thread_shuffle
shared uint s_Data[(THREADBLOCK_SIZE / WARP_SIZE)];
// Inclusive prefix sum across a warp using NV shuffle, no shared memory
// or barriers needed. Valid only for size <= WARP_SIZE; size is expected
// to be a power of two (matches all call sites in this file).
// Returns, for each lane, the sum of idata over this lane and all lower lanes.
uint warpScanInclusive(uint idata, uint size){
uint sum = idata;
// Kogge-Stone style doubling: at most 5 steps for a 32-wide warp; the
// second loop condition stops early for sub-warp sizes.
for (int STEP = 0; STEP < 5 && (1<<(STEP+1)) <= size; STEP++){
bool valid = false;
// Pull the partial sum from the lane (1 << STEP) below; 'valid' is false
// for the low lanes that have no source lane, which are left unchanged.
uint temp = shuffleUpNV(sum, 1 << STEP, 32, valid);
if (valid) {
sum += temp;
}
}
return sum;
}
#else
shared uint s_Data[THREADBLOCK_SIZE * 2];
// Almost the same as naive scan1Inclusive but doesn't need barriers
// and works only for size <= WARP_SIZE
// Inclusive prefix sum across a warp via shared memory, used when
// GL_NV_shader_thread_shuffle is unavailable.
// Almost the same as naive scan1Inclusive but doesn't need barriers
// and works only for size <= WARP_SIZE; size must be a power of two.
// NOTE(review): like the CUDA "scan.cu" original, this relies on
// warp-synchronous execution between the unguarded shared-memory steps.
uint warpScanInclusive(uint idata, uint size){
// FIX: threadIdx is a scalar uint (see its declaration above), so the
// original "threadIdx.x" swizzle is invalid GLSL and fails to compile.
uint pos = 2 * threadIdx - (threadIdx & (size - 1));
// Zero the low half of this thread's slot pair so the first additions
// below read a neutral element instead of stale shared memory.
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
// Unrolled doubling steps; each guard keeps sub-warp sizes correct.
if(size >= 2) s_Data[pos] += s_Data[pos - 1];
if(size >= 4) s_Data[pos] += s_Data[pos - 2];
if(size >= 8) s_Data[pos] += s_Data[pos - 4];
if(size >= 16) s_Data[pos] += s_Data[pos - 8];
if(size >= 32) s_Data[pos] += s_Data[pos - 16];
return s_Data[pos];
}
#endif
// Exclusive warp scan: the inclusive result minus the thread's own
// contribution (sum of strictly lower lanes only).
uint warpScanExclusive(uint idata, uint size){
uint inclusive = warpScanInclusive(idata, size);
return inclusive - idata;
}
// Inclusive prefix sum across up to THREADBLOCK_SIZE threads.
// For size <= WARP_SIZE it is a single warp scan; otherwise it composes
// per-warp scans with a scan of the per-warp totals.
// size must be a power of two and a multiple of WARP_SIZE when > WARP_SIZE.
uint scan1Inclusive(uint idata, uint size){
if(size > WARP_SIZE){
//Bottom-level inclusive warp scan
uint warpResult = warpScanInclusive(idata, WARP_SIZE);
//Save top elements of each warp for exclusive warp scan
#if !GL_NV_shader_thread_shuffle
//sync to wait for warp scans to complete (because s_Data is being overwritten)
memoryBarrierShared();
barrier();
#endif
// The highest lane of each warp holds that warp's total; stash one
// total per warp at the front of shared memory.
if( (threadIdx & (WARP_SIZE - 1)) == (WARP_SIZE - 1) )
s_Data[threadIdx >> LOG2_WARP_SIZE] = warpResult;
//wait for warp scans to complete
memoryBarrierShared();
barrier();
if( threadIdx < (THREADBLOCK_SIZE / WARP_SIZE) ){
//grab top warp elements
uint val = s_Data[threadIdx];
//calculate exclusive scan and write back to shared memory
s_Data[threadIdx] = warpScanExclusive(val, size >> LOG2_WARP_SIZE);
}
//return updated warp scans with exclusive scan results
memoryBarrierShared();
barrier();
// Each thread adds the exclusive scan of the totals of all warps below it.
return warpResult + s_Data[threadIdx >> LOG2_WARP_SIZE];
}else{
return warpScanInclusive(idata, size);
}
}
#else
shared uint s_Data[THREADBLOCK_SIZE * 2];
// Naive barrier-synchronized inclusive prefix sum, used when
// GL_NV_shader_thread_group is unavailable.
// size must be a power of two, <= THREADBLOCK_SIZE.
uint scan1Inclusive(uint idata, uint size)
{
// FIX: threadIdx is a scalar uint (see its declaration above), so the
// original "threadIdx.x" swizzle is invalid GLSL and fails to compile.
uint pos = 2 * threadIdx - (threadIdx & (size - 1));
// Zero the low half of this thread's slot pair so reads below the
// logical range return a neutral element.
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
// Kogge-Stone doubling; barriers separate the read and the write of
// each step since threads read each other's slots.
for (uint offset = 1; offset < size; offset <<= 1)
{
memoryBarrierShared();
barrier();
uint t = s_Data[pos] + s_Data[pos - offset];
memoryBarrierShared();
barrier();
s_Data[pos] = t;
}
return s_Data[pos];
}
#endif
// Exclusive scan derived from the inclusive scan by removing the
// thread's own contribution.
uint scan1Exclusive(uint idata, uint size)
{
uint inclusive = scan1Inclusive(idata, size);
return inclusive - idata;
}
// Inclusive scan of 4 values per thread packed in a uvec4.
// Level 0 runs serially inside the vector (w ends up as the thread total);
// level 1 adds the exclusive scan of the per-thread totals.
uvec4 scan4Inclusive(uvec4 idata4, uint size)
{
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
uint threadOffset = scan1Exclusive(idata4.w, size / 4);
// Scalar broadcasts across all four components.
idata4 += threadOffset;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uvec4.
// Computed as the inclusive scan minus the original input, done
// component-wise in a single vector subtraction.
uvec4 scan4Exclusive(uvec4 idata4, uint size)
{
return scan4Inclusive(idata4, size) - idata4;
}
#endif
#if TASK == TASK_SUM
layout (std430, binding=1) buffer inputBuffer {
uvec4 indata[];
};
layout (std430, binding=0) buffer outputBuffer {
uvec4 outdata[];
};
// TASK_SUM: per-batch inclusive scan. Each thread scans 4 elements
// (uvec4), each workgroup scans BATCH_SIZE elements independently.
// Out-of-range threads still participate with zeros so the in-block
// scan stays correct (they add nothing), they just don't write back.
void main()
{
uint idx = gl_GlobalInvocationID.x;
// Number of uvec4 entries covering numElements (rounded up).
uint maxidx = ((numElements + 3) / 4);
bool valid = idx < maxidx;
//Load data (zero for threads past the end — neutral for the scan)
uvec4 idata4 = valid ? indata[idx] : uvec4(0);
// Calculate scan over the full fixed batch; correct because invalid
// lanes contributed zeros above.
//uvec4 odata4 = scan4Inclusive(idata4, min(BATCH_SIZE, (maxidx-idx)*4));
uvec4 odata4 = scan4Inclusive(idata4, BATCH_SIZE);
//Write back
if (valid) outdata[idx] = odata4;
}
#endif
#if TASK == TASK_OFFSETS
layout (std430, binding=1) buffer inputBuffer {
uint indata[];
};
layout (std430, binding=0) buffer outputBuffer {
uvec4 outdata[];
};
// TASK_OFFSETS: scan of the per-batch totals. Each thread gathers the
// last element of 4 consecutive batches from the TASK_SUM output
// (presumably bound as indata here — confirm against the host code),
// then one workgroup scans those totals.
void main()
{
uint idx = gl_GlobalInvocationID.x;
// First element index of this thread's group of 4 batches.
uint startIdx = (idx * BATCH_SIZE * 4);
bool valid = false;
//Load data: idata4[i] = last scanned element of batch (idx*4 + i),
//i.e. that batch's inclusive total; zero past the end of the input.
uvec4 idata4 = uvec4(0);
for (uint i = 0; i < 4; i++){
uint readIdx = startIdx + (i+1)*BATCH_SIZE - 1u;
if ( readIdx < numElements ){
idata4[i] = indata[readIdx];
valid = true;
}
}
//Calculate scan of the batch totals (all threads participate; invalid
//lanes contribute zeros)
uvec4 odata4 = scan4Inclusive(idata4, BATCH_SIZE);
//Write back only if at least one batch total was read
if (valid) outdata[idx] = odata4;
}
#endif
#if TASK == TASK_COMBINE
layout (std430, binding=1) buffer inputBuffer {
uint indata[];
};
layout (std430, binding=0) buffer outputBuffer {
uint outdata[];
};
// TASK_COMBINE: finalize the multi-pass scan by adding, to every element,
// the scanned total of all preceding batches. Elements of batch 0 need
// no offset and are left untouched.
void main()
{
uint element = gl_GlobalInvocationID.x;
if (element >= numElements) {
return;
}
uint batch = element / BATCH_SIZE;
if (batch > 0) {
// indata[batch-1] holds the inclusive sum of batches 0..batch-1.
outdata[element] += indata[batch - 1];
}
}
#endif
/*-----------------------------------------------------------------------
Copyright (c) 2014, NVIDIA. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Neither the name of its contributors may be used to endorse
or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-----------------------------------------------------------------------*/