[WebGPU] Implement dynamic buffer offsets for render bundles #19025

Merged
@@ -88,18 +88,16 @@ void RenderBundleEncoderImpl::setBindGroup(Index32 index, const BindGroup& bindGroup,
std::optional<Vector<BufferDynamicOffset>>&& dynamicOffsets)
{
auto backingOffsets = valueOrDefault(dynamicOffsets);
wgpuRenderBundleEncoderSetBindGroup(m_backing.get(), index, m_convertToBackingContext->convertToBacking(bindGroup), backingOffsets.size(), backingOffsets.data());
wgpuRenderBundleEncoderSetBindGroupWithDynamicOffsets(m_backing.get(), index, m_convertToBackingContext->convertToBacking(bindGroup), WTFMove(dynamicOffsets));
}

void RenderBundleEncoderImpl::setBindGroup(Index32 index, const BindGroup& bindGroup,
const uint32_t* dynamicOffsetsArrayBuffer,
size_t dynamicOffsetsArrayBufferLength,
Size64 dynamicOffsetsDataStart,
Size32 dynamicOffsetsDataLength)
void RenderBundleEncoderImpl::setBindGroup(Index32, const BindGroup&,
const uint32_t*,
size_t,
Size64,
Size32)
{
UNUSED_PARAM(dynamicOffsetsArrayBufferLength);
// FIXME: Use checked algebra.
wgpuRenderBundleEncoderSetBindGroup(m_backing.get(), index, m_convertToBackingContext->convertToBacking(bindGroup), dynamicOffsetsDataLength, dynamicOffsetsArrayBuffer + dynamicOffsetsDataStart);
RELEASE_ASSERT_NOT_REACHED();
}

void RenderBundleEncoderImpl::pushDebugGroup(String&& groupLabel)
8 changes: 5 additions & 3 deletions Source/WebGPU/WebGPU/RenderBundleEncoder.h
@@ -68,7 +68,7 @@ class RenderBundleEncoder : public WGPURenderBundleEncoderImpl, public RefCounte
void insertDebugMarker(String&& markerLabel);
void popDebugGroup();
void pushDebugGroup(String&& groupLabel);
void setBindGroup(uint32_t groupIndex, const BindGroup&, uint32_t dynamicOffsetCount, const uint32_t* dynamicOffsets);
void setBindGroup(uint32_t groupIndex, const BindGroup&, std::optional<Vector<uint32_t>>&& dynamicOffsets);
void setIndexBuffer(const Buffer&, WGPUIndexFormat, uint64_t offset, uint64_t size);
void setPipeline(const RenderPipeline&);
void setVertexBuffer(uint32_t slot, const Buffer&, uint64_t offset, uint64_t size);
@@ -114,10 +114,12 @@ class RenderBundleEncoder : public WGPURenderBundleEncoderImpl, public RefCounte
Vector<BufferAndOffset> m_vertexBuffers;
Vector<BufferAndOffset> m_fragmentBuffers;
const Ref<Device> m_device;
Vector<uint32_t> m_vertexDynamicOffsets;
Vector<uint32_t> m_fragmentDynamicOffsets;
const RenderPipeline* m_pipeline { nullptr };
HashMap<uint32_t, Vector<uint32_t>, DefaultHash<uint32_t>, WTF::UnsignedWithZeroKeyHashTraits<uint32_t>> m_bindGroupDynamicOffsets;
id<MTLBuffer> m_dynamicOffsetsVertexBuffer { nil };
id<MTLBuffer> m_dynamicOffsetsFragmentBuffer { nil };
uint64_t m_vertexDynamicOffset { 0 };
uint64_t m_fragmentDynamicOffset { 0 };
};

} // namespace WebGPU
150 changes: 94 additions & 56 deletions Source/WebGPU/WebGPU/RenderBundleEncoder.mm
@@ -67,51 +67,87 @@
return m_currentCommandIndex < m_indirectCommandBuffer.size ? [m_indirectCommandBuffer indirectRenderCommandAtIndex:m_currentCommandIndex] : nil;
}

void RenderBundleEncoder::executePreDrawCommands()
static void addResource(RenderBundle::ResourcesContainer* resources, id<MTLResource> mtlResource, ResourceUsageAndRenderStage *resource)
{
if (id<MTLIndirectRenderCommand> icbCommand = currentRenderCommand()) {
for (size_t i = 0, sz = m_vertexBuffers.size(); i < sz; ++i)
[icbCommand setVertexBuffer:m_vertexBuffers[i].buffer offset:m_vertexBuffers[i].offset atIndex:i];
if (ResourceUsageAndRenderStage *existingResource = [resources objectForKey:mtlResource]) {
existingResource.usage |= resource.usage;
existingResource.renderStages |= resource.renderStages;
} else
[resources setObject:resource forKey:mtlResource];
}

for (size_t i = 0, sz = m_fragmentBuffers.size(); i < sz; ++i)
[icbCommand setFragmentBuffer:m_fragmentBuffers[i].buffer offset:m_fragmentBuffers[i].offset atIndex:i];
static void addResource(RenderBundle::ResourcesContainer* resources, id<MTLResource> mtlResource, MTLRenderStages stage)
{
return addResource(resources, mtlResource, [[ResourceUsageAndRenderStage alloc] initWithUsage:MTLResourceUsageRead renderStages:stage]);
}

void RenderBundleEncoder::executePreDrawCommands()
{
auto vertexDynamicOffset = m_vertexDynamicOffset;
auto fragmentDynamicOffset = m_fragmentDynamicOffset;
if (m_pipeline) {
m_vertexDynamicOffset += sizeof(uint32_t) * m_pipeline->pipelineLayout().sizeOfVertexDynamicOffsets();
m_fragmentDynamicOffset += sizeof(uint32_t) * m_pipeline->pipelineLayout().sizeOfFragmentDynamicOffsets();
}

if (!m_pipeline)
id<MTLIndirectRenderCommand> icbCommand = currentRenderCommand();
if (!icbCommand)
return;

for (size_t i = 0, sz = m_vertexBuffers.size(); i < sz; ++i)
[icbCommand setVertexBuffer:m_vertexBuffers[i].buffer offset:m_vertexBuffers[i].offset atIndex:i];

for (size_t i = 0, sz = m_fragmentBuffers.size(); i < sz; ++i)
[icbCommand setFragmentBuffer:m_fragmentBuffers[i].buffer offset:m_fragmentBuffers[i].offset atIndex:i];

for (auto& kvp : m_bindGroupDynamicOffsets) {
auto& pipelineLayout = m_pipeline->pipelineLayout();
auto bindGroupIndex = kvp.key;

auto* pvertexOffsets = pipelineLayout.vertexOffsets(bindGroupIndex, kvp.value);
if (pvertexOffsets && pvertexOffsets->size()) {
auto& vertexOffsets = *pvertexOffsets;
auto startIndex = pipelineLayout.vertexOffsetForBindGroup(bindGroupIndex);
RELEASE_ASSERT(vertexOffsets.size() <= m_vertexDynamicOffsets.size() + startIndex);
memcpy(&m_vertexDynamicOffsets[startIndex], &vertexOffsets[0], sizeof(vertexOffsets[0]) * vertexOffsets.size());
if (m_dynamicOffsetsVertexBuffer) {
auto maxBufferLength = m_dynamicOffsetsVertexBuffer.length;
auto bufferOffset = vertexDynamicOffset;
uint8_t* vertexBufferContents = static_cast<uint8_t*>(m_dynamicOffsetsVertexBuffer.contents) + bufferOffset;
auto* pvertexOffsets = pipelineLayout.vertexOffsets(bindGroupIndex, kvp.value);
if (pvertexOffsets && pvertexOffsets->size()) {
auto& vertexOffsets = *pvertexOffsets;
auto startIndex = sizeof(uint32_t) * pipelineLayout.vertexOffsetForBindGroup(bindGroupIndex);
auto bytesToCopy = sizeof(vertexOffsets[0]) * vertexOffsets.size();
RELEASE_ASSERT(bytesToCopy <= maxBufferLength - (startIndex + bufferOffset));
memcpy(&vertexBufferContents[startIndex], &vertexOffsets[0], bytesToCopy);
}
}

auto* pfragmentOffsets = pipelineLayout.fragmentOffsets(bindGroupIndex, kvp.value);
if (pfragmentOffsets && pfragmentOffsets->size()) {
auto& fragmentOffsets = *pfragmentOffsets;
auto startIndex = pipelineLayout.fragmentOffsetForBindGroup(bindGroupIndex);
RELEASE_ASSERT(fragmentOffsets.size() <= m_fragmentDynamicOffsets.size() + startIndex);
memcpy(&m_fragmentDynamicOffsets[startIndex], &fragmentOffsets[0], sizeof(fragmentOffsets[0]) * fragmentOffsets.size());
if (m_dynamicOffsetsFragmentBuffer) {
auto maxBufferLength = m_dynamicOffsetsVertexBuffer.length;
auto bufferOffset = fragmentDynamicOffset;
uint8_t* fragmentBufferContents = static_cast<uint8_t*>(m_dynamicOffsetsFragmentBuffer.contents) + bufferOffset;
auto* pfragmentOffsets = pipelineLayout.fragmentOffsets(bindGroupIndex, kvp.value);
if (pfragmentOffsets && pfragmentOffsets->size()) {
auto& fragmentOffsets = *pfragmentOffsets;
auto startIndex = sizeof(uint32_t) * pipelineLayout.fragmentOffsetForBindGroup(bindGroupIndex);
auto bytesToCopy = sizeof(fragmentOffsets[0]) * fragmentOffsets.size();
RELEASE_ASSERT(bytesToCopy <= maxBufferLength - (startIndex + bufferOffset));
memcpy(&fragmentBufferContents[startIndex], &fragmentOffsets[0], bytesToCopy);
}
}
}

// FIXME: https://bugs.webkit.org/show_bug.cgi?id=262208 implement dynamic offsets in render bundles
if (m_dynamicOffsetsVertexBuffer)
[icbCommand setVertexBuffer:m_dynamicOffsetsVertexBuffer offset:vertexDynamicOffset atIndex:m_device->maxBuffersPlusVertexBuffersForVertexStage()];

if (m_dynamicOffsetsFragmentBuffer)
[icbCommand setFragmentBuffer:m_dynamicOffsetsFragmentBuffer offset:fragmentDynamicOffset atIndex:m_device->maxBuffersForFragmentStage()];

m_bindGroupDynamicOffsets.clear();
}

void RenderBundleEncoder::draw(uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance)
{
if (id<MTLIndirectRenderCommand> icbCommand = currentRenderCommand()) {
executePreDrawCommands();
executePreDrawCommands();

Contributor: Does this need to be computed for every draw, or can the calculations be cached?

Contributor (author): I think it needs to happen before every draw, because the vertex buffers can change between draws. Also, Metal ICBs appear to require setVertexBuffer: to be called on every command for all buffers in use, whereas WebGPU maintains persistent state across commands, which is best illustrated by https://webgpu.github.io/webgpu-samples/samples/renderBundles:

    passEncoder.setPipeline(pipeline);
    passEncoder.setBindGroup(0, frameBindGroup);

    // Loop through every renderable object and draw them individually.
    // (Because many of these meshes are repeated, with only the transforms
    // differing, instancing would be highly effective here. This sample
    // intentionally avoids using instancing in order to emulate a more complex
    // scene, which helps demonstrate the potential time savings a render bundle
    // can provide.)
    let count = 0;
    for (const renderable of renderables) {
      passEncoder.setBindGroup(1, renderable.bindGroup);
      passEncoder.setVertexBuffer(0, renderable.vertices);
      passEncoder.setIndexBuffer(renderable.indices, 'uint16');
      passEncoder.drawIndexed(renderable.indexCount);

      if (++count > settings.asteroidCount) {
        break;
      }
    }

setBindGroup(0, ...) is only called once, and the expectation is that this bind group remains bound across all of the subsequent draw calls.
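
To make the contrast concrete, here is a minimal Metal-side sketch, assuming a valid device and a shared vertex buffer; encodeDraws is a hypothetical helper written for this explanation, not code from the patch. Because an ICB keeps no state between commands, the same buffer has to be re-encoded for every draw, which is what executePreDrawCommands() does above:

    #import <Metal/Metal.h>

    // Hypothetical helper, not part of this patch: encode N draws that all share one
    // vertex buffer. Unlike a WebGPU render bundle, the ICB inherits nothing between
    // commands, so the buffer must be re-bound on every command.
    static void encodeDraws(id<MTLDevice> device, id<MTLBuffer> sharedVertexBuffer, NSUInteger drawCount)
    {
        MTLIndirectCommandBufferDescriptor *descriptor = [MTLIndirectCommandBufferDescriptor new];
        descriptor.commandTypes = MTLIndirectCommandTypeDraw;
        descriptor.inheritPipelineState = YES; // pipeline state comes from the parent encoder
        descriptor.inheritBuffers = NO;        // each command carries its own buffer table
        descriptor.maxVertexBufferBindCount = 1;

        id<MTLIndirectCommandBuffer> icb = [device newIndirectCommandBufferWithDescriptor:descriptor maxCommandCount:drawCount options:0];
        for (NSUInteger i = 0; i < drawCount; ++i) {
            id<MTLIndirectRenderCommand> command = [icb indirectRenderCommandAtIndex:i];
            // Re-bound for every command even though the buffer never changes.
            [command setVertexBuffer:sharedVertexBuffer offset:0 atIndex:0];
            [command drawPrimitives:MTLPrimitiveTypeTriangle vertexStart:0 vertexCount:3 instanceCount:1 baseInstance:0];
        }
    }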

if (id<MTLIndirectRenderCommand> icbCommand = currentRenderCommand())
[icbCommand drawPrimitives:m_primitiveType vertexStart:firstVertex vertexCount:vertexCount instanceCount:instanceCount baseInstance:firstInstance];
} else {
else {
m_icbDescriptor.commandTypes |= MTLIndirectCommandTypeDraw;

m_recordedCommands.append([vertexCount, instanceCount, firstVertex, firstInstance, protectedThis = Ref { *this }] {
@@ -124,8 +124,8 @@

void RenderBundleEncoder::drawIndexed(uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance)
{
executePreDrawCommands();
if (id<MTLIndirectRenderCommand> icbCommand = currentRenderCommand()) {
executePreDrawCommands();
auto firstIndexOffsetInBytes = firstIndex * (m_indexType == MTLIndexTypeUInt16 ? sizeof(uint16_t) : sizeof(uint32_t));
[icbCommand drawIndexedPrimitives:m_primitiveType indexCount:indexCount indexType:m_indexType indexBuffer:m_indexBuffer indexBufferOffset:(m_indexBufferOffset + firstIndexOffsetInBytes) instanceCount:instanceCount baseVertex:baseVertex baseInstance:firstInstance];
} else {
@@ -147,9 +147,9 @@
if (!contents)
return;

executePreDrawCommands();
if (id<MTLIndirectRenderCommand> icbCommand = currentRenderCommand()) {
ASSERT(m_indexBufferOffset == contents->indexStart);
executePreDrawCommands();
[icbCommand drawIndexedPrimitives:m_primitiveType indexCount:contents->indexCount indexType:m_indexType indexBuffer:m_indexBuffer indexBufferOffset:m_indexBufferOffset instanceCount:contents->instanceCount baseVertex:contents->baseVertex baseInstance:contents->baseInstance];
} else {
m_icbDescriptor.commandTypes |= MTLIndirectCommandTypeDrawIndexed;
@@ -170,10 +170,10 @@
if (!contents)
return;

if (id<MTLIndirectRenderCommand> icbCommand = currentRenderCommand()) {
executePreDrawCommands();
executePreDrawCommands();
if (id<MTLIndirectRenderCommand> icbCommand = currentRenderCommand())
[icbCommand drawPrimitives:m_primitiveType vertexStart:contents->vertexStart vertexCount:contents->vertexCount instanceCount:contents->instanceCount baseInstance:contents->baseInstance];
} else {
else {
m_icbDescriptor.commandTypes |= MTLIndirectCommandTypeDraw;

m_recordedCommands.append([&indirectBuffer, indirectOffset, protectedThis = Ref { *this }] {
@@ -186,13 +186,28 @@

Ref<RenderBundle> RenderBundleEncoder::finish(const WGPURenderBundleDescriptor& descriptor)
{
if (!m_currentCommandIndex)
return RenderBundle::createInvalid(m_device);

auto commandCount = m_currentCommandIndex;
m_currentCommandIndex = 0;

if (!m_indirectCommandBuffer) {
m_icbDescriptor.maxVertexBufferBindCount = m_device->maxBuffersPlusVertexBuffersForVertexStage();
m_icbDescriptor.maxVertexBufferBindCount = m_device->maxBuffersPlusVertexBuffersForVertexStage() + 1;

Contributor: Could you add a comment here explaining why the + 1?

Contributor (author): Because the dynamic offsets get bound at index = m_device->maxBuffersPlusVertexBuffersForVertexStage(), and if I bound them at m_device->maxBuffersPlusVertexBuffersForVertexStage() - 1 instead, I would need to change the WGSL compiler. But that is kind of ugly; maybe I can clean that up.
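
A hedged sketch of the sizing rule, with makeDescriptor as a hypothetical helper rather than code from the patch: Metal requires every buffer index a command binds to be strictly less than maxVertexBufferBindCount, and the dynamic-offsets buffer itself lands at index maxBuffersPlusVertexBuffersForVertexStage(), hence the + 1.

    #import <Metal/Metal.h>

    // Hypothetical helper for illustration only. The dynamic-offsets buffer is bound
    // at index maxBuffersPlusVertexBuffers (the index the generated shader code expects),
    // so valid vertex-buffer indices must run 0..maxBuffersPlusVertexBuffers inclusive.
    static MTLIndirectCommandBufferDescriptor *makeDescriptor(NSUInteger maxBuffersPlusVertexBuffers)
    {
        MTLIndirectCommandBufferDescriptor *descriptor = [MTLIndirectCommandBufferDescriptor new];
        descriptor.commandTypes = MTLIndirectCommandTypeDraw | MTLIndirectCommandTypeDrawIndexed;
        descriptor.maxVertexBufferBindCount = maxBuffersPlusVertexBuffers + 1; // one past the highest bound index
        return descriptor;
    }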

m_vertexBuffers.resize(m_icbDescriptor.maxVertexBufferBindCount);
m_fragmentBuffers.resize(m_icbDescriptor.maxFragmentBufferBindCount);
if (m_vertexDynamicOffset) {
m_dynamicOffsetsVertexBuffer = [m_device->device() newBufferWithLength:m_vertexDynamicOffset options:MTLResourceStorageModeShared];
addResource(m_resources, m_dynamicOffsetsVertexBuffer, MTLRenderStageVertex);
m_vertexDynamicOffset = 0;
}

if (m_fragmentDynamicOffset) {
m_dynamicOffsetsFragmentBuffer = [m_device->device() newBufferWithLength:m_fragmentDynamicOffset options:MTLResourceStorageModeShared];
addResource(m_resources, m_dynamicOffsetsFragmentBuffer, MTLRenderStageFragment);
m_fragmentDynamicOffset = 0;
}

m_indirectCommandBuffer = [m_device->device() newIndirectCommandBufferWithDescriptor:m_icbDescriptor maxCommandCount:commandCount options:0];

for (auto& command : m_recordedCommands)
@@ -259,38 +310,25 @@
// MTLIndirectCommandBuffers don't support debug commands.
}

static void addResource(RenderBundle::ResourcesContainer* resources, id<MTLResource> mtlResource, ResourceUsageAndRenderStage *resource)
{
if (ResourceUsageAndRenderStage *existingResource = [resources objectForKey:mtlResource]) {
existingResource.usage |= resource.usage;
existingResource.renderStages |= resource.renderStages;
} else
[resources setObject:resource forKey:mtlResource];
}

static void addResource(RenderBundle::ResourcesContainer* resources, id<MTLResource> mtlResource, MTLRenderStages stage)
{
return addResource(resources, mtlResource, [[ResourceUsageAndRenderStage alloc] initWithUsage:MTLResourceUsageRead renderStages:stage]);
}

void RenderBundleEncoder::setBindGroup(uint32_t groupIndex, const BindGroup& group, uint32_t dynamicOffsetCount, const uint32_t* dynamicOffsets)
void RenderBundleEncoder::setBindGroup(uint32_t groupIndex, const BindGroup& group, std::optional<Vector<uint32_t>>&& dynamicOffsets)
{
id<MTLIndirectRenderCommand> icbCommand = currentRenderCommand();
if (!icbCommand) {
if (group.fragmentArgumentBuffer())
m_icbDescriptor.maxFragmentBufferBindCount = std::max<NSUInteger>(m_icbDescriptor.maxFragmentBufferBindCount, 1 + groupIndex);

m_recordedCommands.append([groupIndex, &group, protectedThis = Ref { *this }, dynamicOffsets, dynamicOffsetCount] {
protectedThis->setBindGroup(groupIndex, group, dynamicOffsetCount, dynamicOffsets);
m_recordedCommands.append([groupIndex, &group, protectedThis = Ref { *this }, dynamicOffsets = WTFMove(dynamicOffsets)]() mutable {
protectedThis->setBindGroup(groupIndex, group, WTFMove(dynamicOffsets));
});
return;
}

if (!m_currentPipelineState)
return;

uint32_t dynamicOffsetCount = dynamicOffsets ? dynamicOffsets->size() : 0;
if (dynamicOffsetCount)
m_bindGroupDynamicOffsets.add(groupIndex, Vector<uint32_t>(dynamicOffsets, dynamicOffsetCount));
m_bindGroupDynamicOffsets.set(groupIndex, WTFMove(*dynamicOffsets));

for (const auto& resource : group.resources()) {
ResourceUsageAndRenderStage* usageAndRenderStage = [[ResourceUsageAndRenderStage alloc] initWithUsage:resource.usage renderStages:resource.renderStages];
@@ -300,11 +338,11 @@ static void addResource(RenderBundle::ResourcesContainer* resources, id<MTLResou

if (group.vertexArgumentBuffer()) {
addResource(m_resources, group.vertexArgumentBuffer(), MTLRenderStageVertex);
m_vertexBuffers[m_device->vertexBufferIndexForBindGroup(groupIndex)] = { group.vertexArgumentBuffer(), 0, dynamicOffsetCount, dynamicOffsets };
m_vertexBuffers[m_device->vertexBufferIndexForBindGroup(groupIndex)] = { group.vertexArgumentBuffer(), 0, dynamicOffsetCount, dynamicOffsets->data() };
}
if (group.fragmentArgumentBuffer()) {
addResource(m_resources, group.fragmentArgumentBuffer(), MTLRenderStageFragment);
m_fragmentBuffers[groupIndex] = { group.fragmentArgumentBuffer(), 0, dynamicOffsetCount, dynamicOffsets };
m_fragmentBuffers[groupIndex] = { group.fragmentArgumentBuffer(), 0, dynamicOffsetCount, dynamicOffsets->data() };
}
}

@@ -330,12 +368,8 @@
if (!pipeline.renderPipelineState())
return;

m_pipeline = &pipeline;
if (id<MTLIndirectRenderCommand> icbCommand = currentRenderCommand()) {
m_pipeline = &pipeline;

m_vertexDynamicOffsets.resize(pipeline.pipelineLayout().sizeOfVertexDynamicOffsets());
m_fragmentDynamicOffsets.resize(pipeline.pipelineLayout().sizeOfFragmentDynamicOffsets());

m_currentPipelineState = pipeline.renderPipelineState();
m_depthStencilState = pipeline.depthStencilState();
m_cullMode = pipeline.cullMode();
@@ -420,9 +454,13 @@ void wgpuRenderBundleEncoderPushDebugGroup(WGPURenderBundleEncoder renderBundleE
WebGPU::fromAPI(renderBundleEncoder).pushDebugGroup(WebGPU::fromAPI(groupLabel));
}

void wgpuRenderBundleEncoderSetBindGroup(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, const uint32_t* dynamicOffsets)
void wgpuRenderBundleEncoderSetBindGroup(WGPURenderBundleEncoder, uint32_t, WGPUBindGroup, size_t, const uint32_t*)
{
}

void wgpuRenderBundleEncoderSetBindGroupWithDynamicOffsets(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPUBindGroup group, std::optional<Vector<uint32_t>>&& dynamicOffsets)
{
WebGPU::fromAPI(renderBundleEncoder).setBindGroup(groupIndex, WebGPU::fromAPI(group), dynamicOffsetCount, dynamicOffsets);
WebGPU::fromAPI(renderBundleEncoder).setBindGroup(groupIndex, WebGPU::fromAPI(group), WTFMove(dynamicOffsets));
}

void wgpuRenderBundleEncoderSetIndexBuffer(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size)
6 changes: 6 additions & 0 deletions Source/WebGPU/WebGPU/WebGPUExt.h
@@ -30,6 +30,9 @@
#include <IOSurface/IOSurfaceRef.h>

#ifdef __cplusplus
#include <optional>
#include <wtf/Vector.h>

extern "C" {
#endif

@@ -117,6 +120,9 @@

WGPU_EXPORT void wgpuExternalTextureReference(WGPUExternalTexture externalTexture);
WGPU_EXPORT void wgpuExternalTextureRelease(WGPUExternalTexture externalTexture);
#ifdef __cplusplus
WGPU_EXPORT void wgpuRenderBundleEncoderSetBindGroupWithDynamicOffsets(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, std::optional<Vector<uint32_t>>&& dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE;
#endif

#endif // !defined(WGPU_SKIP_DECLARATIONS)
