Skip to content

Commit

Permalink
Test message passing using permuted indices
Browse files Browse the repository at this point in the history
This change also adds support for a custom checkSupport
callback when creating Amber test cases.

VK-GL-CTS Issue: 3390

New tests:

dEQP-VK.memory_model.message_passing.permuted_index.*

Components: Vulkan, Framework
Change-Id: I68c13e0106acc4c2a2f7541c4acdf3274009ad7b
  • Loading branch information
Ari Suonpaa authored and mnetsch committed Feb 24, 2022
1 parent 3d74630 commit 0f04733
Show file tree
Hide file tree
Showing 10 changed files with 314 additions and 7 deletions.
3 changes: 3 additions & 0 deletions android/cts/main/vk-master-2021-03-01/memory-model.txt
@@ -1,3 +1,6 @@
dEQP-VK.memory_model.message_passing.permuted_index.barrier
dEQP-VK.memory_model.message_passing.permuted_index.release_acquire
dEQP-VK.memory_model.message_passing.permuted_index.release_acquire_atomic_payload
dEQP-VK.memory_model.message_passing.ext.f32.coherent.atomic_atomic.atomicwrite.device.payload_nonlocal.buffer.guard_nonlocal.buffer.comp
dEQP-VK.memory_model.message_passing.ext.f32.coherent.atomic_atomic.atomicwrite.device.payload_nonlocal.buffer.guard_nonlocal.buffer.vert
dEQP-VK.memory_model.message_passing.ext.f32.coherent.atomic_atomic.atomicwrite.device.payload_nonlocal.buffer.guard_nonlocal.buffer.frag
Expand Down
3 changes: 3 additions & 0 deletions android/cts/main/vk-master/memory-model.txt
@@ -1,3 +1,6 @@
dEQP-VK.memory_model.message_passing.permuted_index.barrier
dEQP-VK.memory_model.message_passing.permuted_index.release_acquire
dEQP-VK.memory_model.message_passing.permuted_index.release_acquire_atomic_payload
dEQP-VK.memory_model.message_passing.core11.u32.coherent.fence_fence.atomicwrite.workgroup.payload_nonlocal.buffer.guard_nonlocal.buffer.comp
dEQP-VK.memory_model.message_passing.core11.u32.coherent.fence_fence.atomicwrite.workgroup.payload_nonlocal.buffer.guard_nonlocal.image.comp
dEQP-VK.memory_model.message_passing.core11.u32.coherent.fence_fence.atomicwrite.workgroup.payload_nonlocal.buffer.guard_nonlocal.workgroup.comp
Expand Down
@@ -0,0 +1,87 @@
#!amber

# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Message passing through permuted data/flag indices. In this variant both the
# data and the flag are accessed with relaxed atomics, and ordering between
# them is enforced with an explicit memoryBarrierBuffer() fence on both the
# writer and the reader side (presumably the ".barrier" case of the new
# permuted_index test group -- the filename is not visible here).

SHADER compute compute_shader GLSL
#version 450
#extension GL_KHR_memory_scope_semantics: enable

layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;

// One message: the payload and the flag used to publish it.
struct S
{
uint data;
uint flag;
};

// 65536 data/flag pairs, one per 16-bit index.
layout(set = 0, binding = 0, std430) buffer Buf0
{
S arr[];
} buf;

// Failure counter checked by the EXPECT at the end of this file.
layout(set = 0, binding = 1, std430) buffer Buf1
{
uint failures;
} bufOut;

// Bijective permutation of the 16-bit index space: 419 is odd, so
// multiplication modulo 2^16 is invertible.
uint permute(uint x)
{
return (x * 419u) & 0xffff;
}

void main()
{
// Message passing using data and flag pairs. The data is written to an index i, but the flag
// used for synchronization is from a different index calculated using permute(i).

uint i0 = gl_GlobalInvocationID.x;
atomicStore(buf.arr[i0].data, 1u, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);

// This barrier makes sure the order of the relaxed atomic stores above and below does not
// change. This guarantees the data is always written before the flag.
memoryBarrierBuffer();

atomicStore(buf.arr[permute(i0)].flag, 1u, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);

// Read from an index which is presumably written by a different workgroup.
// 4099 is also odd, so this is another bijection of the 16-bit index space.
uint i1 = (i0 * 4099u) & 0xffff;
uint flag = atomicLoad(buf.arr[permute(i1)].flag, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);

// This barrier makes sure the atomic loads keep their execution order. That is, the flag is always read first.
memoryBarrierBuffer();

uint data = atomicLoad(buf.arr[i1].data, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);

// The flag must never be observed set while the data it publishes is still unwritten.
if (flag > data)
{
atomicAdd(bufOut.failures, 1u);
}
}
END

# 131072 uint32 words = 65536 S structs (data + flag), zero-initialized.
BUFFER buf0 DATA_TYPE uint32 SIZE 131072 FILL 0
BUFFER buf1 DATA_TYPE uint32 SIZE 1 FILL 0

PIPELINE compute pipeline
ATTACH compute_shader

BIND BUFFER buf0 AS storage DESCRIPTOR_SET 0 BINDING 0
BIND BUFFER buf1 AS storage DESCRIPTOR_SET 0 BINDING 1
END

# 256 workgroups x local_size 256 = 65536 invocations, one per 16-bit index.
RUN pipeline 256 1 1

# Pass only if no invocation ever observed the flag before the data.
EXPECT buf1 IDX 0 EQ 0
@@ -0,0 +1,78 @@
#!amber

# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Message passing through permuted data/flag indices. In this variant the
# payload is a relaxed atomic and ordering comes from release/acquire
# semantics on the flag accesses themselves (presumably the
# ".release_acquire_atomic_payload" case of the new permuted_index group --
# the filename is not visible here).

SHADER compute compute_shader GLSL
#version 450
#extension GL_KHR_memory_scope_semantics: enable

layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;

// One message: the payload and the flag used to publish it.
struct S
{
uint data;
uint flag;
};

// 65536 data/flag pairs, one per 16-bit index.
layout(set = 0, binding = 0, std430) buffer Buf0
{
S arr[];
} buf;

// Failure counter checked by the EXPECT at the end of this file.
layout(set = 0, binding = 1, std430) buffer Buf1
{
uint failures;
} bufOut;

// Bijective permutation of the 16-bit index space: 419 is odd, so
// multiplication modulo 2^16 is invertible.
uint permute(uint x)
{
return (x * 419u) & 0xffff;
}

void main()
{
// Message passing using data and flag pairs. The data is written to an index i, but the flag
// used for synchronization is from a different index calculated using permute(i).

uint i0 = gl_GlobalInvocationID.x;
atomicStore(buf.arr[i0].data, 1u, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
atomicStore(buf.arr[permute(i0)].flag, 1u, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelease);

// Read from an index which is presumably written by a different workgroup.
// 4099 is also odd, so this is another bijection of the 16-bit index space.
uint i1 = (i0 * 4099u) & 0xffff;
uint flag = atomicLoad(buf.arr[permute(i1)].flag, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsAcquire);
uint data = atomicLoad(buf.arr[i1].data, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);

// The flag must never be observed set while the data it publishes is still unwritten.
if (flag > data)
{
atomicAdd(bufOut.failures, 1u);
}
}
END

# 131072 uint32 words = 65536 S structs (data + flag), zero-initialized.
BUFFER buf0 DATA_TYPE uint32 SIZE 131072 FILL 0
BUFFER buf1 DATA_TYPE uint32 SIZE 1 FILL 0

PIPELINE compute pipeline
ATTACH compute_shader

BIND BUFFER buf0 AS storage DESCRIPTOR_SET 0 BINDING 0
BIND BUFFER buf1 AS storage DESCRIPTOR_SET 0 BINDING 1
END

# 256 workgroups x local_size 256 = 65536 invocations, one per 16-bit index.
RUN pipeline 256 1 1

# Pass only if no invocation ever observed the flag before the data.
EXPECT buf1 IDX 0 EQ 0
@@ -0,0 +1,78 @@
#!amber

# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Message passing through permuted data/flag indices. In this variant the
# payload uses plain (non-atomic) accesses and is published by release/acquire
# semantics on the flag accesses (presumably the ".release_acquire" case of
# the new permuted_index group -- the filename is not visible here).

SHADER compute compute_shader GLSL
#version 450
#extension GL_KHR_memory_scope_semantics: enable

layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in;

// One message: the payload and the flag used to publish it.
struct S
{
uint data;
uint flag;
};

// 65536 data/flag pairs, one per 16-bit index.
layout(set = 0, binding = 0, std430) buffer Buf0
{
S arr[];
} buf;

// Failure counter checked by the EXPECT at the end of this file.
layout(set = 0, binding = 1, std430) buffer Buf1
{
uint failures;
} bufOut;

// Bijective permutation of the 16-bit index space: 419 is odd, so
// multiplication modulo 2^16 is invertible.
uint permute(uint x)
{
return (x * 419u) & 0xffff;
}

void main()
{
// Message passing using data and flag pairs. The data is written to an index i, but the flag
// used for synchronization is from a different index calculated using permute(i).

uint i0 = gl_GlobalInvocationID.x;
buf.arr[i0].data = 1u;
atomicStore(buf.arr[permute(i0)].flag, 1u, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelease);

// Read from an index which is presumably written by a different workgroup.
// 4099 is also odd, so this is another bijection of the 16-bit index space.
uint i1 = (i0 * 4099u) & 0xffff;
uint flag = atomicLoad(buf.arr[permute(i1)].flag, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsAcquire);
uint data = buf.arr[i1].data;

// The flag must never be observed set while the data it publishes is still unwritten.
if (flag > data)
{
atomicAdd(bufOut.failures, 1u);
}
}
END

# 131072 uint32 words = 65536 S structs (data + flag), zero-initialized.
BUFFER buf0 DATA_TYPE uint32 SIZE 131072 FILL 0
BUFFER buf1 DATA_TYPE uint32 SIZE 1 FILL 0

PIPELINE compute pipeline
ATTACH compute_shader

BIND BUFFER buf0 AS storage DESCRIPTOR_SET 0 BINDING 0
BIND BUFFER buf1 AS storage DESCRIPTOR_SET 0 BINDING 1
END

# 256 workgroups x local_size 256 = 65536 invocations, one per 16-bit index.
RUN pipeline 256 1 1

# Pass only if no invocation ever observed the flag before the data.
EXPECT buf1 IDX 0 EQ 0
3 changes: 3 additions & 0 deletions external/vulkancts/modules/vulkan/amber/vktAmberTestCase.cpp
Expand Up @@ -224,6 +224,9 @@ void AmberTestCase::checkSupport(Context& ctx) const
TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: Stride is not multiply of minVertexInputBindingStrideAlignment");
}
}

if (m_checkSupportCallback)
(m_checkSupportCallback)(ctx, m_name);
}

class Delegate : public amber::Delegate
Expand Down
19 changes: 12 additions & 7 deletions external/vulkancts/modules/vulkan/amber/vktAmberTestCase.hpp
Expand Up @@ -26,6 +26,7 @@

#include <string>
#include <set>
#include <functional>
#include "tcuDefs.hpp"
#include "tcuTestCase.hpp"
#include "vkSpirVProgram.hpp"
Expand Down Expand Up @@ -79,6 +80,8 @@ class AmberTestCase : public TestCase
// - Otherwise, we do a secondary sanity check depending on code inside
// Amber itself: if the Amber test says it is not supported, then
// throw an internal error exception.
// A function pointer for a custom checkSupport function can also be
// provided for a more sophisticated support check.
void checkSupport (Context& ctx) const override;

// If the test case uses SPIR-V Assembly, use these build options.
Expand All @@ -96,6 +99,7 @@ class AmberTestCase : public TestCase

void addImageRequirement(vk::VkImageCreateInfo info);
void addBufferRequirement(BufferRequirement req);
void setCheckSupportCallback(std::function<void(Context&, std::string)> func) { m_checkSupportCallback = func; }

virtual bool validateRequirements() override;

Expand All @@ -104,26 +108,27 @@ class AmberTestCase : public TestCase
private:
bool parse (const std::string& readFilename);

amber::Recipe* m_recipe;
vk::SpirVAsmBuildOptions m_asm_options;
amber::Recipe* m_recipe;
vk::SpirVAsmBuildOptions m_asm_options;

std::string m_readFilename;
std::string m_readFilename;

// Instance and device extensions required by the test.
// We don't differentiate between the two: We consider the requirement
// satisfied if the string is registered as either an instance or device
// extension. Use a set for consistent ordering.
std::set<std::string> m_required_extensions;
std::set<std::string> m_required_extensions;

// Features required by the test.
// A feature bit is represented by a string of form "<structure>.<feature>", where
// the structure name matches the Vulkan spec, but without the leading "VkPhysicalDevice".
// An example entry is: "VariablePointerFeatures.variablePointers".
// Use a set for consistent ordering.
std::set<std::string> m_required_features;
std::set<std::string> m_required_features;

std::vector<vk::VkImageCreateInfo> m_imageRequirements;
std::vector<BufferRequirement> m_bufferRequirements;
std::vector<vk::VkImageCreateInfo> m_imageRequirements;
std::vector<BufferRequirement> m_bufferRequirements;
std::function<void(Context&, std::string)> m_checkSupportCallback = nullptr;
};

AmberTestCase* createAmberTestCase (tcu::TestContext& testCtx,
Expand Down
@@ -1,5 +1,6 @@
include_directories(
..
../amber
${DEQP_INL_DIR}
)

Expand Down

0 comments on commit 0f04733

Please sign in to comment.