diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 18d5fdd129ab7..32410f43fda41 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -201,7 +201,9 @@ jobs:
     strategy:
       fail-fast: false
       matrix: ${{ fromJson(needs.matrix_prep.outputs.matrix) }}
-    runs-on: macos-10.15
+    runs-on: [self-hosted, macos-10.15]
+    env:
+      PY: ${{ matrix.python }}
     steps:
       - uses: actions/checkout@v2
         with:
@@ -215,19 +217,16 @@ jobs:
           restore-keys: |
             sccache-mac-

-      - uses: actions/setup-python@v2
-        with:
-          python-version: ${{ matrix.python }}
-
       - name: Download Pre-Built LLVM 10.0.0
         run: python misc/ci_download.py
         env:
-          CI_PLATFORM: macos-10.15
+          CI_PLATFORM: macos

       - name: Create Python Wheel
         run: |
           brew install molten-vk
           export PATH=$(pwd)/taichi-llvm/bin/:$PATH
+          export PATH=$(ls -d ~/mini*/envs/$PY/bin):$PATH
           bash .github/workflows/scripts/unix_build.sh
           brew uninstall molten-vk
         env:
diff --git a/.github/workflows/scripts/unix_test.sh b/.github/workflows/scripts/unix_test.sh
index 92c41fb7f8ac2..6a0cf923d80c9 100755
--- a/.github/workflows/scripts/unix_test.sh
+++ b/.github/workflows/scripts/unix_test.sh
@@ -65,7 +65,10 @@ EOF
     python3 taichi-release-tests/run.py --log=DEBUG --runners 1 taichi-release-tests/timelines
 fi

-python3 tests/run_tests.py --cpp
+
+if [ -z "$TI_SKIP_CPP_TESTS" ]; then
+    python3 tests/run_tests.py --cpp
+fi

 if [ -z "$GPU_TEST" ]; then
     if [[ $PLATFORM == *"m1"* ]]; then
diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml
index 4c592d307d465..5281a89ab4b58 100644
--- a/.github/workflows/testing.yml
+++ b/.github/workflows/testing.yml
@@ -9,6 +9,9 @@ concurrency:
   group: ${{ github.event.number || github.run_id }}
   cancel-in-progress: true

+env:
+  TI_CI: "1"
+
 jobs:
   check_files:
     name: Check files
@@ -192,16 +195,16 @@ jobs:
         with_cc: OFF
         with_cpp_tests: ON
         wanted_archs: 'cpu'
-    runs-on: ${{ matrix.os }}
+    runs-on:
+      - self-hosted
+      - ${{ matrix.os }}
+    env:
+      PY: ${{ matrix.python }}
     steps:
       - uses: actions/checkout@v2
         with:
           submodules: 'recursive'

-      - uses: actions/setup-python@v2
-        with:
-          python-version: ${{ matrix.python }}
-
       - name: Get sccache cache
         uses: actions/cache@v2
         with:
@@ -210,23 +213,27 @@ jobs:
           restore-keys: |
             sccache-mac-

-      - name: Download Pre-Built LLVM 10.0.0
+      - name: Setup Python PATH && Download Pre-Built LLVM 10.0.0
         run: |
-          if [[ ${{needs.check_files.outputs.run_job}} == false ]]; then
+          export PATH=`pwd`/taichi-llvm/bin/:$PATH
+          # miniconda / miniforge
+          export PATH=$(ls -d ~/mini*/envs/$PY/bin):$PATH
+          if [[ "${{needs.check_files.outputs.run_job}}" == "false" ]]; then
            exit 0
           fi
           python misc/ci_download.py
+          echo PATH=$PATH >> $GITHUB_ENV
+          #
         env:
-          CI_PLATFORM: ${{ matrix.os }}
+          CI_PLATFORM: macos

       - name: Build & Install
         run: |
-          brew install molten-vk
-          if [[ ${{needs.check_files.outputs.run_job}} == false ]]; then
+          if [[ "${{needs.check_files.outputs.run_job}}" == "false" ]]; then
             exit 0
           fi
+          brew install molten-vk
           mkdir -p sccache_cache
-          export PATH=`pwd`/taichi-llvm/bin/:$PATH
           .github/workflows/scripts/unix_build.sh
           brew uninstall molten-vk
         env:
@@ -242,13 +249,13 @@ jobs:
       - name: Test
         id: test
         run: |
-          if [[ ${{needs.check_files.outputs.run_job}} == false ]]; then
+          if [[ "${{needs.check_files.outputs.run_job}}" == "false" ]]; then
             exit 0
           fi
           .github/workflows/scripts/unix_test.sh
         env:
           TI_WANTED_ARCHS: ${{ matrix.wanted_archs }}
-          TI_CI: 1
+          TI_SKIP_CPP_TESTS: Disabled because Vulkan is supported but not working on buildbot4

       - name: Save wheel if test failed
         if: failure() &&
steps.test.conclusion == 'failure' diff --git a/CMakeLists.txt b/CMakeLists.txt index cf7ad7896bcf0..10869bd828af7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,7 +2,7 @@ # The Taichi Programming Language #********************************************************************* -cmake_minimum_required(VERSION 3.15) +cmake_minimum_required(VERSION 3.17) project(taichi) diff --git a/cmake/TaichiCore.cmake b/cmake/TaichiCore.cmake index f9df634a46a77..3c98c040747af 100644 --- a/cmake/TaichiCore.cmake +++ b/cmake/TaichiCore.cmake @@ -305,19 +305,10 @@ add_subdirectory(taichi/util) target_link_libraries(${CORE_LIBRARY_NAME} PRIVATE taichi_util) if (TI_WITH_CUDA_TOOLKIT) - if("$ENV{CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "") - message(FATAL_ERROR "TI_WITH_CUDA_TOOLKIT is ON but CUDA_TOOLKIT_ROOT_DIR not found") - else() - message(STATUS "TI_WITH_CUDA_TOOLKIT = ON") - message(STATUS "CUDA_TOOLKIT_ROOT_DIR=$ENV{CUDA_TOOLKIT_ROOT_DIR}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DTI_WITH_CUDA_TOOLKIT") - target_include_directories(${CORE_LIBRARY_NAME} PRIVATE $ENV{CUDA_TOOLKIT_ROOT_DIR}/include) - target_link_directories(${CORE_LIBRARY_NAME} PRIVATE $ENV{CUDA_TOOLKIT_ROOT_DIR}/lib64) - #libraries for cuda kernel profiler CuptiToolkit - target_link_libraries(${CORE_LIBRARY_NAME} PRIVATE cupti nvperf_host) - endif() -else() - message(STATUS "TI_WITH_CUDA_TOOLKIT = OFF") + find_package(CUDAToolkit REQUIRED) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DTI_WITH_CUDA_TOOLKIT") + target_include_directories(${CORE_LIBRARY_NAME} PUBLIC ${CUDAToolkit_INCLUDE_DIRS}) + target_link_libraries(${CORE_LIBRARY_NAME} PUBLIC CUDA::cupti) endif() if (TI_WITH_METAL) diff --git a/cmake/TaichiExamples.cmake b/cmake/TaichiExamples.cmake index 064cbc076de02..092a6c3b823b1 100644 --- a/cmake/TaichiExamples.cmake +++ b/cmake/TaichiExamples.cmake @@ -19,12 +19,16 @@ if (WIN32) set_target_properties(${EXAMPLES_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${EXAMPLES_OUTPUT_DIR}) set_target_properties(${EXAMPLES_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO ${EXAMPLES_OUTPUT_DIR}) endif() + target_link_libraries(${EXAMPLES_NAME} PRIVATE taichi_core) -target_link_libraries(${EXAMPLES_NAME} PRIVATE - metal_program_impl - metal_runtime - metal_codegen - ) + +if (TI_WITH_METAL) + target_link_libraries(${EXAMPLES_NAME} PRIVATE + metal_program_impl + metal_runtime + metal_codegen + ) +endif() # TODO 4832: be specific on the header dependencies here, e.g., ir target_include_directories(${EXAMPLES_NAME} diff --git a/cmake/TaichiExportCore.cmake b/cmake/TaichiExportCore.cmake index 1baf7f992e643..9d0cc61a57273 100644 --- a/cmake/TaichiExportCore.cmake +++ b/cmake/TaichiExportCore.cmake @@ -2,6 +2,8 @@ cmake_minimum_required(VERSION 3.0) set(TAICHI_EXPORT_CORE_NAME taichi_export_core) +message(WARNING "You are trying to build the taichi_export_core target, support for this target will be deprecated in the future, please considering using the taichi_c_api target.") + add_library(${TAICHI_EXPORT_CORE_NAME} SHARED) target_link_libraries(${TAICHI_EXPORT_CORE_NAME} PRIVATE taichi_core) set_target_properties(${TAICHI_EXPORT_CORE_NAME} PROPERTIES diff --git a/python/taichi/aot/utils.py b/python/taichi/aot/utils.py index dfe9e930b3f3d..14d9f552c32ca 100644 --- a/python/taichi/aot/utils.py +++ b/python/taichi/aot/utils.py @@ -1,10 +1,12 @@ from taichi.lang._ndarray import ScalarNdarray +from taichi.lang._texture import Texture from taichi.lang.enums import Layout from taichi.lang.exception import TaichiCompilationError from 
taichi.lang.matrix import Matrix, MatrixNdarray, MatrixType, VectorNdarray from taichi.lang.util import cook_dtype from taichi.types.annotations import template from taichi.types.ndarray_type import NdarrayType +from taichi.types.texture_type import RWTextureType, TextureType template_types = (NdarrayType, template) @@ -85,6 +87,16 @@ def produce_injected_args(kernel, symbolic_args=None): layout=Layout.AOS)) else: raise RuntimeError('') + elif isinstance(anno, (TextureType, RWTextureType)): + if symbolic_args is None: + raise RuntimeError( + 'Texture type annotation doesn\'t have enough information for aot. Please either specify the channel_format, shape and num_channels in the graph arg declaration.' + ) + texture_shape = tuple(symbolic_args[i].texture_shape) + channel_format = symbolic_args[i].channel_format() + num_channels = symbolic_args[i].num_channels + injected_args.append( + Texture(channel_format, num_channels, texture_shape)) elif isinstance(anno, MatrixType): if not isinstance(symbolic_args[i], list): raise RuntimeError('Expected a symbolic arg with Matrix type.') diff --git a/python/taichi/examples/graph/texture_graph.py b/python/taichi/examples/graph/texture_graph.py new file mode 100644 index 0000000000000..2f26214af7312 --- /dev/null +++ b/python/taichi/examples/graph/texture_graph.py @@ -0,0 +1,93 @@ +from taichi.examples.patterns import taichi_logo + +import taichi as ti + +ti.init(arch=ti.vulkan) + +res = (512, 512) +img = ti.Vector.field(4, dtype=float, shape=res) +pixels_arr = ti.Vector.ndarray(4, dtype=float, shape=res) + +texture = ti.Texture(ti.f32, 1, (128, 128)) + + +@ti.kernel +def make_texture(tex: ti.types.rw_texture(num_dimensions=2, + num_channels=1, + channel_format=ti.f32, + lod=0)): + for i, j in ti.ndrange(128, 128): + ret = ti.cast(taichi_logo(ti.Vector([i, j]) / 128), ti.f32) + tex.store(ti.Vector([i, j]), ti.Vector([ret, 0.0, 0.0, 0.0])) + + +@ti.kernel +def paint(t: ti.f32, pixels: ti.types.ndarray(field_dim=2), + tex: ti.types.texture(num_dimensions=2)): + for i, j in pixels: + uv = ti.Vector([i / res[0], j / res[1]]) + warp_uv = uv + ti.Vector( + [ti.cos(t + uv.x * 5.0), + ti.sin(t + uv.y * 5.0)]) * 0.1 + c = ti.math.vec4(0.0) + if uv.x > 0.5: + c = tex.sample_lod(warp_uv, 0.0) + else: + c = tex.fetch(ti.cast(warp_uv * 128, ti.i32), 0) + pixels[i, j] = [c.r, c.r, c.r, 1.0] + + +@ti.kernel +def copy_to_field(pixels: ti.types.ndarray(field_dim=2)): + for I in ti.grouped(pixels): + img[I] = pixels[I] + + +def main(): + _t = ti.graph.Arg(ti.graph.ArgKind.SCALAR, 't', ti.f32) + _pixels_arr = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, + 'pixels_arr', + ti.f32, + field_dim=2, + element_shape=(4, )) + + _rw_tex = ti.graph.Arg(ti.graph.ArgKind.RWTEXTURE, + 'rw_tex', + channel_format=ti.f32, + shape=(128, 128), + num_channels=1) + g_init_builder = ti.graph.GraphBuilder() + g_init_builder.dispatch(make_texture, _rw_tex) + g_init = g_init_builder.compile() + + g_init.run({'rw_tex': texture}) + _tex = ti.graph.Arg(ti.graph.ArgKind.TEXTURE, + 'tex', + channel_format=ti.f32, + shape=(128, 128), + num_channels=1) + g_builder = ti.graph.GraphBuilder() + g_builder.dispatch(paint, _t, _pixels_arr, _tex) + g = g_builder.compile() + + aot = False + if aot: + tmpdir = 'shaders' + mod = ti.aot.Module(ti.vulkan) + mod.add_graph('g', g) + mod.add_graph('g_init', g_init) + mod.save(tmpdir, '') + else: + t = 0.0 + window = ti.ui.Window('UV', res) + canvas = window.get_canvas() + while window.running: + g.run({'t': t, 'pixels_arr': pixels_arr, 'tex': texture}) + 
copy_to_field(pixels_arr)
+            canvas.set_image(img)
+            window.show()
+            t += 0.03
+
+
+if __name__ == '__main__':
+    main()
diff --git a/python/taichi/graph/_graph.py b/python/taichi/graph/_graph.py
index a28fe64c695cb..7f3d6195ad168 100644
--- a/python/taichi/graph/_graph.py
+++ b/python/taichi/graph/_graph.py
@@ -2,6 +2,7 @@
 from taichi.aot.utils import produce_injected_args
 from taichi.lang import kernel_impl
 from taichi.lang._ndarray import Ndarray
+from taichi.lang._texture import Texture
 from taichi.lang.exception import TaichiRuntimeError
 from taichi.lang.matrix import Matrix, MatrixType

@@ -66,6 +67,8 @@ def run(self, args):
         for k, v in args.items():
             if isinstance(v, Ndarray):
                 flattened[k] = v.arr
+            elif isinstance(v, Texture):
+                flattened[k] = v.tex
             elif isinstance(v, Matrix):
                 mat_val_id = 0
                 for a in range(v.n):
@@ -82,7 +85,14 @@ def run(self, args):
         self._compiled_graph.run(flattened)


-def Arg(tag, name, dtype, field_dim=0, element_shape=()):
+def Arg(tag,
+        name,
+        dtype=None,
+        field_dim=0,
+        element_shape=(),
+        channel_format=None,
+        shape=(),
+        num_channels=None):
     if isinstance(dtype, MatrixType):
         if len(element_shape) > 0:
             raise TaichiRuntimeError(
@@ -101,6 +111,16 @@ def Arg(tag, name, dtype, field_dim=0, element_shape=()):
         arg_list.append(arg_sublist)
         return arg_list

+    if tag == ArgKind.TEXTURE or tag == ArgKind.RWTEXTURE:
+        if channel_format is None or len(shape) == 0 or num_channels is None:
+            raise TaichiRuntimeError(
+                'channel_format, num_channels and shape arguments are required for texture arguments'
+            )
+        return _ti_core.Arg(tag,
+                            name,
+                            channel_format=channel_format,
+                            num_channels=num_channels,
+                            shape=shape)
     return _ti_core.Arg(tag, name, dtype, field_dim, element_shape)
diff --git a/python/taichi/lang/ast/ast_transformer.py b/python/taichi/lang/ast/ast_transformer.py
index 891cdf4f2347a..1d30ffceae77d 100644
--- a/python/taichi/lang/ast/ast_transformer.py
+++ b/python/taichi/lang/ast/ast_transformer.py
@@ -1125,7 +1125,8 @@ def build_While(ctx, node):
                 "'else' clause for 'while' not supported in Taichi kernels")

         with ctx.loop_scope_guard():
-            ctx.ast_builder.begin_frontend_while(expr.Expr(1).ptr)
+            ctx.ast_builder.begin_frontend_while(
+                expr.Expr(1, dtype=primitive_types.i32).ptr)
             while_cond = build_stmt(ctx, node.test)
             impl.begin_frontend_if(ctx.ast_builder, while_cond)
             ctx.ast_builder.begin_frontend_if_true()
diff --git a/python/taichi/ui/window.py b/python/taichi/ui/window.py
index 7d429cedf6322..3ebac72556c42 100644
--- a/python/taichi/ui/window.py
+++ b/python/taichi/ui/window.py
@@ -28,7 +28,6 @@ class Window:
     def __init__(self, name, res, vsync=False, show_window=True):
         check_ggui_availability()
         package_path = str(pathlib.Path(__file__).parent.parent)
-        ti_arch = default_cfg().arch
         is_packed = default_cfg().packed
         self.window = _ti_core.PyWindow(get_runtime().prog, name, res, vsync,
diff --git a/taichi/aot/graph_data.cpp b/taichi/aot/graph_data.cpp
index a9fb839488414..11cc3804b2a43 100644
--- a/taichi/aot/graph_data.cpp
+++ b/taichi/aot/graph_data.cpp
@@ -1,5 +1,6 @@
 #include "taichi/aot/graph_data.h"
 #include "taichi/program/ndarray.h"
+#include "taichi/program/texture.h"

 namespace taichi {
 namespace lang {
@@ -39,6 +40,12 @@ void CompiledGraph::run(
       set_runtime_ctx_ndarray(&ctx, i, arr);
     } else if (ival.tag == aot::ArgKind::kScalar) {
       ctx.set_arg(i, ival.val);
+    } else if (ival.tag == aot::ArgKind::kTexture) {
+      Texture *tex = reinterpret_cast<Texture *>(ival.val);
+      ctx.set_arg_texture(i, tex->get_device_allocation_ptr_as_int());
+    } else if (ival.tag == aot::ArgKind::kRWTexture) {
+      Texture *tex = reinterpret_cast<Texture *>(ival.val);
+      ctx.set_arg_rw_texture(i, tex->get_device_allocation_ptr_as_int());
     } else {
       TI_ERROR("Error in compiled graph: unknown tag {}", ival.tag);
     }
diff --git a/taichi/aot/graph_data.h b/taichi/aot/graph_data.h
index d472964bf14f2..5f57a23ccc933 100644
--- a/taichi/aot/graph_data.h
+++ b/taichi/aot/graph_data.h
@@ -15,10 +15,18 @@ namespace taichi {
 namespace lang {
 class AotModuleBuilder;
 class Ndarray;
+class Texture;

 namespace aot {
 // Currently only scalar, matrix and ndarray are supported.
-enum class ArgKind { kScalar, kMatrix, kNdarray, kUnknown };
+enum class ArgKind {
+  kScalar,
+  kMatrix,
+  kNdarray,
+  kTexture,
+  kRWTexture,
+  kUnknown
+};

 /**
  * Symbolic argument used in building `Dispatch` nodes in the `Graph`.
@@ -26,11 +34,16 @@ enum class ArgKind { kScalar, kMatrix, kNdarray, kUnknown };
 struct Arg {
   ArgKind tag;
   std::string name;
-  // TODO: real element dtype = dtype + element_shape
+  // Ndarray: element_dtype = dtype + element_shape
+  // Texture: element_shape carries [width, height, depth] info
+  //          dtype_id carries channel_format info
   PrimitiveTypeID dtype_id;
   size_t field_dim;
   std::vector<int> element_shape;
+  // For texture
+  size_t num_channels;  // TODO: maybe rename field_dim and merge?
+
   // For serialization & deserialization
   explicit Arg()
       : tag(ArgKind::kUnknown),
@@ -56,14 +69,15 @@ struct Arg {
   // Python/C++ interface that's user facing.
   explicit Arg(ArgKind tag,
                const std::string &name,
                const DataType &dtype,
-               size_t field_dim = 0,
+               size_t dim = 0,
                const std::vector<int> &element_shape = {})
-      : tag(tag),
-        name(name),
-        field_dim(field_dim),
-        element_shape(element_shape) {
+      : tag(tag), name(name), element_shape(element_shape) {
+    if (tag == ArgKind::kTexture || tag == ArgKind::kRWTexture) {
+      num_channels = dim;
+    } else {
+      field_dim = dim;
+    }
     dtype_id = dtype->as<PrimitiveType>()->type;
   }
@@ -81,7 +95,7 @@ struct Arg {
     return !(*this == other);
   }

-  TI_IO_DEF(name, dtype_id, field_dim, tag, element_shape);
+  TI_IO_DEF(name, dtype_id, field_dim, tag, element_shape, num_channels);
 };

 /**
@@ -96,6 +110,10 @@ struct TI_DLL_EXPORT IValue {
     return IValue(reinterpret_cast<intptr_t>(&ndarray), ArgKind::kNdarray);
   }

+  static IValue create(const Texture &tex) {
+    return IValue(reinterpret_cast<intptr_t>(&tex), ArgKind::kTexture);
+  }
+
   template ::value, void>>
   static IValue create(T v) {
diff --git a/taichi/aot/module_data.h b/taichi/aot/module_data.h
index a1434e9ee6b5f..f0c6ea740ae06 100644
--- a/taichi/aot/module_data.h
+++ b/taichi/aot/module_data.h
@@ -44,6 +44,14 @@ struct BufferBind {
   TI_IO_DEF(buffer, binding);
 };

+struct TextureBind {
+  int arg_id;
+  int binding;
+  bool is_storage;
+
+  TI_IO_DEF(arg_id, binding, is_storage);
+};
+
 struct CompiledOffloadedTask {
   std::string type;
   std::string range_hint;
@@ -53,8 +61,15 @@ struct CompiledOffloadedTask {
   int gpu_block_size{0};

   std::vector<BufferBind> buffer_binds;
-
-  TI_IO_DEF(type, range_hint, name, source_path, gpu_block_size, buffer_binds);
+  std::vector<TextureBind> texture_binds;
+
+  TI_IO_DEF(type,
+            range_hint,
+            name,
+            source_path,
+            gpu_block_size,
+            buffer_binds,
+            texture_binds);
 };

 struct ScalarArg {
diff --git a/taichi/codegen/metal/CMakeLists.txt b/taichi/codegen/metal/CMakeLists.txt
index 1df11fda82168..55011c600c8f6 100644
--- a/taichi/codegen/metal/CMakeLists.txt
+++ b/taichi/codegen/metal/CMakeLists.txt
@@ -11,8 +11,9 @@ target_sources(metal_codegen
 target_include_directories(metal_codegen
   PRIVATE
     ${PROJECT_SOURCE_DIR}
-    ${PROJECT_SOURCE_DIR}/external/eigen
     ${LLVM_INCLUDE_DIRS}
+  PUBLIC
+ ${PROJECT_SOURCE_DIR}/external/eigen ) target_link_libraries(metal_codegen PRIVATE taichi_util) diff --git a/taichi/codegen/spirv/kernel_utils.h b/taichi/codegen/spirv/kernel_utils.h index e0d77bcc060ff..3dd005c8b17a5 100644 --- a/taichi/codegen/spirv/kernel_utils.h +++ b/taichi/codegen/spirv/kernel_utils.h @@ -72,6 +72,8 @@ struct TaskAttributes { int arg_id{0}; int binding{0}; bool is_storage{false}; + + TI_IO_DEF(arg_id, binding, is_storage); }; std::string name; @@ -115,6 +117,7 @@ struct TaskAttributes { advisory_num_threads_per_group, task_type, buffer_binds, + texture_binds, range_for_attribs); }; diff --git a/taichi/common/cleanup.cpp b/taichi/common/cleanup.cpp index c4d73684da2d0..23599cbb405f0 100644 --- a/taichi/common/cleanup.cpp +++ b/taichi/common/cleanup.cpp @@ -1,4 +1,5 @@ #include "taichi/common/cleanup.h" +#include namespace taichi { diff --git a/taichi/common/core.cpp b/taichi/common/core.cpp index 3ae9d47dac74d..842a875300759 100644 --- a/taichi/common/core.cpp +++ b/taichi/common/core.cpp @@ -4,10 +4,13 @@ *******************************************************************************/ #include "taichi/common/core.h" - #include "taichi/common/version.h" #include "taichi/common/commit_hash.h" +#include +#include +#include "taichi/common/logging.h" + #if defined(TI_PLATFORM_WINDOWS) #include "taichi/platform/windows/windows.h" #else diff --git a/taichi/common/core.h b/taichi/common/core.h index 9750e702d1841..a66b50f18568f 100644 --- a/taichi/common/core.h +++ b/taichi/common/core.h @@ -9,19 +9,13 @@ #define _CRT_SECURE_NO_WARNINGS #endif -#include #include -#include -#include -#include -#include -#include #include #include #include -#include -#include #include +#include +#include //****************************************************************************** // System State diff --git a/taichi/common/symbol_version.cpp b/taichi/common/symbol_version.cpp index 9f1bfeaffd3d7..f453407952205 100644 --- a/taichi/common/symbol_version.cpp +++ b/taichi/common/symbol_version.cpp @@ -3,7 +3,8 @@ The use of this software is governed by the LICENSE file. 
*******************************************************************************/ -#include "taichi/common/core.h" +#include "taichi/common/platform_macros.h" +#include #if defined(TI_PLATFORM_WINDOWS) #include "taichi/platform/windows/windows.h" @@ -12,8 +13,7 @@ #include #endif -TI_NAMESPACE_BEGIN - +namespace taichi { extern "C" { #if defined(TI_PLATFORM_LINUX) && defined(TI_ARCH_x64) // Avoid dependency on higher glibc versions such as 2.27 or 2.29 @@ -57,5 +57,4 @@ float __wrap_pow(float x, float y) { } #endif } - -TI_NAMESPACE_END +} // namespace taichi diff --git a/taichi/program/context.h b/taichi/program/context.h index eeb5be9623cbc..4b4c981107f13 100644 --- a/taichi/program/context.h +++ b/taichi/program/context.h @@ -78,13 +78,13 @@ struct RuntimeContext { return taichi_union_cast_with_different_sizes(result_buffer[i]); } - void set_arg_texture(int arg_id, DeviceAllocation &alloc) { - args[arg_id] = taichi_union_cast_with_different_sizes(&alloc); + void set_arg_texture(int arg_id, intptr_t alloc_ptr) { + args[arg_id] = taichi_union_cast_with_different_sizes(alloc_ptr); set_array_device_allocation_type(arg_id, DevAllocType::kTexture); } - void set_arg_rw_texture(int arg_id, DeviceAllocation &alloc) { - args[arg_id] = taichi_union_cast_with_different_sizes(&alloc); + void set_arg_rw_texture(int arg_id, intptr_t alloc_ptr) { + args[arg_id] = taichi_union_cast_with_different_sizes(alloc_ptr); set_array_device_allocation_type(arg_id, DevAllocType::kRWTexture); } diff --git a/taichi/python/export_ggui.cpp b/taichi/python/export_ggui.cpp index 3f1665ebe015d..380178d18491a 100644 --- a/taichi/python/export_ggui.cpp +++ b/taichi/python/export_ggui.cpp @@ -19,6 +19,7 @@ namespace py = pybind11; #include "taichi/ui/backends/vulkan/canvas.h" #include "taichi/ui/backends/vulkan/scene.h" #include "taichi/rhi/vulkan/vulkan_loader.h" +#include "taichi/rhi/arch.h" #include "taichi/ui/common/field_info.h" #include "taichi/ui/common/gui_base.h" #include "taichi/program/ndarray.h" @@ -309,6 +310,11 @@ struct PyWindow { vsync, show_window, package_path, ti_arch, is_packed_mode}; // todo: support other ggui backends + if (!(taichi::arch_is_cpu(ti_arch) || ti_arch == Arch::vulkan || + ti_arch == Arch::cuda)) { + throw std::runtime_error( + "GGUI is only supported on cpu, vulkan and cuda backends"); + } if (!lang::vulkan::is_vulkan_api_available()) { throw std::runtime_error("Vulkan must be available for GGUI"); } diff --git a/taichi/python/export_lang.cpp b/taichi/python/export_lang.cpp index f2e86925179ac..f964051698586 100644 --- a/taichi/python/export_lang.cpp +++ b/taichi/python/export_lang.cpp @@ -560,6 +560,8 @@ void export_lang(py::module &m) { // Using this MATRIX as Scalar alias, we can move to native matrix type // when supported .value("MATRIX", aot::ArgKind::kMatrix) + .value("TEXTURE", aot::ArgKind::kTexture) + .value("RWTEXTURE", aot::ArgKind::kRWTexture) .export_values(); py::class_(m, "Arg") @@ -567,10 +569,17 @@ void export_lang(py::module &m) { std::vector>(), py::arg("tag"), py::arg("name"), py::arg("dtype"), py::arg("field_dim"), py::arg("element_shape")) + .def(py::init>(), + py::arg("tag"), py::arg("name"), py::arg("channel_format"), + py::arg("num_channels"), py::arg("shape")) .def_readonly("name", &aot::Arg::name) .def_readonly("element_shape", &aot::Arg::element_shape) + .def_readonly("texture_shape", &aot::Arg::element_shape) .def_readonly("field_dim", &aot::Arg::field_dim) - .def("dtype", &aot::Arg::dtype); + .def_readonly("num_channels", &aot::Arg::num_channels) + 
.def("dtype", &aot::Arg::dtype) + .def("channel_format", &aot::Arg::dtype); py::class_(m, "Node"); @@ -597,6 +606,12 @@ void export_lang(py::module &m) { auto &val = it.second.cast(); args.insert( {py::cast(it.first), aot::IValue::create(val)}); + } else if (tag == aot::ArgKind::kTexture || + tag == aot::ArgKind::kRWTexture) { + auto &val = it.second.cast(); + args.insert( + {py::cast(it.first), aot::IValue::create(val)}); + } else if (tag == aot::ArgKind::kScalar || tag == aot::ArgKind::kMatrix) { std::string arg_name = py::cast(it.first); diff --git a/taichi/rhi/device.h b/taichi/rhi/device.h index 280333447d796..e73cda074d7a3 100644 --- a/taichi/rhi/device.h +++ b/taichi/rhi/device.h @@ -199,7 +199,7 @@ enum class PolygonMode : int { Point = 2, }; -enum class BufferFormat : uint32_t { +enum class TI_DLL_EXPORT BufferFormat : uint32_t { r8, rg8, rgba8, @@ -254,9 +254,9 @@ class Pipeline { virtual ResourceBinder *resource_binder() = 0; }; -enum class ImageDimension { d1D, d2D, d3D }; +enum class TI_DLL_EXPORT ImageDimension { d1D, d2D, d3D }; -enum class ImageLayout { +enum class TI_DLL_EXPORT ImageLayout { undefined, shader_read, shader_write, @@ -584,7 +584,7 @@ struct SurfaceConfig { uint32_t height{1}; }; -struct ImageParams { +struct TI_DLL_EXPORT ImageParams { ImageDimension dimension; BufferFormat format; ImageLayout initial_layout; diff --git a/taichi/runtime/cuda/CMakeLists.txt b/taichi/runtime/cuda/CMakeLists.txt index 858bdf46ad0a4..1c2233784875d 100644 --- a/taichi/runtime/cuda/CMakeLists.txt +++ b/taichi/runtime/cuda/CMakeLists.txt @@ -8,8 +8,6 @@ target_sources(cuda_runtime jit_cuda.cpp ) -#TODO #4832, some path here should not be included as they are -# dependencies of other targets. target_include_directories(cuda_runtime PRIVATE ${PROJECT_SOURCE_DIR} diff --git a/taichi/runtime/gfx/aot_module_builder_impl.cpp b/taichi/runtime/gfx/aot_module_builder_impl.cpp index 48e3b7964de03..b07acce05215e 100644 --- a/taichi/runtime/gfx/aot_module_builder_impl.cpp +++ b/taichi/runtime/gfx/aot_module_builder_impl.cpp @@ -91,6 +91,11 @@ class AotDataConverter { buffer_bind.binding}); } } + + for (auto &texture_bind : in.texture_binds) { + res.texture_binds.push_back( + {texture_bind.arg_id, texture_bind.binding, texture_bind.is_storage}); + } return res; } }; diff --git a/taichi/runtime/llvm/CMakeLists.txt b/taichi/runtime/llvm/CMakeLists.txt index fc0918798991f..1efaec65b77c1 100644 --- a/taichi/runtime/llvm/CMakeLists.txt +++ b/taichi/runtime/llvm/CMakeLists.txt @@ -12,8 +12,6 @@ target_sources(llvm_runtime snode_tree_buffer_manager.cpp ) -#TODO #4832, some path here should not be included as they are -# dependencies of other targets. target_include_directories(llvm_runtime PRIVATE ${PROJECT_SOURCE_DIR} diff --git a/taichi/runtime/metal/CMakeLists.txt b/taichi/runtime/metal/CMakeLists.txt index 561ed72bb626e..21765293bbc62 100644 --- a/taichi/runtime/metal/CMakeLists.txt +++ b/taichi/runtime/metal/CMakeLists.txt @@ -12,8 +12,6 @@ target_sources(metal_runtime runtime_utils.cpp ) -#TODO #4832, some path here should not be included as they are -# dependencies of other targets. 
target_include_directories(metal_runtime PRIVATE ${PROJECT_SOURCE_DIR} @@ -22,9 +20,9 @@ target_include_directories(metal_runtime ${PROJECT_SOURCE_DIR}/external/glad/include ${PROJECT_SOURCE_DIR}/external/eigen ${PROJECT_SOURCE_DIR}/external/glfw/include - PRIVATE - ${PROJECT_SOURCE_DIR}/external/spdlog/include ${LLVM_INCLUDE_DIRS} + PUBLIC + ${PROJECT_SOURCE_DIR}/external/spdlog/include ) target_link_libraries(metal_runtime PRIVATE metal_rhi) diff --git a/taichi/runtime/program_impls/llvm/CMakeLists.txt b/taichi/runtime/program_impls/llvm/CMakeLists.txt index 4cc3227537f3b..ab4a73a9d30bb 100644 --- a/taichi/runtime/program_impls/llvm/CMakeLists.txt +++ b/taichi/runtime/program_impls/llvm/CMakeLists.txt @@ -18,6 +18,5 @@ target_link_libraries(llvm_program_impl PRIVATE llvm_codegen) target_link_libraries(llvm_program_impl PRIVATE cpu_runtime) if (TI_WITH_CUDA) - target_link_libraries(llvm_program_impl PRIVATE cuda_codegen) target_link_libraries(llvm_program_impl PRIVATE cuda_runtime) endif() diff --git a/taichi/runtime/program_impls/metal/CMakeLists.txt b/taichi/runtime/program_impls/metal/CMakeLists.txt index 63d43e0ea402e..6b56d8ebbaae5 100644 --- a/taichi/runtime/program_impls/metal/CMakeLists.txt +++ b/taichi/runtime/program_impls/metal/CMakeLists.txt @@ -9,10 +9,8 @@ target_sources(metal_program_impl target_include_directories(metal_program_impl PRIVATE ${PROJECT_SOURCE_DIR} - ${PROJECT_SOURCE_DIR}/external/eigen - ${PROJECT_SOURCE_DIR}/external/spdlog/include ${LLVM_INCLUDE_DIRS} ) -target_link_libraries(metal_program_impl PRIVATE metal_codegen) -target_link_libraries(metal_program_impl PRIVATE metal_runtime) +target_link_libraries(metal_program_impl PUBLIC metal_codegen) +target_link_libraries(metal_program_impl PUBLIC metal_runtime) diff --git a/taichi/runtime/program_impls/opengl/CMakeLists.txt b/taichi/runtime/program_impls/opengl/CMakeLists.txt index b70c0ea3b61e0..ca69a3ddf4003 100644 --- a/taichi/runtime/program_impls/opengl/CMakeLists.txt +++ b/taichi/runtime/program_impls/opengl/CMakeLists.txt @@ -15,5 +15,5 @@ target_include_directories(opengl_program_impl ${LLVM_INCLUDE_DIRS} ) -target_link_libraries(opengl_program_impl PRIVATE opengl_rhi) target_link_libraries(opengl_program_impl PRIVATE gfx_runtime) +target_link_libraries(opengl_program_impl PRIVATE opengl_rhi) diff --git a/taichi/runtime/program_impls/vulkan/CMakeLists.txt b/taichi/runtime/program_impls/vulkan/CMakeLists.txt index 01b9f1b4351ff..8cb4f343e04d5 100644 --- a/taichi/runtime/program_impls/vulkan/CMakeLists.txt +++ b/taichi/runtime/program_impls/vulkan/CMakeLists.txt @@ -15,5 +15,5 @@ target_include_directories(vulkan_program_impl ${LLVM_INCLUDE_DIRS} ) -target_link_libraries(vulkan_program_impl PRIVATE vulkan_rhi) target_link_libraries(vulkan_program_impl PRIVATE gfx_runtime) +target_link_libraries(vulkan_program_impl PRIVATE vulkan_rhi) diff --git a/taichi/runtime/wasm/CMakeLists.txt b/taichi/runtime/wasm/CMakeLists.txt index b68617de400cf..e83466fb4ec24 100644 --- a/taichi/runtime/wasm/CMakeLists.txt +++ b/taichi/runtime/wasm/CMakeLists.txt @@ -13,5 +13,3 @@ target_include_directories(wasm_runtime ${PROJECT_SOURCE_DIR}/external/eigen ${LLVM_INCLUDE_DIRS} ) - -target_link_libraries(wasm_runtime PRIVATE wasm_codegen) diff --git a/taichi/transforms/frontend_type_check.cpp b/taichi/transforms/frontend_type_check.cpp index 9e6c21871665c..c0b3ec395c220 100644 --- a/taichi/transforms/frontend_type_check.cpp +++ b/taichi/transforms/frontend_type_check.cpp @@ -9,7 +9,7 @@ class FrontendTypeCheck : public IRVisitor { 
void check_cond_type(const Expr &cond, std::string stmt_name) { if (!cond->ret_type->is_primitive(PrimitiveTypeID::i32)) throw TaichiTypeError(fmt::format( - "`{0}` conditions must be of type int32; found {1}. Consider using " + "`{0}` conditions must be of type i32; found {1}. Consider using " "`{0} x != 0` instead of `{0} x` for float values.", stmt_name, cond->ret_type->to_string())); } diff --git a/taichi/util/action_recorder.h b/taichi/util/action_recorder.h index b7eabe0754ef2..88174bd17a4c3 100644 --- a/taichi/util/action_recorder.h +++ b/taichi/util/action_recorder.h @@ -4,7 +4,7 @@ #include "taichi/common/core.h" -TI_NAMESPACE_BEGIN +namespace taichi { struct ActionArg { ActionArg(const std::string &key, const std::string &val) @@ -61,4 +61,4 @@ class ActionRecorder { bool running_{false}; }; -TI_NAMESPACE_END +} // namespace taichi diff --git a/taichi/util/bit.h b/taichi/util/bit.h index 82cf3c49d34ae..7e3ecbaf4d9a9 100644 --- a/taichi/util/bit.h +++ b/taichi/util/bit.h @@ -6,8 +6,7 @@ #include "taichi/common/core.h" -TI_NAMESPACE_BEGIN - +namespace taichi { namespace bit { TI_FORCE_INLINE constexpr bool is_power_of_two(int32 x) { @@ -202,5 +201,4 @@ class Bitset { }; } // namespace bit - -TI_NAMESPACE_END +} // namespace taichi diff --git a/taichi/util/file_sequence_writer.cpp b/taichi/util/file_sequence_writer.cpp index ce21d55c7dd8e..db2c69f577a3a 100644 --- a/taichi/util/file_sequence_writer.cpp +++ b/taichi/util/file_sequence_writer.cpp @@ -3,9 +3,10 @@ #include "llvm/Support/raw_ostream.h" #endif +#include "taichi/ir/transforms.h" #include "taichi/util/file_sequence_writer.h" -TLANG_NAMESPACE_BEGIN +namespace taichi { FileSequenceWriter::FileSequenceWriter(std::string filename_template, std::string file_type) @@ -29,9 +30,9 @@ std::string FileSequenceWriter::write(const std::string &str) { return fn; } -std::string FileSequenceWriter::write(IRNode *irnode) { +std::string FileSequenceWriter::write(lang::IRNode *irnode) { std::string content; - irpass::print(irnode, &content); + lang::irpass::print(irnode, &content); return write(content); } @@ -42,4 +43,4 @@ std::pair FileSequenceWriter::create_new_file() { return {std::ofstream(fn), fn}; } -TLANG_NAMESPACE_END +} // namespace taichi diff --git a/taichi/util/file_sequence_writer.h b/taichi/util/file_sequence_writer.h index 9894c037e650b..2c66cb9771479 100644 --- a/taichi/util/file_sequence_writer.h +++ b/taichi/util/file_sequence_writer.h @@ -1,12 +1,17 @@ #pragma once #include "taichi/util/lang_util.h" -#include "taichi/ir/transforms.h" #ifdef TI_WITH_LLVM #include "taichi/runtime/llvm/llvm_fwd.h" #endif -TLANG_NAMESPACE_BEGIN +namespace taichi { +namespace lang { +class IRNode; +} +} // namespace taichi + +namespace taichi { class FileSequenceWriter { public: @@ -17,7 +22,7 @@ class FileSequenceWriter { std::string write(llvm::Module *module); #endif - std::string write(IRNode *irnode); + std::string write(lang::IRNode *irnode); std::string write(const std::string &str); @@ -29,4 +34,4 @@ class FileSequenceWriter { std::pair create_new_file(); }; -TLANG_NAMESPACE_END +} // namespace taichi diff --git a/taichi/util/image_io.cpp b/taichi/util/image_io.cpp index 0f1023d9d0cd2..c514d8613ad33 100644 --- a/taichi/util/image_io.cpp +++ b/taichi/util/image_io.cpp @@ -1,10 +1,10 @@ -#include "taichi/common/core.h" +#include "taichi/common/logging.h" #include "taichi/util/image_io.h" #include "stb_image.h" #include "stb_image_write.h" -TI_NAMESPACE_BEGIN +namespace taichi { void imwrite(const std::string &filename, size_t ptr, @@ 
-45,4 +45,4 @@ std::vector imread(const std::string &filename, int comp) { return ret; } -TI_NAMESPACE_END +} // namespace taichi diff --git a/taichi/util/image_io.h b/taichi/util/image_io.h index 57c73e3873be1..0d15cacc3b46d 100644 --- a/taichi/util/image_io.h +++ b/taichi/util/image_io.h @@ -3,11 +3,13 @@ #include #include -TI_NAMESPACE_BEGIN +namespace taichi { + void imwrite(const std::string &filename, size_t ptr, int resx, int resy, int comp); std::vector imread(const std::string &filename, int comp); -TI_NAMESPACE_END + +} // namespace taichi diff --git a/taichi/util/statistics.h b/taichi/util/statistics.h index 9196117ad45d5..816b17fe1413a 100644 --- a/taichi/util/statistics.h +++ b/taichi/util/statistics.h @@ -2,7 +2,7 @@ #include "taichi/common/core.h" -TI_NAMESPACE_BEGIN +namespace taichi { class Statistics { public: @@ -27,4 +27,4 @@ class Statistics { extern Statistics stat; -TI_NAMESPACE_END +} // namespace taichi diff --git a/taichi/util/testing.cpp b/taichi/util/testing.cpp index 140f97a9011c7..d80d0d915a7c8 100644 --- a/taichi/util/testing.cpp +++ b/taichi/util/testing.cpp @@ -6,7 +6,7 @@ #define CATCH_CONFIG_RUNNER #include "taichi/util/testing.h" -TI_NAMESPACE_BEGIN +namespace taichi { int run_tests(std::vector argv) { char arg[] = "test"; @@ -22,4 +22,4 @@ int run_tests(std::vector argv) { return session.run(); } -TI_NAMESPACE_END +} // namespace taichi diff --git a/taichi/util/testing.h b/taichi/util/testing.h index 7773a1dec6f88..f39bcb2a78f85 100644 --- a/taichi/util/testing.h +++ b/taichi/util/testing.h @@ -5,13 +5,11 @@ #pragma once -#include "taichi/common/core.h" - #define BENCHMARK CATCH_BENCHMARK #include #undef BENCHMARK -TI_NAMESPACE_BEGIN +namespace taichi { #define TI_CHECK_EQUAL(A, B, tolerance) \ { \ @@ -37,4 +35,4 @@ TI_NAMESPACE_BEGIN int run_tests(std::vector argv); -TI_NAMESPACE_END +} // namespace taichi diff --git a/tests/python/test_ad_for_fwd.py b/tests/python/test_ad_for_fwd.py index 0dc042050e824..19fa53ae47f92 100644 --- a/tests/python/test_ad_for_fwd.py +++ b/tests/python/test_ad_for_fwd.py @@ -212,3 +212,142 @@ def double_for(): for i in range(N): assert f.dual[i] == 2 * i + + +@test_utils.test(exclude=[ti.cc]) +def test_double_for_loops_more_nests(): + N = 6 + a = ti.field(ti.f32, shape=N, needs_dual=True) + b = ti.field(ti.f32, shape=N, needs_dual=True) + c = ti.field(ti.i32, shape=(N, N // 2)) + f = ti.field(ti.f32, shape=(N, N // 2), needs_dual=True) + + @ti.kernel + def double_for(): + for i in range(N): + for k in range(N // 2): + weight = 1.0 + for j in range(c[i, k]): + weight *= a[i] + s = 0.0 + for j in range(c[i, k] * 2): + s += weight + b[i] + f[i, k] = s + + a.fill(2) + b.fill(1) + + for i in range(N): + for k in range(N // 2): + c[i, k] = i + k + + double_for() + + for i in range(N): + for k in range(N // 2): + assert f[i, k] == 2 * (i + k) * (1 + 2**(i + k)) + + with ti.ad.FwdMode(loss=f, parameters=a, seed=[1.0 for _ in range(N)]): + double_for() + + for i in range(N): + total_grad_a = 0 + for k in range(N // 2): + total_grad_a = 2 * (i + k)**2 * 2**(i + k - 1) + assert f.dual[i, k] == total_grad_a + + with ti.ad.FwdMode(loss=f, parameters=b, seed=[1.0 for _ in range(N)]): + double_for() + + for i in range(N): + total_grad_b = 0 + for k in range(N // 2): + total_grad_b = 2 * (i + k) + assert f.dual[i, k] == total_grad_b + + +@test_utils.test(exclude=[ti.cc]) +def test_complex_body(): + N = 5 + a = ti.field(ti.f32, shape=N, needs_dual=True) + b = ti.field(ti.f32, shape=N, needs_dual=True) + c = ti.field(ti.i32, shape=N) + f = 
ti.field(ti.f32, shape=N, needs_dual=True) + g = ti.field(ti.f32, shape=N, needs_dual=False) + + @ti.kernel + def complex(): + for i in range(N): + weight = 2.0 + tot = 0.0 + tot_weight = 0.0 + for j in range(c[i]): + tot_weight += weight + 1 + tot += (weight + 1) * a[i] + weight = weight + 1 + weight = weight * 4 + weight = ti.cast(weight, ti.f64) + weight = ti.cast(weight, ti.f32) + + g[i] = tot_weight + f[i] = tot + + a.fill(2) + b.fill(1) + + for i in range(N): + c[i] = i + + with ti.ad.FwdMode(loss=f, parameters=a, seed=[1.0 for _ in range(N)]): + complex() + + for i in range(N): + assert f.dual[i] == g[i] + + +@test_utils.test(exclude=[ti.cc]) +def test_triple_for_loops_bls(): + N = 8 + M = 3 + a = ti.field(ti.f32, shape=N, needs_dual=True) + b = ti.field(ti.f32, shape=2 * N, needs_dual=True) + f = ti.field(ti.f32, shape=(N - M, N), needs_dual=True) + + @ti.kernel + def triple_for(): + ti.block_local(a) + ti.block_local(b) + for i in range(N - M): + for k in range(N): + weight = 1.0 + for j in range(M): + weight *= a[i + j] + s = 0.0 + for j in range(2 * M): + s += weight + b[2 * i + j] + f[i, k] = s + + a.fill(2) + + for i in range(2 * N): + b[i] = i + + triple_for() + + for i in range(N - M): + for k in range(N): + assert f[i, k] == 2 * M * 2**M + (4 * i + 2 * M - 1) * M + + with ti.ad.FwdMode(loss=f, parameters=a, seed=[1.0 for _ in range(N)]): + triple_for() + + for i in range(N - M): + for k in range(N): + assert f.dual[i, k] == 2 * M * M * 2**(M - 1) + + with ti.ad.FwdMode(loss=f, parameters=b, seed=[1.0 for _ in range(2 * N)]): + triple_for() + + for i in range(N - M): + for k in range(N): + assert f.dual[i, k] == 2 * M diff --git a/tests/python/test_compare.py b/tests/python/test_compare.py index a082f465269ba..0522b3bf03b7c 100644 --- a/tests/python/test_compare.py +++ b/tests/python/test_compare.py @@ -204,11 +204,14 @@ def is_f32(tp: ti.template()) -> ti.i32: def test_compare_ret_type(): # The purpose of this test is to make sure a comparison returns i32 # regardless of default_ip so that it can always serve as the condition of - # an if statement. + # an if/while statement. 
@ti.kernel def foo(): for i in range(100): if i == 0: pass + i = 100 + while i != 0: + i -= 1 foo() diff --git a/tests/python/test_graph.py b/tests/python/test_graph.py index cb39870ae9ff2..96dd445bd1261 100644 --- a/tests/python/test_graph.py +++ b/tests/python/test_graph.py @@ -307,3 +307,69 @@ def foo(a: dt, b: ti.types.ndarray(dtype=dt, field_dim=1)): graph = builder.compile() graph.run({"mat": 1234, 'b': k}) assert k.to_numpy()[0] == 1234 + + +@test_utils.test(arch=ti.vulkan) +def test_texture(): + res = (256, 256) + + @ti.kernel + def make_texture(tex: ti.types.rw_texture(num_dimensions=2, + num_channels=1, + channel_format=ti.f32, + lod=0)): + for i, j in ti.ndrange(128, 128): + tex.store(ti.Vector([i, j]), ti.Vector([0.1, 0.0, 0.0, 0.0])) + + @ti.kernel + def paint(t: ti.f32, pixels: ti.types.ndarray(field_dim=2), + tex: ti.types.texture(num_dimensions=2)): + for i, j in pixels: + uv = ti.Vector([i / res[0], j / res[1]]) + warp_uv = uv + ti.Vector( + [ti.cos(t + uv.x * 5.0), + ti.sin(t + uv.y * 5.0)]) * 0.1 + c = ti.math.vec4(0.0) + if uv.x > 0.5: + c = tex.sample_lod(warp_uv, 0.0) + else: + c = tex.fetch(ti.cast(warp_uv * 128, ti.i32), 0) + pixels[i, j] = [c.r, c.r, c.r, 1.0] + + _t = ti.graph.Arg(ti.graph.ArgKind.SCALAR, 't', ti.f32) + _pixels_arr = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, + 'pixels_arr', + ti.f32, + field_dim=2, + element_shape=(4, )) + + _rw_tex = ti.graph.Arg(ti.graph.ArgKind.RWTEXTURE, + 'rw_tex', + channel_format=ti.f32, + shape=(128, 128), + num_channels=1) + _tex = ti.graph.Arg(ti.graph.ArgKind.TEXTURE, + 'tex', + channel_format=ti.f32, + shape=(128, 128), + num_channels=1) + + g_builder = ti.graph.GraphBuilder() + g_builder.dispatch(make_texture, _rw_tex) + g_builder.dispatch(paint, _t, _pixels_arr, _tex) + g = g_builder.compile() + + pixels_arr = ti.Vector.ndarray(4, dtype=float, shape=res) + texture = ti.Texture(ti.f32, 1, (128, 128)) + t = 1 + + g.run({ + 'rw_tex': texture, + 't': t, + 'pixels_arr': pixels_arr, + 'tex': texture + }) + pixels = pixels_arr.to_numpy() + for i in range(res[0]): + for j in range(res[1]): + assert test_utils.allclose(pixels[i, j], [0.1, 0.1, 0.1, 1.])
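Note on the CMake changes above: the `cmake_minimum_required` bump from 3.15 to 3.17 in CMakeLists.txt is what lets cmake/TaichiCore.cmake replace the hand-rolled CUDA_TOOLKIT_ROOT_DIR lookup with CMake's built-in FindCUDAToolkit module, whose imported targets (such as CUDA::cupti) first shipped in CMake 3.17. A minimal standalone sketch of the same pattern, using hypothetical project and target names that are not part of this patch:

    cmake_minimum_required(VERSION 3.17)
    project(cupti_demo LANGUAGES CXX)

    # FindCUDAToolkit locates the toolkit via nvcc on PATH or -DCUDAToolkit_ROOT=...
    # and defines imported targets such as CUDA::cudart and CUDA::cupti,
    # plus variables like CUDAToolkit_INCLUDE_DIRS.
    find_package(CUDAToolkit REQUIRED)

    add_library(cupti_demo_backend SHARED backend.cpp)

    # Linking the imported target propagates the CUPTI include directories and
    # library location, so no manual target_include_directories()/
    # target_link_directories() against CUDA_TOOLKIT_ROOT_DIR is needed.
    target_link_libraries(cupti_demo_backend PUBLIC CUDA::cupti)
    target_compile_definitions(cupti_demo_backend PUBLIC TI_WITH_CUDA_TOOLKIT)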