
Commit

Merge branch 'master' into show-gui-hint
archibate committed Sep 11, 2020
2 parents d89f297 + ef072d3 commit 66e0638
Showing 9 changed files with 71 additions and 29 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -8,7 +8,7 @@ project(taichi)

SET(TI_VERSION_MAJOR 0)
SET(TI_VERSION_MINOR 6)
-SET(TI_VERSION_PATCH 32)
+SET(TI_VERSION_PATCH 33)

execute_process(
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
4 changes: 3 additions & 1 deletion README.md
@@ -16,9 +16,11 @@

<a href="https://github.com/taichi-dev/taichi/blob/master/examples/fractal.py#L1-L31"> <img src="https://github.com/taichi-dev/public_files/raw/master/taichi/fractal_code.png" height="270px"></a> <img src="https://raw.githubusercontent.com/taichi-dev/public_files/master/taichi/fractal_small.gif" height="270px">


Advanced features of Taichi include [spatially sparse computing](https://taichi.readthedocs.io/en/latest/sparse.html) and [differentiable programming](https://taichi.readthedocs.io/en/latest/differentiable_programming.html) [[examples]](https://github.com/yuanming-hu/difftaichi).

**Please check out our SIGGRAPH 2020 course for Taichi basics:** [YouTube](https://youtu.be/Y0-76n3aZFA), [Bilibili](https://www.bilibili.com/video/BV1kA411n7jk/), [slides (pdf)](http://taichi.graphics/wp-content/uploads/2020/07/taichi_course_siggraph2020.pdf).


## Examples ([More...](misc/examples.md))

<a href="https://github.com/taichi-dev/taichi/blob/master/examples/mpm128.py"><img src="https://github.com/taichi-dev/public_files/blob/6bd234694270c83baf97ba32e0c6278b8cf37e6e/taichi/mpm128.gif" height="192px"></a>
2 changes: 1 addition & 1 deletion docs/version
@@ -1 +1 @@
-0.6.32
+0.6.33
6 changes: 3 additions & 3 deletions examples/mpm3d.py
@@ -35,7 +35,7 @@
@ti.kernel
def substep():
    for I in ti.grouped(grid_m):
-        grid_v[I] = grid_v[I] * 0
+        grid_v[I] = ti.zero(grid_v[I])
        grid_m[I] = 0
    ti.block_dim(n_grid)
    for p in x:
@@ -65,8 +65,8 @@ def substep():
        base = int(Xp - 0.5)
        fx = Xp - base
        w = [0.5 * (1.5 - fx)**2, 0.75 - (fx - 1)**2, 0.5 * (fx - 0.5)**2]
-        new_v = v[p] * 0
-        new_C = C[p] * 0
+        new_v = ti.zero(v[p])
+        new_C = ti.zero(C[p])
        for offset in ti.static(ti.grouped(ti.ndrange(*neighbour))):
            dpos = (offset - fx) * dx
            weight = 1.0
11 changes: 11 additions & 0 deletions python/taichi/lang/impl.py
@@ -492,6 +492,17 @@ def ti_float(var):
    return float(var)


+@taichi_scope
+def zero(x):
+    # TODO: get dtype from Expr and Matrix:
+    return x * 0
+
+
+@taichi_scope
+def one(x):
+    return zero(x) + 1
+
+
@taichi_scope
def get_external_tensor_dim(var):
    return taichi_lang_core.get_external_tensor_dim(var)
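For reference, a sketch of how the new helpers are meant to be used from kernel code (the field name and shape below are illustrative, not part of this commit): ti.zero(x) evaluates to a zero of the same shape and dtype as x, and ti.one(x) to the matching all-ones value, replacing the old `x * 0` idiom at call sites.

import taichi as ti

ti.init(arch=ti.cpu)

v = ti.Vector.field(3, ti.f32, 4)  # illustrative 3-vector field

@ti.kernel
def clear_and_mark():
    for i in v:
        v[i] = ti.zero(v[i])  # zero vector with v[i]'s shape and dtype
        v[i] += ti.one(v[i])  # all-ones vector of the same type

clear_and_mark()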
32 changes: 16 additions & 16 deletions taichi/program/state_flow_graph.cpp
@@ -42,28 +42,27 @@ void StateFlowGraph::insert_task(const TaskLaunchRecord &rec,
void StateFlowGraph::insert_state_flow(Node *from, Node *to, AsyncState state) {
  TI_ASSERT(from != nullptr);
  TI_ASSERT(to != nullptr);
-  from->output_edges.insert(std::make_pair(state, to));
+  from->output_edges[state].insert(to);
  to->input_edges.insert(std::make_pair(state, from));
}

-void StateFlowGraph::print_edges(const StateFlowGraph::Edges &edges) {
-  for (auto &edge : edges) {
-    auto input_node = edge.second;
-    fmt::print(" {} -> {}\n", edge.first.name(), input_node->string());
-  }
-}
-
void StateFlowGraph::print() {
  fmt::print("=== State Flow Graph ===\n");
  for (auto &node : nodes_) {
    fmt::print("{}\n", node->string());
    if (!node->input_edges.empty()) {
      fmt::print(" Inputs:\n");
-      print_edges(node->input_edges);
+      for (const auto &p : node->input_edges) {
+        fmt::print(" {} <- {}\n", p.first.name(), p.second->string());
+      }
    }
    if (!node->output_edges.empty()) {
      fmt::print(" Outputs:\n");
-      print_edges(node->output_edges);
+      for (const auto &p : node->output_edges) {
+        for (const auto *to : p.second) {
+          fmt::print(" {} -> {}\n", p.first.name(), to->string());
+        }
+      }
    }
  }
  fmt::print("=======================\n");
@@ -106,13 +105,14 @@ std::string StateFlowGraph::dump_dot() {
    if (visited.find(from) == visited.end()) {
      visited.insert(from);
      for (const auto &p : from->output_edges) {
-        auto *to = p.second;
-        stack.push_back(to);
+        for (const auto *to : p.second) {
+          stack.push_back(to);

-        ss << " "
-           << fmt::format("{} -> {} [label=\"{}\"]", node_id(from),
-                          node_id(to), p.first.name())
-           << '\n';
+          ss << " "
+             << fmt::format("{} -> {} [label=\"{}\"]", node_id(from),
+                            node_id(to), p.first.name())
+             << '\n';
+        }
      }
    }
  }
15 changes: 9 additions & 6 deletions taichi/program/state_flow_graph.h
@@ -1,5 +1,8 @@
#pragma once

+#include <unordered_map>
+#include <unordered_set>
+
#include "taichi/ir/ir.h"
#include "taichi/ir/statements.h"
#include "taichi/lang_util.h"
@@ -13,7 +16,6 @@ class StateFlowGraph {
  struct Node;
  using StateToNodeMapping =
      std::unordered_map<AsyncState, Node *, AsyncStateHash>;
-  using Edges = std::unordered_multimap<AsyncState, Node *, AsyncStateHash>;

  // Each node is a task
  // Note: after SFG is done, each node here should hold a TaskLaunchRecord.
@@ -26,17 +28,18 @@ class StateFlowGraph {
    std::string task_name;
    // Incremental ID to identify the i-th launch of the task.
    int launch_id;
-    // For |input_edges|, each state could map to exactly one node.
-    // For |output_edges|, each state could map to at least one node.
-    Edges input_edges, output_edges;

+    StateToNodeMapping input_edges;
+    // Profiling showed horrible performance using std::unordered_multimap (at
+    // least on Mac with clang-1103.0.32.62)...
+    std::unordered_map<AsyncState, std::unordered_set<Node *>, AsyncStateHash>
+        output_edges;

    std::string string() const;
  };

  StateFlowGraph();

-  void print_edges(const Edges &edges);
-
  void print();

  std::string dump_dot();
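The header change above is the core of this commit's SFG tweak: input_edges becomes a plain per-state map (each state has exactly one producer), while output_edges moves from a flat multimap of (state, node) pairs to a map from each AsyncState to a set of destination nodes, so inserting a duplicate edge is a no-op and per-state iteration needs no scan. A rough Python analogy of the two layouts (illustrative only, not code from the repo):

# Stand-ins for SFG task nodes and AsyncState keys.
node_a, node_b = 'task_0', 'task_1'

# Old: multimap-style flat pairs; duplicates possible, grouping needs a scan.
old_edges = [('state_x', node_a), ('state_x', node_b), ('state_y', node_a)]

# New: state -> set of nodes, mirroring `output_edges[state].insert(to)`,
# which dedups and groups in one step.
new_edges = {}
new_edges.setdefault('state_x', set()).update({node_a, node_b})
new_edges.setdefault('state_y', set()).add(node_a)

# Per-state traversal, as print() and dump_dot() now do:
for state, tos in new_edges.items():
    for to in sorted(tos):
        print(state, '->', to)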
26 changes: 26 additions & 0 deletions tests/python/test_lang.py
@@ -1,4 +1,6 @@
import taichi as ti
+import numpy as np
+import pytest


@ti.all_archs
@@ -131,3 +133,27 @@ def test():
print(i)

test()


+@ti.test(arch=ti.cpu)
+@pytest.mark.parametrize('dtype', [ti.i32, ti.f32, ti.i64, ti.f64])
+@pytest.mark.parametrize('ti_zero,zero', [(ti.zero, 0), (ti.one, 1)])
+@pytest.mark.parametrize('is_mat', [False, True])
+def test_meta_zero_one(dtype, ti_zero, zero, is_mat):
+    if is_mat:
+        x = ti.Matrix.field(2, 3, dtype, ())
+        y = ti.Matrix.field(2, 3, dtype, ())
+    else:
+        x = ti.field(dtype, ())
+        y = ti.field(dtype, ())
+
+    @ti.kernel
+    def func():
+        y[None] = ti_zero(x[None])
+
+    for a in [-1, -2.3, -1, -0.3, 0, 1, 1.9, 2, 3]:
+        if ti.core.is_integral(dtype):
+            a = int(a)
+        x.fill(a)
+        func()
+        assert np.all(y.to_numpy() == zero)
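Assuming a development checkout, the new cases can be run in isolation with pytest's keyword filter (the project also ships its own `ti test` runner):

python -m pytest tests/python/test_lang.py -k meta_zero_one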
2 changes: 1 addition & 1 deletion tests/python/test_reduction.py
@@ -23,7 +23,7 @@ def reduce():

@ti.kernel
def reduce_tmp() -> dtype:
-    s = tot[None] * 0  # Hack to get |s| to the correct type...
+    s = ti.zero(tot[None])
    for i in a:
        s += a[i]
    return s
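The same pattern, self-contained (array size and dtype here are illustrative): ti.zero(tot[None]) types the accumulator after tot's dtype without the old `* 0` trick.

import taichi as ti

ti.init(arch=ti.cpu)

a = ti.field(ti.f32, 16)
tot = ti.field(ti.f32, ())

@ti.kernel
def reduce_tmp() -> ti.f32:
    s = ti.zero(tot[None])  # s gets tot's dtype (f32), no `* 0` hack
    for i in a:
        s += a[i]
    return s

a.fill(1.0)
assert reduce_tmp() == 16.0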
