From d236b843be150e7b5204ce2db54e4a5dfe3c49b8 Mon Sep 17 00:00:00 2001 From: raggledodo <35833068+raggledodo@users.noreply.github.com> Date: Mon, 23 Sep 2019 18:26:24 -0700 Subject: [PATCH] Improve Eigen Module API and Rename Modules to make more Sense (#33) * overload binary elementary operators with constants * overload operator for c++ * overload operator for python * rename ade and ead to make more sense * rename pll and move modl and eqns out of rocnnet * rename modl to avoid confusing with model directory --- BUILD.bazel | 4 +- Doxyfile | 2 +- Makefile | 24 +- README.md | 18 +- ade/ade.hpp | 11 - ade/bm/benchmark.cpp | 146 -- ade/test/test_functor.cpp | 104 -- ade/test/test_tensor.cpp | 47 - {pll => ccur}/BUILD.bazel | 26 +- ccur/README_CCUR.md | 3 + {pll => ccur}/inspector.cpp | 2 +- ccur/partition.hpp | 17 + ccur/python/ccur.cpp | 35 + ccur/rtscale.cpp | 233 +++ {pll => ccur}/session.hpp | 62 +- {pll => ccur}/src/partition.cpp | 40 +- {ade => ccur}/test/main.cpp | 0 {pll => ccur}/test/test_partition.cpp | 40 +- {pll => ccur}/test/test_session.cpp | 40 +- {pll => ccur}/weights.proto | 2 +- cfg/BUILD.bazel | 8 +- cfg/ead.yml | 960 ------------ cfg/ead_min.yml | 952 ------------ cfg/eteq.yml | 1343 +++++++++++++++++ cfg/eteq_min.yml | 1336 ++++++++++++++++ dbg/BUILD.bazel | 6 +- dbg/README_DBG.md | 2 +- dbg/csv_to_png.py | 2 +- dbg/grpc/session.hpp | 68 +- dbg/python/grpc.cpp | 42 +- dbg/python/stream.cpp | 24 +- dbg/stream/{ade.hpp => teq.hpp} | 34 +- dbg/stream/{ade_csv.hpp => teq_csv.hpp} | 36 +- ead/README_EAD.md | 3 - ead/age/plugins/pyapis.py | 243 --- ead/coord.hpp | 59 - ead/ead.hpp | 5 - ead/grader.hpp | 482 ------ ead/src/eigen.cpp | 17 - {ead => eteq}/BUILD.bazel | 60 +- eteq/README_ETEQ.md | 3 + {ead => eteq}/bm/benchmark.cpp | 222 ++- {ead => eteq}/constant.hpp | 28 +- eteq/coord.hpp | 59 + {ead => eteq}/eigen.hpp | 32 +- eteq/eteq.hpp | 5 + {ead => eteq}/funcarg.hpp | 96 +- {ead => eteq}/functor.hpp | 66 +- ead/age/agen.py => eteq/gen/egen.py | 10 +- {ead/age => eteq/gen}/plugins/apis.py | 102 +- {ead/age => eteq/gen}/plugins/dtypes.py | 33 +- {ead/age => eteq/gen}/plugins/opcodes.py | 6 +- eteq/gen/plugins/pyapis.py | 303 ++++ {ead/age => eteq/gen}/plugins/template.py | 0 eteq/grader.hpp | 426 ++++++ {ead => eteq}/ileaf.hpp | 26 +- {ead => eteq}/inode.hpp | 32 +- {ead => eteq}/operator.hpp | 240 +-- {ead => eteq}/parse.hpp | 91 +- ead/python/ead.cpp => eteq/python/eteq.cpp | 151 +- {ead => eteq}/random.hpp | 8 +- {ead => eteq}/serialize.hpp | 104 +- {ead => eteq}/session.hpp | 58 +- {ead => eteq}/src/coord.cpp | 42 +- eteq/src/eigen.cpp | 17 + {ead => eteq}/src/random.cpp | 6 +- {ead => eteq}/test/main.cpp | 0 {ead => eteq}/test/ptest.py | 360 +++-- {ead => eteq}/test/test_api.cpp | 706 +++++---- {ead => eteq}/test/test_coord.cpp | 18 +- {ead => eteq}/test/test_equation.cpp | 206 +-- {ead => eteq}/test/test_random.cpp | 12 +- {ead => eteq}/test/test_serialize.cpp | 60 +- {ead => eteq}/test/test_session.cpp | 24 +- {ead => eteq}/variable.hpp | 46 +- experimental/distance_finder.hpp | 12 +- layr/BUILD.bazel | 48 + layr/README_LAYR.md | 3 + {rocnnet/modl => layr}/activations.hpp | 30 +- {rocnnet/modl => layr/broken}/dbn.hpp | 34 +- {rocnnet/modl => layr/broken}/rnn.hpp | 34 +- layr/conv.hpp | 208 +++ {rocnnet/modl => layr}/dense.hpp | 77 +- {rocnnet/eqns => layr}/err_approx.hpp | 32 +- {rocnnet/eqns => layr}/init.hpp | 44 +- {rocnnet/modl => layr}/layer.hpp | 44 +- {rocnnet/modl => layr}/model.hpp | 24 +- {rocnnet/modl => layr}/rbm.hpp | 44 +- {rocnnet/modl => 
layr}/src/activations.cpp | 6 +- layr/src/conv.cpp | 19 + {rocnnet/modl => layr}/src/dense.cpp | 6 +- {rocnnet/eqns => layr}/src/err_approx.cpp | 51 +- {rocnnet/modl => layr}/src/layer.cpp | 56 +- {rocnnet/modl => layr}/src/model.cpp | 6 +- {rocnnet/modl => layr}/src/rbm.cpp | 6 +- layr/test/main.cpp | 7 + layr/test/test_approx.cpp | 101 ++ layr/test/test_dense.cpp | 0 layr/test/test_init.cpp | 150 ++ models/test/{ead_test.json => eteq_test.json} | 0 models/test/{ead_test.pbx => eteq_test.pbx} | Bin models/test/{ead_test.txt => eteq_test.txt} | 0 opt/BUILD.bazel | 4 +- opt/README_OPT.md | 3 + opt/candidate.hpp | 10 +- opt/iconverter.hpp | 4 +- opt/ivoter.hpp | 8 +- opt/matcher.hpp | 22 +- opt/optimize.hpp | 4 +- opt/parse.hpp | 4 +- opt/rmdups.hpp | 22 +- opt/rules.md | 4 +- opt/src/optimize.cpp | 16 +- opt/src/rmdups.cpp | 26 +- opt/src/stats.cpp | 40 +- opt/stats.hpp | 22 +- opt/test/test_matcher.cpp | 28 +- opt/test/test_opt.cpp | 132 +- pbm/README_PBM.md | 6 +- pbm/data.hpp | 18 +- pbm/load.hpp | 32 +- pbm/save.hpp | 46 +- pbm/test/common.hpp | 16 +- pbm/test/test_load.cpp | 32 +- pbm/test/test_save.cpp | 106 +- perf/README_PERF.md | 3 + pll/README_CCE.md | 1 - pll/partition.hpp | 17 - pll/python/pll.cpp | 35 - pll/rtscale.cpp | 233 --- rocnnet/BUILD.bazel | 6 +- rocnnet/comparison/comparison_matmul.py | 14 +- rocnnet/comparison/comparison_mlp.py | 16 +- rocnnet/comparison/comparison_mlp_grad.py | 16 +- rocnnet/comparison/comparison_mlp_grad_pll.py | 18 +- rocnnet/demo/dbn_demo.cpp | 6 +- rocnnet/demo/dqn_demo.py | 8 +- rocnnet/demo/gd_demo.cpp | 52 +- rocnnet/demo/gd_demo.py | 10 +- rocnnet/demo/rbm_demo.py | 12 +- rocnnet/eqns/BUILD.bazel | 23 - rocnnet/modl/BUILD.bazel | 23 - rocnnet/modl/conv.hpp | 98 -- rocnnet/notebooks/ead/__init__.py | 2 +- rocnnet/notebooks/karpathy_game.ipynb | 6 +- rocnnet/python/rocnnet.cpp | 190 +-- rocnnet/trainer/dbn_trainer.hpp | 64 +- rocnnet/trainer/dqn_trainer.hpp | 151 +- rocnnet/trainer/mlp_trainer.hpp | 60 +- rocnnet/trainer/old_rbm_trainer.hpp | 228 --- rocnnet/trainer/rbm_trainer.hpp | 98 +- tag/BUILD.bazel | 2 +- tag/README_TAG.md | 3 + tag/group.hpp | 24 +- tag/prop.hpp | 6 +- tag/src/group.cpp | 30 +- tag/src/prop.cpp | 2 +- tag/src/tag.cpp | 24 +- tag/tag.hpp | 38 +- tag/test/common.hpp | 10 +- tag/test/test_group.cpp | 56 +- tag/test/test_prop.cpp | 4 +- tag/test/test_tag.cpp | 16 +- {ade => teq}/BUILD.bazel | 10 +- ade/README_ADE.md => teq/README_TEQ.md | 6 +- teq/bm/benchmark.cpp | 173 +++ {ade => teq}/coord.hpp | 14 +- {ade => teq}/funcarg.hpp | 16 +- {ade => teq}/functor.hpp | 14 +- {ade => teq}/grad_def.hpp | 12 +- {ade => teq}/idata.hpp | 10 +- {ade => teq}/ifunctor.hpp | 12 +- {ade => teq}/ileaf.hpp | 14 +- {ade => teq}/iopfunc.hpp | 14 +- {ade => teq}/itensor.hpp | 12 +- {ade => teq}/matops.hpp | 14 +- {ade => teq}/shape.hpp | 10 +- {ade => teq}/src/coord.cpp | 6 +- {ade => teq}/src/funcarg.cpp | 6 +- {ade => teq}/src/matops.cpp | 6 +- {ade => teq}/src/shape.cpp | 6 +- {ade => teq}/src/traveler.cpp | 6 +- teq/teq.hpp | 11 + {ade => teq}/test/common.hpp | 16 +- {pll => teq}/test/main.cpp | 0 {ade => teq}/test/test_coord.cpp | 154 +- {ade => teq}/test/test_funcarg.cpp | 116 +- teq/test/test_functor.cpp | 104 ++ {ade => teq}/test/test_grad.cpp | 122 +- {ade => teq}/test/test_matops.cpp | 70 +- {ade => teq}/test/test_shape.cpp | 120 +- teq/test/test_tensor.cpp | 47 + {ade => teq}/test/test_traveler.cpp | 102 +- {ade => teq}/traveler.hpp | 32 +- tests.sh | 8 +- testutil/src/tutil.cpp | 2 +- testutil/tutil.hpp | 4 +- todo | 3 +- 198 files 
changed, 7952 insertions(+), 6708 deletions(-) delete mode 100644 ade/ade.hpp delete mode 100644 ade/bm/benchmark.cpp delete mode 100644 ade/test/test_functor.cpp delete mode 100644 ade/test/test_tensor.cpp rename {pll => ccur}/BUILD.bazel (79%) create mode 100644 ccur/README_CCUR.md rename {pll => ccur}/inspector.cpp (97%) create mode 100644 ccur/partition.hpp create mode 100644 ccur/python/ccur.cpp create mode 100644 ccur/rtscale.cpp rename {pll => ccur}/session.hpp (71%) rename {pll => ccur}/src/partition.cpp (81%) rename {ade => ccur}/test/main.cpp (100%) rename {pll => ccur}/test/test_partition.cpp (77%) rename {pll => ccur}/test/test_session.cpp (88%) rename {pll => ccur}/weights.proto (68%) delete mode 100644 cfg/ead.yml delete mode 100644 cfg/ead_min.yml create mode 100644 cfg/eteq.yml create mode 100644 cfg/eteq_min.yml rename dbg/stream/{ade.hpp => teq.hpp} (58%) rename dbg/stream/{ade_csv.hpp => teq_csv.hpp} (81%) delete mode 100644 ead/README_EAD.md delete mode 100644 ead/age/plugins/pyapis.py delete mode 100644 ead/coord.hpp delete mode 100644 ead/ead.hpp delete mode 100644 ead/grader.hpp delete mode 100644 ead/src/eigen.cpp rename {ead => eteq}/BUILD.bazel (68%) create mode 100644 eteq/README_ETEQ.md rename {ead => eteq}/bm/benchmark.cpp (62%) rename {ead => eteq}/constant.hpp (80%) create mode 100644 eteq/coord.hpp rename {ead => eteq}/eigen.hpp (83%) create mode 100644 eteq/eteq.hpp rename {ead => eteq}/funcarg.hpp (52%) rename {ead => eteq}/functor.hpp (74%) rename ead/age/agen.py => eteq/gen/egen.py (88%) rename {ead/age => eteq/gen}/plugins/apis.py (66%) rename {ead/age => eteq/gen}/plugins/dtypes.py (87%) rename {ead/age => eteq/gen}/plugins/opcodes.py (97%) create mode 100644 eteq/gen/plugins/pyapis.py rename {ead/age => eteq/gen}/plugins/template.py (100%) create mode 100644 eteq/grader.hpp rename {ead => eteq}/ileaf.hpp (70%) rename {ead => eteq}/inode.hpp (61%) rename {ead => eteq}/operator.hpp (84%) rename {ead => eteq}/parse.hpp (76%) rename ead/python/ead.cpp => eteq/python/eteq.cpp (58%) rename {ead => eteq}/random.hpp (93%) rename {ead => eteq}/serialize.hpp (56%) rename {ead => eteq}/session.hpp (74%) rename {ead => eteq}/src/coord.cpp (65%) create mode 100644 eteq/src/eigen.cpp rename {ead => eteq}/src/random.cpp (57%) rename {ead => eteq}/test/main.cpp (100%) rename {ead => eteq}/test/ptest.py (79%) rename {ead => eteq}/test/test_api.cpp (54%) rename {ead => eteq}/test/test_coord.cpp (52%) rename {ead => eteq}/test/test_equation.cpp (86%) rename {ead => eteq}/test/test_random.cpp (88%) rename {ead => eteq}/test/test_serialize.cpp (77%) rename {ead => eteq}/test/test_session.cpp (72%) rename {ead => eteq}/variable.hpp (75%) create mode 100644 layr/BUILD.bazel create mode 100644 layr/README_LAYR.md rename {rocnnet/modl => layr}/activations.hpp (79%) rename {rocnnet/modl => layr/broken}/dbn.hpp (66%) rename {rocnnet/modl => layr/broken}/rnn.hpp (76%) create mode 100644 layr/conv.hpp rename {rocnnet/modl => layr}/dense.hpp (63%) rename {rocnnet/eqns => layr}/err_approx.hpp (59%) rename {rocnnet/eqns => layr}/init.hpp (60%) rename {rocnnet/modl => layr}/layer.hpp (76%) rename {rocnnet/modl => layr}/model.hpp (86%) rename {rocnnet/modl => layr}/rbm.hpp (77%) rename {rocnnet/modl => layr}/src/activations.cpp (80%) create mode 100644 layr/src/conv.cpp rename {rocnnet/modl => layr}/src/dense.cpp (74%) rename {rocnnet/eqns => layr}/src/err_approx.cpp (52%) rename {rocnnet/modl => layr}/src/layer.cpp (80%) rename {rocnnet/modl => layr}/src/model.cpp (73%) rename 
{rocnnet/modl => layr}/src/rbm.cpp (85%) create mode 100644 layr/test/main.cpp create mode 100644 layr/test/test_approx.cpp create mode 100644 layr/test/test_dense.cpp create mode 100644 layr/test/test_init.cpp rename models/test/{ead_test.json => eteq_test.json} (100%) rename models/test/{ead_test.pbx => eteq_test.pbx} (100%) rename models/test/{ead_test.txt => eteq_test.txt} (100%) create mode 100644 opt/README_OPT.md create mode 100644 perf/README_PERF.md delete mode 100644 pll/README_CCE.md delete mode 100644 pll/partition.hpp delete mode 100644 pll/python/pll.cpp delete mode 100644 pll/rtscale.cpp delete mode 100644 rocnnet/eqns/BUILD.bazel delete mode 100644 rocnnet/modl/BUILD.bazel delete mode 100644 rocnnet/modl/conv.hpp delete mode 100644 rocnnet/trainer/old_rbm_trainer.hpp create mode 100644 tag/README_TAG.md rename {ade => teq}/BUILD.bazel (88%) rename ade/README_ADE.md => teq/README_TEQ.md (81%) create mode 100644 teq/bm/benchmark.cpp rename {ade => teq}/coord.hpp (96%) rename {ade => teq}/funcarg.hpp (94%) rename {ade => teq}/functor.hpp (91%) rename {ade => teq}/grad_def.hpp (96%) rename {ade => teq}/idata.hpp (87%) rename {ade => teq}/ifunctor.hpp (87%) rename {ade => teq}/ileaf.hpp (78%) rename {ade => teq}/iopfunc.hpp (74%) rename {ade => teq}/itensor.hpp (88%) rename {ade => teq}/matops.hpp (82%) rename {ade => teq}/shape.hpp (98%) rename {ade => teq}/src/coord.cpp (98%) rename {ade => teq}/src/funcarg.cpp (96%) rename {ade => teq}/src/matops.cpp (98%) rename {ade => teq}/src/shape.cpp (94%) rename {ade => teq}/src/traveler.cpp (90%) create mode 100644 teq/teq.hpp rename {ade => teq}/test/common.hpp (67%) rename {pll => teq}/test/main.cpp (100%) rename {ade => teq}/test/test_coord.cpp (65%) rename {ade => teq}/test/test_funcarg.cpp (53%) create mode 100644 teq/test/test_functor.cpp rename {ade => teq}/test/test_grad.cpp (70%) rename {ade => teq}/test/test_matops.cpp (86%) rename {ade => teq}/test/test_shape.cpp (52%) create mode 100644 teq/test/test_tensor.cpp rename {ade => teq}/test/test_traveler.cpp (63%) rename {ade => teq}/traveler.hpp (90%) diff --git a/BUILD.bazel b/BUILD.bazel index 342c5b449..a1b4fd500 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -9,9 +9,9 @@ package( filegroup( name = "srcs", srcs = [ - "//ade:srcs", + "//teq:srcs", "//dbg:srcs", - "//ead:srcs", + "//eteq:srcs", "//opt:srcs", "//pbm:srcs", "//rocnnet:srcs", diff --git a/Doxyfile b/Doxyfile index e3f082b8a..aae0d09c3 100644 --- a/Doxyfile +++ b/Doxyfile @@ -791,7 +791,7 @@ WARN_LOGFILE = # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. -INPUT = README.md ade dbg ead opt pbm tag +INPUT = README.md teq dbg eteq opt pbm tag ccur modl # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. 
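The hunks above and below apply the same mechanical rename to build files and docs. To make the renames concrete (ade -> teq, ead -> eteq, pll -> ccur, and rocnnet/modl plus rocnnet/eqns -> layr), here is a minimal sketch of a call site written against the new layout; it is a hypothetical example rather than part of the patch, but every identifier in it appears elsewhere in this diff, and the pre-patch spelling is noted in comments.

#include <vector>

#include "eteq/eteq.hpp" // was "ead/ead.hpp"; generated tenncor:: API assumed reachable from here

int main ()
{
	// teq:: replaces the old ade:: namespace for shapes and the graph layer
	teq::Shape shape({5, 3});
	std::vector<double> data(shape.n_elems(), 0.5);

	// eteq:: replaces the old ead:: namespace for Eigen-backed nodes
	eteq::NodeptrT<double> x = eteq::make_variable<double>(data.data(), shape);

	// the generated tenncor:: API keeps its names across the rename
	auto y = tenncor::sigmoid(x);
	auto dy = eteq::derive(y, x);
	return 0;
}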
Doxygen uses
diff --git a/Makefile b/Makefile
index 84e7e9ecb..ee61eee84 100644
--- a/Makefile
+++ b/Makefile
@@ -2,9 +2,9 @@
 COVERAGE_INFO_FILE := bazel-out/_coverage/_coverage_report.dat
 COVERAGE_IGNORE := 'external/*' '**/test/*' 'testutil/*' '**/genfiles/*' 'dbg/*'
-CCOVER := bazel coverage --config asan --action_env="ASAN_OPTIONS=detect_leaks=0" --config gtest --config cc_coverage --define EAD_CFG=MIN
+CCOVER := bazel coverage --config asan --action_env="ASAN_OPTIONS=detect_leaks=0" --config gtest --config cc_coverage --define ETEQ_CFG=MIN
-ADE_TEST := //ade:test
+TEQ_TEST := //teq:test
 TAG_TEST := //tag:test
@@ -12,7 +12,7 @@ PBM_TEST := //pbm:test
 OPT_TEST := //opt/...
-EAD_CTEST := //ead:ctest
+ETEQ_CTEST := //eteq:ctest
 CC := gcc
@@ -26,34 +26,34 @@ print_vars:
 rocnnet_py_build:
 	bazel build --config $(CC)_eigen_optimal //rocnnet:rocnnet_py
-rocnnet_py_export: bazel-bin/rocnnet/rocnnet.so bazel-bin/ead/tenncor.so bazel-bin/ead/ead.so
+rocnnet_py_export: bazel-bin/rocnnet/rocnnet.so bazel-bin/eteq/tenncor.so bazel-bin/eteq/eteq.so
 	cp -f bazel-bin/rocnnet/rocnnet.so rocnnet/notebooks/rocnnet
-	cp -f bazel-bin/ead/*.so rocnnet/notebooks/ead
+	cp -f bazel-bin/eteq/*.so rocnnet/notebooks/eteq
 coverage:
-	$(CCOVER) $(ADE_TEST) $(TAG_TEST) $(PBM_TEST) $(OPT_TEST) $(EAD_CTEST)
+	$(CCOVER) $(TEQ_TEST) $(TAG_TEST) $(PBM_TEST) $(OPT_TEST) $(ETEQ_CTEST)
 	lcov --remove $(COVERAGE_INFO_FILE) -o coverage.info
 cover_ade:
-	$(CCOVER) $(ADE_TEST)
+	$(CCOVER) $(TEQ_TEST)
 	lcov --remove $(COVERAGE_INFO_FILE) -o coverage.info
 cover_tag:
 	$(CCOVER) $(TAG_TEST)
-	lcov --remove $(COVERAGE_INFO_FILE) 'ade/*' -o coverage.info
+	lcov --remove $(COVERAGE_INFO_FILE) 'teq/*' -o coverage.info
 cover_pbm:
 	$(CCOVER) $(PBM_TEST)
-	lcov --remove $(COVERAGE_INFO_FILE) 'ade/*' -o coverage.info
+	lcov --remove $(COVERAGE_INFO_FILE) 'teq/*' -o coverage.info
 cover_opt:
 	$(CCOVER) $(OPT_TEST)
-	lcov --remove $(COVERAGE_INFO_FILE) 'ade/*' 'tag/*' 'ead/*' -o coverage.info
+	lcov --remove $(COVERAGE_INFO_FILE) 'teq/*' 'tag/*' 'eteq/*' -o coverage.info
 cover_ead:
-	$(CCOVER) $(EAD_CTEST)
-	lcov --remove $(COVERAGE_INFO_FILE) 'ade/*' 'tag/*' 'opt/*' -o coverage.info
+	$(CCOVER) $(ETEQ_CTEST)
+	lcov --remove $(COVERAGE_INFO_FILE) 'teq/*' 'tag/*' 'opt/*' -o coverage.info
 # optimized comparisons
diff --git a/README.md b/README.md
index a1962e381..5eae98292 100644
--- a/README.md
+++ b/README.md
@@ -11,31 +11,31 @@ High-level diagram available: https://drive.google.com/file/d/1PrsFa7Duj4Whlu_m0
 ## Components
-- [ADE (Automatic Differentiation Engine)](ade/README_ADE.md)
+- [TEQ (Automatic Differentiation Engine)](teq/README_TEQ.md)
 This module supplies the syntax tree for an equation and generates its derivative.
 Constraints on the equation are limited to each tensor's shape.
-- [DBG (Debug)](ead/README_DBG.md)
+- [DBG (Debug)](dbg/README_DBG.md)
-This module is contains debug libraries for ADE Graphs.
+This module contains debug libraries for TEQ graphs.
-- [EAD (Eigen ADE Operators)](ead/README_EAD.md)
+- [ETEQ (Eigen TEQ Operators)](eteq/README_ETEQ.md)
-This module is implements basic operations for Tenncor's ADE Tensor objects generated through pybinder.
-Additionally, ead also defines data format and (de)serialization methods required by PBM.
+This module implements basic operations for Tenncor's TEQ tensor objects generated through pybinder.
+Additionally, ETEQ also defines the data format and (de)serialization methods required by PBM.
- [OPT (Optimizer)](opt/README_OPT.md) -This module specifies graph optimization through ADE's visitor pattern. +This module specifies graph optimization through TEQ's visitor pattern. - [PBM (Protobuf Marshaller)](pbm/README_PBM.md) -This module marshals any ADE graph, but requires data serialization functors when saving and loading. +This module marshals any TEQ graph, but requires data serialization functors when saving and loading. - [TAG (Tagger)](tag/README_TAG.md) -This module tags ADE tensors with labels. +This module tags TEQ tensors with labels. ## Tools and utility diff --git a/ade/ade.hpp b/ade/ade.hpp deleted file mode 100644 index a86aaed07..000000000 --- a/ade/ade.hpp +++ /dev/null @@ -1,11 +0,0 @@ -/// -/// ade.hpp -/// ade -/// -/// Purpose: -/// Collectively include all ade header files -/// - -#include "ade/functor.hpp" -#include "ade/traveler.hpp" -#include "ade/iopfunc.hpp" diff --git a/ade/bm/benchmark.cpp b/ade/bm/benchmark.cpp deleted file mode 100644 index 6f024658d..000000000 --- a/ade/bm/benchmark.cpp +++ /dev/null @@ -1,146 +0,0 @@ -#include - -#include "benchmark/benchmark.h" - -#include "ade/coord.hpp" - - -static std::random_device rnd_device; -static std::mt19937 mersenne_engine(rnd_device()); - - -template -static std::vector random_vector ( - ade::DimT lower, ade::DimT upper) -{ - std::vector out(N); - std::uniform_int_distribution dist(lower, upper); - std::generate(out.begin(), out.end(), - [&dist]() { return dist(mersenne_engine); }); - return out; -} - - -static ade::NElemT random_bignum (ade::NElemT lower, ade::NElemT upper) -{ - std::uniform_int_distribution dist(lower, upper); - return dist(mersenne_engine); -} - - -static void BM_MakeReduce(benchmark::State& state) -{ - std::vector slist; - for (auto _ : state) - { - state.PauseTiming(); - slist = random_vector(1, 255); - ade::RankT rank = random_bignum(0, ade::rank_cap - 1); - state.ResumeTiming(); - ade::reduce(rank, - std::vector(slist.begin() + rank, slist.end())); - } -} - -BENCHMARK(BM_MakeReduce); - - -static void BM_CoordFromIndex(benchmark::State& state) -{ - std::vector slist; - for (auto _ : state) - { - state.PauseTiming(); - slist = random_vector(1, 255); - ade::Shape shape(slist); - ade::NElemT index = random_bignum(0, shape.n_elems()); - state.ResumeTiming(); - ade::coordinate(shape, index); - } -} - -BENCHMARK(BM_CoordFromIndex); - - -static void BM_IndexFromCoord(benchmark::State& state) -{ - ade::CoordT coord; - std::vector slist; - for (auto _ : state) - { - state.PauseTiming(); - slist = random_vector(1, 255); - ade::Shape shape(slist); - ade::NElemT index = random_bignum(0, shape.n_elems()); - coord = ade::coordinate(shape, index); - state.ResumeTiming(); - ade::index(shape, coord); - } -} - -BENCHMARK(BM_IndexFromCoord); - - -static void BM_CoordReduce(benchmark::State& state) -{ - ade::CoordT outcoord, coord; - std::vector slist; - for (auto _ : state) - { - state.PauseTiming(); - slist = random_vector(1, 255); - ade::Shape shape(slist); - ade::NElemT index = random_bignum(0, shape.n_elems()); - coord = ade::coordinate(shape, index); - ade::RankT rank = random_bignum(0, ade::rank_cap - 1); - auto reducer = ade::reduce(rank, - std::vector(slist.begin() + rank, slist.end())); - state.ResumeTiming(); - reducer->forward(outcoord.begin(), coord.begin()); - } -} - -BENCHMARK(BM_CoordReduce); - - -static void BM_ReduceReverse(benchmark::State& state) -{ - std::vector slist; - for (auto _ : state) - { - state.PauseTiming(); - slist = random_vector(1, 255); - ade::RankT rank = 
random_bignum(0, ade::rank_cap - 1); - auto reducer = ade::reduce(rank, - std::vector(slist.begin() + rank, slist.end())); - state.ResumeTiming(); - delete reducer->reverse(); - } -} - -BENCHMARK(BM_ReduceReverse); - - -static void BM_RedPermConnect(benchmark::State& state) -{ - std::vector slist; - for (auto _ : state) - { - state.PauseTiming(); - slist = random_vector(1, 255); - ade::RankT rank = random_bignum(0, ade::rank_cap - 1); - std::vector indices(ade::rank_cap); - std::iota(indices.begin(), indices.end(), 0); - std::shuffle(indices.begin(), indices.end(), mersenne_engine); - auto permuter = ade::permute(indices); - auto reducer = ade::reduce(rank, - std::vector(slist.begin() + rank, slist.end())); - state.ResumeTiming(); - delete reducer->connect(*permuter); - } -} - -BENCHMARK(BM_RedPermConnect); - - -BENCHMARK_MAIN(); diff --git a/ade/test/test_functor.cpp b/ade/test/test_functor.cpp deleted file mode 100644 index bfb484c42..000000000 --- a/ade/test/test_functor.cpp +++ /dev/null @@ -1,104 +0,0 @@ - -#ifndef DISABLE_FUNCTOR_TEST - - -#include "gtest/gtest.h" - -#include "exam/exam.hpp" - -#include "ade/test/common.hpp" - -#include "ade/functor.hpp" - - -TEST(FUNCTOR, Shapes) -{ - std::vector slist = {94, 78, 70, 82, 62, 29, 38}; - std::vector bad = {94, 78, 70, 82, 62, 22, 38}; - ade::Shape shape(slist); - ade::Shape badshape(bad); - - ade::TensptrT leaf(new MockTensor(shape)); - ade::TensptrT leaf1(new MockTensor(shape)); - ade::TensptrT badleaf(new MockTensor(badshape)); - - ade::TensptrT func(ade::Functor::get(ade::Opcode{"MOCK", 0}, { - ade::identity_map(leaf), - ade::identity_map(leaf1), - })); - - ade::Shape gotshape = func->shape(); - EXPECT_ARREQ(shape, gotshape); - - EXPECT_FATAL(ade::Functor::get(ade::Opcode{"MOCK", 0}, {}), - "cannot perform `MOCK` with no arguments"); - - std::string fatalmsg = fmts::sprintf( - "cannot perform `MOCK` with incompatible shapes %s and %s", - shape.to_string().c_str(), badshape.to_string().c_str()); - EXPECT_FATAL(ade::Functor::get(ade::Opcode{"MOCK", 0}, { - ade::identity_map(leaf), - ade::identity_map(badleaf), - }), fatalmsg.c_str()); -} - - -TEST(FUNCTOR, Opcode) -{ - std::string mockname = "asd123101ksq"; - size_t mockcode = 3247; - ade::TensptrT leaf(new MockTensor()); - - ade::Functor* func = ade::Functor::get(ade::Opcode{mockname, mockcode}, { - ade::identity_map(leaf), - }); - - ade::Opcode op = func->get_opcode(); - EXPECT_STREQ(mockname.c_str(), op.name_.c_str()); - EXPECT_EQ(mockcode, op.code_); - - delete func; -} - - -TEST(FUNCTOR, Children) -{ - ade::TensptrT leaf(new MockTensor()); - ade::TensptrT leaf1(new MockTensor()); - ade::TensptrT leaf2(new MockTensor()); - - ade::FuncptrT func(ade::Functor::get(ade::Opcode{"MOCK", 0}, { - ade::identity_map(leaf), - ade::identity_map(leaf1), - })); - - ASSERT_NE(nullptr, func.get()); - - ade::ArgsT refs = func->get_children(); - - ASSERT_EQ(2, refs.size()); - EXPECT_EQ(leaf.get(), refs[0].get_tensor().get()); - EXPECT_EQ(leaf1.get(), refs[1].get_tensor().get()); - - EXPECT_WARN((func->update_child(ade::identity_map(leaf2), 1)), - "ade::Functor does not allow editing of children"); -} - - -TEST(FUNCTOR, ToString) -{ - ade::TensptrT leaf(new MockTensor()); - ade::TensptrT leaf1(new MockTensor()); - - ade::TensptrT func(ade::Functor::get(ade::Opcode{"MOCK", 0}, { - ade::identity_map(leaf), - ade::identity_map(leaf1), - })); - - ASSERT_NE(nullptr, func.get()); - - EXPECT_STREQ("MOCK", func->to_string().c_str()); -} - - -#endif // DISABLE_FUNCTOR_TEST diff --git 
a/ade/test/test_tensor.cpp b/ade/test/test_tensor.cpp deleted file mode 100644 index 3bee328aa..000000000 --- a/ade/test/test_tensor.cpp +++ /dev/null @@ -1,47 +0,0 @@ - -#ifndef DISABLE_TENSOR_TEST - - -#include "gtest/gtest.h" - -#include "exam/exam.hpp" - -#include "ade/test/common.hpp" - -#include "ade/funcarg.hpp" - - -TEST(TENSOR, FuncArg) -{ - std::vector slist = {2, 81}; - - size_t dim = 1; - ade::TensptrT tens(new MockTensor(ade::Shape(slist))); - ade::FuncArg mt = ade::flip_map(tens, dim); - - ade::Shape shape = mt.shape(); - EXPECT_ARREQ(slist, shape); - - ade::FuncArg mt2(tens, ade::CoordptrT(new ade::CoordMap( - [](ade::MatrixT m) - { - for (size_t i = 0; i < ade::mat_dim; ++i) - { - m[i][i] = 1; - } - m[0][0] = 4; - }))); - - ade::Shape shape2 = mt2.shape(); - EXPECT_EQ(4 * slist[0], shape2.at(0)); - - EXPECT_FATAL(ade::identity_map(nullptr), - "cannot map a null tensor"); - - EXPECT_FATAL(ade::FuncArg(nullptr, ade::reduce(3, {4}), - false, ade::extend(3, {4})), - "cannot map a null tensor"); -} - - -#endif // DISABLE_TENSOR_TEST diff --git a/pll/BUILD.bazel b/ccur/BUILD.bazel similarity index 79% rename from pll/BUILD.bazel rename to ccur/BUILD.bazel index d5dd5196a..0d12d8f1b 100644 --- a/pll/BUILD.bazel +++ b/ccur/BUILD.bazel @@ -37,30 +37,30 @@ filegroup( proto_library( name = "weight_proto", - srcs = ["//pll:protos"], + srcs = ["//ccur:protos"], ) cpp_proto_library( name = "weight_cc_proto", - deps = ["//pll:weight_proto"], + deps = ["//ccur:weight_proto"], ) cc_library( - name = "pll", + name = "ccur", hdrs = glob(["*.hpp"]), srcs = glob(["src/*.cpp"]), copts = ["-std=c++17"], deps = [ - "//ead:ead", + "//eteq:eteq", "@boost//:asio", ], ) pybind_library( - name = "pll_py", - cc_srcs = ["//pll:python/pll.cpp"], - cc_deps = ["//pll:pll"], - py_deps = ["//ead:ead_py"], + name = "ccur_py", + cc_srcs = ["//ccur:python/ccur.cpp"], + cc_deps = ["//ccur:ccur"], + py_deps = ["//eteq:eteq_py"], visibility = ["//visibility:public"], ) @@ -71,7 +71,7 @@ cc_binary( srcs = glob(["inspector.cpp"]), copts = ["-std=c++17"], deps = [ - "//pll:weight_cc_proto", + "//ccur:weight_cc_proto", "@com_github_mingkaic_cppkg//flag:flag", ], ) @@ -81,8 +81,8 @@ cc_binary( srcs = ["rtscale.cpp"], copts = ["-std=c++17"], deps = [ - "//ead:ead", - "//pll:weight_cc_proto", + "//eteq:eteq", + "//ccur:weight_cc_proto", "@com_github_mingkaic_cppkg//flag:flag", ], ) @@ -91,11 +91,11 @@ cc_binary( cc_test( name = "test", - srcs = ["//pll:test_srcs"], + srcs = ["//ccur:test_srcs"], copts = ["-std=c++17"], deps = [ "//dbg:stream_out", - "//pll:pll", + "//ccur:ccur", "@gtest//:gtest", "@com_github_mingkaic_cppkg//exam:exam", ], diff --git a/ccur/README_CCUR.md b/ccur/README_CCUR.md new file mode 100644 index 000000000..44600fa88 --- /dev/null +++ b/ccur/README_CCUR.md @@ -0,0 +1,3 @@ +# ConCURrent session (CCUR) + +Concurrent ETEQ session diff --git a/pll/inspector.cpp b/ccur/inspector.cpp similarity index 97% rename from pll/inspector.cpp rename to ccur/inspector.cpp index 49d6aa59b..49c2da5af 100644 --- a/pll/inspector.cpp +++ b/ccur/inspector.cpp @@ -5,7 +5,7 @@ #include "flag/flag.hpp" #include "fmts/fmts.hpp" -#include "pll/weights.pb.h" +#include "ccur/weights.pb.h" int main (int argc, const char** argv) { diff --git a/ccur/partition.hpp b/ccur/partition.hpp new file mode 100644 index 000000000..11acf5f8b --- /dev/null +++ b/ccur/partition.hpp @@ -0,0 +1,17 @@ +#include "teq/traveler.hpp" + +#ifndef CCE_PARTITION_HPP +#define CCE_PARTITION_HPP + +namespace ccur +{ + +using PartGroupsT = std::vector>; + 
+using OpWeightT = std::unordered_map; + +PartGroupsT k_partition (teq::TensT roots, size_t k, OpWeightT weights = OpWeightT()); + +} + +#endif // CCE_PARTITION_HPP diff --git a/ccur/python/ccur.cpp b/ccur/python/ccur.cpp new file mode 100644 index 000000000..55f8296b8 --- /dev/null +++ b/ccur/python/ccur.cpp @@ -0,0 +1,35 @@ +#include "pybind11/pybind11.h" +#include "pybind11/numpy.h" +#include "pybind11/stl.h" + +#include "eteq/generated/pyapi.hpp" +#include "eteq/parse.hpp" + +#include "ccur/session.hpp" + +namespace py = pybind11; + +PYBIND11_MODULE(ccur, m) +{ + m.doc() = "ccur session"; + + // ==== session ==== + auto isess = (py::class_) + py::module::import("eteq.eteq").attr("iSession"); + py::class_ session(m, "Session", isess); + + py::implicitly_convertible(); + session + .def(py::init(), + py::arg("nthread") = 2, + py::arg("weights") = ccur::OpWeightT()) + .def("optimize", + [](py::object self, std::string filename) + { + auto sess = self.cast(); + opt::OptCtx rules = eteq::parse_file(filename); + sess->optimize(rules); + }, + py::arg("filename") = "cfg/optimizations.rules", + "Optimize using rules for specified filename"); +} diff --git a/ccur/rtscale.cpp b/ccur/rtscale.cpp new file mode 100644 index 000000000..b46c4159a --- /dev/null +++ b/ccur/rtscale.cpp @@ -0,0 +1,233 @@ +// Weigh the runtime of each opcode +#include +#include +#include + +#include "flag/flag.hpp" + +#include "eteq/generated/api.hpp" +#include "eteq/generated/opcode.hpp" +#include "eteq/functor.hpp" + +#include "ccur/weights.pb.h" + +#define TIME(action)\ +std::chrono::high_resolution_clock::time_point start =\ + std::chrono::high_resolution_clock::now();\ +action;\ +stat = std::chrono::duration_cast(\ + std::chrono::high_resolution_clock::now() - start).count(); + +double softplus (double x) +{ + return std::log(1 + std::exp(x)); +} + +int main (int argc, const char** argv) +{ + std::string writepath; + flag::FlagSet flags("rt_anubis"); + flags.add_flags() + ("target", flag::opt::value(&writepath), + "filename of json to write weights to"); + + if (false == flags.parse(argc, argv)) + { + return 1; + } + + logs::get_logger().set_log_level(logs::INFO); + + std::unordered_map stats; + size_t mean_stat = 0;//, + // max_stat = 0, + // min_stat = std::numeric_limits::max(); + for (size_t i = 0; i < egen::_N_GENERATED_OPCODES; ++i) + { + size_t stat; + auto opcode = (egen::_GENERATED_OPCODE) i; + teq::Opcode op{egen::name_op(opcode), opcode}; + logs::infof("weighing operation %s", op.name_.c_str()); + switch (i) + { + // elementary unary + case egen::ABS: + case egen::NEG: + case egen::SIN: + case egen::COS: + case egen::TAN: + case egen::EXP: + case egen::LOG: + case egen::SQRT: + case egen::ROUND: + case egen::SIGMOID: + case egen::SIGMOID_GRAD: + case egen::TANH: + case egen::SQUARE: + case egen::CUBE: + { + auto var = eteq::make_constant_scalar( + 0.5, teq::Shape({56, 57, 58})); + auto f = eteq::make_functor(op, { + eteq::identity_map(var)}); + TIME(f->update()) + } + break; + + // elementary binary + case egen::POW: + case egen::ADD: + case egen::SUB: + case egen::MUL: + case egen::DIV: + case egen::MIN: + case egen::MAX: + case egen::EQ: + case egen::NEQ: + case egen::LT: + case egen::GT: + case egen::RAND_UNIF: + { + auto var = eteq::make_constant_scalar( + 0.5, teq::Shape({56, 57, 58})); + auto f = eteq::make_functor(op, { + eteq::identity_map(var), eteq::identity_map(var)}); + TIME(f->update()) + } + break; + + // reductions + case egen::REDUCE_SUM: + case egen::REDUCE_PROD: + case egen::REDUCE_MIN: + 
case egen::REDUCE_MAX: + { + auto var = eteq::make_constant_scalar( + 0.5, teq::Shape({56, 57, 58})); + auto f = eteq::make_functor(op, { + eteq::reduce_map(var, 1, 1)}); + TIME(f->update()) + } + break; + + // other stuff + case egen::PERMUTE: + { + auto var = eteq::make_constant_scalar( + 0.5, teq::Shape({56, 57, 58})); + auto f = eteq::make_functor(op, { + eteq::permute_map(var, {2, 0, 1})}); + TIME(f->update()) + } + break; + + case egen::EXTEND: + { + auto var = eteq::make_constant_scalar( + 0.5, teq::Shape({56, 58})); + auto f = eteq::make_functor(op, { + eteq::extend_map(var, 2, {57})}); + TIME(f->update()) + } + break; + + case egen::SLICE: + { + auto var = eteq::make_constant_scalar( + 0.5, teq::Shape({56, 57, 58})); + auto f = eteq::make_functor(op, { + eteq::slice_map(var, 2, 2, 2)}); + TIME(f->update()) + } + break; + + case egen::MATMUL: + { + auto a = eteq::make_constant_scalar( + 0.3, teq::Shape({253, 255})); + auto b = eteq::make_constant_scalar( + 0.6, teq::Shape({254, 253})); + auto f = tenncor::matmul(a, b); + TIME(f->update()) + } + break; + + case egen::CONV: + { + auto img = eteq::make_constant_scalar( + 0.3, teq::Shape({254, 255})); + auto kern = eteq::make_constant_scalar( + 0.6, teq::Shape({5, 7})); + auto f = tenncor::convolution(img, kern, {0, 1}); + TIME(f->update()) + } + break; + + case egen::PAD: + { + auto var = eteq::make_constant_scalar( + 0.5, teq::Shape({56, 57, 58})); + auto f = eteq::make_functor(op, { + eteq::pad_map(var, {3, 4}, 2)}); + TIME(f->update()) + } + break; + + case egen::SELECT: + { + teq::Shape shape({56, 57, 58}); + size_t n = shape.n_elems(); + std::vector data; + data.reserve(n); + for (size_t i = 0; i < n; ++i) + { + data.push_back(i % 2); + } + auto cond = eteq::make_constant(data.data(), shape); + auto a = eteq::make_constant_scalar(0.3, shape); + auto b = eteq::make_constant_scalar(0.6, shape); + auto f = tenncor::if_then_else(cond, a, b); + TIME(f->update()) + } + break; + + case egen::CONV_IMG_GRAD: + case egen::CONV_KRN_GRAD: + default: + continue; + } + mean_stat += stat; + // max_stat = std::max(max_stat, stat); + // min_stat = std::min(min_stat, stat); + stats.emplace(op.name_, stat); + } + + double mean = (double) mean_stat / stats.size(); + + // normalize stats by mean + weights::OpWeights opweights; + opweights.set_label("ead_weights"); + ::google::protobuf::Map< ::std::string,double>* weights = + opweights.mutable_weights(); + for (auto& op : stats) + { + double value = softplus((op.second - mean) / ( + mean + std::numeric_limits::epsilon())); + weights->insert({op.first, value}); + } + + logs::infof("writing to %s", writepath.c_str()); + std::fstream out(writepath, + std::ios::out | std::ios::trunc | std::ios::binary); + if (out.is_open()) + { + logs::infof("opened %s", writepath.c_str()); + if (opweights.SerializeToOstream(&out)) + { + logs::infof("done writing to %s", writepath.c_str()); + } + out.close(); + } + + return 0; +} diff --git a/pll/session.hpp b/ccur/session.hpp similarity index 71% rename from pll/session.hpp rename to ccur/session.hpp index aaf869515..966ca92ef 100644 --- a/pll/session.hpp +++ b/ccur/session.hpp @@ -3,55 +3,55 @@ #include #include -#include "ead/session.hpp" +#include "eteq/session.hpp" -#include "pll/partition.hpp" +#include "ccur/partition.hpp" #ifndef CCE_ASESS_HPP #define CCE_ASESS_HPP -namespace pll +namespace ccur { -using SessReqsT = std::vector>; +using SessReqsT = std::vector>; using AtomicFulfilMapT = std::unordered_map< - ade::iOperableFunc*,std::atomic>; + 
teq::iOperableFunc*,std::atomic>; -struct Session final : public ead::iSession +struct Session final : public eteq::iSession { Session (size_t nthreads = 2, OpWeightT weights = OpWeightT()) : nthreads_(nthreads), weights_(weights) {} - std::unordered_set tracked_; + std::unordered_set tracked_; - void track (ade::TensT roots) override + void track (teq::TensT roots) override { tracked_.insert(roots.begin(), roots.end()); - ade::GraphStat stat; + teq::GraphStat stat; for (auto& trac : tracked_) { trac->accept(stat); } - ade::ParentFinder pfinder; - for (ade::TensptrT& root : roots) + teq::ParentFinder pfinder; + for (teq::TensptrT& root : roots) { root->accept(pfinder); } - ade::TensT trackvecs(tracked_.begin(), tracked_.end()); + teq::TensT trackvecs(tracked_.begin(), tracked_.end()); PartGroupsT groups = k_partition(trackvecs, nthreads_, weights_); requirements_.clear(); for (auto& group : groups) { SessReqsT reqs; reqs.reserve(group.size()); - for (ade::iFunctor* func : group) + for (teq::iFunctor* func : group) { auto& args = func->get_children(); - ead::TensSetT unique_children; - for (const ade::FuncArg& arg : args) + eteq::TensSetT unique_children; + for (const teq::FuncArg& arg : args) { auto tens = arg.get_tensor().get(); if (0 < stat.graphsize_[tens].upper_) // ignore leaves @@ -60,7 +60,7 @@ struct Session final : public ead::iSession } } reqs.push_back({ - static_cast(func), + static_cast(func), unique_children.size() }); } @@ -72,7 +72,7 @@ struct Session final : public ead::iSession for (auto& parent_pair : assocs.second) { parents_[assocs.first].emplace( - static_cast(parent_pair.first)); + static_cast(parent_pair.first)); } } @@ -81,22 +81,22 @@ struct Session final : public ead::iSession { if (tpair.second.upper_ > 0) { - ops_.emplace(static_cast(tpair.first)); + ops_.emplace(static_cast(tpair.first)); } } } // this function is expected to be called repeatedly during runtime - void update (ead::TensSetT updated = {}, ead::TensSetT ignores = {}) override + void update (eteq::TensSetT updated = {}, eteq::TensSetT ignores = {}) override { AtomicFulfilMapT fulfilments; for (auto op : ops_) { fulfilments.emplace(op, 0); } - for (ade::iTensor* unodes : updated) + for (teq::iTensor* unodes : updated) { - if (dynamic_cast(unodes)) + if (dynamic_cast(unodes)) { auto& node_parents = parents_[unodes]; for (auto& node_parent : node_parents) @@ -121,7 +121,7 @@ struct Session final : public ead::iSession false == estd::has(ignores, op.first)) { op.first->update(); - std::unordered_set op_parents; + std::unordered_set op_parents; if (estd::get(op_parents, this->parents_, op.first)) { @@ -140,9 +140,9 @@ struct Session final : public ead::iSession } // this function is expected to be called repeatedly during runtime - void update_target (ead::TensSetT target, ead::TensSetT updated = {}) override + void update_target (eteq::TensSetT target, eteq::TensSetT updated = {}) override { - ade::OnceTraveler targetted; + teq::OnceTraveler targetted; for (auto& tens : target) { tens->accept(targetted); @@ -152,9 +152,9 @@ struct Session final : public ead::iSession { fulfilments.emplace(op, 0); } - for (ade::iTensor* unodes : updated) + for (teq::iTensor* unodes : updated) { - if (dynamic_cast(unodes)) + if (dynamic_cast(unodes)) { auto& node_parents = parents_[unodes]; for (auto& node_parent : node_parents) @@ -179,7 +179,7 @@ struct Session final : public ead::iSession estd::has(targetted.visited_, op.first)) { op.first->update(); - std::unordered_set op_parents; + std::unordered_set op_parents; 
if (estd::get(op_parents, this->parents_, op.first)) { @@ -199,7 +199,7 @@ struct Session final : public ead::iSession void optimize (const opt::OptCtx& rules) { - ade::TensT tracked(tracked_.begin(), tracked_.end()); + teq::TensT tracked(tracked_.begin(), tracked_.end()); opt::optimize(tracked, rules); parents_.clear(); track(tracked); @@ -207,15 +207,15 @@ struct Session final : public ead::iSession std::vector requirements_; - std::unordered_map> parents_; + std::unordered_map> parents_; private: size_t nthreads_; OpWeightT weights_; - std::unordered_set ops_; + std::unordered_set ops_; }; } diff --git a/pll/src/partition.cpp b/ccur/src/partition.cpp similarity index 81% rename from pll/src/partition.cpp rename to ccur/src/partition.cpp index 29fe41459..a3eb1e32f 100644 --- a/pll/src/partition.cpp +++ b/ccur/src/partition.cpp @@ -1,29 +1,29 @@ #include -#include "ade/iopfunc.hpp" +#include "teq/iopfunc.hpp" -#include "pll/partition.hpp" +#include "ccur/partition.hpp" #ifdef CCE_PARTITION_HPP -namespace pll +namespace ccur { struct WeighedGroup final { - std::vector reps_; + std::vector reps_; - std::unordered_set ancestors_; + std::unordered_set ancestors_; double weight_; }; -PartGroupsT k_partition (ade::TensT roots, size_t k, OpWeightT weights) +PartGroupsT k_partition (teq::TensT roots, size_t k, OpWeightT weights) { PartGroupsT groups; - ade::GraphStat stat; - ade::ParentFinder pfinder; + teq::GraphStat stat; + teq::ParentFinder pfinder; for (auto root : roots) { root->accept(stat); @@ -31,22 +31,22 @@ PartGroupsT k_partition (ade::TensT roots, size_t k, OpWeightT weights) } // partition by bases (the funcs right above variables) - std::vector bases; + std::vector bases; for (auto& gpair : stat.graphsize_) { if (gpair.second.upper_ == 1) { - bases.push_back(static_cast(gpair.first)); + bases.push_back(static_cast(gpair.first)); } } // partition bases by number of ancestor - std::unordered_map weight_map; - std::unordered_map> ancestors; + std::unordered_map weight_map; + std::unordered_map> ancestors; for (auto base : bases) { - std::queue q; + std::queue q; q.push(base); while (false == q.empty()) { @@ -54,13 +54,13 @@ PartGroupsT k_partition (ade::TensT roots, size_t k, OpWeightT weights) if (false == estd::has(weight_map, tens)) { double weight = 1; - if (auto op = dynamic_cast(tens)) + if (auto op = dynamic_cast(tens)) { weight = estd::try_get(weights, op->type_code(), 1); } weight_map.emplace(tens, weight); } - ade::ParentMapT parents; + teq::ParentMapT parents; if (estd::get(parents, pfinder.parents_, tens)) { for (auto& ppair : parents) @@ -86,7 +86,7 @@ PartGroupsT k_partition (ade::TensT roots, size_t k, OpWeightT weights) group.push_back(bases[i]); for (auto anc : ancs) { - group.push_back(static_cast(anc)); + group.push_back(static_cast(anc)); } } } @@ -153,7 +153,7 @@ PartGroupsT k_partition (ade::TensT roots, size_t k, OpWeightT weights) groups.reserve(nbases); for (auto& kgroup : kgroups) { - std::vector group; + std::vector group; group.reserve(kgroup.reps_.size() + kgroup.ancestors_.size()); for (auto& rep : kgroup.reps_) { @@ -161,7 +161,7 @@ PartGroupsT k_partition (ade::TensT roots, size_t k, OpWeightT weights) } for (auto& anc : kgroup.ancestors_) { - group.push_back(static_cast(anc)); + group.push_back(static_cast(anc)); } groups.push_back(group); } @@ -171,7 +171,7 @@ PartGroupsT k_partition (ade::TensT roots, size_t k, OpWeightT weights) for (auto& group : groups) { std::sort(group.begin(), group.end(), - [&stat](ade::iTensor* a, ade::iTensor* b) + 
[&stat](teq::iTensor* a, teq::iTensor* b) { return stat.graphsize_[a].upper_ < stat.graphsize_[b].upper_; }); diff --git a/ade/test/main.cpp b/ccur/test/main.cpp similarity index 100% rename from ade/test/main.cpp rename to ccur/test/main.cpp diff --git a/pll/test/test_partition.cpp b/ccur/test/test_partition.cpp similarity index 77% rename from pll/test/test_partition.cpp rename to ccur/test/test_partition.cpp index 0cee64b8e..f9bbb0eb2 100644 --- a/pll/test/test_partition.cpp +++ b/ccur/test/test_partition.cpp @@ -8,19 +8,19 @@ #include "exam/exam.hpp" -#include "ead/ead.hpp" +#include "eteq/eteq.hpp" -#include "pll/partition.hpp" +#include "ccur/partition.hpp" TEST(PARTITION, Kpartition) { - ade::Shape in_shape({10, 3}); - ade::Shape weight0_shape({9, 10}); - ade::Shape bias0_shape({9}); - ade::Shape weight1_shape({5, 9}); - ade::Shape bias1_shape({5}); - ade::Shape out_shape({5,3}); + teq::Shape in_shape({10, 3}); + teq::Shape weight0_shape({9, 10}); + teq::Shape bias0_shape({9}); + teq::Shape weight1_shape({5, 9}); + teq::Shape bias1_shape({5}); + teq::Shape out_shape({5,3}); std::vector in_data = { 0.8575073725, 0.0910915775, 0.9133499042, @@ -100,12 +100,12 @@ TEST(PARTITION, Kpartition) 0.4350741570, 0.3949956178, 0.2341486792, 0.1348473539, 0.8681677362, }; - ead::NodeptrT in = ead::make_variable(in_data.data(), in_shape); - ead::NodeptrT weight0 = ead::make_variable(w0_data.data(), weight0_shape); - ead::NodeptrT bias0 = ead::make_variable(b0_data.data(), bias0_shape); - ead::NodeptrT weight1 = ead::make_variable(w1_data.data(), weight1_shape); - ead::NodeptrT bias1 = ead::make_variable(b1_data.data(), bias1_shape); - ead::NodeptrT out = ead::make_variable(out_data.data(), out_shape); + eteq::NodeptrT in = eteq::make_variable(in_data.data(), in_shape); + eteq::NodeptrT weight0 = eteq::make_variable(w0_data.data(), weight0_shape); + eteq::NodeptrT bias0 = eteq::make_variable(b0_data.data(), bias0_shape); + eteq::NodeptrT weight1 = eteq::make_variable(w1_data.data(), weight1_shape); + eteq::NodeptrT bias1 = eteq::make_variable(b1_data.data(), bias1_shape); + eteq::NodeptrT out = eteq::make_variable(out_data.data(), out_shape); auto layer0 = tenncor::add(tenncor::matmul(in, weight0), tenncor::extend(bias0, 1, {3})); auto sig0 = tenncor::sigmoid(layer0); @@ -113,14 +113,14 @@ TEST(PARTITION, Kpartition) auto layer1 = tenncor::add(tenncor::matmul(sig0, weight1), tenncor::extend(bias1, 1, {3})); auto sig1 = tenncor::sigmoid(layer1); - auto err = tenncor::pow(tenncor::sub(out, sig1), ead::make_constant_scalar(2, out_shape)); + auto err = tenncor::pow(tenncor::sub(out, sig1), eteq::make_constant_scalar(2, out_shape)); - auto dw0 = ead::derive(err, weight0); - auto db0 = ead::derive(err, bias0); - auto dw1 = ead::derive(err, weight1); - auto db1 = ead::derive(err, bias1); + auto dw0 = eteq::derive(err, weight0); + auto db0 = eteq::derive(err, bias0); + auto dw1 = eteq::derive(err, weight1); + auto db1 = eteq::derive(err, bias1); - auto groups = pll::k_partition({ + auto groups = ccur::k_partition({ dw0->get_tensor(), db0->get_tensor(), dw1->get_tensor(), diff --git a/pll/test/test_session.cpp b/ccur/test/test_session.cpp similarity index 88% rename from pll/test/test_session.cpp rename to ccur/test/test_session.cpp index c11ddb390..de1b3d12d 100644 --- a/pll/test/test_session.cpp +++ b/ccur/test/test_session.cpp @@ -8,19 +8,19 @@ #include "exam/exam.hpp" -#include "ead/ead.hpp" +#include "eteq/eteq.hpp" -#include "pll/session.hpp" +#include "ccur/session.hpp" TEST(SESSION, Update) { - 
ade::Shape in_shape({10, 3}); - ade::Shape weight0_shape({9, 10}); - ade::Shape bias0_shape({9}); - ade::Shape weight1_shape({5, 9}); - ade::Shape bias1_shape({5}); - ade::Shape out_shape({5,3}); + teq::Shape in_shape({10, 3}); + teq::Shape weight0_shape({9, 10}); + teq::Shape bias0_shape({9}); + teq::Shape weight1_shape({5, 9}); + teq::Shape bias1_shape({5}); + teq::Shape out_shape({5,3}); std::vector in_data = { 0.8575073725, 0.0910915775, 0.9133499042, @@ -100,12 +100,12 @@ TEST(SESSION, Update) 0.4350741570, 0.3949956178, 0.2341486792, 0.1348473539, 0.8681677362, }; - ead::NodeptrT in = ead::make_variable(in_data.data(), in_shape); - ead::NodeptrT weight0 = ead::make_variable(w0_data.data(), weight0_shape); - ead::NodeptrT bias0 = ead::make_variable(b0_data.data(), bias0_shape); - ead::NodeptrT weight1 = ead::make_variable(w1_data.data(), weight1_shape); - ead::NodeptrT bias1 = ead::make_variable(b1_data.data(), bias1_shape); - ead::NodeptrT out = ead::make_variable(out_data.data(), out_shape); + eteq::NodeptrT in = eteq::make_variable(in_data.data(), in_shape); + eteq::NodeptrT weight0 = eteq::make_variable(w0_data.data(), weight0_shape); + eteq::NodeptrT bias0 = eteq::make_variable(b0_data.data(), bias0_shape); + eteq::NodeptrT weight1 = eteq::make_variable(w1_data.data(), weight1_shape); + eteq::NodeptrT bias1 = eteq::make_variable(b1_data.data(), bias1_shape); + eteq::NodeptrT out = eteq::make_variable(out_data.data(), out_shape); auto layer0 = tenncor::add(tenncor::matmul(in, weight0), tenncor::extend(bias0, 1, {3})); auto sig0 = tenncor::sigmoid(layer0); @@ -113,14 +113,14 @@ TEST(SESSION, Update) auto layer1 = tenncor::add(tenncor::matmul(sig0, weight1), tenncor::extend(bias1, 1, {3})); auto sig1 = tenncor::sigmoid(layer1); - auto err = tenncor::pow(tenncor::sub(out, sig1), ead::make_constant_scalar(2, out_shape)); + auto err = tenncor::pow(tenncor::sub(out, sig1), eteq::make_constant_scalar(2, out_shape)); - auto dw0 = ead::derive(err, weight0); - auto db0 = ead::derive(err, bias0); - auto dw1 = ead::derive(err, weight1); - auto db1 = ead::derive(err, bias1); + auto dw0 = eteq::derive(err, weight0); + auto db0 = eteq::derive(err, bias0); + auto dw1 = eteq::derive(err, weight1); + auto db1 = eteq::derive(err, bias1); - pll::Session sess(4); + ccur::Session sess(4); sess.track({ dw0->get_tensor(), db0->get_tensor(), diff --git a/pll/weights.proto b/ccur/weights.proto similarity index 68% rename from pll/weights.proto rename to ccur/weights.proto index 9e7c7389a..f38bd8621 100644 --- a/pll/weights.proto +++ b/ccur/weights.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -option go_package = "github.com/mingkaic/tenncor/pll"; +option go_package = "github.com/mingkaic/tenncor/ccur"; package weights; diff --git a/cfg/BUILD.bazel b/cfg/BUILD.bazel index efc97340a..71bdc75ca 100644 --- a/cfg/BUILD.bazel +++ b/cfg/BUILD.bazel @@ -5,13 +5,13 @@ package( ) filegroup( - name = "ead", - srcs = ["ead.yml"], + name = "eteq", + srcs = ["eteq.yml"], ) filegroup( - name = "ead_min", - srcs = ["ead_min.yml"], + name = "eteq_min", + srcs = ["eteq_min.yml"], ) filegroup( diff --git a/cfg/ead.yml b/cfg/ead.yml deleted file mode 100644 index c7c410511..000000000 --- a/cfg/ead.yml +++ /dev/null @@ -1,960 +0,0 @@ ---- -dtype: - DOUBLE: double - FLOAT: float - INT8: int8_t - UINT8: uint8_t - INT16: int16_t - UINT16: uint16_t - INT32: int32_t - UINT32: uint32_t - INT64: int64_t - UINT64: uint64_t -opcode: - operator_path: ead/operator.hpp - params: ade::Shape shape, ead::EigenptrT& out, std::vector>& in - 
opcalls: - ABS: out = ead::abs(shape,in[0]); - NEG: out = ead::neg(shape,in[0]); - SIN: out = ead::sin(shape,in[0]); - COS: out = ead::cos(shape,in[0]); - TAN: out = ead::tan(shape,in[0]); - EXP: out = ead::exp(shape,in[0]); - LOG: out = ead::log(shape,in[0]); - SQRT: out = ead::sqrt(shape,in[0]); - ROUND: out = ead::round(shape,in[0]); - SIGMOID: out = ead::sigmoid(shape,in[0]); - SIGMOID_GRAD: out = ead::sigmoid_grad(shape,in[0]); - TANH: out = ead::tanh(shape,in[0]); - SQUARE: out = ead::square(shape,in[0]); - CUBE: out = ead::cube(shape,in[0]); - POW: out = ead::pow(shape,in[0],in[1]); - ADD: out = ead::add(shape,in[0],in[1]); - SUB: out = ead::sub(shape,in[0],in[1]); - MUL: out = ead::mul(shape,in[0],in[1]); - DIV: out = ead::div(shape,in[0],in[1]); - MIN: out = ead::min(shape,in[0],in[1]); - MAX: out = ead::max(shape,in[0],in[1]); - EQ: out = ead::eq(shape,in[0],in[1]); - NEQ: out = ead::neq(shape,in[0],in[1]); - LT: out = ead::lt(shape,in[0],in[1]); - GT: out = ead::gt(shape,in[0],in[1]); - RAND_UNIF: out = ead::rand_uniform(shape,in[0],in[1]); - REDUCE_SUM: out = ead::reduce_sum(shape,in[0]); - REDUCE_PROD: out = ead::reduce_prod(shape,in[0]); - REDUCE_MIN: out = ead::reduce_min(shape,in[0]); - REDUCE_MAX: out = ead::reduce_max(shape,in[0]); - PERMUTE: out = ead::permute(shape,in[0]); - EXTEND: out = ead::extend(shape,in[0]); - MATMUL: out = ead::matmul(shape,in[0],in[1]); - CONV: out = ead::convolution(shape,in[0],in[1]); - SLICE: out = ead::slice(shape,in[0]); - PAD: out = ead::pad(shape,in[0]); - CONV_IMG_GRAD: out = ead::convolution_image_grad(shape,in[0],in[1]); - CONV_KRN_GRAD: out = ead::convolution_kernel_grad(shape,in[0],in[1]); - SELECT: out = ead::select(shape, in[0], in[1], in[2]); -api: - pybind_type: float - includes: - - '"ead/constant.hpp"' - - '"ead/variable.hpp"' - - '"ead/functor.hpp"' - - '"tag/group.hpp"' - - '"tag/prop.hpp"' - namespaces: - tenncor: - - template: typename T - name: abs - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"ABS",::age::ABS},{ead::identity_map(arg)}); - - template: typename T - name: neg - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"NEG",::age::NEG},{ead::identity_map(arg)}); - - template: typename T - name: sin - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"SIN",::age::SIN},{ead::identity_map(arg)}); - - template: typename T - name: cos - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"COS",::age::COS},{ead::identity_map(arg)}); - - template: typename T - name: tan - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"TAN",::age::TAN},{ead::identity_map(arg)}); - - template: typename T - name: exp - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"EXP",::age::EXP},{ead::identity_map(arg)}); - - template: typename T - name: log - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"LOG",::age::LOG},{ead::identity_map(arg)}); - - template: typename T - name: sqrt - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"SQRT",::age::SQRT},{ead::identity_map(arg)}); - - template: typename T - name: round 
- args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"ROUND",::age::ROUND},{ead::identity_map(arg)}); - - template: typename T - name: sigmoid - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"SIGMOID",::age::SIGMOID},{ead::identity_map(arg)}); - - template: typename T - name: sigmoid_grad - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"SIGMOID_GRAD",::age::SIGMOID_GRAD},{ead::identity_map(arg)}); - - template: typename T - name: tanh - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"TANH",::age::TANH},{ead::identity_map(arg)}); - - template: typename T - name: square - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"SQUARE",::age::SQUARE},{ead::identity_map(arg)}); - - template: typename T - name: cube - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"CUBE",::age::CUBE},{ead::identity_map(arg)}); - - template: typename T - name: pow - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"POW",::age::POW},{ead::identity_map(arg1),ead::identity_map(arg2)}); - - template: typename T - name: add - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - auto out = ead::make_functor(ade::Opcode{"ADD",::age::ADD}, { - ead::identity_map(arg1), - ead::identity_map(arg2), - }); - tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); - tag::get_group_reg().group_tag(out->get_tensor(), "sum"); - return out; - - template: typename T - name: sub - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"SUB",::age::SUB},{ead::identity_map(arg1),ead::identity_map(arg2)}); - - template: typename T - name: mul - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - auto out = ead::make_functor(ade::Opcode{"MUL",::age::MUL}, { - ead::identity_map(arg1), - ead::identity_map(arg2), - }); - tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); - tag::get_group_reg().group_tag(out->get_tensor(), "prod"); - return out; - - template: typename T - name: div - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"DIV",::age::DIV},{ead::identity_map(arg1),ead::identity_map(arg2)}); - - template: typename T - name: eq - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - auto out = ead::make_functor(ade::Opcode{"EQ",::age::EQ}, { - ead::identity_map(arg1), - ead::identity_map(arg2), - }); - tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); - return out; - - template: typename T - name: neq - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - auto out = ead::make_functor(ade::Opcode{"NEQ",::age::NEQ}, { - ead::identity_map(arg1), - 
ead::identity_map(arg2), - }); - tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); - return out; - - template: typename T - name: lt - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"LT",::age::LT},{ead::identity_map(arg1),ead::identity_map(arg2)}); - - template: typename T - name: gt - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"GT",::age::GT},{ead::identity_map(arg1),ead::identity_map(arg2)}); - - template: typename T - name: min - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - auto out = ead::make_functor(ade::Opcode{"MIN",::age::MIN}, { - ead::identity_map(arg1), - ead::identity_map(arg2), - }); - tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); - return out; - - template: typename T - name: max - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - auto out = ead::make_functor(ade::Opcode{"MAX",::age::MAX}, { - ead::identity_map(arg1), - ead::identity_map(arg2), - }); - tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); - return out; - - template: typename T - name: if_then_else - args: - - dtype: ead::NodeptrT - name: condition - - dtype: ead::NodeptrT - name: then - - dtype: ead::NodeptrT - name: otherwise - out: - type: ead::NodeptrT - val: | - // - return ead::make_functor(ade::Opcode{"SELECT",::age::SELECT},{ - ead::identity_map(condition), - ead::identity_map(then), - ead::identity_map(otherwise) - }); - - template: typename T - name: permute - args: - - dtype: ead::NodeptrT - name: arg - - dtype: std::vector - name: order - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"PERMUTE",::age::PERMUTE},{ead::permute_map(arg,order)}); - - template: typename T - name: extend - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: offset - - dtype: std::vector - name: xlist - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"EXTEND",::age::EXTEND},{ead::extend_map(arg,offset,xlist)}); - - template: typename T - name: reduce_sum - args: - - dtype: ead::NodeptrT - name: tens - - dtype: ade::RankT - name: offset - default: '0' - - dtype: ade::RankT - name: ndims - default: ade::rank_cap - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"REDUCE_SUM",::age::REDUCE_SUM},{ead::reduce_map(tens,offset,ndims)}); - description: sum values ignoring coordinate indices between start and end - - template: typename T - name: reduce_prod - args: - - dtype: ead::NodeptrT - name: tens - - dtype: ade::RankT - name: offset - default: '0' - - dtype: ade::RankT - name: ndims - default: ade::rank_cap - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"REDUCE_PROD",::age::REDUCE_PROD},{ead::reduce_map(tens,offset,ndims)}); - description: multiply values ignoring coordinate indices between start and end - - template: typename T - name: reduce_min - args: - - dtype: ead::NodeptrT - name: tens - - dtype: ade::RankT - name: offset - default: '0' - - dtype: ade::RankT - name: ndims - default: ade::rank_cap - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"REDUCE_MIN",::age::REDUCE_MIN},{ead::reduce_map(tens,offset,ndims)}); - 
description: min values ignoring coordinate indices between start and end - - template: typename T - name: reduce_max - args: - - dtype: ead::NodeptrT - name: tens - - dtype: ade::RankT - name: offset - default: '0' - - dtype: ade::RankT - name: ndims - default: ade::rank_cap - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"REDUCE_MAX",::age::REDUCE_MAX},{ead::reduce_map(tens,offset,ndims)}); - description: max values ignoring coordinate indices between start and end - - template: typename T - name: n_elems - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_constant_scalar(arg->get_tensor()->shape().n_elems(), ade::Shape()); - - template: typename T - name: n_dims - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: rank - out: - type: ead::NodeptrT - val: return ead::make_constant_scalar(arg->get_tensor()->shape().at(rank), ade::Shape()); - - template: typename T - name: slice - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: offset - - dtype: ade::RankT - name: extent - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - return ead::make_functor(ade::Opcode{"SLICE",::age::SLICE}, { - ead::slice_map(arg, offset, extent, dimension)}); - - template: typename T - name: pad - args: - - dtype: ead::NodeptrT - name: arg - - dtype: std::pair - name: padding - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - return ead::make_functor(ade::Opcode{"PAD",::age::PAD}, { - ead::pad_map(arg, padding, dimension)}); - - template: typename T - name: matmul - args: - - dtype: ead::NodeptrT - name: a - - dtype: ead::NodeptrT - name: b - out: - type: ead::NodeptrT - val: | - // - ade::Shape ashape = a->get_tensor()->shape(); - ade::Shape bshape = b->get_tensor()->shape(); - ade::DimT ncommon = ashape.at(0); - ade::DimT nrow = ashape.at(1); - ade::DimT ncol = bshape.at(0); - if (ncommon != bshape.at(1)) - { - logs::fatalf("invalid matmul shapes %s and %s", - ashape.to_string().c_str(), bshape.to_string().c_str()); - } - - ade::CoordptrT left_shaper(new ade::CoordMap( - [=](ade::MatrixT fwd) - { - for (ade::RankT i = 3; i < ade::mat_dim; ++i) - { - fwd[i][i] = 1; - } - fwd[2][0] = ncol; - fwd[1][1] = 1; - fwd[0][2] = 1.0 / ncommon; - } - )); - - ade::CoordptrT right_shaper(new ade::CoordMap( - [=](ade::MatrixT fwd) - { - for (ade::RankT i = 3; i < ade::mat_dim; ++i) - { - fwd[i][i] = 1; - } - fwd[0][0] = 1; - fwd[2][1] = nrow; - fwd[1][2] = 1.0 / ncommon; - } - )); - return ead::make_functor(ade::Opcode{"MATMUL",::age::MATMUL}, { - ead::FuncArg(a, left_shaper, nullptr), - ead::FuncArg(b, right_shaper, nullptr) - }); - - template: typename T - name: convolution - args: - - dtype: ead::NodeptrT - name: input - - dtype: ead::NodeptrT - name: kernel - - dtype: std::vector - name: dims - out: - type: ead::NodeptrT - val: | - // - ade::Shape inshape = input->get_tensor()->shape(); - ade::Shape kernelshape = kernel->get_tensor()->shape(); - ade::CoordptrT input_shaper(new ade::CoordMap( - [kernelshape](ade::MatrixT fwd) - { - for (ade::RankT i = 0; i < ade::rank_cap; ++i) - { - fwd[i][i] = 1; - } - for (ade::RankT i = 0; i < ade::rank_cap; ++i) - { - fwd[ade::rank_cap][i] = -kernelshape.at(i) + 1; - } - } - )); - - ade::CoordptrT kernel_shaper(new ade::CoordMap( - [inshape](ade::MatrixT fwd) - { - for (ade::RankT i = 0; i < ade::rank_cap; ++i) - { - fwd[i][i] = -1; - } - for (ade::RankT i = 0; i < ade::rank_cap; ++i) - { - 
fwd[ade::rank_cap][i] = inshape.at(i) + 1; - } - } - )); - - ade::CoordT kernel_dims; - auto it = kernel_dims.begin(); - std::fill(it, kernel_dims.end(), ade::rank_cap); - std::copy(dims.begin(), dims.end(), it); - return ead::make_functor(ade::Opcode{"CONV",::age::CONV}, { - ead::FuncArg(input, input_shaper, nullptr), - ead::FuncArg(kernel, kernel_shaper, - std::make_shared(kernel_dims, true)), - }); - - template: typename T - name: reduce_sum_1d - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - auto red = ::tenncor::reduce_sum(arg, dimension, 1); - - std::vector indices(ade::rank_cap); - auto bt = indices.begin(); - auto it = bt + dimension; - std::iota(bt, it, 0); - std::iota(it, indices.end(), dimension + 1); - indices[ade::rank_cap - 1] = dimension; - return ::tenncor::permute(red, indices); - - template: typename T - name: reduce_prod_1d - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - auto red = ::tenncor::reduce_prod(arg, dimension, 1); - - std::vector indices(ade::rank_cap); - auto bt = indices.begin(); - auto it = bt + dimension; - std::iota(bt, it, 0); - std::iota(it, indices.end(), dimension + 1); - indices[ade::rank_cap - 1] = dimension; - return ::tenncor::permute(red, indices); - - template: typename T - name: reduce_min_1d - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - auto red = ::tenncor::reduce_min(arg, dimension, 1); - - std::vector indices(ade::rank_cap); - auto bt = indices.begin(); - auto it = bt + dimension; - std::iota(bt, it, 0); - std::iota(it, indices.end(), dimension + 1); - indices[ade::rank_cap - 1] = dimension; - return ::tenncor::permute(red, indices); - - template: typename T - name: reduce_max_1d - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - auto red = ::tenncor::reduce_max(arg, dimension, 1); - - std::vector indices(ade::rank_cap); - auto bt = indices.begin(); - auto it = bt + dimension; - std::iota(bt, it, 0); - std::iota(it, indices.end(), dimension + 1); - indices[ade::rank_cap - 1] = dimension; - return ::tenncor::permute(red, indices); - - template: typename T - name: transpose - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ::tenncor::permute(arg, {1, 0}); - - template: typename T - name: reduce_mean - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ::tenncor::div(::tenncor::reduce_sum(arg), ::tenncor::n_elems(arg)); - - template: typename T - name: reduce_mean_1d - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - auto red = ::tenncor::reduce_sum_1d(arg, dimension); - auto dim = ead::make_constant_scalar(arg->shape().at(dimension), red->shape()); - return ::tenncor::div(red, dim); - - template: typename T - name: reduce_l2norm - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: offset - default: '0' - - dtype: ade::RankT - name: ndims - default: ade::rank_cap - out: - type: ead::NodeptrT - val: return ::tenncor::sqrt(::tenncor::reduce_sum(::tenncor::square(arg), offset, ndims)); - - template: typename T - name: reduce_l2norm_1d - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: 
return ::tenncor::sqrt(::tenncor::reduce_sum_1d(::tenncor::square(arg), dimension)); - - template: typename T - name: clip_by_range - args: - - dtype: ead::NodeptrT - name: arg - - dtype: T - name: minval - - dtype: T - name: maxval - out: - type: ead::NodeptrT - val: | - // - if (minval > maxval) - { - logs::fatal("min value is below max"); - } - ade::Shape shape = arg->shape(); - auto lo = ead::make_constant_scalar(minval, shape); - auto hi = ead::make_constant_scalar(maxval, shape); - auto out = ::tenncor::max(::tenncor::min(arg, hi), lo); - tag::recursive_group_tag(out->get_tensor(), "clip_by_range", { - arg->get_tensor().get(), - lo->get_tensor().get(), - hi->get_tensor().get(), - }); - return out; - - template: typename T - name: clip_by_l2norm - args: - - dtype: ead::NodeptrT - name: arg - - dtype: T - name: upper - out: - type: ead::NodeptrT - val: | - // - if (upper == 0) - { - logs::fatal("cannot clip_by_norm with a upper limit of 0"); - } - ade::Shape shape = arg->shape(); - auto limit = ead::make_constant_scalar(upper, shape); - auto norm = ::tenncor::extend(::tenncor::reduce_l2norm(arg), 0, - std::vector(shape.begin(), shape.end())); - auto out = ::tenncor::if_then_else(::tenncor::lt(norm, limit), - arg, ::tenncor::div(::tenncor::mul(arg, limit), norm)); - tag::recursive_group_tag(out->get_tensor(), "clip_by_l2norm", { - arg->get_tensor().get(), - limit->get_tensor().get(), - }); - return out; - description: 'clip by l2norm ((todo) allow l2norm to be configurable)' - - template: typename T - name: sum - args: - - dtype: ead::NodesT - name: args - out: - type: ead::NodeptrT - val: | - // - if (args.empty()) - { - logs::fatal("cannot sum without arguments"); - } - ead::NodeptrT out = args[0]; - for (size_t i = 1, n = args.size(); i < n; ++i) - { - out = ::tenncor::add(out, args[i]); - } - return out; - - template: typename T - name: prod - args: - - dtype: ead::NodesT - name: args - out: - type: ead::NodeptrT - val: | - // - if (args.empty()) - { - logs::fatal("cannot sum without arguments"); - } - ead::NodeptrT out = args[0]; - for (size_t i = 1, n = args.size(); i < n; ++i) - { - out = ::tenncor::mul(out, args[i]); - } - return out; - - template: typename T - name: softmax - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: offset - default: '0' - - dtype: ade::RankT - name: ndims - default: ade::rank_cap - out: - type: ead::NodeptrT - val: | - // - auto exarg = exp(arg); - ade::Shape shape = exarg->shape(); - auto it = shape.begin() + offset; - std::vector xlist(it, it + ndims); - auto out = ::tenncor::div(exarg, - ::tenncor::extend(::tenncor::reduce_sum(exarg, offset, offset+ndims), - offset, xlist)); - tag::recursive_group_tag(out->get_tensor(), "softmax", { - arg->get_tensor().get()}); - return out; - - template: typename T - name: sign - args: - - dtype: ead::NodeptrT - name: x - out: - type: ead::NodeptrT - val: return ::tenncor::pow(x,ead::make_constant_scalar(0,x->shape())); - tenncor::random: - - template: typename T - name: rand_unif - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - return ead::make_functor( - ade::Opcode{"RAND_UNIF",::age::RAND_UNIF},{ - ead::identity_map(arg1), - ead::identity_map(arg2) - }); - - template: typename T - name: rand_binom_one - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: | - // - const ade::Shape& shape = arg->get_tensor()->shape(); - auto trial = ::tenncor::random::rand_unif( - 
ead::convert_to_node(ead::make_variable_scalar((T) 0, shape)), - ead::convert_to_node(ead::make_variable_scalar((T) 1, shape))); - return ::tenncor::lt(trial, arg); - tenncor::nn: - - template: typename T - name: relu - args: - - dtype: ead::NodeptrT - name: x - out: - type: ead::NodeptrT - val: return ::tenncor::max(x,ead::make_constant_scalar(0,x->shape())); - - template: typename T - name: conv2d - args: - - dtype: ead::NodeptrT - name: image - - dtype: ead::NodeptrT - name: kernel - out: - type: ead::NodeptrT - val: | - // - // image must be in form [in, width, height, batch] - // kernel must be in form [out, in, width, height] - // see https://www.tensorflow.org/api_docs/python/tf/nn/conv2d - ade::DimT nfilters = kernel->shape().at(0); - ead::NodesT convolveds; - convolveds.reserve(nfilters); - for (ade::DimT i = 0; i < nfilters; ++i) - { - auto filter = ::tenncor::permute( - ::tenncor::slice(kernel, i, 1, 0), - {1, 2, 3, 0}); - auto conved = ::tenncor::convolution(image, filter, - {0, 1, 2}); - auto padded = ::tenncor::pad(conved, - {i, nfilters - i - 1}, 0); - convolveds.push_back(padded); - } - auto out = ::tenncor::sum(convolveds); - tag::recursive_group_tag(out->get_tensor(), "conv2d", { - image->get_tensor().get(), - kernel->get_tensor().get() - }); - return out; - - template: typename T - name: fully_connect - args: - - dtype: ead::NodesT - name: inputs - - dtype: ead::NodesT - name: weights - - dtype: ead::NodeptrT - name: bias - out: - type: ead::NodeptrT - val: | - // - if (weights.empty()) - { - logs::fatal("cannot create a fully connected layer without weights"); - } - size_t ninputs = inputs.size(); - if (ninputs != weights.size()) - { - logs::fatalf( - "number of inputs (%d) must equal the number of weights (%d)", - ninputs, weights.size()); - } - std::unordered_set ignores = { - inputs[0]->get_tensor().get(), - weights[0]->get_tensor().get() - }; - auto out = ::tenncor::matmul(inputs[0], weights[0]); - for (size_t i = 1; i < ninputs; ++i) - { - ignores.emplace(inputs[i]->get_tensor().get()); - ignores.emplace(weights[i]->get_tensor().get()); - out = ::tenncor::add(out, ::tenncor::matmul(inputs[i], weights[i])); - } - if (nullptr != bias) - { - const ade::Shape& shape = out->shape(); - out = ::tenncor::add(out, ::tenncor::extend(bias, 1, {shape.at(1)})); - ignores.emplace(bias->get_tensor().get()); - } - tag::recursive_group_tag(out->get_tensor(), "fully_connect", ignores); - return out; diff --git a/cfg/ead_min.yml b/cfg/ead_min.yml deleted file mode 100644 index 5c74fd707..000000000 --- a/cfg/ead_min.yml +++ /dev/null @@ -1,952 +0,0 @@ ---- -dtype: - DOUBLE: double - FLOAT: float -opcode: - operator_path: ead/operator.hpp - params: ade::Shape shape, ead::EigenptrT& out, std::vector>& in - opcalls: - ABS: out = ead::abs(shape,in[0]); - NEG: out = ead::neg(shape,in[0]); - SIN: out = ead::sin(shape,in[0]); - COS: out = ead::cos(shape,in[0]); - TAN: out = ead::tan(shape,in[0]); - EXP: out = ead::exp(shape,in[0]); - LOG: out = ead::log(shape,in[0]); - SQRT: out = ead::sqrt(shape,in[0]); - ROUND: out = ead::round(shape,in[0]); - SIGMOID: out = ead::sigmoid(shape,in[0]); - SIGMOID_GRAD: out = ead::sigmoid_grad(shape,in[0]); - TANH: out = ead::tanh(shape,in[0]); - SQUARE: out = ead::square(shape,in[0]); - CUBE: out = ead::cube(shape,in[0]); - POW: out = ead::pow(shape,in[0],in[1]); - ADD: out = ead::add(shape,in[0],in[1]); - SUB: out = ead::sub(shape,in[0],in[1]); - MUL: out = ead::mul(shape,in[0],in[1]); - DIV: out = ead::div(shape,in[0],in[1]); - MIN: out = 
ead::min(shape,in[0],in[1]); - MAX: out = ead::max(shape,in[0],in[1]); - EQ: out = ead::eq(shape,in[0],in[1]); - NEQ: out = ead::neq(shape,in[0],in[1]); - LT: out = ead::lt(shape,in[0],in[1]); - GT: out = ead::gt(shape,in[0],in[1]); - RAND_UNIF: out = ead::rand_uniform(shape,in[0],in[1]); - REDUCE_SUM: out = ead::reduce_sum(shape,in[0]); - REDUCE_PROD: out = ead::reduce_prod(shape,in[0]); - REDUCE_MIN: out = ead::reduce_min(shape,in[0]); - REDUCE_MAX: out = ead::reduce_max(shape,in[0]); - PERMUTE: out = ead::permute(shape,in[0]); - EXTEND: out = ead::extend(shape,in[0]); - MATMUL: out = ead::matmul(shape,in[0],in[1]); - CONV: out = ead::convolution(shape,in[0],in[1]); - SLICE: out = ead::slice(shape,in[0]); - PAD: out = ead::pad(shape,in[0]); - CONV_IMG_GRAD: out = ead::convolution_image_grad(shape,in[0],in[1]); - CONV_KRN_GRAD: out = ead::convolution_kernel_grad(shape,in[0],in[1]); - SELECT: out = ead::select(shape, in[0], in[1], in[2]); -api: - pybind_type: float - includes: - - '"ead/constant.hpp"' - - '"ead/variable.hpp"' - - '"ead/functor.hpp"' - - '"tag/group.hpp"' - - '"tag/prop.hpp"' - namespaces: - tenncor: - - template: typename T - name: abs - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"ABS",::age::ABS},{ead::identity_map(arg)}); - - template: typename T - name: neg - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"NEG",::age::NEG},{ead::identity_map(arg)}); - - template: typename T - name: sin - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"SIN",::age::SIN},{ead::identity_map(arg)}); - - template: typename T - name: cos - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"COS",::age::COS},{ead::identity_map(arg)}); - - template: typename T - name: tan - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"TAN",::age::TAN},{ead::identity_map(arg)}); - - template: typename T - name: exp - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"EXP",::age::EXP},{ead::identity_map(arg)}); - - template: typename T - name: log - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"LOG",::age::LOG},{ead::identity_map(arg)}); - - template: typename T - name: sqrt - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"SQRT",::age::SQRT},{ead::identity_map(arg)}); - - template: typename T - name: round - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"ROUND",::age::ROUND},{ead::identity_map(arg)}); - - template: typename T - name: sigmoid - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"SIGMOID",::age::SIGMOID},{ead::identity_map(arg)}); - - template: typename T - name: sigmoid_grad - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"SIGMOID_GRAD",::age::SIGMOID_GRAD},{ead::identity_map(arg)}); - - template: typename T - name: tanh - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return 
ead::make_functor(ade::Opcode{"TANH",::age::TANH},{ead::identity_map(arg)}); - - template: typename T - name: square - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"SQUARE",::age::SQUARE},{ead::identity_map(arg)}); - - template: typename T - name: cube - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"CUBE",::age::CUBE},{ead::identity_map(arg)}); - - template: typename T - name: pow - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"POW",::age::POW},{ead::identity_map(arg1),ead::identity_map(arg2)}); - - template: typename T - name: add - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - auto out = ead::make_functor(ade::Opcode{"ADD",::age::ADD}, { - ead::identity_map(arg1), - ead::identity_map(arg2), - }); - tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); - tag::get_group_reg().group_tag(out->get_tensor(), "sum"); - return out; - - template: typename T - name: sub - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"SUB",::age::SUB},{ead::identity_map(arg1),ead::identity_map(arg2)}); - - template: typename T - name: mul - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - auto out = ead::make_functor(ade::Opcode{"MUL",::age::MUL}, { - ead::identity_map(arg1), - ead::identity_map(arg2), - }); - tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); - tag::get_group_reg().group_tag(out->get_tensor(), "prod"); - return out; - - template: typename T - name: div - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"DIV",::age::DIV},{ead::identity_map(arg1),ead::identity_map(arg2)}); - - template: typename T - name: eq - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - auto out = ead::make_functor(ade::Opcode{"EQ",::age::EQ}, { - ead::identity_map(arg1), - ead::identity_map(arg2), - }); - tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); - return out; - - template: typename T - name: neq - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - auto out = ead::make_functor(ade::Opcode{"NEQ",::age::NEQ}, { - ead::identity_map(arg1), - ead::identity_map(arg2), - }); - tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); - return out; - - template: typename T - name: lt - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"LT",::age::LT},{ead::identity_map(arg1),ead::identity_map(arg2)}); - - template: typename T - name: gt - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"GT",::age::GT},{ead::identity_map(arg1),ead::identity_map(arg2)}); - - template: typename T - name: min - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: 
ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - auto out = ead::make_functor(ade::Opcode{"MIN",::age::MIN}, { - ead::identity_map(arg1), - ead::identity_map(arg2), - }); - tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); - return out; - - template: typename T - name: max - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - auto out = ead::make_functor(ade::Opcode{"MAX",::age::MAX}, { - ead::identity_map(arg1), - ead::identity_map(arg2), - }); - tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); - return out; - - template: typename T - name: if_then_else - args: - - dtype: ead::NodeptrT - name: condition - - dtype: ead::NodeptrT - name: then - - dtype: ead::NodeptrT - name: otherwise - out: - type: ead::NodeptrT - val: | - // - return ead::make_functor(ade::Opcode{"SELECT",::age::SELECT},{ - ead::identity_map(condition), - ead::identity_map(then), - ead::identity_map(otherwise) - }); - - template: typename T - name: permute - args: - - dtype: ead::NodeptrT - name: arg - - dtype: std::vector - name: order - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"PERMUTE",::age::PERMUTE},{ead::permute_map(arg,order)}); - - template: typename T - name: extend - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: offset - - dtype: std::vector - name: xlist - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"EXTEND",::age::EXTEND},{ead::extend_map(arg,offset,xlist)}); - - template: typename T - name: reduce_sum - args: - - dtype: ead::NodeptrT - name: tens - - dtype: ade::RankT - name: offset - default: '0' - - dtype: ade::RankT - name: ndims - default: ade::rank_cap - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"REDUCE_SUM",::age::REDUCE_SUM},{ead::reduce_map(tens,offset,ndims)}); - description: sum values ignoring coordinate indices between start and end - - template: typename T - name: reduce_prod - args: - - dtype: ead::NodeptrT - name: tens - - dtype: ade::RankT - name: offset - default: '0' - - dtype: ade::RankT - name: ndims - default: ade::rank_cap - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"REDUCE_PROD",::age::REDUCE_PROD},{ead::reduce_map(tens,offset,ndims)}); - description: multiply values ignoring coordinate indices between start and end - - template: typename T - name: reduce_min - args: - - dtype: ead::NodeptrT - name: tens - - dtype: ade::RankT - name: offset - default: '0' - - dtype: ade::RankT - name: ndims - default: ade::rank_cap - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"REDUCE_MIN",::age::REDUCE_MIN},{ead::reduce_map(tens,offset,ndims)}); - description: min values ignoring coordinate indices between start and end - - template: typename T - name: reduce_max - args: - - dtype: ead::NodeptrT - name: tens - - dtype: ade::RankT - name: offset - default: '0' - - dtype: ade::RankT - name: ndims - default: ade::rank_cap - out: - type: ead::NodeptrT - val: return ead::make_functor(ade::Opcode{"REDUCE_MAX",::age::REDUCE_MAX},{ead::reduce_map(tens,offset,ndims)}); - description: max values ignoring coordinate indices between start and end - - template: typename T - name: n_elems - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ead::make_constant_scalar(arg->get_tensor()->shape().n_elems(), ade::Shape()); - - template: typename T - name: n_dims - 
args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: rank - out: - type: ead::NodeptrT - val: return ead::make_constant_scalar(arg->get_tensor()->shape().at(rank), ade::Shape()); - - template: typename T - name: slice - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: offset - - dtype: ade::RankT - name: extent - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - return ead::make_functor(ade::Opcode{"SLICE",::age::SLICE}, { - ead::slice_map(arg, offset, extent, dimension)}); - - template: typename T - name: pad - args: - - dtype: ead::NodeptrT - name: arg - - dtype: std::pair - name: padding - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - return ead::make_functor(ade::Opcode{"PAD",::age::PAD}, { - ead::pad_map(arg, padding, dimension)}); - - template: typename T - name: matmul - args: - - dtype: ead::NodeptrT - name: a - - dtype: ead::NodeptrT - name: b - out: - type: ead::NodeptrT - val: | - // - ade::Shape ashape = a->get_tensor()->shape(); - ade::Shape bshape = b->get_tensor()->shape(); - ade::DimT ncommon = ashape.at(0); - ade::DimT nrow = ashape.at(1); - ade::DimT ncol = bshape.at(0); - if (ncommon != bshape.at(1)) - { - logs::fatalf("invalid matmul shapes %s and %s", - ashape.to_string().c_str(), bshape.to_string().c_str()); - } - - ade::CoordptrT left_shaper(new ade::CoordMap( - [=](ade::MatrixT fwd) - { - for (ade::RankT i = 3; i < ade::mat_dim; ++i) - { - fwd[i][i] = 1; - } - fwd[2][0] = ncol; - fwd[1][1] = 1; - fwd[0][2] = 1.0 / ncommon; - } - )); - - ade::CoordptrT right_shaper(new ade::CoordMap( - [=](ade::MatrixT fwd) - { - for (ade::RankT i = 3; i < ade::mat_dim; ++i) - { - fwd[i][i] = 1; - } - fwd[0][0] = 1; - fwd[2][1] = nrow; - fwd[1][2] = 1.0 / ncommon; - } - )); - return ead::make_functor(ade::Opcode{"MATMUL",::age::MATMUL}, { - ead::FuncArg(a, left_shaper, nullptr), - ead::FuncArg(b, right_shaper, nullptr) - }); - - template: typename T - name: convolution - args: - - dtype: ead::NodeptrT - name: input - - dtype: ead::NodeptrT - name: kernel - - dtype: std::vector - name: dims - out: - type: ead::NodeptrT - val: | - // - ade::Shape inshape = input->get_tensor()->shape(); - ade::Shape kernelshape = kernel->get_tensor()->shape(); - ade::CoordptrT input_shaper(new ade::CoordMap( - [kernelshape](ade::MatrixT fwd) - { - for (ade::RankT i = 0; i < ade::rank_cap; ++i) - { - fwd[i][i] = 1; - } - for (ade::RankT i = 0; i < ade::rank_cap; ++i) - { - fwd[ade::rank_cap][i] = -kernelshape.at(i) + 1; - } - } - )); - - ade::CoordptrT kernel_shaper(new ade::CoordMap( - [inshape](ade::MatrixT fwd) - { - for (ade::RankT i = 0; i < ade::rank_cap; ++i) - { - fwd[i][i] = -1; - } - for (ade::RankT i = 0; i < ade::rank_cap; ++i) - { - fwd[ade::rank_cap][i] = inshape.at(i) + 1; - } - } - )); - - ade::CoordT kernel_dims; - auto it = kernel_dims.begin(); - std::fill(it, kernel_dims.end(), ade::rank_cap); - std::copy(dims.begin(), dims.end(), it); - return ead::make_functor(ade::Opcode{"CONV",::age::CONV}, { - ead::FuncArg(input, input_shaper, nullptr), - ead::FuncArg(kernel, kernel_shaper, - std::make_shared(kernel_dims, true)), - }); - - template: typename T - name: reduce_sum_1d - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - auto red = ::tenncor::reduce_sum(arg, dimension, 1); - - std::vector indices(ade::rank_cap); - auto bt = indices.begin(); - auto it = bt + dimension; - std::iota(bt, it, 0); - 
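The reduce_*_1d helpers defined here (and repeated for prod, min, and max below) all build the same index vector before permuting: ranks before the reduced dimension keep their position, ranks after it shift down by one, and the reduced rank is moved to the last slot. A standalone sketch of that ordering, using rank_cap = 8 only for illustration:

    // Permutation order built by the reduce_*_1d entries: the reduced rank is
    // pushed to the end so the remaining dimensions stay contiguous at the front.
    #include <cstdint>
    #include <numeric>
    #include <vector>

    std::vector<uint8_t> reduce_1d_order(uint8_t dimension, uint8_t rank_cap = 8)
    {
        std::vector<uint8_t> indices(rank_cap);
        auto bt = indices.begin();
        auto it = bt + dimension;
        std::iota(bt, it, uint8_t(0));                        // ranks 0 .. dimension-1 unchanged
        std::iota(it, indices.end(), uint8_t(dimension + 1)); // later ranks shift down by one
        indices[rank_cap - 1] = dimension;                    // reduced rank goes last
        return indices;
    }
    // reduce_1d_order(1) -> {0, 2, 3, 4, 5, 6, 7, 1}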
std::iota(it, indices.end(), dimension + 1); - indices[ade::rank_cap - 1] = dimension; - return ::tenncor::permute(red, indices); - - template: typename T - name: reduce_prod_1d - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - auto red = ::tenncor::reduce_prod(arg, dimension, 1); - - std::vector indices(ade::rank_cap); - auto bt = indices.begin(); - auto it = bt + dimension; - std::iota(bt, it, 0); - std::iota(it, indices.end(), dimension + 1); - indices[ade::rank_cap - 1] = dimension; - return ::tenncor::permute(red, indices); - - template: typename T - name: reduce_min_1d - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - auto red = ::tenncor::reduce_min(arg, dimension, 1); - - std::vector indices(ade::rank_cap); - auto bt = indices.begin(); - auto it = bt + dimension; - std::iota(bt, it, 0); - std::iota(it, indices.end(), dimension + 1); - indices[ade::rank_cap - 1] = dimension; - return ::tenncor::permute(red, indices); - - template: typename T - name: reduce_max_1d - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - auto red = ::tenncor::reduce_max(arg, dimension, 1); - - std::vector indices(ade::rank_cap); - auto bt = indices.begin(); - auto it = bt + dimension; - std::iota(bt, it, 0); - std::iota(it, indices.end(), dimension + 1); - indices[ade::rank_cap - 1] = dimension; - return ::tenncor::permute(red, indices); - - template: typename T - name: transpose - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ::tenncor::permute(arg, {1, 0}); - - template: typename T - name: reduce_mean - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: return ::tenncor::div(::tenncor::reduce_sum(arg), ::tenncor::n_elems(arg)); - - template: typename T - name: reduce_mean_1d - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: | - // - auto red = ::tenncor::reduce_sum_1d(arg, dimension); - auto dim = ead::make_constant_scalar(arg->shape().at(dimension), red->shape()); - return ::tenncor::div(red, dim); - - template: typename T - name: reduce_l2norm - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: offset - default: '0' - - dtype: ade::RankT - name: ndims - default: ade::rank_cap - out: - type: ead::NodeptrT - val: return ::tenncor::sqrt(::tenncor::reduce_sum(::tenncor::square(arg), offset, ndims)); - - template: typename T - name: reduce_l2norm_1d - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: dimension - out: - type: ead::NodeptrT - val: return ::tenncor::sqrt(::tenncor::reduce_sum_1d(::tenncor::square(arg), dimension)); - - template: typename T - name: clip_by_range - args: - - dtype: ead::NodeptrT - name: arg - - dtype: T - name: minval - - dtype: T - name: maxval - out: - type: ead::NodeptrT - val: | - // - if (minval > maxval) - { - logs::fatal("min value is below max"); - } - ade::Shape shape = arg->shape(); - auto lo = ead::make_constant_scalar(minval, shape); - auto hi = ead::make_constant_scalar(maxval, shape); - auto out = ::tenncor::max(::tenncor::min(arg, hi), lo); - tag::recursive_group_tag(out->get_tensor(), "clip_by_range", { - arg->get_tensor().get(), - lo->get_tensor().get(), - hi->get_tensor().get(), - }); - return out; - - template: typename T - name: clip_by_l2norm 
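clip_by_range above lowers to max(min(arg, hi), lo); the clip_by_l2norm entry that follows rescales its argument only when the tensor's l2 norm exceeds the given limit. A scalar-sized sketch of that rule (illustrative only; the real entry extends the reduced norm back to the argument's shape before dividing):

    // Scalar analogue of clip_by_l2norm: leave x alone while its norm is under
    // the limit, otherwise scale it so the norm equals the limit.
    #include <cmath>
    #include <stdexcept>

    float clip_by_l2norm_scalar(float x, float limit)
    {
        if (limit == 0)
        {
            throw std::invalid_argument("cannot clip with an upper limit of 0");
        }
        float norm = std::sqrt(x * x); // the l2 norm of a single element is |x|
        return norm < limit ? x : x * limit / norm;
    }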
- args: - - dtype: ead::NodeptrT - name: arg - - dtype: T - name: upper - out: - type: ead::NodeptrT - val: | - // - if (upper == 0) - { - logs::fatal("cannot clip_by_norm with a upper limit of 0"); - } - ade::Shape shape = arg->shape(); - auto limit = ead::make_constant_scalar(upper, shape); - auto norm = ::tenncor::extend(::tenncor::reduce_l2norm(arg), 0, - std::vector(shape.begin(), shape.end())); - auto out = ::tenncor::if_then_else(::tenncor::lt(norm, limit), - arg, ::tenncor::div(::tenncor::mul(arg, limit), norm)); - tag::recursive_group_tag(out->get_tensor(), "clip_by_l2norm", { - arg->get_tensor().get(), - limit->get_tensor().get(), - }); - return out; - description: 'clip by l2norm ((todo) allow l2norm to be configurable)' - - template: typename T - name: sum - args: - - dtype: ead::NodesT - name: args - out: - type: ead::NodeptrT - val: | - // - if (args.empty()) - { - logs::fatal("cannot sum without arguments"); - } - ead::NodeptrT out = args[0]; - for (size_t i = 1, n = args.size(); i < n; ++i) - { - out = ::tenncor::add(out, args[i]); - } - return out; - - template: typename T - name: prod - args: - - dtype: ead::NodesT - name: args - out: - type: ead::NodeptrT - val: | - // - if (args.empty()) - { - logs::fatal("cannot sum without arguments"); - } - ead::NodeptrT out = args[0]; - for (size_t i = 1, n = args.size(); i < n; ++i) - { - out = ::tenncor::mul(out, args[i]); - } - return out; - - template: typename T - name: softmax - args: - - dtype: ead::NodeptrT - name: arg - - dtype: ade::RankT - name: offset - default: '0' - - dtype: ade::RankT - name: ndims - default: ade::rank_cap - out: - type: ead::NodeptrT - val: | - // - auto exarg = exp(arg); - ade::Shape shape = exarg->shape(); - auto it = shape.begin() + offset; - std::vector xlist(it, it + ndims); - auto out = ::tenncor::div(exarg, - ::tenncor::extend(::tenncor::reduce_sum(exarg, offset, offset+ndims), - offset, xlist)); - tag::recursive_group_tag(out->get_tensor(), "softmax", { - arg->get_tensor().get()}); - return out; - - template: typename T - name: sign - args: - - dtype: ead::NodeptrT - name: x - out: - type: ead::NodeptrT - val: return ::tenncor::pow(x,ead::make_constant_scalar(0,x->shape())); - tenncor::random: - - template: typename T - name: rand_unif - args: - - dtype: ead::NodeptrT - name: arg1 - - dtype: ead::NodeptrT - name: arg2 - out: - type: ead::NodeptrT - val: | - // - return ead::make_functor( - ade::Opcode{"RAND_UNIF",::age::RAND_UNIF},{ - ead::identity_map(arg1), - ead::identity_map(arg2) - }); - - template: typename T - name: rand_binom_one - args: - - dtype: ead::NodeptrT - name: arg - out: - type: ead::NodeptrT - val: | - // - const ade::Shape& shape = arg->get_tensor()->shape(); - auto trial = ::tenncor::random::rand_unif( - ead::convert_to_node(ead::make_variable_scalar((T) 0, shape)), - ead::convert_to_node(ead::make_variable_scalar((T) 1, shape))); - return ::tenncor::lt(trial, arg); - tenncor::nn: - - template: typename T - name: relu - args: - - dtype: ead::NodeptrT - name: x - out: - type: ead::NodeptrT - val: return ::tenncor::max(x,ead::make_constant_scalar(0,x->shape())); - - template: typename T - name: conv2d - args: - - dtype: ead::NodeptrT - name: image - - dtype: ead::NodeptrT - name: kernel - out: - type: ead::NodeptrT - val: | - // - // image must be in form [in, width, height, batch] - // kernel must be in form [out, in, width, height] - // see https://www.tensorflow.org/api_docs/python/tf/nn/conv2d - ade::DimT nfilters = kernel->shape().at(0); - ead::NodesT convolveds; - 
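The conv2d entry builds its output one filter at a time: the loop that follows slices filter i out of the kernel, convolves it with the image, and pads the single-channel result with i leading and nfilters - i - 1 trailing zeros along rank 0, so the final sum places each filter in its own output-channel slot. A standalone sketch of that padding arithmetic:

    // Padding applied to filter i's convolution result along rank 0 (the output
    // channel dimension): i zero-channels before, nfilters - i - 1 after, so the
    // summed per-filter results never overlap.
    #include <cstdint>
    #include <utility>

    std::pair<uint8_t, uint8_t> conv2d_channel_pad(uint8_t filter_index, uint8_t nfilters)
    {
        return {filter_index, static_cast<uint8_t>(nfilters - filter_index - 1)};
    }
    // conv2d_channel_pad(2, 5) -> {2, 2}: filter 2 lands in channel 2 of 5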
convolveds.reserve(nfilters); - for (ade::DimT i = 0; i < nfilters; ++i) - { - auto filter = ::tenncor::permute( - ::tenncor::slice(kernel, i, 1, 0), - {1, 2, 3, 0}); - auto conved = ::tenncor::convolution(image, filter, - {0, 1, 2}); - auto padded = ::tenncor::pad(conved, - {i, nfilters - i - 1}, 0); - convolveds.push_back(padded); - } - auto out = ::tenncor::sum(convolveds); - tag::recursive_group_tag(out->get_tensor(), "conv2d", { - image->get_tensor().get(), - kernel->get_tensor().get() - }); - return out; - - template: typename T - name: fully_connect - args: - - dtype: ead::NodesT - name: inputs - - dtype: ead::NodesT - name: weights - - dtype: ead::NodeptrT - name: bias - out: - type: ead::NodeptrT - val: | - // - if (weights.empty()) - { - logs::fatal("cannot create a fully connected layer without weights"); - } - size_t ninputs = inputs.size(); - if (ninputs != weights.size()) - { - logs::fatalf( - "number of inputs (%d) must equal the number of weights (%d)", - ninputs, weights.size()); - } - std::unordered_set ignores = { - inputs[0]->get_tensor().get(), - weights[0]->get_tensor().get() - }; - auto out = ::tenncor::matmul(inputs[0], weights[0]); - for (size_t i = 1; i < ninputs; ++i) - { - ignores.emplace(inputs[i]->get_tensor().get()); - ignores.emplace(weights[i]->get_tensor().get()); - out = ::tenncor::add(out, ::tenncor::matmul(inputs[i], weights[i])); - } - if (nullptr != bias) - { - const ade::Shape& shape = out->shape(); - out = ::tenncor::add(out, ::tenncor::extend(bias, 1, {shape.at(1)})); - ignores.emplace(bias->get_tensor().get()); - } - tag::recursive_group_tag(out->get_tensor(), "fully_connect", ignores); - return out; diff --git a/cfg/eteq.yml b/cfg/eteq.yml new file mode 100644 index 000000000..9e4ab852b --- /dev/null +++ b/cfg/eteq.yml @@ -0,0 +1,1343 @@ +--- +dtype: + DOUBLE: double + FLOAT: float + INT8: int8_t + UINT8: uint8_t + INT16: int16_t + UINT16: uint16_t + INT32: int32_t + UINT32: uint32_t + INT64: int64_t + UINT64: uint64_t +opcode: + operator_path: eteq/operator.hpp + params: teq::Shape shape, eteq::EigenptrT& out, std::vector>& in + opcalls: + ABS: out = eteq::abs(shape,in[0]); + NEG: out = eteq::neg(shape,in[0]); + SIN: out = eteq::sin(shape,in[0]); + COS: out = eteq::cos(shape,in[0]); + TAN: out = eteq::tan(shape,in[0]); + EXP: out = eteq::exp(shape,in[0]); + LOG: out = eteq::log(shape,in[0]); + SQRT: out = eteq::sqrt(shape,in[0]); + ROUND: out = eteq::round(shape,in[0]); + SIGMOID: out = eteq::sigmoid(shape,in[0]); + SIGMOID_GRAD: out = eteq::sigmoid_grad(shape,in[0]); + TANH: out = eteq::tanh(shape,in[0]); + SQUARE: out = eteq::square(shape,in[0]); + CUBE: out = eteq::cube(shape,in[0]); + POW: out = eteq::pow(shape,in[0],in[1]); + ADD: out = eteq::add(shape,in[0],in[1]); + SUB: out = eteq::sub(shape,in[0],in[1]); + MUL: out = eteq::mul(shape,in[0],in[1]); + DIV: out = eteq::div(shape,in[0],in[1]); + MIN: out = eteq::min(shape,in[0],in[1]); + MAX: out = eteq::max(shape,in[0],in[1]); + EQ: out = eteq::eq(shape,in[0],in[1]); + NEQ: out = eteq::neq(shape,in[0],in[1]); + LT: out = eteq::lt(shape,in[0],in[1]); + GT: out = eteq::gt(shape,in[0],in[1]); + RAND_UNIF: out = eteq::rand_uniform(shape,in[0],in[1]); + REDUCE_SUM: out = eteq::reduce_sum(shape,in[0]); + REDUCE_PROD: out = eteq::reduce_prod(shape,in[0]); + REDUCE_MIN: out = eteq::reduce_min(shape,in[0]); + REDUCE_MAX: out = eteq::reduce_max(shape,in[0]); + PERMUTE: out = eteq::permute(shape,in[0]); + EXTEND: out = eteq::extend(shape,in[0]); + MATMUL: out = eteq::matmul(shape,in[0],in[1]); + CONV: 
out = eteq::convolution(shape,in[0],in[1]); + SLICE: out = eteq::slice(shape,in[0]); + PAD: out = eteq::pad(shape,in[0]); + CONV_IMG_GRAD: out = eteq::convolution_image_grad(shape,in[0],in[1]); + CONV_KRN_GRAD: out = eteq::convolution_kernel_grad(shape,in[0],in[1]); + SELECT: out = eteq::select(shape, in[0], in[1], in[2]); +api: + pybind_type: float + includes: + - '"eteq/constant.hpp"' + - '"eteq/variable.hpp"' + - '"eteq/functor.hpp"' + - '"tag/group.hpp"' + - '"tag/prop.hpp"' + namespaces: + tenncor: + - template: typename T + name: abs + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"ABS",::egen::ABS},{eteq::identity_map(arg)}); + - template: typename T + name: neg + operator: "-" + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"NEG",::egen::NEG},{eteq::identity_map(arg)}); + - template: typename T + name: sin + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"SIN",::egen::SIN},{eteq::identity_map(arg)}); + - template: typename T + name: cos + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"COS",::egen::COS},{eteq::identity_map(arg)}); + - template: typename T + name: tan + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"TAN",::egen::TAN},{eteq::identity_map(arg)}); + - template: typename T + name: exp + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"EXP",::egen::EXP},{eteq::identity_map(arg)}); + - template: typename T + name: log + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"LOG",::egen::LOG},{eteq::identity_map(arg)}); + - template: typename T + name: sqrt + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"SQRT",::egen::SQRT},{eteq::identity_map(arg)}); + - template: typename T + name: round + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"ROUND",::egen::ROUND},{eteq::identity_map(arg)}); + - template: typename T + name: sigmoid + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"SIGMOID",::egen::SIGMOID},{eteq::identity_map(arg)}); + - template: typename T + name: sigmoid_grad + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"SIGMOID_GRAD",::egen::SIGMOID_GRAD},{eteq::identity_map(arg)}); + - template: typename T + name: tanh + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"TANH",::egen::TANH},{eteq::identity_map(arg)}); + - template: typename T + name: square + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"SQUARE",::egen::SQUARE},{eteq::identity_map(arg)}); + - template: typename T + name: cube + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"CUBE",::egen::CUBE},{eteq::identity_map(arg)}); + - template: typename T + name: pow + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: 
eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"POW",::egen::POW},{eteq::identity_map(arg1),eteq::identity_map(arg2)}); + - template: typename T + name: pow + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"POW",::egen::POW},{ + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + - template: typename T + name: pow + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"POW",::egen::POW},{ + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + eteq::identity_map(arg1), + }); + - template: typename T + name: add + operator: + + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"ADD",::egen::ADD}, { + eteq::identity_map(arg1), + eteq::identity_map(arg2), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + tag::get_group_reg().group_tag(out->get_tensor(), "sum"); + return out; + - template: typename T + name: add + operator: + + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"ADD",::egen::ADD}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + tag::get_group_reg().group_tag(out->get_tensor(), "sum"); + return out; + - template: typename T + name: add + operator: + + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"ADD",::egen::ADD}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + tag::get_group_reg().group_tag(out->get_tensor(), "sum"); + return out; + - template: typename T + name: sub + operator: "-" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"SUB",::egen::SUB},{eteq::identity_map(arg1),eteq::identity_map(arg2)}); + - template: typename T + name: sub + operator: "-" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"SUB",::egen::SUB},{ + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + - template: typename T + name: sub + operator: "-" + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"SUB",::egen::SUB},{ + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + eteq::identity_map(arg1), + }); + - template: typename T + name: mul + operator: "*" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MUL",::egen::MUL}, { + eteq::identity_map(arg1), + 
eteq::identity_map(arg2), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + tag::get_group_reg().group_tag(out->get_tensor(), "prod"); + return out; + - template: typename T + name: mul + operator: "*" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MUL",::egen::MUL}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + tag::get_group_reg().group_tag(out->get_tensor(), "prod"); + return out; + - template: typename T + name: mul + operator: "*" + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MUL",::egen::MUL}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + tag::get_group_reg().group_tag(out->get_tensor(), "prod"); + return out; + - template: typename T + name: div + operator: / + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"DIV",::egen::DIV},{eteq::identity_map(arg1),eteq::identity_map(arg2)}); + - template: typename T + name: div + operator: / + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"DIV",::egen::DIV},{ + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + - template: typename T + name: div + operator: / + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"DIV",::egen::DIV},{ + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + eteq::identity_map(arg1), + }); + - template: typename T + name: eq + operator: == + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"EQ",::egen::EQ}, { + eteq::identity_map(arg1), + eteq::identity_map(arg2), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: eq + operator: == + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"EQ",::egen::EQ}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: eq + operator: == + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"EQ",::egen::EQ}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: neq + operator: "!=" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + 
name: arg2 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"NEQ",::egen::NEQ}, { + eteq::identity_map(arg1), + eteq::identity_map(arg2), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: neq + operator: "!=" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"NEQ",::egen::NEQ}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: neq + operator: "!=" + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"NEQ",::egen::NEQ}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: lt + operator: < + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"LT",::egen::LT},{eteq::identity_map(arg1),eteq::identity_map(arg2)}); + - template: typename T + name: lt + operator: < + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"LT",::egen::LT},{ + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + - template: typename T + name: lt + operator: < + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"LT",::egen::LT},{ + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + eteq::identity_map(arg1), + }); + - template: typename T + name: gt + operator: ">" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"GT",::egen::GT},{eteq::identity_map(arg1),eteq::identity_map(arg2)}); + - template: typename T + name: gt + operator: ">" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"GT",::egen::GT},{ + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + - template: typename T + name: gt + operator: ">" + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"GT",::egen::GT},{ + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + eteq::identity_map(arg1), + }); + - template: typename T + name: min + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MIN",::egen::MIN}, { + eteq::identity_map(arg1), + eteq::identity_map(arg2), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: min + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: 
T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MIN",::egen::MIN}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: min + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MIN",::egen::MIN}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: max + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MAX",::egen::MAX}, { + eteq::identity_map(arg1), + eteq::identity_map(arg2), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: max + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MAX",::egen::MAX}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: max + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MAX",::egen::MAX}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: if_then_else + args: + - dtype: eteq::NodeptrT + name: condition + - dtype: eteq::NodeptrT + name: then + - dtype: eteq::NodeptrT + name: otherwise + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"SELECT",::egen::SELECT},{ + eteq::identity_map(condition), + eteq::identity_map(then), + eteq::identity_map(otherwise) + }); + - template: typename T + name: permute + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: std::vector + name: order + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"PERMUTE",::egen::PERMUTE},{eteq::permute_map(arg,order)}); + - template: typename T + name: extend + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: offset + - dtype: std::vector + name: xlist + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"EXTEND",::egen::EXTEND},{eteq::extend_map(arg,offset,xlist)}); + - template: typename T + name: reduce_sum + args: + - dtype: eteq::NodeptrT + name: tens + - dtype: teq::RankT + name: offset + default: '0' + - dtype: teq::RankT + name: ndims + default: teq::rank_cap + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"REDUCE_SUM",::egen::REDUCE_SUM},{eteq::reduce_map(tens,offset,ndims)}); + description: sum values ignoring coordinate indices between start and end + - template: typename T + name: reduce_prod + args: + - dtype: eteq::NodeptrT + name: tens + - dtype: teq::RankT + name: offset + default: '0' + - dtype: teq::RankT + name: ndims + 
default: teq::rank_cap + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"REDUCE_PROD",::egen::REDUCE_PROD},{eteq::reduce_map(tens,offset,ndims)}); + description: multiply values ignoring coordinate indices between start and end + - template: typename T + name: reduce_min + args: + - dtype: eteq::NodeptrT + name: tens + - dtype: teq::RankT + name: offset + default: '0' + - dtype: teq::RankT + name: ndims + default: teq::rank_cap + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"REDUCE_MIN",::egen::REDUCE_MIN},{eteq::reduce_map(tens,offset,ndims)}); + description: min values ignoring coordinate indices between start and end + - template: typename T + name: reduce_max + args: + - dtype: eteq::NodeptrT + name: tens + - dtype: teq::RankT + name: offset + default: '0' + - dtype: teq::RankT + name: ndims + default: teq::rank_cap + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"REDUCE_MAX",::egen::REDUCE_MAX},{eteq::reduce_map(tens,offset,ndims)}); + description: max values ignoring coordinate indices between start and end + - template: typename T + name: n_elems + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_constant_scalar(arg->get_tensor()->shape().n_elems(), teq::Shape()); + - template: typename T + name: n_dims + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: rank + out: + type: eteq::NodeptrT + val: return eteq::make_constant_scalar(arg->get_tensor()->shape().at(rank), teq::Shape()); + - template: typename T + name: slice + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: offset + - dtype: teq::RankT + name: extent + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"SLICE",::egen::SLICE}, { + eteq::slice_map(arg, offset, extent, dimension)}); + - template: typename T + name: pad + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: std::pair + name: padding + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"PAD",::egen::PAD}, { + eteq::pad_map(arg, padding, dimension)}); + - template: typename T + name: matmul + args: + - dtype: eteq::NodeptrT + name: a + - dtype: eteq::NodeptrT + name: b + out: + type: eteq::NodeptrT + val: | + // + teq::Shape ashape = a->get_tensor()->shape(); + teq::Shape bshape = b->get_tensor()->shape(); + teq::DimT ncommon = ashape.at(0); + teq::DimT nrow = ashape.at(1); + teq::DimT ncol = bshape.at(0); + if (ncommon != bshape.at(1)) + { + logs::fatalf("invalid matmul shapes %s and %s", + ashape.to_string().c_str(), bshape.to_string().c_str()); + } + + teq::CoordptrT left_shaper(new teq::CoordMap( + [=](teq::MatrixT fwd) + { + for (teq::RankT i = 3; i < teq::mat_dim; ++i) + { + fwd[i][i] = 1; + } + fwd[2][0] = ncol; + fwd[1][1] = 1; + fwd[0][2] = 1.0 / ncommon; + } + )); + + teq::CoordptrT right_shaper(new teq::CoordMap( + [=](teq::MatrixT fwd) + { + for (teq::RankT i = 3; i < teq::mat_dim; ++i) + { + fwd[i][i] = 1; + } + fwd[0][0] = 1; + fwd[2][1] = nrow; + fwd[1][2] = 1.0 / ncommon; + } + )); + return eteq::make_functor(teq::Opcode{"MATMUL",::egen::MATMUL}, { + eteq::FuncArg(a, left_shaper, nullptr), + eteq::FuncArg(b, right_shaper, nullptr) + }); + - template: typename T + name: convolution + args: + - dtype: eteq::NodeptrT + name: input + - dtype: eteq::NodeptrT + name: kernel + - dtype: std::vector + name: dims + out: + type: 
eteq::NodeptrT + val: | + // + teq::Shape inshape = input->get_tensor()->shape(); + teq::Shape kernelshape = kernel->get_tensor()->shape(); + teq::CoordptrT input_shaper(new teq::CoordMap( + [kernelshape](teq::MatrixT fwd) + { + for (teq::RankT i = 0; i < teq::rank_cap; ++i) + { + fwd[i][i] = 1; + } + for (teq::RankT i = 0; i < teq::rank_cap; ++i) + { + fwd[teq::rank_cap][i] = -kernelshape.at(i) + 1; + } + } + )); + + teq::CoordptrT kernel_shaper(new teq::CoordMap( + [inshape](teq::MatrixT fwd) + { + for (teq::RankT i = 0; i < teq::rank_cap; ++i) + { + fwd[i][i] = -1; + } + for (teq::RankT i = 0; i < teq::rank_cap; ++i) + { + fwd[teq::rank_cap][i] = inshape.at(i) + 1; + } + } + )); + + teq::CoordT kernel_dims; + auto it = kernel_dims.begin(); + std::fill(it, kernel_dims.end(), teq::rank_cap); + std::copy(dims.begin(), dims.end(), it); + return eteq::make_functor(teq::Opcode{"CONV",::egen::CONV}, { + eteq::FuncArg(input, input_shaper, nullptr), + eteq::FuncArg(kernel, kernel_shaper, + std::make_shared(kernel_dims, true)), + }); + - template: typename T + name: reduce_sum_1d + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + auto red = ::tenncor::reduce_sum(arg, dimension, 1); + + std::vector indices(teq::rank_cap); + auto bt = indices.begin(); + auto it = bt + dimension; + std::iota(bt, it, 0); + std::iota(it, indices.end(), dimension + 1); + indices[teq::rank_cap - 1] = dimension; + return ::tenncor::permute(red, indices); + - template: typename T + name: reduce_prod_1d + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + auto red = ::tenncor::reduce_prod(arg, dimension, 1); + + std::vector indices(teq::rank_cap); + auto bt = indices.begin(); + auto it = bt + dimension; + std::iota(bt, it, 0); + std::iota(it, indices.end(), dimension + 1); + indices[teq::rank_cap - 1] = dimension; + return ::tenncor::permute(red, indices); + - template: typename T + name: reduce_min_1d + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + auto red = ::tenncor::reduce_min(arg, dimension, 1); + + std::vector indices(teq::rank_cap); + auto bt = indices.begin(); + auto it = bt + dimension; + std::iota(bt, it, 0); + std::iota(it, indices.end(), dimension + 1); + indices[teq::rank_cap - 1] = dimension; + return ::tenncor::permute(red, indices); + - template: typename T + name: reduce_max_1d + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + auto red = ::tenncor::reduce_max(arg, dimension, 1); + + std::vector indices(teq::rank_cap); + auto bt = indices.begin(); + auto it = bt + dimension; + std::iota(bt, it, 0); + std::iota(it, indices.end(), dimension + 1); + indices[teq::rank_cap - 1] = dimension; + return ::tenncor::permute(red, indices); + - template: typename T + name: transpose + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return ::tenncor::permute(arg, {1, 0}); + - template: typename T + name: reduce_mean + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return ::tenncor::div(::tenncor::reduce_sum(arg), ::tenncor::n_elems(arg)); + - template: typename T + name: reduce_mean_1d + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + auto red = 
::tenncor::reduce_sum_1d(arg, dimension); + auto dim = eteq::make_constant_scalar(arg->shape().at(dimension), red->shape()); + return ::tenncor::div(red, dim); + - template: typename T + name: reduce_l2norm + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: offset + default: '0' + - dtype: teq::RankT + name: ndims + default: teq::rank_cap + out: + type: eteq::NodeptrT + val: return ::tenncor::sqrt(::tenncor::reduce_sum(::tenncor::square(arg), offset, ndims)); + - template: typename T + name: reduce_l2norm_1d + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: return ::tenncor::sqrt(::tenncor::reduce_sum_1d(::tenncor::square(arg), dimension)); + - template: typename T + name: clip_by_range + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: T + name: minval + - dtype: T + name: maxval + out: + type: eteq::NodeptrT + val: | + // + if (minval > maxval) + { + logs::fatal("min value is below max"); + } + teq::Shape shape = arg->shape(); + auto lo = eteq::make_constant_scalar(minval, shape); + auto hi = eteq::make_constant_scalar(maxval, shape); + auto out = ::tenncor::max(::tenncor::min(arg, hi), lo); + tag::recursive_group_tag(out->get_tensor(), "clip_by_range", { + arg->get_tensor().get(), + lo->get_tensor().get(), + hi->get_tensor().get(), + }); + return out; + - template: typename T + name: clip_by_l2norm + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: T + name: upper + out: + type: eteq::NodeptrT + val: | + // + if (upper == 0) + { + logs::fatal("cannot clip_by_norm with a upper limit of 0"); + } + teq::Shape shape = arg->shape(); + auto limit = eteq::make_constant_scalar(upper, shape); + auto norm = ::tenncor::extend(::tenncor::reduce_l2norm(arg), 0, + std::vector(shape.begin(), shape.end())); + auto out = ::tenncor::if_then_else(::tenncor::lt(norm, limit), + arg, ::tenncor::div(::tenncor::mul(arg, limit), norm)); + tag::recursive_group_tag(out->get_tensor(), "clip_by_l2norm", { + arg->get_tensor().get(), + limit->get_tensor().get(), + }); + return out; + description: 'clip by l2norm ((todo) allow l2norm to be configurable)' + - template: typename T + name: sum + args: + - dtype: eteq::NodesT + name: args + out: + type: eteq::NodeptrT + val: | + // + if (args.empty()) + { + logs::fatal("cannot sum without arguments"); + } + eteq::NodeptrT out = args[0]; + for (size_t i = 1, n = args.size(); i < n; ++i) + { + out = ::tenncor::add(out, args[i]); + } + return out; + - template: typename T + name: prod + args: + - dtype: eteq::NodesT + name: args + out: + type: eteq::NodeptrT + val: | + // + if (args.empty()) + { + logs::fatal("cannot sum without arguments"); + } + eteq::NodeptrT out = args[0]; + for (size_t i = 1, n = args.size(); i < n; ++i) + { + out = ::tenncor::mul(out, args[i]); + } + return out; + - template: typename T + name: softmax + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: offset + default: '0' + - dtype: teq::RankT + name: ndims + default: teq::rank_cap + out: + type: eteq::NodeptrT + val: | + // + auto exarg = exp(arg); + teq::Shape shape = exarg->shape(); + auto it = shape.begin() + offset; + std::vector xlist(it, it + ndims); + auto out = ::tenncor::div(exarg, + ::tenncor::extend(::tenncor::reduce_sum(exarg, offset, offset+ndims), + offset, xlist)); + tag::recursive_group_tag(out->get_tensor(), "softmax", { + arg->get_tensor().get()}); + return out; + - template: typename T + name: sign + args: + - dtype: eteq::NodeptrT + name: x + out: 
+ type: eteq::NodeptrT + val: return ::tenncor::pow(x,eteq::make_constant_scalar(0,x->shape())); + tenncor::random: + - template: typename T + name: rand_unif + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor( + teq::Opcode{"RAND_UNIF",::egen::RAND_UNIF},{ + eteq::identity_map(arg1), + eteq::identity_map(arg2) + }); + - template: typename T + name: rand_binom_one + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: | + // + const teq::Shape& shape = arg->get_tensor()->shape(); + auto trial = ::tenncor::random::rand_unif( + eteq::convert_to_node(eteq::make_variable_scalar((T) 0, shape)), + eteq::convert_to_node(eteq::make_variable_scalar((T) 1, shape))); + return ::tenncor::lt(trial, arg); + tenncor::nn: + - template: typename T + name: relu + args: + - dtype: eteq::NodeptrT + name: x + out: + type: eteq::NodeptrT + val: return ::tenncor::max(x,eteq::make_constant_scalar(0,x->shape())); + - template: typename T + name: conv2d + args: + - dtype: eteq::NodeptrT + name: image + - dtype: eteq::NodeptrT + name: kernel + out: + type: eteq::NodeptrT + val: | + // + // image must be in form [in, width, height, batch] + // kernel must be in form [out, in, width, height] + // see https://www.tensorflow.org/api_docs/python/tf/nn/conv2d + teq::DimT nfilters = kernel->shape().at(0); + eteq::NodesT convolveds; + convolveds.reserve(nfilters); + for (teq::DimT i = 0; i < nfilters; ++i) + { + auto filter = ::tenncor::permute( + ::tenncor::slice(kernel, i, 1, 0), + {1, 2, 3, 0}); + auto conved = ::tenncor::convolution(image, filter, + {0, 1, 2}); + auto padded = ::tenncor::pad(conved, + {i, nfilters - i - 1}, 0); + convolveds.push_back(padded); + } + auto out = ::tenncor::sum(convolveds); + tag::recursive_group_tag(out->get_tensor(), "conv2d", { + image->get_tensor().get(), + kernel->get_tensor().get() + }); + return out; + - template: typename T + name: fully_connect + args: + - dtype: eteq::NodesT + name: inputs + - dtype: eteq::NodesT + name: weights + - dtype: eteq::NodeptrT + name: bias + out: + type: eteq::NodeptrT + val: | + // + if (weights.empty()) + { + logs::fatal("cannot create a fully connected layer without weights"); + } + size_t ninputs = inputs.size(); + if (ninputs != weights.size()) + { + logs::fatalf( + "number of inputs (%d) must equal the number of weights (%d)", + ninputs, weights.size()); + } + std::unordered_set ignores = { + inputs[0]->get_tensor().get(), + weights[0]->get_tensor().get() + }; + auto out = ::tenncor::matmul(inputs[0], weights[0]); + for (size_t i = 1; i < ninputs; ++i) + { + ignores.emplace(inputs[i]->get_tensor().get()); + ignores.emplace(weights[i]->get_tensor().get()); + out = ::tenncor::add(out, ::tenncor::matmul(inputs[i], weights[i])); + } + if (nullptr != bias) + { + const teq::Shape& shape = out->shape(); + out = ::tenncor::add(out, ::tenncor::extend(bias, 1, {shape.at(1)})); + ignores.emplace(bias->get_tensor().get()); + } + tag::recursive_group_tag(out->get_tensor(), "fully_connect", ignores); + return out; diff --git a/cfg/eteq_min.yml b/cfg/eteq_min.yml new file mode 100644 index 000000000..3a3cd2f0d --- /dev/null +++ b/cfg/eteq_min.yml @@ -0,0 +1,1336 @@ +--- +dtype: + DOUBLE: double + FLOAT: float + INT32: int32_t +opcode: + operator_path: eteq/operator.hpp + params: teq::Shape shape, eteq::EigenptrT& out, std::vector>& in + opcalls: + ABS: out = eteq::abs(shape,in[0]); + NEG: out = eteq::neg(shape,in[0]); + SIN: out 
= eteq::sin(shape,in[0]); + COS: out = eteq::cos(shape,in[0]); + TAN: out = eteq::tan(shape,in[0]); + EXP: out = eteq::exp(shape,in[0]); + LOG: out = eteq::log(shape,in[0]); + SQRT: out = eteq::sqrt(shape,in[0]); + ROUND: out = eteq::round(shape,in[0]); + SIGMOID: out = eteq::sigmoid(shape,in[0]); + SIGMOID_GRAD: out = eteq::sigmoid_grad(shape,in[0]); + TANH: out = eteq::tanh(shape,in[0]); + SQUARE: out = eteq::square(shape,in[0]); + CUBE: out = eteq::cube(shape,in[0]); + POW: out = eteq::pow(shape,in[0],in[1]); + ADD: out = eteq::add(shape,in[0],in[1]); + SUB: out = eteq::sub(shape,in[0],in[1]); + MUL: out = eteq::mul(shape,in[0],in[1]); + DIV: out = eteq::div(shape,in[0],in[1]); + MIN: out = eteq::min(shape,in[0],in[1]); + MAX: out = eteq::max(shape,in[0],in[1]); + EQ: out = eteq::eq(shape,in[0],in[1]); + NEQ: out = eteq::neq(shape,in[0],in[1]); + LT: out = eteq::lt(shape,in[0],in[1]); + GT: out = eteq::gt(shape,in[0],in[1]); + RAND_UNIF: out = eteq::rand_uniform(shape,in[0],in[1]); + REDUCE_SUM: out = eteq::reduce_sum(shape,in[0]); + REDUCE_PROD: out = eteq::reduce_prod(shape,in[0]); + REDUCE_MIN: out = eteq::reduce_min(shape,in[0]); + REDUCE_MAX: out = eteq::reduce_max(shape,in[0]); + PERMUTE: out = eteq::permute(shape,in[0]); + EXTEND: out = eteq::extend(shape,in[0]); + MATMUL: out = eteq::matmul(shape,in[0],in[1]); + CONV: out = eteq::convolution(shape,in[0],in[1]); + SLICE: out = eteq::slice(shape,in[0]); + PAD: out = eteq::pad(shape,in[0]); + CONV_IMG_GRAD: out = eteq::convolution_image_grad(shape,in[0],in[1]); + CONV_KRN_GRAD: out = eteq::convolution_kernel_grad(shape,in[0],in[1]); + SELECT: out = eteq::select(shape, in[0], in[1], in[2]); +api: + pybind_type: float + includes: + - '"eteq/constant.hpp"' + - '"eteq/variable.hpp"' + - '"eteq/functor.hpp"' + - '"tag/group.hpp"' + - '"tag/prop.hpp"' + namespaces: + tenncor: + - template: typename T + name: abs + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"ABS",::egen::ABS},{eteq::identity_map(arg)}); + - template: typename T + name: neg + operator: "-" + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"NEG",::egen::NEG},{eteq::identity_map(arg)}); + - template: typename T + name: sin + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"SIN",::egen::SIN},{eteq::identity_map(arg)}); + - template: typename T + name: cos + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"COS",::egen::COS},{eteq::identity_map(arg)}); + - template: typename T + name: tan + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"TAN",::egen::TAN},{eteq::identity_map(arg)}); + - template: typename T + name: exp + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"EXP",::egen::EXP},{eteq::identity_map(arg)}); + - template: typename T + name: log + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"LOG",::egen::LOG},{eteq::identity_map(arg)}); + - template: typename T + name: sqrt + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"SQRT",::egen::SQRT},{eteq::identity_map(arg)}); + - template: typename T + name: 
round + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"ROUND",::egen::ROUND},{eteq::identity_map(arg)}); + - template: typename T + name: sigmoid + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"SIGMOID",::egen::SIGMOID},{eteq::identity_map(arg)}); + - template: typename T + name: sigmoid_grad + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"SIGMOID_GRAD",::egen::SIGMOID_GRAD},{eteq::identity_map(arg)}); + - template: typename T + name: tanh + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"TANH",::egen::TANH},{eteq::identity_map(arg)}); + - template: typename T + name: square + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"SQUARE",::egen::SQUARE},{eteq::identity_map(arg)}); + - template: typename T + name: cube + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"CUBE",::egen::CUBE},{eteq::identity_map(arg)}); + - template: typename T + name: pow + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"POW",::egen::POW},{eteq::identity_map(arg1),eteq::identity_map(arg2)}); + - template: typename T + name: pow + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"POW",::egen::POW},{ + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + - template: typename T + name: pow + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"POW",::egen::POW},{ + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + eteq::identity_map(arg1), + }); + - template: typename T + name: add + operator: + + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"ADD",::egen::ADD}, { + eteq::identity_map(arg1), + eteq::identity_map(arg2), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + tag::get_group_reg().group_tag(out->get_tensor(), "sum"); + return out; + - template: typename T + name: add + operator: + + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"ADD",::egen::ADD}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + tag::get_group_reg().group_tag(out->get_tensor(), "sum"); + return out; + - template: typename T + name: add + operator: + + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"ADD",::egen::ADD}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), 
tag::commutative_tag); + tag::get_group_reg().group_tag(out->get_tensor(), "sum"); + return out; + - template: typename T + name: sub + operator: "-" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"SUB",::egen::SUB},{eteq::identity_map(arg1),eteq::identity_map(arg2)}); + - template: typename T + name: sub + operator: "-" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"SUB",::egen::SUB},{ + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + - template: typename T + name: sub + operator: "-" + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"SUB",::egen::SUB},{ + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + eteq::identity_map(arg1), + }); + - template: typename T + name: mul + operator: "*" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MUL",::egen::MUL}, { + eteq::identity_map(arg1), + eteq::identity_map(arg2), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + tag::get_group_reg().group_tag(out->get_tensor(), "prod"); + return out; + - template: typename T + name: mul + operator: "*" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MUL",::egen::MUL}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + tag::get_group_reg().group_tag(out->get_tensor(), "prod"); + return out; + - template: typename T + name: mul + operator: "*" + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MUL",::egen::MUL}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + tag::get_group_reg().group_tag(out->get_tensor(), "prod"); + return out; + - template: typename T + name: div + operator: / + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"DIV",::egen::DIV},{eteq::identity_map(arg1),eteq::identity_map(arg2)}); + - template: typename T + name: div + operator: / + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"DIV",::egen::DIV},{ + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + - template: typename T + name: div + operator: / + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"DIV",::egen::DIV},{ + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + eteq::identity_map(arg1), + }); + - template: typename T + name: eq + operator: == + args: + 
- dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"EQ",::egen::EQ}, { + eteq::identity_map(arg1), + eteq::identity_map(arg2), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: eq + operator: == + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"EQ",::egen::EQ}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: eq + operator: == + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"EQ",::egen::EQ}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: neq + operator: "!=" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"NEQ",::egen::NEQ}, { + eteq::identity_map(arg1), + eteq::identity_map(arg2), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: neq + operator: "!=" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"NEQ",::egen::NEQ}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: neq + operator: "!=" + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"NEQ",::egen::NEQ}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: lt + operator: < + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"LT",::egen::LT},{eteq::identity_map(arg1),eteq::identity_map(arg2)}); + - template: typename T + name: lt + operator: < + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"LT",::egen::LT},{ + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + - template: typename T + name: lt + operator: < + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"LT",::egen::LT},{ + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + eteq::identity_map(arg1), + }); + - template: typename T + name: gt + operator: ">" + args: + - dtype: eteq::NodeptrT + name: arg1 + 
- dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"GT",::egen::GT},{eteq::identity_map(arg1),eteq::identity_map(arg2)}); + - template: typename T + name: gt + operator: ">" + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"GT",::egen::GT},{ + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + - template: typename T + name: gt + operator: ">" + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"GT",::egen::GT},{ + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + eteq::identity_map(arg1), + }); + - template: typename T + name: min + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MIN",::egen::MIN}, { + eteq::identity_map(arg1), + eteq::identity_map(arg2), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: min + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MIN",::egen::MIN}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: min + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MIN",::egen::MIN}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: max + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MAX",::egen::MAX}, { + eteq::identity_map(arg1), + eteq::identity_map(arg2), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: max + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: T + name: scalar + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MAX",::egen::MAX}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: max + args: + - dtype: T + name: scalar + - dtype: eteq::NodeptrT + name: arg1 + out: + type: eteq::NodeptrT + val: | + // + auto out = eteq::make_functor(teq::Opcode{"MAX",::egen::MAX}, { + eteq::identity_map(arg1), + eteq::identity_map(eteq::make_constant_scalar(scalar, arg1->shape())), + }); + tag::get_property_reg().property_tag(out->get_tensor(), tag::commutative_tag); + return out; + - template: typename T + name: if_then_else + args: + - dtype: eteq::NodeptrT + name: condition + - dtype: eteq::NodeptrT + name: then + - dtype: eteq::NodeptrT + name: otherwise + out: + type: eteq::NodeptrT + val: | + 
// + return eteq::make_functor(teq::Opcode{"SELECT",::egen::SELECT},{ + eteq::identity_map(condition), + eteq::identity_map(then), + eteq::identity_map(otherwise) + }); + - template: typename T + name: permute + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: std::vector + name: order + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"PERMUTE",::egen::PERMUTE},{eteq::permute_map(arg,order)}); + - template: typename T + name: extend + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: offset + - dtype: std::vector + name: xlist + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"EXTEND",::egen::EXTEND},{eteq::extend_map(arg,offset,xlist)}); + - template: typename T + name: reduce_sum + args: + - dtype: eteq::NodeptrT + name: tens + - dtype: teq::RankT + name: offset + default: '0' + - dtype: teq::RankT + name: ndims + default: teq::rank_cap + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"REDUCE_SUM",::egen::REDUCE_SUM},{eteq::reduce_map(tens,offset,ndims)}); + description: sum values ignoring coordinate indices between start and end + - template: typename T + name: reduce_prod + args: + - dtype: eteq::NodeptrT + name: tens + - dtype: teq::RankT + name: offset + default: '0' + - dtype: teq::RankT + name: ndims + default: teq::rank_cap + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"REDUCE_PROD",::egen::REDUCE_PROD},{eteq::reduce_map(tens,offset,ndims)}); + description: multiply values ignoring coordinate indices between start and end + - template: typename T + name: reduce_min + args: + - dtype: eteq::NodeptrT + name: tens + - dtype: teq::RankT + name: offset + default: '0' + - dtype: teq::RankT + name: ndims + default: teq::rank_cap + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"REDUCE_MIN",::egen::REDUCE_MIN},{eteq::reduce_map(tens,offset,ndims)}); + description: min values ignoring coordinate indices between start and end + - template: typename T + name: reduce_max + args: + - dtype: eteq::NodeptrT + name: tens + - dtype: teq::RankT + name: offset + default: '0' + - dtype: teq::RankT + name: ndims + default: teq::rank_cap + out: + type: eteq::NodeptrT + val: return eteq::make_functor(teq::Opcode{"REDUCE_MAX",::egen::REDUCE_MAX},{eteq::reduce_map(tens,offset,ndims)}); + description: max values ignoring coordinate indices between start and end + - template: typename T + name: n_elems + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return eteq::make_constant_scalar(arg->get_tensor()->shape().n_elems(), teq::Shape()); + - template: typename T + name: n_dims + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: rank + out: + type: eteq::NodeptrT + val: return eteq::make_constant_scalar(arg->get_tensor()->shape().at(rank), teq::Shape()); + - template: typename T + name: slice + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: offset + - dtype: teq::RankT + name: extent + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"SLICE",::egen::SLICE}, { + eteq::slice_map(arg, offset, extent, dimension)}); + - template: typename T + name: pad + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: std::pair + name: padding + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor(teq::Opcode{"PAD",::egen::PAD}, { + eteq::pad_map(arg, 
padding, dimension)}); + - template: typename T + name: matmul + args: + - dtype: eteq::NodeptrT + name: a + - dtype: eteq::NodeptrT + name: b + out: + type: eteq::NodeptrT + val: | + // + teq::Shape ashape = a->get_tensor()->shape(); + teq::Shape bshape = b->get_tensor()->shape(); + teq::DimT ncommon = ashape.at(0); + teq::DimT nrow = ashape.at(1); + teq::DimT ncol = bshape.at(0); + if (ncommon != bshape.at(1)) + { + logs::fatalf("invalid matmul shapes %s and %s", + ashape.to_string().c_str(), bshape.to_string().c_str()); + } + + teq::CoordptrT left_shaper(new teq::CoordMap( + [=](teq::MatrixT fwd) + { + for (teq::RankT i = 3; i < teq::mat_dim; ++i) + { + fwd[i][i] = 1; + } + fwd[2][0] = ncol; + fwd[1][1] = 1; + fwd[0][2] = 1.0 / ncommon; + } + )); + + teq::CoordptrT right_shaper(new teq::CoordMap( + [=](teq::MatrixT fwd) + { + for (teq::RankT i = 3; i < teq::mat_dim; ++i) + { + fwd[i][i] = 1; + } + fwd[0][0] = 1; + fwd[2][1] = nrow; + fwd[1][2] = 1.0 / ncommon; + } + )); + return eteq::make_functor(teq::Opcode{"MATMUL",::egen::MATMUL}, { + eteq::FuncArg(a, left_shaper, nullptr), + eteq::FuncArg(b, right_shaper, nullptr) + }); + - template: typename T + name: convolution + args: + - dtype: eteq::NodeptrT + name: input + - dtype: eteq::NodeptrT + name: kernel + - dtype: std::vector + name: dims + out: + type: eteq::NodeptrT + val: | + // + teq::Shape inshape = input->get_tensor()->shape(); + teq::Shape kernelshape = kernel->get_tensor()->shape(); + teq::CoordptrT input_shaper(new teq::CoordMap( + [kernelshape](teq::MatrixT fwd) + { + for (teq::RankT i = 0; i < teq::rank_cap; ++i) + { + fwd[i][i] = 1; + } + for (teq::RankT i = 0; i < teq::rank_cap; ++i) + { + fwd[teq::rank_cap][i] = -kernelshape.at(i) + 1; + } + } + )); + + teq::CoordptrT kernel_shaper(new teq::CoordMap( + [inshape](teq::MatrixT fwd) + { + for (teq::RankT i = 0; i < teq::rank_cap; ++i) + { + fwd[i][i] = -1; + } + for (teq::RankT i = 0; i < teq::rank_cap; ++i) + { + fwd[teq::rank_cap][i] = inshape.at(i) + 1; + } + } + )); + + teq::CoordT kernel_dims; + auto it = kernel_dims.begin(); + std::fill(it, kernel_dims.end(), teq::rank_cap); + std::copy(dims.begin(), dims.end(), it); + return eteq::make_functor(teq::Opcode{"CONV",::egen::CONV}, { + eteq::FuncArg(input, input_shaper, nullptr), + eteq::FuncArg(kernel, kernel_shaper, + std::make_shared(kernel_dims, true)), + }); + - template: typename T + name: reduce_sum_1d + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + auto red = ::tenncor::reduce_sum(arg, dimension, 1); + + std::vector indices(teq::rank_cap); + auto bt = indices.begin(); + auto it = bt + dimension; + std::iota(bt, it, 0); + std::iota(it, indices.end(), dimension + 1); + indices[teq::rank_cap - 1] = dimension; + return ::tenncor::permute(red, indices); + - template: typename T + name: reduce_prod_1d + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + auto red = ::tenncor::reduce_prod(arg, dimension, 1); + + std::vector indices(teq::rank_cap); + auto bt = indices.begin(); + auto it = bt + dimension; + std::iota(bt, it, 0); + std::iota(it, indices.end(), dimension + 1); + indices[teq::rank_cap - 1] = dimension; + return ::tenncor::permute(red, indices); + - template: typename T + name: reduce_min_1d + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + auto red = 
::tenncor::reduce_min(arg, dimension, 1); + + std::vector indices(teq::rank_cap); + auto bt = indices.begin(); + auto it = bt + dimension; + std::iota(bt, it, 0); + std::iota(it, indices.end(), dimension + 1); + indices[teq::rank_cap - 1] = dimension; + return ::tenncor::permute(red, indices); + - template: typename T + name: reduce_max_1d + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + auto red = ::tenncor::reduce_max(arg, dimension, 1); + + std::vector indices(teq::rank_cap); + auto bt = indices.begin(); + auto it = bt + dimension; + std::iota(bt, it, 0); + std::iota(it, indices.end(), dimension + 1); + indices[teq::rank_cap - 1] = dimension; + return ::tenncor::permute(red, indices); + - template: typename T + name: transpose + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return ::tenncor::permute(arg, {1, 0}); + - template: typename T + name: reduce_mean + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: return ::tenncor::div(::tenncor::reduce_sum(arg), ::tenncor::n_elems(arg)); + - template: typename T + name: reduce_mean_1d + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: | + // + auto red = ::tenncor::reduce_sum_1d(arg, dimension); + auto dim = eteq::make_constant_scalar(arg->shape().at(dimension), red->shape()); + return ::tenncor::div(red, dim); + - template: typename T + name: reduce_l2norm + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: offset + default: '0' + - dtype: teq::RankT + name: ndims + default: teq::rank_cap + out: + type: eteq::NodeptrT + val: return ::tenncor::sqrt(::tenncor::reduce_sum(::tenncor::square(arg), offset, ndims)); + - template: typename T + name: reduce_l2norm_1d + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: dimension + out: + type: eteq::NodeptrT + val: return ::tenncor::sqrt(::tenncor::reduce_sum_1d(::tenncor::square(arg), dimension)); + - template: typename T + name: clip_by_range + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: T + name: minval + - dtype: T + name: maxval + out: + type: eteq::NodeptrT + val: | + // + if (minval > maxval) + { + logs::fatal("min value is below max"); + } + teq::Shape shape = arg->shape(); + auto lo = eteq::make_constant_scalar(minval, shape); + auto hi = eteq::make_constant_scalar(maxval, shape); + auto out = ::tenncor::max(::tenncor::min(arg, hi), lo); + tag::recursive_group_tag(out->get_tensor(), "clip_by_range", { + arg->get_tensor().get(), + lo->get_tensor().get(), + hi->get_tensor().get(), + }); + return out; + - template: typename T + name: clip_by_l2norm + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: T + name: upper + out: + type: eteq::NodeptrT + val: | + // + if (upper == 0) + { + logs::fatal("cannot clip_by_norm with a upper limit of 0"); + } + teq::Shape shape = arg->shape(); + auto limit = eteq::make_constant_scalar(upper, shape); + auto norm = ::tenncor::extend(::tenncor::reduce_l2norm(arg), 0, + std::vector(shape.begin(), shape.end())); + auto out = ::tenncor::if_then_else(::tenncor::lt(norm, limit), + arg, ::tenncor::div(::tenncor::mul(arg, limit), norm)); + tag::recursive_group_tag(out->get_tensor(), "clip_by_l2norm", { + arg->get_tensor().get(), + limit->get_tensor().get(), + }); + return out; + description: 'clip by l2norm ((todo) allow l2norm to be configurable)' + - template: typename T + name: sum + 
args: + - dtype: eteq::NodesT + name: args + out: + type: eteq::NodeptrT + val: | + // + if (args.empty()) + { + logs::fatal("cannot sum without arguments"); + } + eteq::NodeptrT out = args[0]; + for (size_t i = 1, n = args.size(); i < n; ++i) + { + out = ::tenncor::add(out, args[i]); + } + return out; + - template: typename T + name: prod + args: + - dtype: eteq::NodesT + name: args + out: + type: eteq::NodeptrT + val: | + // + if (args.empty()) + { + logs::fatal("cannot sum without arguments"); + } + eteq::NodeptrT out = args[0]; + for (size_t i = 1, n = args.size(); i < n; ++i) + { + out = ::tenncor::mul(out, args[i]); + } + return out; + - template: typename T + name: softmax + args: + - dtype: eteq::NodeptrT + name: arg + - dtype: teq::RankT + name: offset + default: '0' + - dtype: teq::RankT + name: ndims + default: teq::rank_cap + out: + type: eteq::NodeptrT + val: | + // + auto exarg = exp(arg); + teq::Shape shape = exarg->shape(); + auto it = shape.begin() + offset; + std::vector xlist(it, it + ndims); + auto out = ::tenncor::div(exarg, + ::tenncor::extend(::tenncor::reduce_sum(exarg, offset, offset+ndims), + offset, xlist)); + tag::recursive_group_tag(out->get_tensor(), "softmax", { + arg->get_tensor().get()}); + return out; + - template: typename T + name: sign + args: + - dtype: eteq::NodeptrT + name: x + out: + type: eteq::NodeptrT + val: return ::tenncor::pow(x,eteq::make_constant_scalar(0,x->shape())); + tenncor::random: + - template: typename T + name: rand_unif + args: + - dtype: eteq::NodeptrT + name: arg1 + - dtype: eteq::NodeptrT + name: arg2 + out: + type: eteq::NodeptrT + val: | + // + return eteq::make_functor( + teq::Opcode{"RAND_UNIF",::egen::RAND_UNIF},{ + eteq::identity_map(arg1), + eteq::identity_map(arg2) + }); + - template: typename T + name: rand_binom_one + args: + - dtype: eteq::NodeptrT + name: arg + out: + type: eteq::NodeptrT + val: | + // + const teq::Shape& shape = arg->get_tensor()->shape(); + auto trial = ::tenncor::random::rand_unif( + eteq::convert_to_node(eteq::make_variable_scalar((T) 0, shape)), + eteq::convert_to_node(eteq::make_variable_scalar((T) 1, shape))); + return ::tenncor::lt(trial, arg); + tenncor::nn: + - template: typename T + name: relu + args: + - dtype: eteq::NodeptrT + name: x + out: + type: eteq::NodeptrT + val: return ::tenncor::max(x,eteq::make_constant_scalar(0,x->shape())); + - template: typename T + name: conv2d + args: + - dtype: eteq::NodeptrT + name: image + - dtype: eteq::NodeptrT + name: kernel + out: + type: eteq::NodeptrT + val: | + // + // image must be in form [in, width, height, batch] + // kernel must be in form [out, in, width, height] + // see https://www.tensorflow.org/api_docs/python/tf/nn/conv2d + teq::DimT nfilters = kernel->shape().at(0); + eteq::NodesT convolveds; + convolveds.reserve(nfilters); + for (teq::DimT i = 0; i < nfilters; ++i) + { + auto filter = ::tenncor::permute( + ::tenncor::slice(kernel, i, 1, 0), + {1, 2, 3, 0}); + auto conved = ::tenncor::convolution(image, filter, + {0, 1, 2}); + auto padded = ::tenncor::pad(conved, + {i, nfilters - i - 1}, 0); + convolveds.push_back(padded); + } + auto out = ::tenncor::sum(convolveds); + tag::recursive_group_tag(out->get_tensor(), "conv2d", { + image->get_tensor().get(), + kernel->get_tensor().get() + }); + return out; + - template: typename T + name: fully_connect + args: + - dtype: eteq::NodesT + name: inputs + - dtype: eteq::NodesT + name: weights + - dtype: eteq::NodeptrT + name: bias + out: + type: eteq::NodeptrT + val: | + // + if 
(weights.empty()) + { + logs::fatal("cannot create a fully connected layer without weights"); + } + size_t ninputs = inputs.size(); + if (ninputs != weights.size()) + { + logs::fatalf( + "number of inputs (%d) must equal the number of weights (%d)", + ninputs, weights.size()); + } + std::unordered_set ignores = { + inputs[0]->get_tensor().get(), + weights[0]->get_tensor().get() + }; + auto out = ::tenncor::matmul(inputs[0], weights[0]); + for (size_t i = 1; i < ninputs; ++i) + { + ignores.emplace(inputs[i]->get_tensor().get()); + ignores.emplace(weights[i]->get_tensor().get()); + out = ::tenncor::add(out, ::tenncor::matmul(inputs[i], weights[i])); + } + if (nullptr != bias) + { + const teq::Shape& shape = out->shape(); + out = ::tenncor::add(out, ::tenncor::extend(bias, 1, {shape.at(1)})); + ignores.emplace(bias->get_tensor().get()); + } + tag::recursive_group_tag(out->get_tensor(), "fully_connect", ignores); + return out; diff --git a/dbg/BUILD.bazel b/dbg/BUILD.bazel index a774aa29a..ad5befc87 100644 --- a/dbg/BUILD.bazel +++ b/dbg/BUILD.bazel @@ -25,7 +25,7 @@ cc_library( hdrs = glob(["stream/*.hpp"]), srcs = glob(["stream/*.cpp"]), copts = ["-std=c++17"], - deps = ["//ade:ade"], + deps = ["//teq:teq"], ) pybind_library( @@ -57,7 +57,7 @@ cc_library( srcs = glob(["grpc/*.cpp"]), copts = ["-std=c++17"], deps = [ - "//ead:ead", + "//eteq:eteq", "//dbg:tenncor_cc_proto", "@boost//:uuid", "@com_github_mingkaic_cppkg//jobs:jobs", @@ -68,7 +68,7 @@ pybind_library( name = "grpc_dbg_py", cc_srcs = ["//dbg:python/grpc.cpp"], cc_deps = ["//dbg:grpc_out"], - py_deps = ["//ead:ead_py"], + py_deps = ["//eteq:eteq_py"], ) cc_binary( diff --git a/dbg/README_DBG.md b/dbg/README_DBG.md index 30b552be6..76ee4cb49 100644 --- a/dbg/README_DBG.md +++ b/dbg/README_DBG.md @@ -1,3 +1,3 @@ # Debugger (DBG) -Debugger provides pretty print functions for ADE tree and shaped-data. +Debugger provides pretty print functions for TEQ tree and shaped-data. 
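For orientation, the api sections in the two YAML configs above declare templated free functions that the code generator emits into the ::tenncor namespace. The following is a minimal, hedged C++ sketch of how that generated surface might be exercised end to end; the generated header path is hypothetical, and the Session usage mirrors the track/update pattern that the dbg/grpc/session.hpp changes below build on, not a documented workflow.

// Hedged sketch -- "eteq/generated/api.hpp" is a hypothetical name for the
// generator's output header; only eteq/tenncor calls visible in the YAML
// configs or headers of this patch are assumed, and even those are illustrative.
#include "eteq/eteq.hpp"
#include "eteq/session.hpp"
#include "eteq/generated/api.hpp"

int main()
{
    // matmul above requires ashape.at(0) == bshape.at(1)
    teq::Shape xshape({3, 2});  // a: [ncommon=3, nrow=2]
    teq::Shape wshape({4, 3});  // b: [ncol=4, ncommon=3]

    auto x = eteq::convert_to_node(eteq::make_variable_scalar(1.0f, xshape));
    auto w = eteq::convert_to_node(eteq::make_variable_scalar(0.5f, wshape));

    // softmax(arg, offset, ndims) normalizes exponentials over one dimension
    auto out = tenncor::softmax(tenncor::matmul(x, w), 0, 1);

    eteq::Session sess;
    sess.track({out->get_tensor()});  // register the graph roots
    sess.update();                    // evaluate tracked operators
    return 0;
}

The Python bindings follow the same shape: dbg/python/grpc.cpp below swaps eteq::Session for the gRPC-backed InteractiveSession but keeps the track/update calls.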
diff --git a/dbg/csv_to_png.py b/dbg/csv_to_png.py index 83d816a81..2f47e52f5 100644 --- a/dbg/csv_to_png.py +++ b/dbg/csv_to_png.py @@ -1,4 +1,4 @@ -''' Conversion script for writing ade_csv format to png ''' +''' Conversion script for writing teq_csv format to png ''' import sys from collections import defaultdict diff --git a/dbg/grpc/session.hpp b/dbg/grpc/session.hpp index abe13a65d..c07c4354a 100644 --- a/dbg/grpc/session.hpp +++ b/dbg/grpc/session.hpp @@ -8,7 +8,7 @@ #include "jobs/scope_guard.hpp" -#include "ead/session.hpp" +#include "eteq/session.hpp" #include "tag/tag.hpp" @@ -51,7 +51,7 @@ inline bool operator == (const EdgeInfo& lhs, const EdgeInfo& rhs) return hasher(lhs) == hasher(rhs); } -struct InteractiveSession final : public ead::iSession +struct InteractiveSession final : public eteq::iSession { static boost::uuids::random_generator uuid_gen_; @@ -69,7 +69,7 @@ struct InteractiveSession final : public ead::iSession InteractiveSession(grpc::CreateChannel(host, grpc::InsecureChannelCredentials()), client_cfg) {} - void track (ade::TensT roots) override + void track (teq::TensT roots) override { sess_.track(roots); @@ -134,7 +134,7 @@ struct InteractiveSession final : public ead::iSession auto& range = statpair.second; if (range.upper_ > 0) { - auto f = static_cast(tens); + auto f = static_cast(tens); auto& children = f->get_children(); for (size_t i = 0, n = children.size(); i < n; ++i) { @@ -156,11 +156,11 @@ struct InteractiveSession final : public ead::iSession edge->set_parent(node_ids_[f]); edge->set_child(node_ids_[child_tens]); edge->set_label(label); - if (false == ade::is_identity(shaper.get())) + if (false == teq::is_identity(shaper.get())) { edge->set_shaper(shaper->to_string()); } - if (false == ade::is_identity(coorder.get())) + if (false == teq::is_identity(coorder.get())) { edge->set_coorder(coorder->to_string()); } @@ -172,8 +172,8 @@ struct InteractiveSession final : public ead::iSession client_.create_graph(request); } - void update (ead::TensSetT updated = {}, - ead::TensSetT ignores = {}) override + void update (eteq::TensSetT updated = {}, + eteq::TensSetT ignores = {}) override { jobs::ScopeGuard defer([this]() { ++this->update_it_; }); @@ -186,8 +186,8 @@ struct InteractiveSession final : public ead::iSession } // basic copy over from session::update - std::unordered_map fulfilments; - for (ade::iTensor* unodes : updated) + std::unordered_map fulfilments; + for (teq::iTensor* unodes : updated) { auto& node_parents = sess_.parents_[unodes]; for (auto& node_parent : node_parents) @@ -203,12 +203,12 @@ struct InteractiveSession final : public ead::iSession { if (0 == statpair.second.upper_) { - auto leaf = static_cast(statpair.first); - age::_GENERATED_DTYPE dtype = - (age::_GENERATED_DTYPE) leaf->type_code(); + auto leaf = static_cast(statpair.first); + egen::_GENERATED_DTYPE dtype = + (egen::_GENERATED_DTYPE) leaf->type_code(); std::vector data; size_t nelems = leaf->shape().n_elems(); - age::type_convert(data, leaf->data(), dtype, nelems); + egen::type_convert(data, leaf->data(), dtype, nelems); tenncor::UpdateNodeDataRequest request; auto payload = request.mutable_payload(); @@ -229,11 +229,11 @@ struct InteractiveSession final : public ead::iSession false == estd::has(ignores, op.first)) { op.first->update(); - age::_GENERATED_DTYPE dtype = - (age::_GENERATED_DTYPE) op.first->type_code(); + egen::_GENERATED_DTYPE dtype = + (egen::_GENERATED_DTYPE) op.first->type_code(); std::vector data; size_t nelems = op.first->shape().n_elems(); - 
age::type_convert(data, op.first->data(), dtype, nelems); + egen::type_convert(data, op.first->data(), dtype, nelems); auto& op_parents = sess_.parents_[op.first]; for (auto& op_parent : op_parents) { @@ -255,8 +255,8 @@ struct InteractiveSession final : public ead::iSession client_.update_node_data(requests, update_it_); } - void update_target (ead::TensSetT targeted, - ead::TensSetT updated = {}) override + void update_target (eteq::TensSetT targeted, + eteq::TensSetT updated = {}) override { jobs::ScopeGuard defer([this]() { ++this->update_it_; }); @@ -269,14 +269,14 @@ struct InteractiveSession final : public ead::iSession } // basic copy over from session::update - ade::OnceTraveler traveler; + teq::OnceTraveler traveler; for (auto& tens : targeted) { tens->accept(traveler); } - std::unordered_map fulfilments; - for (ade::iTensor* unodes : updated) + std::unordered_map fulfilments; + for (teq::iTensor* unodes : updated) { auto& node_parents = sess_.parents_[unodes]; for (auto& node_parent : node_parents) @@ -292,12 +292,12 @@ struct InteractiveSession final : public ead::iSession { if (0 == statpair.second.upper_) { - auto leaf = static_cast(statpair.first); - age::_GENERATED_DTYPE dtype = - (age::_GENERATED_DTYPE) leaf->type_code(); + auto leaf = static_cast(statpair.first); + egen::_GENERATED_DTYPE dtype = + (egen::_GENERATED_DTYPE) leaf->type_code(); std::vector data; size_t nelems = leaf->shape().n_elems(); - age::type_convert(data, leaf->data(), dtype, nelems); + egen::type_convert(data, leaf->data(), dtype, nelems); tenncor::UpdateNodeDataRequest request; auto payload = request.mutable_payload(); @@ -318,11 +318,11 @@ struct InteractiveSession final : public ead::iSession fulfilments[op.first].d >= op.second) { op.first->update(); - age::_GENERATED_DTYPE dtype = - (age::_GENERATED_DTYPE) op.first->type_code(); + egen::_GENERATED_DTYPE dtype = + (egen::_GENERATED_DTYPE) op.first->type_code(); std::vector data; size_t nelems = op.first->shape().n_elems(); - age::type_convert(data, op.first->data(), dtype, nelems); + egen::type_convert(data, op.first->data(), dtype, nelems); auto& op_parents = sess_.parents_[op.first]; for (auto& op_parent : op_parents) { @@ -413,7 +413,7 @@ struct InteractiveSession final : public ead::iSession auto& range = statpair.second; if (range.upper_ > 0) { - auto f = static_cast(tens); + auto f = static_cast(tens); auto& children = f->get_children(); for (size_t i = 0, n = children.size(); i < n; ++i) { @@ -435,11 +435,11 @@ struct InteractiveSession final : public ead::iSession edge->set_parent(node_ids_[f]); edge->set_child(node_ids_[child_tens]); edge->set_label(label); - if (false == ade::is_identity(shaper.get())) + if (false == teq::is_identity(shaper.get())) { edge->set_shaper(shaper->to_string()); } - if (false == ade::is_identity(coorder.get())) + if (false == teq::is_identity(coorder.get())) { edge->set_coorder(coorder->to_string()); } @@ -487,7 +487,7 @@ struct InteractiveSession final : public ead::iSession std::unique_ptr stub_; - ead::Session sess_; + eteq::Session sess_; tag::TagRegistry& registry_; @@ -495,7 +495,7 @@ struct InteractiveSession final : public ead::iSession std::string sess_id_ = boost::uuids::to_string( InteractiveSession::uuid_gen_()); - std::unordered_map node_ids_; + std::unordered_map node_ids_; std::unordered_set edges_; diff --git a/dbg/python/grpc.cpp b/dbg/python/grpc.cpp index 781d60555..46810e54a 100644 --- a/dbg/python/grpc.cpp +++ b/dbg/python/grpc.cpp @@ -1,9 +1,9 @@ #include "pybind11/pybind11.h" #include 
"pybind11/stl.h" -#include "ead/generated/pyapi.hpp" -#include "ead/ead.hpp" -#include "ead/parse.hpp" +#include "eteq/generated/pyapi.hpp" +#include "eteq/eteq.hpp" +#include "eteq/parse.hpp" #include "dbg/grpc/session.hpp" @@ -11,15 +11,15 @@ namespace py = pybind11; PYBIND11_MODULE(grpc_dbg, m) { - m.doc() = "dbg ade equation graphs using interactive grpc session"; + m.doc() = "dbg teq equation graphs using interactive grpc session"; py::object isess = (py::object) - py::module::import("ead.ead").attr("iSession"); + py::module::import("eteq.eteq").attr("iSession"); py::class_> session( m, "InteractiveSession", isess); - py::implicitly_convertible(); + py::implicitly_convertible(); m.def("get_isess", [](std::string host, size_t request_duration, size_t stream_duration) @@ -34,14 +34,14 @@ PYBIND11_MODULE(grpc_dbg, m) py::arg("stream_dur") = 30000); session .def("track", - [](py::object self, ead::NodesT roots) + [](py::object self, eteq::NodesT roots) { auto sess = self.cast(); - ade::TensT troots; + teq::TensT troots; troots.reserve(roots.size()); std::transform(roots.begin(), roots.end(), std::back_inserter(troots), - [](ead::NodeptrT& node) + [](eteq::NodeptrT& node) { return node->get_tensor(); }); @@ -49,37 +49,37 @@ PYBIND11_MODULE(grpc_dbg, m) }, "Track node") .def("update", - [](py::object self, std::vector> nodes) + [](py::object self, std::vector> nodes) { auto sess = self.cast(); - std::unordered_set updates; - for (ead::NodeptrT& node : nodes) + std::unordered_set updates; + for (eteq::NodeptrT& node : nodes) { updates.emplace(node->get_tensor().get()); } sess->update(updates); }, "Return calculated data", - py::arg("nodes") = std::vector>{}) + py::arg("nodes") = std::vector>{}) .def("update_target", - [](py::object self, std::vector> targeted, - std::vector> updated) + [](py::object self, std::vector> targeted, + std::vector> updated) { auto sess = self.cast(); - std::unordered_set targets; - std::unordered_set updates; - for (ead::NodeptrT& node : targeted) + std::unordered_set targets; + std::unordered_set updates; + for (eteq::NodeptrT& node : targeted) { targets.emplace(node->get_tensor().get()); } - for (ead::NodeptrT& node : updated) + for (eteq::NodeptrT& node : updated) { updates.emplace(node->get_tensor().get()); } sess->update_target(targets, updates); }, "Calculate node relevant to targets in the graph given list of updated data", - py::arg("targets"), py::arg("updated") = std::vector>{}) + py::arg("targets"), py::arg("updated") = std::vector>{}) .def("join", [](py::object self) { @@ -97,7 +97,7 @@ PYBIND11_MODULE(grpc_dbg, m) [](py::object self, std::string filename) { auto sess = self.cast(); - opt::OptCtx rules = ead::parse_file("cfg/optimizations.rules"); + opt::OptCtx rules = eteq::parse_file("cfg/optimizations.rules"); sess->optimize(rules); }, "Optimize using rules for specified filename"); diff --git a/dbg/python/stream.cpp b/dbg/python/stream.cpp index d0c47bb91..6f6e88f17 100644 --- a/dbg/python/stream.cpp +++ b/dbg/python/stream.cpp @@ -4,20 +4,20 @@ #include "pybind11/pybind11.h" #include "pybind11/stl.h" -#include "ade/ade.hpp" +#include "teq/teq.hpp" -#include "dbg/stream/ade.hpp" -#include "dbg/stream/ade_csv.hpp" +#include "dbg/stream/teq.hpp" +#include "dbg/stream/teq_csv.hpp" namespace py = pybind11; PYBIND11_MODULE(stream_dbg, m) { - m.doc() = "print ade equation graphs to stream"; + m.doc() = "print teq equation graphs to stream"; // ==== to stdout functions ==== m.def("print_graph", - [](ade::TensptrT root, bool showshape) + [](teq::TensptrT 
root, bool showshape) { PrettyEquation peq; peq.showshape_ = showshape; @@ -27,7 +27,7 @@ PYBIND11_MODULE(stream_dbg, m) py::arg("root"), py::arg("showshape") = false); m.def("print_graphcsv", - [](ade::TensptrT root, bool showshape) + [](teq::TensptrT root, bool showshape) { CSVEquation ceq; ceq.showshape_ = showshape; @@ -39,7 +39,7 @@ PYBIND11_MODULE(stream_dbg, m) // ==== to string functions ==== m.def("graph_to_str", - [](ade::TensptrT root, bool showshape) + [](teq::TensptrT root, bool showshape) { std::stringstream ss; PrettyEquation peq; @@ -51,7 +51,7 @@ PYBIND11_MODULE(stream_dbg, m) py::arg("root"), py::arg("showshape") = false); m.def("graph_to_csvstr", - [](ade::TensptrT root, bool showshape) + [](teq::TensptrT root, bool showshape) { std::stringstream ss; CSVEquation ceq; @@ -64,7 +64,7 @@ PYBIND11_MODULE(stream_dbg, m) py::arg("root"), py::arg("showshape") = false); m.def("multigraph_to_csvstr", - [](ade::TensT roots, bool showshape) + [](teq::TensT roots, bool showshape) { std::stringstream ss; CSVEquation ceq; @@ -81,7 +81,7 @@ PYBIND11_MODULE(stream_dbg, m) // ==== to file functions ==== m.def("graph_to_file", - [](ade::TensptrT root, std::string filename, bool showshape) + [](teq::TensptrT root, std::string filename, bool showshape) { std::ofstream outstr(filename); if (outstr.is_open()) @@ -100,7 +100,7 @@ PYBIND11_MODULE(stream_dbg, m) py::arg("root"), py::arg("filename"), py::arg("showshape") = false); m.def("graph_to_csvfile", - [](ade::TensptrT root, std::string filename, bool showshape) + [](teq::TensptrT root, std::string filename, bool showshape) { std::ofstream outstr(filename); if (outstr.is_open()) @@ -119,7 +119,7 @@ PYBIND11_MODULE(stream_dbg, m) py::arg("root"), py::arg("filename"), py::arg("showshape") = false); m.def("multigraph_to_csvfile", - [](ade::TensT roots, std::string filename, bool showshape) + [](teq::TensT roots, std::string filename, bool showshape) { std::ofstream outstr(filename); if (outstr.is_open()) diff --git a/dbg/stream/ade.hpp b/dbg/stream/teq.hpp similarity index 58% rename from dbg/stream/ade.hpp rename to dbg/stream/teq.hpp index 4177d766e..462eeeea8 100644 --- a/dbg/stream/ade.hpp +++ b/dbg/stream/teq.hpp @@ -1,5 +1,5 @@ /// -/// ade.hpp +/// teq.hpp /// dbg /// /// Purpose: @@ -8,28 +8,28 @@ #include -#include "ade/ileaf.hpp" -#include "ade/functor.hpp" +#include "teq/ileaf.hpp" +#include "teq/functor.hpp" #include "dbg/stream/tree.hpp" -#ifndef DBG_ADE_HPP -#define DBG_ADE_HPP +#ifndef DBG_TEQ_HPP +#define DBG_TEQ_HPP -using LabelsMapT = std::unordered_map; +using LabelsMapT = std::unordered_map; -/// Use PrettyTree to render ade::TensptrT graph as an ascii art +/// Use PrettyTree to render teq::TensptrT graph as an ascii art struct PrettyEquation final { PrettyEquation (void) : drawer_( - [](ade::iTensor*& root) -> std::vector + [](teq::iTensor*& root) -> std::vector { - if (ade::iFunctor* f = dynamic_cast(root)) + if (teq::iFunctor* f = dynamic_cast(root)) { auto& children = f->get_children(); - std::vector tens(children.size()); + std::vector tens(children.size()); std::transform(children.begin(), children.end(), tens.begin(), - [](const ade::FuncArg& child) + [](const teq::FuncArg& child) { return child.get_tensor().get(); }); @@ -37,7 +37,7 @@ struct PrettyEquation final } return {}; }, - [this](std::ostream& out, ade::iTensor*& root) + [this](std::ostream& out, teq::iTensor*& root) { if (root) { @@ -46,7 +46,7 @@ struct PrettyEquation final { out << it->second << "="; } - if (auto var = dynamic_cast(root)) + if (auto var = 
dynamic_cast(root)) { out << (var->is_const() ? "constant:" : "variable:"); } @@ -59,12 +59,12 @@ struct PrettyEquation final }) {} /// Stream equation of ptr to out - void print (std::ostream& out, const ade::TensptrT& ptr) + void print (std::ostream& out, const teq::TensptrT& ptr) { drawer_.print(out, ptr.get()); } - void print (std::ostream& out, ade::iTensor* ptr) + void print (std::ostream& out, teq::iTensor* ptr) { drawer_.print(out, ptr); } @@ -76,7 +76,7 @@ struct PrettyEquation final private: /// Actual ascii renderer - PrettyTree drawer_; + PrettyTree drawer_; }; -#endif // DBG_ADE_HPP +#endif // DBG_TEQ_HPP diff --git a/dbg/stream/ade_csv.hpp b/dbg/stream/teq_csv.hpp similarity index 81% rename from dbg/stream/ade_csv.hpp rename to dbg/stream/teq_csv.hpp index 7a273a31f..2251180b1 100644 --- a/dbg/stream/ade_csv.hpp +++ b/dbg/stream/teq_csv.hpp @@ -2,15 +2,15 @@ #include #include -#include "ade/ileaf.hpp" -#include "ade/ifunctor.hpp" +#include "teq/ileaf.hpp" +#include "teq/ifunctor.hpp" #include "estd/estd.hpp" -#include "dbg/stream/ade.hpp" +#include "dbg/stream/teq.hpp" -#ifndef DBG_ADE_CSV_HPP -#define DBG_ADE_CSV_HPP +#ifndef DBG_TEQ_CSV_HPP +#define DBG_TEQ_CSV_HPP const char label_delim = ':'; @@ -31,15 +31,15 @@ enum NODE_TYPE CACHED_FUNC, }; -using GetTypeF = std::function; +using GetTypeF = std::function; -struct CSVEquation final : public ade::iTraveler +struct CSVEquation final : public teq::iTraveler { CSVEquation (GetTypeF get_ftype = - [](ade::iFunctor* func) { return FUNCTOR; }) : + [](teq::iFunctor* func) { return FUNCTOR; }) : get_ftype_(get_ftype) {} - void visit (ade::iLeaf* leaf) override + void visit (teq::iLeaf* leaf) override { if (estd::has(nodes_, leaf)) { @@ -63,7 +63,7 @@ struct CSVEquation final : public ade::iTraveler }); } - void visit (ade::iFunctor* func) override + void visit (teq::iFunctor* func) override { if (estd::has(nodes_, func)) { @@ -88,10 +88,10 @@ struct CSVEquation final : public ade::iTraveler auto& children = func->get_children(); for (size_t i = 0, n = children.size(); i < n; ++i) { - const ade::FuncArg& child = children[i]; + const teq::FuncArg& child = children[i]; auto coorder = child.get_coorder().get(); auto tens = child.get_tensor().get(); - if (ade::is_identity(coorder)) + if (teq::is_identity(coorder)) { coorder = nullptr; } @@ -153,11 +153,11 @@ struct CSVEquation final : public ade::iTraveler struct Edge { - ade::iFunctor* func_; + teq::iFunctor* func_; - ade::iTensor* child_; + teq::iTensor* child_; - ade::iCoordMap* coorder_; + teq::iCoordMap* coorder_; std::string edge_label_; }; @@ -176,11 +176,11 @@ struct CSVEquation final : public ade::iTraveler std::vector edges_; - std::unordered_map nodes_; + std::unordered_map nodes_; - std::unordered_map coorders_; + std::unordered_map coorders_; GetTypeF get_ftype_; }; -#endif // DBG_ADE_CSV_HPP +#endif // DBG_TEQ_CSV_HPP diff --git a/ead/README_EAD.md b/ead/README_EAD.md deleted file mode 100644 index b59572115..000000000 --- a/ead/README_EAD.md +++ /dev/null @@ -1,3 +0,0 @@ -# EAD (Eigen-ADE) - -Provides straight forward ADE iLeaf implementation using Variable to store Eigen Tensors. 
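For orientation before the generator and grader diffs below, here is a minimal sketch of driving the renamed eteq API, pieced together from the benchmark hunks later in this patch; the <float> instantiations and exact signatures are assumptions inferred from how the benchmarks call make_variable, derive, and Session, not a verbatim excerpt of the patch.

// Illustrative sketch only; assumes eteq::make_variable<T>, eteq::derive,
// and eteq::Session behave as exercised in eteq/bm/benchmark.cpp below.
#include "eteq/eteq.hpp"

int main ()
{
	teq::Shape ashape({3, 2});
	teq::Shape bshape({4, 3});

	// variables are eteq leaves wrapping Eigen tensors
	eteq::VarptrT<float> a = eteq::make_variable<float>(ashape);
	eteq::VarptrT<float> b = eteq::make_variable<float>(bshape);

	eteq::NodeptrT<float> atens(a);
	eteq::NodeptrT<float> btens(b);

	// build a graph with the generated tenncor API, then take a
	// reverse-mode derivative of the result with respect to atens
	auto c = tenncor::matmul(atens, btens);
	auto da = eteq::derive(c, atens);

	// register roots once, then evaluate everything tracked
	eteq::Session session;
	session.track({c->get_tensor(), da->get_tensor()});
	session.update();
	return 0;
}

The benchmarks below follow the same pattern: track the roots once outside the timed region, then call update() inside it, so each iteration measures only re-evaluation of the tracked graph.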
diff --git a/ead/age/plugins/pyapis.py b/ead/age/plugins/pyapis.py deleted file mode 100644 index 1282c8eb0..000000000 --- a/ead/age/plugins/pyapis.py +++ /dev/null @@ -1,243 +0,0 @@ -import re -import logging - -from gen.plugin_base import PluginBase -from gen.file_rep import FileRep - -from ead.age.plugins.template import build_template -from ead.age.plugins.apis import api_header - -_pybindt = 'PybindT' - -_header_template = ''' -// type to replace template arguments in pybind -using {pybind} = {pybind_type}; -//>>> ^ pybind, pybind_type -''' - -_source_template = ''' -namespace py = pybind11; - -namespace pyage -{{ - -//>>> unique_wrap -{unique_wrap} - -}} - -//>>> modname -PYBIND11_MODULE({modname}, m_{modname}) -{{ - m_{modname}.doc() = "pybind for {modname} api"; - - //>>> modname - py::class_ tensor(m_{modname}, "Tensor"); - - //>>> defs - {defs} -}} -''' - -def _sub_pybind(stmt, source): - _type_pattern = '([^\w]){}([^\w])'.format(source) - _type_replace = '\\1{}\\2'.format(_pybindt) - return re.sub(_type_pattern, _type_replace, ' ' + stmt + ' ').strip() - -def _strip_template_prefix(template): - _template_prefixes = ['typename', 'class'] - for template_prefix in _template_prefixes: - if template.startswith(template_prefix): - return template[len(template_prefix):].strip() - return template - -_func_fmt = ''' -{outtype} {funcname}_{idx} ({param_decl}) -{{ - return {namespace}::{funcname}({args}); -}} -''' -def _wrap_func(idx, api, namespace): - if 'template' in api and len(api['template']) > 0: - templates = [_strip_template_prefix(typenames) - for typenames in api['template'].split(',')] - else: - templates = [] - outtype = 'ade::TensptrT' - if isinstance(api['out'], dict) and 'type' in api['out']: - outtype = api['out']['type'] - - out = _func_fmt.format( - outtype=outtype, - namespace=namespace, - funcname=api['name'], - idx=idx, - param_decl=', '.join([arg['dtype'] + ' ' + arg['name'] - for arg in api['args']]), - args=', '.join([arg['name'] for arg in api['args']])) - for temp in templates: - out = _sub_pybind(out, temp) - return out - -def _handle_pybind(pybind_type): - return _pybindt - -def _handle_pybind_type(pybind_type): - return pybind_type - -def _handle_unique_wrap(pybind_type, apis, namespace): - return '\n\n'.join([ - _wrap_func(i, api, namespace) - for i, api in enumerate(apis)] - ) - -def _handle_defs(pybind_type, apis, module_name, first_module): - _mdef_tmpl = 'm_{module_name}.def("{pyfunc}", '+\ - '&pyage::{func}_{idx}, {description}, {pyargs});' - - _class_def_tmpl = 'py::class_::type,{outtype}>(m_{module_name}, "{name}");' - - cnames = {} - def checkpy(cname): - if cname in cnames: - out = cname + str(cnames[cname]) - cnames[cname] = cnames[cname] + 1 - else: - out = cname - cnames[cname] = 0 - return out - - def parse_header_args(arg): - if 'default' in arg: - defext = ' = {}'.format(arg['default']) - else: - defext = '' - return '{dtype} {name}{defext}'.format( - dtype = arg['dtype'], - name = arg['name'], - defext = defext) - - def parse_description(arg): - if 'description' in arg: - description = ': {}'.format(arg['description']) - else: - description = '' - outtype = 'ade::TensptrT' - if isinstance(arg['out'], dict) and 'type' in arg['out']: - outtype = arg['out']['type'] - return '"{outtype} {func} ({args}){description}"'.format( - outtype = outtype, - func = arg['name'], - args = ', '.join([parse_header_args(arg) for arg in arg['args']]), - description = description) - - def parse_pyargs(arg): - if 'default' in arg: - defext = ' = 
{}'.format(arg['default']) - else: - defext = '' - return 'py::arg("{name}"){defext}'.format( - name = arg['name'], - defext = defext) - - outtypes = set() - for api in apis: - if 'template' in api and len(api['template']) > 0: - templates = [_strip_template_prefix(typenames) - for typenames in api['template'].split(',')] - else: - templates = [] - if isinstance(api['out'], dict) and 'type' in api['out']: - outtype = api['out']['type'] - for temp in templates: - outtype = _sub_pybind(outtype, temp) - outtypes.add(outtype) - - class_defs = [] - if first_module: - for outtype in outtypes: - if 'ade::TensptrT' == outtype: - continue - class_defs.append(_class_def_tmpl.format( - module_name=module_name, - outtype=outtype, - name=outtype.split('::')[-1])) - - return '\n '.join(class_defs) + '\n ' +\ - '\n '.join([_mdef_tmpl.format( - module_name=module_name, - pyfunc=checkpy(api['name']), - func=api['name'], idx=i, - description=parse_description(api), - pyargs=', '.join([parse_pyargs(arg) for arg in api['args']])) - for i, api in enumerate(apis)]) - -_plugin_id = 'PYBINDER' - -class PyAPIsPlugin: - - def plugin_id(self): - return _plugin_id - - def process(self, generated_files, arguments): - _hdr_file = 'pyapi.hpp' - _submodule_def = ' py::module m_{name} = m_{prename}.def_submodule("{submod}", "A submodule of \'{prename}\'");\n' - - plugin_key = 'api' - if plugin_key not in arguments: - logging.warning( - 'no relevant arguments found for plugin %s', _plugin_id) - return - - api = arguments[plugin_key] - bindtype = api.get('pybind_type', 'double') - - generated_files[_hdr_file] = FileRep( - _header_template.format( - pybind=_pybindt, pybind_type=bindtype), - user_includes=[], internal_refs=[]) - - contents = {} - for namespace in api['namespaces']: - definitions = api['namespaces'][namespace] - if namespace == '' or namespace == '_': - module = 'age' - namespace = '' - else: - module = namespace - uwraps = _handle_unique_wrap(bindtype, definitions, namespace) - - mods = module.split('::') - mod = mods[0] - modname = '_'.join(mods) - mod_def = '' - if len(mods) > 1: - mod_def = _submodule_def.format( - name=modname, prename='_'.join(mods[:-1]), submod=mods[-1]) - defs = mod_def + _handle_defs(bindtype, definitions, modname, mod not in contents) - if mod in contents: - existing_uwraps, existing_defs = contents[mod] - contents[mod] = ( - existing_uwraps + '\n\n' + uwraps, - existing_defs + '\n\n' + defs) - else: - contents[mod] = (uwraps, defs) - - src_file_tmpl = 'pyapi_{}.cpp' - for mod in contents: - unique_wrap, defs = contents[mod] - src_file = src_file_tmpl.format(mod) - generated_files[src_file] = FileRep( - _source_template.format( - modname=mod, - unique_wrap=''.join(unique_wrap), - defs=''.join(defs)), - user_includes=[ - '"pybind11/pybind11.h"', - '"pybind11/stl.h"', - ], - internal_refs=[_hdr_file, api_header]) - - return generated_files - -PluginBase.register(PyAPIsPlugin) diff --git a/ead/coord.hpp b/ead/coord.hpp deleted file mode 100644 index d343b1445..000000000 --- a/ead/coord.hpp +++ /dev/null @@ -1,59 +0,0 @@ -#include "ade/coord.hpp" - -#ifndef EAD_COORD_HPP -#define EAD_COORD_HPP - -namespace ead -{ - -struct CoordMap final : public ade::iCoordMap -{ - CoordMap (ade::CoordT indices, bool bijective) : - indices_(indices), bijective_(bijective) {} - - ade::iCoordMap* connect (const ade::iCoordMap& rhs) const override - { - return nullptr; - } - - void forward (ade::CoordT::iterator out, - ade::CoordT::const_iterator in) const override - { - std::copy(indices_.begin(), 
indices_.end(), out); - } - - iCoordMap* reverse (void) const override - { - return nullptr; - } - - std::string to_string (void) const override - { - return fmts::to_string(indices_.begin(), indices_.end()); - } - - void access (std::function cb) const override {} - - bool is_bijective (void) const override - { - return bijective_; - } - -private: - ade::CoordT indices_; - - bool bijective_; -}; - -/// Type of iCoordMap smartpointer -using CoordptrT = std::shared_ptr; - -CoordptrT reduce (std::vector red_dims); - -CoordptrT extend (ade::RankT rank, std::vector ext); - -CoordptrT permute (std::vector dims); - -} - -#endif // EAD_COORD_HPP diff --git a/ead/ead.hpp b/ead/ead.hpp deleted file mode 100644 index 3796d4e13..000000000 --- a/ead/ead.hpp +++ /dev/null @@ -1,5 +0,0 @@ -#include "ead/grader.hpp" -#include "ead/serialize.hpp" -#include "ead/session.hpp" - -#include "ead/generated/api.hpp" diff --git a/ead/grader.hpp b/ead/grader.hpp deleted file mode 100644 index 5c529c319..000000000 --- a/ead/grader.hpp +++ /dev/null @@ -1,482 +0,0 @@ -/// -/// grader.hpp -/// ead -/// -/// Purpose: -/// Implement ead gradient definition for supported operations -/// - -#include - -#include "ade/grad_def.hpp" - -#include "ead/generated/api.hpp" - -#include "ead/constant.hpp" - -#ifndef EAD_GRADER_HPP -#define EAD_GRADER_HPP - -namespace ead -{ - -template -NodeptrT reduce_grad (const ade::FuncArg& child, - NodeptrT bwd, size_t idx) -{ - const ade::Shape& shape = child.get_tensor()->shape(); - ade::CoordptrT revshaper(child.get_shaper()->reverse()); - CoordptrT revcoord; - { - auto coorder = child.get_coorder(); - assert(nullptr != coorder); - ade::CoordT dims; - coorder->forward(dims.begin(), dims.begin()); - ade::CoordT bcast; - std::fill(bcast.begin(), bcast.end(), 1); - for (ade::RankT d : dims) - { - if (d < ade::rank_cap) - { - bcast[d] = shape.at(d); - } - } - revcoord = std::make_shared(bcast, false); - } - return make_functor(ade::Opcode{"EXTEND",age::EXTEND}, { - FuncArg(bwd, revshaper, revcoord) - }); -} - -template -NodeptrT permute_grad (ade::iFunctor* fwd, - NodeptrT bwd, size_t idx) -{ - const auto& child = fwd->get_children()[0]; - ade::CoordptrT revshaper(child.get_shaper()->reverse()); - CoordptrT revcoord; - { - auto coorder = child.get_coorder(); - assert(nullptr != coorder); - ade::CoordT dims; - coorder->forward(dims.begin(), dims.begin()); - - ade::CoordT order; - for (ade::RankT i = 0; i < ade::rank_cap; ++i) - { - order[dims[i]] = i; - } - revcoord = std::make_shared(order, true); - } - return make_functor(ade::Opcode{"PERMUTE",age::PERMUTE},{ - FuncArg(bwd, revshaper, revcoord) - }); -} - -template -NodeptrT extend_grad (ade::iFunctor* fwd, - NodeptrT bwd, size_t idx) -{ - const auto& child = fwd->get_children()[0]; - ade::CoordptrT revshaper(child.get_shaper()->reverse()); - CoordptrT revcoord; - { - auto coorder = child.get_coorder(); - assert(nullptr != coorder); - ade::CoordT dims; - coorder->forward(dims.begin(), dims.begin()); - std::vector red_dims; - for (ade::RankT i = 0; i < ade::rank_cap; ++i) - { - if (dims[i] > 1) - { - red_dims.push_back(i); - } - } - revcoord = reduce(red_dims); - } - return make_functor(ade::Opcode{"REDUCE_SUM",age::REDUCE_SUM},{ - FuncArg(bwd, revshaper, revcoord) - }); -} - -template -struct GradientBuilder final : public ade::iGradientBuilder -{ - /// Implementation of iGradientBuilder - ade::TensptrT local_derivative (ade::FuncptrT op, - size_t arg_idx) const override - { - const ade::ArgsT& args = op->get_children(); - ade::TensptrT out; 
- ade::Opcode opcode = op->get_opcode(); - switch ((age::_GENERATED_OPCODE) opcode.code_) - { - case age::ABS: - out = tenncor::div(NodeConverters::to_node(args[0].get_tensor()), - NodeConverters::to_node(op))->get_tensor(); - break; - case age::NEG: - out = make_constant_scalar( - -1, args[0].get_tensor()->shape())->get_tensor(); - break; - case age::SIN: - out = tenncor::cos(NodeConverters::to_node(args[0].get_tensor()))->get_tensor(); - break; - case age::COS: - out = tenncor::neg(tenncor::sin( - NodeConverters::to_node(args[0].get_tensor())))->get_tensor(); - break; - case age::TAN: - out = tenncor::div(make_constant_scalar(1, - args[0].get_tensor()->shape()), - tenncor::pow( - tenncor::cos(NodeConverters::to_node(args[0].get_tensor())), - make_constant_scalar(2, args[0].get_tensor()->shape()) - ) - )->get_tensor(); - break; - case age::EXP: - out = op; - break; - case age::LOG: - out = tenncor::div( - make_constant_scalar(1,args[0].get_tensor()->shape()), - NodeConverters::to_node(args[0].get_tensor()) - )->get_tensor(); - break; - case age::SQRT: - out = tenncor::div( - make_constant_scalar(1,args[0].get_tensor()->shape()), - tenncor::mul( - make_constant_scalar(2, - args[0].get_tensor()->shape()), - NodeConverters::to_node(op) - ) - )->get_tensor(); - break; - case age::SQUARE: - out = tenncor::mul( - make_constant_scalar(2, args[0].get_tensor()->shape()), - NodeConverters::to_node(args[0].get_tensor()) - )->get_tensor(); - break; - case age::CUBE: - out = tenncor::mul( - make_constant_scalar(3, args[0].get_tensor()->shape()), - tenncor::square(NodeConverters::to_node(args[0].get_tensor())) - )->get_tensor(); - break; - case age::SIGMOID: - out = tenncor::sigmoid_grad( - NodeConverters::to_node(args[0].get_tensor()))->get_tensor(); - break; - case age::SIGMOID_GRAD: - out = tenncor::mul( - NodeConverters::to_node(op), - tenncor::sub( - make_constant_scalar(1, - args[0].get_tensor()->shape()), - tenncor::mul( - make_constant_scalar(2, - args[0].get_tensor()->shape()), - tenncor::sigmoid(NodeConverters::to_node(args[0].get_tensor())) - ) - ) - )->get_tensor(); - break; - case age::TANH: - out = tenncor::sub( - make_constant_scalar(1,args[0].get_tensor()->shape()), - tenncor::square(NodeConverters::to_node(op)) - )->get_tensor(); - break; - case age::ROUND: - case age::REDUCE_SUM: - case age::EXTEND: - case age::PERMUTE: - case age::ADD: - case age::SLICE: - case age::PAD: - out = get_const_one(args[0].get_tensor()->shape()); - break; - case age::MUL: - case age::CONV: - out = args[(size_t)(arg_idx==0)].get_tensor(); - break; - case age::MAX: - case age::MIN: - out = tenncor::eq(NodeConverters::to_node(op), - NodeConverters::to_node(args[arg_idx].get_tensor()))->get_tensor(); - break; - case age::POW: - out = (arg_idx==0 ? - tenncor::mul( - NodeConverters::to_node(args[1].get_tensor()), - tenncor::pow( - NodeConverters::to_node(args[0].get_tensor()), - tenncor::sub( - NodeConverters::to_node(args[1].get_tensor()), - make_constant_scalar(1, - args[0].get_tensor()->shape()) - ) - ) - ) : - tenncor::mul(tenncor::log(NodeConverters::to_node(args[0].get_tensor())), - NodeConverters::to_node(op)) - )->get_tensor(); - break; - case age::SUB: - out = make_constant_scalar(arg_idx == 0 ? - 1 : -1, args[0].get_tensor()->shape())->get_tensor(); - break; - case age::DIV: - { - auto denom = NodeConverters::to_node(args[1].get_tensor()); - out = (arg_idx==0 ? 
- tenncor::div( - make_constant_scalar(1, - args[0].get_tensor()->shape()), - denom - ) : - tenncor::div( - tenncor::div( - tenncor::neg(NodeConverters::to_node(args[0].get_tensor())), - denom), - denom - ))->get_tensor(); - } - break; - case age::EQ: - case age::NEQ: - case age::GT: - case age::LT: - case age::RAND_UNIF: - case age::SELECT: - out = get_const_zero(args[0].get_tensor()->shape()); - break; - case age::REDUCE_PROD: // todo: prevent divide by zero - out = tenncor::div( - reduce_grad(args[0], NodeConverters::to_node(op), arg_idx), - NodeConverters::to_node(args[0].get_tensor()) - )->get_tensor(); - break; - case age::REDUCE_MAX: - case age::REDUCE_MIN: - out = tenncor::eq( - reduce_grad(args[0], NodeConverters::to_node(op), arg_idx), - NodeConverters::to_node(args[0].get_tensor()) - )->get_tensor(); - break; - case age::MATMUL: - { - NodeptrT lhs = NodeConverters::to_node(args[0].get_tensor()); - NodeptrT rhs = NodeConverters::to_node(args[1].get_tensor()); - out = (0 == arg_idx ? - // ext_rhs - tenncor::permute(tenncor::extend(rhs, 2, { - lhs->shape().at(1)}), {0,2,1}) : - // ext_lhs - tenncor::permute(tenncor::extend(lhs, 2, { - rhs->shape().at(0)}), {2,1,0}) - )->get_tensor(); - } - break; - case age::CONV_IMG_GRAD: - logs::fatal("cannot derive CONV_IMG_GRAD"); - break; - case age::CONV_KRN_GRAD: - logs::fatal("cannot derive CONV_KRN_GRAD"); - break; - default: - logs::fatalf("Unknown op %s", opcode.name_.c_str()); - } - return out; - } - - /// Implementation of iGradientBuilder - ade::TensptrT chain_rule (ade::FuncptrT op, const ade::TensptrT& local_der, - ade::TensptrT supcomp_grad, size_t arg_idx) const override - { - NodeptrT out; - ade::Opcode opcode = op->get_opcode(); - switch (opcode.code_) - { - case age::ABS: - case age::NEG: - case age::SIN: - case age::COS: - case age::TAN: - case age::EXP: - case age::LOG: - case age::SQRT: - case age::SQUARE: - case age::CUBE: - case age::ROUND: - case age::SIGMOID: - case age::SIGMOID_GRAD: - case age::TANH: - case age::ADD: - case age::MUL: - case age::MAX: - case age::MIN: - case age::POW: - case age::SUB: - case age::DIV: - case age::EQ: - case age::NEQ: - case age::GT: - case age::LT: - case age::RAND_UNIF: - out = tenncor::mul(NodeConverters::to_node(local_der), - NodeConverters::to_node(supcomp_grad)); - break; - case age::REDUCE_MAX: - case age::REDUCE_MIN: - case age::REDUCE_PROD: - case age::REDUCE_SUM: - out = tenncor::mul(NodeConverters::to_node(local_der), reduce_grad( - op->get_children()[0], NodeConverters::to_node(supcomp_grad), arg_idx)); - break; - case age::EXTEND: - out = tenncor::mul(NodeConverters::to_node(local_der), extend_grad( - op.get(), NodeConverters::to_node(supcomp_grad), arg_idx)); - break; - case age::PERMUTE: - out = tenncor::mul(NodeConverters::to_node(local_der), permute_grad( - op.get(), NodeConverters::to_node(supcomp_grad), arg_idx)); - break; - case age::MATMUL: - out = tenncor::reduce_sum( - tenncor::permute( - tenncor::mul(NodeConverters::to_node(local_der), - tenncor::extend(NodeConverters::to_node(supcomp_grad), 2, { - op->get_children()[0]. - get_tensor()->shape().at(0) - })), - 0 == arg_idx ? 
- std::vector{2, 1, 0} : - std::vector{0, 2, 1}), 2, 1); - break; - case age::CONV: - { - ade::Opcode opcode; - auto args = op->get_children(); - ade::CoordptrT fwd_shaper = - args[(size_t)(0 == arg_idx)].get_shaper(); - ade::CoordptrT rev_shaper( - args[arg_idx].get_shaper()->reverse()); - if (arg_idx == 0) - { - opcode = ade::Opcode{"CONV_IMG_GRAD", - age::CONV_IMG_GRAD}; - } - else - { - opcode = ade::Opcode{"CONV_KRN_GRAD", - age::CONV_KRN_GRAD}; - } - ade::CoordptrT full_shaper( - fwd_shaper->connect(*rev_shaper)); - out = make_functor(opcode, { - FuncArg(NodeConverters::to_node(local_der), full_shaper, nullptr), - FuncArg(NodeConverters::to_node(supcomp_grad), rev_shaper, nullptr), - }); - } - break; - case age::SLICE: - { - ade::CoordT slicings; - auto& child = op->get_children()[0]; - child.get_coorder()->forward( - slicings.begin(), slicings.begin()); - ade::DimT dimension = slicings[2]; - ade::DimT dim = child.get_tensor()->shape().at(dimension); - ade::DimT left_pad = slicings[0]; - ade::DimT right_pad = dim - (left_pad + slicings[1]); - out = tenncor::mul(NodeConverters::to_node(local_der), - tenncor::pad(NodeConverters::to_node(supcomp_grad), - std::pair{ - left_pad, right_pad}, dimension)); - } - break; - case age::PAD: - { - ade::CoordT paddings; - auto& child = op->get_children()[0]; - child.get_coorder()->forward( - paddings.begin(), paddings.begin()); - ade::DimT dimension = paddings[2]; - ade::DimT dim = op->shape().at(dimension); - ade::DimT offset = paddings[0]; - ade::DimT extent = dim - paddings[1] - offset; - out = tenncor::mul(NodeConverters::to_node(local_der), - tenncor::slice(NodeConverters::to_node(supcomp_grad), - offset, extent, dimension)); - } - break; - case age::SELECT: - { - if (0 == arg_idx) - { - out = NodeConverters::to_node(local_der); - break; - } - auto condition = NodeConverters::to_node( - op->get_children()[0].get_tensor()); - auto then = NodeConverters::to_node(supcomp_grad); - auto otherwise = make_constant_scalar(0, op->shape()); - if (1 < arg_idx) - { - std::swap(then, otherwise); - } - out = tenncor::if_then_else(condition, then, otherwise); - } - break; - case age::CONV_IMG_GRAD: - logs::fatal("cannot derive CONV_IMG_GRAD"); - break; - case age::CONV_KRN_GRAD: - logs::fatal("cannot derive CONV_KRN_GRAD"); - break; - default: - logs::fatalf("Unknown op %s", opcode.name_.c_str()); - } - return out->get_tensor(); - } - - /// Implementation of iGradientBuilder - ade::TensptrT get_const_one (ade::Shape shape) const override - { - return make_constant_scalar(1, shape)->get_tensor(); - } - - /// Implementation of iGradientBuilder - ade::TensptrT get_const_zero (ade::Shape shape) const override - { - return make_constant_scalar(0, shape)->get_tensor(); - } - - /// Implementation of iGradientBuilder - ade::TensptrT add (ade::TensptrT& lhs, ade::TensptrT& rhs) const override - { - return ade::TensptrT(Functor::get(ade::Opcode{"ADD", age::ADD}, { - identity_map(NodeConverters::to_node(lhs)), - identity_map(NodeConverters::to_node(rhs)) - })); - } -}; - -/// Derive root with respect to target and optimized -template -NodeptrT derive (NodeptrT root, NodeptrT target) -{ - GradientBuilder builder; - ade::TensptrT derivative = builder.derive( - root->get_tensor(), target->get_tensor()); - return NodeConverters::to_node(derivative); -} - -} - -#endif // EAD_GRADER_HPP diff --git a/ead/src/eigen.cpp b/ead/src/eigen.cpp deleted file mode 100644 index 03580dd4a..000000000 --- a/ead/src/eigen.cpp +++ /dev/null @@ -1,17 +0,0 @@ -#include "ead/eigen.hpp" - 
-#ifdef EAD_EIGEN_HPP - -namespace ead -{ - -DimensionsT shape_convert (ade::Shape shape) -{ - DimensionsT dims; - std::copy(shape.begin(), shape.end(), dims.begin()); - return dims; -} - -} - -#endif diff --git a/ead/BUILD.bazel b/eteq/BUILD.bazel similarity index 68% rename from ead/BUILD.bazel rename to eteq/BUILD.bazel index b090a09f6..22ff155a8 100644 --- a/ead/BUILD.bazel +++ b/eteq/BUILD.bazel @@ -37,23 +37,23 @@ filegroup( ######### LIBRARY ######### py_binary( - name = "agen", - srcs = glob(["age/agen.py", "age/plugins/*.py"]), + name = "egen", + srcs = glob(["gen/egen.py", "gen/plugins/*.py"]), deps = ["//gen:gen"], ) config_setting( name = "fast_build", values = { - "define": "EAD_CFG=MIN", + "define": "ETEQ_CFG=MIN", }, ) genrule( name = "generated_ead", srcs = select({ - "//conditions:default": ["//cfg:ead"], - ":fast_build": ["//cfg:ead_min"] + "//conditions:default": ["//cfg:eteq"], + ":fast_build": ["//cfg:eteq_min"] }), outs = [ "generated/api.hpp", @@ -65,41 +65,41 @@ genrule( "generated/pyapi.hpp", "generated/pyapi_tenncor.cpp", ], - tools = ["//ead:agen"], + tools = ["//eteq:egen"], cmd = select({ - "//conditions:default": "$(location //ead:agen) " + - "--cfg $(location //cfg:ead) " + + "//conditions:default": "$(location //eteq:egen) " + + "--cfg $(location //cfg:eteq) " + "--out $(@D)/generated " + "--strip_prefix=$$(dirname $(@D))", - ":fast_build": "$(location //ead:agen) " + - "--cfg $(location //cfg:ead_min) " + + ":fast_build": "$(location //eteq:egen) " + + "--cfg $(location //cfg:eteq_min) " + "--out $(@D)/generated " + "--strip_prefix=$$(dirname $(@D))", }) ) cc_library( - name = "ead", + name = "eteq", hdrs = glob([ "*.hpp", "opt/*.hpp", ]) + [ - "//ead:generated/api.hpp", - "//ead:generated/opcode.hpp", - "//ead:generated/dtype.hpp", - "//ead:generated/pyapi.hpp", + "//eteq:generated/api.hpp", + "//eteq:generated/opcode.hpp", + "//eteq:generated/dtype.hpp", + "//eteq:generated/pyapi.hpp", ], srcs = glob([ "src/*.cpp", ]) + [ - "//ead:generated/api.cpp", - "//ead:generated/opcode.cpp", - "//ead:generated/dtype.cpp", + "//eteq:generated/api.cpp", + "//eteq:generated/opcode.cpp", + "//eteq:generated/dtype.cpp", ], copts = ["-std=c++17"], deps = [ - "//ade:ade", + "//teq:teq", "//pbm:pbm", "//tag:tag", "//opt:opt", @@ -110,15 +110,15 @@ cc_library( pybind_library( name = "tenncor_py", - cc_srcs = ["//ead:generated/pyapi_tenncor.cpp"], - cc_deps = ["//ead:ead"], + cc_srcs = ["//eteq:generated/pyapi_tenncor.cpp"], + cc_deps = ["//eteq:eteq"], ) pybind_library( - name = "ead_py", - cc_srcs = ["//ead:python/ead.cpp"], - cc_deps = ["//ead:ead"], - py_deps = ["//ead:tenncor_py"], + name = "eteq_py", + cc_srcs = ["//eteq:python/eteq.cpp"], + cc_deps = ["//eteq:eteq"], + py_deps = ["//eteq:tenncor_py"], visibility = ["//visibility:public"], ) @@ -126,10 +126,10 @@ pybind_library( cc_test( name = "ctest", - srcs = ["//ead:ctest_srcs"], + srcs = ["//eteq:ctest_srcs"], copts = ["-std=c++17"], deps = [ - "//ead:ead", + "//eteq:eteq", "//dbg:stream_out", "@gtest//:gtest", "@com_github_mingkaic_cppkg//diff:diff", @@ -144,9 +144,9 @@ cc_test( py_test( name = "ptest", - srcs = ["//ead:ptest_srcs"], + srcs = ["//eteq:ptest_srcs"], deps = [ - "//ead:ead_py", + "//eteq:eteq_py", "//testutil:pyunit_util", ], data = ["//testutil:ead_testcase_template"], @@ -158,7 +158,7 @@ cc_binary( name = "benchmark", srcs = ["bm/benchmark.cpp"], deps = [ - "//ead:ead", + "//eteq:eteq", "@com_github_google_benchmark//:benchmark", ], copts = ["-std=c++17"], diff --git a/eteq/README_ETEQ.md 
b/eteq/README_ETEQ.md new file mode 100644 index 000000000..d3e668d1b --- /dev/null +++ b/eteq/README_ETEQ.md @@ -0,0 +1,3 @@ +# ETEQ (Eigen-TEQ) + +TEQ extension using Eigen matrices/tensors and operators. diff --git a/ead/bm/benchmark.cpp b/eteq/bm/benchmark.cpp similarity index 62% rename from ead/bm/benchmark.cpp rename to eteq/bm/benchmark.cpp index 74b22a618..23480f965 100644 --- a/ead/bm/benchmark.cpp +++ b/eteq/bm/benchmark.cpp @@ -2,28 +2,28 @@ #include "benchmark/benchmark.h" -#include "ead/ead.hpp" +#include "eteq/eteq.hpp" -#include "ead/parse.hpp" +#include "eteq/parse.hpp" static std::random_device rnd_device; static std::mt19937 mersenne_engine(rnd_device()); -ade::Shape rand_shape (int n) +teq::Shape rand_shape (int n) { - std::vector slist; - ade::RankT cap = (ade::RankT) std::min(255, n); - for (ade::RankT i = 0; i < ade::rank_cap && cap > 1; - ++i, cap = (ade::RankT) std::min(255, n)) + std::vector slist; + teq::RankT cap = (teq::RankT) std::min(255, n); + for (teq::RankT i = 0; i < teq::rank_cap && cap > 1; + ++i, cap = (teq::RankT) std::min(255, n)) { - std::uniform_int_distribution dist(1, cap); - ade::RankT c = dist(mersenne_engine); + std::uniform_int_distribution dist(1, cap); + teq::RankT c = dist(mersenne_engine); n /= c; slist.push_back(c); } - return ade::Shape(slist); + return teq::Shape(slist); } @@ -44,8 +44,6 @@ BENCHMARK_TEMPLATE(NAME, double)->Range(64, 2048)\ BENCHMARK_TEMPLATE(NAME, float)->Range(64, 2048)\ ->Complexity(benchmark::oN);\ BENCHMARK_TEMPLATE(NAME, int32_t)->Range(64, 2048)\ - ->Complexity(benchmark::oN);\ -BENCHMARK_TEMPLATE(NAME, int64_t)->Range(64, 2048)\ ->Complexity(benchmark::oN); @@ -57,12 +55,12 @@ static void NAME(benchmark::State& state)\ for (auto _ : state)\ {\ state.PauseTiming();\ - ade::Shape shape = rand_shape(n);\ + teq::Shape shape = rand_shape(n);\ std::vector data = random_data(shape.n_elems(), -35, 35);\ std::vector convdata(data.begin(), data.end());\ - ead::VarptrT var = ead::make_variable(convdata.data(), shape, "var");\ - ead::NodeptrT out = FUNC(ead::NodeptrT(var));\ - ead::Session session;\ + eteq::VarptrT var = eteq::make_variable(convdata.data(), shape, "var");\ + eteq::NodeptrT out = FUNC(eteq::NodeptrT(var));\ + eteq::Session session;\ session.track({out->get_tensor()});\ state.ResumeTiming();\ session.update();\ @@ -79,12 +77,12 @@ static void NAME(benchmark::State& state)\ for (auto _ : state)\ {\ state.PauseTiming();\ - ade::Shape shape = rand_shape(n);\ + teq::Shape shape = rand_shape(n);\ std::vector data = random_data(shape.n_elems(), 0, 35);\ std::vector convdata(data.begin(), data.end());\ - ead::VarptrT var = ead::make_variable(convdata.data(), shape, "var");\ - ead::NodeptrT out = FUNC(ead::NodeptrT(var));\ - ead::Session session;\ + eteq::VarptrT var = eteq::make_variable(convdata.data(), shape, "var");\ + eteq::NodeptrT out = FUNC(eteq::NodeptrT(var));\ + eteq::Session session;\ session.track({out->get_tensor()});\ state.ResumeTiming();\ session.update();\ @@ -128,15 +126,15 @@ static void NAME(benchmark::State& state)\ for (auto _ : state)\ {\ state.PauseTiming();\ - ade::Shape shape = rand_shape(n);\ + teq::Shape shape = rand_shape(n);\ std::vector data = random_data(shape.n_elems(), 1, 4);\ std::vector data2 = random_data(shape.n_elems(), 1, 4);\ std::vector convdata(data.begin(), data.end());\ std::vector convdata2(data2.begin(), data2.end());\ - ead::VarptrT var = ead::make_variable(convdata.data(), shape, "var");\ - ead::VarptrT var2 = ead::make_variable(convdata2.data(), shape, "var2");\ - 
ead::NodeptrT out = FUNC(ead::NodeptrT(var), ead::NodeptrT(var2));\ - ead::Session session;\ + eteq::VarptrT var = eteq::make_variable(convdata.data(), shape, "var");\ + eteq::VarptrT var2 = eteq::make_variable(convdata2.data(), shape, "var2");\ + eteq::NodeptrT out = FUNC(eteq::NodeptrT(var), eteq::NodeptrT(var2));\ + eteq::Session session;\ session.track({out->get_tensor()});\ state.ResumeTiming();\ session.update();\ @@ -179,23 +177,23 @@ static void BM_Matmul(benchmark::State& state) for (auto _ : state) { state.PauseTiming(); - std::uniform_int_distribution distc(9, std::min(255ul, n - 1)); - ade::DimT common_dim = distc(mersenne_engine); + std::uniform_int_distribution distc(9, std::min(255ul, n - 1)); + teq::DimT common_dim = distc(mersenne_engine); int remaining = (double) n / common_dim; std::uniform_int_distribution<> distsides(1, std::min(255, remaining)); - ade::DimT left_dim = distsides(mersenne_engine); - ade::DimT right_dim = distsides(mersenne_engine); - ade::Shape leftshape({common_dim, left_dim}); - ade::Shape rightshape({right_dim, common_dim}); + teq::DimT left_dim = distsides(mersenne_engine); + teq::DimT right_dim = distsides(mersenne_engine); + teq::Shape leftshape({common_dim, left_dim}); + teq::Shape rightshape({right_dim, common_dim}); std::vector data = random_data(leftshape.n_elems(), -35, 35); std::vector data2 = random_data(rightshape.n_elems(), -35, 35); std::vector convdata(data.begin(), data.end()); std::vector convdata2(data2.begin(), data2.end()); - ead::VarptrT var = ead::make_variable(convdata.data(), leftshape, "var"); - ead::VarptrT var2 = ead::make_variable(convdata2.data(), rightshape, "var2"); - ead::NodeptrT out = tenncor::matmul( - ead::NodeptrT(var), ead::NodeptrT(var2)); - ead::Session session; + eteq::VarptrT var = eteq::make_variable(convdata.data(), leftshape, "var"); + eteq::VarptrT var2 = eteq::make_variable(convdata2.data(), rightshape, "var2"); + eteq::NodeptrT out = tenncor::matmul( + eteq::NodeptrT(var), eteq::NodeptrT(var2)); + eteq::Session session; session.track({out->get_tensor()}); state.ResumeTiming(); session.update(); @@ -215,37 +213,33 @@ BENCHMARK_TEMPLATE(BM_Matmul, int32_t) ->Range(64, 2048) ->Complexity(benchmark::oN); -BENCHMARK_TEMPLATE(BM_Matmul, int64_t) - ->Range(64, 2048) - ->Complexity(benchmark::oN); - static void BM_MatmulComplex(benchmark::State& state) { - std::vector alist = {3, 2}; - std::vector blist = {4, 3}; - std::vector clist = {2, 4}; - ade::Shape ashape(alist); - ade::Shape bshape(blist); - ade::Shape cshape(clist); + std::vector alist = {3, 2}; + std::vector blist = {4, 3}; + std::vector clist = {2, 4}; + teq::Shape ashape(alist); + teq::Shape bshape(blist); + teq::Shape cshape(clist); - ead::VarptrT a = ead::make_variable(ashape); - ead::VarptrT b = ead::make_variable(bshape); - ead::VarptrT c = ead::make_variable(cshape); + eteq::VarptrT a = eteq::make_variable(ashape); + eteq::VarptrT b = eteq::make_variable(bshape); + eteq::VarptrT c = eteq::make_variable(cshape); - ead::NodeptrT atens(a); - ead::NodeptrT btens(b); - ead::NodeptrT ctens(c); + eteq::NodeptrT atens(a); + eteq::NodeptrT btens(b); + eteq::NodeptrT ctens(c); auto d = tenncor::matmul(atens, btens); auto e = tenncor::matmul(ctens, d); auto f = tenncor::matmul(tenncor::transpose(d), tenncor::transpose(ctens)); auto dest = tenncor::matmul(e, f); - ead::NodeptrT da = ead::derive(dest, atens); - ead::NodeptrT db = ead::derive(dest, btens); - ead::NodeptrT dc = ead::derive(dest, ctens); - ead::Session session; + eteq::NodeptrT da = 
eteq::derive(dest, atens); + eteq::NodeptrT db = eteq::derive(dest, btens); + eteq::NodeptrT dc = eteq::derive(dest, ctens); + eteq::Session session; session.track({ da->get_tensor(), db->get_tensor(), @@ -278,50 +272,50 @@ BENCHMARK(BM_MatmulComplex); static void BM_SigmoidMLP(benchmark::State& state) { - ade::Shape in_shape({10, 3}); - ade::Shape weight0_shape({9, 10}); - ade::Shape bias0_shape({9}); - ade::Shape weight1_shape({5, 9}); - ade::Shape bias1_shape({5}); - ade::Shape out_shape({5,3}); - - ead::VarptrT in = ead::make_variable(in_shape); - ead::VarptrT weight0 = ead::make_variable(weight0_shape); - ead::VarptrT bias0 = ead::make_variable(bias0_shape); - ead::VarptrT weight1 = ead::make_variable(weight1_shape); - ead::VarptrT bias1 = ead::make_variable(bias1_shape); - ead::VarptrT out = ead::make_variable(out_shape); - - ead::NodeptrT intens(in); - ead::NodeptrT weight0tens(weight0); - ead::NodeptrT bias0tens(bias0); - ead::NodeptrT weight1tens(weight1); - ead::NodeptrT bias1tens(bias1); - ead::NodeptrT outtens(out); + teq::Shape in_shape({10, 3}); + teq::Shape weight0_shape({9, 10}); + teq::Shape bias0_shape({9}); + teq::Shape weight1_shape({5, 9}); + teq::Shape bias1_shape({5}); + teq::Shape out_shape({5,3}); + + eteq::VarptrT in = eteq::make_variable(in_shape); + eteq::VarptrT weight0 = eteq::make_variable(weight0_shape); + eteq::VarptrT bias0 = eteq::make_variable(bias0_shape); + eteq::VarptrT weight1 = eteq::make_variable(weight1_shape); + eteq::VarptrT bias1 = eteq::make_variable(bias1_shape); + eteq::VarptrT out = eteq::make_variable(out_shape); + + eteq::NodeptrT intens(in); + eteq::NodeptrT weight0tens(weight0); + eteq::NodeptrT bias0tens(bias0); + eteq::NodeptrT weight1tens(weight1); + eteq::NodeptrT bias1tens(bias1); + eteq::NodeptrT outtens(out); auto layer0 = tenncor::add( tenncor::matmul(intens, weight0tens), tenncor::extend(bias0tens, 1, {3})); auto sig0 = tenncor::div( - ead::make_constant_scalar(1, ade::Shape({9, 3})), - tenncor::add(ead::make_constant_scalar(1, ade::Shape({9, 3})), + eteq::make_constant_scalar(1, teq::Shape({9, 3})), + tenncor::add(eteq::make_constant_scalar(1, teq::Shape({9, 3})), tenncor::exp(tenncor::neg(layer0)))); auto layer1 = tenncor::add( tenncor::matmul(sig0, weight1tens), tenncor::extend(bias1tens, 1, {3})); - auto sig1 = tenncor::div(ead::make_constant_scalar(1, ade::Shape({5, 3})), - tenncor::add(ead::make_constant_scalar(1, ade::Shape({5, 3})), + auto sig1 = tenncor::div(eteq::make_constant_scalar(1, teq::Shape({5, 3})), + tenncor::add(eteq::make_constant_scalar(1, teq::Shape({5, 3})), tenncor::exp(tenncor::neg(layer1)))); auto err = tenncor::pow(tenncor::sub(outtens, sig1), - ead::make_constant_scalar(2, out_shape)); + eteq::make_constant_scalar(2, out_shape)); - auto dw0 = ead::derive(err, weight0tens); - auto db0 = ead::derive(err, bias0tens); - auto dw1 = ead::derive(err, weight1tens); - auto db1 = ead::derive(err, bias1tens); - ead::Session session; + auto dw0 = eteq::derive(err, weight0tens); + auto db0 = eteq::derive(err, bias0tens); + auto dw1 = eteq::derive(err, weight1tens); + auto db1 = eteq::derive(err, bias1tens); + eteq::Session session; session.track({ dw0->get_tensor(), db0->get_tensor(), @@ -361,26 +355,26 @@ BENCHMARK(BM_SigmoidMLP); static void BM_OptimizedSigmoidMLP(benchmark::State& state) { - ade::Shape in_shape({10, 3}); - ade::Shape weight0_shape({9, 10}); - ade::Shape bias0_shape({9}); - ade::Shape weight1_shape({5, 9}); - ade::Shape bias1_shape({5}); - ade::Shape out_shape({5,3}); - - ead::VarptrT in = 
ead::make_variable(in_shape); - ead::VarptrT weight0 = ead::make_variable(weight0_shape); - ead::VarptrT bias0 = ead::make_variable(bias0_shape); - ead::VarptrT weight1 = ead::make_variable(weight1_shape); - ead::VarptrT bias1 = ead::make_variable(bias1_shape); - ead::VarptrT out = ead::make_variable(out_shape); - - ead::NodeptrT intens(in); - ead::NodeptrT weight0tens(weight0); - ead::NodeptrT bias0tens(bias0); - ead::NodeptrT weight1tens(weight1); - ead::NodeptrT bias1tens(bias1); - ead::NodeptrT outtens(out); + teq::Shape in_shape({10, 3}); + teq::Shape weight0_shape({9, 10}); + teq::Shape bias0_shape({9}); + teq::Shape weight1_shape({5, 9}); + teq::Shape bias1_shape({5}); + teq::Shape out_shape({5,3}); + + eteq::VarptrT in = eteq::make_variable(in_shape); + eteq::VarptrT weight0 = eteq::make_variable(weight0_shape); + eteq::VarptrT bias0 = eteq::make_variable(bias0_shape); + eteq::VarptrT weight1 = eteq::make_variable(weight1_shape); + eteq::VarptrT bias1 = eteq::make_variable(bias1_shape); + eteq::VarptrT out = eteq::make_variable(out_shape); + + eteq::NodeptrT intens(in); + eteq::NodeptrT weight0tens(weight0); + eteq::NodeptrT bias0tens(bias0); + eteq::NodeptrT weight1tens(weight1); + eteq::NodeptrT bias1tens(bias1); + eteq::NodeptrT outtens(out); auto layer0 = tenncor::add( tenncor::matmul(intens, weight0tens), @@ -393,16 +387,16 @@ static void BM_OptimizedSigmoidMLP(benchmark::State& state) auto sig1 = tenncor::sigmoid(layer1); auto err = tenncor::pow(tenncor::sub(outtens, sig1), - ead::make_constant_scalar(2, out_shape)); + eteq::make_constant_scalar(2, out_shape)); - auto dw0 = ead::derive(err, weight0tens); - auto db0 = ead::derive(err, bias0tens); - auto dw1 = ead::derive(err, weight1tens); - auto db1 = ead::derive(err, bias1tens); + auto dw0 = eteq::derive(err, weight0tens); + auto db0 = eteq::derive(err, bias0tens); + auto dw1 = eteq::derive(err, weight1tens); + auto db1 = eteq::derive(err, bias1tens); // optimize - auto rules = ead::parse_file("cfg/optimizations.rules"); - ade::TensT roots = { + auto rules = eteq::parse_file("cfg/optimizations.rules"); + teq::TensT roots = { dw0->get_tensor(), db0->get_tensor(), dw1->get_tensor(), @@ -410,7 +404,7 @@ static void BM_OptimizedSigmoidMLP(benchmark::State& state) }; opt::optimize(roots, rules); - ead::Session session; + eteq::Session session; session.track({ dw0->get_tensor(), db0->get_tensor(), diff --git a/ead/constant.hpp b/eteq/constant.hpp similarity index 80% rename from ead/constant.hpp rename to eteq/constant.hpp index 42e6fbd29..8edea3c3b 100644 --- a/ead/constant.hpp +++ b/eteq/constant.hpp @@ -1,12 +1,12 @@ #include "tag/prop.hpp" -#include "ead/ileaf.hpp" -#include "ead/inode.hpp" +#include "eteq/ileaf.hpp" +#include "eteq/inode.hpp" -#ifndef EAD_CONSTANT_HPP -#define EAD_CONSTANT_HPP +#ifndef ETEQ_CONSTANT_HPP +#define ETEQ_CONSTANT_HPP -namespace ead +namespace eteq { static const size_t label_limit = 5; @@ -14,9 +14,9 @@ static const size_t label_limit = 5; template struct Constant final : public iLeaf { - static Constant* get (T* data, ade::Shape shape); + static Constant* get (T* data, teq::Shape shape); - static Constant* get_scalar (T scalar, ade::Shape shape) + static Constant* get_scalar (T scalar, teq::Shape shape) { size_t n = shape.n_elems(); T buffer[n]; @@ -69,7 +69,7 @@ struct Constant final : public iLeaf } private: - Constant (T* data, ade::Shape shape) : + Constant (T* data, teq::Shape shape) : iLeaf(data, shape) {} }; @@ -85,7 +85,7 @@ struct ConstantNode final : public iNode void update (void) 
override {} - ade::TensptrT get_tensor (void) override + teq::TensptrT get_tensor (void) const override { return cst_; } @@ -95,10 +95,10 @@ struct ConstantNode final : public iNode }; template -Constant* Constant::get (T* data, ade::Shape shape) +Constant* Constant::get (T* data, teq::Shape shape) { static bool registered = register_builder,T>( - [](ade::TensptrT tens) + [](teq::TensptrT tens) { return std::make_shared>( std::static_pointer_cast>(tens)); @@ -109,7 +109,7 @@ Constant* Constant::get (T* data, ade::Shape shape) } template -NodeptrT make_constant_scalar (T scalar, ade::Shape shape) +NodeptrT make_constant_scalar (T scalar, teq::Shape shape) { auto out = std::make_shared>( std::shared_ptr>(Constant::get_scalar(scalar, shape)) @@ -119,7 +119,7 @@ NodeptrT make_constant_scalar (T scalar, ade::Shape shape) } template -NodeptrT make_constant (T* data, ade::Shape shape) +NodeptrT make_constant (T* data, teq::Shape shape) { auto out = std::make_shared>( std::shared_ptr>(Constant::get(data, shape)) @@ -130,4 +130,4 @@ NodeptrT make_constant (T* data, ade::Shape shape) } -#endif // EAD_CONSTANT_HPP +#endif // ETEQ_CONSTANT_HPP diff --git a/eteq/coord.hpp b/eteq/coord.hpp new file mode 100644 index 000000000..a151d0ffa --- /dev/null +++ b/eteq/coord.hpp @@ -0,0 +1,59 @@ +#include "teq/coord.hpp" + +#ifndef ETEQ_COORD_HPP +#define ETEQ_COORD_HPP + +namespace eteq +{ + +struct CoordMap final : public teq::iCoordMap +{ + CoordMap (teq::CoordT indices, bool bijective) : + indices_(indices), bijective_(bijective) {} + + teq::iCoordMap* connect (const teq::iCoordMap& rhs) const override + { + return nullptr; + } + + void forward (teq::CoordT::iterator out, + teq::CoordT::const_iterator in) const override + { + std::copy(indices_.begin(), indices_.end(), out); + } + + iCoordMap* reverse (void) const override + { + return nullptr; + } + + std::string to_string (void) const override + { + return fmts::to_string(indices_.begin(), indices_.end()); + } + + void access (std::function cb) const override {} + + bool is_bijective (void) const override + { + return bijective_; + } + +private: + teq::CoordT indices_; + + bool bijective_; +}; + +/// Type of iCoordMap smartpointer +using CoordptrT = std::shared_ptr; + +CoordptrT reduce (std::vector red_dims); + +CoordptrT extend (teq::RankT rank, std::vector ext); + +CoordptrT permute (std::vector dims); + +} + +#endif // ETEQ_COORD_HPP diff --git a/ead/eigen.hpp b/eteq/eigen.hpp similarity index 83% rename from ead/eigen.hpp rename to eteq/eigen.hpp index 69af2ae0a..cc47ba553 100644 --- a/ead/eigen.hpp +++ b/eteq/eigen.hpp @@ -1,14 +1,14 @@ #include "Eigen/Core" #include "unsupported/Eigen/CXX11/Tensor" -#include "ade/shape.hpp" +#include "teq/shape.hpp" -#include "ead/generated/dtype.hpp" +#include "eteq/generated/dtype.hpp" -#ifndef EAD_EIGEN_HPP -#define EAD_EIGEN_HPP +#ifndef ETEQ_EIGEN_HPP +#define ETEQ_EIGEN_HPP -namespace ead +namespace eteq { // eigen shape @@ -141,9 +141,9 @@ inline EigenptrT make_eigenmatrix (DimensionsT dims, } template -inline TensorT make_tensor (const ade::Shape& shape) +inline TensorT make_tensor (const teq::Shape& shape) { - std::array slist; + std::array slist; std::copy(shape.begin(), shape.end(), slist.begin()); TensorT out(slist); out.setZero(); @@ -151,7 +151,7 @@ inline TensorT make_tensor (const ade::Shape& shape) } template -inline MatMapT make_matmap (T* data, const ade::Shape& shape) +inline MatMapT make_matmap (T* data, const teq::Shape& shape) { if (nullptr == data) { @@ -161,9 +161,9 @@ inline MatMapT 
make_matmap (T* data, const ade::Shape& shape) } template -inline TensMapT make_tensmap (T* data, const ade::Shape& shape) +inline TensMapT make_tensmap (T* data, const teq::Shape& shape) { - std::array slist; + std::array slist; std::copy(shape.begin(), shape.end(), slist.begin()); if (nullptr == data) { @@ -173,21 +173,21 @@ inline TensMapT make_tensmap (T* data, const ade::Shape& shape) } template -ade::Shape get_shape (const TensorT& tens) +teq::Shape get_shape (const TensorT& tens) { auto slist = tens.dimensions(); - return ade::Shape(std::vector(slist.begin(), slist.end())); + return teq::Shape(std::vector(slist.begin(), slist.end())); } template -ade::Shape get_shape (const TensMapT& tens) +teq::Shape get_shape (const TensMapT& tens) { auto slist = tens.dimensions(); - return ade::Shape(std::vector(slist.begin(), slist.end())); + return teq::Shape(std::vector(slist.begin(), slist.end())); } -DimensionsT shape_convert (ade::Shape shape); +DimensionsT shape_convert (teq::Shape shape); } -#endif // EAD_EIGEN_HPP +#endif // ETEQ_EIGEN_HPP diff --git a/eteq/eteq.hpp b/eteq/eteq.hpp new file mode 100644 index 000000000..b14f444f2 --- /dev/null +++ b/eteq/eteq.hpp @@ -0,0 +1,5 @@ +#include "eteq/grader.hpp" +#include "eteq/serialize.hpp" +#include "eteq/session.hpp" + +#include "eteq/generated/api.hpp" diff --git a/ead/funcarg.hpp b/eteq/funcarg.hpp similarity index 52% rename from ead/funcarg.hpp rename to eteq/funcarg.hpp index cdf14e32b..07b929ff6 100644 --- a/ead/funcarg.hpp +++ b/eteq/funcarg.hpp @@ -1,19 +1,19 @@ -#include "ade/funcarg.hpp" +#include "teq/funcarg.hpp" -#include "ead/coord.hpp" -#include "ead/inode.hpp" +#include "eteq/coord.hpp" +#include "eteq/inode.hpp" -#ifndef EAD_FUNCARG_HPP -#define EAD_FUNCARG_HPP +#ifndef ETEQ_FUNCARG_HPP +#define ETEQ_FUNCARG_HPP -namespace ead +namespace eteq { template struct FuncArg final { /// Construct FuncArg with specific coorder_ and map_io_ flag - FuncArg (NodeptrT node, ade::CoordptrT shaper, CoordptrT coorder) : + FuncArg (NodeptrT node, teq::CoordptrT shaper, CoordptrT coorder) : node_(node), shaper_(shaper), coorder_(coorder) { if (node_ == nullptr) @@ -23,13 +23,13 @@ struct FuncArg final } /// Return shape of tensor filtered through coordinate mapper - ade::Shape shape (void) const + teq::Shape shape (void) const { - return ade::apply_shaper(shaper_, node_->get_tensor()->shape()); + return teq::apply_shaper(shaper_, node_->shape()); } /// Return tensor being mapped - ade::TensptrT get_tensor (void) const + teq::TensptrT get_tensor (void) const { return node_->get_tensor(); } @@ -40,7 +40,7 @@ struct FuncArg final } /// Return shaper coord map - ade::CoordptrT get_shaper (void) const + teq::CoordptrT get_shaper (void) const { return shaper_; } @@ -64,7 +64,7 @@ struct FuncArg final NodeptrT node_; /// Shape mapper - ade::CoordptrT shaper_; + teq::CoordptrT shaper_; /// Coordinate mapper CoordptrT coorder_; @@ -76,26 +76,26 @@ using ArgsT = std::vector>; template FuncArg identity_map (NodeptrT node) { - return FuncArg(node, ade::identity, nullptr); + return FuncArg(node, teq::identity, nullptr); } template -FuncArg reduce_map (NodeptrT node, ade::RankT offset, ade::RankT ndims) +FuncArg reduce_map (NodeptrT node, teq::RankT offset, teq::RankT ndims) { - if (offset >= ade::rank_cap) + if (offset >= teq::rank_cap) { logs::fatalf("cannot dimensions [%d,...] 
greater or equal to %d", - offset, ade::rank_cap); + offset, teq::rank_cap); } - ade::RankT n = std::min(offset + ndims, ade::rank_cap); - ade::Shape shape = node->get_tensor()->shape(); - std::vector dims; // dims are allowed to be non-contiguous - std::vector slist; + teq::RankT n = std::min(offset + ndims, teq::rank_cap); + teq::Shape shape = node->shape(); + std::vector dims; // dims are allowed to be non-contiguous + std::vector slist; dims.reserve(n); slist.reserve(n); - for (ade::RankT i = offset; i < n; ++i) + for (teq::RankT i = offset; i < n; ++i) { if (shape.at(i) > 1) { @@ -104,74 +104,74 @@ FuncArg reduce_map (NodeptrT node, ade::RankT offset, ade::RankT ndims) slist.push_back(shape.at(i)); } - return FuncArg(node, ade::reduce(offset, slist), reduce(dims)); + return FuncArg(node, teq::reduce(offset, slist), reduce(dims)); } template FuncArg extend_map (NodeptrT node, - ade::RankT rank, std::vector ext) + teq::RankT rank, std::vector ext) { - return FuncArg(node, ade::extend(rank, ext), extend(rank, ext)); + return FuncArg(node, teq::extend(rank, ext), extend(rank, ext)); } template -FuncArg permute_map (NodeptrT node, std::vector order) +FuncArg permute_map (NodeptrT node, std::vector order) { - return FuncArg(node, ade::permute(order), permute(order)); + return FuncArg(node, teq::permute(order), permute(order)); } template -FuncArg slice_map (NodeptrT node, ade::RankT offset, - ade::RankT extent, ade::RankT dimension) +FuncArg slice_map (NodeptrT node, teq::RankT offset, + teq::RankT extent, teq::RankT dimension) { - if (dimension >= ade::rank_cap) + if (dimension >= teq::rank_cap) { logs::fatalf("cannot slice dimension %d beyond rank_cap %d", - dimension, ade::rank_cap); + dimension, teq::rank_cap); } - ade::CoordT slicings; - std::fill(slicings.begin(), slicings.end(), ade::rank_cap); + teq::CoordT slicings; + std::fill(slicings.begin(), slicings.end(), teq::rank_cap); slicings[0] = offset; slicings[1] = extent; slicings[2] = dimension; return FuncArg(node, - std::make_shared( - [=](ade::MatrixT fwd) + std::make_shared( + [=](teq::MatrixT fwd) { - for (ade::RankT i = 0; i < ade::rank_cap; ++i) + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { fwd[i][i] = 1; } - fwd[ade::rank_cap][dimension] = + fwd[teq::rank_cap][dimension] = extent - node->shape().at(dimension); }), std::make_shared(slicings, false)); } template -FuncArg pad_map (NodeptrT node, - const std::pair& padding, - ade::RankT dimension) +FuncArg pad_map (NodeptrT node, + const std::pair& padding, + teq::RankT dimension) { - if (dimension >= ade::rank_cap) + if (dimension >= teq::rank_cap) { logs::fatalf("cannot pad dimension %d beyond rank_cap %d", - dimension, ade::rank_cap); + dimension, teq::rank_cap); } - ade::CoordT paddings; - std::fill(paddings.begin(), paddings.end(), ade::rank_cap); + teq::CoordT paddings; + std::fill(paddings.begin(), paddings.end(), teq::rank_cap); paddings[0] = padding.first; paddings[1] = padding.second; paddings[2] = dimension; return FuncArg(node, - std::make_shared( - [=](ade::MatrixT fwd) + std::make_shared( + [=](teq::MatrixT fwd) { - for (ade::RankT i = 0; i < ade::rank_cap; ++i) + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { fwd[i][i] = 1; } - fwd[ade::rank_cap][dimension] = + fwd[teq::rank_cap][dimension] = padding.first + padding.second; }), std::make_shared(paddings, false)); @@ -179,4 +179,4 @@ FuncArg pad_map (NodeptrT node, } -#endif // EAD_FUNCARG_HPP +#endif // ETEQ_FUNCARG_HPP diff --git a/ead/functor.hpp b/eteq/functor.hpp similarity index 74% rename from 
ead/functor.hpp rename to eteq/functor.hpp index 3588dc18c..fea214442 100644 --- a/ead/functor.hpp +++ b/eteq/functor.hpp @@ -1,21 +1,21 @@ -#include "ade/iopfunc.hpp" +#include "teq/iopfunc.hpp" -#include "ead/generated/opcode.hpp" +#include "eteq/generated/opcode.hpp" -#include "ead/funcarg.hpp" -#include "ead/constant.hpp" -#include "ead/operator.hpp" +#include "eteq/funcarg.hpp" +#include "eteq/constant.hpp" +#include "eteq/operator.hpp" -#ifndef EAD_FUNCTOR_HPP -#define EAD_FUNCTOR_HPP +#ifndef ETEQ_FUNCTOR_HPP +#define ETEQ_FUNCTOR_HPP -namespace ead +namespace eteq { template -struct Functor final : public ade::iOperableFunc +struct Functor final : public teq::iOperableFunc { - static Functor* get (ade::Opcode opcode, ArgsT args); + static Functor* get (teq::Opcode opcode, ArgsT args); static Functor* get (Functor&& other) { @@ -29,7 +29,7 @@ struct Functor final : public ade::iOperableFunc Functor& operator = (Functor&& other) = delete; /// Implementation of iTensor - const ade::Shape& shape (void) const override + const teq::Shape& shape (void) const override { return shape_; } @@ -41,21 +41,21 @@ struct Functor final : public ade::iOperableFunc } /// Implementation of iFunctor - ade::Opcode get_opcode (void) const override + teq::Opcode get_opcode (void) const override { return opcode_; } /// Implementation of iFunctor - const ade::ArgsT& get_children (void) const override + const teq::ArgsT& get_children (void) const override { return args_; } /// Implementation of iFunctor - void update_child (ade::FuncArg arg, size_t index) override + void update_child (teq::FuncArg arg, size_t index) override { - ade::Shape arg_shape = arg.shape(); + teq::Shape arg_shape = arg.shape(); if (false == arg_shape.compatible_after(shape_, 0)) { logs::fatalf("cannot update child %d to argument with " @@ -101,13 +101,13 @@ struct Functor final : public ade::iOperableFunc /// Implementation of iData size_t type_code (void) const override { - return age::get_type(); + return egen::get_type(); } /// Implementation of iData std::string type_label (void) const override { - return age::name_type(age::get_type()); + return egen::name_type(egen::get_type()); } /// Implementation of iData @@ -129,22 +129,22 @@ struct Functor final : public ade::iOperableFunc void initialize (void) { std::vector> datamaps; - for (const ade::FuncArg& arg : args_) + for (const teq::FuncArg& arg : args_) { auto tens = arg.get_tensor(); auto coorder = static_cast(arg.get_coorder().get()); datamaps.push_back(OpArg{ - NodeConverters::to_node(tens)->data(), + TO_NODE(tens)->data(), tens->shape(), coorder }); } - age::typed_exec((age::_GENERATED_OPCODE) opcode_.code_, + egen::typed_exec((egen::_GENERATED_OPCODE) opcode_.code_, shape_, out_, datamaps); } private: - Functor (ade::Opcode opcode, ade::Shape shape, ade::ArgsT args) : + Functor (teq::Opcode opcode, teq::Shape shape, teq::ArgsT args) : opcode_(opcode), shape_(shape), args_(args) { // initialize(); @@ -155,13 +155,13 @@ struct Functor final : public ade::iOperableFunc EigenptrT out_ = nullptr; /// Operation encoding - ade::Opcode opcode_; + teq::Opcode opcode_; /// Shape info built at construction time according to arguments - ade::Shape shape_; + teq::Shape shape_; /// Tensor arguments (and children) - ade::ArgsT args_; + teq::ArgsT args_; }; template @@ -179,7 +179,7 @@ struct FunctorNode final : public iNode func_->update(); } - ade::TensptrT get_tensor (void) override + teq::TensptrT get_tensor (void) const override { return func_; } @@ -189,10 +189,10 @@ struct 
FunctorNode final : public iNode }; template -Functor* Functor::get (ade::Opcode opcode, ArgsT args) +Functor* Functor::get (teq::Opcode opcode, ArgsT args) { static bool registered = register_builder,T>( - [](ade::TensptrT tens) + [](teq::TensptrT tens) { return std::make_shared>( std::static_pointer_cast>(tens)); @@ -206,10 +206,10 @@ Functor* Functor::get (ade::Opcode opcode, ArgsT args) opcode.name_.c_str()); } - ade::Shape shape = args[0].shape(); + teq::Shape shape = args[0].shape(); for (size_t i = 1, n = nargs; i < n; ++i) { - ade::Shape ishape = args[i].shape(); + teq::Shape ishape = args[i].shape(); if (false == ishape.compatible_after(shape, 0)) { logs::fatalf("cannot perform `%s` with incompatible shapes %s " @@ -218,13 +218,13 @@ Functor* Functor::get (ade::Opcode opcode, ArgsT args) } } - ade::ArgsT input_args; + teq::ArgsT input_args; input_args.reserve(nargs); std::transform(args.begin(), args.end(), std::back_inserter(input_args), [](FuncArg& arg) { - return ade::FuncArg( + return teq::FuncArg( arg.get_tensor(), arg.get_shaper(), arg.map_io(), @@ -234,7 +234,7 @@ Functor* Functor::get (ade::Opcode opcode, ArgsT args) } template -NodeptrT make_functor (ade::Opcode opcode, ArgsT args) +NodeptrT make_functor (teq::Opcode opcode, ArgsT args) { return std::make_shared>( std::shared_ptr>(Functor::get(opcode, args)) @@ -243,4 +243,4 @@ NodeptrT make_functor (ade::Opcode opcode, ArgsT args) } -#endif // EAD_FUNCTOR_HPP +#endif // ETEQ_FUNCTOR_HPP diff --git a/ead/age/agen.py b/eteq/gen/egen.py similarity index 88% rename from ead/age/agen.py rename to eteq/gen/egen.py index fc705c642..f19256651 100644 --- a/ead/age/agen.py +++ b/eteq/gen/egen.py @@ -6,15 +6,15 @@ import sys import logging -from ead.age.plugins.dtypes import DTypesPlugin -from ead.age.plugins.opcodes import OpcodesPlugin -from ead.age.plugins.apis import APIsPlugin -from ead.age.plugins.pyapis import PyAPIsPlugin +from eteq.gen.plugins.dtypes import DTypesPlugin +from eteq.gen.plugins.opcodes import OpcodesPlugin +from eteq.gen.plugins.apis import APIsPlugin +from eteq.gen.plugins.pyapis import PyAPIsPlugin from gen.dump import PrintDump, FileDump from gen.generate import generate -prog_description = 'Generate c++ glue layer mapping ADE and some data-processing library.' +prog_description = 'Generate c++ glue layer mapping TEQ and some data-processing library.' 
def parse(cfg_str): args = yaml.safe_load(cfg_str) diff --git a/ead/age/plugins/apis.py b/eteq/gen/plugins/apis.py similarity index 66% rename from ead/age/plugins/apis.py rename to eteq/gen/plugins/apis.py index c42fa922d..6ec576787 100644 --- a/ead/age/plugins/apis.py +++ b/eteq/gen/plugins/apis.py @@ -3,7 +3,7 @@ from gen.plugin_base import PluginBase from gen.file_rep import FileRep -from ead.age.plugins.template import build_template +from eteq.gen.plugins.template import build_template _ns_template = ''' //>>> namespace @@ -26,6 +26,9 @@ //>>> template_namespaces {template_namespaces} +//>>> operators +{operators} + #endif // _GENERATED_API_HPP ''' @@ -35,6 +38,9 @@ //>>> src_namespaces {src_namespaces} +//>>> operators +{operators} + #endif ''' @@ -50,8 +56,8 @@ def _parse_args(arg, accept_def = True): def _nullcheck(args): tens = list(filter(lambda arg: - arg['dtype'] == 'ade::TensptrT' or - arg['dtype'] == 'ead::NodeptrT', args)) + arg['dtype'] == 'teq::TensptrT' or + arg['dtype'] == 'eteq::NodeptrT', args)) if len(tens) == 0: return 'false' varnames = [ten['name'] for ten in tens] @@ -71,11 +77,11 @@ def _decl_func(api): else: template_prefix = 'template <{}>\n'.format(template) - outtype = 'ade::TensptrT' + outtype = 'teq::TensptrT' if isinstance(api['out'], dict) and 'type' in api['out']: outtype = api['out']['type'] - return _decl_tmp.format( + declaration = _decl_tmp.format( template_prefix=template_prefix, comment = comment, outtype = outtype, @@ -85,6 +91,21 @@ def _decl_func(api): for arg in api['args'] ])) + decl_operator = None + op = api.get('operator', '') + if len(op) > 0: + decl_operator = _decl_tmp.format( + template_prefix=template_prefix, + comment = comment, + outtype = outtype, + funcname = 'operator ' + op, + args = ', '.join([ + _parse_args(arg, accept_def=False) + for arg in api['args'] + ])) + + return declaration, decl_operator + _template_defn_tmp = ''' template <{template_args}> {outtype} {funcname} ({args}) @@ -100,7 +121,7 @@ def _template_defn_func(api): if 'template' not in api: return None - outtype = 'ade::TensptrT' + outtype = 'teq::TensptrT' if isinstance(api['out'], dict): if 'type' in api['out']: outtype = api['out']['type'] @@ -108,7 +129,7 @@ def _template_defn_func(api): else: outval = api['out'] - return _template_defn_tmp.format( + temp_definition = _template_defn_tmp.format( template_args = api['template'], outtype = outtype, funcname = api['name'], @@ -119,6 +140,22 @@ def _template_defn_func(api): null_check = _nullcheck(api['args']), block = outval) + temp_operator = None + op = api.get('operator', '') + if len(op) > 0: + temp_operator = _template_defn_tmp.format( + template_args = api['template'], + outtype = outtype, + funcname = 'operator ' + op, + args = ', '.join([ + _parse_args(arg, accept_def=False) + for arg in api['args'] + ]), + null_check = _nullcheck(api['args']), + block = outval) + + return temp_definition, temp_operator + _defn_tmp = ''' {outtype} {funcname} ({args}) {{ @@ -133,7 +170,7 @@ def _defn_func(api): if 'template' in api: return None - outtype = 'ade::TensptrT' + outtype = 'teq::TensptrT' if isinstance(api['out'], dict): if 'type' in api['out']: outtype = api['out']['type'] @@ -141,7 +178,7 @@ def _defn_func(api): else: outval = api['out'] - return _defn_tmp.format( + definition = _defn_tmp.format( outtype = outtype, funcname = api['name'], args = ', '.join([ @@ -151,20 +188,41 @@ def _defn_func(api): null_check = _nullcheck(api['args']), block = outval) + defn_operator = None + op = api.get('operator', '') + if 
len(op) > 0: + defn_operator = _defn_tmp.format( + outtype = outtype, + funcname = 'operator ' + op, + args = ', '.join([ + _parse_args(arg, accept_def=False) + for arg in api['args'] + ]), + null_check = _nullcheck(api['args']), + block = outval) + + return definition, defn_operator + def _handle_api_header(apis): - return [_decl_func(api) for api in apis] + decls = [_decl_func(api) for api in apis] + return [decl[0] for decl in decls], [ + decl[1] for decl in decls if decl[1] is not None] def _handle_api_source(apis): - return [ + defns = [ defn for defn in [_defn_func(api) for api in apis] if defn is not None ] + return [defn[0] for defn in defns], [ + defn[1] for defn in defns if defn[1] is not None] def _handle_api_templates(apis): - return [ + temps = [ defn for defn in [_template_defn_func(api) for api in apis] if defn is not None ] + return [defn[0] for defn in temps], [ + defn[1] for defn in temps if defn[1] is not None] _plugin_id = "API" @@ -183,17 +241,22 @@ def process(self, generated_files, arguments): 'no relevant arguments found for plugin %s', _plugin_id) return - module = globals() api = arguments[plugin_key] hdr_namespaces = [] src_namespaces = [] template_namespaces = [] + hdr_operators = [] + src_operators = [] for namespace in api['namespaces']: definitions = api['namespaces'][namespace] - hdrs = _handle_api_header(definitions) - srcs = _handle_api_source(definitions) - templates = _handle_api_templates(definitions) + hdrs, hdr_ops = _handle_api_header(definitions) + srcs, srcs_ops = _handle_api_source(definitions) + templates, temp_ops = _handle_api_templates(definitions) + + hdr_operators += hdr_ops + hdr_operators += temp_ops + src_operators += srcs_ops if len(hdrs) > 0: hdr_defs = '\n'.join(hdrs) @@ -222,12 +285,15 @@ def process(self, generated_files, arguments): generated_files[api_header] = FileRep( _header_template.format( hdr_namespaces=''.join(hdr_namespaces), - template_namespaces=''.join(template_namespaces)), + template_namespaces=''.join(template_namespaces), + operators=''.join(hdr_operators)), user_includes=api.get('includes', []), internal_refs=[]) generated_files[_src_file] = FileRep( - _source_template.format(src_namespaces=''.join(src_namespaces)), + _source_template.format( + src_namespaces=''.join(src_namespaces), + operators=''.join(src_operators)), user_includes=[], internal_refs=[api_header]) diff --git a/ead/age/plugins/dtypes.py b/eteq/gen/plugins/dtypes.py similarity index 87% rename from ead/age/plugins/dtypes.py rename to eteq/gen/plugins/dtypes.py index 35078f2b9..7b37e0eb5 100644 --- a/ead/age/plugins/dtypes.py +++ b/eteq/gen/plugins/dtypes.py @@ -3,13 +3,13 @@ from gen.plugin_base import PluginBase from gen.file_rep import FileRep -from ead.age.plugins.template import build_template +from eteq.gen.plugins.template import build_template _header_template = ''' #ifndef _GENERATED_DTYPES_HPP #define _GENERATED_DTYPES_HPP -namespace age +namespace egen {{ enum _GENERATED_DTYPE @@ -32,6 +32,14 @@ return BAD_TYPE; }} +template +struct TypeInfo +{{ + static const _GENERATED_DTYPE type = BAD_TYPE; + + TypeInfo (void) = delete; +}}; + //>>> mapping {mapping} @@ -71,7 +79,7 @@ _source_template = ''' #ifdef _GENERATED_DTYPES_HPP -namespace age +namespace egen {{ struct EnumHash @@ -129,11 +137,20 @@ def _handle_enumeration(dtypes): dtype_codes = list(dtypes.keys()) return ',\n '.join(dtype_codes) + ',' +_dtype_mapping_tmp = '''template <> +_GENERATED_DTYPE get_type<{dtype}> (void); + +template <> +struct TypeInfo<{dtype}> +{{ + static const 
_GENERATED_DTYPE type = {code}; + + TypeInfo (void) = delete; +}};''' + def _handle_mapping(dtypes): - _dtype_mapping_tmp = 'template <>\n'+\ - '_GENERATED_DTYPE get_type<{dtype}> (void);' return '\n\n'.join([ - _dtype_mapping_tmp.format(dtype=dtypes[code]) + _dtype_mapping_tmp.format(code=code, dtype=dtypes[code]) for code in dtypes ]) @@ -147,7 +164,7 @@ def _handle_conversions(dtypes): ]) def _handle_cases(dtypes): - _dtype_case_tmp = 'case age::{code}: GENERIC_MACRO({dtype}) break;' + _dtype_case_tmp = 'case egen::{code}: GENERIC_MACRO({dtype}) break;' return '\\\n '.join([ _dtype_case_tmp.format(code=code, dtype=dtypes[code]) for code in dtypes @@ -168,7 +185,7 @@ def _handle_name2types(dtypes): ]) def _handle_typesizes(dtypes): - _size_case_tmp = 'case age::{code}: return sizeof({dtype});' + _size_case_tmp = 'case egen::{code}: return sizeof({dtype});' return '\n '.join([ _size_case_tmp.format(code=code, dtype=dtypes[code]) for code in dtypes diff --git a/ead/age/plugins/opcodes.py b/eteq/gen/plugins/opcodes.py similarity index 97% rename from ead/age/plugins/opcodes.py rename to eteq/gen/plugins/opcodes.py index 9c99e2a9b..f66857202 100644 --- a/ead/age/plugins/opcodes.py +++ b/eteq/gen/plugins/opcodes.py @@ -3,13 +3,13 @@ from gen.plugin_base import PluginBase from gen.file_rep import FileRep -from ead.age.plugins.template import build_template +from eteq.gen.plugins.template import build_template _header_template = ''' #ifndef _GENERATED_OPCODES_HPP #define _GENERATED_OPCODES_HPP -namespace age +namespace egen {{ enum _GENERATED_OPCODE @@ -44,7 +44,7 @@ _source_template = ''' #ifdef _GENERATED_OPCODES_HPP -namespace age +namespace egen {{ struct EnumHash diff --git a/eteq/gen/plugins/pyapis.py b/eteq/gen/plugins/pyapis.py new file mode 100644 index 000000000..d210e04a6 --- /dev/null +++ b/eteq/gen/plugins/pyapis.py @@ -0,0 +1,303 @@ +import re +import logging + +from gen.plugin_base import PluginBase +from gen.file_rep import FileRep + +from eteq.gen.plugins.template import build_template +from eteq.gen.plugins.apis import api_header + +_pybindt = 'PybindT' + +_header_template = ''' +// type to replace template arguments in pybind +using {pybind} = {pybind_type}; +//>>> ^ pybind, pybind_type +''' + +_source_template = ''' +namespace py = pybind11; + +namespace pyegen +{{ + +//>>> name_mangling +{name_mangling} + +}} + +//>>> modname +PYBIND11_MODULE({modname}, m_{modname}) +{{ + m_{modname}.doc() = "pybind for {modname} api"; + + //>>> modname + py::class_ tensor(m_{modname}, "Tensor"); + + //>>> defs + {defs} +}} +''' + +def _sub_pybind(stmt, source): + _type_pattern = '([^\\w]){}([^\\w])'.format(source) + _type_replace = '\\1{}\\2'.format(_pybindt) + return re.sub(_type_pattern, _type_replace, ' ' + stmt + ' ').strip() + +def _strip_template_prefix(template): + _template_prefixes = ['typename', 'class'] + for template_prefix in _template_prefixes: + if template.startswith(template_prefix): + return template[len(template_prefix):].strip() + # todo: parse valued templates variable (e.g.: size_t N) + return template + +_func_fmt = ''' +{outtype} {funcname}_{idx} ({param_decl}) +{{ + return {namespace}::{funcname}({args}); +}} +''' +def _mangle_func(idx, api, namespace): + outtype = 'teq::TensptrT' + if isinstance(api['out'], dict) and 'type' in api['out']: + outtype = api['out']['type'] + + out = _func_fmt.format( + outtype=outtype, + namespace=namespace, + funcname=api['name'], + idx=idx, + param_decl=', '.join([arg['dtype'] + ' ' + arg['name'] + for arg in api['args']]), + 
args=', '.join([arg['name'] for arg in api['args']])) + + for typenames in api.get('template', '').split(','): + out = _sub_pybind(out, _strip_template_prefix(typenames)) + return out + +def _handle_pybind(pybind_type): + return _pybindt + +def _handle_pybind_type(pybind_type): + return pybind_type + +def _handle_name_mangling(pybind_type, apis, namespace): + return '\n\n'.join([ + _mangle_func(i, api, namespace) + for i, api in enumerate(apis)] + ) + +def _parse_header_args(arg): + if 'default' in arg: + defext = ' = {}'.format(arg['default']) + else: + defext = '' + return '{dtype} {name}{defext}'.format( + dtype = arg['dtype'], + name = arg['name'], + defext = defext) + +def _parse_description(arg): + if 'description' in arg: + description = ': {}'.format(arg['description']) + else: + description = '' + outtype = 'teq::TensptrT' + if isinstance(arg['out'], dict) and 'type' in arg['out']: + outtype = arg['out']['type'] + return '"{outtype} {func} ({args}){description}"'.format( + outtype = outtype, + func = arg['name'], + args = ', '.join([_parse_header_args(arg) for arg in arg['args']]), + description = description) + +def _parse_pyargs(arg): + if 'default' in arg: + defext = ' = {}'.format(arg['default']) + else: + defext = '' + return 'py::arg("{name}"){defext}'.format( + name = arg['name'], + defext = defext) + +_py_op = { + ('-', 1): '__neg__', + ('+', 2): '__add__', + ('*', 2): '__mul__', + ('-', 2): '__sub__', + ('/', 2): '__truediv__', + ('==', 2): '__eq__', + ('!=', 2): '__ne__', + ('<', 2): '__lt__', + ('>', 2): '__gt__', +} + +__py_op_rev = { + '+': '__radd__', + '*': '__rmul__', + '-': '__rsub__', + '/': '__rtruediv__', +} + +_def_op_tmpl = '{label}.def("{pyop}", []({params}){{ return {operator}; }}, py::is_operator());' + +def _def_op(t2labels, api): + templates = [_strip_template_prefix(typenames) + for typenames in api.get('template', '').split(',')] + + rep_type = 'teq::TensptrT' + label = 'tensor' + if isinstance(api['out'], dict) and 'type' in api['out']: + rep_type = api['out']['type'] + label_type = rep_type + for template in templates: + label_type = _sub_pybind(label_type, template) + label = t2labels.get(label_type, label) + + op = api['operator'] + args = [arg['name'] for arg in api['args']] + if len(args) == 1: + operator = op + args[0] + else: + operator = op.join(args) + + outtype = 'teq::TensptrT' + if isinstance(api['out'], dict) and 'type' in api['out']: + outtype = api['out']['type'] + + params = [arg['dtype'] + ' ' + arg['name'] for arg in api['args']] + if len(api['args']) > 1 and\ + api['args'][0]['dtype'] != outtype and op in __py_op_rev: + pyop = __py_op_rev[op] + params = params[::-1] + else: + pyop = _py_op[(op, len(api['args']))] + + out = _def_op_tmpl.format( + label = label, + pyop = pyop, + params = ', '.join(params), + operator = operator, + ) + + for typenames in api.get('template', '').split(','): + out = _sub_pybind(out, _strip_template_prefix(typenames)) + return out + +def _handle_defs(pybind_type, apis, module_name, first_module): + _mdef_tmpl = 'm_{module_name}.def("{func}", '+\ + '&pyegen::{func}_{idx}, {description}, {pyargs});' + + _class_def_tmpl = 'py::class_::type,{outtype}> {label}(m_{module_name}, "{name}");' + + outtypes = set() + for api in apis: + templates = [_strip_template_prefix(typenames) + for typenames in api.get('template', '').split(',')] + if isinstance(api['out'], dict) and 'type' in api['out']: + outtype = api['out']['type'] + for temp in templates: + outtype = _sub_pybind(outtype, temp) + outtypes.add(outtype) + + 
class_defs = [] + atype_labels = {} + if first_module: + for i, outtype in enumerate(outtypes): + if 'teq::TensptrT' == outtype: + continue + label = 'class_{}'.format(i) + atype_labels[outtype] = label + class_defs.append(_class_def_tmpl.format( + module_name=module_name, + outtype=outtype, + label=label, + name=outtype.split('::')[-1])) + + func_defs = [_mdef_tmpl.format( + module_name=module_name, + func=api['name'], idx=i, + description=_parse_description(api), + pyargs=', '.join([_parse_pyargs(arg) for arg in api['args']])) + for i, api in enumerate(apis)] + + operator_defs = [_def_op(atype_labels, api) for api in apis if 'operator' in api] + + defs = [ + '\n '.join(class_defs), + '\n\n '.join(func_defs), + '\n\n '.join(operator_defs), + ] + return '\n\n '.join([d for d in defs if len(d) > 0]) + +_plugin_id = 'PYBINDER' + +class PyAPIsPlugin: + + def plugin_id(self): + return _plugin_id + + def process(self, generated_files, arguments): + _hdr_file = 'pyapi.hpp' + _submodule_def = ' py::module m_{name} = m_{prename}.def_submodule("{submod}", "A submodule of \'{prename}\'");\n ' + + plugin_key = 'api' + if plugin_key not in arguments: + logging.warning( + 'no relevant arguments found for plugin %s', _plugin_id) + return + + api = arguments[plugin_key] + bindtype = api.get('pybind_type', 'double') + + generated_files[_hdr_file] = FileRep( + _header_template.format( + pybind=_pybindt, pybind_type=bindtype), + user_includes=[], internal_refs=[]) + + contents = {} + for namespace in api['namespaces']: + definitions = api['namespaces'][namespace] + if namespace == '' or namespace == '_': + module = 'egen' + namespace = '' + else: + module = namespace + uwraps = _handle_name_mangling(bindtype, definitions, namespace) + + mods = module.split('::') + mod = mods[0] + modname = '_'.join(mods) + mod_def = '' + if len(mods) > 1: + mod_def = _submodule_def.format( + name=modname, prename='_'.join(mods[:-1]), submod=mods[-1]) + defs = mod_def + _handle_defs(bindtype, definitions, modname, mod not in contents) + if mod in contents: + existing_uwraps, existing_defs = contents[mod] + contents[mod] = ( + existing_uwraps + '\n\n' + uwraps, + existing_defs + '\n\n' + defs) + else: + contents[mod] = (uwraps, defs) + + src_file_tmpl = 'pyapi_{}.cpp' + for mod in contents: + name_mangling, defs = contents[mod] + src_file = src_file_tmpl.format(mod) + generated_files[src_file] = FileRep( + _source_template.format( + modname=mod, + name_mangling=''.join(name_mangling), + defs=''.join(defs)), + user_includes=[ + '"pybind11/pybind11.h"', + '"pybind11/stl.h"', + '"pybind11/operators.h"', + ], + internal_refs=[_hdr_file, api_header]) + + return generated_files + +PluginBase.register(PyAPIsPlugin) diff --git a/ead/age/plugins/template.py b/eteq/gen/plugins/template.py similarity index 100% rename from ead/age/plugins/template.py rename to eteq/gen/plugins/template.py diff --git a/eteq/grader.hpp b/eteq/grader.hpp new file mode 100644 index 000000000..2fd8e8003 --- /dev/null +++ b/eteq/grader.hpp @@ -0,0 +1,426 @@ +/// +/// grader.hpp +/// eteq +/// +/// Purpose: +/// Implement eteq gradient definition for supported operations +/// + +#include + +#include "teq/grad_def.hpp" + +#include "eteq/generated/api.hpp" + +#include "eteq/constant.hpp" + +#ifndef ETEQ_GRADER_HPP +#define ETEQ_GRADER_HPP + +namespace eteq +{ + +template +NodeptrT reduce_grad (const teq::FuncArg& child, + NodeptrT bwd, size_t idx) +{ + const teq::Shape& shape = child.get_tensor()->shape(); + teq::CoordptrT 
revshaper(child.get_shaper()->reverse()); + CoordptrT revcoord; + { + auto coorder = child.get_coorder(); + assert(nullptr != coorder); + teq::CoordT dims; + coorder->forward(dims.begin(), dims.begin()); + teq::CoordT bcast; + std::fill(bcast.begin(), bcast.end(), 1); + for (teq::RankT d : dims) + { + if (d < teq::rank_cap) + { + bcast[d] = shape.at(d); + } + } + revcoord = std::make_shared(bcast, false); + } + return make_functor(teq::Opcode{"EXTEND",egen::EXTEND}, { + FuncArg(bwd, revshaper, revcoord) + }); +} + +template +NodeptrT permute_grad (teq::iFunctor* fwd, + NodeptrT bwd, size_t idx) +{ + const auto& child = fwd->get_children()[0]; + teq::CoordptrT revshaper(child.get_shaper()->reverse()); + CoordptrT revcoord; + { + auto coorder = child.get_coorder(); + assert(nullptr != coorder); + teq::CoordT dims; + coorder->forward(dims.begin(), dims.begin()); + + teq::CoordT order; + for (teq::RankT i = 0; i < teq::rank_cap; ++i) + { + order[dims[i]] = i; + } + revcoord = std::make_shared(order, true); + } + return make_functor(teq::Opcode{"PERMUTE",egen::PERMUTE},{ + FuncArg(bwd, revshaper, revcoord) + }); +} + +template +NodeptrT extend_grad (teq::iFunctor* fwd, + NodeptrT bwd, size_t idx) +{ + const auto& child = fwd->get_children()[0]; + teq::CoordptrT revshaper(child.get_shaper()->reverse()); + CoordptrT revcoord; + { + auto coorder = child.get_coorder(); + assert(nullptr != coorder); + teq::CoordT dims; + coorder->forward(dims.begin(), dims.begin()); + std::vector red_dims; + for (teq::RankT i = 0; i < teq::rank_cap; ++i) + { + if (dims[i] > 1) + { + red_dims.push_back(i); + } + } + revcoord = reduce(red_dims); + } + return make_functor(teq::Opcode{"REDUCE_SUM",egen::REDUCE_SUM},{ + FuncArg(bwd, revshaper, revcoord) + }); +} + +template +struct GradientBuilder final : public teq::iGradientBuilder +{ + /// Implementation of iGradientBuilder + teq::TensptrT local_derivative (teq::FuncptrT op, + size_t arg_idx) const override + { + const teq::ArgsT& args = op->get_children(); + NodeptrT out = nullptr; + teq::Opcode opcode = op->get_opcode(); + switch ((egen::_GENERATED_OPCODE) opcode.code_) + { + case egen::ABS: + out = TO_NODE(args[0].get_tensor()) / TO_NODE(op); + break; + case egen::NEG: + out = make_constant_scalar( + -1, args[0].get_tensor()->shape()); + break; + case egen::SIN: + out = tenncor::cos(TO_NODE(args[0].get_tensor())); + break; + case egen::COS: + out = -tenncor::sin(TO_NODE(args[0].get_tensor())); + break; + case egen::TAN: + out = (T) 1 / tenncor::pow( + tenncor::cos(TO_NODE(args[0].get_tensor())), (T) 2); + break; + case egen::EXP: + out = TO_NODE(op); + break; + case egen::LOG: + out = (T) 1 / TO_NODE(args[0].get_tensor()); + break; + case egen::SQRT: + out = (T) 1 / ((T) 2 * TO_NODE(op)); + break; + case egen::SQUARE: + out = (T) 2 * TO_NODE(args[0].get_tensor()); + break; + case egen::CUBE: + out = (T) 3 * tenncor::square(TO_NODE(args[0].get_tensor())); + break; + case egen::SIGMOID: + out = tenncor::sigmoid_grad( + TO_NODE(args[0].get_tensor())); + break; + case egen::SIGMOID_GRAD: + out = TO_NODE(op) * ((T) 1 - (T) 2 * + tenncor::sigmoid(TO_NODE(args[0].get_tensor()))); + break; + case egen::TANH: + out = (T) 1 - tenncor::square(TO_NODE(op)); + break; + case egen::ROUND: + case egen::REDUCE_SUM: + case egen::EXTEND: + case egen::PERMUTE: + case egen::ADD: + case egen::SLICE: + case egen::PAD: + out = make_constant_scalar(1, args[0].get_tensor()->shape()); + break; + case egen::MUL: + case egen::CONV: + out = TO_NODE(args[(size_t)(arg_idx==0)].get_tensor()); + 
break; + case egen::MAX: + case egen::MIN: + out = TO_NODE(op) == TO_NODE(args[arg_idx].get_tensor()); + break; + case egen::POW: + out = arg_idx==0 ? + TO_NODE(args[1].get_tensor()) * + tenncor::pow( + TO_NODE(args[0].get_tensor()), + TO_NODE(args[1].get_tensor()) - (T) 1 + ) : + tenncor::log(TO_NODE(args[0].get_tensor())) * + TO_NODE(op); + break; + case egen::SUB: + out = make_constant_scalar(arg_idx == 0 ? + 1 : -1, args[0].get_tensor()->shape()); + break; + case egen::DIV: + { + auto denom = TO_NODE(args[1].get_tensor()); + out = arg_idx==0 ? + (T) 1 / denom : + -TO_NODE(args[0].get_tensor()) / denom / denom; + } + break; + case egen::EQ: + case egen::NEQ: + case egen::GT: + case egen::LT: + case egen::RAND_UNIF: + case egen::SELECT: + out = make_constant_scalar(0, args[0].get_tensor()->shape()); + break; + case egen::REDUCE_PROD: // todo: prevent divide by zero + out = + reduce_grad(args[0], TO_NODE(op), arg_idx) / + TO_NODE(args[0].get_tensor()); + break; + case egen::REDUCE_MAX: + case egen::REDUCE_MIN: + out = + reduce_grad(args[0], TO_NODE(op), arg_idx) == + TO_NODE(args[0].get_tensor()); + break; + case egen::MATMUL: + { + NodeptrT lhs = TO_NODE(args[0].get_tensor()); + NodeptrT rhs = TO_NODE(args[1].get_tensor()); + out = 0 == arg_idx ? + // ext_rhs + tenncor::permute(tenncor::extend(rhs, 2, { + lhs->shape().at(1)}), {0,2,1}) : + // ext_lhs + tenncor::permute(tenncor::extend(lhs, 2, { + rhs->shape().at(0)}), {2,1,0}); + } + break; + case egen::CONV_IMG_GRAD: + logs::fatal("cannot derive CONV_IMG_GRAD"); + break; + case egen::CONV_KRN_GRAD: + logs::fatal("cannot derive CONV_KRN_GRAD"); + break; + default: + logs::fatalf("Unknown op %s", opcode.name_.c_str()); + } + return out->get_tensor(); + } + + /// Implementation of iGradientBuilder + teq::TensptrT chain_rule (teq::FuncptrT op, const teq::TensptrT& local_der, + teq::TensptrT supcomp_grad, size_t arg_idx) const override + { + NodeptrT out = nullptr; + teq::Opcode opcode = op->get_opcode(); + switch (opcode.code_) + { + case egen::ABS: + case egen::NEG: + case egen::SIN: + case egen::COS: + case egen::TAN: + case egen::EXP: + case egen::LOG: + case egen::SQRT: + case egen::SQUARE: + case egen::CUBE: + case egen::ROUND: + case egen::SIGMOID: + case egen::SIGMOID_GRAD: + case egen::TANH: + case egen::ADD: + case egen::MUL: + case egen::MAX: + case egen::MIN: + case egen::POW: + case egen::SUB: + case egen::DIV: + case egen::EQ: + case egen::NEQ: + case egen::GT: + case egen::LT: + case egen::RAND_UNIF: + out = TO_NODE(local_der) * + TO_NODE(supcomp_grad); + break; + case egen::REDUCE_MAX: + case egen::REDUCE_MIN: + case egen::REDUCE_PROD: + case egen::REDUCE_SUM: + out = TO_NODE(local_der) * reduce_grad( + op->get_children()[0], TO_NODE(supcomp_grad), arg_idx); + break; + case egen::EXTEND: + out = TO_NODE(local_der) * extend_grad( + op.get(), TO_NODE(supcomp_grad), arg_idx); + break; + case egen::PERMUTE: + out = TO_NODE(local_der) * permute_grad( + op.get(), TO_NODE(supcomp_grad), arg_idx); + break; + case egen::MATMUL: + out = tenncor::reduce_sum( + tenncor::permute( + TO_NODE(local_der) * + tenncor::extend(TO_NODE(supcomp_grad), 2, { + op->get_children()[0]. + get_tensor()->shape().at(0) + }), + 0 == arg_idx ? 
+ std::vector{2, 1, 0} : + std::vector{0, 2, 1}), 2, 1); + break; + case egen::CONV: + { + teq::Opcode opcode; + auto args = op->get_children(); + teq::CoordptrT fwd_shaper = + args[(size_t)(0 == arg_idx)].get_shaper(); + teq::CoordptrT rev_shaper( + args[arg_idx].get_shaper()->reverse()); + if (arg_idx == 0) + { + opcode = teq::Opcode{"CONV_IMG_GRAD", + egen::CONV_IMG_GRAD}; + } + else + { + opcode = teq::Opcode{"CONV_KRN_GRAD", + egen::CONV_KRN_GRAD}; + } + teq::CoordptrT full_shaper( + fwd_shaper->connect(*rev_shaper)); + out = make_functor(opcode, { + FuncArg(TO_NODE(local_der), full_shaper, nullptr), + FuncArg(TO_NODE(supcomp_grad), rev_shaper, nullptr), + }); + } + break; + case egen::SLICE: + { + teq::CoordT slicings; + auto& child = op->get_children()[0]; + child.get_coorder()->forward( + slicings.begin(), slicings.begin()); + teq::DimT dimension = slicings[2]; + teq::DimT dim = child.get_tensor()->shape().at(dimension); + teq::DimT left_pad = slicings[0]; + teq::DimT right_pad = dim - (left_pad + slicings[1]); + out = TO_NODE(local_der) * + tenncor::pad(TO_NODE(supcomp_grad), + std::pair{ + left_pad, right_pad}, dimension); + } + break; + case egen::PAD: + { + teq::CoordT paddings; + auto& child = op->get_children()[0]; + child.get_coorder()->forward( + paddings.begin(), paddings.begin()); + teq::DimT dimension = paddings[2]; + teq::DimT dim = op->shape().at(dimension); + teq::DimT offset = paddings[0]; + teq::DimT extent = dim - paddings[1] - offset; + out = TO_NODE(local_der) * + tenncor::slice(TO_NODE(supcomp_grad), + offset, extent, dimension); + } + break; + case egen::SELECT: + { + if (0 == arg_idx) + { + out = TO_NODE(local_der); + break; + } + auto condition = TO_NODE( + op->get_children()[0].get_tensor()); + auto then = TO_NODE(supcomp_grad); + auto otherwise = make_constant_scalar(0, op->shape()); + if (1 < arg_idx) + { + std::swap(then, otherwise); + } + out = tenncor::if_then_else(condition, then, otherwise); + } + break; + case egen::CONV_IMG_GRAD: + logs::fatal("cannot derive CONV_IMG_GRAD"); + break; + case egen::CONV_KRN_GRAD: + logs::fatal("cannot derive CONV_KRN_GRAD"); + break; + default: + logs::fatalf("Unknown op %s", opcode.name_.c_str()); + } + return out->get_tensor(); + } + + /// Implementation of iGradientBuilder + teq::TensptrT get_const_one (teq::Shape shape) const override + { + return make_constant_scalar(1, shape)->get_tensor(); + } + + /// Implementation of iGradientBuilder + teq::TensptrT get_const_zero (teq::Shape shape) const override + { + return make_constant_scalar(0, shape)->get_tensor(); + } + + /// Implementation of iGradientBuilder + teq::TensptrT add (teq::TensptrT& lhs, teq::TensptrT& rhs) const override + { + return teq::TensptrT(Functor::get(teq::Opcode{"ADD", egen::ADD}, { + identity_map(TO_NODE(lhs)), + identity_map(TO_NODE(rhs)) + })); + } +}; + +/// Derive root with respect to target and optimized +template +NodeptrT derive (NodeptrT root, NodeptrT target) +{ + GradientBuilder builder; + teq::TensptrT derivative = builder.derive( + root->get_tensor(), target->get_tensor()); + return TO_NODE(derivative); +} + +} + +#endif // ETEQ_GRADER_HPP diff --git a/ead/ileaf.hpp b/eteq/ileaf.hpp similarity index 70% rename from ead/ileaf.hpp rename to eteq/ileaf.hpp index ceeed87a6..80dd0c4c9 100644 --- a/ead/ileaf.hpp +++ b/eteq/ileaf.hpp @@ -1,28 +1,28 @@ /// /// ileaf.hpp -/// ead +/// eteq /// /// Purpose: /// Define interfaces and building blocks for an equation graph /// -#include "ade/ileaf.hpp" +#include "teq/ileaf.hpp" -#include 
"ead/eigen.hpp" +#include "eteq/eigen.hpp" -#ifndef EAD_ILEAF_HPP -#define EAD_ILEAF_HPP +#ifndef ETEQ_ILEAF_HPP +#define ETEQ_ILEAF_HPP -namespace ead +namespace eteq { template -struct iLeaf : public ade::iLeaf +struct iLeaf : public teq::iLeaf { virtual ~iLeaf (void) = default; /// Implementation of iTensor - const ade::Shape& shape (void) const override + const teq::Shape& shape (void) const override { return shape_; } @@ -42,13 +42,13 @@ struct iLeaf : public ade::iLeaf /// Implementation of iData size_t type_code (void) const override { - return age::get_type(); + return egen::get_type(); } /// Implementation of iData std::string type_label (void) const override { - return age::name_type(age::get_type()); + return egen::name_type(egen::get_type()); } /// Implementation of iData @@ -58,7 +58,7 @@ struct iLeaf : public ade::iLeaf } protected: - iLeaf (T* data, ade::Shape shape) : + iLeaf (T* data, teq::Shape shape) : data_(make_tensmap(data, shape)), shape_(shape) {} @@ -66,9 +66,9 @@ struct iLeaf : public ade::iLeaf TensorT data_; /// Shape utility to avoid excessive conversion between data_.dimensions() - ade::Shape shape_; + teq::Shape shape_; }; } -#endif // EAD_ILEAF_HPP +#endif // ETEQ_ILEAF_HPP diff --git a/ead/inode.hpp b/eteq/inode.hpp similarity index 61% rename from ead/inode.hpp rename to eteq/inode.hpp index 55fbc26be..43398129f 100644 --- a/ead/inode.hpp +++ b/eteq/inode.hpp @@ -1,30 +1,38 @@ #include "estd/estd.hpp" -#include "ade/itensor.hpp" +#include "teq/itensor.hpp" -#include "ead/eigen.hpp" +#include "eteq/eigen.hpp" -#ifndef EAD_INODE_HPP -#define EAD_INODE_HPP +#ifndef ETEQ_INODE_HPP +#define ETEQ_INODE_HPP -namespace ead +namespace eteq { template struct iNode { + static_assert(egen::TypeInfo::type != egen::BAD_TYPE, + "Cannot create node of unknown type"); + virtual ~iNode (void) = default; - ade::Shape shape (void) + teq::Shape shape (void) { return get_tensor()->shape(); } + std::string to_string (void) const + { + return get_tensor()->to_string(); + } + virtual T* data (void) = 0; virtual void update (void) = 0; - virtual ade::TensptrT get_tensor (void) = 0; + virtual teq::TensptrT get_tensor (void) const = 0; }; template @@ -34,19 +42,19 @@ template using NodesT = std::vector>; template -using NodeBuilderF = std::function(ade::TensptrT)>; +using NodeBuilderF = std::function(teq::TensptrT)>; template struct NodeConverters final { static std::unordered_map> builders_; - static NodeptrT to_node (ade::TensptrT tens) + static NodeptrT to_node (teq::TensptrT tens) { const std::type_info& tp = typeid(*tens); return estd::must_getf(builders_, tp.hash_code(), "unknown tensor type `%s` with `%s` dtype", - tp.name(), age::name_type(age::get_type()).c_str())(tens); + tp.name(), egen::name_type(egen::get_type()).c_str())(tens); } NodeConverters (void) = delete; @@ -63,6 +71,8 @@ bool register_builder (NodeBuilderF builder) emplace(tp.hash_code(), builder).second; } +#define TO_NODE(tens) NodeConverters::to_node(tens) + } -#endif // EAD_INODE_HPP +#endif // ETEQ_INODE_HPP diff --git a/ead/operator.hpp b/eteq/operator.hpp similarity index 84% rename from ead/operator.hpp rename to eteq/operator.hpp index 222e1ad7d..03d1a8c42 100644 --- a/ead/operator.hpp +++ b/eteq/operator.hpp @@ -1,112 +1,112 @@ /// /// operator.hpp -/// ead +/// eteq /// /// Purpose: /// Define functions manipulating tensor data values /// No function in this file makes any attempt to check for nullptrs /// -#include "ead/eigen.hpp" -#include "ead/coord.hpp" -#include "ead/random.hpp" +#include 
"eteq/eigen.hpp" +#include "eteq/coord.hpp" +#include "eteq/random.hpp" -#ifndef EAD_OPERATOR_HPP -#define EAD_OPERATOR_HPP +#ifndef ETEQ_OPERATOR_HPP +#define ETEQ_OPERATOR_HPP -namespace ead +namespace eteq { -static inline bool is_2d (ade::Shape shape) +static inline bool is_2d (teq::Shape shape) { return std::all_of(shape.begin() + 2, shape.end(), - [](ade::DimT dim) { return 1 == dim; }); + [](teq::DimT dim) { return 1 == dim; }); } template struct OpArg final { - OpArg (T* data, ade::Shape shape, CoordMap* coorder) : + OpArg (T* data, teq::Shape shape, CoordMap* coorder) : data_(data), shape_(shape), coorder_(coorder) {} T* data_; - ade::Shape shape_; + teq::Shape shape_; CoordMap* coorder_ = nullptr; }; template using ReduceOutT = Eigen::TensorReductionOp,const TensMapT>; + const std::array,const TensMapT>; namespace internal { template -inline std::array dim_copy (std::vector d) +inline std::array dim_copy (std::vector d) { - std::array out; + std::array out; auto it = d.begin(); std::copy(it, it + N, out.begin()); return out; } -#define _EAD_INTERNAL_V2A_CASE(N, PROCESS, RED)\ +#define _ETEQ_INTERNAL_V2A_CASE(N, PROCESS, RED)\ case N: return make_eigentensor,TensMapT>(\ shape_convert(outshape), [vdims](TensMapT& in) {\ -return in.PROCESS(ead::internal::dim_copy(vdims)); },\ +return in.PROCESS(::eteq::internal::dim_copy(vdims)); },\ make_tensmap(in.data_, in.shape_)); -#define _EAD_INTERNAL_V2A(PROCESS, RED) {\ +#define _ETEQ_INTERNAL_V2A(PROCESS, RED) {\ assert(nullptr != in.coorder_);\ - ade::CoordT coord;\ + teq::CoordT coord;\ in.coorder_->forward(coord.begin(), coord.begin());\ - std::vector vdims;\ + std::vector vdims;\ std::copy_if(coord.begin(), coord.end(), std::back_inserter(vdims),\ - [](ade::RankT d) { return d < ade::rank_cap; });\ + [](teq::RankT d) { return d < teq::rank_cap; });\ switch (vdims.size()) {\ - _EAD_INTERNAL_V2A_CASE(0, PROCESS, RED)\ - _EAD_INTERNAL_V2A_CASE(1, PROCESS, RED)\ - _EAD_INTERNAL_V2A_CASE(2, PROCESS, RED)\ - _EAD_INTERNAL_V2A_CASE(3, PROCESS, RED)\ - _EAD_INTERNAL_V2A_CASE(4, PROCESS, RED)\ - _EAD_INTERNAL_V2A_CASE(5, PROCESS, RED)\ - _EAD_INTERNAL_V2A_CASE(6, PROCESS, RED)\ - _EAD_INTERNAL_V2A_CASE(7, PROCESS, RED)\ + _ETEQ_INTERNAL_V2A_CASE(0, PROCESS, RED)\ + _ETEQ_INTERNAL_V2A_CASE(1, PROCESS, RED)\ + _ETEQ_INTERNAL_V2A_CASE(2, PROCESS, RED)\ + _ETEQ_INTERNAL_V2A_CASE(3, PROCESS, RED)\ + _ETEQ_INTERNAL_V2A_CASE(4, PROCESS, RED)\ + _ETEQ_INTERNAL_V2A_CASE(5, PROCESS, RED)\ + _ETEQ_INTERNAL_V2A_CASE(6, PROCESS, RED)\ + _ETEQ_INTERNAL_V2A_CASE(7, PROCESS, RED)\ default: break;\ } return make_eigentensor,TensMapT>(\ shape_convert(outshape), [vdims](TensMapT& in) {\ - return in.PROCESS(ead::internal::dim_copy<8>(vdims));\ + return in.PROCESS(::eteq::internal::dim_copy<8>(vdims));\ }, make_tensmap(in.data_, in.shape_));\ } } template -EigenptrT reduce_sum (ade::Shape& outshape, const OpArg& in) -_EAD_INTERNAL_V2A(sum, Eigen::internal::SumReducer) +EigenptrT reduce_sum (teq::Shape& outshape, const OpArg& in) +_ETEQ_INTERNAL_V2A(sum, Eigen::internal::SumReducer) template -EigenptrT reduce_prod (ade::Shape& outshape, const OpArg& in) -_EAD_INTERNAL_V2A(prod, Eigen::internal::ProdReducer) +EigenptrT reduce_prod (teq::Shape& outshape, const OpArg& in) +_ETEQ_INTERNAL_V2A(prod, Eigen::internal::ProdReducer) template -EigenptrT reduce_min (ade::Shape& outshape, const OpArg& in) -_EAD_INTERNAL_V2A(minimum, Eigen::internal::MinReducer) +EigenptrT reduce_min (teq::Shape& outshape, const OpArg& in) +_ETEQ_INTERNAL_V2A(minimum, 
Eigen::internal::MinReducer) template -EigenptrT reduce_max (ade::Shape& outshape, const OpArg& in) -_EAD_INTERNAL_V2A(maximum, Eigen::internal::MaxReducer) +EigenptrT reduce_max (teq::Shape& outshape, const OpArg& in) +_ETEQ_INTERNAL_V2A(maximum, Eigen::internal::MaxReducer) template -EigenptrT extend (ade::Shape& outshape, const OpArg& in) +EigenptrT extend (teq::Shape& outshape, const OpArg& in) { assert(nullptr != in.coorder_); - ade::CoordT coord; + teq::CoordT coord; in.coorder_->forward(coord.begin(), coord.begin()); return make_eigentensor>,TensMapT>( + const teq::CoordT,const TensMapT>,TensMapT>( shape_convert(outshape), [coord](TensMapT& in) { @@ -115,10 +115,10 @@ EigenptrT extend (ade::Shape& outshape, const OpArg& in) } template -EigenptrT permute (ade::Shape& outshape, const OpArg& in) +EigenptrT permute (teq::Shape& outshape, const OpArg& in) { assert(nullptr != in.coorder_); - ade::CoordT reorder; + teq::CoordT reorder; in.coorder_->forward(reorder.begin(), reorder.begin()); if (is_2d(outshape) && reorder[0] == 1 && reorder[1] == 0) { @@ -132,7 +132,7 @@ EigenptrT permute (ade::Shape& outshape, const OpArg& in) }, make_matmap(in.data_, in.shape_)); } return make_eigentensor>,TensMapT>( + const teq::CoordT,TensMapT>,TensMapT>( shape_convert(outshape), [reorder](TensMapT& in) { @@ -141,21 +141,21 @@ EigenptrT permute (ade::Shape& outshape, const OpArg& in) } template -EigenptrT slice (ade::Shape& outshape, const OpArg& in) +EigenptrT slice (teq::Shape& outshape, const OpArg& in) { assert(nullptr != in.coorder_); - ade::CoordT slicing; + teq::CoordT slicing; in.coorder_->forward(slicing.begin(), slicing.begin()); - ade::ShapeT offset; - ade::ShapeT extent; + teq::ShapeT offset; + teq::ShapeT extent; std::fill(offset.begin(), offset.end(), 0); std::copy(in.shape_.begin(), in.shape_.end(), extent.begin()); - ade::RankT dimension = slicing[2]; + teq::RankT dimension = slicing[2]; offset[dimension] = slicing[0]; extent[dimension] = slicing[1]; return make_eigentensor + const teq::ShapeT, const teq::ShapeT, + TensMapT >, TensMapT>( shape_convert(outshape), @@ -166,20 +166,20 @@ EigenptrT slice (ade::Shape& outshape, const OpArg& in) } template -EigenptrT pad (ade::Shape& outshape, const OpArg& in) +EigenptrT pad (teq::Shape& outshape, const OpArg& in) { assert(nullptr != in.coorder_); - ade::CoordT padding; + teq::CoordT padding; in.coorder_->forward(padding.begin(), padding.begin()); - std::array,ade::rank_cap> paddings; - for (ade::RankT i = 0; i < ade::rank_cap; ++i) + std::array,teq::rank_cap> paddings; + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { paddings[i] = std::make_pair(0, 0); } paddings[padding[2]] = std::make_pair(padding[0], padding[1]); return make_eigentensor,ade::rank_cap>, - const ead::TensMapT + const std::array,teq::rank_cap>, + const TensMapT >, TensMapT>( shape_convert(outshape), @@ -192,7 +192,7 @@ EigenptrT pad (ade::Shape& outshape, const OpArg& in) /// Given reference to output array, and input vector ref, /// make output elements take absolute value of inputs template -EigenptrT abs (ade::Shape& outshape, const OpArg& in) +EigenptrT abs (teq::Shape& outshape, const OpArg& in) { if (is_2d(outshape)) { @@ -217,7 +217,7 @@ EigenptrT abs (ade::Shape& outshape, const OpArg& in) /// Given reference to output array, and input vector ref, /// make output elements take negatives of inputs template -EigenptrT neg (ade::Shape& outshape, const OpArg& in) +EigenptrT neg (teq::Shape& outshape, const OpArg& in) { if (is_2d(outshape)) { @@ -242,7 +242,7 @@ EigenptrT 
neg (ade::Shape& outshape, const OpArg& in) /// Given reference to output array, and input vector ref, /// make output elements take sine of inputs template -EigenptrT sin (ade::Shape& outshape, const OpArg& in) +EigenptrT sin (teq::Shape& outshape, const OpArg& in) { #ifdef __cpp_if_constexpr if constexpr(!std::is_integral::value) @@ -276,7 +276,7 @@ EigenptrT sin (ade::Shape& outshape, const OpArg& in) /// Given reference to output array, and input vector ref, /// make output elements take cosine of inputs template -EigenptrT cos (ade::Shape& outshape, const OpArg& in) +EigenptrT cos (teq::Shape& outshape, const OpArg& in) { #ifdef __cpp_if_constexpr if constexpr(!std::is_integral::value) @@ -310,7 +310,7 @@ EigenptrT cos (ade::Shape& outshape, const OpArg& in) /// Given reference to output array, and input vector ref, /// make output elements take tangent of inputs template -EigenptrT tan (ade::Shape& outshape, const OpArg& in) +EigenptrT tan (teq::Shape& outshape, const OpArg& in) { if (is_2d(outshape)) { @@ -339,7 +339,7 @@ EigenptrT tan (ade::Shape& outshape, const OpArg& in) /// Given reference to output array, and input vector ref, /// make output elements take exponent of inputs template -EigenptrT exp (ade::Shape& outshape, const OpArg& in) +EigenptrT exp (teq::Shape& outshape, const OpArg& in) { if (is_2d(outshape)) { @@ -364,7 +364,7 @@ EigenptrT exp (ade::Shape& outshape, const OpArg& in) /// Given reference to output array, and input vector ref, /// make output elements take natural log of inputs template -EigenptrT log (ade::Shape& outshape, const OpArg& in) +EigenptrT log (teq::Shape& outshape, const OpArg& in) { if (is_2d(outshape)) { @@ -389,7 +389,7 @@ EigenptrT log (ade::Shape& outshape, const OpArg& in) /// Given reference to output array, and input vector ref, /// make output elements take square root of inputs template -EigenptrT sqrt (ade::Shape& outshape, const OpArg& in) +EigenptrT sqrt (teq::Shape& outshape, const OpArg& in) { if (is_2d(outshape)) { @@ -414,7 +414,7 @@ EigenptrT sqrt (ade::Shape& outshape, const OpArg& in) /// Given reference to output array, and input vector ref, /// make output elements take rounded values of inputs template -EigenptrT round (ade::Shape& outshape, const OpArg& in) +EigenptrT round (teq::Shape& outshape, const OpArg& in) { if (is_2d(outshape)) { @@ -437,7 +437,7 @@ EigenptrT round (ade::Shape& outshape, const OpArg& in) } template -EigenptrT sigmoid (ade::Shape& outshape, const OpArg& in) +EigenptrT sigmoid (teq::Shape& outshape, const OpArg& in) { if (is_2d(outshape)) { @@ -460,7 +460,7 @@ EigenptrT sigmoid (ade::Shape& outshape, const OpArg& in) } template -EigenptrT sigmoid_grad (ade::Shape& outshape, const OpArg& in) +EigenptrT sigmoid_grad (teq::Shape& outshape, const OpArg& in) { if (is_2d(outshape)) { @@ -498,7 +498,7 @@ EigenptrT sigmoid_grad (ade::Shape& outshape, const OpArg& in) } template -EigenptrT tanh (ade::Shape& outshape, const OpArg& in) +EigenptrT tanh (teq::Shape& outshape, const OpArg& in) { if (is_2d(outshape)) { @@ -521,7 +521,7 @@ EigenptrT tanh (ade::Shape& outshape, const OpArg& in) } template -EigenptrT square (ade::Shape& outshape, const OpArg& in) +EigenptrT square (teq::Shape& outshape, const OpArg& in) { if (is_2d(outshape)) { @@ -544,7 +544,7 @@ EigenptrT square (ade::Shape& outshape, const OpArg& in) } template -EigenptrT cube (ade::Shape& outshape, const OpArg& in) +EigenptrT cube (teq::Shape& outshape, const OpArg& in) { if (is_2d(outshape)) { @@ -570,7 +570,7 @@ EigenptrT cube (ade::Shape& 
outshape, const OpArg& in) /// same index apply std::pow operator /// Only accept 2 arguments template -EigenptrT pow (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT pow (teq::Shape& outshape, const OpArg& a, const OpArg& b) { if (is_2d(outshape)) { @@ -609,7 +609,7 @@ EigenptrT pow (ade::Shape& outshape, const OpArg& a, const OpArg& b) } template -EigenptrT add (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT add (teq::Shape& outshape, const OpArg& a, const OpArg& b) { if (is_2d(outshape)) { @@ -641,7 +641,7 @@ EigenptrT add (ade::Shape& outshape, const OpArg& a, const OpArg& b) /// same index subtract /// Only accept 2 arguments template -EigenptrT sub (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT sub (teq::Shape& outshape, const OpArg& a, const OpArg& b) { if (is_2d(outshape)) { @@ -670,7 +670,7 @@ EigenptrT sub (ade::Shape& outshape, const OpArg& a, const OpArg& b) } template -EigenptrT mul (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT mul (teq::Shape& outshape, const OpArg& a, const OpArg& b) { if (is_2d(outshape)) { @@ -702,7 +702,7 @@ EigenptrT mul (ade::Shape& outshape, const OpArg& a, const OpArg& b) /// same index divide /// Only accept 2 arguments template -EigenptrT div (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT div (teq::Shape& outshape, const OpArg& a, const OpArg& b) { if (is_2d(outshape)) { @@ -734,7 +734,7 @@ EigenptrT div (ade::Shape& outshape, const OpArg& a, const OpArg& b) /// same index apply == operator /// Only accept 2 arguments template -EigenptrT eq (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT eq (teq::Shape& outshape, const OpArg& a, const OpArg& b) { if (is_2d(outshape)) { @@ -776,7 +776,7 @@ EigenptrT eq (ade::Shape& outshape, const OpArg& a, const OpArg& b) /// same index apply != operator /// Only accept 2 arguments template -EigenptrT neq (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT neq (teq::Shape& outshape, const OpArg& a, const OpArg& b) { if (is_2d(outshape)) { @@ -818,7 +818,7 @@ EigenptrT neq (ade::Shape& outshape, const OpArg& a, const OpArg& b) /// same index apply < operator /// Only accept 2 arguments template -EigenptrT lt (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT lt (teq::Shape& outshape, const OpArg& a, const OpArg& b) { if (is_2d(outshape)) { @@ -860,7 +860,7 @@ EigenptrT lt (ade::Shape& outshape, const OpArg& a, const OpArg& b) /// same index apply > operator /// Only accept 2 arguments template -EigenptrT gt (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT gt (teq::Shape& outshape, const OpArg& a, const OpArg& b) { if (is_2d(outshape)) { @@ -901,7 +901,7 @@ EigenptrT gt (ade::Shape& outshape, const OpArg& a, const OpArg& b) /// Given arguments, for every mapped index i in range [0:max_nelems], /// take the minimum all elements for all arguments template -EigenptrT min (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT min (teq::Shape& outshape, const OpArg& a, const OpArg& b) { if (is_2d(outshape)) { @@ -932,7 +932,7 @@ EigenptrT min (ade::Shape& outshape, const OpArg& a, const OpArg& b) /// Given arguments, for every mapped index i in range [0:max_nelems], /// take the maximum all elements for all arguments template -EigenptrT max (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT max (teq::Shape& outshape, const OpArg& a, const OpArg& b) { if (is_2d(outshape)) { @@ -964,7 +964,7 @@ EigenptrT max (ade::Shape& outshape, const OpArg& 
a, const OpArg& b) /// same index apply std::uniform_distributon function /// Only accept 2 arguments template -EigenptrT rand_uniform (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT rand_uniform (teq::Shape& outshape, const OpArg& a, const OpArg& b) { if (is_2d(outshape)) { @@ -995,7 +995,7 @@ EigenptrT rand_uniform (ade::Shape& outshape, const OpArg& a, const OpArg< } template -EigenptrT select (ade::Shape& outshape, +EigenptrT select (teq::Shape& outshape, const OpArg& condition, const OpArg& then, const OpArg& otherwise) { @@ -1027,7 +1027,7 @@ EigenptrT select (ade::Shape& outshape, } template -EigenptrT matmul (ade::Shape& outshape, const OpArg& a, const OpArg& b) +EigenptrT matmul (teq::Shape& outshape, const OpArg& a, const OpArg& b) { assert(is_2d(outshape)); return make_eigenmatrix,MatMapT>, @@ -1041,16 +1041,16 @@ EigenptrT matmul (ade::Shape& outshape, const OpArg& a, const OpArg& b) } template -EigenptrT convolution (ade::Shape& outshape, const OpArg& input, const OpArg& kernel) +EigenptrT convolution (teq::Shape& outshape, const OpArg& input, const OpArg& kernel) { assert(nullptr != kernel.coorder_); - ade::CoordT kernel_dims; + teq::CoordT kernel_dims; kernel.coorder_->forward(kernel_dims.begin(), kernel_dims.begin()); - ade::ShapeT dims; + teq::ShapeT dims; std::copy(kernel_dims.begin(), kernel_dims.end(), dims.begin()); return make_eigentensor,const TensMapT>, std::vector>>(shape_convert(outshape), [&](std::vector>& args) @@ -1062,29 +1062,29 @@ EigenptrT convolution (ade::Shape& outshape, const OpArg& input, const OpA } template -EigenptrT convolution_image_grad (ade::Shape& imageshape, +EigenptrT convolution_image_grad (teq::Shape& imageshape, const OpArg& kernel, const OpArg& super_composite) { return make_eigentensor, - const ade::ShapeT, + const teq::ShapeT, const Eigen::TensorCwiseBinaryOp< Eigen::internal::scalar_product_op, const Eigen::TensorBroadcastingOp< - const std::array, + const std::array, const Eigen::TensorReshapingOp< - const std::array, + const std::array, Eigen::TensorReverseOp< - const std::array, - ead::TensMapT + const std::array, + TensMapT > > >, const Eigen::TensorPatchOp< - const ade::ShapeT, + const teq::ShapeT, const Eigen::TensorPaddingOp< - const std::array,ade::rank_cap>, - const ead::TensMapT + const std::array,teq::rank_cap>, + const TensMapT > > > @@ -1094,10 +1094,10 @@ EigenptrT convolution_image_grad (ade::Shape& imageshape, { auto& outshape = super_composite.shape_; - ade::ShapeT patch_dims; + teq::ShapeT patch_dims; std::copy(outshape.begin(), outshape.end(), patch_dims.begin()); - Eigen::array,ade::rank_cap> paddings; - for (ade::RankT i = 0; i < ade::rank_cap; ++i) + Eigen::array,teq::rank_cap> paddings; + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { int paddsize = outshape.at(i) - 1; paddings[i] = std::make_pair(paddsize, paddsize); @@ -1105,20 +1105,20 @@ EigenptrT convolution_image_grad (ade::Shape& imageshape, auto patched = args[0].pad(paddings) .extract_patches(patch_dims); - std::array revflags; + std::array revflags; std::fill(revflags.begin(), revflags.end(), true); - std::array pshape; + std::array pshape; std::copy(outshape.begin(), outshape.end(), pshape.begin()); - pshape[ade::rank_cap] = 1; - std::array expansion; + pshape[teq::rank_cap] = 1; + std::array expansion; std::fill(expansion.begin(), expansion.end(), 1); - expansion[ade::rank_cap] = imageshape.n_elems(); + expansion[teq::rank_cap] = imageshape.n_elems(); auto partial = args[1] .reverse(revflags) .reshape(pshape) .broadcast(expansion) 
* patched; - ade::ShapeT shapespace; + teq::ShapeT shapespace; std::iota(shapespace.begin(), shapespace.end(), 0); return partial.sum(shapespace); }, { @@ -1127,24 +1127,24 @@ EigenptrT convolution_image_grad (ade::Shape& imageshape, } template -EigenptrT convolution_kernel_grad (ade::Shape& kernelshape, +EigenptrT convolution_kernel_grad (teq::Shape& kernelshape, const OpArg& image, const OpArg& super_composite) { return make_eigentensor, - const ade::ShapeT, + const teq::ShapeT, const Eigen::TensorCwiseBinaryOp< Eigen::internal::scalar_product_op, const Eigen::TensorBroadcastingOp< - const std::array, + const std::array, const Eigen::TensorReshapingOp< - const std::array, - ead::TensMapT + const std::array, + TensMapT > >, const Eigen::TensorPatchOp< - const ade::ShapeT, - const ead::TensMapT + const teq::ShapeT, + const TensMapT > > >, @@ -1153,21 +1153,21 @@ EigenptrT convolution_kernel_grad (ade::Shape& kernelshape, { auto& outshape = super_composite.shape_; - ade::ShapeT patch_dims; + teq::ShapeT patch_dims; std::copy(outshape.begin(), outshape.end(), patch_dims.begin()); auto patched = args[0].extract_patches(patch_dims); - std::array pshape; + std::array pshape; std::copy(outshape.begin(), outshape.end(), pshape.begin()); - pshape[ade::rank_cap] = 1; - std::array expansion; + pshape[teq::rank_cap] = 1; + std::array expansion; std::fill(expansion.begin(), expansion.end(), 1); - expansion[ade::rank_cap] = kernelshape.n_elems(); + expansion[teq::rank_cap] = kernelshape.n_elems(); auto partial = args[1] .reshape(pshape) .broadcast(expansion) * patched; - ade::ShapeT shapespace; + teq::ShapeT shapespace; std::iota(shapespace.begin(), shapespace.end(), 0); return partial.sum(shapespace); }, { @@ -1177,4 +1177,4 @@ EigenptrT convolution_kernel_grad (ade::Shape& kernelshape, } -#endif // EAD_OPERATOR_HPP +#endif // ETEQ_OPERATOR_HPP diff --git a/ead/parse.hpp b/eteq/parse.hpp similarity index 76% rename from ead/parse.hpp rename to eteq/parse.hpp index 9e878b0d3..acc372967 100644 --- a/ead/parse.hpp +++ b/eteq/parse.hpp @@ -1,11 +1,11 @@ #include "opt/parse.hpp" -#include "ead/ead.hpp" +#include "eteq/eteq.hpp" -#ifndef EAD_PARSE_HPP -#define EAD_PARSE_HPP +#ifndef ETEQ_PARSE_HPP +#define ETEQ_PARSE_HPP -namespace ead +namespace eteq { static std::vector vectorize (::NumList* list) @@ -28,7 +28,7 @@ static CoordptrT coorderize (::NumList* list) std::vector clist = vectorize(list); if (clist.size() > 0) { - ade::CoordT carr; + teq::CoordT carr; std::copy(clist.begin(), clist.end(), carr.begin()); out = std::make_shared(carr, false); // todo: figure out bijectivity } @@ -40,11 +40,10 @@ struct ScalarConvr final : public opt::iConverter { ScalarConvr (double scalar) : scalar_(scalar) {} - ade::TensptrT build (const opt::ContexT& ctx, - ade::Shape outshape) const override + teq::TensptrT build (const opt::ContexT& ctx, + teq::Shape outshape) const override { - return ead::make_constant_scalar((T) scalar_, - outshape)->get_tensor(); + return make_constant_scalar((T) scalar_, outshape)->get_tensor(); } std::string to_string (void) const override @@ -60,8 +59,8 @@ struct AnyConvr final : public opt::iConverter { AnyConvr (std::string any_id) : any_id_(any_id) {} - ade::TensptrT build (const opt::ContexT& ctx, - ade::Shape outshape) const override + teq::TensptrT build (const opt::ContexT& ctx, + teq::Shape outshape) const override { const opt::CtxValT& val = estd::must_getf(ctx, any_id_, "cannot find any id `%s` in conversion", any_id_.c_str()); @@ -83,7 +82,7 @@ struct AnyConvr final : public 
opt::iConverter struct BuilderArg final { BuilderArg (opt::ConvptrT arg, - ade::CoordptrT shaper, CoordptrT coorder) : + teq::CoordptrT shaper, CoordptrT coorder) : arg_(arg), shaper_(shaper), coorder_(coorder) { if (nullptr == arg) @@ -94,7 +93,7 @@ struct BuilderArg final opt::ConvptrT arg_; - ade::CoordptrT shaper_; + teq::CoordptrT shaper_; CoordptrT coorder_; }; @@ -105,22 +104,22 @@ template struct FuncConvr final : public opt::iConverter { FuncConvr (std::string op, BuilderArgsT args) : - opcode_({op, age::get_op(op)}), args_(args) {} + opcode_({op, egen::get_op(op)}), args_(args) {} - ade::TensptrT build (const opt::ContexT& ctx, - ade::Shape outshape) const override + teq::TensptrT build (const opt::ContexT& ctx, + teq::Shape outshape) const override { ArgsT args; for (auto& arg : args_) { - ade::Shape childshape = outshape; - if (ade::is_identity(arg.shaper_.get())) + teq::Shape childshape = outshape; + if (teq::is_identity(arg.shaper_.get())) { - childshape = ade::apply_shaper(arg.shaper_, childshape); + childshape = teq::apply_shaper(arg.shaper_, childshape); } auto tens = arg.arg_->build(ctx, childshape); args.push_back(FuncArg( - NodeConverters::to_node(tens), + TO_NODE(tens), arg.shaper_, arg.coorder_)); } return make_functor(opcode_, args)->get_tensor(); @@ -140,7 +139,7 @@ struct FuncConvr final : public opt::iConverter args.begin(), args.end()).c_str()); } - ade::Opcode opcode_; + teq::Opcode opcode_; BuilderArgsT args_; }; @@ -154,16 +153,16 @@ struct GroupConvr final : public opt::iConverter assert(group_ == "sum" || group_ == "prod"); // todo: generalize this for ordered-groups } - ade::TensptrT build (const opt::ContexT& ctx, - ade::Shape outshape) const override + teq::TensptrT build (const opt::ContexT& ctx, + teq::Shape outshape) const override { - ade::TensT args; + teq::TensT args; for (auto& arg : args_) { - ade::Shape childshape = outshape; - if (ade::is_identity(arg.shaper_.get())) + teq::Shape childshape = outshape; + if (teq::is_identity(arg.shaper_.get())) { - childshape = ade::apply_shaper(arg.shaper_, childshape); + childshape = teq::apply_shaper(arg.shaper_, childshape); } args.push_back(arg.arg_->build(ctx, childshape)); } @@ -174,13 +173,13 @@ struct GroupConvr final : public opt::iConverter ctx, variadic_, opt::CtxValT()); args.insert(args.end(), varargs.begin(), varargs.end()); } - ead::NodesT outs; + eteq::NodesT outs; outs.reserve(args.size()); std::transform(args.begin(), args.end(), std::back_inserter(outs), - [](ade::TensptrT tens) + [](teq::TensptrT tens) { - return NodeConverters::to_node(tens); + return TO_NODE(tens); }); if (group_ == "sum") { @@ -219,14 +218,14 @@ struct ConverterBuilder final : public opt::iConverterBuilder { opt::CstConvertF build_cconv (void) const override { - return [](ade::iTensor* tens) + return [](teq::iTensor* tens) { - ade::TensptrT out = nullptr; - if (auto f = dynamic_cast(tens)) + teq::TensptrT out = nullptr; + if (auto f = dynamic_cast(tens)) { f->update(); T* data = (T*) f->data(); - out = ead::make_constant(data, tens->shape())->get_tensor(); + out = make_constant(data, tens->shape())->get_tensor(); } return out; }; @@ -267,8 +266,8 @@ struct ConverterBuilder final : public opt::iConverterBuilder { ::Arg* arg = (::Arg*) it->val_; opt::ConvptrT warg = build(arg->subgraph_, ctx); - ade::CoordptrT shaper = this->shaperize(arg->shaper_); - CoordptrT coorder = ead::coorderize(arg->coorder_); + teq::CoordptrT shaper = this->shaperize(arg->shaper_); + CoordptrT coorder = eteq::coorderize(arg->coorder_); 
args.push_back(BuilderArg(warg, shaper, coorder)); } std::string label(branch->label_); @@ -295,9 +294,9 @@ struct ConverterBuilder final : public opt::iConverterBuilder return out; } - ade::CoordptrT shaperize (::NumList* list) const override + teq::CoordptrT shaperize (::NumList* list) const override { - ade::CoordptrT out = nullptr; + teq::CoordptrT out = nullptr; if (nullptr == list) { return out; @@ -305,14 +304,14 @@ struct ConverterBuilder final : public opt::iConverterBuilder std::vector slist = vectorize(list); if (slist.size() > 0) { - out = std::make_shared( - [&slist](ade::MatrixT m) + out = std::make_shared( + [&slist](teq::MatrixT m) { - for (size_t i = 0; i < ade::mat_dim; ++i) + for (size_t i = 0; i < teq::mat_dim; ++i) { - for (size_t j = 0; j < ade::mat_dim; ++j) + for (size_t j = 0; j < teq::mat_dim; ++j) { - size_t index = i * ade::mat_dim + j; + size_t index = i * teq::mat_dim + j; if (index < slist.size()) { m[i][j] = slist[index]; @@ -324,9 +323,9 @@ struct ConverterBuilder final : public opt::iConverterBuilder return out; } - ade::CoordptrT coorderize (::NumList* list) const override + teq::CoordptrT coorderize (::NumList* list) const override { - return ead::coorderize(list); + return eteq::coorderize(list); } }; @@ -346,4 +345,4 @@ opt::OptCtx parse_file (std::string filename) } -#endif // EAD_PARSE_HPP +#endif // ETEQ_PARSE_HPP diff --git a/ead/python/ead.cpp b/eteq/python/eteq.cpp similarity index 58% rename from ead/python/ead.cpp rename to eteq/python/eteq.cpp index b9263a340..f17c6da6d 100644 --- a/ead/python/ead.cpp +++ b/eteq/python/eteq.cpp @@ -2,17 +2,17 @@ #include "pybind11/numpy.h" #include "pybind11/stl.h" -#include "ade/ade.hpp" +#include "teq/teq.hpp" -#include "ead/generated/api.hpp" -#include "ead/generated/pyapi.hpp" -#include "ead/grader.hpp" -#include "ead/constant.hpp" -#include "ead/variable.hpp" -#include "ead/functor.hpp" -#include "ead/session.hpp" -#include "ead/random.hpp" -#include "ead/parse.hpp" +#include "eteq/generated/api.hpp" +#include "eteq/generated/pyapi.hpp" +#include "eteq/grader.hpp" +#include "eteq/constant.hpp" +#include "eteq/variable.hpp" +#include "eteq/functor.hpp" +#include "eteq/session.hpp" +#include "eteq/random.hpp" +#include "eteq/parse.hpp" namespace py = pybind11; @@ -20,13 +20,13 @@ namespace pyead { // todo: move these to a common file -ade::Shape p2cshape (std::vector& pyshape) +teq::Shape p2cshape (std::vector& pyshape) { - return ade::Shape(std::vector( + return teq::Shape(std::vector( pyshape.rbegin(), pyshape.rend())); } -std::vector c2pshape (const ade::Shape& cshape) +std::vector c2pshape (const teq::Shape& cshape) { auto it = cshape.begin(); auto et = cshape.end(); @@ -34,12 +34,12 @@ std::vector c2pshape (const ade::Shape& cshape) { --et; } - std::vector fwd(it, et); - return std::vector(fwd.rbegin(), fwd.rend()); + std::vector fwd(it, et); + return std::vector(fwd.rbegin(), fwd.rend()); } template -py::array typedata_to_array (ead::iNode* tnode, py::dtype dtype) +py::array typedata_to_array (eteq::iNode* tnode, py::dtype dtype) { auto pshape = c2pshape(tnode->shape()); return py::array(dtype, @@ -47,7 +47,7 @@ py::array typedata_to_array (ead::iNode* tnode, py::dtype dtype) tnode->data()); } -std::vector arr2vec (ade::Shape& outshape, py::array data) +std::vector arr2vec (teq::Shape& outshape, py::array data) { py::buffer_info info = data.request(); outshape = p2cshape(info.shape); @@ -116,27 +116,27 @@ std::vector arr2vec (ade::Shape& outshape, py::array data) } -PYBIND11_MODULE(ead, m) 
+PYBIND11_MODULE(eteq, m) { - m.doc() = "ead variables"; + m.doc() = "eteq variables"; // ==== node ==== - auto node = (py::class_,ead::NodeptrT>) - py::module::import("ead.tenncor").attr("NodeptrT"); + auto node = (py::class_,eteq::NodeptrT>) + py::module::import("eteq.tenncor").attr("NodeptrT"); node .def("__str__", [](py::object self) - { - auto dnode = self.cast*>(); - return dnode->get_tensor()->to_string(); - }, - "Return string representation of this tensor instance") + { return self.cast*>()->to_string(); }, + "Return string representation of internal tensor") + .def("as_tens", + [](py::object self) + { return self.cast*>()->get_tensor(); }, + "Return internal tensor of this node instance") .def("shape", [](py::object self) { - auto dnode = self.cast*>(); - ade::Shape shape = dnode->get_tensor()->shape(); + teq::Shape shape = self.cast*>()->shape(); auto pshape = pyead::c2pshape(shape); std::vector ipshape(pshape.begin(), pshape.end()); return py::array(ipshape.size(), ipshape.data()); @@ -145,79 +145,72 @@ PYBIND11_MODULE(ead, m) .def("children", [](py::object self) { - auto dnode = self.cast*>(); - std::vector tens; - if (auto f = dynamic_cast( - dnode->get_tensor().get())) + std::vector tens; + if (auto f = dynamic_cast( + self.cast*>()->get_tensor().get())) { auto args = f->get_children(); std::transform(args.begin(), args.end(), std::back_inserter(tens), - [](ade::FuncArg& mten) + [](teq::FuncArg& mten) { return mten.get_tensor(); }); } return tens; }) - .def("as_tens", - [](py::object self) - { - auto dnode = self.cast*>(); - return dnode->get_tensor(); - }) .def("get", [](py::object self) { - auto dnode = self.cast*>(); - return pyead::typedata_to_array(dnode, + return pyead::typedata_to_array( + self.cast*>(), py::dtype::of()); }); // ==== session ==== - py::class_ isess(m, "iSession"); - py::class_ session(m, "Session", isess); + py::class_ isess(m, "iSession"); + py::class_ session(m, "Session", isess); isess .def("track", - [](py::object self, ead::NodesT roots) + [](py::object self, eteq::NodesT roots) { - auto sess = self.cast(); - ade::TensT troots; + auto sess = self.cast(); + teq::TensT troots; troots.reserve(roots.size()); std::transform(roots.begin(), roots.end(), std::back_inserter(troots), - [](ead::NodeptrT& node) + [](eteq::NodeptrT& node) { return node->get_tensor(); }); sess->track(troots); }) .def("update", - [](py::object self, std::vector> nodes) + [](py::object self, std::vector> nodes) { - auto sess = self.cast(); - std::unordered_set updates; - for (ead::NodeptrT& node : nodes) + auto sess = self.cast(); + std::unordered_set updates; + for (eteq::NodeptrT& node : nodes) { updates.emplace(node->get_tensor().get()); } sess->update(updates); }, "Calculate every node in the graph given list of updated data nodes", - py::arg("nodes") = std::vector>{}) + py::arg("nodes") = std::vector>{}) .def("update_target", - [](py::object self, std::vector> targeted, - std::vector> updated) + [](py::object self, std::vector> targeted, + std::vector> updated) { - auto sess = self.cast(); - std::unordered_set targets; - std::unordered_set updates; - for (ead::NodeptrT& node : targeted) + auto sess = self.cast(); + std::unordered_set targets; + std::unordered_set updates; + for (eteq::NodeptrT& node : targeted) { targets.emplace(node->get_tensor().get()); } - for (ead::NodeptrT& node : updated) + for (eteq::NodeptrT& node : updated) { updates.emplace(node->get_tensor().get()); } @@ -225,39 +218,35 @@ PYBIND11_MODULE(ead, m) }, "Calculate node relevant to targets in the 
graph given list of updated data", py::arg("targets"), - py::arg("updated") = std::vector>{}); + py::arg("updated") = std::vector>{}); - py::implicitly_convertible(); + py::implicitly_convertible(); session .def(py::init()) .def("optimize", [](py::object self, std::string filename) { - auto sess = self.cast(); - opt::OptCtx rules = ead::parse_file(filename); + auto sess = self.cast(); + opt::OptCtx rules = eteq::parse_file(filename); sess->optimize(rules); }, py::arg("filename") = "cfg/optimizations.rules", "Optimize using rules for specified filename"); // ==== constant ==== - py::class_,std::shared_ptr>> constant( - m, "Constant", node); - - py::implicitly_convertible,ead::ConstantNode>(); + py::class_,std::shared_ptr>, + eteq::iNode> constant(m, "Constant"); // ==== variable ==== - py::class_,ead::VarptrT> variable( - m, "Variable", node); - - py::implicitly_convertible,ead::VariableNode>(); + py::class_,eteq::VarptrT, + eteq::iNode> variable(m, "Variable"); variable .def("assign", [](py::object self, py::array data) { - auto var = self.cast*>(); - ade::Shape shape; + auto var = self.cast*>(); + teq::Shape shape; std::vector vec = pyead::arr2vec(shape, data); var->assign(vec.data(), shape); }, @@ -269,23 +258,23 @@ PYBIND11_MODULE(ead, m) .def("scalar_constant", [](PybindT scalar, std::vector slist) { - return ead::make_constant_scalar(scalar, + return eteq::make_constant_scalar(scalar, pyead::p2cshape(slist)); }, "Return scalar constant node") .def("constant", [](py::array data) { - ade::Shape shape; + teq::Shape shape; std::vector vec = pyead::arr2vec(shape, data); - return ead::make_constant(vec.data(), shape); + return eteq::make_constant(vec.data(), shape); }, "Return constant node with data") // variable creation .def("scalar_variable", [](PybindT scalar, std::vector slist, std::string label) { - return ead::make_variable_scalar(scalar, pyead::p2cshape(slist), label); + return eteq::make_variable_scalar(scalar, pyead::p2cshape(slist), label); }, "Return labelled variable containing numpy data array", py::arg("scalar"), @@ -294,21 +283,21 @@ PYBIND11_MODULE(ead, m) .def("variable", [](py::array data, std::string label) { - ade::Shape shape; + teq::Shape shape; std::vector vec = pyead::arr2vec(shape, data); - return ead::make_variable(vec.data(), shape, label); + return eteq::make_variable(vec.data(), shape, label); }, "Return labelled variable containing numpy data array", py::arg("data"), py::arg("label") = "") // other stuff - .def("derive", &ead::derive, + .def("derive", &eteq::derive, "Return derivative of first tensor with respect to second tensor (deprecated)") .def("seed", [](size_t seed) { - ead::get_engine().seed(seed); + eteq::get_engine().seed(seed); }, "Seed internal RNG"); } diff --git a/ead/random.hpp b/eteq/random.hpp similarity index 93% rename from ead/random.hpp rename to eteq/random.hpp index 30ab3ebce..db877ddde 100644 --- a/ead/random.hpp +++ b/eteq/random.hpp @@ -2,10 +2,10 @@ #include #include -#ifndef EAD_RANDOM_HPP -#define EAD_RANDOM_HPP +#ifndef ETEQ_RANDOM_HPP +#define ETEQ_RANDOM_HPP -namespace ead +namespace eteq { /// RNG engine used @@ -54,4 +54,4 @@ GenF norm_gen (T mean, T stdev) } -#endif // EAD_RANDOM_HPP +#endif // ETEQ_RANDOM_HPP diff --git a/ead/serialize.hpp b/eteq/serialize.hpp similarity index 56% rename from ead/serialize.hpp rename to eteq/serialize.hpp index 8f6645a20..19f53928d 100644 --- a/ead/serialize.hpp +++ b/eteq/serialize.hpp @@ -1,6 +1,6 @@ /// /// serialize.hpp -/// ead +/// eteq /// /// Purpose: /// Define functions for 
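Taken together, the module bindings above expose roughly the following workflow. A minimal sketch, with arbitrary shapes and labels:

import numpy as np
import eteq.eteq as eteq
import eteq.tenncor as tc

a = eteq.variable(np.random.rand(3, 4), 'a')
b = eteq.scalar_variable(2.0, [3, 4], 'b')
c = eteq.constant(np.ones([3, 4]))

out = tc.div(tc.add(a, b), c)
grad = eteq.derive(out, a)      # derivative of out with respect to a

sess = eteq.Session()
sess.track([out, grad])
sess.update()

print(out.shape())              # shape as a numpy int array
print(out.get())                # forward values
print(grad.get())               # gradient values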
marshal and unmarshal data sources @@ -8,27 +8,27 @@ #include "pbm/data.hpp" -#include "ead/generated/opcode.hpp" -#include "ead/generated/dtype.hpp" +#include "eteq/generated/opcode.hpp" +#include "eteq/generated/dtype.hpp" -#include "ead/coord.hpp" -#include "ead/constant.hpp" -#include "ead/variable.hpp" -#include "ead/functor.hpp" +#include "eteq/coord.hpp" +#include "eteq/constant.hpp" +#include "eteq/variable.hpp" +#include "eteq/functor.hpp" -#ifndef EAD_SERIALIZE_HPP -#define EAD_SERIALIZE_HPP +#ifndef ETEQ_SERIALIZE_HPP +#define ETEQ_SERIALIZE_HPP -namespace ead +namespace eteq { static std::unordered_set non_bijectives = { - age::REDUCE_SUM, - age::REDUCE_PROD, - age::REDUCE_MIN, - age::REDUCE_MAX, - age::EXTEND, + egen::REDUCE_SUM, + egen::REDUCE_PROD, + egen::REDUCE_MIN, + egen::REDUCE_MAX, + egen::EXTEND, }; static bool is_big_endian(void) @@ -44,11 +44,11 @@ static bool is_big_endian(void) struct EADSaver final : public pbm::iSaver { - std::string save_leaf (ade::iLeaf* leaf) override + std::string save_leaf (teq::iLeaf* leaf) override { char* data = (char*) leaf->data(); size_t nelems = leaf->shape().n_elems(); - size_t nbytes = age::type_size((age::_GENERATED_DTYPE) leaf->type_code()); + size_t nbytes = egen::type_size((egen::_GENERATED_DTYPE) leaf->type_code()); if (is_big_endian() && nbytes > 1) { size_t totalbytes = nelems * nbytes; @@ -64,15 +64,15 @@ struct EADSaver final : public pbm::iSaver return std::string(data, nelems * nbytes); } - std::vector save_shaper (const ade::CoordptrT& mapper) override + std::vector save_shaper (const teq::CoordptrT& mapper) override { std::vector out; mapper->access( - [&out](const ade::MatrixT& mat) + [&out](const teq::MatrixT& mat) { - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { out.push_back(mat[i][j]); } @@ -81,43 +81,43 @@ struct EADSaver final : public pbm::iSaver return out; } - std::vector save_coorder (const ade::CoordptrT& mapper) override + std::vector save_coorder (const teq::CoordptrT& mapper) override { if (nullptr == mapper) { return std::vector(); } - ade::CoordT coord; + teq::CoordT coord; mapper->forward(coord.begin(), coord.begin()); return std::vector(coord.begin(), coord.end()); } }; #define _OUT_GENERIC(realtype)leaf = is_const?\ -ade::TensptrT(Constant::get((realtype*) pb, shape)):\ -ade::TensptrT(Variable::get((realtype*) pb, shape, label)); +teq::TensptrT(Constant::get((realtype*) pb, shape)):\ +teq::TensptrT(Variable::get((realtype*) pb, shape, label)); #define _OUT_GENFUNC(realtype){\ ArgsT eargs;eargs.reserve(args.size());\ std::transform(args.begin(), args.end(), std::back_inserter(eargs),\ -[](ade::FuncArg arg){\ +[](teq::FuncArg arg){\ return FuncArg(\ NodeConverters::to_node(arg.get_tensor()),\ arg.get_shaper(),\ std::static_pointer_cast(arg.get_coorder()));\ });\ -func = ade::TensptrT(\ -Functor::get(ade::Opcode{opname, age::get_op(opname)},eargs));} +func = teq::TensptrT(\ +Functor::get(teq::Opcode{opname, egen::get_op(opname)},eargs));} /// Unmarshal cortenn::Source as Variable containing context of source struct EADLoader final : public pbm::iLoader { - ade::TensptrT generate_leaf (const char* pb, ade::Shape shape, + teq::TensptrT generate_leaf (const char* pb, teq::Shape shape, std::string typelabel, std::string label, bool is_const) override { - ade::TensptrT leaf; - age::_GENERATED_DTYPE gencode = age::get_type(typelabel); - size_t nbytes = 
age::type_size(gencode); + teq::TensptrT leaf; + egen::_GENERATED_DTYPE gencode = egen::get_type(typelabel); + size_t nbytes = egen::type_size(gencode); if (is_big_endian() && nbytes > 1) { size_t totalbytes = shape.n_elems() * nbytes; @@ -138,66 +138,66 @@ struct EADLoader final : public pbm::iLoader return leaf; } - ade::TensptrT generate_func (std::string opname, ade::ArgsT args) override + teq::TensptrT generate_func (std::string opname, teq::ArgsT args) override { if (args.empty()) { logs::fatalf("cannot generate func %s without args", opname.c_str()); } - size_t gencode = age::BAD_TYPE; + size_t gencode = egen::BAD_TYPE; auto arg = args[0].get_tensor().get(); - if (auto leaf = dynamic_cast(arg)) + if (auto leaf = dynamic_cast(arg)) { gencode = leaf->type_code(); } - else if (auto func = dynamic_cast(arg)) + else if (auto func = dynamic_cast(arg)) { gencode = func->type_code(); } else { - logs::fatalf("cannot generate func from non-ead tensor arg %s", + logs::fatalf("cannot generate func from non-eteq tensor arg %s", arg->to_string().c_str()); } - ade::TensptrT func = nullptr; - TYPE_LOOKUP(_OUT_GENFUNC, (age::_GENERATED_DTYPE) gencode); + teq::TensptrT func = nullptr; + TYPE_LOOKUP(_OUT_GENFUNC, (egen::_GENERATED_DTYPE) gencode); return func; } - ade::CoordptrT generate_shaper (std::vector coord) override + teq::CoordptrT generate_shaper (std::vector coord) override { - if (ade::mat_dim * ade::mat_dim != coord.size()) + if (teq::mat_dim * teq::mat_dim != coord.size()) { logs::fatal("cannot deserialize non-matrix shape map"); } - return std::make_shared( - [&](ade::MatrixT fwd) + return std::make_shared( + [&](teq::MatrixT fwd) { - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { - fwd[i][j] = coord[i * ade::mat_dim + j]; + fwd[i][j] = coord[i * teq::mat_dim + j]; } } }); } - ade::CoordptrT generate_coorder ( + teq::CoordptrT generate_coorder ( std::string opname, std::vector coord) override { if (0 == coord.size()) // is identity { return nullptr; } - if (ade::rank_cap + 1 < coord.size()) + if (teq::rank_cap + 1 < coord.size()) { logs::fatal("cannot deserialize non-vector coordinate map"); } - bool is_bijective = false == estd::has(non_bijectives, age::get_op(opname)); - ade::CoordT indices; + bool is_bijective = false == estd::has(non_bijectives, egen::get_op(opname)); + teq::CoordT indices; auto cit = coord.begin(); - std::copy(cit, cit + ade::rank_cap, indices.begin()); + std::copy(cit, cit + teq::rank_cap, indices.begin()); return std::make_shared(indices, is_bijective); } }; @@ -206,4 +206,4 @@ struct EADLoader final : public pbm::iLoader } -#endif // EAD_SERIALIZE_HPP +#endif // ETEQ_SERIALIZE_HPP diff --git a/ead/session.hpp b/eteq/session.hpp similarity index 74% rename from ead/session.hpp rename to eteq/session.hpp index 69ec63231..2331c71a1 100644 --- a/ead/session.hpp +++ b/eteq/session.hpp @@ -1,26 +1,26 @@ #include #include -#include "ade/traveler.hpp" +#include "teq/traveler.hpp" #include "opt/optimize.hpp" -#include "ead/constant.hpp" -#include "ead/functor.hpp" +#include "eteq/constant.hpp" +#include "eteq/functor.hpp" -#ifndef EAD_SESSION_HPP -#define EAD_SESSION_HPP +#ifndef ETEQ_SESSION_HPP +#define ETEQ_SESSION_HPP -namespace ead +namespace eteq { -using TensSetT = std::unordered_set; +using TensSetT = std::unordered_set; struct iSession { virtual ~iSession (void) = default; - virtual void track (ade::TensT roots) = 0; + 
virtual void track (teq::TensT roots) = 0; /// update all nodes related to updated, if updated set is empty /// update all nodes related to the leaves (so everyone) @@ -41,24 +41,24 @@ struct SizeT final // don't update parent node if it is part of ignored set struct Session final : public iSession { - void track (ade::TensT roots) override + void track (teq::TensT roots) override { tracked_.insert(roots.begin(), roots.end()); - ade::ParentFinder pfinder; - for (ade::TensptrT& root : roots) + teq::ParentFinder pfinder; + for (teq::TensptrT& root : roots) { root->accept(pfinder); root->accept(stat_); } auto& statmap = stat_.graphsize_; - std::list all_ops; + std::list all_ops; for (auto& statpair : statmap) { if (0 < statpair.second.upper_) { // ensure we only track operable functors - auto op = dynamic_cast(statpair.first); + auto op = dynamic_cast(statpair.first); if (nullptr == op) { logs::fatalf("cannot track non-operable functor %s", @@ -68,16 +68,16 @@ struct Session final : public iSession } } all_ops.sort( - [&statmap](ade::iOperableFunc* a, ade::iOperableFunc* b) + [&statmap](teq::iOperableFunc* a, teq::iOperableFunc* b) { return statmap[a].upper_ < statmap[b].upper_; }); requirements_.clear(); - for (ade::iOperableFunc* op : all_ops) + for (teq::iOperableFunc* op : all_ops) { auto& args = op->get_children(); TensSetT unique_children; - for (const ade::FuncArg& arg : args) + for (const teq::FuncArg& arg : args) { auto tens = arg.get_tensor().get(); if (0 < statmap[tens].upper_) // ignore leaves @@ -93,7 +93,7 @@ struct Session final : public iSession for (auto& parent_pair : assocs.second) { parents_[assocs.first].emplace( - static_cast(parent_pair.first)); + static_cast(parent_pair.first)); } } } @@ -101,8 +101,8 @@ struct Session final : public iSession // this function is expected to be called repeatedly during runtime void update (TensSetT updated = {}, TensSetT ignores = {}) override { - std::unordered_map fulfilments; - for (ade::iTensor* unodes : updated) + std::unordered_map fulfilments; + for (teq::iTensor* unodes : updated) { auto& node_parents = parents_[unodes]; for (auto& node_parent : node_parents) @@ -130,13 +130,13 @@ struct Session final : public iSession // this function is expected to be called repeatedly during runtime void update_target (TensSetT target, TensSetT updated = {}) override { - ade::OnceTraveler targetted; + teq::OnceTraveler targetted; for (auto& tens : target) { tens->accept(targetted); } - std::unordered_map fulfilments; - for (ade::iTensor* unodes : updated) + std::unordered_map fulfilments; + for (teq::iTensor* unodes : updated) { auto& node_parents = parents_[unodes]; for (auto& node_parent : node_parents) @@ -163,24 +163,24 @@ struct Session final : public iSession void optimize (const opt::OptCtx& rules) { - ade::TensT tracked(tracked_.begin(), tracked_.end()); + teq::TensT tracked(tracked_.begin(), tracked_.end()); opt::optimize(tracked, rules); stat_.graphsize_.clear(); parents_.clear(); track(tracked); } - std::unordered_set tracked_; + std::unordered_set tracked_; - ade::GraphStat stat_; + teq::GraphStat stat_; - std::unordered_map> parents_; + std::unordered_map> parents_; // List of operatible nodes and its number of unique children ordered from leaf to root - std::vector> requirements_; // todo: test minimal requirements + std::vector> requirements_; // todo: test minimal requirements }; } -#endif // EAD_SESSION_HPP +#endif // ETEQ_SESSION_HPP diff --git a/ead/src/coord.cpp b/eteq/src/coord.cpp similarity index 65% rename from 
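The session above is driven by telling it which data nodes changed; update recomputes the graph given those nodes, while update_target limits work to what the listed targets need. A small sketch of the intended call pattern through the Python bindings shown earlier:

import numpy as np
import eteq.eteq as eteq
import eteq.tenncor as tc

x = eteq.variable(np.random.rand(2, 3), 'x')
y = eteq.variable(np.random.rand(2, 3), 'y')
lhs = tc.mul(x, x)
rhs = tc.mul(y, y)
total = tc.add(lhs, rhs)

sess = eteq.Session()
sess.track([total])
sess.update()                       # first full evaluation

x.assign(np.random.rand(2, 3))      # only x's data changed
sess.update([x])                    # recompute given that x changed
sess.update_target([lhs], [x])      # or: only refresh what lhs needs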
ead/src/coord.cpp rename to eteq/src/coord.cpp index 74aec8a79..18382160a 100644 --- a/ead/src/coord.cpp +++ b/eteq/src/coord.cpp @@ -1,43 +1,43 @@ -#include "ead/coord.hpp" +#include "eteq/coord.hpp" -#ifdef EAD_COORD_HPP +#ifdef ETEQ_COORD_HPP -namespace ead +namespace eteq { -CoordptrT reduce (std::vector red_dims) +CoordptrT reduce (std::vector red_dims) { - ade::RankT n_red = red_dims.size(); + teq::RankT n_red = red_dims.size(); if (std::any_of(red_dims.begin(), red_dims.end(), - [](ade::RankT& d) { return d >= ade::rank_cap; })) + [](teq::RankT& d) { return d >= teq::rank_cap; })) { logs::fatalf( "cannot reduce using dimensions greater or equal to rank_cap: %s", fmts::to_string(red_dims.begin(), red_dims.end()).c_str()); } - if (n_red > ade::rank_cap) + if (n_red > teq::rank_cap) { logs::fatalf("cannot reduce %d rank when only ranks are capped at %d", - n_red, ade::rank_cap); + n_red, teq::rank_cap); } - ade::CoordT rdims; + teq::CoordT rdims; auto it = rdims.begin(); - std::fill(it, rdims.end(), ade::rank_cap); + std::fill(it, rdims.end(), teq::rank_cap); std::copy(red_dims.begin(), red_dims.end(), it); return std::make_shared(rdims, false); } -CoordptrT extend (ade::RankT rank, std::vector ext) +CoordptrT extend (teq::RankT rank, std::vector ext) { - ade::RankT n_ext = ext.size(); + teq::RankT n_ext = ext.size(); if (std::any_of(ext.begin(), ext.end(), - [](ade::DimT& d) { return 0 == d; })) + [](teq::DimT& d) { return 0 == d; })) { logs::fatalf("cannot extend using zero dimensions %s", fmts::to_string(ext.begin(), ext.end()).c_str()); } - if (rank + n_ext > ade::rank_cap) + if (rank + n_ext > teq::rank_cap) { logs::fatalf("cannot extend shape rank %d beyond rank_cap with n_ext %d", rank, n_ext); @@ -48,14 +48,14 @@ CoordptrT extend (ade::RankT rank, std::vector ext) return nullptr; } - ade::CoordT bcast; + teq::CoordT bcast; auto it = bcast.begin(); std::fill(it, bcast.end(), 1); std::copy(ext.begin(), ext.end(), it + rank); return std::make_shared(bcast, false); } -CoordptrT permute (std::vector dims) +CoordptrT permute (std::vector dims) { if (dims.size() == 0) { @@ -63,9 +63,9 @@ CoordptrT permute (std::vector dims) return nullptr; } - bool visited[ade::rank_cap]; - std::memset(visited, false, ade::rank_cap); - for (ade::RankT i = 0, n = dims.size(); i < n; ++i) + bool visited[teq::rank_cap]; + std::memset(visited, false, teq::rank_cap); + for (teq::RankT i = 0, n = dims.size(); i < n; ++i) { if (visited[dims[i]]) { @@ -74,7 +74,7 @@ CoordptrT permute (std::vector dims) } visited[dims[i]] = true; } - for (ade::RankT i = 0; i < ade::rank_cap; ++i) + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { if (false == visited[i]) { @@ -82,7 +82,7 @@ CoordptrT permute (std::vector dims) } } - ade::CoordT order; + teq::CoordT order; std::copy(dims.begin(), dims.end(), order.begin()); return std::make_shared(order, true); } diff --git a/eteq/src/eigen.cpp b/eteq/src/eigen.cpp new file mode 100644 index 000000000..233f04308 --- /dev/null +++ b/eteq/src/eigen.cpp @@ -0,0 +1,17 @@ +#include "eteq/eigen.hpp" + +#ifdef ETEQ_EIGEN_HPP + +namespace eteq +{ + +DimensionsT shape_convert (teq::Shape shape) +{ + DimensionsT dims; + std::copy(shape.begin(), shape.end(), dims.begin()); + return dims; +} + +} + +#endif diff --git a/ead/src/random.cpp b/eteq/src/random.cpp similarity index 57% rename from ead/src/random.cpp rename to eteq/src/random.cpp index a671f208c..9570f1caf 100644 --- a/ead/src/random.cpp +++ b/eteq/src/random.cpp @@ -1,8 +1,8 @@ -#include "ead/random.hpp" +#include 
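The reduce, extend, and permute coordinate maps above back the corresponding tenncor calls. As a concrete example of the extend semantics (mirroring the test_extend case later in this diff), extending a rank-1 tensor along dimension 1 by 3 stacks three copies:

import numpy as np
import eteq.eteq as eteq
import eteq.tenncor as tc

v = eteq.variable(np.array([1.0, 2.0]), 'v')
out = tc.extend(v, 1, [3])          # shape [2] -> [3, 2]

sess = eteq.Session()
sess.track([out])
sess.update()
print(out.get())                    # [[1. 2.], [1. 2.], [1. 2.]]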
"eteq/random.hpp" -#ifdef EAD_RANDOM_HPP +#ifdef ETEQ_RANDOM_HPP -namespace ead +namespace eteq { EngineT& get_engine (void) diff --git a/ead/test/main.cpp b/eteq/test/main.cpp similarity index 100% rename from ead/test/main.cpp rename to eteq/test/main.cpp diff --git a/ead/test/ptest.py b/eteq/test/ptest.py similarity index 79% rename from ead/test/ptest.py rename to eteq/test/ptest.py index e66e1a729..7d264a996 100644 --- a/ead/test/ptest.py +++ b/eteq/test/ptest.py @@ -1,11 +1,12 @@ import json +import random import logging import unittest import numpy as np import tensorflow as tf -import ead.tenncor as tc -import ead.ead as ead +import eteq.tenncor as tc +import eteq.eteq as eteq from testutil.generate_testcases import generate_testcases @@ -43,6 +44,11 @@ def _normalize_shape(arr1, arr2): normalized_s2 = list(shape2) + [1] * (maxn - n2) return normalized_s1, normalized_s2 +def _round_helper(x): + if isinstance(x, float): + return round(x) + return tc.round(x) + class EADTest(unittest.TestCase): def _array_eq(self, arr1, arr2): msg = 'diff arrays:\n{}\n{}'.format(arr1, arr2) @@ -70,19 +76,19 @@ def prod(arr): def _common_unary(self, shape, api, real, derive): data = np.random.rand(*shape) * 34 - var = ead.variable(data, 'var') + var = eteq.variable(data, 'var') out = api(var) - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() fout = out.get() self._array_close(real(data), fout) - var2 = ead.variable(data, 'var2') - ex = ead.derive(out, var) - zero = ead.derive(out, var2) + var2 = eteq.variable(data, 'var2') + ex = eteq.derive(out, var) + zero = eteq.derive(out, var2) sess.track([ex, zero]) sess.update() @@ -96,7 +102,7 @@ def _common_unary(self, shape, api, real, derive): def _common_unary_tf(self, shape, api, tf_op): data = np.random.rand(*shape) - var = ead.variable(data, 'var') + var = eteq.variable(data, 'var') out = api(var) tf_var = tf.Variable(data) @@ -105,7 +111,7 @@ def _common_unary_tf(self, shape, api, tf_op): tfsess = tf.compat.v1.Session() tfsess.run(tf_var.initializer) - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -113,9 +119,9 @@ def _common_unary_tf(self, shape, api, tf_op): real = tfsess.run(tf_out) self._array_close(real, fout) - var2 = ead.variable(data, 'var2') - ex = ead.derive(out, var) - zero = ead.derive(out, var2) + var2 = eteq.variable(data, 'var2') + ex = eteq.derive(out, var) + zero = eteq.derive(out, var2) tf_grad = tf.gradients(tf_out, [tf_var])[0] @@ -132,49 +138,71 @@ def _common_unary_tf(self, shape, api, tf_op): def _common_binary(self, shape, api, real, derive): data = np.random.rand(*shape) data2 = np.random.rand(*shape) - var = ead.variable(data, 'var') - var2 = ead.variable(data2, 'var2') + var = eteq.variable(data, 'var') + var2 = eteq.variable(data2, 'var2') + cst = random.uniform(0.5, 5) + cst2 = random.uniform(0.5, 5) + out = api(var, var2) both = api(var, var) + clhs = api(var, cst) + crhs = api(cst2, var2) - sess = ead.Session() - sess.track([out, both]) + sess = eteq.Session() + sess.track([out, both, clhs, crhs]) sess.update() fout = out.get() fboth = both.get() + fclhs = clhs.get() + fcrhs = crhs.get() self._array_close(real(data, data2), fout) self._array_close(real(data, data), fboth) + self._array_close(real(data, cst), fclhs) + self._array_close(real(cst2, data2), fcrhs) - var3 = ead.variable(data, 'var3') + var3 = eteq.variable(data, 'var3') - zero = ead.derive(out, var3) - ex = ead.derive(out, var) - ex2 = ead.derive(out, var2) - ex3 = ead.derive(both, var) + zero = 
eteq.derive(out, var3) + ex = eteq.derive(out, var) + ex2 = eteq.derive(out, var2) + ex3 = eteq.derive(both, var) + ex4 = eteq.derive(clhs, var) + ex5 = eteq.derive(crhs, var2) - sess.track([zero, ex, ex2, ex3]) + sess.track([zero, ex, ex2, ex3, ex4, ex5]) sess.update() rej = zero.get() der = ex.get() der2 = ex2.get() der3 = ex3.get() + der4 = ex4.get() + der5 = ex5.get() data0 = np.zeros(shape, dtype=np.float32) exdata = derive(0, (data, data2)) exdata2 = derive(1, (data, data2)) exdata3 = derive(0, (data, data)) + derive(1, (data, data)) + exdata4 = derive(0, (data, cst)) + exdata5 = derive(1, (cst2, data2)) + + if isinstance(exdata4, float): + exdata4 = np.array([exdata4] * np.prod(shape)).reshape(shape) + if isinstance(exdata5, float): + exdata5 = np.array([exdata5] * np.prod(shape)).reshape(shape) self._array_eq(data0, rej) self._array_close(exdata, der) self._array_close(exdata2, der2) self._array_close(exdata3, der3) + self._array_close(exdata4, der4) + self._array_close(exdata5, der5) def _common_reduce_1d(self, dim_reduce, tf_reduce): shape = [3, 4, 5] data = np.random.rand(*shape) - var = ead.variable(data, 'var') + var = eteq.variable(data, 'var') tf_var = tf.Variable(data) tfsess = tf.compat.v1.Session() @@ -183,7 +211,7 @@ def _common_reduce_1d(self, dim_reduce, tf_reduce): out = dim_reduce(var, 1) tf_out = tf_reduce(tf_var, [1]) - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -192,9 +220,9 @@ def _common_reduce_1d(self, dim_reduce, tf_reduce): self._array_close(tf_fout, fout) - var2 = ead.variable(data, 'var2') - ex = ead.derive(out, var) - zero = ead.derive(out, var2) + var2 = eteq.variable(data, 'var2') + ex = eteq.derive(out, var) + zero = eteq.derive(out, var2) sess.track([ex, zero]) sess.update() @@ -216,7 +244,7 @@ def _common_reduce(self, all_reduce, dim_reduce, tf_reduce): ] for shape in shapes: data = np.random.rand(*shape) - var = ead.variable(data, 'var') + var = eteq.variable(data, 'var') tf_var = tf.Variable(data) tfsess = tf.compat.v1.Session() @@ -227,7 +255,7 @@ def _common_reduce(self, all_reduce, dim_reduce, tf_reduce): tf_out = tf_reduce(tf_var) tf_out2 = tf_reduce(tf_var, [0, 1]) - sess = ead.Session() + sess = eteq.Session() sess.track([out, out2]) sess.update() @@ -239,10 +267,10 @@ def _common_reduce(self, all_reduce, dim_reduce, tf_reduce): self._array_close(tf_fout, fout) self._array_close(tf_fout2, fout2) - var2 = ead.variable(data, 'var2') - ex = ead.derive(out, var) - ex2 = ead.derive(out2, var) - zero = ead.derive(out, var2) + var2 = eteq.variable(data, 'var2') + ex = eteq.derive(out, var) + ex2 = eteq.derive(out2, var) + zero = eteq.derive(out, var2) sess.track([ex, ex2, zero]) sess.update() @@ -269,9 +297,9 @@ def test_variable(self): data1 = np.ones(shape, dtype=np.float32) data0 = np.zeros(shape, dtype=np.float32) data = np.random.rand(*shape) * 234 - var = ead.variable(data, 'var') + var = eteq.variable(data, 'var') - sess = ead.Session() + sess = eteq.Session() sess.track([var]) sess.update() fout = var.get() @@ -281,9 +309,9 @@ def test_variable(self): self.assertEqual(shape, padding + list(fout.shape)) self._array_close(data, fout) - var2 = ead.variable(data, 'var2') - one = ead.derive(var, var) - zero = ead.derive(var, var2) + var2 = eteq.variable(data, 'var2') + one = eteq.derive(var, var) + zero = eteq.derive(var, var2) sess.track([one, zero]) sess.update() @@ -417,125 +445,149 @@ def test_pow(self): shapes = [[3, 4, 5]] if 'elementary.shape' in _test_data: shapes += _test_data['elementary.shape'] + def 
pow_der(i, data): + a, b = data + if i == 0: + return b * a ** (b - 1) + return a ** b * np.log(a) for shape in shapes: - def pow_der(i, data): - a, b = data - if i == 0: - return b * a ** (b - 1) - return a ** b * np.log(a) self._common_binary(shape, tc.pow, lambda x, y: x ** y, pow_der) def test_add(self): shapes = [[3, 4, 5]] if 'elementary.shape' in _test_data: shapes += _test_data['elementary.shape'] + generic_add = lambda x, y: x + y + add_der = lambda i, data: data1 for shape in shapes: data1 = np.ones(shape, dtype=np.float32) - self._common_binary(shape, tc.add, lambda x, y: x + y, - lambda i, data: data1) + self._common_binary(shape, tc.add, generic_add, add_der) + self._common_binary(shape, generic_add, generic_add, add_der) def test_sub(self): shapes = [[3, 4, 5]] if 'elementary.shape' in _test_data: shapes += _test_data['elementary.shape'] + generic_sub = lambda x, y: x - y + def sub_der(i, data): + if i == 0: + return data1 + return -data1 for shape in shapes: data1 = np.ones(shape, dtype=np.float32) - def sub_der(i, data): - if i == 0: - return data1 - return -data1 - self._common_binary(shape, tc.sub, lambda x, y: x - y, sub_der) + self._common_binary(shape, tc.sub, generic_sub, sub_der) + self._common_binary(shape, generic_sub, generic_sub, sub_der) def test_mul(self): shapes = [[3, 4, 5]] if 'elementary.shape' in _test_data: shapes += _test_data['elementary.shape'] + generic_mul = lambda x, y: x * y + def mul_der(i, data): + if i == 0: + return data[1] + return data[0] for shape in shapes: - def mul_der(i, data): - if i == 0: - return data[1] - return data[0] - self._common_binary(shape, tc.mul, lambda x, y: x * y, mul_der) + self._common_binary(shape, tc.mul, generic_mul, mul_der) + self._common_binary(shape, generic_mul, generic_mul, mul_der) def test_div(self): shapes = [[3, 4, 5]] if 'elementary.shape' in _test_data: shapes += _test_data['elementary.shape'] + generic_div = lambda x, y: x / y + def div_der(i, data): + a, b = data + if i == 0: + return 1 / b + return -a / (b * b) for shape in shapes: - def div_der(i, data): - a, b = data - if i == 0: - return 1 / b - return -a / (b * b) - self._common_binary(shape, tc.div, lambda x, y: x / y, div_der) + self._common_binary(shape, tc.div, generic_div, div_der) + self._common_binary(shape, generic_div, generic_div, div_der) def test_min(self): shapes = [[3, 4, 5]] if 'elementary.shape' in _test_data: shapes += _test_data['elementary.shape'] + def min_der(i, data): + a, b = data + if i == 0: + return (a <= b).astype(float) + return (b <= a).astype(float) for shape in shapes: - def min_der(i, data): - a, b = data - if i == 0: - return (a <= b).astype(float) - return (b <= a).astype(float) self._common_binary(shape, tc.min, np.minimum, min_der) def test_max(self): shapes = [[3, 4, 5]] if 'elementary.shape' in _test_data: shapes += _test_data['elementary.shape'] + def max_der(i, data): + a, b = data + if i == 0: + return (a >= b).astype(float) + return (b >= a).astype(float) for shape in shapes: - def max_der(i, data): - a, b = data - if i == 0: - return (a >= b).astype(float) - return (b >= a).astype(float) self._common_binary(shape, tc.max, np.maximum, max_der) def test_eq(self): shapes = [[3, 4, 5]] if 'elementary.shape' in _test_data: shapes += _test_data['elementary.shape'] + np_eq = lambda x, y: np.round(x) == np.round(y) + eq_der = lambda i, data: data0 for shape in shapes: data0 = np.zeros(shape, dtype=np.float32) self._common_binary(shape, - lambda x, y: tc.eq(tc.round(x), tc.round(y)), - lambda x, y: np.round(x) == 
np.round(y), - lambda i, data: data0) + lambda x, y: tc.eq(_round_helper(x), _round_helper(y)), + np_eq, eq_der) + self._common_binary(shape, + lambda x, y: _round_helper(x) == _round_helper(y), + np_eq, eq_der) def test_neq(self): shapes = [[3, 4, 5]] if 'elementary.shape' in _test_data: shapes += _test_data['elementary.shape'] + np_neq = lambda x, y: np.round(x) != np.round(y) + neq_der = lambda i, data: data0 for shape in shapes: data0 = np.zeros(shape, dtype=np.float32) self._common_binary(shape, - lambda x, y: tc.neq(tc.round(x), tc.round(y)), - lambda x, y: np.round(x) != np.round(y), - lambda i, data: data0) + lambda x, y: tc.neq(_round_helper(x), _round_helper(y)), + np_neq, neq_der) + self._common_binary(shape, + lambda x, y: _round_helper(x) != _round_helper(y), + np_neq, neq_der) def test_lt(self): shapes = [[3, 4, 5]] if 'elementary.shape' in _test_data: shapes += _test_data['elementary.shape'] + np_lt = lambda x, y: np.round(x) < np.round(y) + lt_der = lambda i, data: data0 for shape in shapes: data0 = np.zeros(shape, dtype=np.float32) self._common_binary(shape, - lambda x, y: tc.lt(tc.round(x), tc.round(y)), - lambda x, y: np.round(x) < np.round(y), - lambda i, data: data0) + lambda x, y: tc.lt(_round_helper(x), _round_helper(y)), + np_lt, lt_der) + self._common_binary(shape, + lambda x, y: _round_helper(x) < _round_helper(y), + np_lt, lt_der) def test_gt(self): shapes = [[3, 4, 5]] if 'elementary.shape' in _test_data: shapes += _test_data['elementary.shape'] + np_gt = lambda x, y: np.round(x) > np.round(y) + gt_der = lambda i, data: data0 for shape in shapes: data0 = np.zeros(shape, dtype=np.float32) self._common_binary(shape, - lambda x, y: tc.gt(tc.round(x), tc.round(y)), - lambda x, y: np.round(x) > np.round(y), - lambda i, data: data0) + lambda x, y: tc.gt(_round_helper(x), _round_helper(y)), + np_gt, gt_der) + self._common_binary(shape, + lambda x, y: _round_helper(x) > _round_helper(y), + np_gt, gt_der) def test_nelems(self): shapes = [[3, 4, 5]] @@ -562,17 +614,17 @@ def test_extend(self): shape = [2] data = np.random.rand(*shape) * 13 expected_out = np.array(list(data) * 3).reshape([3, 2]) - var = ead.variable(data, 'var') + var = eteq.variable(data, 'var') out = tc.extend(var, 1, [3]) - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() fout = out.get() self._array_close(expected_out, fout) - ex = ead.derive(out, var) + ex = eteq.derive(out, var) sess.track([ex]) sess.update() @@ -610,7 +662,7 @@ def test_rl2norm(self): ] for shape in shapes: data = np.random.rand(*shape) - var = ead.variable(data, 'var') + var = eteq.variable(data, 'var') tf_var = tf.Variable(data) tfsess = tf.compat.v1.Session() @@ -619,7 +671,7 @@ def test_rl2norm(self): out = tc.reduce_l2norm(var) tf_out = tf.norm(tf_var) - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -628,9 +680,9 @@ def test_rl2norm(self): self._array_close(tf_fout, fout) - var2 = ead.variable(data, 'var2') - ex = ead.derive(out, var) - zero = ead.derive(out, var2) + var2 = eteq.variable(data, 'var2') + ex = eteq.derive(out, var) + zero = eteq.derive(out, var2) sess.track([ex, zero]) sess.update() @@ -669,8 +721,8 @@ def test_matmul(self): data = np.random.rand(*lshape) data2 = np.random.rand(*rshape) - var = ead.variable(data, 'var') - var2 = ead.variable(data2, 'var2') + var = eteq.variable(data, 'var') + var2 = eteq.variable(data2, 'var2') tf_var = tf.Variable(data) tf_var2 = tf.Variable(data2) @@ -685,7 +737,7 @@ def test_matmul(self): tf_out = tf.matmul(tf_var, 
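The binary tests above now also exercise each operator with a plain scalar operand on either side and with Python operator overloading on the nodes themselves. A brief sketch of what that enables (values arbitrary):

import numpy as np
import eteq.eteq as eteq
import eteq.tenncor as tc

x = eteq.variable(np.random.rand(3, 4), 'x')

a = tc.add(x, 2.5)                  # node-scalar through the API
b = 2.5 * x - 1.0                   # scalar-node through operator overloads
c = x / x                           # node-node overloads still work
mask = tc.round(x) > 0.5            # element-wise comparison overload

grad = eteq.derive(b, x)            # gradients flow through scalar operands

sess = eteq.Session()
sess.track([a, b, c, mask, grad])
sess.update()
print(b.get(), grad.get())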
tf_var2) # evaluate regular matmul - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() fout = out.get() @@ -696,11 +748,11 @@ def test_matmul(self): # check regular matmul self._array_close(tf_fout, fout) - var3 = ead.variable(data, 'var3') + var3 = eteq.variable(data, 'var3') - zero = ead.derive(out, var3) - ex = ead.derive(out, var) - ex2 = ead.derive(out, var2) + zero = eteq.derive(out, var3) + ex = eteq.derive(out, var) + ex2 = eteq.derive(out, var2) sess.track([zero, ex, ex2]) sess.update() @@ -736,7 +788,7 @@ def test_matmul(self): self._array_close(tf_fboth, fboth) - ex3 = ead.derive(both, var) + ex3 = eteq.derive(both, var) sess.track([ex3]) sess.update() @@ -763,15 +815,15 @@ def test_convolution(self): tf_data = data.reshape(tf_shape) tf_kdata = kernel.reshape(tf_kernelshape) - var = ead.variable(data, 'var') - vkernel = ead.variable(kernel, 'vkernel') + var = eteq.variable(data, 'var') + vkernel = eteq.variable(kernel, 'vkernel') tf_var = tf.Variable(tf_data) tf_kernel = tf.Variable(tf_kdata) out = tc.convolution(var, vkernel, list(range(8))) - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -787,10 +839,10 @@ def test_convolution(self): tf_fout = tf_fout.reshape([tf_fout.shape[1], tf_fout.shape[2]]) self._array_close(tf_fout, fout) - var2 = ead.variable(data, 'var2') - zero = ead.derive(out, var2) - ex = ead.derive(out, var) - ex2 = ead.derive(out, vkernel) + var2 = eteq.variable(data, 'var2') + zero = eteq.derive(out, var2) + ex = eteq.derive(out, var) + ex2 = eteq.derive(out, vkernel) sess.track([zero, ex, ex2]) sess.update() @@ -821,15 +873,15 @@ def test_conv2d(self): kshape = [2, 2, 2, 4] kdata = np.random.rand(*kshape).astype(np.float32) - image = ead.variable(data, 'image') - kernel = ead.variable(kdata, 'vkernel') + image = eteq.variable(data, 'image') + kernel = eteq.variable(kdata, 'vkernel') tfimage = tf.Variable(data) tfkernel = tf.Variable(kdata) out = tc.nn.conv2d(image, kernel) - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -843,10 +895,10 @@ def test_conv2d(self): tfconv_output = tfsess.run(tfoutput) self._array_close(tfconv_output, conv_output) - var2 = ead.variable(data, 'var2') - zero = ead.derive(out, var2) - ex = ead.derive(out, image) - ex2 = ead.derive(out, kernel) + var2 = eteq.variable(data, 'var2') + zero = eteq.derive(out, var2) + ex = eteq.derive(out, image) + ex2 = eteq.derive(out, kernel) sess.track([zero, ex, ex2]) sess.update() @@ -869,8 +921,8 @@ def test_grader_scenario1(self): # REDUCE -> MUL data = np.random.rand(3,10) data2 = np.random.rand(10) - var = ead.variable(data, 'var') - var2 = ead.variable(data2, 'var2') + var = eteq.variable(data, 'var') + var2 = eteq.variable(data2, 'var2') tf_var = tf.Variable(data) tf_var2 = tf.Variable(data2) @@ -882,7 +934,7 @@ def test_grader_scenario1(self): # REDUCE -> MUL tf_out = tf.multiply(tf.reduce_sum(tf_var, 0), tf_var2) # evaluate regular matmul - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -892,7 +944,7 @@ def test_grader_scenario1(self): # REDUCE -> MUL # check regular matmul self._array_close(tf_fout, out.get()) - ex = ead.derive(out, var) + ex = eteq.derive(out, var) tf_grad = tf.gradients(tf_out, [tf_var])[0] sess.track([ex]) @@ -905,8 +957,8 @@ def test_grader_scenario2(self): # EXTEND -> MUL data = np.random.rand(10) data2 = np.random.rand(3,10) - var = ead.variable(data, 'var') - var2 = ead.variable(data2, 'var2') + var = eteq.variable(data, 'var') + var2 = 
eteq.variable(data2, 'var2') tf_var = tf.Variable(data) tf_var2 = tf.Variable(data2) @@ -918,7 +970,7 @@ def test_grader_scenario2(self): # EXTEND -> MUL tf_out = tf_var * tf_var2 # evaluate regular matmul - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -928,7 +980,7 @@ def test_grader_scenario2(self): # EXTEND -> MUL # check regular matmul self._array_close(tf_fout, out.get()) - ex = ead.derive(out, var) + ex = eteq.derive(out, var) tf_grad = tf.gradients(tf_out, [tf_var])[0] sess.track([ex]) @@ -941,8 +993,8 @@ def test_grader_scenario3(self): # PERMUTE -> MUL data = np.random.rand(10,3) data2 = np.random.rand(3,10) - var = ead.variable(data, 'var') - var2 = ead.variable(data2, 'var2') + var = eteq.variable(data, 'var') + var2 = eteq.variable(data2, 'var2') tf_var = tf.Variable(data) tf_var2 = tf.Variable(data2) @@ -954,7 +1006,7 @@ def test_grader_scenario3(self): # PERMUTE -> MUL tf_out = tf.transpose(tf_var) * tf_var2 # evaluate regular matmul - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -964,7 +1016,7 @@ def test_grader_scenario3(self): # PERMUTE -> MUL # check regular matmul self._array_close(tf_fout, out.get()) - ex = ead.derive(out, var) + ex = eteq.derive(out, var) tf_grad = tf.gradients(tf_out, [tf_var])[0] sess.track([ex]) @@ -978,9 +1030,9 @@ def test_grader_scenario4(self): # MATMUL -> MUL data2 = np.random.rand(3,5) data3 = np.random.rand(10,5) - var = ead.variable(data, 'var') - var2 = ead.variable(data2, 'var2') - var3 = ead.variable(data3, 'var3') + var = eteq.variable(data, 'var') + var2 = eteq.variable(data2, 'var2') + var3 = eteq.variable(data3, 'var3') tf_var = tf.Variable(data) tf_var2 = tf.Variable(data2) tf_var3 = tf.Variable(data3) @@ -994,7 +1046,7 @@ def test_grader_scenario4(self): # MATMUL -> MUL tf_out = tf.multiply(tf.matmul(tf_var, tf_var2), tf_var3) # evaluate regular matmul - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -1004,7 +1056,7 @@ def test_grader_scenario4(self): # MATMUL -> MUL # check regular matmul self._array_close(tf_fout, out.get()) - ex = ead.derive(out, var) + ex = eteq.derive(out, var) tf_grad = tf.gradients(tf_out, [tf_var])[0] sess.track([ex]) @@ -1018,9 +1070,9 @@ def test_grader_scenario5(self): # MATMUL -> MATMUL data2 = np.random.rand(3,5) data3 = np.random.rand(5,4) - var = ead.variable(data, 'var') - var2 = ead.variable(data2, 'var2') - var3 = ead.variable(data3, 'var3') + var = eteq.variable(data, 'var') + var2 = eteq.variable(data2, 'var2') + var3 = eteq.variable(data3, 'var3') tf_var = tf.Variable(data) tf_var2 = tf.Variable(data2) tf_var3 = tf.Variable(data3) @@ -1034,7 +1086,7 @@ def test_grader_scenario5(self): # MATMUL -> MATMUL tf_out = tf.matmul(tf.matmul(tf_var, tf_var2), tf_var3) # evaluate regular matmul - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -1044,7 +1096,7 @@ def test_grader_scenario5(self): # MATMUL -> MATMUL # check regular matmul self._array_close(tf_fout, out.get()) - ex = ead.derive(out, var) + ex = eteq.derive(out, var) tf_grad = tf.gradients(tf_out, [tf_var])[0] sess.track([ex]) @@ -1057,8 +1109,8 @@ def test_grader_scenario6(self): # REDUCE -> MATMUL data = np.random.rand(4,10,3) data2 = np.random.rand(3,5) - var = ead.variable(data, 'var') - var2 = ead.variable(data2, 'var2') + var = eteq.variable(data, 'var') + var2 = eteq.variable(data2, 'var2') tf_var = tf.Variable(data) tf_var2 = tf.Variable(data2) @@ -1070,7 +1122,7 @@ def test_grader_scenario6(self): # 
REDUCE -> MATMUL tf_out = tf.matmul(tf.reduce_sum(tf_var, 0), tf_var2) # evaluate regular matmul - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -1080,7 +1132,7 @@ def test_grader_scenario6(self): # REDUCE -> MATMUL # check regular matmul self._array_close(tf_fout, out.get()) - ex = ead.derive(out, var) + ex = eteq.derive(out, var) tf_grad = tf.gradients(tf_out, [tf_var])[0] sess.track([ex]) @@ -1094,8 +1146,8 @@ def test_grader_scenario7(self): # EXTEND -> MATMUL data2 = np.random.rand(3,5) ones = np.ones([10, 3]) - var = ead.variable(data, 'var') - var2 = ead.variable(data2, 'var2') + var = eteq.variable(data, 'var') + var2 = eteq.variable(data2, 'var2') tf_var = tf.Variable(data) tf_var2 = tf.Variable(data2) @@ -1107,7 +1159,7 @@ def test_grader_scenario7(self): # EXTEND -> MATMUL tf_out = tf.matmul(tf_var * ones, tf_var2) # evaluate regular matmul - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -1117,7 +1169,7 @@ def test_grader_scenario7(self): # EXTEND -> MATMUL # check regular matmul self._array_close(tf_fout, out.get()) - ex = ead.derive(out, var) + ex = eteq.derive(out, var) tf_grad = tf.gradients(tf_out, [tf_var])[0] sess.track([ex]) @@ -1134,7 +1186,7 @@ def test_grader_scenario8(self): ones = np.ones([10, 5]) ones2 = np.ones([3, 5]) - var = ead.variable(data, 'var') + var = eteq.variable(data, 'var') tf_var = tf.Variable(data) tfsess = tf.compat.v1.Session() @@ -1146,7 +1198,7 @@ def test_grader_scenario8(self): tf_out = tf.matmul(tf_var * ones, tf.transpose(tf_var * ones2)) # evaluate regular matmul - sess = ead.Session() + sess = eteq.Session() sess.track([out]) sess.update() @@ -1156,7 +1208,7 @@ def test_grader_scenario8(self): # check regular matmul self._array_close(tf_fout, out.get()) - ex = ead.derive(out, var) + ex = eteq.derive(out, var) tf_grad = tf.gradients(tf_out, [tf_var])[0] sess.track([ex]) @@ -1174,9 +1226,9 @@ def test_grader_scenario9(self): data2 = np.random.rand(*bshape) data3 = np.random.rand(*cshape) - a = ead.variable(data, 'a') - b = ead.variable(data2, 'b') - c = ead.variable(data3, 'c') + a = eteq.variable(data, 'a') + b = eteq.variable(data2, 'b') + c = eteq.variable(data3, 'c') tf_a = tf.Variable(data) tf_b = tf.Variable(data2) tf_c = tf.Variable(data3) @@ -1191,9 +1243,9 @@ def test_grader_scenario9(self): tf_f = tf.matmul(tf.transpose(tf_d), tf.transpose(tf_c)) tf_dest = tf.matmul(tf_e, tf_f) - da = ead.derive(dest, a) - db = ead.derive(dest, b) - dc = ead.derive(dest, c) + da = eteq.derive(dest, a) + db = eteq.derive(dest, b) + dc = eteq.derive(dest, c) tf_da, tf_db, tf_dc = tf.gradients(tf_dest, [tf_a, tf_b, tf_c]) tfsess = tf.compat.v1.Session() @@ -1201,7 +1253,7 @@ def test_grader_scenario9(self): tfsess.run(tf_b.initializer) tfsess.run(tf_c.initializer) - sess = ead.Session() + sess = eteq.Session() sess.track([dest, da, db, dc]) sess.update() @@ -1220,7 +1272,7 @@ def test_grader_scenario9(self): # log to file logging.basicConfig(filename='/tmp/ead_ptest.log',level=logging.DEBUG) - logging.info("running ptest for ead") + logging.info("running ptest for eteq") _test_data = generate_testcases( test_template['test_cases'], diff --git a/ead/test/test_api.cpp b/eteq/test/test_api.cpp similarity index 54% rename from ead/test/test_api.cpp rename to eteq/test/test_api.cpp index dc0036907..a087cae48 100644 --- a/ead/test/test_api.cpp +++ b/eteq/test/test_api.cpp @@ -6,20 +6,26 @@ #include "exam/exam.hpp" -#include "ead/generated/api.hpp" -#include "ead/session.hpp" -#include 
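The grader scenarios above all follow one pattern: build the same expression in eteq and TensorFlow, then compare eteq.derive against tf.gradients. Stripped down to a single matmul, the comparison looks roughly like this (shapes arbitrary):

import numpy as np
import tensorflow as tf
import eteq.eteq as eteq
import eteq.tenncor as tc

data = np.random.rand(10, 3)
data2 = np.random.rand(3, 5)

var = eteq.variable(data, 'var')
var2 = eteq.variable(data2, 'var2')
out = tc.matmul(var, var2)
ex = eteq.derive(out, var)

sess = eteq.Session()
sess.track([out, ex])
sess.update()

tf_var = tf.Variable(data)
tf_var2 = tf.Variable(data2)
tf_out = tf.matmul(tf_var, tf_var2)
tf_grad = tf.gradients(tf_out, [tf_var])[0]

tfsess = tf.compat.v1.Session()
tfsess.run(tf_var.initializer)
tfsess.run(tf_var2.initializer)
# out.get() should match tfsess.run(tf_out); ex.get() should match tfsess.run(tf_grad)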
"ead/constant.hpp" -#include "ead/variable.hpp" -#include "ead/grader.hpp" +#include "eteq/generated/api.hpp" +#include "eteq/session.hpp" +#include "eteq/constant.hpp" +#include "eteq/variable.hpp" +#include "eteq/grader.hpp" using UnaryDblF = std::function; template -using UnaryOpF = std::function(ead::NodeptrT&)>; +using UnaryOpF = std::function(eteq::NodeptrT&)>; template -using BinaryOpF = std::function(ead::NodeptrT&,ead::NodeptrT&)>; +using BinaryOpF = std::function(eteq::NodeptrT&,eteq::NodeptrT&)>; + +template +using LhsBinaryOpF = std::function(eteq::NodeptrT&,T&)>; + +template +using RhsBinaryOpF = std::function(T&,eteq::NodeptrT&)>; template using BinaryFwdF = std::function; @@ -32,11 +38,11 @@ using MatVecT = std::vector>; static const int FREIVALD_N = 10; -static MatVecT create_2d (ead::NodeptrT data) +static MatVecT create_2d (eteq::NodeptrT data) { int32_t* ptr = (int32_t*) data->data(); - ade::DimT C = data->shape().at(0); - ade::DimT R = data->shape().at(1); + teq::DimT C = data->shape().at(0); + teq::DimT R = data->shape().at(1); MatVecT res; for (size_t y = 0; y < R; y++) @@ -57,9 +63,9 @@ static MatVecT create_2d (ead::NodeptrT data) static bool freivald (MatVecT a, MatVecT b, MatVecT c) { - ade::RankT cdim = b.size(); - ade::RankT bdim = b[0].size(); - ade::RankT adim = a.size(); + teq::RankT cdim = b.size(); + teq::RankT bdim = b[0].size(); + teq::RankT adim = a.size(); // a has shape [cdim, adim] // b has shape [bdim, cdim] // c has shape [bdim, adim] @@ -70,7 +76,7 @@ static bool freivald (MatVecT a, MatVecT b, MatVecT c) // generate r of len b[0].size() or c[0].size() std::vector r(bdim); std::uniform_int_distribution dist{0, 1}; - std::generate(r.begin(), r.end(), [&]() { return dist(ead::get_engine()); }); + std::generate(r.begin(), r.end(), [&]() { return dist(eteq::get_engine()); }); // p = matmul(a, matmul(b, r)) - matmul(c, r) std::vector br; // matmul(b, r) @@ -122,25 +128,25 @@ static bool freivald (MatVecT a, MatVecT b, MatVecT c) static void unary_generic (UnaryOpF op, - std::function,ade::Shape&,std::vector&)> verify, + std::function,teq::Shape&,std::vector&)> verify, std::function&)> bwverify) { - std::vector slist = {2, 3, 4}; - ade::Shape shape(slist); + std::vector slist = {2, 3, 4}; + teq::Shape shape(slist); std::vector data = { 22, 15, 74, 38, 61, 95, 62, 81, 99, 76, 7, 22, 56, 50, 19, 13, 12, 10, 31, 40, 60, 54, 6, 83 }; - ead::NodeptrT src = ead::make_constant(data.data(), shape); - ead::NodeptrT dest = op(src); + eteq::NodeptrT src = eteq::make_constant(data.data(), shape); + eteq::NodeptrT dest = op(src); dest->update(); verify(dest, shape, data); - ead::Session session; + eteq::Session session; - ead::NodeptrT gsrc = ead::derive(dest, src); + eteq::NodeptrT gsrc = eteq::derive(dest, src); session.track({gsrc->get_tensor()}); session.update(); @@ -152,15 +158,15 @@ static void unary_generic (UnaryOpF op, static void unar_elem (std::vector data, - std::vector shape_list, + std::vector shape_list, UnaryOpF op, UnaryDblF fwd, UnaryDblF bwd) { - ade::Shape shape(shape_list); - ade::NElemT n = shape.n_elems(); + teq::Shape shape(shape_list); + teq::NElemT n = shape.n_elems(); assert(data.size() == n); - ead::NodeptrT src = ead::make_constant(data.data(), shape); - ead::NodeptrT dest = op(src); + eteq::NodeptrT src = eteq::make_constant(data.data(), shape); + eteq::NodeptrT dest = op(src); dest->update(); { @@ -173,9 +179,9 @@ static void unar_elem (std::vector data, EXPECT_DOUBLE_EQ(fwd(data[i]), optr[i]); } - ead::Session session; + eteq::Session 
session; - ead::NodeptrT gsrc = ead::derive(dest, src); + eteq::NodeptrT gsrc = eteq::derive(dest, src); session.track({gsrc->get_tensor()}); session.update(); @@ -195,7 +201,7 @@ static void unary_elementary (UnaryOpF op, UnaryDblF fwd, UnaryDblF bwd) { // tensor operation - std::vector slist = {2, 3, 4}; + std::vector slist = {2, 3, 4}; std::vector data = { 59, 10, 28, 10, 67, 62, 23, 4, 55, 77, 28, 16, 82, 52, 47, 16, 7, 85, 37, 2, 8, 52, 62, 43 @@ -203,7 +209,7 @@ static void unary_elementary (UnaryOpF op, unar_elem(data, slist, op, fwd, bwd); // matrix optimized operation - std::vector slist_2d = {2, 3}; + std::vector slist_2d = {2, 3}; std::vector data_2d = { 59, 10, 28, 10, 67, 62, @@ -213,18 +219,21 @@ static void unary_elementary (UnaryOpF op, static void binar_elem (std::vector data, std::vector data2, - std::vector shape_list, BinaryOpF op, - BinaryFwdF fwd, BinaryBwdF bwd) + std::vector shape_list, BinaryOpF op, + LhsBinaryOpF lhs_op, RhsBinaryOpF rhs_op, + BinaryFwdF fwd, BinaryBwdF bwd, double cst) { - ade::Shape shape(shape_list); - ade::NElemT n = shape.n_elems(); + teq::Shape shape(shape_list); + teq::NElemT n = shape.n_elems(); assert(data.size() == n); assert(data2.size() == n); - ead::NodeptrT src = ead::make_constant(data.data(), shape); - ead::NodeptrT src2 = ead::make_constant(data2.data(), shape); - ead::NodeptrT dest = op(src, src2); + eteq::NodeptrT src = eteq::make_constant(data.data(), shape); + eteq::NodeptrT src2 = eteq::make_constant(data2.data(), shape); + eteq::NodeptrT dest = op(src, src2); + eteq::NodeptrT clhs = lhs_op(src, cst); + eteq::NodeptrT crhs = rhs_op(cst, src2); dest->update(); { @@ -237,10 +246,31 @@ static void binar_elem (std::vector data, std::vector data2, EXPECT_DOUBLE_EQ(fwd(data[i], data2[i]), optr[i]); } - ead::Session session; + clhs->update(); + crhs->update(); + { + auto gotshape = clhs->shape(); + ASSERT_ARREQ(shape_list, gotshape); + } + { + auto gotshape = crhs->shape(); + ASSERT_ARREQ(shape_list, gotshape); + } + double* lptr = (double*) clhs->data(); + for (size_t i = 0; i < n; ++i) + { + EXPECT_DOUBLE_EQ(fwd(data[i], cst), lptr[i]); + } + double* rptr = (double*) crhs->data(); + for (size_t i = 0; i < n; ++i) + { + EXPECT_DOUBLE_EQ(fwd(cst, data2[i]), rptr[i]); + } + + eteq::Session session; - ead::NodeptrT dest2 = op(src, src); - ead::NodeptrT gsame = ead::derive(dest2, src); + eteq::NodeptrT dest2 = op(src, src); + eteq::NodeptrT gsame = eteq::derive(dest2, src); session.track({gsame->get_tensor()}); session.update(); { @@ -253,7 +283,7 @@ static void binar_elem (std::vector data, std::vector data2, EXPECT_DOUBLE_EQ(bwd(data[i], data[i], 1.0, 1.0), goptr[i]); } - ead::NodeptrT gleft = ead::derive(dest, src); + eteq::NodeptrT gleft = eteq::derive(dest, src); session.track({gleft->get_tensor()}); session.update(); { @@ -266,7 +296,7 @@ static void binar_elem (std::vector data, std::vector data2, EXPECT_DOUBLE_EQ(bwd(data[i], data2[i], 1.0, 0.0), goptr2[i]); } - ead::NodeptrT gright = ead::derive(dest, src2); + eteq::NodeptrT gright = eteq::derive(dest, src2); session.track({gright->get_tensor()}); session.update(); { @@ -282,10 +312,11 @@ static void binar_elem (std::vector data, std::vector data2, static void binary_elementary (BinaryOpF op, + LhsBinaryOpF lhs_op, RhsBinaryOpF rhs_op, BinaryFwdF fwd, BinaryBwdF bwd) { // tensor operation - std::vector slist = {3, 2, 4}; + std::vector slist = {3, 2, 4}; std::vector data = { 0.0919361505, 0.5135099474, 0.3147548326, 0.0281299379, 0.3705218798, 0.6808164860, 0.1933972592, 
0.2326945471, 0.4600163558, 0.1600801317, 0.9942654588, 0.8739832345, @@ -299,10 +330,12 @@ static void binary_elementary (BinaryOpF op, 0.0504231590, 0.8494357051, 0.0908431573, 0.1567913571, 0.1211327459, 0.5269402648 }; - binar_elem(data, data2, slist, op, fwd, bwd); + double cst = 0.7819955055; + + binar_elem(data, data2, slist, op, lhs_op, rhs_op, fwd, bwd, cst); // matrix optimized operation - std::vector slist_2d = {3, 2}; + std::vector slist_2d = {3, 2}; std::vector data_2d = { 0.0919361505, 0.5135099474, 0.3147548326, 0.0281299379, 0.3705218798, 0.6808164860, @@ -311,20 +344,23 @@ static void binary_elementary (BinaryOpF op, 0.2547977589, 0.8808089905, 0.4323663340, 0.5710527217, 0.6207772267, 0.8574923091, }; - binar_elem(data_2d, data2_2d, slist_2d, op, fwd, bwd); + binar_elem(data_2d, data2_2d, slist_2d, op, lhs_op, rhs_op, fwd, bwd, cst); } static void binar_elem_int (std::vector data, std::vector data2, - std::vector shape_list, BinaryOpF op, - BinaryFwdF fwd, BinaryBwdF bwd) + std::vector shape_list, BinaryOpF op, + LhsBinaryOpF lhs_op, RhsBinaryOpF rhs_op, + BinaryFwdF fwd, BinaryBwdF bwd, int32_t cst) { - ade::Shape shape(shape_list); - ade::NElemT n = shape.n_elems(); + teq::Shape shape(shape_list); + teq::NElemT n = shape.n_elems(); - ead::NodeptrT src = ead::make_constant(data.data(), shape); - ead::NodeptrT src2 = ead::make_constant(data2.data(), shape); - ead::NodeptrT dest = op(src, src2); + eteq::NodeptrT src = eteq::make_constant(data.data(), shape); + eteq::NodeptrT src2 = eteq::make_constant(data2.data(), shape); + eteq::NodeptrT dest = op(src, src2); + eteq::NodeptrT clhs = lhs_op(src, cst); + eteq::NodeptrT crhs = rhs_op(cst, src2); dest->update(); { @@ -337,10 +373,31 @@ static void binar_elem_int (std::vector data, std::vector data EXPECT_EQ(fwd(data[i], data2[i]), optr[i]); } - ead::Session session; + clhs->update(); + crhs->update(); + { + auto gotshape = clhs->shape(); + ASSERT_ARREQ(shape_list, gotshape); + } + { + auto gotshape = crhs->shape(); + ASSERT_ARREQ(shape_list, gotshape); + } + int32_t* lptr = (int32_t*) clhs->data(); + for (size_t i = 0; i < n; ++i) + { + EXPECT_EQ(fwd(data[i], cst), lptr[i]); + } + int32_t* rptr = (int32_t*) crhs->data(); + for (size_t i = 0; i < n; ++i) + { + EXPECT_EQ(fwd(cst, data2[i]), rptr[i]); + } + + eteq::Session session; - ead::NodeptrT dest2 = op(src, src); - ead::NodeptrT gsame = ead::derive(dest2, src); + eteq::NodeptrT dest2 = op(src, src); + eteq::NodeptrT gsame = eteq::derive(dest2, src); session.track({gsame->get_tensor()}); session.update(); { @@ -353,7 +410,7 @@ static void binar_elem_int (std::vector data, std::vector data EXPECT_EQ(bwd(data[i], data[i], 1.0, 1.0), goptr[i]); } - ead::NodeptrT gleft = ead::derive(dest, src); + eteq::NodeptrT gleft = eteq::derive(dest, src); session.track({gleft->get_tensor()}); session.update(); { @@ -366,7 +423,7 @@ static void binar_elem_int (std::vector data, std::vector data EXPECT_EQ(bwd(data[i], data2[i], 1.0, 0.0), goptr2[i]); } - ead::NodeptrT gright = ead::derive(dest, src2); + eteq::NodeptrT gright = eteq::derive(dest, src2); session.track({gright->get_tensor()}); session.update(); { @@ -382,10 +439,11 @@ static void binar_elem_int (std::vector data, std::vector data static void binary_elementary_int (BinaryOpF op, + LhsBinaryOpF lhs_op, RhsBinaryOpF rhs_op, BinaryFwdF fwd, BinaryBwdF bwd) { // tensor operation - std::vector slist = {4, 3, 2}; + std::vector slist = {4, 3, 2}; std::vector data = { 1, 2, 3, 0, 1, 2, 2, 1, 1, 3, 3, 1, 2, 2, 3, 0, 1, 3, 3, 1, 2, 0, 0, 
2 @@ -395,10 +453,12 @@ static void binary_elementary_int (BinaryOpF op, 1, 3, 1, 3, 1, 0, 2, 1, 2, 2, 0, 1 }; - binar_elem_int(data, data2, slist, op, fwd, bwd); + int32_t cst = 2; + + binar_elem_int(data, data2, slist, op, lhs_op, rhs_op, fwd, bwd, cst); // matrix optimized operation - std::vector slist_2d = {4, 2}; + std::vector slist_2d = {4, 2}; std::vector data_2d = { 1, 2, 3, 0, 1, 2, 2, 1, @@ -408,14 +468,14 @@ static void binary_elementary_int (BinaryOpF op, 3, 3, 2, 2, }; - binar_elem_int(data_2d, data2_2d, slist_2d, op, fwd, bwd); + binar_elem_int(data_2d, data2_2d, slist_2d, op, lhs_op, rhs_op, fwd, bwd, cst); } TEST(API, Abs) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::abs(a); }, + [](eteq::NodeptrT& a) { return tenncor::abs(a); }, [](double d) { return std::abs(d); }, [](double d) { return d / std::abs(d); }); } @@ -423,17 +483,21 @@ TEST(API, Abs) TEST(API, Neg) { + auto fwd = [](double d) { return -d; }; + auto bwd = [](double d) { return -1.0; }; + unary_elementary( + [](eteq::NodeptrT& a) { return tenncor::neg(a); }, + fwd, bwd); unary_elementary( - [](ead::NodeptrT& a) { return tenncor::neg(a); }, - [](double d) { return -d; }, - [](double d) { return -1.0; }); + [](eteq::NodeptrT& a) { return -a; }, + fwd, bwd); } TEST(API, Sin) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::sin(a); }, + [](eteq::NodeptrT& a) { return tenncor::sin(a); }, [](double d) { return std::sin(d); }, [](double d) { return std::cos(d); }); } @@ -442,7 +506,7 @@ TEST(API, Sin) TEST(API, Cos) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::cos(a); }, + [](eteq::NodeptrT& a) { return tenncor::cos(a); }, [](double d) { return std::cos(d); }, [](double d) { return -std::sin(d); }); } @@ -451,7 +515,7 @@ TEST(API, Cos) TEST(API, Tan) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::tan(a); }, + [](eteq::NodeptrT& a) { return tenncor::tan(a); }, [](double d) { return std::tan(d); }, [](double d) { double denom = std::cos(d); @@ -463,7 +527,7 @@ TEST(API, Tan) TEST(API, Exp) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::exp(a); }, + [](eteq::NodeptrT& a) { return tenncor::exp(a); }, [](double d) { return std::exp(d); }, [](double d) { return std::exp(d); }); } @@ -472,7 +536,7 @@ TEST(API, Exp) TEST(API, Log) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::log(a); }, + [](eteq::NodeptrT& a) { return tenncor::log(a); }, [](double d) { return std::log(d); }, [](double d) { return 1.0 / d; }); } @@ -481,7 +545,7 @@ TEST(API, Log) TEST(API, Sqrt) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::sqrt(a); }, + [](eteq::NodeptrT& a) { return tenncor::sqrt(a); }, [](double d) { return std::sqrt(d); }, [](double d) { return 1.0 / (2 * std::sqrt(d)); }); } @@ -490,7 +554,7 @@ TEST(API, Sqrt) TEST(API, Round) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::round(a); }, + [](eteq::NodeptrT& a) { return tenncor::round(a); }, [](double d) { return std::round(d); }, [](double d) { return 1.0; }); } @@ -499,7 +563,7 @@ TEST(API, Round) TEST(API, Sigmoid) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::sigmoid(a); }, + [](eteq::NodeptrT& a) { return tenncor::sigmoid(a); }, [](double d) { return 1 / (1 + std::exp(-d)); }, [](double d) { @@ -512,7 +576,7 @@ TEST(API, Sigmoid) TEST(API, SigmoidGrad) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::sigmoid_grad(a); }, + [](eteq::NodeptrT& a) { return tenncor::sigmoid_grad(a); }, [](double d) { double sig = 1 / (1 + std::exp(-d)); @@ -530,7 
+594,7 @@ TEST(API, SigmoidGrad) TEST(API, Tanh) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::tanh(a); }, + [](eteq::NodeptrT& a) { return tenncor::tanh(a); }, [](double d) { double e2d = std::exp(2 * d); @@ -548,7 +612,7 @@ TEST(API, Tanh) TEST(API, Square) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::square(a); }, + [](eteq::NodeptrT& a) { return tenncor::square(a); }, [](double d) { return d * d; }, [](double d) { return 2 * d; }); } @@ -557,7 +621,7 @@ TEST(API, Square) TEST(API, Cube) { unary_elementary( - [](ead::NodeptrT& a) { return tenncor::cube(a); }, + [](eteq::NodeptrT& a) { return tenncor::cube(a); }, [](double d) { return d * d * d; }, [](double d) { return 3 * d * d; }); } @@ -566,7 +630,11 @@ TEST(API, Cube) TEST(API, Pow) { binary_elementary( - [](ead::NodeptrT& a, ead::NodeptrT& b) + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return tenncor::pow(a, b); }, + [](eteq::NodeptrT& a, double& b) + { return tenncor::pow(a, b); }, + [](double& a, eteq::NodeptrT& b) { return tenncor::pow(a, b); }, [](double a, double b) { return std::pow(a, b); }, [](double a, double b, double leftg, double rightg) @@ -579,60 +647,112 @@ TEST(API, Pow) TEST(API, Add) { + auto fwd = [](double a, double b) { return a + b; }; + auto bwd = [](double a, double b, double leftg, double rightg) + { return leftg + rightg; }; binary_elementary( - [](ead::NodeptrT& a, ead::NodeptrT& b) + [](eteq::NodeptrT& a, eteq::NodeptrT& b) { return tenncor::add(a, b); }, - [](double a, double b) { return a + b; }, - [](double a, double b, double leftg, double rightg) - { - return leftg + rightg; - }); + [](eteq::NodeptrT& a, double& b) + { return tenncor::add(a, b); }, + [](double& a, eteq::NodeptrT& b) + { return tenncor::add(a, b); }, + fwd, bwd); + binary_elementary( + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return a + b; }, + [](eteq::NodeptrT& a, double& b) + { return a + b; }, + [](double& a, eteq::NodeptrT& b) + { return a + b; }, + fwd, bwd); } TEST(API, Sub) { + auto fwd = [](double a, double b) { return a - b; }; + auto bwd = [](double a, double b, double leftg, double rightg) + { return leftg - rightg; }; binary_elementary( - [](ead::NodeptrT& a, ead::NodeptrT& b) + [](eteq::NodeptrT& a, eteq::NodeptrT& b) { return tenncor::sub(a, b); }, - [](double a, double b) { return a - b; }, - [](double a, double b, double leftg, double rightg) - { - return leftg - rightg; - }); + [](eteq::NodeptrT& a, double& b) + { return tenncor::sub(a, b); }, + [](double& a, eteq::NodeptrT& b) + { return tenncor::sub(a, b); }, + fwd, bwd); + binary_elementary( + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return a - b; }, + [](eteq::NodeptrT& a, double& b) + { return a - b; }, + [](double& a, eteq::NodeptrT& b) + { return a - b; }, + fwd, bwd); } TEST(API, Mul) { - binary_elementary( - [](ead::NodeptrT& a, ead::NodeptrT& b) - { return tenncor::mul(a, b); }, - [](double a, double b) { return a * b; }, - [](double a, double b, double leftg, double rightg) + auto fwd = [](double a, double b) { return a * b; }; + auto bwd = [](double a, double b, double leftg, double rightg) { return leftg * b + rightg * a; - }); + }; + binary_elementary( + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return tenncor::mul(a, b); }, + [](eteq::NodeptrT& a, double& b) + { return tenncor::mul(a, b); }, + [](double& a, eteq::NodeptrT& b) + { return tenncor::mul(a, b); }, + fwd, bwd); + binary_elementary( + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return a * b; }, + [](eteq::NodeptrT& a, double& b) + { return a * b; 
}, + [](double& a, eteq::NodeptrT& b) + { return a * b; }, + fwd, bwd); } TEST(API, Div) { - binary_elementary( - [](ead::NodeptrT& a, ead::NodeptrT& b) - { return tenncor::div(a, b); }, - [](double a, double b) { return a / b; }, - [](double a, double b, double leftg, double rightg) + auto fwd = [](double a, double b) { return a / b; }; + auto bwd = [](double a, double b, double leftg, double rightg) { return (leftg * b - rightg * a) / (b * b); - }); + }; + binary_elementary( + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return tenncor::div(a, b); }, + [](eteq::NodeptrT& a, double& b) + { return tenncor::div(a, b); }, + [](double& a, eteq::NodeptrT& b) + { return tenncor::div(a, b); }, + fwd, bwd); + binary_elementary( + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return a / b; }, + [](eteq::NodeptrT& a, double& b) + { return a / b; }, + [](double& a, eteq::NodeptrT& b) + { return a / b; }, + fwd, bwd); } TEST(API, Min) { binary_elementary( - [](ead::NodeptrT& a, ead::NodeptrT& b) + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return tenncor::min(a, b); }, + [](eteq::NodeptrT& a, double& b) + { return tenncor::min(a, b); }, + [](double& a, eteq::NodeptrT& b) { return tenncor::min(a, b); }, [](double a, double b) { return std::min(a, b); }, [](double a, double b, double leftg, double rightg) @@ -654,7 +774,11 @@ TEST(API, Min) TEST(API, Max) { binary_elementary( - [](ead::NodeptrT& a, ead::NodeptrT& b) + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return tenncor::max(a, b); }, + [](eteq::NodeptrT& a, double& b) + { return tenncor::max(a, b); }, + [](double& a, eteq::NodeptrT& b) { return tenncor::max(a, b); }, [](double a, double b) { return std::max(a, b); }, [](double a, double b, double leftg, double rightg) @@ -675,61 +799,105 @@ TEST(API, Max) TEST(API, Eq) { + auto fwd = [](int32_t a, int32_t b) { return a == b; }; + auto bwd = [](int32_t a, int32_t b, int32_t leftg, int32_t rightg) + { return 0; }; binary_elementary_int( - [](ead::NodeptrT& a, ead::NodeptrT& b) + [](eteq::NodeptrT& a, eteq::NodeptrT& b) { return tenncor::eq(a, b); }, - [](int32_t a, int32_t b) { return a == b; }, - [](int32_t a, int32_t b, int32_t leftg, int32_t rightg) - { - return 0; - }); + [](eteq::NodeptrT& a, int32_t& b) + { return tenncor::eq(a, b); }, + [](int32_t& a, eteq::NodeptrT& b) + { return tenncor::eq(a, b); }, + fwd, bwd); + binary_elementary_int( + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return a == b; }, + [](eteq::NodeptrT& a, int32_t& b) + { return a == b; }, + [](int32_t& a, eteq::NodeptrT& b) + { return a == b; }, + fwd, bwd); } TEST(API, Neq) { + auto fwd = [](int32_t a, int32_t b) { return a != b; }; + auto bwd = [](int32_t a, int32_t b, int32_t leftg, int32_t rightg) + { return 0; }; binary_elementary_int( - [](ead::NodeptrT& a, ead::NodeptrT& b) + [](eteq::NodeptrT& a, eteq::NodeptrT& b) { return tenncor::neq(a, b); }, - [](int32_t a, int32_t b) { return a != b; }, - [](int32_t a, int32_t b, int32_t leftg, int32_t rightg) - { - return 0; - }); + [](eteq::NodeptrT& a, int32_t& b) + { return tenncor::neq(a, b); }, + [](int32_t& a, eteq::NodeptrT& b) + { return tenncor::neq(a, b); }, + fwd, bwd); + binary_elementary_int( + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return a != b; }, + [](eteq::NodeptrT& a, int32_t& b) + { return a != b; }, + [](int32_t& a, eteq::NodeptrT& b) + { return a != b; }, + fwd, bwd); } TEST(API, Lt) { + auto fwd = [](int32_t a, int32_t b) { return a < b; }; + auto bwd = [](int32_t a, int32_t b, int32_t leftg, int32_t rightg) + { return 0; }; 
binary_elementary_int( - [](ead::NodeptrT& a, ead::NodeptrT& b) + [](eteq::NodeptrT& a, eteq::NodeptrT& b) { return tenncor::lt(a, b); }, - [](int32_t a, int32_t b) { return a < b; }, - [](int32_t a, int32_t b, int32_t leftg, int32_t rightg) - { - return 0; - }); + [](eteq::NodeptrT& a, int32_t& b) + { return tenncor::lt(a, b); }, + [](int32_t& a, eteq::NodeptrT& b) + { return tenncor::lt(a, b); }, + fwd, bwd); + binary_elementary_int( + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return a < b; }, + [](eteq::NodeptrT& a, int32_t& b) + { return a < b; }, + [](int32_t& a, eteq::NodeptrT& b) + { return a < b; }, + fwd, bwd); } TEST(API, Gt) { + auto fwd = [](int32_t a, int32_t b) { return a > b; }; + auto bwd = [](int32_t a, int32_t b, int32_t leftg, int32_t rightg) + { return 0; }; binary_elementary_int( - [](ead::NodeptrT& a, ead::NodeptrT& b) + [](eteq::NodeptrT& a, eteq::NodeptrT& b) { return tenncor::gt(a, b); }, - [](int32_t a, int32_t b) { return a > b; }, - [](int32_t a, int32_t b, int32_t leftg, int32_t rightg) - { - return 0; - }); + [](eteq::NodeptrT& a, int32_t& b) + { return tenncor::gt(a, b); }, + [](int32_t& a, eteq::NodeptrT& b) + { return tenncor::gt(a, b); }, + fwd, bwd); + binary_elementary_int( + [](eteq::NodeptrT& a, eteq::NodeptrT& b) + { return a > b; }, + [](eteq::NodeptrT& a, int32_t& b) + { return a > b; }, + [](int32_t& a, eteq::NodeptrT& b) + { return a > b; }, + fwd, bwd); } TEST(API, NElems) { unary_generic( - [](ead::NodeptrT& src) { return tenncor::n_elems(src); }, - [](ead::NodeptrT out, ade::Shape& shape, std::vector&) + [](eteq::NodeptrT& src) { return tenncor::n_elems(src); }, + [](eteq::NodeptrT out, teq::Shape& shape, std::vector&) { ASSERT_EQ(1, out->shape().n_elems()); double got = *((double*) out->data()); @@ -748,10 +916,10 @@ TEST(API, NElems) TEST(API, NDims) { - ade::RankT dim = 2; + teq::RankT dim = 2; unary_generic( - [dim](ead::NodeptrT& src) { return tenncor::n_dims(src, dim); }, - [dim](ead::NodeptrT out, ade::Shape& shape, std::vector&) + [dim](eteq::NodeptrT& src) { return tenncor::n_dims(src, dim); }, + [dim](eteq::NodeptrT out, teq::Shape& shape, std::vector&) { ASSERT_EQ(1, out->shape().n_elems()); double got = *((double*) out->data()); @@ -771,8 +939,8 @@ TEST(API, NDims) TEST(API, Rsum) { unary_generic( - [](ead::NodeptrT& src) { return tenncor::reduce_sum(src); }, - [](ead::NodeptrT out, ade::Shape& shape, std::vector& data) + [](eteq::NodeptrT& src) { return tenncor::reduce_sum(src); }, + [](eteq::NodeptrT out, teq::Shape& shape, std::vector& data) { size_t n = out->shape().n_elems(); { @@ -791,25 +959,25 @@ TEST(API, Rsum) } }); unary_generic( - [](ead::NodeptrT& src) { return tenncor::reduce_sum(src, 1, 1); }, - [](ead::NodeptrT out, ade::Shape& shape, std::vector& data) + [](eteq::NodeptrT& src) { return tenncor::reduce_sum(src, 1, 1); }, + [](eteq::NodeptrT out, teq::Shape& shape, std::vector& data) { - std::vector expect_list(shape.begin(), shape.end()); + std::vector expect_list(shape.begin(), shape.end()); expect_list[1] = 1; - ade::Shape gotshape = out->shape(); + teq::Shape gotshape = out->shape(); EXPECT_ARREQ(expect_list, gotshape); - ade::CoordT coord; - ade::DimT d = shape.at(1); + teq::CoordT coord; + teq::DimT d = shape.at(1); double* got = (double*) out->data(); for (size_t i = 0, n = gotshape.n_elems(); i < n; ++i) { - coord = ade::coordinate(gotshape, i); + coord = teq::coordinate(gotshape, i); double acc = 0; for (size_t j = 0; j < d; ++j) { coord[1] = j; - acc += data[ade::index(shape, coord)]; + acc += 
data[teq::index(shape, coord)]; } EXPECT_DOUBLE_EQ(acc, got[i]); } @@ -826,9 +994,9 @@ TEST(API, Rsum) TEST(API, Rprod) { - std::vector slist = {2, 2, 3}; - ade::Shape shape(slist); - std::vector data = { + std::vector slist = {2, 2, 3}; + teq::Shape shape(slist); + std::vector data = { 2, 1, 7, 3, @@ -839,9 +1007,9 @@ TEST(API, Rprod) 7, 2, }; - ead::NodeptrT src = ead::make_constant(data.data(), shape); - ead::NodeptrT dest = tenncor::reduce_prod(src); - ead::NodeptrT dest2 = tenncor::reduce_prod(src, 1, 1); + eteq::NodeptrT src = eteq::make_constant(data.data(), shape); + eteq::NodeptrT dest = tenncor::reduce_prod(src); + eteq::NodeptrT dest2 = tenncor::reduce_prod(src, 1, 1); dest->update(); { @@ -849,39 +1017,39 @@ TEST(API, Rprod) { ASSERT_EQ(1, n); } - size_t got = *((size_t*) dest->data()); + int32_t got = *((int32_t*) dest->data()); - size_t expect = std::accumulate(data.begin(), data.end(), 1, std::multiplies()); + int32_t expect = std::accumulate(data.begin(), data.end(), 1, std::multiplies()); EXPECT_EQ(expect, got); } dest2->update(); { - std::vector expect_list(shape.begin(), shape.end()); + std::vector expect_list(shape.begin(), shape.end()); expect_list[1] = 1; - ade::Shape gotshape = dest2->shape(); + teq::Shape gotshape = dest2->shape(); EXPECT_ARREQ(expect_list, gotshape); - ade::CoordT coord; - ade::DimT d = shape.at(1); - size_t* got = (size_t*) dest2->data(); + teq::CoordT coord; + teq::DimT d = shape.at(1); + int32_t* got = (int32_t*) dest2->data(); for (size_t i = 0, n = gotshape.n_elems(); i < n; ++i) { - coord = ade::coordinate(gotshape, i); - size_t acc = 1; + coord = teq::coordinate(gotshape, i); + int32_t acc = 1; for (size_t j = 0; j < d; ++j) { coord[1] = j; - acc *= data[ade::index(shape, coord)]; + acc *= data[teq::index(shape, coord)]; } EXPECT_EQ(acc, got[i]); } } - ead::Session session; + eteq::Session session; - ead::NodeptrT gsrc = ead::derive(dest, src); - ead::NodeptrT gsrc2 = ead::derive(dest2, src); + eteq::NodeptrT gsrc = eteq::derive(dest, src); + eteq::NodeptrT gsrc2 = eteq::derive(dest2, src); session.track({ gsrc->get_tensor(), gsrc2->get_tensor(), @@ -890,11 +1058,11 @@ TEST(API, Rprod) auto gotshape = gsrc->shape(); ASSERT_ARREQ(slist, gotshape); - size_t* goptr = (size_t*) gsrc->data(); + int32_t* goptr = (int32_t*) gsrc->data(); { size_t n = data.size(); - std::vector left(n, 1); - std::vector right(n, 1); + std::vector left(n, 1); + std::vector right(n, 1); for (size_t i = 1; i < n; ++i) { left[i] = data[i - 1] * left[i - 1]; @@ -902,12 +1070,12 @@ TEST(API, Rprod) } for (size_t i = 0; i < n; ++i) { - size_t expect = left[i] * right[i]; + int32_t expect = left[i] * right[i]; EXPECT_EQ(expect, goptr[i]); } } - std::vector ex_grad = { + std::vector ex_grad = { 7, 3, 2, 1, @@ -919,7 +1087,7 @@ TEST(API, Rprod) }; auto gotshape2 = gsrc2->shape(); ASSERT_ARREQ(slist, gotshape2); - size_t* goptr2 = (size_t*) gsrc2->data(); + int32_t* goptr2 = (int32_t*) gsrc2->data(); { for (size_t i = 0, n = ex_grad.size(); i < n; ++i) { @@ -932,8 +1100,8 @@ TEST(API, Rprod) TEST(API, Rmin) { unary_generic( - [](ead::NodeptrT& src) { return tenncor::reduce_min(src); }, - [](ead::NodeptrT out, ade::Shape& shape, std::vector& data) + [](eteq::NodeptrT& src) { return tenncor::reduce_min(src); }, + [](eteq::NodeptrT out, teq::Shape& shape, std::vector& data) { size_t n = out->shape().n_elems(); ASSERT_EQ(1, n); @@ -958,46 +1126,46 @@ TEST(API, Rmin) } }); unary_generic( - [](ead::NodeptrT& src) { return tenncor::reduce_min(src, 1, 1); }, - [](ead::NodeptrT out, 
ade::Shape& shape, std::vector& data) + [](eteq::NodeptrT& src) { return tenncor::reduce_min(src, 1, 1); }, + [](eteq::NodeptrT out, teq::Shape& shape, std::vector& data) { - std::vector expect_list(shape.begin(), shape.end()); + std::vector expect_list(shape.begin(), shape.end()); expect_list[1] = 1; - ade::Shape gotshape = out->shape(); + teq::Shape gotshape = out->shape(); EXPECT_ARREQ(expect_list, gotshape); - ade::CoordT coord; - ade::DimT d = shape.at(1); + teq::CoordT coord; + teq::DimT d = shape.at(1); double* got = (double*) out->data(); for (size_t i = 0, n = gotshape.n_elems(); i < n; ++i) { - coord = ade::coordinate(gotshape, i); - double acc = data[ade::index(shape, coord)]; + coord = teq::coordinate(gotshape, i); + double acc = data[teq::index(shape, coord)]; for (size_t j = 1; j < d; ++j) { coord[1] = j; - acc = std::min(acc, data[ade::index(shape, coord)]); + acc = std::min(acc, data[teq::index(shape, coord)]); } EXPECT_DOUBLE_EQ(acc, got[i]); } }, [](double* gout, std::vector& og) { - ade::Shape inshape({2, 3, 4}); - ade::Shape outshape({2, 1, 4}); - ade::CoordT coord; - ade::DimT d = 3; + teq::Shape inshape({2, 3, 4}); + teq::Shape outshape({2, 1, 4}); + teq::CoordT coord; + teq::DimT d = 3; size_t m = og.size(); size_t n = outshape.n_elems(); std::vector expect(m, 0); for (size_t i = 0; i < n; ++i) { - coord = ade::coordinate(outshape, i); - size_t min_idx = ade::index(inshape, coord); + coord = teq::coordinate(outshape, i); + size_t min_idx = teq::index(inshape, coord); for (size_t j = 1; j < d; ++j) { coord[1] = j; - size_t idx = ade::index(inshape, coord); + size_t idx = teq::index(inshape, coord); if (og[min_idx] > og[idx]) { min_idx = idx; @@ -1014,8 +1182,8 @@ TEST(API, Rmin) TEST(API, Rmax) { unary_generic( - [](ead::NodeptrT& src) { return tenncor::reduce_max(src); }, - [](ead::NodeptrT out, ade::Shape& shape, std::vector& data) + [](eteq::NodeptrT& src) { return tenncor::reduce_max(src); }, + [](eteq::NodeptrT out, teq::Shape& shape, std::vector& data) { size_t n = out->shape().n_elems(); ASSERT_EQ(1, n); @@ -1040,46 +1208,46 @@ TEST(API, Rmax) } }); unary_generic( - [](ead::NodeptrT& src) { return tenncor::reduce_max(src, 1, 1); }, - [](ead::NodeptrT out, ade::Shape& shape, std::vector& data) + [](eteq::NodeptrT& src) { return tenncor::reduce_max(src, 1, 1); }, + [](eteq::NodeptrT out, teq::Shape& shape, std::vector& data) { - std::vector expect_list(shape.begin(), shape.end()); + std::vector expect_list(shape.begin(), shape.end()); expect_list[1] = 1; - ade::Shape gotshape = out->shape(); + teq::Shape gotshape = out->shape(); EXPECT_ARREQ(expect_list, gotshape); - ade::CoordT coord; - ade::DimT d = shape.at(1); + teq::CoordT coord; + teq::DimT d = shape.at(1); double* got = (double*) out->data(); for (size_t i = 0, n = gotshape.n_elems(); i < n; ++i) { - coord = ade::coordinate(gotshape, i); - double acc = data[ade::index(shape, coord)]; + coord = teq::coordinate(gotshape, i); + double acc = data[teq::index(shape, coord)]; for (size_t j = 1; j < d; ++j) { coord[1] = j; - acc = std::max(acc, data[ade::index(shape, coord)]); + acc = std::max(acc, data[teq::index(shape, coord)]); } EXPECT_DOUBLE_EQ(acc, got[i]); } }, [](double* gout, std::vector& og) { - ade::Shape inshape({2, 3, 4}); - ade::Shape outshape({2, 1, 4}); - ade::CoordT coord; - ade::DimT d = 3; + teq::Shape inshape({2, 3, 4}); + teq::Shape outshape({2, 1, 4}); + teq::CoordT coord; + teq::DimT d = 3; size_t m = og.size(); size_t n = outshape.n_elems(); std::vector expect(m, 0); for (size_t i = 0; i < 
n; ++i) { - coord = ade::coordinate(outshape, i); - size_t max_idx = ade::index(inshape, coord); + coord = teq::coordinate(outshape, i); + size_t max_idx = teq::index(inshape, coord); for (size_t j = 1; j < d; ++j) { coord[1] = j; - size_t idx = ade::index(inshape, coord); + size_t idx = teq::index(inshape, coord); if (og[max_idx] < og[idx]) { max_idx = idx; @@ -1095,37 +1263,37 @@ TEST(API, Rmax) TEST(API, Permute) { - std::vector slist = {4, 3, 2}; - std::vector pidx = {2, 0, 1}; - ade::Shape shape(slist); - ade::NElemT nelem = shape.n_elems(); + std::vector slist = {4, 3, 2}; + std::vector pidx = {2, 0, 1}; + teq::Shape shape(slist); + teq::NElemT nelem = shape.n_elems(); std::vector data = { 70, 36, 93, 50, 59, 98, 39, 5, 54, 84, 100, 94, 75, 64, 30, 17, 90, 79, 21, 54, 6, 7, 69, 53 }; - ead::NodeptrT src = ead::make_constant(data.data(), shape); - ead::NodeptrT dest = tenncor::permute(src, pidx); + eteq::NodeptrT src = eteq::make_constant(data.data(), shape); + eteq::NodeptrT dest = tenncor::permute(src, pidx); dest->update(); size_t n = dest->shape().n_elems(); ASSERT_EQ(nelem, n); double* got = (double*) dest->data(); - ade::CoordT coord, temp; + teq::CoordT coord, temp; for (size_t i = 0; i < n; ++i) { - coord = temp = ade::coordinate(shape, i); + coord = temp = teq::coordinate(shape, i); for (int32_t j = 0, n = slist.size(); j < n; ++j) { coord[j] = temp[pidx[j]]; } - EXPECT_EQ(data[i], got[ade::index(dest->shape(), coord)]); + EXPECT_EQ(data[i], got[teq::index(dest->shape(), coord)]); } - ead::Session session; + eteq::Session session; - ead::NodeptrT gsrc = ead::derive(dest, src); + eteq::NodeptrT gsrc = eteq::derive(dest, src); session.track({gsrc->get_tensor()}); session.update(); { @@ -1142,19 +1310,19 @@ TEST(API, Permute) TEST(API, Extend) { - std::vector slist = {2, 5}; - std::vector ext = {1, 3}; - ade::Shape shape(slist); - ade::NElemT nelem = shape.n_elems(); + std::vector slist = {2, 5}; + std::vector ext = {1, 3}; + teq::Shape shape(slist); + teq::NElemT nelem = shape.n_elems(); std::vector data = { 51, 42, 9, 43, 37, 36, 65, 95, 10, 33 }; - ead::NodeptrT src = ead::make_constant(data.data(), shape); - ead::NodeptrT dest = tenncor::extend(src, slist.size(), ext); + eteq::NodeptrT src = eteq::make_constant(data.data(), shape); + eteq::NodeptrT dest = tenncor::extend(src, slist.size(), ext); dest->update(); - size_t ext_nelem = ade::Shape(ext).n_elems(); + size_t ext_nelem = teq::Shape(ext).n_elems(); size_t n = dest->shape().n_elems(); ASSERT_EQ(nelem * ext_nelem, n); double* got = (double*) dest->data(); @@ -1166,9 +1334,9 @@ TEST(API, Extend) } } - ead::Session session; + eteq::Session session; - ead::NodeptrT gsrc = ead::derive(dest, src); + eteq::NodeptrT gsrc = eteq::derive(dest, src); session.track({gsrc->get_tensor()}); session.update(); { @@ -1185,12 +1353,12 @@ TEST(API, Extend) TEST(API, Matmul) { - std::vector alist = {3, 2}; - std::vector blist = {4, 3}; - std::vector sqrlist = {3, 3}; - ade::Shape ashape(alist); - ade::Shape bshape(blist); - ade::Shape cshape(sqrlist); + std::vector alist = {3, 2}; + std::vector blist = {4, 3}; + std::vector sqrlist = {3, 3}; + teq::Shape ashape(alist); + teq::Shape bshape(blist); + teq::Shape cshape(sqrlist); std::vector data = { 40, 1, 23, @@ -1216,12 +1384,12 @@ TEST(API, Matmul) 23+77, 23+77, 23+77, 23+77, }; - ead::NodeptrT a = ead::make_constant(data.data(), ashape); - ead::NodeptrT b = ead::make_constant(data2.data(), bshape); - ead::NodeptrT dest = tenncor::matmul(a, b); + eteq::NodeptrT a = 
eteq::make_constant(data.data(), ashape); + eteq::NodeptrT b = eteq::make_constant(data2.data(), bshape); + eteq::NodeptrT dest = tenncor::matmul(a, b); dest->update(); - ade::Shape gotshape = dest->shape(); + teq::Shape gotshape = dest->shape(); EXPECT_EQ(4, gotshape.at(0)); EXPECT_EQ(2, gotshape.at(1)); int32_t* optr = (int32_t*) dest->data(); @@ -1232,25 +1400,25 @@ TEST(API, Matmul) MatVecT ddc = create_2d(dest); EXPECT_TRUE(freivald(dda, ddb, ddc)); - ead::Session session; + eteq::Session session; - ead::NodeptrT c = ead::make_constant(data3.data(), cshape); - ead::NodeptrT dest2 = tenncor::matmul(c, c); - ead::NodeptrT gsame = ead::derive(dest2, c); + eteq::NodeptrT c = eteq::make_constant(data3.data(), cshape); + eteq::NodeptrT dest2 = tenncor::matmul(c, c); + eteq::NodeptrT gsame = eteq::derive(dest2, c); session.track({gsame->get_tensor()}); session.update(); - ade::Shape gcshape = gsame->shape(); + teq::Shape gcshape = gsame->shape(); { - std::vector glist(gcshape.begin(), gcshape.end()); + std::vector glist(gcshape.begin(), gcshape.end()); ASSERT_ARREQ(sqrlist, glist); } - ead::NodeptrT gleft = ead::derive(dest, a); + eteq::NodeptrT gleft = eteq::derive(dest, a); session.track({gleft->get_tensor()}); session.update(); - ade::Shape gashape = gleft->shape(); + teq::Shape gashape = gleft->shape(); { - std::vector glist(gashape.begin(), gashape.end()); + std::vector glist(gashape.begin(), gashape.end()); ASSERT_ARREQ(alist, glist); int32_t* ga = (int32_t*) gleft->data(); ASSERT_NE(nullptr, ga); @@ -1258,12 +1426,12 @@ TEST(API, Matmul) ASSERT_ARREQ(expect_ga, ga_data); } - ead::NodeptrT gright = ead::derive(dest, b); + eteq::NodeptrT gright = eteq::derive(dest, b); session.track({gright->get_tensor()}); session.update(); - ade::Shape gbshape = gright->shape(); + teq::Shape gbshape = gright->shape(); { - std::vector glist(gbshape.begin(), gbshape.end()); + std::vector glist(gbshape.begin(), gbshape.end()); ASSERT_ARREQ(blist, glist); int32_t* gb = (int32_t*) gright->data(); ASSERT_NE(nullptr, gb); @@ -1273,15 +1441,15 @@ TEST(API, Matmul) } -static void test_rand_unif (std::vector shape_list) +static void test_rand_unif (std::vector shape_list) { double hi = 3.2234; double lo = 0.2547977589; - ade::Shape shape(shape_list); + teq::Shape shape(shape_list); - ead::NodeptrT src = ead::make_constant_scalar(lo, shape); - ead::NodeptrT src2 = ead::make_constant_scalar(hi, shape); - ead::NodeptrT dest = tenncor::random::rand_unif(src, src2); + eteq::NodeptrT src = eteq::make_constant_scalar(lo, shape); + eteq::NodeptrT src2 = eteq::make_constant_scalar(hi, shape); + eteq::NodeptrT dest = tenncor::random::rand_unif(src, src2); dest->update(); { @@ -1296,9 +1464,9 @@ static void test_rand_unif (std::vector shape_list) EXPECT_GT(hi, optr[i]); } - ead::Session session; + eteq::Session session; - ead::NodeptrT gleft = ead::derive(dest, src); + eteq::NodeptrT gleft = eteq::derive(dest, src); session.track({gleft->get_tensor()}); session.update(); { @@ -1308,7 +1476,7 @@ static void test_rand_unif (std::vector shape_list) double* goptr2 = (double*) gleft->data(); EXPECT_DOUBLE_EQ(0, goptr2[0]); - ead::NodeptrT gright = ead::derive(dest, src); + eteq::NodeptrT gright = eteq::derive(dest, src); session.track({gright->get_tensor()}); session.update(); { @@ -1323,22 +1491,22 @@ static void test_rand_unif (std::vector shape_list) TEST(API, RandUniform) { // tensor operation - std::vector slist = {31, 21, 14}; + std::vector slist = {31, 21, 14}; test_rand_unif(slist); // matrix optimized operation - 
std::vector slist_2d = {31, 14}; + std::vector slist_2d = {31, 14}; test_rand_unif(slist_2d); } TEST(API, Convolution) { - std::vector alist = {2, 4, 3, 3}; - std::vector blist = {1, 2, 2, 1}; - ade::Shape shape(alist); - ade::Shape kshape(blist); - std::vector expectslist = { + std::vector alist = {2, 4, 3, 3}; + std::vector blist = {1, 2, 2, 1}; + teq::Shape shape(alist); + teq::Shape kshape(blist); + std::vector expectslist = { 2, 3, 2, 3, 1, 1, 1, 1, }; @@ -1387,11 +1555,11 @@ TEST(API, Convolution) 2083, 2021, }; - ead::NodeptrT img = ead::make_constant(data.data(), shape); - ead::NodeptrT kernel = ead::make_constant(data2.data(), kshape); - std::vector dims(ade::rank_cap); + eteq::NodeptrT img = eteq::make_constant(data.data(), shape); + eteq::NodeptrT kernel = eteq::make_constant(data2.data(), kshape); + std::vector dims(teq::rank_cap); std::iota(dims.begin(), dims.end(), 0); - ead::NodeptrT dest = tenncor::convolution(img, kernel, dims); + eteq::NodeptrT dest = tenncor::convolution(img, kernel, dims); dest->update(); { @@ -1404,9 +1572,9 @@ TEST(API, Convolution) ASSERT_ARREQ(expect_out, outdata); } - ead::Session session; + eteq::Session session; - ead::NodeptrT gleft = ead::derive(dest, img); + eteq::NodeptrT gleft = eteq::derive(dest, img); ASSERT_NE(nullptr, gleft); session.track({gleft->get_tensor()}); session.update(); @@ -1418,7 +1586,7 @@ TEST(API, Convolution) ASSERT_ARREQ(expect_ga, ga_data); } - ead::NodeptrT gright = ead::derive(dest, kernel); + eteq::NodeptrT gright = eteq::derive(dest, kernel); ASSERT_NE(nullptr, gright); session.track({gright->get_tensor()}); session.update(); diff --git a/ead/test/test_coord.cpp b/eteq/test/test_coord.cpp similarity index 52% rename from ead/test/test_coord.cpp rename to eteq/test/test_coord.cpp index 48b6cf2e0..b25e91786 100644 --- a/ead/test/test_coord.cpp +++ b/eteq/test/test_coord.cpp @@ -6,26 +6,26 @@ #include "exam/exam.hpp" -#include "ead/ead.hpp" +#include "eteq/eteq.hpp" TEST(COORD, Connect) { - ead::CoordMap cmap({1, 2, 3, 4, 5, 6, 7, 8}, false); - ead::CoordMap other({9, 8, 7, 6, 5, 4, 3, 2}, true); + eteq::CoordMap cmap({1, 2, 3, 4, 5, 6, 7, 8}, false); + eteq::CoordMap other({9, 8, 7, 6, 5, 4, 3, 2}, true); EXPECT_EQ(nullptr, cmap.connect(other)); } TEST(COORD, Forward) { - ade::CoordT expect_a = {1, 2, 3, 4, 5, 6, 7, 8}; - ade::CoordT expect_b = {9, 8, 7, 6, 5, 4, 3, 2}; + teq::CoordT expect_a = {1, 2, 3, 4, 5, 6, 7, 8}; + teq::CoordT expect_b = {9, 8, 7, 6, 5, 4, 3, 2}; - ead::CoordMap a(expect_a, false); - ead::CoordMap b(expect_b, true); + eteq::CoordMap a(expect_a, false); + eteq::CoordMap b(expect_b, true); - ade::CoordT out; + teq::CoordT out; a.forward(out.begin(), out.begin()); EXPECT_ARREQ(expect_a, out); @@ -36,7 +36,7 @@ TEST(COORD, Forward) TEST(COORD, Reverse) { - ead::CoordMap cmap({1, 2, 3, 4, 5, 6, 7, 8}, false); + eteq::CoordMap cmap({1, 2, 3, 4, 5, 6, 7, 8}, false); EXPECT_EQ(nullptr, cmap.reverse()); } diff --git a/ead/test/test_equation.cpp b/eteq/test/test_equation.cpp similarity index 86% rename from ead/test/test_equation.cpp rename to eteq/test/test_equation.cpp index 50fcf8e12..c1d3c9854 100644 --- a/ead/test/test_equation.cpp +++ b/eteq/test/test_equation.cpp @@ -6,24 +6,24 @@ #include "exam/exam.hpp" -#include "ead/generated/api.hpp" +#include "eteq/generated/api.hpp" -#include "ead/session.hpp" -#include "ead/grader.hpp" -#include "ead/constant.hpp" -#include "ead/variable.hpp" +#include "eteq/session.hpp" +#include "eteq/grader.hpp" +#include "eteq/constant.hpp" +#include 
"eteq/variable.hpp" -#include "ead/parse.hpp" +#include "eteq/parse.hpp" TEST(EQUATION, MatmulComplex) { - std::vector alist = {3, 2}; - std::vector blist = {4, 3}; - std::vector clist = {2, 4}; - ade::Shape ashape(alist); - ade::Shape bshape(blist); - ade::Shape cshape(clist); + std::vector alist = {3, 2}; + std::vector blist = {4, 3}; + std::vector clist = {2, 4}; + teq::Shape ashape(alist); + teq::Shape bshape(blist); + teq::Shape cshape(clist); std::vector data = { 40, 1, 23, @@ -56,20 +56,20 @@ TEST(EQUATION, MatmulComplex) 112505257984, 278567649280, }; - ead::NodeptrT a = ead::make_variable(data.data(), ashape); - ead::NodeptrT b = ead::make_variable(data2.data(), bshape); - ead::NodeptrT c = ead::make_variable(data3.data(), cshape); + eteq::NodeptrT a = eteq::make_variable(data.data(), ashape); + eteq::NodeptrT b = eteq::make_variable(data2.data(), bshape); + eteq::NodeptrT c = eteq::make_variable(data3.data(), cshape); auto d = tenncor::matmul(a, b); auto e = tenncor::matmul(c, d); auto f = tenncor::matmul(tenncor::transpose(d), tenncor::transpose(c)); auto dest = tenncor::matmul(e, f); - auto da = ead::derive(dest, a); - auto db = ead::derive(dest, b); - auto dc = ead::derive(dest, c); + auto da = eteq::derive(dest, a); + auto db = eteq::derive(dest, b); + auto dc = eteq::derive(dest, c); - ead::Session session; + eteq::Session session; session.track({ dest->get_tensor(), da->get_tensor(), @@ -112,12 +112,12 @@ TEST(EQUATION, MatmulComplex) TEST(EQUATION, SigmoidMLP_Slow) { - ade::Shape in_shape({10, 3}); - ade::Shape weight0_shape({9, 10}); - ade::Shape bias0_shape({9}); - ade::Shape weight1_shape({5, 9}); - ade::Shape bias1_shape({5}); - ade::Shape out_shape({5,3}); + teq::Shape in_shape({10, 3}); + teq::Shape weight0_shape({9, 10}); + teq::Shape bias0_shape({9}); + teq::Shape weight1_shape({5, 9}); + teq::Shape bias1_shape({5}); + teq::Shape out_shape({5,3}); std::vector in_data = { 0.8575073725, 0.0910915775, 0.9133499042, @@ -197,31 +197,31 @@ TEST(EQUATION, SigmoidMLP_Slow) 0.4350741570, 0.3949956178, 0.2341486792, 0.1348473539, 0.8681677362, }; - ead::NodeptrT in = ead::make_variable(in_data.data(), in_shape); - ead::NodeptrT weight0 = ead::make_variable(w0_data.data(), weight0_shape); - ead::NodeptrT bias0 = ead::make_variable(b0_data.data(), bias0_shape); - ead::NodeptrT weight1 = ead::make_variable(w1_data.data(), weight1_shape); - ead::NodeptrT bias1 = ead::make_variable(b1_data.data(), bias1_shape); - ead::NodeptrT out = ead::make_variable(out_data.data(), out_shape); + eteq::NodeptrT in = eteq::make_variable(in_data.data(), in_shape); + eteq::NodeptrT weight0 = eteq::make_variable(w0_data.data(), weight0_shape); + eteq::NodeptrT bias0 = eteq::make_variable(b0_data.data(), bias0_shape); + eteq::NodeptrT weight1 = eteq::make_variable(w1_data.data(), weight1_shape); + eteq::NodeptrT bias1 = eteq::make_variable(b1_data.data(), bias1_shape); + eteq::NodeptrT out = eteq::make_variable(out_data.data(), out_shape); auto layer0 = tenncor::add(tenncor::matmul(in, weight0), tenncor::extend(bias0, 1, {3})); - auto sig0 = tenncor::div(ead::make_constant_scalar(1, ade::Shape({9, 3})), - tenncor::add(ead::make_constant_scalar(1, ade::Shape({9, 3})), + auto sig0 = tenncor::div(eteq::make_constant_scalar(1, teq::Shape({9, 3})), + tenncor::add(eteq::make_constant_scalar(1, teq::Shape({9, 3})), tenncor::exp(tenncor::neg(layer0)))); auto layer1 = tenncor::add(tenncor::matmul(sig0, weight1), tenncor::extend(bias1, 1, {3})); - auto sig1 = tenncor::div(ead::make_constant_scalar(1, 
ade::Shape({5, 3})), - tenncor::add(ead::make_constant_scalar(1, ade::Shape({5, 3})), + auto sig1 = tenncor::div(eteq::make_constant_scalar(1, teq::Shape({5, 3})), + tenncor::add(eteq::make_constant_scalar(1, teq::Shape({5, 3})), tenncor::exp(tenncor::neg(layer1)))); - auto err = tenncor::pow(tenncor::sub(out, sig1), ead::make_constant_scalar(2, out_shape)); + auto err = tenncor::pow(tenncor::sub(out, sig1), eteq::make_constant_scalar(2, out_shape)); - auto dw0 = ead::derive(err, weight0); - auto db0 = ead::derive(err, bias0); - auto dw1 = ead::derive(err, weight1); - auto db1 = ead::derive(err, bias1); + auto dw0 = eteq::derive(err, weight0); + auto db0 = eteq::derive(err, bias0); + auto dw1 = eteq::derive(err, weight1); + auto db1 = eteq::derive(err, bias1); - ead::Session session; + eteq::Session session; session.track({ dw0->get_tensor(), db0->get_tensor(), @@ -333,12 +333,12 @@ TEST(EQUATION, SigmoidMLP_Slow) TEST(EQUATION, OptimizedSigmoidMLP_Slow) { - ade::Shape in_shape({10, 3}); - ade::Shape weight0_shape({9, 10}); - ade::Shape bias0_shape({9}); - ade::Shape weight1_shape({5, 9}); - ade::Shape bias1_shape({5}); - ade::Shape out_shape({5,3}); + teq::Shape in_shape({10, 3}); + teq::Shape weight0_shape({9, 10}); + teq::Shape bias0_shape({9}); + teq::Shape weight1_shape({5, 9}); + teq::Shape bias1_shape({5}); + teq::Shape out_shape({5,3}); std::vector in_data = { 0.8575073725, 0.0910915775, 0.9133499042, @@ -418,33 +418,33 @@ TEST(EQUATION, OptimizedSigmoidMLP_Slow) 0.4350741570, 0.3949956178, 0.2341486792, 0.1348473539, 0.8681677362, }; - ead::NodeptrT in = ead::make_variable(in_data.data(), in_shape); - ead::NodeptrT weight0 = ead::make_variable(w0_data.data(), weight0_shape); - ead::NodeptrT bias0 = ead::make_variable(b0_data.data(), bias0_shape); - ead::NodeptrT weight1 = ead::make_variable(w1_data.data(), weight1_shape); - ead::NodeptrT bias1 = ead::make_variable(b1_data.data(), bias1_shape); - ead::NodeptrT out = ead::make_variable(out_data.data(), out_shape); + eteq::NodeptrT in = eteq::make_variable(in_data.data(), in_shape); + eteq::NodeptrT weight0 = eteq::make_variable(w0_data.data(), weight0_shape); + eteq::NodeptrT bias0 = eteq::make_variable(b0_data.data(), bias0_shape); + eteq::NodeptrT weight1 = eteq::make_variable(w1_data.data(), weight1_shape); + eteq::NodeptrT bias1 = eteq::make_variable(b1_data.data(), bias1_shape); + eteq::NodeptrT out = eteq::make_variable(out_data.data(), out_shape); auto layer0 = tenncor::add(tenncor::matmul(in, weight0), tenncor::extend(bias0, 1, {3})); - auto sig0 = tenncor::div(ead::make_constant_scalar(1, ade::Shape({9, 3})), - tenncor::add(ead::make_constant_scalar(1, ade::Shape({9, 3})), + auto sig0 = tenncor::div(eteq::make_constant_scalar(1, teq::Shape({9, 3})), + tenncor::add(eteq::make_constant_scalar(1, teq::Shape({9, 3})), tenncor::exp(tenncor::neg(layer0)))); auto layer1 = tenncor::add(tenncor::matmul(sig0, weight1), tenncor::extend(bias1, 1, {3})); - auto sig1 = tenncor::div(ead::make_constant_scalar(1, ade::Shape({5, 3})), - tenncor::add(ead::make_constant_scalar(1, ade::Shape({5, 3})), + auto sig1 = tenncor::div(eteq::make_constant_scalar(1, teq::Shape({5, 3})), + tenncor::add(eteq::make_constant_scalar(1, teq::Shape({5, 3})), tenncor::exp(tenncor::neg(layer1)))); - auto err = tenncor::pow(tenncor::sub(out, sig1), ead::make_constant_scalar(2, out_shape)); + auto err = tenncor::pow(tenncor::sub(out, sig1), eteq::make_constant_scalar(2, out_shape)); - auto dw0 = ead::derive(err, weight0); - auto db0 = ead::derive(err, bias0); - 
auto dw1 = ead::derive(err, weight1); - auto db1 = ead::derive(err, bias1); + auto dw0 = eteq::derive(err, weight0); + auto db0 = eteq::derive(err, bias0); + auto dw1 = eteq::derive(err, weight1); + auto db1 = eteq::derive(err, bias1); // optimize - auto rules = ead::parse_file("cfg/optimizations.rules"); - ade::TensT roots = { + auto rules = eteq::parse_file("cfg/optimizations.rules"); + teq::TensT roots = { dw0->get_tensor(), db0->get_tensor(), dw1->get_tensor(), @@ -452,7 +452,7 @@ TEST(EQUATION, OptimizedSigmoidMLP_Slow) }; opt::optimize(roots, rules); - ead::Session session; + eteq::Session session; session.track({ dw0->get_tensor(), db0->get_tensor(), @@ -564,12 +564,12 @@ TEST(EQUATION, OptimizedSigmoidMLP_Slow) TEST(EQUATION, SigmoidMLP_Fast) { - ade::Shape in_shape({10, 3}); - ade::Shape weight0_shape({9, 10}); - ade::Shape bias0_shape({9}); - ade::Shape weight1_shape({5, 9}); - ade::Shape bias1_shape({5}); - ade::Shape out_shape({5,3}); + teq::Shape in_shape({10, 3}); + teq::Shape weight0_shape({9, 10}); + teq::Shape bias0_shape({9}); + teq::Shape weight1_shape({5, 9}); + teq::Shape bias1_shape({5}); + teq::Shape out_shape({5,3}); std::vector in_data = { 0.8575073725, 0.0910915775, 0.9133499042, @@ -649,12 +649,12 @@ TEST(EQUATION, SigmoidMLP_Fast) 0.4350741570, 0.3949956178, 0.2341486792, 0.1348473539, 0.8681677362, }; - ead::NodeptrT in = ead::make_variable(in_data.data(), in_shape); - ead::NodeptrT weight0 = ead::make_variable(w0_data.data(), weight0_shape); - ead::NodeptrT bias0 = ead::make_variable(b0_data.data(), bias0_shape); - ead::NodeptrT weight1 = ead::make_variable(w1_data.data(), weight1_shape); - ead::NodeptrT bias1 = ead::make_variable(b1_data.data(), bias1_shape); - ead::NodeptrT out = ead::make_variable(out_data.data(), out_shape); + eteq::NodeptrT in = eteq::make_variable(in_data.data(), in_shape); + eteq::NodeptrT weight0 = eteq::make_variable(w0_data.data(), weight0_shape); + eteq::NodeptrT bias0 = eteq::make_variable(b0_data.data(), bias0_shape); + eteq::NodeptrT weight1 = eteq::make_variable(w1_data.data(), weight1_shape); + eteq::NodeptrT bias1 = eteq::make_variable(b1_data.data(), bias1_shape); + eteq::NodeptrT out = eteq::make_variable(out_data.data(), out_shape); auto layer0 = tenncor::add(tenncor::matmul(in, weight0), tenncor::extend(bias0, 1, {3})); auto sig0 = tenncor::sigmoid(layer0); @@ -662,14 +662,14 @@ TEST(EQUATION, SigmoidMLP_Fast) auto layer1 = tenncor::add(tenncor::matmul(sig0, weight1), tenncor::extend(bias1, 1, {3})); auto sig1 = tenncor::sigmoid(layer1); - auto err = tenncor::pow(tenncor::sub(out, sig1), ead::make_constant_scalar(2, out_shape)); + auto err = tenncor::pow(tenncor::sub(out, sig1), eteq::make_constant_scalar(2, out_shape)); - auto dw0 = ead::derive(err, weight0); - auto db0 = ead::derive(err, bias0); - auto dw1 = ead::derive(err, weight1); - auto db1 = ead::derive(err, bias1); + auto dw0 = eteq::derive(err, weight0); + auto db0 = eteq::derive(err, bias0); + auto dw1 = eteq::derive(err, weight1); + auto db1 = eteq::derive(err, bias1); - ead::Session session; + eteq::Session session; session.track({ dw0->get_tensor(), db0->get_tensor(), @@ -778,12 +778,12 @@ TEST(EQUATION, SigmoidMLP_Fast) TEST(EQUATION, OptimizedSigmoidMLP_Fast) { - ade::Shape in_shape({10, 3}); - ade::Shape weight0_shape({9, 10}); - ade::Shape bias0_shape({9}); - ade::Shape weight1_shape({5, 9}); - ade::Shape bias1_shape({5}); - ade::Shape out_shape({5,3}); + teq::Shape in_shape({10, 3}); + teq::Shape weight0_shape({9, 10}); + teq::Shape bias0_shape({9}); + 
teq::Shape weight1_shape({5, 9}); + teq::Shape bias1_shape({5}); + teq::Shape out_shape({5,3}); std::vector in_data = { 0.8575073725, 0.0910915775, 0.9133499042, @@ -863,12 +863,12 @@ TEST(EQUATION, OptimizedSigmoidMLP_Fast) 0.4350741570, 0.3949956178, 0.2341486792, 0.1348473539, 0.8681677362, }; - ead::NodeptrT in = ead::make_variable(in_data.data(), in_shape); - ead::NodeptrT weight0 = ead::make_variable(w0_data.data(), weight0_shape); - ead::NodeptrT bias0 = ead::make_variable(b0_data.data(), bias0_shape); - ead::NodeptrT weight1 = ead::make_variable(w1_data.data(), weight1_shape); - ead::NodeptrT bias1 = ead::make_variable(b1_data.data(), bias1_shape); - ead::NodeptrT out = ead::make_variable(out_data.data(), out_shape); + eteq::NodeptrT in = eteq::make_variable(in_data.data(), in_shape); + eteq::NodeptrT weight0 = eteq::make_variable(w0_data.data(), weight0_shape); + eteq::NodeptrT bias0 = eteq::make_variable(b0_data.data(), bias0_shape); + eteq::NodeptrT weight1 = eteq::make_variable(w1_data.data(), weight1_shape); + eteq::NodeptrT bias1 = eteq::make_variable(b1_data.data(), bias1_shape); + eteq::NodeptrT out = eteq::make_variable(out_data.data(), out_shape); auto layer0 = tenncor::add(tenncor::matmul(in, weight0), tenncor::extend(bias0, 1, {3})); auto sig0 = tenncor::sigmoid(layer0); @@ -876,16 +876,16 @@ TEST(EQUATION, OptimizedSigmoidMLP_Fast) auto layer1 = tenncor::add(tenncor::matmul(sig0, weight1), tenncor::extend(bias1, 1, {3})); auto sig1 = tenncor::sigmoid(layer1); - auto err = tenncor::pow(tenncor::sub(out, sig1), ead::make_constant_scalar(2, out_shape)); + auto err = tenncor::pow(tenncor::sub(out, sig1), eteq::make_constant_scalar(2, out_shape)); - auto dw0 = ead::derive(err, weight0); - auto db0 = ead::derive(err, bias0); - auto dw1 = ead::derive(err, weight1); - auto db1 = ead::derive(err, bias1); + auto dw0 = eteq::derive(err, weight0); + auto db0 = eteq::derive(err, bias0); + auto dw1 = eteq::derive(err, weight1); + auto db1 = eteq::derive(err, bias1); // optimize - auto rules = ead::parse_file("cfg/optimizations.rules"); - ade::TensT roots = { + auto rules = eteq::parse_file("cfg/optimizations.rules"); + teq::TensT roots = { dw0->get_tensor(), db0->get_tensor(), dw1->get_tensor(), @@ -893,7 +893,7 @@ TEST(EQUATION, OptimizedSigmoidMLP_Fast) }; opt::optimize(roots, rules); - ead::Session session; + eteq::Session session; session.track({ dw0->get_tensor(), db0->get_tensor(), diff --git a/ead/test/test_random.cpp b/eteq/test/test_random.cpp similarity index 88% rename from ead/test/test_random.cpp rename to eteq/test/test_random.cpp index e48618554..90e6ca0c8 100644 --- a/ead/test/test_random.cpp +++ b/eteq/test/test_random.cpp @@ -6,14 +6,14 @@ #include "exam/exam.hpp" -#include "ead/ead.hpp" +#include "eteq/eteq.hpp" TEST(RANDOM, UniformValueDouble) { double a = 4; double b = 16; - double c = ead::unif(a, b); + double c = eteq::unif(a, b); EXPECT_LE(a, c); EXPECT_GE(b, c); } @@ -23,7 +23,7 @@ TEST(RANDOM, UniformValueInt) { size_t a = 4; size_t b = 16; - size_t c = ead::unif(a, b); + size_t c = eteq::unif(a, b); EXPECT_LE(a, c); EXPECT_GE(b, c); } @@ -33,7 +33,7 @@ TEST(RANDOM, UniformGenDouble) { double a = 4; double b = 16; - auto gen = ead::unif_gen(a, b); + auto gen = eteq::unif_gen(a, b); std::vector out(10); std::generate(out.begin(), out.end(), gen); for (auto c : out) @@ -48,7 +48,7 @@ TEST(RANDOM, UniformGenInt) { size_t a = 4; size_t b = 16; - auto gen = ead::unif_gen(a, b); + auto gen = eteq::unif_gen(a, b); std::vector out(10); std::generate(out.begin(), 
out.end(), gen); for (auto c : out) @@ -63,7 +63,7 @@ TEST(RANDOM, UniformGenInt) // { // double a = 16; // double b = 4; -// auto gen = ead::norm_gen(a, b); +// auto gen = eteq::norm_gen(a, b); // std::vector out(1000); // std::generate(out.begin(), out.end(), gen); // size_t stdevs[3]; diff --git a/ead/test/test_serialize.cpp b/eteq/test/test_serialize.cpp similarity index 77% rename from ead/test/test_serialize.cpp rename to eteq/test/test_serialize.cpp index ad75e465e..ba75e74d3 100644 --- a/ead/test/test_serialize.cpp +++ b/eteq/test/test_serialize.cpp @@ -12,15 +12,15 @@ #include "exam/exam.hpp" -#include "dbg/stream/ade.hpp" +#include "dbg/stream/teq.hpp" #include "pbm/save.hpp" #include "pbm/load.hpp" #include "tag/prop.hpp" -#include "ead/serialize.hpp" -#include "ead/ead.hpp" +#include "eteq/serialize.hpp" +#include "eteq/eteq.hpp" const std::string testdir = "models/test"; @@ -28,33 +28,33 @@ const std::string testdir = "models/test"; TEST(SERIALIZE, SaveGraph) { - std::string expect_pbfile = testdir + "/ead_test.pbx"; + std::string expect_pbfile = testdir + "/eteq_test.pbx"; std::string got_pbfile = "got_ead_test.pbx"; cortenn::Graph graph; - ade::Shape in_shape({10, 3}); - ade::Shape weight0_shape({9, 10}); - ade::Shape bias0_shape({9}); - ade::Shape weight1_shape({5, 9}); - ade::Shape bias1_shape({5}); - ade::Shape out_shape({5,3}); + teq::Shape in_shape({10, 3}); + teq::Shape weight0_shape({9, 10}); + teq::Shape bias0_shape({9}); + teq::Shape weight1_shape({5, 9}); + teq::Shape bias1_shape({5}); + teq::Shape out_shape({5,3}); - ead::NodeptrT in = ead::make_variable( + eteq::NodeptrT in = eteq::make_variable( std::vector(in_shape.n_elems()).data(), in_shape, "in"); - ead::NodeptrT weight0 = ead::make_variable( + eteq::NodeptrT weight0 = eteq::make_variable( std::vector(weight0_shape.n_elems()).data(), weight0_shape, "weight0"); - ead::NodeptrT bias0 = ead::make_variable( + eteq::NodeptrT bias0 = eteq::make_variable( std::vector(bias0_shape.n_elems()).data(), bias0_shape, "bias0"); - ead::NodeptrT weight1 = ead::make_variable( + eteq::NodeptrT weight1 = eteq::make_variable( std::vector(weight1_shape.n_elems()).data(), weight1_shape, "weight1"); - ead::NodeptrT bias1 = ead::make_variable( + eteq::NodeptrT bias1 = eteq::make_variable( std::vector(bias1_shape.n_elems()).data(), bias1_shape, "bias1"); - ead::NodeptrT out = ead::make_variable( + eteq::NodeptrT out = eteq::make_variable( std::vector(out_shape.n_elems()).data(), out_shape, "out"); @@ -67,28 +67,28 @@ TEST(SERIALIZE, SaveGraph) preg.property_tag(out->get_tensor(), "training_out"); auto layer0 = tenncor::add(tenncor::matmul(in, weight0), tenncor::extend(bias0, 1, {3})); - auto sig0 = tenncor::div(ead::make_constant_scalar(1, ade::Shape({9, 3})), - tenncor::add(ead::make_constant_scalar(1, ade::Shape({9, 3})), + auto sig0 = tenncor::div(eteq::make_constant_scalar(1, teq::Shape({9, 3})), + tenncor::add(eteq::make_constant_scalar(1, teq::Shape({9, 3})), tenncor::exp(tenncor::neg(layer0)))); auto layer1 = tenncor::add(tenncor::matmul(sig0, weight1), tenncor::extend(bias1, 1, {3})); - auto sig1 = tenncor::div(ead::make_constant_scalar(1, ade::Shape({5, 3})), - tenncor::add(ead::make_constant_scalar(1, ade::Shape({5, 3})), + auto sig1 = tenncor::div(eteq::make_constant_scalar(1, teq::Shape({5, 3})), + tenncor::add(eteq::make_constant_scalar(1, teq::Shape({5, 3})), tenncor::exp(tenncor::neg(layer1)))); - auto err = tenncor::pow(tenncor::sub(out, sig1), ead::make_constant_scalar(2, out_shape)); + auto err = 
tenncor::pow(tenncor::sub(out, sig1), eteq::make_constant_scalar(2, out_shape)); - auto dw0 = ead::derive(err, weight0); - auto db0 = ead::derive(err, bias0); - auto dw1 = ead::derive(err, weight1); - auto db1 = ead::derive(err, bias1); + auto dw0 = eteq::derive(err, weight0); + auto db0 = eteq::derive(err, bias0); + auto dw1 = eteq::derive(err, weight1); + auto db1 = eteq::derive(err, bias1); preg.property_tag(dw0->get_tensor(), "derivative_dw0"); preg.property_tag(db0->get_tensor(), "derivative_db0"); preg.property_tag(dw1->get_tensor(), "derivative_dw1"); preg.property_tag(db1->get_tensor(), "derivative_db1"); - pbm::GraphSaver saver; + pbm::GraphSaver saver; dw0->get_tensor()->accept(saver); db0->get_tensor()->accept(saver); dw1->get_tensor()->accept(saver); @@ -127,21 +127,21 @@ TEST(SERIALIZE, LoadGraph) { cortenn::Graph in; { - std::fstream inputstr(testdir + "/ead_test.pbx", + std::fstream inputstr(testdir + "/eteq_test.pbx", std::ios::in | std::ios::binary); ASSERT_TRUE(inputstr.is_open()); ASSERT_TRUE(in.ParseFromIstream(&inputstr)); } pbm::GraphInfo out; - pbm::load_graph(out, in); + pbm::load_graph(out, in); EXPECT_EQ(4, out.roots_.size()); auto& reg = tag::get_reg(); tag::Query q; std::vector root_props; - std::unordered_map propdtens; + std::unordered_map propdtens; for (auto tens : out.roots_) { tens->accept(q); @@ -200,7 +200,7 @@ TEST(SERIALIZE, LoadGraph) std::string got; std::string line; { - std::ifstream expectstr(testdir + "/ead_test.txt"); + std::ifstream expectstr(testdir + "/eteq_test.txt"); ASSERT_TRUE(expectstr.is_open()); while (std::getline(expectstr, line)) { diff --git a/ead/test/test_session.cpp b/eteq/test/test_session.cpp similarity index 72% rename from ead/test/test_session.cpp rename to eteq/test/test_session.cpp index 1e0b17f4f..15da75f6e 100644 --- a/ead/test/test_session.cpp +++ b/eteq/test/test_session.cpp @@ -6,24 +6,24 @@ #include "exam/exam.hpp" -#include "ead/ead.hpp" +#include "eteq/eteq.hpp" TEST(SESSION, Update) { - ade::Shape shape; + teq::Shape shape; - ead::VarptrT a = ead::make_variable( + eteq::VarptrT a = eteq::make_variable( std::vector(shape.n_elems(), 1).data(), shape); - ead::NodeptrT b = ead::make_variable( + eteq::NodeptrT b = eteq::make_variable( std::vector(shape.n_elems(), 1).data(), shape); - ead::NodeptrT c = ead::make_variable( + eteq::NodeptrT c = eteq::make_variable( std::vector(shape.n_elems(), 2).data(), shape); auto x = tenncor::add(convert_to_node(a), b); auto target = tenncor::mul(x, c); - ead::Session session; + eteq::Session session; session.track({ target->get_tensor(), }); @@ -45,19 +45,19 @@ TEST(SESSION, Update) TEST(SESSION, TargetedUpdate) { - ade::Shape shape; + teq::Shape shape; - ead::VarptrT a = ead::make_variable( + eteq::VarptrT a = eteq::make_variable( std::vector(shape.n_elems(), 1).data(), shape); - ead::NodeptrT b = ead::make_variable( + eteq::NodeptrT b = eteq::make_variable( std::vector(shape.n_elems(), 1).data(), shape); - ead::NodeptrT c = ead::make_variable( + eteq::NodeptrT c = eteq::make_variable( std::vector(shape.n_elems(), 2).data(), shape); auto x = tenncor::add(convert_to_node(a), b); auto target = tenncor::mul(x, c); - ead::Session session; + eteq::Session session; session.track({ target->get_tensor(), }); @@ -69,7 +69,7 @@ TEST(SESSION, TargetedUpdate) double d = 2; a->assign(&d, shape); - session.update_target(ead::TensSetT{x->get_tensor().get()}); + session.update_target(eteq::TensSetT{x->get_tensor().get()}); // expect target to not be updated data = (double*) target->data(); diff 
--git a/ead/variable.hpp b/eteq/variable.hpp similarity index 75% rename from ead/variable.hpp rename to eteq/variable.hpp index 563b59ffd..0df9b6bea 100644 --- a/ead/variable.hpp +++ b/eteq/variable.hpp @@ -1,34 +1,34 @@ /// /// variable.hpp -/// ead +/// eteq /// /// Purpose: /// Define data structures for owning, and passing /// generalized and type-specific data /// -#include "ead/ileaf.hpp" -#include "ead/inode.hpp" +#include "eteq/ileaf.hpp" +#include "eteq/inode.hpp" -#ifndef EAD_VARIABLE_HPP -#define EAD_VARIABLE_HPP +#ifndef ETEQ_VARIABLE_HPP +#define ETEQ_VARIABLE_HPP -namespace ead +namespace eteq { /// Leaf node containing data template struct Variable final : public iLeaf { - static Variable* get (T* ptr, ade::Shape shape, std::string label = ""); + static Variable* get (T* ptr, teq::Shape shape, std::string label = ""); - static Variable* get (ade::Shape shape, std::string label = "") + static Variable* get (teq::Shape shape, std::string label = "") { return Variable::get(std::vector(shape.n_elems(), 0), shape, label); } - static Variable* get (T scalar, ade::Shape shape, std::string label = "") + static Variable* get (T scalar, teq::Shape shape, std::string label = "") { if (label.empty()) { @@ -38,7 +38,7 @@ struct Variable final : public iLeaf shape, label); } - static Variable* get (std::vector data, ade::Shape shape, + static Variable* get (std::vector data, teq::Shape shape, std::string label = "") { if (data.size() != shape.n_elems()) @@ -83,10 +83,10 @@ struct Variable final : public iLeaf return *this; } - void assign (void* input, age::_GENERATED_DTYPE dtype, ade::Shape shape) + void assign (void* input, egen::_GENERATED_DTYPE dtype, teq::Shape shape) { std::vector data; - age::type_convert(data, input, dtype, shape.n_elems()); + egen::type_convert(data, input, dtype, shape.n_elems()); this->data_ = make_tensmap(data.data(), shape); } @@ -106,7 +106,7 @@ struct Variable final : public iLeaf std::string label_; // todo: make private private: - Variable (T* data, ade::Shape shape, std::string label) : + Variable (T* data, teq::Shape shape, std::string label) : iLeaf(data, shape), label_(label) {} Variable (const Variable& other) = default; @@ -127,19 +127,19 @@ struct VariableNode final : public iNode void update (void) override {} - ade::TensptrT get_tensor (void) override + teq::TensptrT get_tensor (void) const override { return var_; } - void assign (T* input, ade::Shape shape) + void assign (T* input, teq::Shape shape) { - var_->assign(input, age::get_type(), shape); + var_->assign(input, egen::get_type(), shape); } void assign (TensMapT* tensmap) { - var_->assign(tensmap->data(), age::get_type(), get_shape(*tensmap)); + var_->assign(tensmap->data(), egen::get_type(), get_shape(*tensmap)); } std::string get_label (void) const @@ -152,10 +152,10 @@ struct VariableNode final : public iNode }; template -Variable* Variable::get (T* ptr, ade::Shape shape, std::string label) +Variable* Variable::get (T* ptr, teq::Shape shape, std::string label) { static bool registered = register_builder,T>( - [](ade::TensptrT tens) + [](teq::TensptrT tens) { return std::make_shared>( std::static_pointer_cast>(tens)); @@ -175,7 +175,7 @@ NodeptrT convert_to_node (VarptrT var) } template -VarptrT make_variable_scalar (T scalar, ade::Shape shape, std::string label = "") +VarptrT make_variable_scalar (T scalar, teq::Shape shape, std::string label = "") { return std::make_shared>( std::shared_ptr>(Variable::get(scalar, shape, label)) @@ -183,7 +183,7 @@ VarptrT make_variable_scalar (T 
scalar, ade::Shape shape, std::string label =
 }

 template
-VarptrT make_variable (ade::Shape shape, std::string label = "")
+VarptrT make_variable (teq::Shape shape, std::string label = "")
 {
 	return std::make_shared>(
 		std::shared_ptr>(Variable::get(shape, label))
@@ -191,7 +191,7 @@ VarptrT make_variable (ade::Shape shape, std::string label = "")
 }

 template
-VarptrT make_variable (T* data, ade::Shape shape, std::string label = "")
+VarptrT make_variable (T* data, teq::Shape shape, std::string label = "")
 {
 	return std::make_shared>(
 		std::shared_ptr>(Variable::get(data, shape, label))
@@ -200,4 +200,4 @@ VarptrT make_variable (T* data, ade::Shape shape, std::string label = "")
 }

-#endif // EAD_VARIABLE_HPP
+#endif // ETEQ_VARIABLE_HPP
diff --git a/experimental/distance_finder.hpp b/experimental/distance_finder.hpp
index c8a1ec949..8d35719ce 100644
--- a/experimental/distance_finder.hpp
+++ b/experimental/distance_finder.hpp
@@ -1,16 +1,16 @@
-#include "ade/ade.hpp"
+#include "teq/teq.hpp"

 namespace experimental
 {

-using DistanceMapT = std::unordered_map;
+using DistanceMapT = std::unordered_map;

-using EdgeDistanceMapT = std::unordered_map;
+using EdgeDistanceMapT = std::unordered_map;

-struct DistanceFinder final : public ade::iTraveler
+struct DistanceFinder final : public teq::iTraveler
 {
 	/// Implementation of iTraveler
-	void visit (ade::iLeaf* leaf) override
+	void visit (teq::iLeaf* leaf) override
 	{
 		if (false == estd::has(distances_, leaf))
 		{
@@ -19,7 +19,7 @@ struct DistanceFinder final : public ade::iTraveler
 	}

 	/// Implementation of iTraveler
-	void visit (ade::iFunctor* func) override
+	void visit (teq::iFunctor* func) override
 	{
 		if (false == estd::has(distances_, func))
 		{
diff --git a/layr/BUILD.bazel b/layr/BUILD.bazel
new file mode 100644
index 000000000..ace2862bc
--- /dev/null
+++ b/layr/BUILD.bazel
@@ -0,0 +1,48 @@
+licenses(["notice"])
+
+package(
+ default_visibility = ["//visibility:public"],
+)
+
+filegroup(
+ name = "srcs",
+ srcs = glob([
+ "*.hpp",
+ "src/*.cpp",
+ ]) + ["BUILD.bazel"],
+)
+
+filegroup(
+ name = "test_srcs",
+ srcs = glob([
+ "test/*.hpp",
+ "test/*.cpp",
+ ]),
+)
+
+######### LIBRARY #########
+
+cc_library(
+ name = "modl",
+ hdrs = glob(["*.hpp"]),
+ srcs = glob(["src/*.cpp"]),
+ copts = ["-std=c++17"],
+ deps = ["//eteq:eteq"],
+)
+
+######### TEST #########
+
+cc_test(
+ name = "test",
+ srcs = ["//modl:test_srcs"],
+ copts = ["-std=c++17"],
+ deps = [
+ "//modl:modl",
+ "//dbg:stream_out",
+ "//testutil:tutil",
+ "@gtest//:gtest",
+ "@com_github_mingkaic_cppkg//diff:diff",
+ "@com_github_mingkaic_cppkg//exam:exam",
+ ],
+ linkstatic = True,
+)
diff --git a/layr/README_LAYR.md b/layr/README_LAYR.md
new file mode 100644
index 000000000..177aa177e
--- /dev/null
+++ b/layr/README_LAYR.md
@@ -0,0 +1,3 @@
+# LAYR (LAYeR models)
+
+Components of common Machine Learning layers and models
diff --git a/rocnnet/modl/activations.hpp b/layr/activations.hpp
similarity index 79%
rename from rocnnet/modl/activations.hpp
rename to layr/activations.hpp
index 66bd242fe..a93f3d191 100644
--- a/rocnnet/modl/activations.hpp
+++ b/layr/activations.hpp
@@ -1,11 +1,11 @@
-#include "ead/generated/api.hpp"
+#include "eteq/generated/api.hpp"

-#include "rocnnet/modl/layer.hpp"
+#include "layr/layer.hpp"

-#ifndef MODL_ACTIVATIONS_HPP
-#define MODL_ACTIVATIONS_HPP
+#ifndef LAYR_ACTIVATIONS_HPP
+#define LAYR_ACTIVATIONS_HPP

-namespace modl
+namespace layr
 {

 struct ActivationBuilder final : public 
iLayerBuilder ActivationBuilder (std::string act_type, std::string label) : act_type_(act_type), label_(label) {} - void set_tensor (ade::TensptrT tens, std::string target) override {} + void set_tensor (teq::TensptrT tens, std::string target) override {} void set_sublayer (LayerptrT layer) override {} // activation has no sublayer @@ -27,7 +27,7 @@ struct ActivationBuilder final : public iLayerBuilder const std::string sigmoid_layer_key = get_layer_reg().register_tagr(layers_key_prefix + "sigmoid", -[](ade::TensrefT ref, std::string label) +[](teq::TensrefT ref, std::string label) { get_layer_reg().layer_tag(ref, sigmoid_layer_key, label); }, @@ -38,7 +38,7 @@ get_layer_reg().register_tagr(layers_key_prefix + "sigmoid", const std::string tanh_layer_key = get_layer_reg().register_tagr(layers_key_prefix + "tanh", -[](ade::TensrefT ref, std::string label) +[](teq::TensrefT ref, std::string label) { get_layer_reg().layer_tag(ref, tanh_layer_key, label); }, @@ -60,7 +60,7 @@ struct Activation final : public iLayer act_type_(act_type), activation_(estd::must_getf(activations, act_type, "failed to find activation `%s`", act_type.c_str())), - placeholder_(ead::make_constant_scalar(0, {})) + placeholder_(eteq::make_constant_scalar(0, {})) { tag(placeholder_->get_tensor(), LayerId()); } @@ -70,7 +70,7 @@ struct Activation final : public iLayer label_(label_prefix + other.get_label()), act_type_(other.act_type_), activation_(other.activation_), - placeholder_(ead::make_constant_scalar(0, {})) + placeholder_(eteq::make_constant_scalar(0, {})) { tag(placeholder_->get_tensor(), LayerId()); } @@ -107,7 +107,7 @@ struct Activation final : public iLayer return label_; } - ead::NodeptrT connect (ead::NodeptrT input) const override + eteq::NodeptrT connect (eteq::NodeptrT input) const override { auto out = activation_(input); recursive_tag(out->get_tensor(), { @@ -116,13 +116,13 @@ struct Activation final : public iLayer return out; } - ade::TensT get_contents (void) const override + teq::TensT get_contents (void) const override { return {placeholder_->get_tensor()}; } private: - iLayer* clone_impl (std::string label_prefix) const override + iLayer* clone_impl (const std::string& label_prefix) const override { return new Activation(*this, label_prefix); } @@ -133,7 +133,7 @@ struct Activation final : public iLayer NonLinearF activation_; - ead::NodeptrT placeholder_; + eteq::NodeptrT placeholder_; }; using ActivationptrT = std::shared_ptr; @@ -144,4 +144,4 @@ LayerptrT tanh (std::string label = "tanh"); } -#endif // MODL_ACTIVATIONS_HPP +#endif // LAYR_ACTIVATIONS_HPP diff --git a/rocnnet/modl/dbn.hpp b/layr/broken/dbn.hpp similarity index 66% rename from rocnnet/modl/dbn.hpp rename to layr/broken/dbn.hpp index 9d57a78ac..efdcf4994 100644 --- a/rocnnet/modl/dbn.hpp +++ b/layr/broken/dbn.hpp @@ -1,9 +1,9 @@ -#include "rocnnet/modl/rbm.hpp" +#include "layr/rbm.hpp" -#ifndef MODL_DBN_HPP -#define MODL_DBN_HPP +#ifndef LAYR_DBN_HPP +#define LAYR_DBN_HPP -namespace modl +namespace layr { struct DBN final : public iMarshalSet @@ -11,24 +11,24 @@ struct DBN final : public iMarshalSet DBN (RBMptrT rbm, std::string label) : iMarshalSet(label), rbm_(rbm) { - ade::DimT n_input = rbm_->get_ninput(); - ade::Shape weight_shape({rbm_->get_noutput(), n_input}); - ade::NElemT nweight = weight_shape.n_elems(); + teq::DimT n_input = rbm_->get_ninput(); + teq::Shape weight_shape({rbm_->get_noutput(), n_input}); + teq::NElemT nweight = weight_shape.n_elems(); PybindT bound = 1.0 / std::sqrt(n_input); 
std::uniform_real_distribution dist(-bound, bound); auto gen = [&dist]() { - return dist(ead::get_engine()); + return dist(eteq::get_engine()); }; std::vector wdata(nweight); std::generate(wdata.begin(), wdata.end(), gen); - ead::VarptrT weight = ead::make_variable( + eteq::VarptrT weight = eteq::make_variable( wdata.data(), weight_shape, "log_weight"); - ead::VarptrT bias = ead::make_variable_scalar( - 0.0, ade::Shape({hiddens.back()}), "log_bias"); + eteq::VarptrT bias = eteq::make_variable_scalar( + 0.0, teq::Shape({hiddens.back()}), "log_bias"); log_weight_ = std::make_shared(weight); log_bias_ = std::make_shared(bias); @@ -54,12 +54,12 @@ struct DBN final : public iMarshalSet DBN& operator = (DBN&& other) = default; // input of shape - ead::NodeptrT operator () (ead::NodeptrT input) + eteq::NodeptrT operator () (eteq::NodeptrT input) { - ead::NodeptrT output = (*rbm_)(input); - return age::softmax(tenncor::nn::fully_connect({output}, - {ead::convert_to_node(log_weight_->var_)}, - ead::convert_to_node(log_bias_->var_))); + eteq::NodeptrT output = (*rbm_)(input); + return tenncor::softmax(tenncor::nn::fully_connect({output}, + {eteq::convert_to_node(log_weight_->var_)}, + eteq::convert_to_node(log_bias_->var_))); } uint8_t get_ninput (void) const @@ -101,4 +101,4 @@ using DBNptrT = std::shared_ptr; } -#endif // MODL_DBN_HPP +#endif // LAYR_DBN_HPP diff --git a/rocnnet/modl/rnn.hpp b/layr/broken/rnn.hpp similarity index 76% rename from rocnnet/modl/rnn.hpp rename to layr/broken/rnn.hpp index 9c9ee19c1..16f6eff87 100644 --- a/rocnnet/modl/rnn.hpp +++ b/layr/broken/rnn.hpp @@ -1,6 +1,4 @@ -#include "ead/generated/api.hpp" - -#include "rocnnet/modl/marshal.hpp" +#include "eteq/generated/api.hpp" #ifndef MODL_RNN_HPP #define MODL_RNN_HPP @@ -10,11 +8,11 @@ namespace modl struct RNN final : public iMarshalSet { - RNN (ade::DimT n_input, ade::DimT n_output, size_t timestep, + RNN (teq::DimT n_input, teq::DimT n_output, size_t timestep, NonLinearF nonlin, std::string label) : iMarshalSet(label), nonlin_(nonlin), - bias_(ead::make_variable_scalar( - 0.0, ade::Shape({n_output}), "bias") + bias_(eteq::make_variable_scalar( + 0.0, teq::Shape({n_output}), "bias") { assert(timestep > 0); { @@ -22,30 +20,30 @@ struct RNN final : public iMarshalSet std::uniform_real_distribution dist(-bound, bound); auto gen = [&dist]() { - return dist(ead::get_engine()); + return dist(eteq::get_engine()); }; std::vector wdata(n_output * n_input); std::generate(wdata.begin(), wdata.end(), gen); - ead::VarptrT weight = ead::make_variable( - wdata.data(), ade::Shape({n_output, n_input}), "weight_0"); + eteq::VarptrT weight = eteq::make_variable( + wdata.data(), teq::Shape({n_output, n_input}), "weight_0"); layers_.push_back(std::make_shared(weight)); } for (size_t i = 1; i < timestep; ++i) { - ade::Shape weight_shape({n_output, n_output}); - ade::NElemT nweight = weight_shape.n_elems(); + teq::Shape weight_shape({n_output, n_output}); + teq::NElemT nweight = weight_shape.n_elems(); PybindT bound = 1.0 / std::sqrt(n_output); std::uniform_real_distribution dist(-bound, bound); auto gen = [&dist]() { - return dist(ead::get_engine()); + return dist(eteq::get_engine()); }; std::vector wdata(nweight); std::generate(wdata.begin(), wdata.end(), gen); - ead::VarptrT weight = ead::make_variable( + eteq::VarptrT weight = eteq::make_variable( wdata.data(), weight_shape, fmts::sprintf("weight_%d", i)); layers_.push_back(std::make_shared(weight)); @@ -73,10 +71,10 @@ struct RNN final : public iMarshalSet // expect all inputs of shape 
- ead::NodesT operator () (ead::NodesT inputs) + eteq::NodesT operator () (eteq::NodesT inputs) { // sanity check - const ade::Shape& in_shape = input->shape(); + const teq::Shape& in_shape = input->shape(); uint8_t ninput = get_ninput(); if (in_shape.at(0) != ninput) { @@ -91,7 +89,7 @@ struct RNN final : public iMarshalSet nins, weights_.size()); } - ead::NodesT outs; + eteq::NodesT outs; outs.reserve(nins); outs.push_back(nonlin_(tenncor::nn::fully_connect( {inputs[0]}, {weights_[0]}, bias_))); @@ -105,12 +103,12 @@ struct RNN final : public iMarshalSet return outs; } - ade::DimT get_ninput (void) const + teq::DimT get_ninput (void) const { return weights_.front()->var_->shape().at(1); } - ade::DimT get_noutput (void) const + teq::DimT get_noutput (void) const { return weights_.back()->var_->shape().at(0); } diff --git a/layr/conv.hpp b/layr/conv.hpp new file mode 100644 index 000000000..151dc2a1e --- /dev/null +++ b/layr/conv.hpp @@ -0,0 +1,208 @@ +#include "eteq/generated/api.hpp" + +#include "layr/layer.hpp" + +#ifndef LAYR_CONV_HPP +#define LAYR_CONV_HPP + +namespace layr +{ + +const std::string conv_weight_key = "weight"; + +const std::string conv_bias_key = "bias"; + +struct ConvBuilder final : public iLayerBuilder +{ + ConvBuilder (std::string label) : label_(label) {} + + void set_tensor (teq::TensptrT tens, std::string target) override + { + if (target == conv_weight_key) + { + weight_ = eteq::NodeConverters::to_node(tens); + return; + } + else if (target == conv_bias_key) + { + bias_ = eteq::NodeConverters::to_node(tens); + return; + } + logs::warnf("attempt to create convolution layer " + "with unknown tensor `%s` with label `%s`", + tens->to_string().c_str(), target.c_str()); + } + + void set_sublayer (LayerptrT layer) override {} // dense has no sublayer + + LayerptrT build (void) const override; + +private: + eteq::NodeptrT weight_ = nullptr; + + eteq::NodeptrT bias_ = nullptr; + + std::string label_; +}; + +const std::string conv_layer_key = +get_layer_reg().register_tagr(layers_key_prefix + "conv", +[](teq::TensrefT ref, std::string label) +{ + get_layer_reg().layer_tag(ref, conv_layer_key, label); +}, +[](std::string label) -> LBuilderptrT +{ + return std::make_shared(label); +}); + +struct Conv final : public iLayer +{ + Conv (std::pair filter_hw, + teq::DimT in_ncol, teq::DimT out_ncol, + const std::string& label) : + label_(label) + { + teq::Shape kernelshape({out_ncol, in_ncol, + filter_hw.second, filter_hw.first}); + size_t ndata = kernelshape.n_elems(); + + size_t input_size = filter_hw.first * filter_hw.second * in_ncol; + PybindT bound = 1.0 / std::sqrt(input_size); + std::uniform_real_distribution dist(-bound, bound); + auto gen = [&dist]() + { + return dist(eteq::get_engine()); + }; + std::vector data(ndata); + std::generate(data.begin(), data.end(), gen); + + eteq::VarptrT weight = eteq::make_variable( + data.data(), kernelshape, "weight"); + eteq::VarptrT bias = eteq::make_variable_scalar( + 0.0, teq::Shape({out_ncol}), "bias"); + weight_ = std::make_shared(weight); + bias_ = std::make_shared(bias); + } + + Conv (eteq::NodeptrT weight, + eteq::NodeptrT bias, + std::string label) : + label_(label), + weight_(weight), + bias_(bias) + { + tag(weight_->get_tensor(), LayerId(conv_weight_key)); + if (bias) + { + tag(bias_->get_tensor(), LayerId(conv_bias_key)); + } + } + + Conv (const Conv& other, + std::string label_prefix = "") + { + copy_helper(other, label_prefix); + } + + Conv& operator = (const Conv& other) + { + if (this != &other) + { + copy_helper(other); 
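// To make the dimensions in the first Conv constructor above concrete
// (hypothetical values): with filter_hw = {5, 5}, in_ncol = 3, and
// out_ncol = 16, kernelshape is {16, 3, 5, 5}, so ndata = 16 * 3 * 5 * 5 = 1200
// kernel weights, input_size = 5 * 5 * 3 = 75, and the uniform init bound is
// 1 / sqrt(75) ~= 0.115; the bias gets shape {16}, one value per output column.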
+ } + return *this; + } + + Conv (Conv&& other) = default; + + Conv& operator = (Conv&& other) = default; + + Conv* clone (std::string label_prefix = "") const + { + return static_cast(this->clone_impl(label_prefix)); + } + + uint8_t get_ninput (void) const override + { + return weight_->shape().at(1); + } + + uint8_t get_noutput (void) const override + { + return weight_->shape().at(0); + } + + std::string get_ltype (void) const override + { + return conv_layer_key; + } + + std::string get_label (void) const override + { + return label_; + } + + eteq::NodeptrT connect (eteq::NodeptrT input) const override + { + auto out = tenncor::nn::conv2d(input, + eteq::convert_to_node(weight_), + eteq::convert_to_node(bias_)); + auto leaves = { + input->get_tensor().get(), + weight_->get_tensor().get(), + }; + if (bias) + { + leaves.push_back(bias_->get_tensor().get()); + } + recursive_tag(out->get_tensor(), leaves, LayerId()); + return out; + } + + teq::TensT get_contents (void) const override + { + return { + weight_->get_tensor(), + nullptr == bias_ ? nullptr : bias_->get_tensor(), + }; + } + +private: + iLayer* clone_impl (const std::string& label_prefix) const override + { + return new Conv(*this, label_prefix); + } + + void copy_helper (const Conv& other, std::string label_prefix = "") + { + label_ = label_prefix + other.label_; + weight_ = std::make_shared>( + std::shared_ptr>( + eteq::Variable::get( + *static_cast*>( + other.weight_->get_tensor().get())))); + tag(weight_->get_tensor(), LayerId(conv_weight_key)); + if (other.bias_) + { + bias_ = std::make_shared>( + std::shared_ptr>( + eteq::Variable::get( + *static_cast*>( + other.bias_->get_tensor().get())))); + tag(bias_->get_tensor(), LayerId(conv_bias_key)); + } + } + + std::string label_; + + eteq::NodeptrT weight_; + + eteq::NodeptrT bias_ = nullptr; +}; + +using ConvptrT = std::shared_ptr; + +} + +#endif // LAYR_CONV_HPP diff --git a/rocnnet/modl/dense.hpp b/layr/dense.hpp similarity index 63% rename from rocnnet/modl/dense.hpp rename to layr/dense.hpp index 0aa018c24..35875d7a5 100644 --- a/rocnnet/modl/dense.hpp +++ b/layr/dense.hpp @@ -1,13 +1,12 @@ -#include "ead/generated/api.hpp" +#include "eteq/generated/api.hpp" -#include "rocnnet/eqns/init.hpp" +#include "layr/init.hpp" +#include "layr/layer.hpp" -#include "rocnnet/modl/layer.hpp" +#ifndef LAYR_DENSE_HPP +#define LAYR_DENSE_HPP -#ifndef MODL_DENSE_HPP -#define MODL_DENSE_HPP - -namespace modl +namespace layr { const std::string weight_key = "weight"; @@ -18,16 +17,16 @@ struct DenseBuilder final : public iLayerBuilder { DenseBuilder (std::string label) : label_(label) {} - void set_tensor (ade::TensptrT tens, std::string target) override + void set_tensor (teq::TensptrT tens, std::string target) override { if (target == weight_key) { - weight_ = ead::NodeConverters::to_node(tens); + weight_ = eteq::NodeConverters::to_node(tens); return; } else if (target == bias_key) { - bias_ = ead::NodeConverters::to_node(tens); + bias_ = eteq::NodeConverters::to_node(tens); return; } logs::warnf("attempt to create dense layer " @@ -40,16 +39,16 @@ struct DenseBuilder final : public iLayerBuilder LayerptrT build (void) const override; private: - ead::NodeptrT weight_ = nullptr; + eteq::NodeptrT weight_ = nullptr; - ead::NodeptrT bias_ = nullptr; + eteq::NodeptrT bias_ = nullptr; std::string label_; }; const std::string dense_layer_key = get_layer_reg().register_tagr(layers_key_prefix + "dense", -[](ade::TensrefT ref, std::string label) +[](teq::TensrefT ref, std::string label) { 
get_layer_reg().layer_tag(ref, dense_layer_key, label); }, @@ -60,23 +59,23 @@ get_layer_reg().register_tagr(layers_key_prefix + "dense", struct Dense final : public iLayer { - Dense (ade::DimT nunits, ade::DimT indim, - eqns::InitF weight_init, - eqns::InitF bias_init, + Dense (teq::DimT nunits, teq::DimT indim, + layr::InitF weight_init, + layr::InitF bias_init, const std::string& label) : label_(label), - weight_(weight_init(ade::Shape({nunits, indim}), weight_key)) + weight_(weight_init(teq::Shape({nunits, indim}), weight_key)) { tag(weight_->get_tensor(), LayerId(weight_key)); if (bias_init) { - bias_ = bias_init(ade::Shape({nunits}), bias_key); + bias_ = bias_init(teq::Shape({nunits}), bias_key); tag(bias_->get_tensor(), LayerId(bias_key)); } } - Dense (ead::NodeptrT weight, - ead::NodeptrT bias, + Dense (eteq::NodeptrT weight, + eteq::NodeptrT bias, std::string label) : label_(label), weight_(weight), @@ -133,18 +132,22 @@ struct Dense final : public iLayer return label_; } - ead::NodeptrT connect (ead::NodeptrT input) const override + eteq::NodeptrT connect (eteq::NodeptrT input) const override { auto out = tenncor::nn::fully_connect({input}, {weight_}, bias_); - recursive_tag(out->get_tensor(), { + auto leaves = { input->get_tensor().get(), weight_->get_tensor().get(), - bias_->get_tensor().get(), - }, LayerId()); + }; + if (bias) + { + leaves.push_back(bias_->get_tensor().get()); + } + recursive_tag(out->get_tensor(), leaves, LayerId()); return out; } - ade::TensT get_contents (void) const override + teq::TensT get_contents (void) const override { return { weight_->get_tensor(), @@ -153,7 +156,7 @@ struct Dense final : public iLayer } private: - iLayer* clone_impl (std::string label_prefix) const override + iLayer* clone_impl (const std::string& label_prefix) const override { return new Dense(*this, label_prefix); } @@ -161,18 +164,18 @@ struct Dense final : public iLayer void copy_helper (const Dense& other, std::string label_prefix = "") { label_ = label_prefix + other.label_; - weight_ = std::make_shared>( - std::shared_ptr>( - ead::Variable::get( - *static_cast*>( + weight_ = std::make_shared>( + std::shared_ptr>( + eteq::Variable::get( + *static_cast*>( other.weight_->get_tensor().get())))); tag(weight_->get_tensor(), LayerId(weight_key)); if (other.bias_) { - bias_ = std::make_shared>( - std::shared_ptr>( - ead::Variable::get( - *static_cast*>( + bias_ = std::make_shared>( + std::shared_ptr>( + eteq::Variable::get( + *static_cast*>( other.bias_->get_tensor().get())))); tag(bias_->get_tensor(), LayerId(bias_key)); } @@ -180,13 +183,13 @@ struct Dense final : public iLayer std::string label_; - ead::NodeptrT weight_; + eteq::NodeptrT weight_; - ead::NodeptrT bias_; + eteq::NodeptrT bias_; }; using DenseptrT = std::shared_ptr; } -#endif // MODL_DENSE_HPP +#endif // LAYR_DENSE_HPP diff --git a/rocnnet/eqns/err_approx.hpp b/layr/err_approx.hpp similarity index 59% rename from rocnnet/eqns/err_approx.hpp rename to layr/err_approx.hpp index 1166d7f68..8cc56ea75 100644 --- a/rocnnet/eqns/err_approx.hpp +++ b/layr/err_approx.hpp @@ -1,40 +1,40 @@ #include -#include "ead/generated/pyapi.hpp" +#include "eteq/generated/pyapi.hpp" -#include "ead/constant.hpp" -#include "ead/variable.hpp" -#include "ead/session.hpp" +#include "eteq/constant.hpp" +#include "eteq/variable.hpp" +#include "eteq/session.hpp" -#ifndef EQNS_ERR_APPROX_HPP -#define EQNS_ERR_APPROX_HPP +#ifndef LAYR_ERR_APPROX_HPP +#define LAYR_ERR_APPROX_HPP -namespace eqns +namespace layr { -using VarErrsT = 
std::vector,ead::NodeptrT>>; +using VarErrsT = std::vector,eteq::NodeptrT>>; struct VarAssign { std::string label_; - ead::VarptrT target_; + eteq::VarptrT target_; - ead::NodeptrT source_; + eteq::NodeptrT source_; }; -using AssignsT = std::list; +using AssignsT = std::vector; -using AssignGroupsT = std::list; +using AssignGroupsT = std::vector; // approximate error of sources given error of root using ApproxF = std::function; -using UpdateStepF = std::function; +using UpdateStepF = std::function; -using NodeUnarF = std::function(ead::NodeptrT)>; +using NodeUnarF = std::function(eteq::NodeptrT)>; -ead::NodeptrT identity (ead::NodeptrT node); +eteq::NodeptrT identity (eteq::NodeptrT node); // Stochastic Gradient Descent Approximation // for each (x, err) in leaves @@ -60,4 +60,4 @@ void assign_groups (AssignGroupsT& groups, UpdateStepF update_step); } -#endif // EQNS_ERR_APPROX_HPP +#endif // LAYR_ERR_APPROX_HPP diff --git a/rocnnet/eqns/init.hpp b/layr/init.hpp similarity index 60% rename from rocnnet/eqns/init.hpp rename to layr/init.hpp index bd759dc28..cf647e2a2 100644 --- a/rocnnet/eqns/init.hpp +++ b/layr/init.hpp @@ -1,26 +1,26 @@ -#include "ead/variable.hpp" -#include "ead/random.hpp" +#include "eteq/variable.hpp" +#include "eteq/random.hpp" -#ifndef EQNS_INIT_HPP -#define EQNS_INIT_HPP +#ifndef LAYR_INIT_HPP +#define LAYR_INIT_HPP -namespace eqns +namespace layr { template -using InitF = std::function(ade::Shape,std::string)>; +using InitF = std::function(teq::Shape,std::string)>; template -using ShapeFactorF = std::function; +using ShapeFactorF = std::function; template -T fanio (ade::Shape shape) +T fanio (teq::Shape shape) { return shape.at(0) + shape.at(1); } template -T fanavg (ade::Shape shape) +T fanavg (teq::Shape shape) { return fanio(shape) / 2; } @@ -28,11 +28,11 @@ T fanavg (ade::Shape shape) const size_t max_repick = 5; template -void truncated_normal (std::vector& out, ade::Shape shape, T mean, T stdev) +void truncated_normal (std::vector& out, teq::Shape shape, T mean, T stdev) { size_t n = shape.n_elems(); out = std::vector(n); - auto gen = ead::norm_gen(mean, stdev); + auto gen = eteq::norm_gen(mean, stdev); std::generate(out.begin(), out.end(), gen); // if T is not decimal, program would fail to compile therefore T is signed T upperbound = mean + 2 * stdev; @@ -62,9 +62,9 @@ template InitF zero_init (void) { return - [](ade::Shape shape, std::string label) + [](teq::Shape shape, std::string label) { - return ead::make_variable_scalar(0, shape, label); + return eteq::make_variable_scalar(0, shape, label); }; } @@ -72,12 +72,12 @@ template InitF variance_scaling_init (T factor, ShapeFactorF sfactor=fanavg) { return - [factor, sfactor](ade::Shape shape, std::string label) + [factor, sfactor](teq::Shape shape, std::string label) { std::vector vec; T stdev = std::sqrt(factor / sfactor(shape)); truncated_normal(vec, shape, 0, stdev); - return ead::make_variable(vec.data(), shape, label); + return eteq::make_variable(vec.data(), shape, label); }; } @@ -85,12 +85,12 @@ template InitF unif_xavier_init (T factor = 1) { return - [factor](ade::Shape shape, std::string label) + [factor](teq::Shape shape, std::string label) { std::vector vec(shape.n_elems()); T bound = factor * std::sqrt(6.0 / fanio(shape)); - std::generate(vec.begin(), vec.end(), ead::unif_gen(-bound, bound)); - return ead::make_variable(vec.data(), shape, label); + std::generate(vec.begin(), vec.end(), eteq::unif_gen(-bound, bound)); + return eteq::make_variable(vec.data(), shape, label); }; } @@ -98,15 
+98,15 @@ template InitF norm_xavier_init (T factor = 1) { return - [factor](ade::Shape shape, std::string label) + [factor](teq::Shape shape, std::string label) { std::vector vec(shape.n_elems()); T stdev = factor * std::sqrt(2.0 / fanio(shape)); - std::generate(vec.begin(), vec.end(), ead::norm_gen(0.0, stdev)); - return ead::make_variable(vec.data(), shape, label); + std::generate(vec.begin(), vec.end(), eteq::norm_gen(0.0, stdev)); + return eteq::make_variable(vec.data(), shape, label); }; } } -#endif // EQNS_INIT_HPP +#endif // LAYR_INIT_HPP diff --git a/rocnnet/modl/layer.hpp b/layr/layer.hpp similarity index 76% rename from rocnnet/modl/layer.hpp rename to layr/layer.hpp index 0c80f7c93..f168275b0 100644 --- a/rocnnet/modl/layer.hpp +++ b/layr/layer.hpp @@ -2,18 +2,18 @@ #include "tag/tag.hpp" -#include "ead/constant.hpp" -#include "ead/variable.hpp" +#include "eteq/constant.hpp" +#include "eteq/variable.hpp" -#include "ead/generated/pyapi.hpp" +#include "eteq/generated/pyapi.hpp" -#ifndef MODL_LAYER_HPP -#define MODL_LAYER_HPP +#ifndef LAYR_LAYER_HPP +#define LAYR_LAYER_HPP -namespace modl +namespace layr { -using NonLinearF = std::function(ead::NodeptrT)>; +using NonLinearF = std::function(eteq::NodeptrT)>; const std::string layers_key_prefix = "layer_"; @@ -110,18 +110,18 @@ struct iLayer virtual std::string get_label (void) const = 0; - virtual ead::NodeptrT connect ( - ead::NodeptrT input) const = 0; + virtual eteq::NodeptrT connect ( + eteq::NodeptrT input) const = 0; - virtual ade::TensT get_contents (void) const = 0; + virtual teq::TensT get_contents (void) const = 0; protected: - virtual iLayer* clone_impl (std::string label_prefix) const = 0; + virtual iLayer* clone_impl (const std::string& label_prefix) const = 0; - void tag (ade::TensptrT tensor, LayerId subs) const; + void tag (teq::TensptrT tensor, LayerId subs) const; - void recursive_tag (ade::TensptrT root, - std::unordered_set ignores, LayerId subs) const; + void recursive_tag (teq::TensptrT root, + std::unordered_set ignores, LayerId subs) const; }; using LayerptrT = std::shared_ptr; @@ -130,7 +130,7 @@ struct iLayerBuilder { virtual ~iLayerBuilder (void) = default; - virtual void set_tensor (ade::TensptrT tens, std::string target) = 0; + virtual void set_tensor (teq::TensptrT tens, std::string target) = 0; virtual void set_sublayer (LayerptrT layer) = 0; @@ -145,7 +145,7 @@ struct LayerRegistry final { LayerRegistry (tag::TagRegistry& registry = tag::get_reg()) : tag_reg_(registry) {} - void layer_tag (ade::TensrefT tens, std::string layer_type, std::string name) + void layer_tag (teq::TensrefT tens, std::string layer_type, std::string name) { tag_reg_.add_tag(tens, tag::TagptrT(new LayerTag(layer_type, name))); } @@ -155,7 +155,7 @@ struct LayerRegistry final lbuilders_.emplace(key, builder); return tag_reg_.register_tagr(key, - [this, key](ade::TensrefT ref, std::string label) + [this, key](teq::TensrefT ref, std::string label) { this->layer_tag(ref, key, label); }); @@ -180,17 +180,17 @@ struct LayerRegistry final LayerRegistry& get_layer_reg (void); -void recursive_layer_tag (ade::TensptrT tens, std::string layer_type, - std::string name, std::unordered_set stops, +void recursive_layer_tag (teq::TensptrT tens, std::string layer_type, + std::string name, std::unordered_set stops, LayerRegistry& registry = get_layer_reg()); -LayerptrT load_layer (std::istream& ins, ade::TensT& roots, +LayerptrT load_layer (std::istream& ins, teq::TensT& roots, std::string ltype, std::string label, LayerRegistry& registry = 
get_layer_reg()); -bool save_layer (std::ostream& outs, const iLayer& layer, ade::TensT roots, +bool save_layer (std::ostream& outs, const iLayer& layer, teq::TensT roots, LayerRegistry& registry = get_layer_reg()); } -#endif // MODL_LAYER_HPP +#endif // LAYR_LAYER_HPP diff --git a/rocnnet/modl/model.hpp b/layr/model.hpp similarity index 86% rename from rocnnet/modl/model.hpp rename to layr/model.hpp index 9eaec3900..d1c182bbb 100644 --- a/rocnnet/modl/model.hpp +++ b/layr/model.hpp @@ -1,16 +1,16 @@ -#include "rocnnet/modl/dense.hpp" +#include "layr/dense.hpp" -#ifndef MODL_MODEL_HPP -#define MODL_MODEL_HPP +#ifndef LAYR_MODEL_HPP +#define LAYR_MODEL_HPP -namespace modl +namespace layr { struct SeqModelBuilder final : public iLayerBuilder { SeqModelBuilder (std::string label) : label_(label) {} - void set_tensor (ade::TensptrT tens, std::string target) override {} // seqmodel has no tensor + void set_tensor (teq::TensptrT tens, std::string target) override {} // seqmodel has no tensor void set_sublayer (LayerptrT layer) override { @@ -27,7 +27,7 @@ struct SeqModelBuilder final : public iLayerBuilder const std::string seq_model_key = get_layer_reg().register_tagr(layers_key_prefix + "seqmodel", -[](ade::TensrefT ref, std::string label) +[](teq::TensrefT ref, std::string label) { get_layer_reg().layer_tag(ref, seq_model_key, label); }, @@ -97,9 +97,9 @@ struct SequentialModel final : public iLayer return label_; } - ead::NodeptrT connect (ead::NodeptrT input) const override + eteq::NodeptrT connect (eteq::NodeptrT input) const override { - ead::NodeptrT out; + eteq::NodeptrT out; for (size_t i = 0, n = layers_.size(); i < n; ++i) { auto& layer = layers_[i]; @@ -112,9 +112,9 @@ struct SequentialModel final : public iLayer return out; } - ade::TensT get_contents (void) const override + teq::TensT get_contents (void) const override { - ade::TensT out; + teq::TensT out; out.reserve(layers_.size()); for (auto& layer : layers_) { @@ -138,7 +138,7 @@ struct SequentialModel final : public iLayer } private: - iLayer* clone_impl (std::string label_prefix) const override + iLayer* clone_impl (const std::string& label_prefix) const override { return new SequentialModel(*this, label_prefix); } @@ -163,4 +163,4 @@ using SeqModelptrT = std::shared_ptr; } -#endif // MODL_MODEL_HPP +#endif // LAYR_MODEL_HPP diff --git a/rocnnet/modl/rbm.hpp b/layr/rbm.hpp similarity index 77% rename from rocnnet/modl/rbm.hpp rename to layr/rbm.hpp index 520947426..fc8fda29a 100644 --- a/rocnnet/modl/rbm.hpp +++ b/layr/rbm.hpp @@ -1,10 +1,10 @@ -#include "rocnnet/modl/dense.hpp" -#include "rocnnet/modl/activations.hpp" +#include "layr/dense.hpp" +#include "layr/activations.hpp" -#ifndef MODL_RBM_HPP -#define MODL_RBM_HPP +#ifndef LAYR_RBM_HPP +#define LAYR_RBM_HPP -namespace modl +namespace layr { const std::string hidden_key = "hidden"; @@ -15,7 +15,7 @@ struct RBMBuilder final : public iLayerBuilder { RBMBuilder (std::string label) : label_(label) {} - void set_tensor (ade::TensptrT tens, std::string target) override {} // rbm has no tensor + void set_tensor (teq::TensptrT tens, std::string target) override {} // rbm has no tensor void set_sublayer (LayerptrT layer) override { @@ -32,7 +32,7 @@ struct RBMBuilder final : public iLayerBuilder const std::string rbm_layer_key = get_layer_reg().register_tagr(layers_key_prefix + "rbm", -[](ade::TensrefT ref, std::string label) +[](teq::TensrefT ref, std::string label) { get_layer_reg().layer_tag(ref, rbm_layer_key, label); }, @@ -43,10 +43,10 @@ 
get_layer_reg().register_tagr(layers_key_prefix + "rbm", struct RBM final : public iLayer { - RBM (ade::DimT nhidden, ade::DimT nvisible, + RBM (teq::DimT nhidden, teq::DimT nvisible, ActivationptrT activation, - eqns::InitF weight_init, - eqns::InitF bias_init, + layr::InitF weight_init, + layr::InitF bias_init, const std::string& label) : label_(label), hidden_(std::make_shared( @@ -56,14 +56,14 @@ struct RBM final : public iLayer auto hidden_contents = hidden_->get_contents(); auto weight = hidden_contents[0]; auto hbias = hidden_contents[1]; - ead::NodeptrT vbias = nullptr; + eteq::NodeptrT vbias = nullptr; if (bias_init) { - vbias = bias_init(ade::Shape({nvisible}), bias_key); + vbias = bias_init(teq::Shape({nvisible}), bias_key); } visible_ = std::make_shared(tenncor::transpose( - ead::NodeConverters::to_node(weight)), vbias, visible_key); + eteq::NodeConverters::to_node(weight)), vbias, visible_key); tag_sublayers(); } @@ -121,12 +121,12 @@ struct RBM final : public iLayer return label_; } - ead::NodeptrT connect (ead::NodeptrT visible) const override + eteq::NodeptrT connect (eteq::NodeptrT visible) const override { return activation_->connect(hidden_->connect(visible)); } - ade::TensT get_contents (void) const override + teq::TensT get_contents (void) const override { auto out = hidden_->get_contents(); auto vis_contents = visible_->get_contents(); @@ -136,13 +136,13 @@ struct RBM final : public iLayer return out; } - ead::NodeptrT backward_connect (ead::NodeptrT hidden) const + eteq::NodeptrT backward_connect (eteq::NodeptrT hidden) const { return activation_->connect(visible_->connect(hidden)); } private: - iLayer* clone_impl (std::string label_prefix) const override + iLayer* clone_impl (const std::string& label_prefix) const override { return new RBM(*this, label_prefix); } @@ -179,12 +179,12 @@ struct RBM final : public iLayer auto vbias = other.visible_->get_contents()[1]; if (nullptr != vbias) { - vbias = ade::TensptrT(ead::Variable::get( - *static_cast*>(vbias.get()))); + vbias = teq::TensptrT(eteq::Variable::get( + *static_cast*>(vbias.get()))); } visible_ = std::make_shared(tenncor::transpose( - ead::NodeConverters::to_node(hidden_contents[0])), - ead::NodeConverters::to_node(vbias), + eteq::NodeConverters::to_node(hidden_contents[0])), + eteq::NodeConverters::to_node(vbias), label_prefix + visible_key); activation_ = ActivationptrT(other.activation_->clone(label_prefix)); @@ -204,4 +204,4 @@ using RBMptrT = std::shared_ptr; } -#endif // MODL_RBM_HPP +#endif // LAYR_RBM_HPP diff --git a/rocnnet/modl/src/activations.cpp b/layr/src/activations.cpp similarity index 80% rename from rocnnet/modl/src/activations.cpp rename to layr/src/activations.cpp index 5489f737a..76e86d3df 100644 --- a/rocnnet/modl/src/activations.cpp +++ b/layr/src/activations.cpp @@ -1,8 +1,8 @@ -#include "rocnnet/modl/activations.hpp" +#include "layr/activations.hpp" -#ifdef MODL_ACTIVATIONS_HPP +#ifdef LAYR_ACTIVATIONS_HPP -namespace modl +namespace layr { LayerptrT ActivationBuilder::build (void) const diff --git a/layr/src/conv.cpp b/layr/src/conv.cpp new file mode 100644 index 000000000..2c857e21f --- /dev/null +++ b/layr/src/conv.cpp @@ -0,0 +1,19 @@ +#include "layr/conv.hpp" + +#ifdef LAYR_CONV_HPP + +namespace layr +{ + +LayerptrT ConvBuilder::build (void) const +{ + if (nullptr == weight_) + { + logs::fatal("cannot build conv with null weight"); + } + return std::make_shared(weight_, bias_, label_); +} + +} + +#endif diff --git a/rocnnet/modl/src/dense.cpp b/layr/src/dense.cpp similarity index 
74% rename from rocnnet/modl/src/dense.cpp rename to layr/src/dense.cpp index 1f092ecdc..ee99a9d69 100644 --- a/rocnnet/modl/src/dense.cpp +++ b/layr/src/dense.cpp @@ -1,8 +1,8 @@ -#include "rocnnet/modl/dense.hpp" +#include "layr/dense.hpp" -#ifdef MODL_DENSE_HPP +#ifdef LAYR_DENSE_HPP -namespace modl +namespace layr { LayerptrT DenseBuilder::build (void) const diff --git a/rocnnet/eqns/src/err_approx.cpp b/layr/src/err_approx.cpp similarity index 52% rename from rocnnet/eqns/src/err_approx.cpp rename to layr/src/err_approx.cpp index f374942f7..9366bb72b 100755 --- a/rocnnet/eqns/src/err_approx.cpp +++ b/layr/src/err_approx.cpp @@ -1,13 +1,13 @@ -#include "ead/generated/api.hpp" +#include "eteq/generated/api.hpp" -#include "rocnnet/eqns/err_approx.hpp" +#include "layr/err_approx.hpp" -#ifdef EQNS_ERR_APPROX_HPP +#ifdef LAYR_ERR_APPROX_HPP -namespace eqns +namespace layr { -ead::NodeptrT identity (ead::NodeptrT node) +eteq::NodeptrT identity (eteq::NodeptrT node) { return node; } @@ -18,12 +18,10 @@ AssignGroupsT sgd (const VarErrsT& leaves, AssignsT assignments; for (size_t i = 0, nleaves = leaves.size(); i < nleaves; ++i) { - auto leaf_node = ead::convert_to_node(leaves[i].first); + auto leaf_node = eteq::convert_to_node(leaves[i].first); auto err = leaves[i].second; - ade::Shape eshape = err->shape(); - auto next = tenncor::sub(leaf_node, - tenncor::mul(err, - ead::make_constant_scalar(learning_rate, eshape))); + teq::Shape eshape = err->shape(); + auto next = leaf_node - err * learning_rate; assignments.push_back(VarAssign{ fmts::sprintf("sgd::%s_grad_%s", root_label.c_str(), leaves[i].first->get_label().c_str()), @@ -40,28 +38,17 @@ AssignGroupsT rms_momentum (const VarErrsT& leaves, PybindT learning_rate, AssignsT leaf_assigns; for (size_t i = 0, nleaves = leaves.size(); i < nleaves; ++i) { - auto leaf_node = ead::convert_to_node(leaves[i].first); + auto leaf_node = eteq::convert_to_node(leaves[i].first); auto err = leaves[i].second; - ade::Shape eshape = err->shape(); - ead::VarptrT momentum = - ead::make_variable_scalar(1, eshape, "momentum"); - auto momentum_node = ead::convert_to_node(momentum); - ead::NodeptrT discount_node = - ead::make_constant_scalar(discount_factor, eshape); - ead::NodeptrT datcount_node = - ead::make_constant_scalar(1.0 - discount_factor, eshape); + teq::Shape eshape = err->shape(); + eteq::VarptrT momentum = + eteq::make_variable_scalar(1, eshape, "momentum"); + auto momentum_node = eteq::convert_to_node(momentum); - auto momentum_next = tenncor::add( - tenncor::mul(discount_node, momentum_node), - tenncor::mul(datcount_node, tenncor::square(err)) - ); - auto leaf_next = tenncor::sub(leaf_node, - tenncor::div( - tenncor::mul(err, - ead::make_constant_scalar(learning_rate, eshape)), - tenncor::add(tenncor::sqrt(momentum_node), - ead::make_constant_scalar(epsilon, eshape)) - )); + auto momentum_next = discount_factor * momentum_node + + PybindT(1.0 - discount_factor) * tenncor::square(err); + auto leaf_next = leaf_node - err * learning_rate / + (tenncor::sqrt(momentum_node) + epsilon); momentum_assigns.push_back(VarAssign{ fmts::sprintf("rms_momentum::%s_momentum_%s", root_label.c_str(), leaves[i].first->get_label().c_str()), @@ -78,8 +65,8 @@ void assign_groups (AssignGroupsT& groups, UpdateStepF update_step) { for (AssignsT& group : groups) { - ead::TensSetT updated_var; - for (eqns::VarAssign& assign : group) + eteq::TensSetT updated_var; + for (layr::VarAssign& assign : group) { updated_var.emplace(assign.target_->get_tensor().get()); 
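// Written as update rules, the two approximations defined above compute, for
// each variable x with gradient g, learning rate eta, discount gamma, and
// epsilon:
//
//   sgd:           x <- x - eta * g
//   rms_momentum:  m <- gamma * m + (1 - gamma) * g^2
//                  x <- x - eta * g / (sqrt(m) + epsilon)
//
// these are exactly the graphs built by the operator-overloaded expressions
// (leaf_node - err * learning_rate, etc.) introduced in this patch.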
assign.target_->assign(assign.source_->data(), diff --git a/rocnnet/modl/src/layer.cpp b/layr/src/layer.cpp similarity index 80% rename from rocnnet/modl/src/layer.cpp rename to layr/src/layer.cpp index ff27d1c67..9b1388d7d 100644 --- a/rocnnet/modl/src/layer.cpp +++ b/layr/src/layer.cpp @@ -1,13 +1,13 @@ #include "pbm/save.hpp" #include "pbm/load.hpp" -#include "ead/serialize.hpp" +#include "eteq/serialize.hpp" -#include "rocnnet/modl/layer.hpp" +#include "layr/layer.hpp" -#ifdef MODL_LAYER_HPP +#ifdef LAYR_LAYER_HPP -namespace modl +namespace layr { size_t LayerTag::tag_id_ = typeid(LayerTag).hash_code(); @@ -38,14 +38,14 @@ std::unordered_map unpack_labels ( return out; } -void iLayer::tag (ade::TensptrT tensor, LayerId subs) const +void iLayer::tag (teq::TensptrT tensor, LayerId subs) const { get_layer_reg().layer_tag(tensor, get_ltype(), subs.to_string(get_label())); } -void iLayer::recursive_tag (ade::TensptrT root, - std::unordered_set ignores, LayerId subs) const +void iLayer::recursive_tag (teq::TensptrT root, + std::unordered_set ignores, LayerId subs) const { recursive_layer_tag(root, get_ltype(), subs.to_string(get_label()), ignores); @@ -57,12 +57,12 @@ LayerRegistry& get_layer_reg (void) return registry; } -void recursive_layer_tag (ade::TensptrT tens, std::string layer_type, - std::string name, std::unordered_set stops, +void recursive_layer_tag (teq::TensptrT tens, std::string layer_type, + std::string name, std::unordered_set stops, LayerRegistry& registry) { tag::recursive_tag(tens, stops, - [&](ade::TensrefT ref) + [&](teq::TensrefT ref) { registry.layer_tag(ref, layer_type, name); }); @@ -126,19 +126,19 @@ struct LayerNode final std::vector subs_; }; -using SublayersT = std::unordered_map; +using SublayersT = std::unordered_map; -using TensLablT = std::unordered_map; +using TensLablT = std::unordered_map; using TensLayerMapT = std::unordered_map; -struct LayerDeserializer final : public ade::OnceTraveler +struct LayerDeserializer final : public teq::OnceTraveler { LayerDeserializer (std::string key, std::string val) : base_(std::make_shared(key, val)) {} /// Implementation of OnceTraveler - void visit_leaf (ade::iLeaf* leaf) override + void visit_leaf (teq::iLeaf* leaf) override { tag::TagRepsT reps = tag::get_reg().get_tags(leaf); @@ -151,7 +151,7 @@ struct LayerDeserializer final : public ade::OnceTraveler } /// Implementation of OnceTraveler - void visit_func (ade::iFunctor* func) override + void visit_func (teq::iFunctor* func) override { auto& children = func->get_children(); for (auto child : children) @@ -174,7 +174,7 @@ struct LayerDeserializer final : public ade::OnceTraveler } LayerptrT build_layer ( - LayerRegistry& registry, ade::OwnerMapT& owners) const + LayerRegistry& registry, teq::OwnerMapT& owners) const { TensLayerMapT layer_tens; for (auto& sublayer : sublayers_) @@ -190,7 +190,7 @@ struct LayerDeserializer final : public ade::OnceTraveler return build_layer_helper(registry, layer_tens, base_.get()); } - std::unordered_set roots_; + std::unordered_set roots_; private: LayerptrT build_layer_helper (LayerRegistry& registry, @@ -222,7 +222,7 @@ struct LayerDeserializer final : public ade::OnceTraveler LNodeptrT base_; }; -LayerptrT load_layer (std::istream& ins, ade::TensT& roots, +LayerptrT load_layer (std::istream& ins, teq::TensT& roots, std::string ltype, std::string label, LayerRegistry& registry) { @@ -233,20 +233,20 @@ LayerptrT load_layer (std::istream& ins, ade::TensT& roots, ltype.c_str()); } pbm::GraphInfo info; - pbm::load_graph(info, 
graph); + pbm::load_graph(info, graph); - ade::OwnerMapT owners = ade::track_owners( - ade::TensT(info.roots_.begin(), info.roots_.end())); + teq::OwnerMapT owners = teq::track_owners( + teq::TensT(info.roots_.begin(), info.roots_.end())); LayerDeserializer layd(ltype, label); // get all layer labelled nodes in graph - for (ade::TensptrT tens : info.roots_) + for (teq::TensptrT tens : info.roots_) { tens->accept(layd); } roots.reserve(layd.roots_.size()); - for (ade::iTensor* root : layd.roots_) + for (teq::iTensor* root : layd.roots_) { roots.push_back(owners.at(root).lock()); } @@ -254,24 +254,24 @@ LayerptrT load_layer (std::istream& ins, ade::TensT& roots, return layd.build_layer(registry, owners); } -bool save_layer (std::ostream& outs, const iLayer& layer, ade::TensT roots, +bool save_layer (std::ostream& outs, const iLayer& layer, teq::TensT roots, LayerRegistry& registry) { - pbm::GraphSaver saver(registry.get_tag_registry()); + pbm::GraphSaver saver(registry.get_tag_registry()); for (auto& root : roots) { root->accept(saver); } auto contents = layer.get_contents(); - auto owners = ade::track_owners(contents); + auto owners = teq::track_owners(contents); for (auto tens : contents) { tens->accept(saver); } pbm::PathedMapT labels; - for (ade::iLeaf* leaf : saver.leaves_) + for (teq::iLeaf* leaf : saver.leaves_) { if (false == leaf->is_const()) { @@ -287,4 +287,4 @@ bool save_layer (std::ostream& outs, const iLayer& layer, ade::TensT roots, } -#endif // MODL_LAYER_HPP +#endif // LAYR_LAYER_HPP diff --git a/rocnnet/modl/src/model.cpp b/layr/src/model.cpp similarity index 73% rename from rocnnet/modl/src/model.cpp rename to layr/src/model.cpp index de5610c2b..562cdee67 100644 --- a/rocnnet/modl/src/model.cpp +++ b/layr/src/model.cpp @@ -1,8 +1,8 @@ -#include "rocnnet/modl/model.hpp" +#include "layr/model.hpp" -#ifdef MODL_MODEL_HPP +#ifdef LAYR_MODEL_HPP -namespace modl +namespace layr { LayerptrT SeqModelBuilder::build (void) const diff --git a/rocnnet/modl/src/rbm.cpp b/layr/src/rbm.cpp similarity index 85% rename from rocnnet/modl/src/rbm.cpp rename to layr/src/rbm.cpp index aa1469f4c..0ce35d3e7 100644 --- a/rocnnet/modl/src/rbm.cpp +++ b/layr/src/rbm.cpp @@ -1,8 +1,8 @@ -#include "rocnnet/modl/rbm.hpp" +#include "layr/rbm.hpp" -#ifdef MODL_RBM_HPP +#ifdef LAYR_RBM_HPP -namespace modl +namespace layr { LayerptrT RBMBuilder::build (void) const diff --git a/layr/test/main.cpp b/layr/test/main.cpp new file mode 100644 index 000000000..b2fcbd1d0 --- /dev/null +++ b/layr/test/main.cpp @@ -0,0 +1,7 @@ +#include "gtest/gtest.h" + +int main (int argc, char** argv) +{ + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/layr/test/test_approx.cpp b/layr/test/test_approx.cpp new file mode 100644 index 000000000..10732cb4f --- /dev/null +++ b/layr/test/test_approx.cpp @@ -0,0 +1,101 @@ + +#ifndef DISABLE_APPROX_TEST + + +#include "gtest/gtest.h" + +#include "dbg/stream/teq_csv.hpp" + +#include "testutil/tutil.hpp" + +#include "exam/exam.hpp" + +#include "eteq/variable.hpp" + +#include "layr/err_approx.hpp" + + +TEST(APPROX, StochasticGD) +{ + std::vector slist = {18, 9, 3}; + + auto leaf = eteq::make_variable_scalar( + 0, teq::Shape(slist), "leaf"); + auto root = eteq::make_variable_scalar( + 0, teq::Shape(slist), "root"); + + auto groups = layr::sgd(layr::VarErrsT{{leaf, + eteq::convert_to_node(root)}}, 0.67, "stuff"); + ASSERT_EQ(1, groups.size()); + + auto ass = groups.at(0); + ASSERT_EQ(1, ass.size()); + + auto assign = ass.at(0); + 
EXPECT_STREQ("sgd::stuff_grad_leaf", assign.label_.c_str()); + EXPECT_EQ(assign.target_->get_tensor().get(), leaf->get_tensor().get()); + EXPECT_GRAPHEQ( + "(SUB[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(variable:leaf[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(MUL[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(variable:root[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(constant:0.67[18\\9\\3\\1\\1\\1\\1\\1])", + assign.source_->get_tensor()); +} + + +TEST(APPROX, RMS_Momentum) +{ + std::vector slist = {18, 9, 3}; + + auto leaf = eteq::make_variable_scalar( + 0, teq::Shape(slist), "leaf"); + auto root = eteq::make_variable_scalar( + 0, teq::Shape(slist), "root"); + + auto groups = layr::rms_momentum(layr::VarErrsT{{leaf, + eteq::convert_to_node(root)}}, 0.67, 0.78, + std::numeric_limits::epsilon(), "stuff"); + ASSERT_EQ(2, groups.size()); + + auto mom_ass = groups.at(0); + ASSERT_EQ(1, mom_ass.size()); + + auto var_ass = groups.at(1); + ASSERT_EQ(1, var_ass.size()); + + auto mom_assign = mom_ass.at(0); + EXPECT_STREQ("rms_momentum::stuff_momentum_leaf", mom_assign.label_.c_str()); + auto mom = mom_assign.target_->get_tensor().get(); + EXPECT_NE(mom, leaf->get_tensor().get()); + EXPECT_STREQ("momentum", mom->to_string().c_str()); + EXPECT_GRAPHEQ( + "(ADD[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(MUL[18\\9\\3\\1\\1\\1\\1\\1])\n" + " | `--(variable:momentum[18\\9\\3\\1\\1\\1\\1\\1])\n" + " | `--(constant:0.78[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(MUL[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(SQUARE[18\\9\\3\\1\\1\\1\\1\\1])\n" + " | `--(variable:root[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(constant:0.22[18\\9\\3\\1\\1\\1\\1\\1])", + mom_assign.source_->get_tensor()); + + auto var_assign = var_ass.at(0); + EXPECT_STREQ("rms_momentum::stuff_grad_leaf", var_assign.label_.c_str()); + EXPECT_EQ(var_assign.target_->get_tensor().get(), leaf->get_tensor().get()); + EXPECT_GRAPHEQ( + "(SUB[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(variable:leaf[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(DIV[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(MUL[18\\9\\3\\1\\1\\1\\1\\1])\n" + " | `--(variable:root[18\\9\\3\\1\\1\\1\\1\\1])\n" + " | `--(constant:0.67[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(ADD[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(SQRT[18\\9\\3\\1\\1\\1\\1\\1])\n" + " | `--(variable:momentum[18\\9\\3\\1\\1\\1\\1\\1])\n" + " `--(constant:1.19209e-07[18\\9\\3\\1\\1\\1\\1\\1])", + var_assign.source_->get_tensor()); +} + + +#endif // DISABLE_APPROX_TEST diff --git a/layr/test/test_dense.cpp b/layr/test/test_dense.cpp new file mode 100644 index 000000000..e69de29bb diff --git a/layr/test/test_init.cpp b/layr/test/test_init.cpp new file mode 100644 index 000000000..d92e1df3e --- /dev/null +++ b/layr/test/test_init.cpp @@ -0,0 +1,150 @@ + +#ifndef DISABLE_INIT_TEST + + +#include "gtest/gtest.h" + +#include "exam/exam.hpp" + +#include "layr/init.hpp" + + +TEST(INIT, Zero) +{ + std::vector slist = {18, 9}; + std::string label = "abc"; + + auto z = layr::zero_init()(teq::Shape(slist), label); + auto shape = z->shape(); + ASSERT_ARREQ(slist, shape); + + double* d = z->data(); + for (size_t i = 0, n = shape.n_elems(); i < n; ++i) + { + EXPECT_EQ(0, d[i]); + } + + EXPECT_STREQ(z->to_string().c_str(), label.c_str()); +} + + +TEST(INIT, VarianceScaling) +{ + std::vector slist = {18, 9, 3}; + std::string label = "def"; + double factor = 0.425; + + auto v1 = layr::variance_scaling_init(factor)( + teq::Shape(slist), label); + auto v2 = layr::variance_scaling_init(factor, + [](teq::Shape s){ return s.at(2); })(teq::Shape(slist), label); + { + auto shape = v1->shape(); + ASSERT_ARREQ(slist, 
shape); + + double ex_stdev = std::sqrt(factor / + ((shape.at(0) + shape.at(1)) / 2)); + double upper = ex_stdev * 2; + double lower = -upper; + + double* d = v1->data(); + for (size_t i = 0, n = shape.n_elems(); i < n; ++i) + { + EXPECT_GT(upper, d[i]); + EXPECT_LT(lower, d[i]); + } + + EXPECT_STREQ(v1->to_string().c_str(), label.c_str()); + } + { + auto shape = v2->shape(); + ASSERT_ARREQ(slist, shape); + + double ex_stdev = std::sqrt(factor / shape.at(2)); + double bound = ex_stdev * 2; + + double* d = v2->data(); + for (size_t i = 0, n = shape.n_elems(); i < n; ++i) + { + EXPECT_GT(bound, d[i]); + EXPECT_LT(-bound, d[i]); + } + + EXPECT_STREQ(v2->to_string().c_str(), label.c_str()); + } +} + + +TEST(INIT, UniformXavier) +{ + std::vector slist = {18, 9, 3}; + std::string label = "ghi"; + double factor = 0.712; + + auto x = layr::unif_xavier_init(factor)( + teq::Shape(slist), label); + + auto shape = x->shape(); + ASSERT_ARREQ(slist, shape); + + double bound = factor * std::sqrt(6.0 / (shape.at(0) + shape.at(1))); + + double* d = x->data(); + for (size_t i = 0, n = shape.n_elems(); i < n; ++i) + { + EXPECT_GT(bound, d[i]); + EXPECT_LT(-bound, d[i]); + } + + EXPECT_STREQ(x->to_string().c_str(), label.c_str()); +} + + +// TEST(INIT, NormalXavier) +// { +// std::vector slist = {11, 12, 10}; +// std::string label = "jkl"; +// double factor = 0.172; + +// auto x = layr::norm_xavier_init(factor)( +// teq::Shape(slist), label); + +// auto shape = x->shape(); +// ASSERT_ARREQ(slist, shape); + +// double stdev = factor * std::sqrt(2.0 / (shape.at(0) + shape.at(1))); + +// double* d = x->data(); +// size_t stdevs[3]; +// size_t n = shape.n_elems(); +// for (size_t i = 0; i < n; ++i) +// { +// double c = d[i]; +// if (-stdev < c && c < stdev) +// { +// ++stdevs[0]; +// } +// if (-2 * stdev < c && c < 2 * stdev) +// { +// ++stdevs[1]; +// } +// if (-3 * stdev < c && c < 3 * stdev) +// { +// ++stdevs[2]; +// } +// } +// double want_68 = (double) stdevs[0] / n; +// double want_95 = (double) stdevs[1] / n; +// double want_99 = (double) stdevs[2] / n; +// EXPECT_LT(60, want_68); +// EXPECT_GT(75, want_68); +// EXPECT_LT(90, want_95); +// EXPECT_GT(100, want_95); +// EXPECT_LT(95, want_99); +// EXPECT_GT(100, want_99); + +// EXPECT_STREQ(x->to_string().c_str(), label.c_str()); +// } + + +#endif // DISABLE_INIT_TEST diff --git a/models/test/ead_test.json b/models/test/eteq_test.json similarity index 100% rename from models/test/ead_test.json rename to models/test/eteq_test.json diff --git a/models/test/ead_test.pbx b/models/test/eteq_test.pbx similarity index 100% rename from models/test/ead_test.pbx rename to models/test/eteq_test.pbx diff --git a/models/test/ead_test.txt b/models/test/eteq_test.txt similarity index 100% rename from models/test/ead_test.txt rename to models/test/eteq_test.txt diff --git a/opt/BUILD.bazel b/opt/BUILD.bazel index 0acb2929c..62f81b7d9 100755 --- a/opt/BUILD.bazel +++ b/opt/BUILD.bazel @@ -20,7 +20,7 @@ cc_library( srcs = glob(["src/*.cpp"]), copts = ["-std=c++17"], deps = [ - "//ade:ade", + "//teq:teq", "//tag:tag", "//opt/parse:parse", "@boost//:functional", @@ -37,7 +37,7 @@ cc_test( copts = ["-std=c++17"], deps = [ "//opt:opt", - "//ead:ead", + "//eteq:eteq", "//testutil:tutil", "@gtest//:gtest", "@com_github_mingkaic_cppkg//exam:exam", diff --git a/opt/README_OPT.md b/opt/README_OPT.md new file mode 100644 index 000000000..9a15f22f3 --- /dev/null +++ b/opt/README_OPT.md @@ -0,0 +1,3 @@ +# OPT (OPTimization) + +Provides parsing of equation transformation and optimization 
of TEQ graphs diff --git a/opt/candidate.hpp b/opt/candidate.hpp index 973eef973..624b27227 100755 --- a/opt/candidate.hpp +++ b/opt/candidate.hpp @@ -3,7 +3,7 @@ #include -#include "ade/itensor.hpp" +#include "teq/itensor.hpp" #ifndef OPT_CAND_HPP #define OPT_CAND_HPP @@ -11,7 +11,7 @@ namespace opt { -using CtxValT = std::set; +using CtxValT = std::set; using ContexT = std::map; @@ -55,13 +55,13 @@ using CandsT = std::unordered_map; struct CandArg { - ade::TensptrT tensor_; + teq::TensptrT tensor_; CandsT candidates_; - ade::CoordptrT shaper_; + teq::CoordptrT shaper_; - ade::CoordptrT coorder_; + teq::CoordptrT coorder_; }; using CandArgsT = std::vector; diff --git a/opt/iconverter.hpp b/opt/iconverter.hpp index eea9f586a..4b5c97ed8 100644 --- a/opt/iconverter.hpp +++ b/opt/iconverter.hpp @@ -14,8 +14,8 @@ struct iConverter { virtual ~iConverter (void) = default; - virtual ade::TensptrT build ( - const ContexT& ctx, ade::Shape outshape) const = 0; + virtual teq::TensptrT build ( + const ContexT& ctx, teq::Shape outshape) const = 0; virtual std::string to_string (void) const = 0; }; diff --git a/opt/ivoter.hpp b/opt/ivoter.hpp index 3c72e1644..543324eaf 100755 --- a/opt/ivoter.hpp +++ b/opt/ivoter.hpp @@ -14,8 +14,8 @@ namespace opt struct VoterArg final { VoterArg (std::string label, - ade::CoordptrT shaper, - ade::CoordptrT coorder, + teq::CoordptrT shaper, + teq::CoordptrT coorder, SUBGRAPH_TYPE type) : label_(label), shaper_(shaper), @@ -120,9 +120,9 @@ struct VoterArg final std::string label_; - ade::CoordptrT shaper_; + teq::CoordptrT shaper_; - ade::CoordptrT coorder_; + teq::CoordptrT coorder_; SUBGRAPH_TYPE type_; }; diff --git a/opt/matcher.hpp b/opt/matcher.hpp index ea9186b3e..8f4079a3c 100755 --- a/opt/matcher.hpp +++ b/opt/matcher.hpp @@ -1,4 +1,4 @@ -#include "ade/traveler.hpp" +#include "teq/traveler.hpp" #include "tag/group.hpp" @@ -29,14 +29,14 @@ const std::string group_prefix = "group:"; // Using the matcher, the optimizer makes a best attempt at // mapping tensor to zero to many candidates. 
// The optimizer is responsible for selecting the best candidates -struct Matcher final : public ade::iTraveler +struct Matcher final : public teq::iTraveler { Matcher (void) = default; Matcher (const VoterPool& voters) : voters_(voters) {} /// Implementation of iTraveler - void visit (ade::iLeaf* leaf) override + void visit (teq::iLeaf* leaf) override { if (false == estd::has(candidates_, leaf)) { @@ -65,7 +65,7 @@ struct Matcher final : public ade::iTraveler } /// Implementation of iTraveler - void visit (ade::iFunctor* func) override + void visit (teq::iFunctor* func) override { if (false == estd::has(candidates_, func)) { @@ -76,7 +76,7 @@ struct Matcher final : public ade::iTraveler } if (std::all_of(children.begin(), children.end(), - [this](const ade::FuncArg& child) -> bool + [this](const teq::FuncArg& child) -> bool { auto ctens = child.get_tensor().get(); return estd::has(this->candidates_[ctens], @@ -95,7 +95,7 @@ struct Matcher final : public ade::iTraveler if (scalarize_) { if (std::all_of(children.begin(), children.end(), - [this](const ade::FuncArg& child) -> bool + [this](const teq::FuncArg& child) -> bool { auto ctens = child.get_tensor().get(); std::string scalar_str = scalarize_(ctens); @@ -143,7 +143,7 @@ struct Matcher final : public ade::iTraveler group_prefix + sg->group_); if (voters_.branches_.end() != bit) { - // todo: store sg->children_ as ade::ArgsT + // todo: store sg->children_ as teq::ArgsT CandArgsT args; args.reserve(children.size()); for (auto& sgcpair : sg->children_) @@ -152,8 +152,8 @@ struct Matcher final : public ade::iTraveler args.push_back(CandArg{ ctens, candidates_[sgcpair.first], - ade::identity, - ade::CoordptrT(), + teq::identity, + teq::CoordptrT(), }); } CandsT group_cands = bit->second->inspect(args); @@ -171,13 +171,13 @@ struct Matcher final : public ade::iTraveler VoterPool voters_; // generated as visited - std::unordered_map candidates_; + std::unordered_map candidates_; // heads for functors tag::SubgraphAssocsT group_head_; // functor for returning constant representation of tensor - std::function scalarize_; + std::function scalarize_; }; } diff --git a/opt/optimize.hpp b/opt/optimize.hpp index d914603cc..f2d0024c0 100644 --- a/opt/optimize.hpp +++ b/opt/optimize.hpp @@ -7,7 +7,7 @@ namespace opt { -using CstConvertF = std::function; +using CstConvertF = std::function; struct OptCtx { @@ -18,7 +18,7 @@ struct OptCtx std::unordered_map converts_; }; -ade::TensT optimize (ade::TensT roots, const OptCtx& opts); +teq::TensT optimize (teq::TensT roots, const OptCtx& opts); } diff --git a/opt/parse.hpp b/opt/parse.hpp index 21bc59b5d..d842d09e0 100755 --- a/opt/parse.hpp +++ b/opt/parse.hpp @@ -26,10 +26,10 @@ struct iConverterBuilder virtual ConvptrT build (const ::Subgraph* sg, const RulesContext& ctx) const = 0; // extended interface to create shaper - virtual ade::CoordptrT shaperize (::NumList* list) const = 0; + virtual teq::CoordptrT shaperize (::NumList* list) const = 0; // extended interface to create coorder - virtual ade::CoordptrT coorderize (::NumList* list) const = 0; + virtual teq::CoordptrT coorderize (::NumList* list) const = 0; }; OptCtx parse (std::string content, const iConverterBuilder& builder); diff --git a/opt/rmdups.hpp b/opt/rmdups.hpp index 787c4fe8e..589bb7dbd 100644 --- a/opt/rmdups.hpp +++ b/opt/rmdups.hpp @@ -6,12 +6,12 @@ namespace opt { -void replace_parents (const ade::ParentFinder& pfinder, - ade::iTensor* source, ade::TensptrT target); +void replace_parents (const teq::ParentFinder& pfinder, + 
teq::iTensor* source, teq::TensptrT target); template -std::vector remove_duplicates (ade::TensT& roots, std::vector tens, - const ade::ParentFinder& pfinder, +std::vector remove_duplicates (teq::TensT& roots, std::vector tens, + const teq::ParentFinder& pfinder, tag::TagRegistry& registry = tag::get_reg()) { if (tens.empty()) @@ -19,11 +19,11 @@ std::vector remove_duplicates (ade::TensT& roots, std::vector tens, return {}; } - std::unordered_set priorities; - std::unordered_map> rindices; + std::unordered_set priorities; + std::unordered_map> rindices; for (size_t i = 0, n = roots.size(); i < n; ++i) { - ade::TensptrT& root = roots[i]; + teq::TensptrT& root = roots[i]; priorities.emplace(root.get()); rindices[root.get()].push_back(i); } @@ -66,16 +66,16 @@ std::vector remove_duplicates (ade::TensT& roots, std::vector tens, return uniques; } -using ImmutablesT = std::vector; +using ImmutablesT = std::vector; -using HFunctorsT = std::vector>; +using HFunctorsT = std::vector>; // identify immutable leaves and organize functors by maxheight void populate_graph (ImmutablesT& immutables, HFunctorsT& functors, - const ade::TensT& roots); + const teq::TensT& roots); // delete and update equivalent immutable leaves and functors -void remove_all_duplicates (ade::TensT& roots, +void remove_all_duplicates (teq::TensT& roots, ImmutablesT& immutables, HFunctorsT& functors); } diff --git a/opt/rules.md b/opt/rules.md index 6e2ab77ab..e599e0e18 100644 --- a/opt/rules.md +++ b/opt/rules.md @@ -15,7 +15,7 @@ There are 4 types of statements in .rules minilanguage: ## Symbol Declaration -A symbol is a generic representation of any node in an ADE graph. +A symbol is a generic representation of any node in an TEQ graph. In conversions, symbols can be used to represent "leaves" of subgraphs. Symbols must be declared before they can be used in conversions. @@ -30,7 +30,7 @@ symbol A // this declares A ## Conversion -A conversion identifies an ADE subgraph and defines a new subgraph to convert to given specied symbols and scalars. +A conversion identifies an TEQ subgraph and defines a new subgraph to convert to given specied symbols and scalars. 
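For illustration only (this is not the normative syntax, which is defined under Syntax below): a typical conversion might match a subgraph in which a declared symbol is added to a zero scalar and rewrite it to just that symbol, letting the optimizer drop the redundant node from the TEQ graph.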
#### Syntax: diff --git a/opt/src/optimize.cpp b/opt/src/optimize.cpp index 9a9f92dc1..7b34dcba3 100644 --- a/opt/src/optimize.cpp +++ b/opt/src/optimize.cpp @@ -6,7 +6,7 @@ namespace opt { -ade::TensT optimize (ade::TensT roots, const OptCtx& opts) +teq::TensT optimize (teq::TensT roots, const OptCtx& opts) { if (roots.empty()) { @@ -36,7 +36,7 @@ ade::TensT optimize (ade::TensT roots, const OptCtx& opts) CstConvertF const_conv = opts.const_conv_; Matcher matcher(opts.voters_); matcher.scalarize_ = - [&const_conv](ade::iTensor* tens) -> std::string + [&const_conv](teq::iTensor* tens) -> std::string { std::string out; if (auto cst = const_conv(tens)) @@ -49,12 +49,12 @@ ade::TensT optimize (ade::TensT roots, const OptCtx& opts) } return out; }; - ade::GraphStat stat; - ade::ParentFinder pfinder; - std::unordered_map> rindices; + teq::GraphStat stat; + teq::ParentFinder pfinder; + std::unordered_map> rindices; for (size_t i = 0, n = roots.size(); i < n; ++i) { - ade::TensptrT& root = roots[i]; + teq::TensptrT& root = roots[i]; root->accept(stat); root->accept(pfinder); rindices[root.get()].push_back(i); @@ -72,13 +72,13 @@ ade::TensT optimize (ade::TensT roots, const OptCtx& opts) // there are no conversions for leaves for (auto& funcs : functors) { - for (ade::FuncptrT func : funcs) + for (teq::FuncptrT func : funcs) { // although matcher recursively applies to functor children, // it's easier to evaluate near conversion to avoid tracking state changes func->accept(matcher); - ade::TensptrT converted = nullptr; + teq::TensptrT converted = nullptr; auto& cands = matcher.candidates_[func.get()]; // select the best candidate (smallest conversion) // currently first come first serve (todo: implement) diff --git a/opt/src/rmdups.cpp b/opt/src/rmdups.cpp index 19190f927..d3b0cc9a8 100644 --- a/opt/src/rmdups.cpp +++ b/opt/src/rmdups.cpp @@ -5,15 +5,15 @@ namespace opt { -void replace_parents (const ade::ParentFinder& pfinder, - ade::iTensor* source, ade::TensptrT target) +void replace_parents (const teq::ParentFinder& pfinder, + teq::iTensor* source, teq::TensptrT target) { auto it = pfinder.parents_.find(source); if (pfinder.parents_.end() != it) { for (auto& parent_pair : it->second) { - auto f = static_cast(parent_pair.first); + auto f = static_cast(parent_pair.first); auto& children = f->get_children(); for (size_t i : parent_pair.second) { @@ -29,11 +29,11 @@ void replace_parents (const ade::ParentFinder& pfinder, } void populate_graph (ImmutablesT& immutables, HFunctorsT& functors, - const ade::TensT& roots) + const teq::TensT& roots) { - ade::OwnerMapT owners = ade::track_owners(roots); - ade::GraphStat stat; - for (ade::TensptrT root : roots) + teq::OwnerMapT owners = teq::track_owners(roots); + teq::GraphStat stat; + for (teq::TensptrT root : roots) { root->accept(stat); } @@ -42,7 +42,7 @@ void populate_graph (ImmutablesT& immutables, HFunctorsT& functors, root_heights.reserve(roots.size()); std::transform(roots.begin(), roots.end(), std::back_inserter(root_heights), - [&stat](const ade::TensptrT& root) + [&stat](const teq::TensptrT& root) { return stat.graphsize_[root.get()].upper_; }); @@ -59,25 +59,25 @@ void populate_graph (ImmutablesT& immutables, HFunctorsT& functors, if (tag::get_property_reg().has_property(tens, tag::immutable_tag)) { immutables.push_back( - std::static_pointer_cast( + std::static_pointer_cast( owners.at(tens).lock())); } } else { functors[height - 1].push_back( - std::static_pointer_cast( + std::static_pointer_cast( owners.at(tens).lock())); } } } -void 
remove_all_duplicates (ade::TensT& roots, +void remove_all_duplicates (teq::TensT& roots, ImmutablesT& immutables, HFunctorsT& functors) { // remove equivalent nodes - ade::ParentFinder pfinder; - for (ade::TensptrT& root : roots) + teq::ParentFinder pfinder; + for (teq::TensptrT& root : roots) { root->accept(pfinder); } diff --git a/opt/src/stats.cpp b/opt/src/stats.cpp index bc23a2063..63fd7ac51 100755 --- a/opt/src/stats.cpp +++ b/opt/src/stats.cpp @@ -5,9 +5,9 @@ namespace opt { -bool is_scalar (ade::iLeaf* leaf) +bool is_scalar (teq::iLeaf* leaf) { - ade::Shape shape = leaf->shape(); + teq::Shape shape = leaf->shape(); char* data = (char*) leaf->data(); size_t n = shape.n_elems(); size_t perbytes = leaf->nbytes() / n; @@ -22,31 +22,31 @@ bool is_scalar (ade::iLeaf* leaf) return true; } -std::string to_string (ade::CoordptrT c) +std::string to_string (teq::CoordptrT c) { - if (ade::is_identity(c.get())) + if (teq::is_identity(c.get())) { return ""; } return c->to_string(); } -bool lt (ade::CoordptrT a, ade::CoordptrT b) +bool lt (teq::CoordptrT a, teq::CoordptrT b) { - if (ade::is_identity(a.get())) + if (teq::is_identity(a.get())) { - return false == ade::is_identity(b.get()); + return false == teq::is_identity(b.get()); } return a->to_string() < b->to_string(); } -bool is_equal (ade::CoordptrT a, ade::CoordptrT b) +bool is_equal (teq::CoordptrT a, teq::CoordptrT b) { if (a == b) { return true; } - if (ade::is_identity(a.get()) && ade::is_identity(b.get())) + if (teq::is_identity(a.get()) && teq::is_identity(b.get())) { return true; } @@ -57,8 +57,8 @@ bool is_equal (ade::CoordptrT a, ade::CoordptrT b) return false; } -bool lt (std::unordered_set priorities, - ade::iLeaf* a, ade::iLeaf* b) +bool lt (std::unordered_set priorities, + teq::iLeaf* a, teq::iLeaf* b) { size_t atype = a->type_code(); size_t btype = b->type_code(); @@ -88,9 +88,9 @@ bool lt (std::unordered_set priorities, return atype < btype; } -bool is_equal (ade::iLeaf* a, ade::iLeaf* b) +bool is_equal (teq::iLeaf* a, teq::iLeaf* b) { - ade::Shape shape = a->shape(); + teq::Shape shape = a->shape(); size_t dtype = a->type_code(); if (shape.compatible_after(b->shape(), 0) && dtype == b->type_code()) @@ -103,8 +103,8 @@ bool is_equal (ade::iLeaf* a, ade::iLeaf* b) return false; } -bool lt (std::unordered_set priorities, - ade::iFunctor* a, ade::iFunctor* b) +bool lt (std::unordered_set priorities, + teq::iFunctor* a, teq::iFunctor* b) { size_t acode = a->get_opcode().code_; size_t bcode = b->get_opcode().code_; @@ -124,7 +124,7 @@ bool lt (std::unordered_set priorities, if (tag::get_property_reg().has_property(a, tag::commutative_tag)) { auto arg_lt = - [](ade::FuncArg a, ade::FuncArg b) + [](teq::FuncArg a, teq::FuncArg b) { auto atens = a.get_tensor().get(); auto btens = b.get_tensor().get(); @@ -163,11 +163,11 @@ bool lt (std::unordered_set priorities, return acode < bcode; } -bool is_equal (ade::iFunctor* a, ade::iFunctor* b) +bool is_equal (teq::iFunctor* a, teq::iFunctor* b) { if (a->get_opcode().code_ == b->get_opcode().code_) { - ade::Shape shape = a->shape(); + teq::Shape shape = a->shape(); if (shape.compatible_after(b->shape(), 0)) { auto achildren = a->get_children(); @@ -176,7 +176,7 @@ bool is_equal (ade::iFunctor* a, ade::iFunctor* b) if (tag::get_property_reg().has_property(a, tag::commutative_tag)) { auto arg_lt = - [](ade::FuncArg a, ade::FuncArg b) + [](teq::FuncArg a, teq::FuncArg b) { auto atens = a.get_tensor().get(); auto btens = b.get_tensor().get(); @@ -191,7 +191,7 @@ bool is_equal (ade::iFunctor* 
a, ade::iFunctor* b) } return std::equal(achildren.begin(), achildren.end(), bchildren.begin(), - [](const ade::FuncArg& a, const ade::FuncArg& b) + [](const teq::FuncArg& a, const teq::FuncArg& b) { return a.get_tensor().get() == b.get_tensor().get() && is_equal(a.get_coorder(), b.get_coorder()); diff --git a/opt/stats.hpp b/opt/stats.hpp index 882573963..5733fe606 100755 --- a/opt/stats.hpp +++ b/opt/stats.hpp @@ -1,4 +1,4 @@ -#include "ade/ade.hpp" +#include "teq/teq.hpp" #include "tag/prop.hpp" @@ -8,34 +8,34 @@ namespace opt { -bool is_scalar (ade::iLeaf* leaf); +bool is_scalar (teq::iLeaf* leaf); // ==== CoordptrT stringification + comparators -std::string to_string (ade::CoordptrT c); +std::string to_string (teq::CoordptrT c); -bool lt (ade::CoordptrT a, ade::CoordptrT b); +bool lt (teq::CoordptrT a, teq::CoordptrT b); -bool is_equal (ade::CoordptrT a, ade::CoordptrT b); +bool is_equal (teq::CoordptrT a, teq::CoordptrT b); // ==== Leaf comparators -bool lt (std::unordered_set priorities, - ade::iLeaf* a, ade::iLeaf* b); +bool lt (std::unordered_set priorities, + teq::iLeaf* a, teq::iLeaf* b); // for any ileaf pair a-b, they are equivalent IFF they are both tagged immutable AND // share same shape and data values -bool is_equal (ade::iLeaf* a, ade::iLeaf* b); +bool is_equal (teq::iLeaf* a, teq::iLeaf* b); // ==== Functor comparators -bool lt (std::unordered_set priorities, - ade::iFunctor* a, ade::iFunctor* b); +bool lt (std::unordered_set priorities, + teq::iFunctor* a, teq::iFunctor* b); // for any functors a-b, they are equivalent IFF a and b are the same opcode AND // share identical function arguments (same children, shapers, and coorders) // order matters UNLESS the op is tagged as commutative -bool is_equal (ade::iFunctor* a, ade::iFunctor* b); +bool is_equal (teq::iFunctor* a, teq::iFunctor* b); } diff --git a/opt/test/test_matcher.cpp b/opt/test/test_matcher.cpp index bba725d7a..d86457da2 100644 --- a/opt/test/test_matcher.cpp +++ b/opt/test/test_matcher.cpp @@ -6,22 +6,22 @@ #include "exam/exam.hpp" -#include "ead/ead.hpp" +#include "eteq/eteq.hpp" #include "opt/voter.hpp" #include "opt/matcher.hpp" #define ELEMENTARY(LABEL, TYPE)opt::VoterArg{\ - LABEL,ade::CoordptrT(),ade::CoordptrT(), TYPE} + LABEL,teq::CoordptrT(),teq::CoordptrT(), TYPE} TEST(MATCHER, OrdrAny) { - std::vector slist = {3, 2}; - ade::Shape shape(slist); - ead::NodeptrT a = ead::make_variable_scalar(2, shape); - ead::NodeptrT b = ead::make_variable_scalar(3, shape); + std::vector slist = {3, 2}; + teq::Shape shape(slist); + eteq::NodeptrT a = eteq::make_variable_scalar(2, shape); + eteq::NodeptrT b = eteq::make_variable_scalar(3, shape); auto f1 = tenncor::pow(a, b); auto f2 = tenncor::pow(a, a); @@ -77,10 +77,10 @@ TEST(MATCHER, OrdrAny) TEST(MATCHER, CommAny) { - std::vector slist = {3, 2}; - ade::Shape shape(slist); - ead::NodeptrT a = ead::make_variable_scalar(2, shape); - ead::NodeptrT b = ead::make_variable_scalar(3, shape); + std::vector slist = {3, 2}; + teq::Shape shape(slist); + eteq::NodeptrT a = eteq::make_variable_scalar(2, shape); + eteq::NodeptrT b = eteq::make_variable_scalar(3, shape); auto f1 = tenncor::mul(a, b); auto f2 = tenncor::mul(a, a); @@ -176,10 +176,10 @@ TEST(MATCHER, CommAny) TEST(MATCHER, Ambiguous_CommAny) { - std::vector slist = {3, 2}; - ade::Shape shape(slist); - ead::NodeptrT a = ead::make_variable_scalar(2, shape); - ead::NodeptrT b = ead::make_variable_scalar(3, shape); + std::vector slist = {3, 2}; + teq::Shape shape(slist); + eteq::NodeptrT a = 
eteq::make_variable_scalar(2, shape); + eteq::NodeptrT b = eteq::make_variable_scalar(3, shape); auto same = tenncor::mul(a, b); auto sub_l = tenncor::mul(same, a); // match against similar and similar2 diff --git a/opt/test/test_opt.cpp b/opt/test/test_opt.cpp index 26fadf351..7ab543aa6 100644 --- a/opt/test/test_opt.cpp +++ b/opt/test/test_opt.cpp @@ -4,33 +4,33 @@ #include "gtest/gtest.h" -#include "dbg/stream/ade_csv.hpp" +#include "dbg/stream/teq_csv.hpp" #include "testutil/tutil.hpp" #include "exam/exam.hpp" -#include "ead/generated/api.hpp" -#include "ead/parse.hpp" -#include "ead/constant.hpp" +#include "eteq/generated/api.hpp" +#include "eteq/parse.hpp" +#include "eteq/constant.hpp" #include "opt/optimize.hpp" TEST(OPTIMIZE, CalcConstants) { - ead::NodeptrT var = ead::convert_to_node( - ead::make_variable_scalar(0, ade::Shape(), + eteq::NodeptrT var = eteq::convert_to_node( + eteq::make_variable_scalar(0, teq::Shape(), "special_var")); - ead::NodeptrT two = - ead::make_constant_scalar(2, ade::Shape()); - ead::NodeptrT three = - ead::make_constant_scalar(3, ade::Shape()); - ead::NodeptrT four = - ead::make_constant_scalar(4, ade::Shape()); + eteq::NodeptrT two = + eteq::make_constant_scalar(2, teq::Shape()); + eteq::NodeptrT three = + eteq::make_constant_scalar(3, teq::Shape()); + eteq::NodeptrT four = + eteq::make_constant_scalar(4, teq::Shape()); - opt::OptCtx empty_rules = ead::parse(""); + opt::OptCtx empty_rules = eteq::parse(""); { auto vfunc = tenncor::sin(var); @@ -88,14 +88,14 @@ TEST(OPTIMIZE, CalcConstants) TEST(OPTIMIZE, PruneZeroSingles) { - ead::NodeptrT var = ead::convert_to_node( - ead::make_variable_scalar(0, ade::Shape(), + eteq::NodeptrT var = eteq::convert_to_node( + eteq::make_variable_scalar(0, teq::Shape(), "special_var")); - ead::NodeptrT zero = - ead::make_constant_scalar(0, ade::Shape()); + eteq::NodeptrT zero = + eteq::make_constant_scalar(0, teq::Shape()); - opt::OptCtx rules = ead::parse_file("cfg/optimizations.rules"); + opt::OptCtx rules = eteq::parse_file("cfg/optimizations.rules"); { auto wunfunc = tenncor::pow(var, zero); @@ -181,15 +181,15 @@ TEST(OPTIMIZE, PruneZeroSingles) TEST(OPTIMIZE, PruneZeroGraph) { - ead::NodeptrT var = ead::convert_to_node( - ead::make_variable_scalar(0, ade::Shape(), "var")); - ead::NodeptrT var2 = ead::convert_to_node( - ead::make_variable_scalar(0, ade::Shape(), "var2")); + eteq::NodeptrT var = eteq::convert_to_node( + eteq::make_variable_scalar(0, teq::Shape(), "var")); + eteq::NodeptrT var2 = eteq::convert_to_node( + eteq::make_variable_scalar(0, teq::Shape(), "var2")); - ead::NodeptrT zero = - ead::make_constant_scalar(0, ade::Shape()); + eteq::NodeptrT zero = + eteq::make_constant_scalar(0, teq::Shape()); - opt::OptCtx rules = ead::parse_file("cfg/optimizations.rules"); + opt::OptCtx rules = eteq::parse_file("cfg/optimizations.rules"); auto got1 = tenncor::cos(zero); auto got3 = tenncor::add(zero, var2); @@ -233,14 +233,14 @@ TEST(OPTIMIZE, PruneZeroGraph) TEST(OPTIMIZE, PruneOneSingles) { - ead::NodeptrT var = ead::convert_to_node( - ead::make_variable_scalar(0, ade::Shape(), + eteq::NodeptrT var = eteq::convert_to_node( + eteq::make_variable_scalar(0, teq::Shape(), "special_var")); - ead::NodeptrT one = - ead::make_constant_scalar(1, ade::Shape()); + eteq::NodeptrT one = + eteq::make_constant_scalar(1, teq::Shape()); - opt::OptCtx rules = ead::parse_file("cfg/optimizations.rules"); + opt::OptCtx rules = eteq::parse_file("cfg/optimizations.rules"); { auto vfunc = tenncor::pow(var, one); @@ -303,14 +303,14 @@ 
TEST(OPTIMIZE, PruneOneSingles) TEST(OPTIMIZE, PruneOneGraph) { - ead::NodeptrT var = ead::convert_to_node( - ead::make_variable_scalar(0, ade::Shape(), + eteq::NodeptrT var = eteq::convert_to_node( + eteq::make_variable_scalar(0, teq::Shape(), "var")); - ead::NodeptrT one = - ead::make_constant_scalar(1, ade::Shape()); + eteq::NodeptrT one = + eteq::make_constant_scalar(1, teq::Shape()); - opt::OptCtx rules = ead::parse_file("cfg/optimizations.rules"); + opt::OptCtx rules = eteq::parse_file("cfg/optimizations.rules"); auto got0 = tenncor::log(one); auto got1 = tenncor::sqrt(one); @@ -343,12 +343,12 @@ TEST(OPTIMIZE, PruneOneGraph) TEST(OPTIMIZE, PruneOpSingles) { - ead::NodeptrT zero = ead::convert_to_node( - ead::make_variable_scalar(0, ade::Shape(), "special_var0")); - ead::NodeptrT one = ead::convert_to_node( - ead::make_variable_scalar(1, ade::Shape({2, 3}), "special_var")); + eteq::NodeptrT zero = eteq::convert_to_node( + eteq::make_variable_scalar(0, teq::Shape(), "special_var0")); + eteq::NodeptrT one = eteq::convert_to_node( + eteq::make_variable_scalar(1, teq::Shape({2, 3}), "special_var")); - opt::OptCtx rules = ead::parse_file("cfg/optimizations.rules"); + opt::OptCtx rules = eteq::parse_file("cfg/optimizations.rules"); // merge redundent double reduced argument for empty shape { @@ -403,14 +403,14 @@ TEST(OPTIMIZE, PruneOpSingles) TEST(OPTIMIZE, PruneOpGraph) { - ead::NodeptrT zero = ead::convert_to_node( - ead::make_variable_scalar(0, ade::Shape({3, 4}), "special_var0")); - ead::NodeptrT one = ead::convert_to_node( - ead::make_variable_scalar(1, ade::Shape(), "special_var")); - ead::NodeptrT two = ead::convert_to_node( - ead::make_variable_scalar(2, ade::Shape(), "special_var2")); - ead::NodeptrT three = ead::convert_to_node( - ead::make_variable_scalar(3, ade::Shape(), "special_var3")); + eteq::NodeptrT zero = eteq::convert_to_node( + eteq::make_variable_scalar(0, teq::Shape({3, 4}), "special_var0")); + eteq::NodeptrT one = eteq::convert_to_node( + eteq::make_variable_scalar(1, teq::Shape(), "special_var")); + eteq::NodeptrT two = eteq::convert_to_node( + eteq::make_variable_scalar(2, teq::Shape(), "special_var2")); + eteq::NodeptrT three = eteq::convert_to_node( + eteq::make_variable_scalar(3, teq::Shape(), "special_var3")); auto got1 = tenncor::cos(three); auto got3 = tenncor::mul(tenncor::mul(one, three), two); @@ -424,7 +424,7 @@ TEST(OPTIMIZE, PruneOpGraph) auto m = tenncor::min(tenncor::min(got22, got1), tenncor::min(too, got11)); - opt::OptCtx rules = ead::parse_file("cfg/optimizations.rules"); + opt::OptCtx rules = eteq::parse_file("cfg/optimizations.rules"); auto opted = opt::optimize({ tenncor::sub(tenncor::min(m, tenncor::div(got3, gotn1)), got2)->get_tensor(), @@ -475,12 +475,12 @@ TEST(OPTIMIZE, PruneOpGraph) TEST(OPTIMIZE, GroupSingles) { - ead::NodeptrT one = ead::convert_to_node( - ead::make_variable_scalar(1, ade::Shape(), "special_var")); - ead::NodeptrT two = ead::convert_to_node( - ead::make_variable_scalar(2, ade::Shape(), "special_var2")); + eteq::NodeptrT one = eteq::convert_to_node( + eteq::make_variable_scalar(1, teq::Shape(), "special_var")); + eteq::NodeptrT two = eteq::convert_to_node( + eteq::make_variable_scalar(2, teq::Shape(), "special_var2")); - opt::OptCtx rules = ead::parse_file("cfg/optimizations.rules"); + opt::OptCtx rules = eteq::parse_file("cfg/optimizations.rules"); // mul and div and next to each level { @@ -509,14 +509,14 @@ TEST(OPTIMIZE, GroupSingles) TEST(OPTIMIZE, ReuseOpGraph) { - ead::NodeptrT zero = ead::convert_to_node( - 
ead::make_variable_scalar(0, ade::Shape())); - ead::NodeptrT one = ead::convert_to_node( - ead::make_variable_scalar(1, ade::Shape())); - ead::NodeptrT two = ead::convert_to_node( - ead::make_variable_scalar(2, ade::Shape())); - - ead::NodeptrT root; + eteq::NodeptrT zero = eteq::convert_to_node( + eteq::make_variable_scalar(0, teq::Shape())); + eteq::NodeptrT one = eteq::convert_to_node( + eteq::make_variable_scalar(1, teq::Shape())); + eteq::NodeptrT two = eteq::convert_to_node( + eteq::make_variable_scalar(2, teq::Shape())); + + eteq::NodeptrT root; { auto got1 = tenncor::cos(zero); auto got3 = tenncor::add(tenncor::add(one, zero), two); @@ -531,14 +531,14 @@ TEST(OPTIMIZE, ReuseOpGraph) root = tenncor::sub(tenncor::pow(m, tenncor::div(got3, gotn1)), got2); } - ead::NodeptrT subroot; + eteq::NodeptrT subroot; { auto other_got1 = tenncor::cos(zero); auto got22 = tenncor::max(two, zero); subroot = tenncor::mul(other_got1, got22); } - ead::NodeptrT copyroot; + eteq::NodeptrT copyroot; { auto got1 = tenncor::cos(zero); auto got3 = tenncor::add(tenncor::add(one, zero), two); @@ -553,7 +553,7 @@ TEST(OPTIMIZE, ReuseOpGraph) copyroot = tenncor::sub(tenncor::pow(m, tenncor::div(got3, gotn1)), got2); } - ead::NodeptrT splitroot; + eteq::NodeptrT splitroot; { auto got1 = tenncor::cos(zero); auto got3 = tenncor::add(tenncor::add(one, zero), two); @@ -567,7 +567,7 @@ TEST(OPTIMIZE, ReuseOpGraph) splitroot = tenncor::mul(tenncor::mul(got11, got1), tenncor::mul(too, got3)); } - opt::OptCtx empty_rules = ead::parse(""); + opt::OptCtx empty_rules = eteq::parse(""); auto opted = opt::optimize({ subroot->get_tensor(), diff --git a/pbm/README_PBM.md b/pbm/README_PBM.md index 1adf3e7da..953433062 100644 --- a/pbm/README_PBM.md +++ b/pbm/README_PBM.md @@ -1,8 +1,8 @@ -# PBM (Protobuf Marshaller) +# PBM (ProtoBuf Marshaller) -Serialize ADE graphs created by top-level code in protobuf format. +Serialize TEQ graphs created by top-level code in protobuf format. -Saving and loading requires data serialization functors as parameters. This parameterization is to defer data formatting responsibilities to the library implementing ADE. +Saving and loading requires data serialization functors as parameters. This parameterization is to defer data formatting responsibilities to the library implementing TEQ. 
## Why Protobuf diff --git a/pbm/data.hpp b/pbm/data.hpp index 0ad99c77f..9234d5a7b 100644 --- a/pbm/data.hpp +++ b/pbm/data.hpp @@ -8,7 +8,7 @@ #include -#include "ade/ade.hpp" +#include "teq/teq.hpp" #include "tag/tag.hpp" @@ -21,30 +21,30 @@ namespace pbm { /// Tensptr vector type -using TensT = std::vector; +using TensT = std::vector; /// String list type used for paths using StringsT = std::list; struct iSaver { - virtual std::string save_leaf (ade::iLeaf* leaf) = 0; + virtual std::string save_leaf (teq::iLeaf* leaf) = 0; - virtual std::vector save_shaper (const ade::CoordptrT& mapper) = 0; + virtual std::vector save_shaper (const teq::CoordptrT& mapper) = 0; - virtual std::vector save_coorder (const ade::CoordptrT& mapper) = 0; + virtual std::vector save_coorder (const teq::CoordptrT& mapper) = 0; }; struct iLoader { - virtual ade::TensptrT generate_leaf (const char* data, ade::Shape shape, + virtual teq::TensptrT generate_leaf (const char* data, teq::Shape shape, std::string typelabel, std::string label, bool is_const) = 0; - virtual ade::TensptrT generate_func (std::string opname, ade::ArgsT args) = 0; + virtual teq::TensptrT generate_func (std::string opname, teq::ArgsT args) = 0; - virtual ade::CoordptrT generate_shaper (std::vector coord) = 0; + virtual teq::CoordptrT generate_shaper (std::vector coord) = 0; - virtual ade::CoordptrT generate_coorder ( + virtual teq::CoordptrT generate_coorder ( std::string opname, std::vector coord) = 0; }; diff --git a/pbm/load.hpp b/pbm/load.hpp index b773ca943..ebce6220a 100644 --- a/pbm/load.hpp +++ b/pbm/load.hpp @@ -61,20 +61,20 @@ struct PathedTens final } /// Return tensor associated with input path if found otherwise nullptr - ade::TensptrT get_labelled (StringsT path) const + teq::TensptrT get_labelled (StringsT path) const { return get_labelled(path.begin(), path.end()); } /// Set input path to reference tensor - void set_labelled (StringsT path, ade::TensptrT tens) + void set_labelled (StringsT path, teq::TensptrT tens) { set_labelled(path.begin(), path.end(), tens); } /// Return tensor associated with path between iterators begin and end /// if found otherwise nullptr - ade::TensptrT get_labelled ( + teq::TensptrT get_labelled ( StringsT::iterator path_begin, StringsT::iterator path_end) const { @@ -104,7 +104,7 @@ struct PathedTens final /// Set path between iterators begin and end to reference tensor void set_labelled (StringsT::iterator path_begin, - StringsT::iterator path_end, ade::TensptrT tens) + StringsT::iterator path_end, teq::TensptrT tens) { if (path_begin == path_end) { @@ -135,14 +135,14 @@ struct PathedTens final std::unordered_map children_; /// Map of labels to tensor leaves - std::unordered_map tens_; + std::unordered_map tens_; }; -/// Contains all information necessary to recreate labelled ADE graph +/// Contains all information necessary to recreate labelled TEQ graph struct GraphInfo final { /// Set of all roots (Tensptrs without any parent) - std::unordered_set roots_; + std::unordered_set roots_; /// Labelled tensors PathedTens tens_; @@ -158,14 +158,14 @@ void load_graph (GraphInfo& out, const cortenn::Graph& in) TensT invec; for (const cortenn::Node& node : nodes) { - ade::TensptrT tens; + teq::TensptrT tens; if (node.has_source()) { const cortenn::Source& source = node.source(); auto& slist = source.shape(); - ade::Shape shape(std::vector(slist.begin(), slist.end())); + teq::Shape shape(std::vector(slist.begin(), slist.end())); std::string data = source.data(); - ade::TensptrT leaf = 
loader.generate_leaf(data.c_str(), + teq::TensptrT leaf = loader.generate_leaf(data.c_str(), shape, source.typelabel(), node.label(), source.is_const()); invec.push_back(leaf); tens = leaf; @@ -175,21 +175,21 @@ void load_graph (GraphInfo& out, const cortenn::Graph& in) cortenn::Functor func = node.functor(); auto nodeargs = func.args(); std::string opname = func.opname(); - ade::ArgsT args; + teq::ArgsT args; for (auto nodearg : nodeargs) { - ade::TensptrT arg = invec[nodearg.idx()]; + teq::TensptrT arg = invec[nodearg.idx()]; auto shaper_pb = nodearg.shaper(); auto coorder_pb = nodearg.coord(); std::vector shaper_vec(shaper_pb.begin(), shaper_pb.end()); std::vector coord_vec(coorder_pb.begin(), coorder_pb.end()); - ade::CoordptrT shaper = loader.generate_shaper(shaper_vec); - ade::CoordptrT coord = loader.generate_coorder(opname, coord_vec); + teq::CoordptrT shaper = loader.generate_shaper(shaper_vec); + teq::CoordptrT coord = loader.generate_coorder(opname, coord_vec); args.push_back( - ade::FuncArg(arg, shaper, nodearg.fwd(), coord)); + teq::FuncArg(arg, shaper, nodearg.fwd(), coord)); out.roots_.erase(invec[nodearg.idx()]); } - ade::TensptrT f = loader.generate_func(opname, args); + teq::TensptrT f = loader.generate_func(opname, args); invec.push_back(f); tens = f; } diff --git a/pbm/save.hpp b/pbm/save.hpp index 79b146129..1daaba592 100644 --- a/pbm/save.hpp +++ b/pbm/save.hpp @@ -9,8 +9,8 @@ #include #include -#include "ade/traveler.hpp" -#include "ade/functor.hpp" +#include "teq/traveler.hpp" +#include "teq/functor.hpp" #include "pbm/data.hpp" @@ -21,19 +21,19 @@ namespace pbm { /// Map Tensptrs to a string path type -using PathedMapT = std::unordered_map; +using PathedMapT = std::unordered_map; /// Graph serialization traveler template ::value>::type* = nullptr> -struct GraphSaver final : public ade::iTraveler +struct GraphSaver final : public teq::iTraveler { GraphSaver (tag::TagRegistry& registry = tag::get_reg()) : registry_(registry) {} /// Implementation of iTraveler - void visit (ade::iLeaf* leaf) override + void visit (teq::iLeaf* leaf) override { if (false == estd::has(visited_, leaf)) { @@ -44,7 +44,7 @@ struct GraphSaver final : public ade::iTraveler } /// Implementation of iTraveler - void visit (ade::iFunctor* func) override + void visit (teq::iFunctor* func) override { if (false == estd::has(visited_, func)) { @@ -52,7 +52,7 @@ struct GraphSaver final : public ade::iTraveler funcs_.push_back(func); visited_.emplace(func); - ade::ArgsT children = func->get_children(); + teq::ArgsT children = func->get_children(); for (auto& child : children) { child.get_tensor()->accept(*this); @@ -67,20 +67,20 @@ struct GraphSaver final : public ade::iTraveler // this ensures every children of a node appears before the parent, // as is the order of node creations funcs_.sort( - [&](ade::iTensor* a, ade::iTensor* b) + [&](teq::iTensor* a, teq::iTensor* b) { return stat.graphsize_[a].upper_ < stat.graphsize_[b].upper_; }); - std::vector funcs(funcs_.begin(), funcs_.end()); - std::vector leaves(leaves_.begin(), leaves_.end()); + std::vector funcs(funcs_.begin(), funcs_.end()); + std::vector leaves(leaves_.begin(), leaves_.end()); // all nodes in leaf appear before funcs - std::unordered_map ordermap; + std::unordered_map ordermap; size_t nleaves = leaves.size(); for (size_t i = 0; i < nleaves; ++i) { - ade::iLeaf* tens = leaves[i]; + teq::iLeaf* tens = leaves[i]; ordermap[tens] = i; cortenn::Node* pb_node = out.add_nodes(); @@ -90,20 +90,20 @@ struct GraphSaver final : public 
ade::iTraveler } for (size_t i = 0, n = funcs.size(); i < n; ++i) { - ade::iFunctor* f = funcs[i]; + teq::iFunctor* f = funcs[i]; ordermap[f] = nleaves + i; cortenn::Node* pb_node = out.add_nodes(); pb_node->set_label(f->to_string()); tag_node(pb_node, f, registry_); cortenn::Functor* func = pb_node->mutable_functor(); - ade::Opcode opcode = f->get_opcode(); + teq::Opcode opcode = f->get_opcode(); func->set_opname(opcode.name_); - const ade::ArgsT& children = f->get_children(); + const teq::ArgsT& children = f->get_children(); for (auto& child : children) { cortenn::NodeArg* arg = func->add_args(); - ade::iTensor* tens = child.get_tensor().get(); + teq::iTensor* tens = child.get_tensor().get(); arg->set_idx(ordermap[tens]); std::vector shaper = saver_.save_shaper(child.get_shaper()); @@ -121,21 +121,21 @@ struct GraphSaver final : public ade::iTraveler } /// List of leaves visited (left to right) - std::list leaves_; + std::list leaves_; /// List of functions visited (by depth-first) - std::list funcs_; + std::list funcs_; /// Visited nodes - std::unordered_set visited_; + std::unordered_set visited_; /// Internal traveler - ade::GraphStat stat; + teq::GraphStat stat; private: - void save_data (cortenn::Source& out, ade::iLeaf* in) + void save_data (cortenn::Source& out, teq::iLeaf* in) { - const ade::Shape& shape = in->shape(); + const teq::Shape& shape = in->shape(); google::protobuf::RepeatedField slist( shape.begin(), shape.end()); out.mutable_shape()->Swap(&slist); @@ -145,7 +145,7 @@ struct GraphSaver final : public ade::iTraveler } void tag_node (cortenn::Node* node, - ade::iTensor* tens, tag::TagRegistry& registry) + teq::iTensor* tens, tag::TagRegistry& registry) { google::protobuf::Map* tags = node->mutable_tags(); diff --git a/pbm/test/common.hpp b/pbm/test/common.hpp index 7e350517f..a552b4c7d 100644 --- a/pbm/test/common.hpp +++ b/pbm/test/common.hpp @@ -1,15 +1,15 @@ -#include "ade/ileaf.hpp" +#include "teq/ileaf.hpp" -#ifndef ADE_TEST_COMMON_HPP -#define ADE_TEST_COMMON_HPP +#ifndef PBM_TEST_COMMON_HPP +#define PBM_TEST_COMMON_HPP -struct MockTensor final : public ade::iLeaf +struct MockTensor final : public teq::iLeaf { MockTensor (void) = default; - MockTensor (ade::Shape shape) : shape_(shape) {} + MockTensor (teq::Shape shape) : shape_(shape) {} - const ade::Shape& shape (void) const override + const teq::Shape& shape (void) const override { return shape_; } @@ -49,7 +49,7 @@ struct MockTensor final : public ade::iLeaf return true; } - ade::Shape shape_; + teq::Shape shape_; }; -#endif // ADE_TEST_COMMON_HPP +#endif // PBM_TEST_COMMON_HPP diff --git a/pbm/test/test_load.cpp b/pbm/test/test_load.cpp index 2736befaf..2df6b9b67 100644 --- a/pbm/test/test_load.cpp +++ b/pbm/test/test_load.cpp @@ -8,7 +8,7 @@ #include "exam/exam.hpp" -#include "dbg/stream/ade.hpp" +#include "dbg/stream/teq.hpp" #include "tag/prop.hpp" @@ -22,37 +22,37 @@ const std::string testdir = "models/test"; struct TestLoader : public pbm::iLoader { - ade::TensptrT generate_leaf (const char* pb, ade::Shape shape, + teq::TensptrT generate_leaf (const char* pb, teq::Shape shape, std::string typelabel, std::string label, bool is_const) override { - return ade::TensptrT(new MockTensor(shape)); + return teq::TensptrT(new MockTensor(shape)); } - ade::TensptrT generate_func (std::string opname, ade::ArgsT args) override + teq::TensptrT generate_func (std::string opname, teq::ArgsT args) override { - return ade::TensptrT(ade::Functor::get(ade::Opcode{opname, 0}, args)); + return 
teq::TensptrT(teq::Functor::get(teq::Opcode{opname, 0}, args)); } - ade::CoordptrT generate_shaper (std::vector coord) override + teq::CoordptrT generate_shaper (std::vector coord) override { - if (ade::mat_dim * ade::mat_dim != coord.size()) + if (teq::mat_dim * teq::mat_dim != coord.size()) { logs::fatal("cannot deserialize non-matrix coordinate map"); } - return std::make_shared( - [&](ade::MatrixT fwd) + return std::make_shared( + [&](teq::MatrixT fwd) { - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { - fwd[i][j] = coord[i * ade::mat_dim + j]; + fwd[i][j] = coord[i * teq::mat_dim + j]; } } }); } - ade::CoordptrT generate_coorder ( + teq::CoordptrT generate_coorder ( std::string opname, std::vector coord) override { return generate_shaper(coord); @@ -78,7 +78,7 @@ TEST(LOAD, LoadGraph) tag::Query q; std::vector root_props; - std::unordered_map propdtens; + std::unordered_map propdtens; for (auto tens : graphinfo.roots_) { tens->accept(q); @@ -121,8 +121,8 @@ TEST(LOAD, LoadGraph) ASSERT_HAS(propdtens, "subtree_dest"); ASSERT_HAS(propdtens, "subtree2_dest"); - ade::TensptrT tree1 = propdtens["subtree_dest"]; - ade::TensptrT tree2 = propdtens["subtree2_dest"]; + teq::TensptrT tree1 = propdtens["subtree_dest"]; + teq::TensptrT tree2 = propdtens["subtree2_dest"]; ASSERT_NE(nullptr, tree1); ASSERT_NE(nullptr, tree2); diff --git a/pbm/test/test_save.cpp b/pbm/test/test_save.cpp index a3918fda2..41ac226b0 100644 --- a/pbm/test/test_save.cpp +++ b/pbm/test/test_save.cpp @@ -8,7 +8,7 @@ #include "gtest/gtest.h" -#include "ade/functor.hpp" +#include "teq/functor.hpp" #include "pbm/save.hpp" @@ -22,20 +22,20 @@ const std::string testdir = "models/test"; struct TestSaver : public pbm::iSaver { - std::string save_leaf (ade::iLeaf* leaf) override + std::string save_leaf (teq::iLeaf* leaf) override { return std::string(leaf->shape().n_elems(), 0); } - std::vector save_shaper (const ade::CoordptrT& mapper) override + std::vector save_shaper (const teq::CoordptrT& mapper) override { std::vector out; mapper->access( - [&out](const ade::MatrixT& mat) + [&out](const teq::MatrixT& mat) { - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { out.push_back(mat[i][j]); } @@ -44,7 +44,7 @@ struct TestSaver : public pbm::iSaver return out; } - std::vector save_coorder (const ade::CoordptrT& mapper) override + std::vector save_coorder (const teq::CoordptrT& mapper) override { return save_shaper(mapper); } @@ -58,41 +58,41 @@ TEST(SAVE, SaveGraph) { cortenn::Graph graph; - std::vector roots; + std::vector roots; // subtree one - ade::Shape shape({3, 7}); - ade::TensptrT osrc(new MockTensor(shape)); + teq::Shape shape({3, 7}); + teq::TensptrT osrc(new MockTensor(shape)); - ade::Shape shape2({7, 3}); - ade::TensptrT osrc2(new MockTensor(shape2)); + teq::Shape shape2({7, 3}); + teq::TensptrT osrc2(new MockTensor(shape2)); auto& preg = tag::get_property_reg(); preg.property_tag(osrc, "osrc"); preg.property_tag(osrc2, "osrc2"); { - ade::TensptrT src(new MockTensor(shape)); - - ade::Shape shape3({3, 1, 7}); - ade::TensptrT src2(new MockTensor(shape3)); - - ade::TensptrT dest(ade::Functor::get(ade::Opcode{"-", 0}, { - {src2, ade::identity}, - {ade::TensptrT(ade::Functor::get(ade::Opcode{"@", 1}, { - 
{ade::TensptrT(ade::Functor::get(ade::Opcode{"/", 2}, { - {ade::TensptrT(ade::Functor::get(ade::Opcode{"neg", 3}, { - {osrc, ade::identity}, - })), ade::identity}, - {ade::TensptrT(ade::Functor::get(ade::Opcode{"+", 4}, { - {ade::TensptrT( - ade::Functor::get(ade::Opcode{"sin", 5}, { - {src, ade::identity}})), ade::identity}, - {src, ade::identity}, - })), ade::identity} - })), ade::permute({1, 0})}, - {osrc2, ade::identity} - })), ade::permute({1, 2, 0})}, + teq::TensptrT src(new MockTensor(shape)); + + teq::Shape shape3({3, 1, 7}); + teq::TensptrT src2(new MockTensor(shape3)); + + teq::TensptrT dest(teq::Functor::get(teq::Opcode{"-", 0}, { + {src2, teq::identity}, + {teq::TensptrT(teq::Functor::get(teq::Opcode{"@", 1}, { + {teq::TensptrT(teq::Functor::get(teq::Opcode{"/", 2}, { + {teq::TensptrT(teq::Functor::get(teq::Opcode{"neg", 3}, { + {osrc, teq::identity}, + })), teq::identity}, + {teq::TensptrT(teq::Functor::get(teq::Opcode{"+", 4}, { + {teq::TensptrT( + teq::Functor::get(teq::Opcode{"sin", 5}, { + {src, teq::identity}})), teq::identity}, + {src, teq::identity}, + })), teq::identity} + })), teq::permute({1, 0})}, + {osrc2, teq::identity} + })), teq::permute({1, 2, 0})}, })); roots.push_back(dest); @@ -103,26 +103,26 @@ TEST(SAVE, SaveGraph) // subtree two { - ade::Shape mshape({3, 3}); - ade::TensptrT src(new MockTensor(mshape)); - - ade::TensptrT src2(new MockTensor(mshape)); - - ade::TensptrT src3(new MockTensor(mshape)); - - ade::TensptrT dest(ade::Functor::get(ade::Opcode{"-", 0}, { - {src, ade::identity}, - {ade::TensptrT(ade::Functor::get(ade::Opcode{"*", 6}, { - {ade::TensptrT(ade::Functor::get(ade::Opcode{"abs", 7}, { - {src, ade::identity}, - })), ade::identity}, - {ade::TensptrT(ade::Functor::get(ade::Opcode{"exp", 8}, { - {src2, ade::identity}, - })), ade::identity}, - {ade::TensptrT(ade::Functor::get(ade::Opcode{"neg", 3}, { - {src3, ade::identity}, - })), ade::identity}, - })), ade::identity}, + teq::Shape mshape({3, 3}); + teq::TensptrT src(new MockTensor(mshape)); + + teq::TensptrT src2(new MockTensor(mshape)); + + teq::TensptrT src3(new MockTensor(mshape)); + + teq::TensptrT dest(teq::Functor::get(teq::Opcode{"-", 0}, { + {src, teq::identity}, + {teq::TensptrT(teq::Functor::get(teq::Opcode{"*", 6}, { + {teq::TensptrT(teq::Functor::get(teq::Opcode{"abs", 7}, { + {src, teq::identity}, + })), teq::identity}, + {teq::TensptrT(teq::Functor::get(teq::Opcode{"exp", 8}, { + {src2, teq::identity}, + })), teq::identity}, + {teq::TensptrT(teq::Functor::get(teq::Opcode{"neg", 3}, { + {src3, teq::identity}, + })), teq::identity}, + })), teq::identity}, })); roots.push_back(dest); diff --git a/perf/README_PERF.md b/perf/README_PERF.md new file mode 100644 index 000000000..73ab30147 --- /dev/null +++ b/perf/README_PERF.md @@ -0,0 +1,3 @@ +# PERF (PERFormance measuring) + +Utility for measuring runtime diff --git a/pll/README_CCE.md b/pll/README_CCE.md deleted file mode 100644 index f2ab69558..000000000 --- a/pll/README_CCE.md +++ /dev/null @@ -1 +0,0 @@ -# ConCurrent EAD diff --git a/pll/partition.hpp b/pll/partition.hpp deleted file mode 100644 index 45c6b2e8a..000000000 --- a/pll/partition.hpp +++ /dev/null @@ -1,17 +0,0 @@ -#include "ade/traveler.hpp" - -#ifndef CCE_PARTITION_HPP -#define CCE_PARTITION_HPP - -namespace pll -{ - -using PartGroupsT = std::vector>; - -using OpWeightT = std::unordered_map; - -PartGroupsT k_partition (ade::TensT roots, size_t k, OpWeightT weights = OpWeightT()); - -} - -#endif // CCE_PARTITION_HPP diff --git a/pll/python/pll.cpp b/pll/python/pll.cpp 
deleted file mode 100644 index 207b61c18..000000000 --- a/pll/python/pll.cpp +++ /dev/null @@ -1,35 +0,0 @@ -#include "pybind11/pybind11.h" -#include "pybind11/numpy.h" -#include "pybind11/stl.h" - -#include "ead/generated/pyapi.hpp" -#include "ead/parse.hpp" - -#include "pll/session.hpp" - -namespace py = pybind11; - -PYBIND11_MODULE(pll, m) -{ - m.doc() = "pll session"; - - // ==== session ==== - auto isess = (py::class_) - py::module::import("ead.ead").attr("iSession"); - py::class_ session(m, "Session", isess); - - py::implicitly_convertible(); - session - .def(py::init(), - py::arg("nthread") = 2, - py::arg("weights") = pll::OpWeightT()) - .def("optimize", - [](py::object self, std::string filename) - { - auto sess = self.cast(); - opt::OptCtx rules = ead::parse_file(filename); - sess->optimize(rules); - }, - py::arg("filename") = "cfg/optimizations.rules", - "Optimize using rules for specified filename"); -} diff --git a/pll/rtscale.cpp b/pll/rtscale.cpp deleted file mode 100644 index 069713506..000000000 --- a/pll/rtscale.cpp +++ /dev/null @@ -1,233 +0,0 @@ -// Weigh the runtime of each opcode -#include -#include -#include - -#include "flag/flag.hpp" - -#include "ead/generated/api.hpp" -#include "ead/generated/opcode.hpp" -#include "ead/functor.hpp" - -#include "pll/weights.pb.h" - -#define TIME(action)\ -std::chrono::high_resolution_clock::time_point start =\ - std::chrono::high_resolution_clock::now();\ -action;\ -stat = std::chrono::duration_cast(\ - std::chrono::high_resolution_clock::now() - start).count(); - -double softplus (double x) -{ - return std::log(1 + std::exp(x)); -} - -int main (int argc, const char** argv) -{ - std::string writepath; - flag::FlagSet flags("rt_anubis"); - flags.add_flags() - ("target", flag::opt::value(&writepath), - "filename of json to write weights to"); - - if (false == flags.parse(argc, argv)) - { - return 1; - } - - logs::get_logger().set_log_level(logs::INFO); - - std::unordered_map stats; - size_t mean_stat = 0;//, - // max_stat = 0, - // min_stat = std::numeric_limits::max(); - for (size_t i = 0; i < age::_N_GENERATED_OPCODES; ++i) - { - size_t stat; - auto opcode = (age::_GENERATED_OPCODE) i; - ade::Opcode op{age::name_op(opcode), opcode}; - logs::infof("weighing operation %s", op.name_.c_str()); - switch (i) - { - // elementary unary - case age::ABS: - case age::NEG: - case age::SIN: - case age::COS: - case age::TAN: - case age::EXP: - case age::LOG: - case age::SQRT: - case age::ROUND: - case age::SIGMOID: - case age::SIGMOID_GRAD: - case age::TANH: - case age::SQUARE: - case age::CUBE: - { - auto var = ead::make_constant_scalar( - 0.5, ade::Shape({56, 57, 58})); - auto f = ead::make_functor(op, { - ead::identity_map(var)}); - TIME(f->update()) - } - break; - - // elementary binary - case age::POW: - case age::ADD: - case age::SUB: - case age::MUL: - case age::DIV: - case age::MIN: - case age::MAX: - case age::EQ: - case age::NEQ: - case age::LT: - case age::GT: - case age::RAND_UNIF: - { - auto var = ead::make_constant_scalar( - 0.5, ade::Shape({56, 57, 58})); - auto f = ead::make_functor(op, { - ead::identity_map(var), ead::identity_map(var)}); - TIME(f->update()) - } - break; - - // reductions - case age::REDUCE_SUM: - case age::REDUCE_PROD: - case age::REDUCE_MIN: - case age::REDUCE_MAX: - { - auto var = ead::make_constant_scalar( - 0.5, ade::Shape({56, 57, 58})); - auto f = ead::make_functor(op, { - ead::reduce_map(var, 1, 1)}); - TIME(f->update()) - } - break; - - // other stuff - case age::PERMUTE: - { - auto var = 
ead::make_constant_scalar( - 0.5, ade::Shape({56, 57, 58})); - auto f = ead::make_functor(op, { - ead::permute_map(var, {2, 0, 1})}); - TIME(f->update()) - } - break; - - case age::EXTEND: - { - auto var = ead::make_constant_scalar( - 0.5, ade::Shape({56, 58})); - auto f = ead::make_functor(op, { - ead::extend_map(var, 2, {57})}); - TIME(f->update()) - } - break; - - case age::SLICE: - { - auto var = ead::make_constant_scalar( - 0.5, ade::Shape({56, 57, 58})); - auto f = ead::make_functor(op, { - ead::slice_map(var, 2, 2, 2)}); - TIME(f->update()) - } - break; - - case age::MATMUL: - { - auto a = ead::make_constant_scalar( - 0.3, ade::Shape({253, 255})); - auto b = ead::make_constant_scalar( - 0.6, ade::Shape({254, 253})); - auto f = tenncor::matmul(a, b); - TIME(f->update()) - } - break; - - case age::CONV: - { - auto img = ead::make_constant_scalar( - 0.3, ade::Shape({254, 255})); - auto kern = ead::make_constant_scalar( - 0.6, ade::Shape({5, 7})); - auto f = tenncor::convolution(img, kern, {0, 1}); - TIME(f->update()) - } - break; - - case age::PAD: - { - auto var = ead::make_constant_scalar( - 0.5, ade::Shape({56, 57, 58})); - auto f = ead::make_functor(op, { - ead::pad_map(var, {3, 4}, 2)}); - TIME(f->update()) - } - break; - - case age::SELECT: - { - ade::Shape shape({56, 57, 58}); - size_t n = shape.n_elems(); - std::vector data; - data.reserve(n); - for (size_t i = 0; i < n; ++i) - { - data.push_back(i % 2); - } - auto cond = ead::make_constant(data.data(), shape); - auto a = ead::make_constant_scalar(0.3, shape); - auto b = ead::make_constant_scalar(0.6, shape); - auto f = tenncor::if_then_else(cond, a, b); - TIME(f->update()) - } - break; - - case age::CONV_IMG_GRAD: - case age::CONV_KRN_GRAD: - default: - continue; - } - mean_stat += stat; - // max_stat = std::max(max_stat, stat); - // min_stat = std::min(min_stat, stat); - stats.emplace(op.name_, stat); - } - - double mean = (double) mean_stat / stats.size(); - - // normalize stats by mean - weights::OpWeights opweights; - opweights.set_label("ead_weights"); - ::google::protobuf::Map< ::std::string,double>* weights = - opweights.mutable_weights(); - for (auto& op : stats) - { - double value = softplus((op.second - mean) / ( - mean + std::numeric_limits::epsilon())); - weights->insert({op.first, value}); - } - - logs::infof("writing to %s", writepath.c_str()); - std::fstream out(writepath, - std::ios::out | std::ios::trunc | std::ios::binary); - if (out.is_open()) - { - logs::infof("opened %s", writepath.c_str()); - if (opweights.SerializeToOstream(&out)) - { - logs::infof("done writing to %s", writepath.c_str()); - } - out.close(); - } - - return 0; -} diff --git a/rocnnet/BUILD.bazel b/rocnnet/BUILD.bazel index 9122c8786..99604ca2a 100644 --- a/rocnnet/BUILD.bazel +++ b/rocnnet/BUILD.bazel @@ -17,7 +17,7 @@ pybind_library( name = "rocnnet_py", cc_srcs = glob(["python/*.cpp"]), cc_deps = ["//rocnnet/trainer:trainer"], - py_deps = ["//ead:ead_py"], + py_deps = ["//eteq:eteq_py"], visibility = ["//visibility:public"], ) @@ -26,7 +26,7 @@ pybind_library( py_binary( name = "comparison_matmul", srcs = ["comparison/comparison_matmul.py"], - deps = ["//ead:ead_py"], + deps = ["//eteq:eteq_py"], ) py_binary( @@ -45,7 +45,7 @@ py_binary( py_binary( name = "comparison_mlp_grad_pll", srcs = ["comparison/comparison_mlp_grad_pll.py"], - deps = [":rocnnet_py", "//pll:pll_py"], + deps = [":rocnnet_py", "//ccur:ccur_py"], data = ["//cfg:optimizations"], ) diff --git a/rocnnet/comparison/comparison_matmul.py 
b/rocnnet/comparison/comparison_matmul.py index c88b90fa8..cc9d958e6 100644 --- a/rocnnet/comparison/comparison_matmul.py +++ b/rocnnet/comparison/comparison_matmul.py @@ -4,8 +4,8 @@ import matplotlib.pyplot as plt import tensorflow as tf -import ead.tenncor as tc -import ead.ead as ead +import eteq.tenncor as tc +import eteq.eteq as eteq matrix_dims = [ 25, @@ -32,8 +32,8 @@ data = np.random.rand(*shape) data2 = np.random.rand(*shape) - var = ead.variable(data, 'var') - var2 = ead.variable(data2, 'var2') + var = eteq.variable(data, 'var') + var2 = eteq.variable(data2, 'var2') tf_var = tf.Variable(data) tf_var2 = tf.Variable(data2) @@ -52,7 +52,7 @@ print(data.dot(data2)) np_dur = time.time() - start - sess = ead.Session() + sess = eteq.Session() sess.track([out]) start = time.time() @@ -70,9 +70,9 @@ tf_durs.append(tf_dur) print('numpy durations: ', np_durs) -print('ead durations: ', ead_durs) +print('eteq durations: ', ead_durs) print('tf durations: ', tf_durs) -ead_line = plt.plot(matrix_dims, ead_durs, 'r--', label='ead durations') +ead_line = plt.plot(matrix_dims, ead_durs, 'r--', label='eteq durations') np_lines = plt.plot(matrix_dims, np_durs, 'g--', label='numpy durations') tf_line = plt.plot(matrix_dims, tf_durs, 'b--', label='tf durations') plt.legend() diff --git a/rocnnet/comparison/comparison_mlp.py b/rocnnet/comparison/comparison_mlp.py index 8872cf19c..0a4e79c0b 100644 --- a/rocnnet/comparison/comparison_mlp.py +++ b/rocnnet/comparison/comparison_mlp.py @@ -5,8 +5,8 @@ import matplotlib.pyplot as plt import tensorflow as tf -import ead.tenncor as tc -import ead.ead as ead +import eteq.tenncor as tc +import eteq.eteq as eteq import rocnnet.rocnnet as rcn @@ -140,10 +140,10 @@ def copy(self, scope=None): bias_init=rcn.zero_init(), label="1")) brain.add(rcn.sigmoid()) - invar = ead.variable(np.zeros([batch_size, n_in], dtype=float), 'in') + invar = eteq.variable(np.zeros([batch_size, n_in], dtype=float), 'in') out = brain.connect(invar) - expected_out = ead.variable(np.zeros([batch_size, n_out], dtype=float), 'expected_out') - err = tc.square(tc.sub(expected_out, out)) + expected_out = eteq.variable(np.zeros([batch_size, n_out], dtype=float), 'expected_out') + err = tc.square(expected_out - out) # tensorflow mlp tf_brain = MLP([n_in], [matrix_dim, n_out], [tf.sigmoid, tf.sigmoid], scope='brain_' + str(matrix_dim)) @@ -153,7 +153,7 @@ def copy(self, scope=None): tf_expected_out = tf.compat.v1.placeholder(tf.float32, [batch_size, n_out], name='tf_expected_out') tf_err = tf.square(tf_expected_out - tf_out) - sess = ead.Session() + sess = eteq.Session() sess.track([err]) tfsess = tf.compat.v1.Session() @@ -184,9 +184,9 @@ def copy(self, scope=None): ead_durs.append(ead_dur) tf_durs.append(tf_dur) -print('ead durations: ', ead_durs) +print('eteq durations: ', ead_durs) print('tf durations: ', tf_durs) -ead_line = plt.plot(matrix_dims, ead_durs, 'r--', label='ead durations') +ead_line = plt.plot(matrix_dims, ead_durs, 'r--', label='eteq durations') tf_line = plt.plot(matrix_dims, tf_durs, 'b--', label='tf durations') plt.legend() plt.show() diff --git a/rocnnet/comparison/comparison_mlp_grad.py b/rocnnet/comparison/comparison_mlp_grad.py index f6504ab46..1c2f455cb 100644 --- a/rocnnet/comparison/comparison_mlp_grad.py +++ b/rocnnet/comparison/comparison_mlp_grad.py @@ -5,8 +5,8 @@ import matplotlib.pyplot as plt import tensorflow as tf -import ead.tenncor as tc -import ead.ead as ead +import eteq.tenncor as tc +import eteq.eteq as eteq import rocnnet.rocnnet as rcn @@ -130,7 +130,7 
@@ def copy(self, scope=None): n_out = int(n_in / 2) batch_size = 1 - sess = ead.Session() + sess = eteq.Session() tfsess = tf.compat.v1.Session() # regular mlp @@ -144,10 +144,10 @@ def copy(self, scope=None): bias_init=rcn.zero_init(), label="1")) brain.add(rcn.sigmoid()) - invar = ead.variable(np.zeros([batch_size, n_in], dtype=float), 'in') + invar = eteq.variable(np.zeros([batch_size, n_in], dtype=float), 'in') out = brain.connect(invar) - expected_out = ead.variable(np.zeros([batch_size, n_out], dtype=float), 'expected_out') - err = tc.square(tc.sub(expected_out, out)) + expected_out = eteq.variable(np.zeros([batch_size, n_out], dtype=float), 'expected_out') + err = tc.square(expected_out - out) trainer = rcn.MLPTrainer(brain, sess, rcn.get_sgd(learning_rate), batch_size) @@ -211,9 +211,9 @@ def calculate_update(batch, batch_out): ead_durs.append(ead_dur) tf_durs.append(tf_dur) -print('ead durations: ', ead_durs) +print('eteq durations: ', ead_durs) print('tf durations: ', tf_durs) -ead_line = plt.plot(matrix_dims, ead_durs, 'r--', label='ead durations') +ead_line = plt.plot(matrix_dims, ead_durs, 'r--', label='eteq durations') tf_line = plt.plot(matrix_dims, tf_durs, 'b--', label='tf durations') plt.legend() plt.show() diff --git a/rocnnet/comparison/comparison_mlp_grad_pll.py b/rocnnet/comparison/comparison_mlp_grad_pll.py index 3e012e575..11fc2710f 100644 --- a/rocnnet/comparison/comparison_mlp_grad_pll.py +++ b/rocnnet/comparison/comparison_mlp_grad_pll.py @@ -5,10 +5,10 @@ import matplotlib.pyplot as plt import tensorflow as tf -import ead.tenncor as tc -import ead.ead as ead +import eteq.tenncor as tc +import eteq.eteq as eteq -import pll.pll as pll +import ccur.ccur as ccur import rocnnet.rocnnet as rcn @@ -132,7 +132,7 @@ def copy(self, scope=None): n_out = int(n_in / 2) batch_size = 1 - sess = pll.Session(nthread=4) + sess = ccur.Session(nthread=4) tfsess = tf.compat.v1.Session() # regular mlp @@ -146,10 +146,10 @@ def copy(self, scope=None): bias_init=rcn.zero_init(), label="1")) brain.add(rcn.sigmoid()) - invar = ead.variable(np.zeros([batch_size, n_in], dtype=float), 'in') + invar = eteq.variable(np.zeros([batch_size, n_in], dtype=float), 'in') out = brain.connect(invar) - expected_out = ead.variable(np.zeros([batch_size, n_out], dtype=float), 'expected_out') - err = tc.square(tc.sub(expected_out, out)) + expected_out = eteq.variable(np.zeros([batch_size, n_out], dtype=float), 'expected_out') + err = tc.square(expected_out - out) trainer = rcn.MLPTrainer(brain, sess, rcn.get_sgd(learning_rate), batch_size) @@ -213,9 +213,9 @@ def calculate_update(batch, batch_out): ead_durs.append(ead_dur) tf_durs.append(tf_dur) -print('ead durations: ', ead_durs) +print('eteq durations: ', ead_durs) print('tf durations: ', tf_durs) -ead_line = plt.plot(matrix_dims, ead_durs, 'r--', label='ead durations') +ead_line = plt.plot(matrix_dims, ead_durs, 'r--', label='eteq durations') tf_line = plt.plot(matrix_dims, tf_durs, 'b--', label='tf durations') plt.legend() plt.show() diff --git a/rocnnet/demo/dbn_demo.cpp b/rocnnet/demo/dbn_demo.cpp index 86e551022..a58f24630 100644 --- a/rocnnet/demo/dbn_demo.cpp +++ b/rocnnet/demo/dbn_demo.cpp @@ -8,7 +8,7 @@ #include "data/mnist_data.hpp" -#include "modl/db_trainer.hpp" +#include "layr/db_trainer.hpp" struct TestParams { @@ -51,7 +51,7 @@ static void pretrain (DBNTrainer& model, size_t n_input, size_t n_train_batches = n_data / params.n_batch; llo::VarptrT pretrain_in = llo::get_variable( std::vector(n_input * params.n_batch), - 
ade::Shape({n_input, params.n_batch}), "pretrain_in"); + teq::Shape({n_input, params.n_batch}), "pretrain_in"); float inbatch = params.n_batch * n_input; std::cout << "... getting the pretraining functions" << '\n'; @@ -63,7 +63,7 @@ static void pretrain (DBNTrainer& model, size_t n_input, for (size_t pidx = 0; pidx < pretrainers.size(); pidx++) { Deltas trainer = pretrainers[pidx].first; - ade::TensptrT cost = pretrainers[pidx].second; + teq::TensptrT cost = pretrainers[pidx].second; for (size_t e = 0; e < params.pretrain_epochs; e++) { float mean_cost = 0; diff --git a/rocnnet/demo/dqn_demo.py b/rocnnet/demo/dqn_demo.py index d16646d0c..f443ea231 100644 --- a/rocnnet/demo/dqn_demo.py +++ b/rocnnet/demo/dqn_demo.py @@ -5,8 +5,8 @@ import numpy as np -import ead.tenncor as tc -import ead.ead as ead +import eteq.tenncor as tc +import eteq.eteq as eteq import rocnnet.rocnnet as rcn prog_description = 'Demo dqn_trainer' @@ -66,7 +66,7 @@ def main(args): if args.seed: print('seeding {}'.format(args.seedval)) - ead.seed(args.seedval) + eteq.seed(args.seedval) np.random.seed(args.seedval) n_observations = 10 @@ -101,7 +101,7 @@ def main(args): discount_rate = 0.99, exploration_period = 0.0) - sess = ead.Session() + sess = eteq.Session() untrained_dqn = rcn.DQNTrainer(untrained, sess, bgd, param) trained_dqn = rcn.DQNTrainer(model, sess, bgd, param, diff --git a/rocnnet/demo/gd_demo.cpp b/rocnnet/demo/gd_demo.cpp index ebd545ae4..c02e66735 100644 --- a/rocnnet/demo/gd_demo.cpp +++ b/rocnnet/demo/gd_demo.cpp @@ -7,13 +7,13 @@ #include "flag/flag.hpp" -#include "ead/ead.hpp" -#include "ead/parse.hpp" +#include "eteq/eteq.hpp" +#include "eteq/parse.hpp" #include "dbg/grpc/session.hpp" -#include "rocnnet/modl/model.hpp" -#include "rocnnet/modl/activations.hpp" +#include "layr/model.hpp" +#include "layr/activations.hpp" #include "rocnnet/trainer/mlp_trainer.hpp" @@ -22,7 +22,7 @@ static std::vector batch_generate (size_t n, size_t batchsize) size_t total = n * batchsize; // Specify the engine and distribution. 
- std::mt19937 mersenne_engine(ead::get_engine()()); + std::mt19937 mersenne_engine(eteq::get_engine()()); std::uniform_real_distribution dist(0, 1); auto gen = std::bind(dist, mersenne_engine); @@ -83,50 +83,50 @@ int main (int argc, const char** argv) if (seed) { std::cout << "seeding " << seedval << '\n'; - ead::get_engine().seed(seedval); + eteq::get_engine().seed(seedval); } uint8_t n_in = 10; uint8_t n_out = n_in / 2; - std::vector n_outs = {9, n_out}; + std::vector n_outs = {9, n_out}; - modl::SequentialModel model("demo"); - model.push_back(std::make_shared(9, n_in, - eqns::unif_xavier_init(1), eqns::zero_init(), "0")); - model.push_back(modl::sigmoid()); - model.push_back(std::make_shared(n_out, 9, - eqns::unif_xavier_init(1), eqns::zero_init(), "1")); - model.push_back(modl::sigmoid()); + layr::SequentialModel model("demo"); + model.push_back(std::make_shared(9, n_in, + layr::unif_xavier_init(1), layr::zero_init(), "0")); + model.push_back(layr::sigmoid()); + model.push_back(std::make_shared(n_out, 9, + layr::unif_xavier_init(1), layr::zero_init(), "1")); + model.push_back(layr::sigmoid()); - modl::SequentialModel untrained_model(model); - modl::SeqModelptrT trained_model = nullptr; + layr::SequentialModel untrained_model(model); + layr::SeqModelptrT trained_model = nullptr; std::ifstream loadstr(loadpath); if (loadstr.is_open()) { - ade::TensT trained_roots; - trained_model = std::static_pointer_cast( - modl::load_layer(loadstr, trained_roots, modl::seq_model_key, "demo")); + teq::TensT trained_roots; + trained_model = std::static_pointer_cast( + layr::load_layer(loadstr, trained_roots, layr::seq_model_key, "demo")); logs::infof("model successfully loaded from file `%s`", loadpath.c_str()); loadstr.close(); } else { logs::warnf("model failed to loaded from file `%s`", loadpath.c_str()); - trained_model = std::make_shared(model); + trained_model = std::make_shared(model); } uint8_t n_batch = 3; size_t show_every_n = 500; - eqns::ApproxF approx = [](const eqns::VarErrsT& leaves) + layr::ApproxF approx = [](const layr::VarErrsT& leaves) { - return eqns::sgd(leaves, 0.9); // learning rate = 0.9 + return layr::sgd(leaves, 0.9); // learning rate = 0.9 }; dbg::InteractiveSession sess("localhost:50051"); trainer::MLPTrainer trainer(model, sess, approx, n_batch); - ead::VarptrT testin = ead::make_variable_scalar( - 0, ade::Shape({n_in}), "testin"); + eteq::VarptrT testin = eteq::make_variable_scalar( + 0, teq::Shape({n_in}), "testin"); auto untrained_out = untrained_model.connect(testin); auto out = model.connect(testin); auto trained_out = trained_model->connect(testin); @@ -136,7 +136,7 @@ int main (int argc, const char** argv) trained_out->get_tensor(), }); - opt::OptCtx rules = ead::parse_file("cfg/optimizations.rules"); + opt::OptCtx rules = eteq::parse_file("cfg/optimizations.rules"); sess.optimize(rules); // train mlp to output input @@ -204,7 +204,7 @@ int main (int argc, const char** argv) std::ofstream savestr(savepath); if (savestr.is_open()) { - if (modl::save_layer(savestr, model, {})) + if (layr::save_layer(savestr, model, {})) { logs::infof("successfully saved model to `%s`", savepath.c_str()); } diff --git a/rocnnet/demo/gd_demo.py b/rocnnet/demo/gd_demo.py index b111b86b4..724de3eab 100644 --- a/rocnnet/demo/gd_demo.py +++ b/rocnnet/demo/gd_demo.py @@ -4,8 +4,8 @@ import numpy as np -import ead.tenncor as tc -import ead.ead as ead +import eteq.tenncor as tc +import eteq.eteq as eteq import rocnnet.rocnnet as rcn prog_description = 'Demo mlp_trainer using sgd' @@ -50,7 
+50,7 @@ def main(args): if args.seed: print('seeding {}'.format(args.seedval)) - ead.seed(args.seedval) + eteq.seed(args.seedval) np.random.seed(args.seedval) n_in = 10 @@ -76,13 +76,13 @@ def main(args): print('failed to load from "{}"'.format(args.load)) trained = model.clone() - sess = ead.Session() + sess = eteq.Session() n_batch = args.n_batch show_every_n = 500 trainer = rcn.MLPTrainer(model, sess, rcn.get_sgd(0.9), n_batch) - testin = ead.variable(np.zeros([n_in], dtype=float), 'testin') + testin = eteq.variable(np.zeros([n_in], dtype=float), 'testin') untrained_out = untrained.connect(testin) trained_out = model.connect(testin) pretrained_out = trained.connect(testin) diff --git a/rocnnet/demo/rbm_demo.py b/rocnnet/demo/rbm_demo.py index edc497616..299d71349 100644 --- a/rocnnet/demo/rbm_demo.py +++ b/rocnnet/demo/rbm_demo.py @@ -7,8 +7,8 @@ import matplotlib.pyplot as plt import numpy as np -import ead.tenncor as tc -import ead.ead as ead +import eteq.tenncor as tc +import eteq.eteq as eteq import rocnnet.rocnnet as rcn prog_description = 'Demo rbm_trainer' @@ -16,7 +16,7 @@ mnist = input_data.read_data_sets('MNIST_data/', one_hot=True) def mse_errfunc(x, visible_sample_): - return tc.reduce_mean(tc.square(tc.sub(x, visible_sample_))) + return tc.reduce_mean(tc.square(x - visible_sample_)) def show_digit(x, plt): plt.imshow(x.reshape((28, 28)), cmap=plt.cm.gray) @@ -54,7 +54,7 @@ def main(args): if args.seed: print('seeding {}'.format(args.seedval)) - ead.seed(args.seedval) + eteq.seed(args.seedval) np.random.seed(args.seedval) if args.use_tqdm: @@ -85,7 +85,7 @@ def main(args): print('failed to load from "{}"'.format(args.load)) trained = model.clone() - sess = ead.Session() + sess = eteq.Session() batch_size = 10 trainer = rcn.BernoulliRBMTrainer( @@ -96,7 +96,7 @@ def main(args): discount_factor=momentum, err_func=mse_errfunc) - x = ead.scalar_variable(0, [1, n_visible]) + x = eteq.scalar_variable(0, [1, n_visible]) genx = model.backward_connect( tc.random.rand_binom_one(model.connect(x))) diff --git a/rocnnet/eqns/BUILD.bazel b/rocnnet/eqns/BUILD.bazel deleted file mode 100644 index 61eb68e6c..000000000 --- a/rocnnet/eqns/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -licenses(["notice"]) - -package( - default_visibility = ["//visibility:public"], -) - -filegroup( - name = "srcs", - srcs = glob([ - "*.hpp", - "src/*.cpp", - ]) + ["BUILD.bazel"], -) - -######### LIBRARY ######### - -cc_library( - name = "eqns", - hdrs = glob(["*.hpp"]), - srcs = glob(["src/*.cpp"]), - copts = ["-std=c++17"], - deps = ["//ead:ead"], -) diff --git a/rocnnet/modl/BUILD.bazel b/rocnnet/modl/BUILD.bazel deleted file mode 100644 index abc815a37..000000000 --- a/rocnnet/modl/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -licenses(["notice"]) - -package( - default_visibility = ["//visibility:public"], -) - -filegroup( - name = "srcs", - srcs = glob([ - "*.hpp", - "src/*.cpp", - ]) + ["BUILD.bazel"], -) - -######### LIBRARY ######### - -cc_library( - name = "modl", - hdrs = glob(["*.hpp"]), - srcs = glob(["src/*.cpp"]), - copts = ["-std=c++17"], - deps = ["//ead:ead","//rocnnet/eqns:eqns"], -) diff --git a/rocnnet/modl/conv.hpp b/rocnnet/modl/conv.hpp deleted file mode 100644 index 9d4ac9947..000000000 --- a/rocnnet/modl/conv.hpp +++ /dev/null @@ -1,98 +0,0 @@ -#include "ead/generated/api.hpp" - -#include "rocnnet/modl/marshal.hpp" - -#ifndef MODL_CONV_HPP -#define MODL_CONV_HPP - -namespace modl -{ - -struct Conv final : public iMarshalSet -{ - Conv (std::pair filter_hw, ade::DimT in_ncol, - uinade::DimT 
out_ncol, std::string label) : iMarshalSet(label) - { - ade::Shape kernelshape({out_ncol, in_ncol, - filter_hw.second, filter_hw.first}); - size_t ndata = kernelshape.n_elems(); - - size_t input_size = filter_hw.first * filter_hw.second * in_ncol; - PybindT bound = 1.0 / std::sqrt(input_size); - std::uniform_real_distribution dist(-bound, bound); - auto gen = [&dist]() - { - return dist(ead::get_engine()); - }; - std::vector data(ndata); - std::generate(data.begin(), data.end(), gen); - - ead::VarptrT weight = ead::make_variable( - data.data(), kernelshape, "weight"); - ead::VarptrT bias = ead::make_variable_scalar( - 0.0, ade::Shape({out_ncol}), "bias"); - weight_ = std::make_shared(weight); - bias_ = std::make_shared(bias); - } - - Conv (const Conv& other) : iMarshalSet(other) - { - copy_helper(other); - } - - Conv& operator = (const Conv& other) - { - if (this != &other) - { - iMarshalSet::operator = (other); - copy_helper(other); - } - return *this; - } - - Conv (Conv&& other) = default; - - Conv& operator = (Conv&& other) = default; - - ead::NodeptrT operator () (ead::NodeptrT input) - { - return age::conv2d(input, - ead::convert_to_node(weight_->var_), - ead::convert_to_node(bias_->var_)); - } - - uint8_t get_ninput (void) const - { - return weight_->var_->shape().at(1); - } - - uint8_t get_noutput (void) const - { - return weight_->var_->shape().at(0); - } - - MarsarrT get_subs (void) const override - { - return {weight_, bias_}; - } - - MarVarsptrT weight_; - - MarVarsptrT bias_; - -private: - void copy_helper (const Conv& other) - { - weight_ = std::make_shared(*other.weight_); - bias_ = std::make_shared(*other.bias_); - } - - iMarshaler* clone_impl (void) const override - { - return new Conv(*this); - } -}; - -} - -#endif // MODL_CONV_HPP diff --git a/rocnnet/notebooks/ead/__init__.py b/rocnnet/notebooks/ead/__init__.py index 1e99eb777..d57c0ab0a 100644 --- a/rocnnet/notebooks/ead/__init__.py +++ b/rocnnet/notebooks/ead/__init__.py @@ -1 +1 @@ -import ead +import eteq diff --git a/rocnnet/notebooks/karpathy_game.ipynb b/rocnnet/notebooks/karpathy_game.ipynb index 2f10000cb..b37e1e440 100644 --- a/rocnnet/notebooks/karpathy_game.ipynb +++ b/rocnnet/notebooks/karpathy_game.ipynb @@ -22,8 +22,8 @@ "import numpy as np\n", "import tempfile\n", "\n", - "import ead.tenncor as tc\n", - "import ead.ead as ead\n", + "import eteq.tenncor as tc\n", + "import eteq.eteq as eteq\n", "import rocnnet.rocnnet as rcn\n", "\n", "from tf_rl.controller import HumanController\n", @@ -150,7 +150,7 @@ " store_interval = 4,\n", " train_interval = 4)\n", " \n", - " sess = ead.Session()\n", + " sess = eteq.Session()\n", " current_controller = DQNControllerWrapper(rcn.DQNTrainer(brain, nonlins, sess, bgd, param))\n", " sess.optimize(\"cfg/optimizations.rules\")" ] diff --git a/rocnnet/python/rocnnet.cpp b/rocnnet/python/rocnnet.cpp index 534ef5fd4..94b864bbb 100644 --- a/rocnnet/python/rocnnet.cpp +++ b/rocnnet/python/rocnnet.cpp @@ -6,16 +6,16 @@ #include "pybind11/stl.h" #include "pybind11/functional.h" -#include "ead/generated/pyapi.hpp" +#include "eteq/generated/pyapi.hpp" -#include "rocnnet/eqns/init.hpp" +#include "layr/init.hpp" -#include "rocnnet/modl/activations.hpp" -#include "rocnnet/modl/dense.hpp" -#include "rocnnet/modl/rbm.hpp" -#include "rocnnet/modl/model.hpp" -// #include "rocnnet/modl/dbn.hpp" -// #include "rocnnet/modl/conv.hpp" +#include "layr/activations.hpp" +#include "layr/dense.hpp" +#include "layr/rbm.hpp" +#include "layr/model.hpp" +// #include "layr/dbn.hpp" +// #include
"layr/conv.hpp" #include "rocnnet/trainer/mlp_trainer.hpp" #include "rocnnet/trainer/dqn_trainer.hpp" @@ -27,13 +27,13 @@ namespace py = pybind11; namespace pyrocnnet { -ade::Shape p2cshape (std::vector& pyshape) +teq::Shape p2cshape (std::vector& pyshape) { - return ade::Shape(std::vector( + return teq::Shape(std::vector( pyshape.rbegin(), pyshape.rend())); } -std::vector arr2vec (ade::Shape& outshape, py::array data) +std::vector arr2vec (teq::Shape& outshape, py::array data) { py::buffer_info info = data.request(); outshape = p2cshape(info.shape); @@ -100,27 +100,27 @@ std::vector arr2vec (ade::Shape& outshape, py::array data) return vec; } -// modl::DBNptrT dbn_init (size_t n_input, std::vector n_hiddens, +// layr::DBNptrT dbn_init (size_t n_input, std::vector n_hiddens, // std::string label) // { -// return std::make_shared(n_input, +// return std::make_shared(n_input, // std::vector(n_hiddens.begin(), n_hiddens.end()), label); // } -eqns::ApproxF get_sgd (PybindT learning_rate) +layr::ApproxF get_sgd (PybindT learning_rate) { - return [=](const eqns::VarErrsT& leaves) + return [=](const layr::VarErrsT& leaves) { - return eqns::sgd(leaves, learning_rate); + return layr::sgd(leaves, learning_rate); }; } -eqns::ApproxF get_rms_momentum (PybindT learning_rate, +layr::ApproxF get_rms_momentum (PybindT learning_rate, PybindT discount_factor, PybindT epsilon) { - return [=](const eqns::VarErrsT& leaves) + return [=](const layr::VarErrsT& leaves) { - return eqns::rms_momentum(leaves, learning_rate, + return layr::rms_momentum(leaves, learning_rate, discount_factor, epsilon); }; } @@ -131,14 +131,14 @@ PYBIND11_MODULE(rocnnet, m) { m.doc() = "rocnnet api"; - py::class_ shape(m, "Shape"); + py::class_ shape(m, "Shape"); // layers - py::class_ layer(m, "Layer"); - py::class_ activation(m, "Activation"); - py::class_ dense(m, "Dense"); - py::class_ rbm(m, "RBM"); - py::class_ seqmodel(m, "SequentialModel"); + py::class_ layer(m, "Layer"); + py::class_ activation(m, "Activation"); + py::class_ dense(m, "Dense"); + py::class_ rbm(m, "RBM"); + py::class_ seqmodel(m, "SequentialModel"); // trainers py::class_ mlptrainer(m, "MLPTrainer"); @@ -146,34 +146,34 @@ PYBIND11_MODULE(rocnnet, m) py::class_ brbmtrainer(m, "BernoulliRBMTrainer"); // supports - py::class_ assigns(m, "VarAssign"); + py::class_ assigns(m, "VarAssign"); py::class_ dqninfo(m, "DQNInfo"); py::class_ trainingctx(m, "TrainingContext"); py::class_ dqntrainingctx(m, "DQNTrainingContext"); - shape.def(py::init>()); + shape.def(py::init>()); // layer layer - .def("connect", &modl::iLayer::connect) + .def("connect", &layr::iLayer::connect) .def("get_contents", - [](py::object self) -> ead::NodesT + [](py::object self) -> eteq::NodesT { - ade::TensT contents = self.cast()->get_contents(); - ead::NodesT nodes; + teq::TensT contents = self.cast()->get_contents(); + eteq::NodesT nodes; nodes.reserve(contents.size()); std::transform(contents.begin(), contents.end(), std::back_inserter(nodes), - ead::NodeConverters::to_node); + eteq::NodeConverters::to_node); return nodes; }) .def("save_file", [](py::object self, std::string filename) -> bool { - modl::iLayer& me = *self.cast(); + layr::iLayer& me = *self.cast(); std::fstream output(filename, std::ios::out | std::ios::trunc | std::ios::binary); - if (false == modl::save_layer(output, me, me.get_contents())) + if (false == layr::save_layer(output, me, me.get_contents())) { logs::errorf("cannot save to file %s", filename.c_str()); return false; @@ -183,52 +183,52 @@ PYBIND11_MODULE(rocnnet, m) 
.def("save_string", [](py::object self) -> std::string { - modl::iLayer& me = *self.cast(); + layr::iLayer& me = *self.cast(); std::stringstream savestr; - modl::save_layer(savestr, me, me.get_contents()); + layr::save_layer(savestr, me, me.get_contents()); return savestr.str(); }) - .def("get_ninput", &modl::iLayer::get_ninput) - .def("get_noutput", &modl::iLayer::get_noutput); + .def("get_ninput", &layr::iLayer::get_ninput) + .def("get_noutput", &layr::iLayer::get_noutput); // activation activation .def(py::init(), py::arg("label"), py::arg("activation_type") = "sigmoid") - .def("clone", &modl::Activation::clone, py::arg("prefix") = ""); + .def("clone", &layr::Activation::clone, py::arg("prefix") = ""); // dense m.def("create_dense", - [](ead::NodeptrT weight, - ead::NodeptrT bias, + [](eteq::NodeptrT weight, + eteq::NodeptrT bias, std::string label) { - return std::make_shared(weight, bias, label); + return std::make_shared(weight, bias, label); }, py::arg("weight"), py::arg("bias") = nullptr, py::arg("label")); dense - .def(py::init, - eqns::InitF, + .def(py::init, + layr::InitF, const std::string&>(), py::arg("nunits"), py::arg("indim"), - py::arg("weight_init") = eqns::unif_xavier_init(1), - py::arg("bias_init") = eqns::zero_init(), + py::arg("weight_init") = layr::unif_xavier_init(1), + py::arg("bias_init") = layr::zero_init(), py::arg("label")) - .def("clone", &modl::Dense::clone, py::arg("prefix") = ""); + .def("clone", &layr::Dense::clone, py::arg("prefix") = ""); // rbm m.def("create_rbm", - [](modl::DenseptrT hidden, - modl::DenseptrT visible, - modl::ActivationptrT activation, + [](layr::DenseptrT hidden, + layr::DenseptrT visible, + layr::ActivationptrT activation, std::string label) { - return std::make_shared( + return std::make_shared( hidden, visible, activation, label); }, py::arg("hidden"), @@ -236,47 +236,47 @@ PYBIND11_MODULE(rocnnet, m) py::arg("activation") = nullptr, py::arg("label")); rbm - .def(py::init, - eqns::InitF, + .def(py::init, + layr::InitF, const std::string&>(), py::arg("nhidden"), py::arg("nvisible"), - py::arg("activation") = modl::sigmoid(), - py::arg("weight_init") = eqns::unif_xavier_init(1), - py::arg("bias_init") = eqns::zero_init(), + py::arg("activation") = layr::sigmoid(), + py::arg("weight_init") = layr::unif_xavier_init(1), + py::arg("bias_init") = layr::zero_init(), py::arg("label")) - .def("clone", &modl::RBM::clone, py::arg("prefix") = "") - .def("backward_connect", &modl::RBM::backward_connect); + .def("clone", &layr::RBM::clone, py::arg("prefix") = "") + .def("backward_connect", &layr::RBM::backward_connect); // seqmodel seqmodel .def(py::init(), py::arg("label")) - .def("clone", &modl::SequentialModel::clone, py::arg("prefix") = "") - .def("add", &modl::SequentialModel::push_back); + .def("clone", &layr::SequentialModel::clone, py::arg("prefix") = "") + .def("add", &layr::SequentialModel::push_back); // // dbn // m.def("get_dbn", &pyrocnnet::dbn_init); // dbn // .def("copy", [](py::object self) // { - // return std::make_shared(*self.cast()); + // return std::make_shared(*self.cast()); // }, "deep copy this instance") - // .def("forward", [](py::object self, ead::NodeptrT input) + // .def("forward", [](py::object self, eteq::NodeptrT input) // { - // return (*self.cast())(input); + // return (*self.cast())(input); // }, "forward input tensor and returned connected output"); // mlptrainer mlptrainer - .def(py::init(), + .def(py::init(), py::arg("model"), py::arg("sess"), py::arg("update"), py::arg("batch_size"), - 
py::arg("gradprocess") = eqns::NodeUnarF(eqns::identity), + py::arg("gradprocess") = layr::NodeUnarF(layr::identity), py::arg("ctx") = trainer::TrainingContext()) .def("train", &trainer::MLPTrainer::train, "train internal variables") .def("train_in", @@ -308,7 +308,7 @@ PYBIND11_MODULE(rocnnet, m) dqninfo .def(py::init(), + size_t, teq::DimT, size_t>(), py::arg("train_interval") = 5, py::arg("rand_action_prob") = 0.05, py::arg("discount_rate") = 0.95, @@ -318,12 +318,12 @@ PYBIND11_MODULE(rocnnet, m) py::arg("mini_batch_size") = 32, py::arg("max_exp") = 30000); dqntrainer - .def(py::init(), + .def(py::init(), py::arg("model"), py::arg("sess"), py::arg("update"), py::arg("param"), - py::arg("gradprocess") = eqns::NodeUnarF(eqns::identity), + py::arg("gradprocess") = layr::NodeUnarF(layr::identity), py::arg("ctx") = trainer::DQNTrainingContext()) .def("action", &trainer::DQNTrainer::action, "get next action") .def("store", &trainer::DQNTrainer::store, "save observation, action, and reward") @@ -338,9 +338,9 @@ PYBIND11_MODULE(rocnnet, m) // brbmtrainer brbmtrainer .def(py::init< - modl::RBM&, - ead::iSession&, - ade::DimT, + layr::RBM&, + eteq::iSession&, + teq::DimT, PybindT, PybindT, trainer::ErrorF>(), @@ -354,7 +354,7 @@ PYBIND11_MODULE(rocnnet, m) [](py::object self, py::array data) { auto trainer = self.cast(); - ade::Shape shape; + teq::Shape shape; std::vector vec = pyrocnnet::arr2vec(shape, data); return trainer->train(vec); }, "train internal variables"); @@ -362,7 +362,7 @@ PYBIND11_MODULE(rocnnet, m) // inlines m // activations (no longer useful) - .def("identity", &eqns::identity) + .def("identity", &layr::identity) // optimizations .def("get_sgd", &pyrocnnet::get_sgd, @@ -374,52 +374,52 @@ PYBIND11_MODULE(rocnnet, m) // inits .def("variable_from_init", - [](eqns::InitF init, std::vector slist, std::string label) + [](layr::InitF init, std::vector slist, std::string label) { return init(pyrocnnet::p2cshape(slist), label); }, "Return labelled variable containing data created from initializer", py::arg("init"), py::arg("slist"), py::arg("label") = "") - .def("zero_init", eqns::zero_init) + .def("zero_init", layr::zero_init) .def("variance_scaling_init", [](PybindT factor) { - return eqns::variance_scaling_init(factor); + return layr::variance_scaling_init(factor); }, "truncated_normal(shape, 0, sqrt(factor / ((fanin + fanout)/2))", py::arg("factor")) - .def("unif_xavier_init", &eqns::unif_xavier_init, + .def("unif_xavier_init", &layr::unif_xavier_init, "uniform xavier initializer", py::arg("factor") = 1) - .def("norm_xavier_init", &eqns::norm_xavier_init, + .def("norm_xavier_init", &layr::norm_xavier_init, "normal xavier initializer", py::arg("factor") = 1) // layer creation - .def("sigmoid", modl::sigmoid, py::arg("label") = "sigmoid") - .def("tanh", modl::tanh, py::arg("label") = "tanh") + .def("sigmoid", layr::sigmoid, py::arg("label") = "sigmoid") + .def("tanh", layr::tanh, py::arg("label") = "tanh") .def("load_file_seqmodel", - [](std::string filename, std::string layer_label) -> modl::SeqModelptrT + [](std::string filename, std::string layer_label) -> layr::SeqModelptrT { std::ifstream input(filename); if (false == input.is_open()) { logs::fatalf("file %s not found", filename.c_str()); } - ade::TensT trained_roots; - return std::static_pointer_cast( - modl::load_layer(input, trained_roots, modl::seq_model_key, layer_label)); + teq::TensT trained_roots; + return std::static_pointer_cast( + layr::load_layer(input, trained_roots, layr::seq_model_key, layer_label)); }) 
.def("load_file_rbmmodel", - [](std::string filename, std::string layer_label) -> modl::RBMptrT + [](std::string filename, std::string layer_label) -> layr::RBMptrT { std::ifstream input(filename); if (false == input.is_open()) { logs::fatalf("file %s not found", filename.c_str()); } - ade::TensT trained_roots; - return std::static_pointer_cast( - modl::load_layer(input, trained_roots, modl::rbm_layer_key, layer_label)); + teq::TensT trained_roots; + return std::static_pointer_cast( + layr::load_layer(input, trained_roots, layr::rbm_layer_key, layer_label)); }); }; diff --git a/rocnnet/trainer/dbn_trainer.hpp b/rocnnet/trainer/dbn_trainer.hpp index bf682ee43..403a85c7f 100644 --- a/rocnnet/trainer/dbn_trainer.hpp +++ b/rocnnet/trainer/dbn_trainer.hpp @@ -1,21 +1,21 @@ -#include "rocnnet/modl/dbn.hpp" +#include "layr/dbn.hpp" #include "rocnnet/trainer/rbm_trainer.hpp" struct DBNTrainer final { - DBNTrainer (modl::DBNptrT brain, + DBNTrainer (layr::DBNptrT brain, uint8_t batch_size, PybindT learning_rate = 1e-3, size_t n_cont_div = 10, - ead::VarptrT train_in = nullptr) : + eteq::VarptrT train_in = nullptr) : brain_(brain), caches_({}) { if (nullptr == train_in) { - train_in_ = ead::VarptrT(ead::Variable::get( - 0.0, ade::Shape({brain->get_ninput(), batch_size}), "train_in")); + train_in_ = eteq::VarptrT(eteq::Variable::get( + 0.0, teq::Shape({brain->get_ninput(), batch_size}), "train_in")); } else { @@ -23,7 +23,7 @@ struct DBNTrainer final } train_out_ = train_in_; auto layers = brain_->get_layers(); - for (modl::RBMptrT& rbm : layers) + for (layr::RBMptrT& rbm : layers) { RBMTrainer trainer(rbm, nullptr, batch_size, learning_rate, @@ -33,9 +33,9 @@ struct DBNTrainer final } } - std::vector pretraining_functions (void) const + std::vector pretraining_functions (void) const { - std::vector pt_updates(rbm_trainers_.size()); + std::vector pt_updates(rbm_trainers_.size()); std::transform(rbm_trainers_.begin(), rbm_trainers_.end(), pt_updates.begin(), [](const RBMTrainer& trainer) @@ -47,56 +47,56 @@ struct DBNTrainer final // todo: conform to a current trainer convention, // or make all trainers functions instead of class bundles - std::pair build_finetune_functions ( - ead::VarptrT train_out, PybindT learning_rate = 1e-3) + std::pair build_finetune_functions ( + eteq::VarptrT train_out, PybindT learning_rate = 1e-3) { - ade::TensptrT out_dist = (*brain_)(ade::TensptrT(train_in_)); - ade::TensptrT finetune_cost = age::neg( - age::reduce_mean(age::log(out_dist))); + teq::TensptrT out_dist = (*brain_)(teq::TensptrT(train_in_)); + teq::TensptrT finetune_cost = egen::neg( + egen::reduce_mean(egen::log(out_dist))); - ade::TensptrT temp_diff = age::sub(out_dist, ade::TensptrT(train_out)); - ade::TensptrT error = age::reduce_mean( - age::pow(temp_diff, - ade::TensptrT(ead::Constant::get(2, temp_diff->shape())))); + teq::TensptrT temp_diff = egen::sub(out_dist, teq::TensptrT(train_out)); + teq::TensptrT error = egen::reduce_mean( + egen::pow(temp_diff, + teq::TensptrT(eteq::Constant::get(2, temp_diff->shape())))); pbm::PathedMapT vmap = brain_->list_bases(); - eqns::VariablesT vars; + layr::VariablesT vars; for (auto vpair : vmap) { - if (ead::VarptrT var = std::dynamic_pointer_cast< - ead::Variable>(vpair.first)) + if (eteq::VarptrT var = std::dynamic_pointer_cast< + eteq::Variable>(vpair.first)) { vars.push_back(var); } } - eqns::Deltas errs; - eqns::VarmapT connection; - for (ead::VarptrT& gp : vars) + layr::Deltas errs; + layr::VarmapT connection; + for (eteq::VarptrT& gp : vars) { - auto next_gp 
= age::sub(ade::TensptrT(gp), age::mul( - ade::TensptrT(ead::Constant::get(learning_rate, gp->shape())), - ead::derive(finetune_cost, gp.get())) + auto next_gp = egen::sub(teq::TensptrT(gp), egen::mul( + teq::TensptrT(eteq::Constant::get(learning_rate, gp->shape())), + eteq::derive(finetune_cost, gp.get())) ); errs.upkeep_.push_back(next_gp); connection.emplace(gp.get(), next_gp); } errs.actions_.push_back( - [connection](ead::CacheSpace* caches) + [connection](eteq::CacheSpace* caches) { - eqns::assign_all(caches, connection); + layr::assign_all(caches, connection); }); return {errs, error}; } - ead::VarptrT train_in_; + eteq::VarptrT train_in_; - ade::TensptrT train_out_; + teq::TensptrT train_out_; - modl::DBNptrT brain_; + layr::DBNptrT brain_; std::vector rbm_trainers_; - ead::CacheSpace caches_; + eteq::CacheSpace caches_; }; diff --git a/rocnnet/trainer/dqn_trainer.hpp b/rocnnet/trainer/dqn_trainer.hpp index bdfb9522b..435ce12c8 100644 --- a/rocnnet/trainer/dqn_trainer.hpp +++ b/rocnnet/trainer/dqn_trainer.hpp @@ -1,10 +1,10 @@ -#include "ead/parse.hpp" -#include "ead/grader.hpp" +#include "eteq/parse.hpp" +#include "eteq/grader.hpp" -#include "rocnnet/modl/model.hpp" +#include "layr/model.hpp" -#ifndef MODL_DQN_TRAINER_HPP -#define MODL_DQN_TRAINER_HPP +#ifndef LAYR_DQN_TRAINER_HPP +#define LAYR_DQN_TRAINER_HPP namespace trainer { @@ -31,10 +31,10 @@ struct DQNTrainingContext final std::vector experiences_; // target network - modl::SeqModelptrT target_model_ = nullptr; + layr::SeqModelptrT target_model_ = nullptr; // train fanout: shape - ead::NodeptrT next_output_ = nullptr; + eteq::NodeptrT next_output_ = nullptr; }; struct DQNInfo final @@ -45,7 +45,7 @@ struct DQNInfo final PybindT target_update_rate = 0.01, PybindT exploration_period = 1000, size_t store_interval = 5, - ade::DimT mini_batch_size = 32, + teq::DimT mini_batch_size = 32, size_t max_exp = 30000) : train_interval_(train_interval), rand_action_prob_(rand_action_prob), @@ -63,15 +63,15 @@ struct DQNInfo final PybindT exploration_period_ = 1000; // memory parameters size_t store_interval_ = 5; - ade::DimT mini_batch_size_ = 32; + teq::DimT mini_batch_size_ = 32; size_t max_exp_ = 30000; }; struct DQNTrainer final { - DQNTrainer (modl::SequentialModel& model, - ead::iSession& sess, eqns::ApproxF update, DQNInfo param, - eqns::NodeUnarF gradprocess = eqns::NodeUnarF(eqns::identity), + DQNTrainer (layr::SequentialModel& model, + eteq::iSession& sess, layr::ApproxF update, DQNInfo param, + layr::NodeUnarF gradprocess = layr::NodeUnarF(layr::identity), DQNTrainingContext ctx = DQNTrainingContext()) : sess_(&sess), params_(param), @@ -80,110 +80,105 @@ struct DQNTrainer final { if (nullptr == ctx_.target_model_) { - ctx_.target_model_ = modl::SeqModelptrT(model.clone("target_")); + ctx_.target_model_ = layr::SeqModelptrT(model.clone("target_")); } - input_ = ead::make_variable_scalar(0.0, ade::Shape({ - (ade::DimT) source_model_.get_ninput()}), "observation"); - train_input_ = ead::make_variable_scalar(0.0, ade::Shape({ - (ade::DimT) source_model_.get_ninput(), + input_ = eteq::make_variable_scalar(0.0, teq::Shape({ + (teq::DimT) source_model_.get_ninput()}), "observation"); + train_input_ = eteq::make_variable_scalar(0.0, teq::Shape({ + (teq::DimT) source_model_.get_ninput(), params_.mini_batch_size_}), "train_observation"); - next_input_ = ead::make_variable_scalar(0.0, ade::Shape({ - (ade::DimT) source_model_.get_ninput(), + next_input_ = eteq::make_variable_scalar(0.0, teq::Shape({ + (teq::DimT) 
source_model_.get_ninput(), params_.mini_batch_size_}), "next_observation"); - next_output_mask_ = ead::make_variable_scalar(0.0, - ade::Shape({params_.mini_batch_size_}), + next_output_mask_ = eteq::make_variable_scalar(0.0, + teq::Shape({params_.mini_batch_size_}), "next_observation_mask"); - reward_ = ead::make_variable_scalar(0.0, - ade::Shape({params_.mini_batch_size_}), "rewards"); - output_mask_ = ead::make_variable_scalar(0.0, - ade::Shape({(ade::DimT) source_model_.get_noutput(), + reward_ = eteq::make_variable_scalar(0.0, + teq::Shape({params_.mini_batch_size_}), "rewards"); + output_mask_ = eteq::make_variable_scalar(0.0, + teq::Shape({(teq::DimT) source_model_.get_noutput(), params_.mini_batch_size_}), "action_mask"); // forward action score computation - output_ = source_model_.connect(ead::convert_to_node(input_)); + output_ = source_model_.connect(eteq::convert_to_node(input_)); train_out_ = source_model_.connect( - ead::convert_to_node(train_input_)); + eteq::convert_to_node(train_input_)); // predicting target future rewards ctx_.next_output_ = ctx_.target_model_->connect( - ead::convert_to_node(next_input_)); + eteq::convert_to_node(next_input_)); - auto target_values = tenncor::mul( - tenncor::reduce_max_1d(ctx_.next_output_, 0), - ead::convert_to_node(next_output_mask_)); - future_reward_ = tenncor::add(ead::convert_to_node(reward_), - tenncor::mul( - ead::make_constant_scalar(params_.discount_rate_, - target_values->shape()), - target_values)); // reward for each instance in batch + auto target_values = + tenncor::reduce_max_1d(ctx_.next_output_, 0) * + eteq::convert_to_node(next_output_mask_); + // reward for each instance in batch + future_reward_ = eteq::convert_to_node(reward_) + + params_.discount_rate_ * target_values; // prediction error auto masked_output_score = tenncor::reduce_sum_1d( - tenncor::mul(train_out_, ead::convert_to_node(output_mask_)), 0); + train_out_ * eteq::convert_to_node(output_mask_), 0); prediction_error_ = tenncor::reduce_mean(tenncor::square( - tenncor::sub(masked_output_score, future_reward_))); + masked_output_score - future_reward_)); // updates for source network - ade::TensT source_contents = source_model_.get_contents(); - eqns::VarErrsT source_vars; + teq::TensT source_contents = source_model_.get_contents(); + layr::VarErrsT source_vars; for (auto tens : source_contents) { if (auto var = std::dynamic_pointer_cast< - ead::Variable>(tens)) + eteq::Variable>(tens)) { - auto varnode = std::make_shared>(var); + auto varnode = std::make_shared>(var); source_vars.push_back({ varnode, - gradprocess(ead::derive(prediction_error_, ead::convert_to_node(varnode))) + gradprocess(eteq::derive(prediction_error_, eteq::convert_to_node(varnode))) }); } } updates_ = update(source_vars); // update target network - ade::TensT target_contents = ctx_.target_model_->get_contents(); + teq::TensT target_contents = ctx_.target_model_->get_contents(); size_t nvars = source_vars.size(); - std::vector> target_vars; + std::vector> target_vars; target_vars.reserve(nvars); for (auto tens : target_contents) { if (auto var = std::dynamic_pointer_cast< - ead::Variable>(tens)) + eteq::Variable>(tens)) { target_vars.push_back( - std::make_shared>(var)); + std::make_shared>(var)); } } - eqns::AssignsT target_assigns; + layr::AssignsT target_assigns; for (size_t i = 0; i < nvars; i++) { // this is equivalent to target = (1-alpha) * target + alpha * source - auto target = ead::convert_to_node(target_vars[i]); - auto source = ead::convert_to_node(source_vars[i].first); - 
auto diff = tenncor::sub(target, source); - auto target_update_rate = ead::make_constant_scalar( - params_.target_update_rate_, diff->shape()); - - auto target_next = tenncor::sub(target, tenncor::mul( - target_update_rate, diff)); - target_assigns.push_back(eqns::VarAssign{ + auto target = eteq::convert_to_node(target_vars[i]); + auto source = eteq::convert_to_node(source_vars[i].first); + auto diff = target - source; + + auto target_next = target - params_.target_update_rate_ * diff; + target_assigns.push_back(layr::VarAssign{ fmts::sprintf("target_grad_%s", target_vars[i]->get_label().c_str()), target_vars[i], target_next}); } updates_.push_back(target_assigns); - ade::TensT track_batch = { + teq::TensT track_batch = { prediction_error_->get_tensor(), train_out_->get_tensor(), output_->get_tensor(), }; - for (eqns::AssignsT& assigns : updates_) + for (layr::AssignsT& assigns : updates_) { - for (eqns::VarAssign& assign : assigns) + for (layr::VarAssign& assign : assigns) { track_batch.push_back(assign.source_->get_tensor()); } @@ -290,7 +285,7 @@ struct DQNTrainer final reward_->get_tensor().get(), }); assign_groups(updates_, - [this](std::unordered_set& updated) + [this](std::unordered_set& updated) { this->sess_->update(updated); }); @@ -299,7 +294,7 @@ struct DQNTrainer final ctx_.n_train_called_++; } - ead::NodeptrT get_error (void) const + eteq::NodeptrT get_error (void) const { return prediction_error_; } @@ -311,22 +306,22 @@ struct DQNTrainer final // === forward computation === // fanin: shape - ead::VarptrT input_ = nullptr; + eteq::VarptrT input_ = nullptr; // fanout: shape - ead::NodeptrT output_ = nullptr; + eteq::NodeptrT output_ = nullptr; // === backward computation === // train fanin: shape - ead::VarptrT train_input_ = nullptr; + eteq::VarptrT train_input_ = nullptr; // train fanout: shape - ead::NodeptrT train_out_ = nullptr; + eteq::NodeptrT train_out_ = nullptr; // === updates && optimizer === - eqns::AssignGroupsT updates_; + layr::AssignGroupsT updates_; - ead::iSession* sess_; + eteq::iSession* sess_; private: PybindT linear_annealing (PybindT initial_prob) const @@ -339,7 +334,7 @@ struct DQNTrainer final PybindT get_random (void) { - return explore_(ead::get_engine()); + return explore_(eteq::get_engine()); } std::vector random_sample (void) @@ -361,30 +356,30 @@ struct DQNTrainer final DQNInfo params_; // source network - modl::SequentialModel& source_model_; + layr::SequentialModel& source_model_; // === prediction computation === // train_fanin: shape - ead::VarptrT next_input_ = nullptr; + eteq::VarptrT next_input_ = nullptr; // train mask: shape - ead::VarptrT next_output_mask_ = nullptr; + eteq::VarptrT next_output_mask_ = nullptr; // reward associated with next_output_: shape - ead::VarptrT reward_ = nullptr; + eteq::VarptrT reward_ = nullptr; // future reward calculated from reward history: <1, batchsize> - ead::NodeptrT future_reward_ = nullptr; + eteq::NodeptrT future_reward_ = nullptr; // === q-value computation === // weight output to get overall score: shape - ead::VarptrT output_mask_ = nullptr; + eteq::VarptrT output_mask_ = nullptr; // overall score: shape - ead::NodeptrT score_ = nullptr; + eteq::NodeptrT score_ = nullptr; // future error that we want to minimize: scalar shape - ead::NodeptrT prediction_error_ = nullptr; + eteq::NodeptrT prediction_error_ = nullptr; // states std::uniform_real_distribution explore_; @@ -394,4 +389,4 @@ struct DQNTrainer final } -#endif // MODL_DQN_TRAINER_HPP +#endif // LAYR_DQN_TRAINER_HPP diff --git 
a/rocnnet/trainer/mlp_trainer.hpp b/rocnnet/trainer/mlp_trainer.hpp index 5d759dc6d..00e308551 100644 --- a/rocnnet/trainer/mlp_trainer.hpp +++ b/rocnnet/trainer/mlp_trainer.hpp @@ -1,11 +1,11 @@ -#include "ead/grader.hpp" +#include "eteq/grader.hpp" -#include "rocnnet/modl/model.hpp" +#include "layr/model.hpp" -#include "rocnnet/eqns/err_approx.hpp" +#include "layr/err_approx.hpp" -#ifndef MODL_MLP_TRAINER_HPP -#define MODL_MLP_TRAINER_HPP +#ifndef LAYR_MLP_TRAINER_HPP +#define LAYR_MLP_TRAINER_HPP namespace trainer { @@ -19,47 +19,47 @@ struct TrainingContext final // MLPTrainer does not own anything struct MLPTrainer final { - MLPTrainer (modl::SequentialModel& model, - ead::iSession& sess, eqns::ApproxF update, ade::DimT batch_size, - eqns::NodeUnarF gradprocess = eqns::NodeUnarF(eqns::identity), + MLPTrainer (layr::SequentialModel& model, + eteq::iSession& sess, layr::ApproxF update, teq::DimT batch_size, + layr::NodeUnarF gradprocess = layr::NodeUnarF(layr::identity), TrainingContext ctx = TrainingContext()) : batch_size_(batch_size), - train_in_(ead::make_variable_scalar(0.0, ade::Shape({ - (ade::DimT) model.get_ninput(), batch_size}), "train_in")), + train_in_(eteq::make_variable_scalar(0.0, teq::Shape({ + (teq::DimT) model.get_ninput(), batch_size}), "train_in")), model_(model), sess_(&sess), ctx_(ctx) { train_out_ = model_.connect( - ead::convert_to_node(train_in_)); - expected_out_ = ead::make_variable_scalar(0.0, ade::Shape({ - (ade::DimT) model.get_noutput(), batch_size}), "expected_out"); + eteq::convert_to_node(train_in_)); + expected_out_ = eteq::make_variable_scalar(0.0, teq::Shape({ + (teq::DimT) model.get_noutput(), batch_size}), "expected_out"); error_ = tenncor::square( - tenncor::sub(ead::convert_to_node(expected_out_), train_out_)); + eteq::convert_to_node(expected_out_) - train_out_); auto contents = model_.get_contents(); - eqns::VarErrsT vars; + layr::VarErrsT vars; for (auto tens : contents) { if (auto var = std::dynamic_pointer_cast< - ead::Variable>(tens)) + eteq::Variable>(tens)) { - auto varnode = std::make_shared>(var); + auto varnode = std::make_shared>(var); vars.push_back({ varnode, - gradprocess(ead::derive(error_, ead::convert_to_node(varnode))) + gradprocess(eteq::derive(error_, eteq::convert_to_node(varnode))) }); } } updates_ = update(vars); - ade::TensT track_batch = { + teq::TensT track_batch = { train_out_->get_tensor(), error_->get_tensor(), }; - for (eqns::AssignsT& assigns : updates_) + for (layr::AssignsT& assigns : updates_) { - for (eqns::VarAssign& assign : assigns) + for (layr::VarAssign& assign : assigns) { track_batch.push_back(assign.source_->get_tensor()); } @@ -92,27 +92,27 @@ struct MLPTrainer final expected_out_->get_tensor().get(), }); assign_groups(updates_, - [this](std::unordered_set& updated) + [this](std::unordered_set& updated) { this->sess_->update(updated); }); ++ctx_.n_iterations_; } - modl::SequentialModel& model_; + layr::SequentialModel& model_; uint8_t batch_size_; - ead::VarptrT train_in_; - ead::VarptrT expected_out_; - ead::NodeptrT train_out_; - ead::NodeptrT error_; + eteq::VarptrT train_in_; + eteq::VarptrT expected_out_; + eteq::NodeptrT train_out_; + eteq::NodeptrT error_; - eqns::AssignGroupsT updates_; - ead::iSession* sess_; + layr::AssignGroupsT updates_; + eteq::iSession* sess_; TrainingContext ctx_; }; } -#endif // MODL_MLP_TRAINER_HPP +#endif // LAYR_MLP_TRAINER_HPP diff --git a/rocnnet/trainer/old_rbm_trainer.hpp b/rocnnet/trainer/old_rbm_trainer.hpp deleted file mode 100644 index
d34635e63..000000000 --- a/rocnnet/trainer/old_rbm_trainer.hpp +++ /dev/null @@ -1,228 +0,0 @@ -#include "rocnnet/modl/rbm.hpp" - -#ifndef OLD_MODL_RBM_TRAINER_HPP -#define OLD_MODL_RBM_TRAINER_HPP - -namespace trainer -{ - -// recreate input using hidden distribution -// output shape of input->shape() -ead::NodeptrT reconstruct_visible (modl::RBM& rbm, - ead::NodeptrT input, modl::NonLinearsT nonlins) -{ - ead::NodeptrT hidden_dist = rbm(input, nonlins); - ead::NodeptrT hidden_sample = tenncor::random::rand_binom_one(hidden_dist); - return rbm.prop_down(hidden_sample, nonlins); -} - -ead::NodeptrT reconstruct_hidden (modl::RBM& rbm, - ead::NodeptrT hidden, modl::NonLinearsT nonlins) -{ - ead::NodeptrT visible_dist = rbm.prop_down(hidden, nonlins); - ead::NodeptrT visible_sample = tenncor::random::rand_binom_one(visible_dist); - return rbm(visible_sample, nonlins); -} - -struct RBMTrainer -{ - RBMTrainer (modl::RBMptrT brain, - modl::NonLinearsT nonlinearities, - ead::iSession& sess, - ead::VarptrT persistent, - uint8_t batch_size, - PybindT learning_rate = 1e-3, - size_t n_cont_div = 1, - ead::NodeptrT train_in = nullptr) : - brain_(brain), - sess_(&sess) - { - if (brain->layers_.size() != 1) - { - logs::error("rbm training only operates on the first layer"); - } - ead::VarptrT weight = brain_->layers_[0].weight_->var_; - ead::VarptrT hbias = brain_->layers_[0].hbias_->var_; - ead::VarptrT vbias = brain_->layers_[0].vbias_->var_; - - if (nullptr == train_in) - { - train_in_ = ead::convert_to_node( - ead::make_variable_scalar(0.0, ade::Shape({ - brain->get_ninput(), - batch_size, - }), "train_in")); - } - else - { - train_in_ = train_in; - } - // if persistent not available use Contrastive Divergence (CD) - if (nullptr == persistent) - { - persistent_ = tenncor::random::rand_binom_one( - (*brain)(train_in_, nonlinearities)); - } - // otherwise use Persistent CD - // (initialize from the old state of the chain) - else - { - persistent_ = persistent; - } - - // chain length is n_cont_div - auto chain_segment = tenncor::random::rand_binom_one( - reconstruct_hidden(*brain, persistent_, nonlinearities)); - assert(n_cont_div > 0); - for (size_t i = 0; i < n_cont_div - 1; ++i) - { - chain_segment = tenncor::random::rand_binom_one( - reconstruct_hidden(*brain, chain_segment, nonlinearities)); - } - - // use operational optimization to recover presig and vis nodes - auto presig_vis = tenncor::nn::fully_connect( - {chain_segment}, - {tenncor::transpose(ead::convert_to_node(weight))}, - ead::convert_to_node(vbias)); - auto final_visible_dist = tenncor::sigmoid(presig_vis); - auto chain_end = tenncor::random::rand_binom_one(final_visible_dist); - - cost_ = tenncor::sub(tenncor::reduce_mean(free_energy(train_in_)), - tenncor::reduce_mean(free_energy(chain_end))); - - auto dW = ead::derive(cost_, ead::convert_to_node(weight)); - auto dhb = ead::derive(cost_, ead::convert_to_node(hbias)); - auto dvb = ead::derive(cost_, ead::convert_to_node(vbias)); - - auto next_weight = tenncor::sub(ead::convert_to_node(weight), - tenncor::mul(ead::make_constant_scalar( - learning_rate, dW->shape()), dW)); - auto next_hbias = tenncor::sub(ead::convert_to_node(hbias), - tenncor::mul(ead::make_constant_scalar( - learning_rate, dhb->shape()), dhb)); - auto next_vbias = tenncor::sub(ead::convert_to_node(vbias), - tenncor::mul(ead::make_constant_scalar( - learning_rate, dvb->shape()), dvb)); - eqns::AssignsT assigns = { - eqns::VarAssign{"", weight, next_weight}, - eqns::VarAssign{"", hbias, next_hbias}, - 
eqns::VarAssign{"", vbias, next_vbias}, - }; - - if (nullptr == persistent) - { - // reconstruction cost - monitoring_cost_ = get_reconstruction_cost( - train_in_, final_visible_dist); - } - else - { - // pseudo-likelihood - auto next_persistent = tenncor::random::rand_binom_one( - reconstruct_hidden(*brain, chain_segment, nonlinearities)); - assigns.push_back( - eqns::VarAssign{"", persistent, next_persistent}); - - monitoring_cost_ = get_pseudo_likelihood_cost(train_in_); - } - updates_.push_back(assigns); - - ade::TensT track_batch; - track_batch.reserve(assigns.size()); - std::transform(assigns.begin(), assigns.end(), - std::back_inserter(track_batch), - [](eqns::VarAssign& assign) - { - return assign.source_->get_tensor(); - }); - sess_->track(track_batch); - } - - // input a 2-D vector of shape return monitor cost - PybindT train (std::vector& train_in) - { - auto var = dynamic_cast*>(train_in_.get()); - if (nullptr == var) - { - logs::fatal("cannot train RBM with non-native input"); - } - ade::Shape train_shape = var->shape(); - var->assign(train_in.data(), train_shape); - assign_groups(updates_, - [this](std::unordered_set& updated) - { - this->sess_->update(updated); - }); - - return monitoring_cost_->data()[0] / train_shape.at(1); - } - - ead::NodeptrT train_in_; - modl::RBMptrT brain_; - ead::NodeptrT cost_; - ead::NodeptrT monitoring_cost_; - - eqns::AssignGroupsT updates_; - ead::iSession* sess_; - -private: - ead::NodeptrT get_pseudo_likelihood_cost (ead::NodeptrT input) - { - const ade::Shape& shape = input->shape(); - std::vector zeros(shape.n_elems(), 0); - zeros[0] = 1; - auto one_i = ead::make_constant(zeros.data(), shape); - - ead::NodeptrT xi = tenncor::round(input); // xi = [0|1, ...] - ead::NodeptrT xi_flip = tenncor::sub(one_i, xi); - - ead::NodeptrT fe_xi = free_energy(xi); - ead::NodeptrT fe_xi_flip = free_energy(xi_flip); - - return tenncor::reduce_mean(tenncor::mul( - ead::make_constant_scalar(brain_->get_ninput(), fe_xi->shape()), - tenncor::log(tenncor::sigmoid(tenncor::sub(fe_xi_flip, fe_xi))))); - } - - ead::NodeptrT get_reconstruction_cost ( - ead::NodeptrT input, ead::NodeptrT visible_dist) - { - ead::NodeptrT p_success = tenncor::mul( - input, tenncor::log(visible_dist)); - ead::NodeptrT p_not = tenncor::mul( - tenncor::sub(ead::make_constant_scalar( - 1, input->shape()), input), - tenncor::log(tenncor::sub(ead::make_constant_scalar( - 1, visible_dist->shape()), visible_dist))); - return tenncor::reduce_mean(tenncor::reduce_sum_1d( - tenncor::transpose(tenncor::add(p_success, p_not)), 1)); - } - - ead::NodeptrT free_energy (ead::NodeptrT sample) - { - ead::VarptrT weight = brain_->layers_[0].weight_->var_; - ead::VarptrT hbias = brain_->layers_[0].hbias_->var_; - ead::VarptrT vbias = brain_->layers_[0].vbias_->var_; - - auto vbias_term = tenncor::matmul(sample, - tenncor::transpose(ead::convert_to_node(vbias))); - // @ + z -> - auto wx_b = tenncor::nn::fully_connect( - {sample}, - {ead::convert_to_node(weight)}, - ead::convert_to_node(hbias)); - auto hidden_term = tenncor::reduce_sum( - tenncor::log(tenncor::add( - ead::make_constant_scalar(1, wx_b->shape()), - tenncor::exp(wx_b) - )), 0, 1); - return tenncor::neg(tenncor::add(vbias_term, hidden_term)); - } - - ead::NodeptrT persistent_; -}; - -} - -#endif // OLD_MODL_RBM_TRAINER_HPP diff --git a/rocnnet/trainer/rbm_trainer.hpp b/rocnnet/trainer/rbm_trainer.hpp index fa4ab28db..b02a06ecb 100644 --- a/rocnnet/trainer/rbm_trainer.hpp +++ b/rocnnet/trainer/rbm_trainer.hpp @@ -1,9 +1,9 @@ -#include 
"rocnnet/modl/rbm.hpp" +#include "layr/rbm.hpp" -#include "rocnnet/eqns/err_approx.hpp" +#include "rocnnet/layr/err_approx.hpp" -#ifndef MODL_RBM_TRAINER_HPP -#define MODL_RBM_TRAINER_HPP +#ifndef LAYR_RBM_TRAINER_HPP +#define LAYR_RBM_TRAINER_HPP namespace trainer { @@ -14,42 +14,36 @@ namespace trainer // x_next = x_curr + next_momentum // // where η is the learning rate, and χ is discount_factor -eqns::AssignGroupsT bbernoulli_approx (const eqns::VarErrsT& leaves, +layr::AssignGroupsT bbernoulli_approx (const layr::VarErrsT& leaves, PybindT learning_rate, PybindT discount_factor, std::string root_label = "") { // assign momentums before leaves - eqns::AssignsT assigns; + layr::AssignsT assigns; for (size_t i = 0, nleaves = leaves.size(); i < nleaves; ++i) { - auto leaf_node = ead::convert_to_node(leaves[i].first); + auto leaf_node = eteq::convert_to_node(leaves[i].first); auto err = leaves[i].second; auto shape = err->shape(); - std::vector slist(shape.begin(), shape.end()); + std::vector slist(shape.begin(), shape.end()); auto it = slist.rbegin(), et = slist.rend(); while (it != et && *it == 1) { ++it; } - ade::DimT shape_factor = it == et ? 1 : *it; - auto momentum = ead::make_variable_scalar(0, + teq::DimT shape_factor = it == et ? 1 : *it; + auto momentum = eteq::make_variable_scalar(0, err->shape(), leaves[i].first->get_label() + "_momentum"); - auto momentum_next = tenncor::add( - tenncor::mul( - ead::make_constant_scalar(discount_factor, momentum->shape()), - ead::convert_to_node(momentum)), - tenncor::mul( - ead::make_constant_scalar(learning_rate * - (1 - discount_factor) / shape_factor, err->shape()), - err)); - auto leaf_next = tenncor::add(leaf_node, momentum_next); - - assigns.push_back(eqns::VarAssign{ + auto momentum_next = discount_factor * eteq::convert_to_node(momentum) + + (learning_rate * (1 - discount_factor) / shape_factor) * err; + auto leaf_next = leaf_node + momentum_next; + + assigns.push_back(layr::VarAssign{ fmts::sprintf("bbernoulli_momentum::%s_momentum_%s", root_label.c_str(), leaves[i].first->get_label().c_str()), momentum, momentum_next}); - assigns.push_back(eqns::VarAssign{ + assigns.push_back(layr::VarAssign{ fmts::sprintf("bbernoulli_momentum::%s_grad_%s", root_label.c_str(), leaves[i].first->get_label().c_str()), leaves[i].first, leaf_next}); @@ -57,20 +51,20 @@ eqns::AssignGroupsT bbernoulli_approx (const eqns::VarErrsT& leaves, return {assigns}; } -using ErrorF = std::function(ead::NodeptrT,ead::NodeptrT)>; +using ErrorF = std::function(eteq::NodeptrT,eteq::NodeptrT)>; struct BernoulliRBMTrainer final { - BernoulliRBMTrainer (modl::RBM& model, - ead::iSession& sess, - ade::DimT batch_size, + BernoulliRBMTrainer (layr::RBM& model, + eteq::iSession& sess, + teq::DimT batch_size, PybindT learning_rate, PybindT discount_factor, ErrorF err_func = ErrorF()) : model_(model), sess_(&sess), batch_size_(batch_size) { - visible_ = ead::make_variable_scalar(0, - ade::Shape({(ade::DimT) model.get_ninput(), batch_size})); + visible_ = eteq::make_variable_scalar(0, + teq::Shape({(teq::DimT) model.get_ninput(), batch_size})); hidden_sample_ = model.connect(visible_); visible_sample_ = model.backward_connect( @@ -78,27 +72,27 @@ struct BernoulliRBMTrainer final auto hidden_reconp = model.connect(visible_sample_); - auto grad_w = tenncor::sub( + auto grad_w = tenncor::matmul(tenncor::transpose( - ead::convert_to_node(visible_)), hidden_sample_), + eteq::convert_to_node(visible_)), hidden_sample_) - tenncor::matmul(tenncor::transpose( - visible_sample_), 
hidden_reconp)); + visible_sample_), hidden_reconp); auto grad_hb = tenncor::reduce_mean_1d( - tenncor::sub(hidden_sample_, hidden_reconp), 1); + hidden_sample_ - hidden_reconp, 1); auto grad_vb = tenncor::reduce_mean_1d( - tenncor::sub(ead::convert_to_node(visible_), visible_sample_), 1); + eteq::convert_to_node(visible_) - visible_sample_, 1); auto contents = model.get_contents(); - std::vector> vars; + std::vector> vars; vars.reserve(contents.size()); std::transform(contents.begin(), contents.end(), std::back_inserter(vars), - [](ade::TensptrT tens) + [](teq::TensptrT tens) { - return std::make_shared>( - std::static_pointer_cast>(tens)); + return std::make_shared>( + std::static_pointer_cast>(tens)); }); - eqns::VarErrsT varerrs = { + layr::VarErrsT varerrs = { {vars[0], grad_w}, {vars[1], grad_hb}, {vars[3], grad_vb}, @@ -106,14 +100,14 @@ struct BernoulliRBMTrainer final updates_ = bbernoulli_approx(varerrs, learning_rate, discount_factor); - ade::TensT to_track = { + teq::TensT to_track = { hidden_sample_->get_tensor(), visible_sample_->get_tensor(), }; to_track.reserve(updates_.size() + 1); if (err_func) { - error_ = err_func(ead::convert_to_node(visible_), visible_sample_); + error_ = err_func(eteq::convert_to_node(visible_), visible_sample_); to_track.push_back(error_->get_tensor()); } @@ -149,7 +143,7 @@ struct BernoulliRBMTrainer final if (nullptr == error_) { assign_groups(updates_, - [this](ead::TensSetT& updated) + [this](eteq::TensSetT& updated) { this->sess_->update(updated); }); @@ -157,35 +151,35 @@ struct BernoulliRBMTrainer final } assign_groups(updates_, - [this](ead::TensSetT& updated) + [this](eteq::TensSetT& updated) { this->sess_->update_target( - ead::TensSetT{this->error_->get_tensor().get()}, updated); + eteq::TensSetT{this->error_->get_tensor().get()}, updated); }); return error_->data()[0]; } private: - modl::RBM& model_; + layr::RBM& model_; - ead::VarptrT visible_ = nullptr; + eteq::VarptrT visible_ = nullptr; - ead::NodeptrT hidden_sample_ = nullptr; + eteq::NodeptrT hidden_sample_ = nullptr; - ead::NodeptrT visible_sample_ = nullptr; + eteq::NodeptrT visible_sample_ = nullptr; - ead::NodeptrT error_ = nullptr; + eteq::NodeptrT error_ = nullptr; // === updates && optimizer === - eqns::AssignGroupsT updates_; + layr::AssignGroupsT updates_; - ead::TensSetT assign_sources_; + eteq::TensSetT assign_sources_; - ead::iSession* sess_; + eteq::iSession* sess_; size_t batch_size_; }; } -#endif // MODL_RBM_TRAINER_HPP +#endif // LAYR_RBM_TRAINER_HPP diff --git a/tag/BUILD.bazel b/tag/BUILD.bazel index d20237ad4..35d28d402 100644 --- a/tag/BUILD.bazel +++ b/tag/BUILD.bazel @@ -25,7 +25,7 @@ cc_library( srcs = glob(["src/*.cpp"]), copts = ["-std=c++17"], deps = [ - "//ade:ade", + "//teq:teq", "@boost//:uuid", ], visibility = ["//visibility:public"], diff --git a/tag/README_TAG.md b/tag/README_TAG.md new file mode 100644 index 000000000..8857164af --- /dev/null +++ b/tag/README_TAG.md @@ -0,0 +1,3 @@ +# TAG + +Provides tags to label TEQ graphs for functional and debugging purposes diff --git a/tag/group.hpp b/tag/group.hpp index 0547fb4cf..769dfd97f 100644 --- a/tag/group.hpp +++ b/tag/group.hpp @@ -46,7 +46,7 @@ struct GroupRegistry final { GroupRegistry (TagRegistry& registry = get_reg()) : tag_reg_(registry) {} - void group_tag (ade::TensrefT tens, std::string tag) + void group_tag (teq::TensrefT tens, std::string tag) { tag_reg_.add_tag(tens, TagptrT(new GroupTag(tag))); @@ -68,28 +68,28 @@ struct GroupRegistry final GroupRegistry& get_group_reg (void); const 
std::string groups_key = get_reg().register_tagr("groups", -[](ade::TensrefT ref, std::string tag) +[](teq::TensrefT ref, std::string tag) { get_group_reg().group_tag(ref, tag); }); -void recursive_group_tag (ade::TensptrT tens, std::string group, - std::unordered_set stops, +void recursive_group_tag (teq::TensptrT tens, std::string group, + std::unordered_set stops, GroupRegistry& registry = get_group_reg()); using AGroupsT = std::map>; -using AdjMapT = std::unordered_map; +using AdjMapT = std::unordered_map; -void adjacencies (AdjMapT& out, ade::TensT roots, +void adjacencies (AdjMapT& out, teq::TensT roots, GroupRegistry& registry = get_group_reg()); -struct Subgraph final : public ade::iTraveler +struct Subgraph final : public teq::iTraveler { Subgraph (std::string group) : group_(group) {} /// Implementation of iTraveler - void visit (ade::iLeaf* leaf) override + void visit (teq::iLeaf* leaf) override { if (false == estd::has(content_, leaf)) { @@ -99,7 +99,7 @@ struct Subgraph final : public ade::iTraveler } /// Implementation of iTraveler - void visit (ade::iFunctor* func) override + void visit (teq::iFunctor* func) override { if (false == estd::has(content_, func)) { @@ -120,17 +120,17 @@ struct Subgraph final : public ade::iTraveler std::string group_; - std::unordered_set content_; + std::unordered_set content_; // todo: order subgraphs children somehow - std::unordered_map children_; + std::unordered_map children_; }; using SgraphptrT = std::shared_ptr; using SubgraphsT = std::unordered_set; -using SubgraphAssocsT = std::unordered_map; +using SubgraphAssocsT = std::unordered_map; void beautify_groups (SubgraphAssocsT& out, const AdjMapT& adjs); diff --git a/tag/prop.hpp b/tag/prop.hpp index c4ad6628a..528ed7645 100644 --- a/tag/prop.hpp +++ b/tag/prop.hpp @@ -41,12 +41,12 @@ struct PropertyRegistry final PropertyRegistry (TagRegistry& registry = get_reg()) : tag_reg_(registry) {} - void property_tag (ade::TensrefT tens, std::string property) + void property_tag (teq::TensrefT tens, std::string property) { tag_reg_.add_tag(tens, TagptrT(new PropTag(property))); } - bool has_property (const ade::iTensor* tens, std::string property) const; + bool has_property (const teq::iTensor* tens, std::string property) const; TagRegistry& tag_reg_; }; @@ -54,7 +54,7 @@ struct PropertyRegistry final PropertyRegistry& get_property_reg (void); const std::string props_key = get_reg().register_tagr("properties", -[](ade::TensrefT ref, std::string property) +[](teq::TensrefT ref, std::string property) { get_property_reg().property_tag(ref, property); }); diff --git a/tag/src/group.cpp b/tag/src/group.cpp index 861148fc2..c7174ba64 100644 --- a/tag/src/group.cpp +++ b/tag/src/group.cpp @@ -21,37 +21,37 @@ GroupRegistry& get_group_reg (void) return registry; } -void recursive_group_tag (ade::TensptrT tens, std::string group, - std::unordered_set stops, GroupRegistry& registry) +void recursive_group_tag (teq::TensptrT tens, std::string group, + std::unordered_set stops, GroupRegistry& registry) { recursive_tag(tens, stops, - [&](ade::TensrefT ref) + [&](teq::TensrefT ref) { registry.group_tag(ref, group); }); } -void adjacencies (AdjMapT& out, ade::TensT roots, +void adjacencies (AdjMapT& out, teq::TensT roots, GroupRegistry& registry) { - ade::HeightMatrix mat(roots); + teq::HeightMatrix mat(roots); boost::uuids::random_generator uuid_gen; for (auto it = mat.funcs_.rbegin(), et = mat.funcs_.rend(); it != et; ++it) { auto& funcs = *it; - for (ade::iFunctor* func : funcs) + for (teq::iFunctor* func : 
funcs) { TagRepsT tags = registry.tag_reg_.get_tags(func); std::vector groups; if (estd::get(groups, tags, groups_key)) { auto& children = func->get_children(); - std::unordered_set uchildren; + std::unordered_set uchildren; std::transform(children.begin(), children.end(), std::inserter(uchildren, uchildren.end()), - [](const ade::FuncArg& arg) + [](const teq::FuncArg& arg) { return arg.get_tensor().get(); }); @@ -69,7 +69,7 @@ void adjacencies (AdjMapT& out, ade::TensT roots, } auto& same_group = registry.groups_[group]; - for (ade::iTensor* child : uchildren) + for (teq::iTensor* child : uchildren) { // propagate unique gid set to child of same group auto it = same_group.find(TensKey(child)); @@ -83,7 +83,7 @@ void adjacencies (AdjMapT& out, ade::TensT roots, } } - for (ade::iLeaf* leaf : mat.leaves_) + for (teq::iLeaf* leaf : mat.leaves_) { auto tags = registry.tag_reg_.get_tags(leaf); std::vector groups; @@ -110,7 +110,7 @@ void beautify_groups (SubgraphAssocsT& out, const AdjMapT& adjs) std::unordered_map sgraphs; for (auto& gpair : adjs) { - ade::iTensor* tens = gpair.first; + teq::iTensor* tens = gpair.first; for (auto& idpair : gpair.second) { std::string group = idpair.first; @@ -125,7 +125,7 @@ void beautify_groups (SubgraphAssocsT& out, const AdjMapT& adjs) for (auto& sg : sgraphs) { - for (ade::iTensor* content : sg.second->content_) + for (teq::iTensor* content : sg.second->content_) { out[content].emplace(sg.second); } @@ -134,12 +134,12 @@ void beautify_groups (SubgraphAssocsT& out, const AdjMapT& adjs) void filter_head (SubgraphAssocsT& out, const SubgraphAssocsT& assocs) { - ade::GraphStat stat; + teq::GraphStat stat; for (auto& assoc_pair : assocs) { assoc_pair.first->accept(stat); } - std::unordered_map revhead; + std::unordered_map revhead; for (auto& sgpair : assocs) { const SubgraphsT& subgraphs = sgpair.second; @@ -147,7 +147,7 @@ void filter_head (SubgraphAssocsT& out, const SubgraphAssocsT& assocs) { if (estd::has(revhead, subgraph)) { - ade::iTensor*& oldhead = revhead[subgraph]; + teq::iTensor*& oldhead = revhead[subgraph]; if (stat.graphsize_[sgpair.first].upper_ > stat.graphsize_[oldhead].upper_) { diff --git a/tag/src/prop.cpp b/tag/src/prop.cpp index 465d9093d..983bdb553 100644 --- a/tag/src/prop.cpp +++ b/tag/src/prop.cpp @@ -15,7 +15,7 @@ TagRepsT PropTag::get_tags (void) const return out; } -bool PropertyRegistry::has_property (const ade::iTensor* tens, std::string property) const +bool PropertyRegistry::has_property (const teq::iTensor* tens, std::string property) const { auto reps = tag_reg_.get_tags(tens); auto it = reps.find(props_key); diff --git a/tag/src/tag.cpp b/tag/src/tag.cpp index a8c03f9f6..c22f4a26e 100644 --- a/tag/src/tag.cpp +++ b/tag/src/tag.cpp @@ -11,16 +11,16 @@ TagRegistry& get_reg (void) return registry; } -using RefMapT = std::unordered_map; +using RefMapT = std::unordered_map; -struct Tagger final : public ade::iTraveler +struct Tagger final : public teq::iTraveler { - Tagger (std::unordered_set stops, - std::function tag_op) : + Tagger (std::unordered_set stops, + std::function tag_op) : stops_(stops), tag_op_(tag_op) {} /// Implementation of iTraveler - void visit (ade::iLeaf* leaf) override + void visit (teq::iLeaf* leaf) override { if (false == estd::has(stops_, leaf)) { @@ -34,7 +34,7 @@ struct Tagger final : public ade::iTraveler } /// Implementation of iTraveler - void visit (ade::iFunctor* func) override + void visit (teq::iFunctor* func) override { if (false == estd::has(stops_, func)) { @@ -46,7 +46,7 @@ struct Tagger 
final : public ade::iTraveler auto& children = func->get_children(); for (auto& child : children) { - ade::TensptrT tens = child.get_tensor(); + teq::TensptrT tens = child.get_tensor(); owners_.emplace(tens.get(), tens); tens->accept(*this); } @@ -56,14 +56,14 @@ struct Tagger final : public ade::iTraveler RefMapT owners_; - std::unordered_set stops_; + std::unordered_set stops_; - std::function tag_op_; + std::function tag_op_; }; -void recursive_tag (ade::TensptrT root, - std::unordered_set stops, - std::function tag_op) +void recursive_tag (teq::TensptrT root, + std::unordered_set stops, + std::function tag_op) { Tagger tagger(stops, tag_op); tagger.owners_.emplace(root.get(), root); diff --git a/tag/tag.hpp b/tag/tag.hpp index e83d61638..17ddfdfab 100644 --- a/tag/tag.hpp +++ b/tag/tag.hpp @@ -1,7 +1,7 @@ #include #include -#include "ade/ade.hpp" +#include "teq/teq.hpp" #ifndef TAG_TAG_HPP #define TAG_TAG_HPP @@ -94,14 +94,14 @@ struct TagCollective final struct TensKey final { - TensKey (ade::TensrefT tens) : val_(tens.lock().get()), ref_(tens) {} + TensKey (teq::TensrefT tens) : val_(tens.lock().get()), ref_(tens) {} // used to match keys - TensKey (ade::iTensor* tens) : val_(tens) {} + TensKey (teq::iTensor* tens) : val_(tens) {} - TensKey (const ade::iTensor* tens) : val_(tens) {} + TensKey (const teq::iTensor* tens) : val_(tens) {} - operator const ade::iTensor*() const + operator const teq::iTensor*() const { return val_; } @@ -111,9 +111,9 @@ struct TensKey final return ref_.expired(); } - const ade::iTensor* val_; + const teq::iTensor* val_; - ade::TensrefT ref_; + teq::TensrefT ref_; }; struct TensKeyHash final @@ -130,13 +130,13 @@ inline bool operator == (const TensKey& lhs, const TensKey& rhs) return hasher(lhs) == hasher(rhs); } -using TagrF = std::function; +using TagrF = std::function; // todo: move tag registry to some session that claims global context // todo: make an interface for this struct TagRegistry final { - void add_tag (ade::TensrefT tens, TagptrT tag) + void add_tag (teq::TensrefT tens, TagptrT tag) { if (tens.expired()) { @@ -151,7 +151,7 @@ struct TagRegistry final registry_[tens].add(std::move(tag)); } - TagRepsT get_tags (const ade::iTensor* tens) + TagRepsT get_tags (const teq::iTensor* tens) { auto it = registry_.find(TensKey(tens)); if (registry_.end() == it || it->first.expired()) @@ -161,7 +161,7 @@ struct TagRegistry final return it->second.get_tags(); } - void move_tags (ade::TensrefT dest, const ade::iTensor* source) + void move_tags (teq::TensrefT dest, const teq::iTensor* source) { if (dest.expired()) { @@ -206,25 +206,25 @@ struct TagRegistry final TagRegistry& get_reg (void); -void recursive_tag (ade::TensptrT root, - std::unordered_set stops, - std::function tag_op); +void recursive_tag (teq::TensptrT root, + std::unordered_set stops, + std::function tag_op); -using LTensT = std::unordered_map>; +using LTensT = std::unordered_map>; using TTensT = std::unordered_map; -struct Query final : public ade::OnceTraveler +struct Query final : public teq::OnceTraveler { Query (TagRegistry& reg = get_reg()) : reg_(reg) {} - void visit_leaf (ade::iLeaf* leaf) override + void visit_leaf (teq::iLeaf* leaf) override { auto tags = reg_.get_tags(leaf); save_tags(tags, leaf); } - void visit_func (ade::iFunctor* func) override + void visit_func (teq::iFunctor* func) override { auto& children = func->get_children(); for (auto child : children) @@ -241,7 +241,7 @@ struct Query final : public ade::OnceTraveler TagRegistry& reg_; private: - void save_tags 
(TagRepsT& tag, ade::iTensor* tens) + void save_tags (TagRepsT& tag, teq::iTensor* tens) { for (auto& tpair : tag) { diff --git a/tag/test/common.hpp b/tag/test/common.hpp index 34d012070..7210e2332 100644 --- a/tag/test/common.hpp +++ b/tag/test/common.hpp @@ -1,15 +1,15 @@ -#include "ade/ileaf.hpp" +#include "teq/ileaf.hpp" #ifndef TAG_TEST_COMMON_HPP #define TAG_TEST_COMMON_HPP -struct MockTensor final : public ade::iLeaf +struct MockTensor final : public teq::iLeaf { MockTensor (void) = default; - MockTensor (ade::Shape shape) : shape_(shape) {} + MockTensor (teq::Shape shape) : shape_(shape) {} - const ade::Shape& shape (void) const override + const teq::Shape& shape (void) const override { return shape_; } @@ -49,7 +49,7 @@ struct MockTensor final : public ade::iLeaf return true; } - ade::Shape shape_; + teq::Shape shape_; }; #endif // TAG_TEST_COMMON_HPP diff --git a/tag/test/test_group.cpp b/tag/test/test_group.cpp index 2f3ebd04b..8f0976bba 100644 --- a/tag/test/test_group.cpp +++ b/tag/test/test_group.cpp @@ -16,11 +16,11 @@ TEST(GROUP, SingleTagAdjacency) tag::TagRegistry treg; tag::GroupRegistry registry(treg); { - ade::TensptrT tens = std::make_shared(); - ade::TensptrT tens2 = std::make_shared(); - ade::TensptrT f(ade::Functor::get(ade::Opcode{"MOCK", 2}, { - ade::identity_map(tens), - ade::identity_map(tens2), + teq::TensptrT tens = std::make_shared(); + teq::TensptrT tens2 = std::make_shared(); + teq::TensptrT f(teq::Functor::get(teq::Opcode{"MOCK", 2}, { + teq::identity_map(tens), + teq::identity_map(tens2), })); registry.group_tag(tens, "group2"); @@ -77,37 +77,37 @@ TEST(GROUP, RecursiveTagAdjacency) tag::TagRegistry treg; tag::GroupRegistry registry(treg); { - ade::TensptrT tens = std::make_shared(); - ade::TensptrT tens2 = std::make_shared(); - ade::TensptrT f(ade::Functor::get(ade::Opcode{"MOCK", 2}, { - ade::identity_map(tens), - ade::identity_map(tens2), + teq::TensptrT tens = std::make_shared(); + teq::TensptrT tens2 = std::make_shared(); + teq::TensptrT f(teq::Functor::get(teq::Opcode{"MOCK", 2}, { + teq::identity_map(tens), + teq::identity_map(tens2), })); - ade::TensptrT f2(ade::Functor::get(ade::Opcode{"MOCK", 2}, { - ade::identity_map(tens), - ade::identity_map(tens2), + teq::TensptrT f2(teq::Functor::get(teq::Opcode{"MOCK", 2}, { + teq::identity_map(tens), + teq::identity_map(tens2), })); tag::recursive_group_tag(tens, "group2", - std::unordered_set{}, + std::unordered_set{}, registry); tag::recursive_group_tag(tens2, "group2", - std::unordered_set{}, + std::unordered_set{}, registry); tag::recursive_group_tag(tens, "group7", - std::unordered_set{}, + std::unordered_set{}, registry); tag::recursive_group_tag(f, "group1", - std::unordered_set{}, + std::unordered_set{}, registry); tag::recursive_group_tag(f, "group3", - std::unordered_set{tens.get()}, + std::unordered_set{tens.get()}, registry); tag::recursive_group_tag(f, "group4", - std::unordered_set{tens.get(), tens2.get()}, + std::unordered_set{tens.get(), tens2.get()}, registry); tag::recursive_group_tag(f2, "group1", - std::unordered_set{}, + std::unordered_set{}, registry); tag::AdjMapT adjs; @@ -173,28 +173,28 @@ TEST(GROUP, Subgraph) tag::TagRegistry treg; tag::GroupRegistry registry(treg); { - ade::TensptrT tens = std::make_shared(); - ade::TensptrT tens2 = std::make_shared(); - ade::TensptrT f(ade::Functor::get(ade::Opcode{"MOCK", 2}, { - ade::identity_map(tens), - ade::identity_map(tens2), + teq::TensptrT tens = std::make_shared(); + teq::TensptrT tens2 = std::make_shared(); + teq::TensptrT 
f(teq::Functor::get(teq::Opcode{"MOCK", 2}, { + teq::identity_map(tens), + teq::identity_map(tens2), })); tag::AdjMapT adjs = { - std::pair{f.get(), + std::pair{f.get(), tag::AGroupsT{ {"group1", {"lytening"}}, {"group3", {"fyreball"}}, {"group4", {"frostbyte"}}, }}, - std::pair{tens.get(), + std::pair{tens.get(), tag::AGroupsT{ {"group7", {"kaostorm"}}, {"group2", {"mudslyde"}}, {"group1", {"lytening"}}, }}, - std::pair{tens2.get(), + std::pair{tens2.get(), tag::AGroupsT{ {"group2", {"sandstrum"}}, {"group1", {"lytening"}}, diff --git a/tag/test/test_prop.cpp b/tag/test/test_prop.cpp index 836379b90..0b6d5734c 100644 --- a/tag/test/test_prop.cpp +++ b/tag/test/test_prop.cpp @@ -15,9 +15,9 @@ TEST(PROP, Tag) { tag::TagRegistry treg; tag::PropertyRegistry registry(treg); - ade::iTensor* ptr; + teq::iTensor* ptr; { - ade::TensptrT tens = std::make_shared(); + teq::TensptrT tens = std::make_shared(); registry.property_tag(tens, "property4"); registry.property_tag(tens, "property1"); registry.property_tag(tens, "property2"); diff --git a/tag/test/test_tag.cpp b/tag/test/test_tag.cpp index b87aa6dcb..3b5b43444 100644 --- a/tag/test/test_tag.cpp +++ b/tag/test/test_tag.cpp @@ -55,10 +55,10 @@ struct MockTag final : public tag::iTag TEST(TAG, AddGet) { tag::TagRegistry registry; - ade::iTensor* ptr; - ade::TensrefT ref; + teq::iTensor* ptr; + teq::TensrefT ref; { - ade::TensptrT tens = std::make_shared(); + teq::TensptrT tens = std::make_shared(); registry.add_tag(tens, std::make_unique()); EXPECT_EQ(1, registry.registry_.size()); tag::TagRepsT reps = registry.get_tags(tens.get()); @@ -94,13 +94,13 @@ TEST(TAG, AddGet) TEST(TAG, AddMove) { tag::TagRegistry registry; - ade::TensrefT ref; - ade::iTensor* ptr; - ade::iTensor* ptr2; + teq::TensrefT ref; + teq::iTensor* ptr; + teq::iTensor* ptr2; { - ade::TensptrT tens = std::make_shared(); + teq::TensptrT tens = std::make_shared(); { - ade::TensptrT tens2 = std::make_shared(); + teq::TensptrT tens2 = std::make_shared(); // move non tagged tens to non tagged tens registry.move_tags(tens2, tens.get()); diff --git a/ade/BUILD.bazel b/teq/BUILD.bazel similarity index 88% rename from ade/BUILD.bazel rename to teq/BUILD.bazel index 5db90e0fe..89fc0d119 100644 --- a/ade/BUILD.bazel +++ b/teq/BUILD.bazel @@ -28,7 +28,7 @@ filegroup( ######### LIBRARIES ######### cc_library( - name = "ade", + name = "teq", hdrs = glob(["*.hpp"]), srcs = glob(["src/*.cpp"]), copts = ["-std=c++17", "-DSDIM_BYTES=2"], @@ -41,9 +41,9 @@ cc_library( cc_test( name = "test", size = "medium", - srcs = ["//ade:test_srcs"], + srcs = ["//teq:test_srcs"], deps = [ - "//ade:ade", + "//teq:teq", "//testutil:tutil", "@gtest//:gtest", "@com_github_mingkaic_cppkg//exam:exam", @@ -55,9 +55,9 @@ cc_test( cc_binary( name = "benchmark", - srcs = ["//ade:benchmark_srcs"], + srcs = ["//teq:benchmark_srcs"], deps = [ - "//ade:ade", + "//teq:teq", "@com_github_google_benchmark//:benchmark", ], copts = ["-std=c++17"], diff --git a/ade/README_ADE.md b/teq/README_TEQ.md similarity index 81% rename from ade/README_ADE.md rename to teq/README_TEQ.md index 6924631b1..0452b20e1 100644 --- a/ade/README_ADE.md +++ b/teq/README_TEQ.md @@ -1,10 +1,10 @@ -# ADE (Automatic Differentiation Engine) +# TEQ (Tensor EQuations) -Provides the framework for building tensor equations. +Framework for building tensor equations. 
## Components -ADE comprises of 4 types of components: +TEQ comprises of 4 types of components: - Coordinates - Shapes diff --git a/teq/bm/benchmark.cpp b/teq/bm/benchmark.cpp new file mode 100644 index 000000000..3f907a2e8 --- /dev/null +++ b/teq/bm/benchmark.cpp @@ -0,0 +1,173 @@ +#include + +#include "benchmark/benchmark.h" + +#include "teq/coord.hpp" + + +static std::random_device rnd_device; +static std::mt19937 mersenne_engine(rnd_device()); + + +template +static std::vector random_vector ( + teq::DimT lower, teq::DimT upper) +{ + std::vector out(N); + std::uniform_int_distribution dist(lower, upper); + std::generate(out.begin(), out.end(), + [&dist]() { return dist(mersenne_engine); }); + return out; +} + + +static teq::NElemT random_bignum (teq::NElemT lower, teq::NElemT upper) +{ + std::uniform_int_distribution dist(lower, upper); + return dist(mersenne_engine); +} + + +static void BM_MakeReduce(benchmark::State& state) +{ + std::vector slist; + for (auto _ : state) + { + state.PauseTiming(); + slist = random_vector(1, 255); + teq::RankT rank = random_bignum(0, teq::rank_cap - 1); + state.ResumeTiming(); + teq::reduce(rank, + std::vector(slist.begin() + rank, slist.end())); + } +} + +BENCHMARK(BM_MakeReduce); + + +static void BM_CoordFromIndex(benchmark::State& state) +{ + std::vector slist; + for (auto _ : state) + { + state.PauseTiming(); + slist = random_vector(1, 255); + teq::Shape shape(slist); + teq::NElemT index = random_bignum(0, shape.n_elems()); + state.ResumeTiming(); + teq::coordinate(shape, index); + } +} + +BENCHMARK(BM_CoordFromIndex); + + +static void BM_IndexFromCoord(benchmark::State& state) +{ + teq::CoordT coord; + std::vector slist; + for (auto _ : state) + { + state.PauseTiming(); + slist = random_vector(1, 255); + teq::Shape shape(slist); + teq::NElemT index = random_bignum(0, shape.n_elems()); + coord = teq::coordinate(shape, index); + state.ResumeTiming(); + teq::index(shape, coord); + } +} + +BENCHMARK(BM_IndexFromCoord); + + +static void BM_CoordReduce(benchmark::State& state) +{ + teq::CoordT outcoord, coord; + std::vector slist; + for (auto _ : state) + { + state.PauseTiming(); + slist = random_vector(1, 255); + teq::Shape shape(slist); + teq::NElemT index = random_bignum(0, shape.n_elems()); + coord = teq::coordinate(shape, index); + teq::RankT rank = random_bignum(0, teq::rank_cap - 1); + auto reducer = teq::reduce(rank, + std::vector(slist.begin() + rank, slist.end())); + state.ResumeTiming(); + reducer->forward(outcoord.begin(), coord.begin()); + } +} + +BENCHMARK(BM_CoordReduce); + + +static void BM_ReduceReverse(benchmark::State& state) +{ + std::vector slist; + for (auto _ : state) + { + state.PauseTiming(); + slist = random_vector(1, 255); + teq::RankT rank = random_bignum(0, teq::rank_cap - 1); + auto reducer = teq::reduce(rank, + std::vector(slist.begin() + rank, slist.end())); + state.ResumeTiming(); + delete reducer->reverse(); + } +} + +BENCHMARK(BM_ReduceReverse); + + +static void BM_RedPermConnect(benchmark::State& state) +{ + std::vector slist; + for (auto _ : state) + { + state.PauseTiming(); + slist = random_vector(1, 255); + teq::RankT rank = random_bignum(0, teq::rank_cap - 1); + std::vector indices(teq::rank_cap); + std::iota(indices.begin(), indices.end(), 0); + std::shuffle(indices.begin(), indices.end(), mersenne_engine); + auto permuter = teq::permute(indices); + auto reducer = teq::reduce(rank, + std::vector(slist.begin() + rank, slist.end())); + state.ResumeTiming(); + delete reducer->connect(*permuter); + } +} + 
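// A minimal usage sketch of the coordinate-map API timed above (a sketch
// only: the element types of the braced lists are assumptions, since this
// patch rendering elides the template arguments):
static void coord_usage_sketch ()
{
	teq::Shape shape({3, 4, 5});                      // 60-element shape
	teq::CoordptrT reducer = teq::reduce(1, {4, 5});  // divide ranks 1 and 2 by 4 and 5
	teq::CoordT in = teq::coordinate(shape, 42);      // flat index -> coordinate
	teq::CoordT out;
	reducer->forward(out.begin(), in.begin());        // apply the mapping
	delete reducer->reverse();                        // reverse() returns an owned iCoordMap*
}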
+BENCHMARK(BM_RedPermConnect); + + +struct SilentLogger final : public logs::iLogger +{ + void log (size_t msg_level, std::string msg) const override {} + + size_t get_log_level (void) const override { return 0; } + + void set_log_level (size_t log_level) override {} + + void warn (std::string msg) const override {} + + void error (std::string msg) const override {} + + void fatal (std::string msg) const override + { + throw std::runtime_error(msg); + } +}; + + +int main(int argc, char** argv) +{ + std::shared_ptr logger = std::make_shared(); + set_logger(logger); + + ::benchmark::Initialize(&argc, argv); + ::benchmark::RunSpecifiedBenchmarks(); + return 0; +} diff --git a/ade/coord.hpp b/teq/coord.hpp similarity index 96% rename from ade/coord.hpp rename to teq/coord.hpp index 800d9d8da..6175150fd 100644 --- a/ade/coord.hpp +++ b/teq/coord.hpp @@ -1,6 +1,6 @@ /// /// coord.hpp -/// ade +/// teq /// /// Purpose: /// Define shape/coordinate transformation functions @@ -8,12 +8,12 @@ #include -#include "ade/matops.hpp" +#include "teq/matops.hpp" -#ifndef ADE_COORD_HPP -#define ADE_COORD_HPP +#ifndef TEQ_COORD_HPP +#define TEQ_COORD_HPP -namespace ade +namespace teq { /// Interface for transforming coordinates and reversing the coordinate @@ -82,7 +82,7 @@ struct CoordMap final : public iCoordMap /// Implementation of iCoordMap std::string to_string (void) const override { - return ade::to_string(fwd_); + return teq::to_string(fwd_); } /// Implementation of iCoordMap @@ -140,4 +140,4 @@ CoordptrT flip (RankT dim); } -#endif // ADE_COORD_HPP +#endif // TEQ_COORD_HPP diff --git a/ade/funcarg.hpp b/teq/funcarg.hpp similarity index 94% rename from ade/funcarg.hpp rename to teq/funcarg.hpp index 47de9b271..edc760e45 100644 --- a/ade/funcarg.hpp +++ b/teq/funcarg.hpp @@ -1,18 +1,18 @@ /// /// funcarg.hpp -/// ade +/// teq /// /// Purpose: /// Define functor argument wrapper to carryover shape and coordinate mappers /// -#include "ade/itensor.hpp" -#include "ade/coord.hpp" +#include "teq/itensor.hpp" +#include "teq/coord.hpp" -#ifndef ADE_FUNCARG_HPP -#define ADE_FUNCARG_HPP +#ifndef TEQ_FUNCARG_HPP +#define TEQ_FUNCARG_HPP -namespace ade +namespace teq { Shape apply_shaper (const CoordptrT& shaper, Shape inshape); @@ -100,7 +100,7 @@ struct FuncArg final /// Type of functor arguments using ArgsT = std::vector; -/// Vector representation of ade tensor pointers +/// Vector representation of teq tensor pointers using TensT = std::vector; /// Return FuncArg that identity maps input tensor @@ -138,4 +138,4 @@ ArgsT to_args (TensT tens); } -#endif // ADE_FUNCARG_HPP +#endif // TEQ_FUNCARG_HPP diff --git a/ade/functor.hpp b/teq/functor.hpp similarity index 91% rename from ade/functor.hpp rename to teq/functor.hpp index 2e1fb9d7e..6bf45e98b 100644 --- a/ade/functor.hpp +++ b/teq/functor.hpp @@ -1,17 +1,17 @@ /// /// functor.hpp -/// ade +/// teq /// /// Purpose: /// Define functor nodes of an equation graph /// -#include "ade/ifunctor.hpp" +#include "teq/ifunctor.hpp" -#ifndef ADE_FUNCTOR_HPP -#define ADE_FUNCTOR_HPP +#ifndef TEQ_FUNCTOR_HPP +#define TEQ_FUNCTOR_HPP -namespace ade +namespace teq { /// Functor of the graph mapping to operators specified by opcode argument @@ -81,7 +81,7 @@ struct Functor final : public iFunctor /// Implementation of iFunctor void update_child (FuncArg arg, size_t index) override { - logs::warn("ade::Functor does not allow editing of children"); + logs::warn("teq::Functor does not allow editing of children"); } private: @@ -104,4 +104,4 @@ struct Functor final : public 
iFunctor } -#endif // ADE_FUNCTOR_HPP +#endif // TEQ_FUNCTOR_HPP diff --git a/ade/grad_def.hpp b/teq/grad_def.hpp similarity index 96% rename from ade/grad_def.hpp rename to teq/grad_def.hpp index 70f258926..7b8d160a3 100644 --- a/ade/grad_def.hpp +++ b/teq/grad_def.hpp @@ -1,6 +1,6 @@ /// /// grad_def.hpp -/// ade +/// teq /// /// Purpose: /// Define gradient builder interface for building derivatives @@ -8,12 +8,12 @@ #include -#include "ade/traveler.hpp" +#include "teq/traveler.hpp" -#ifndef ADE_GRAD_DEF_HPP -#define ADE_GRAD_DEF_HPP +#ifndef TEQ_GRAD_DEF_HPP +#define TEQ_GRAD_DEF_HPP -namespace ade +namespace teq { /// Define manditory definitions required for tensor differentiation @@ -142,4 +142,4 @@ struct iGradientBuilder } -#endif // ADE_GRAD_DEF_HPP +#endif // TEQ_GRAD_DEF_HPP diff --git a/ade/idata.hpp b/teq/idata.hpp similarity index 87% rename from ade/idata.hpp rename to teq/idata.hpp index 4dc6d6c98..5a7473ebd 100644 --- a/ade/idata.hpp +++ b/teq/idata.hpp @@ -1,6 +1,6 @@ /// /// ileaf.hpp -/// ade +/// teq /// /// Purpose: /// Define common interface for node unveiling data information @@ -9,10 +9,10 @@ #include #include -#ifndef ADE_IDATA_HPP -#define ADE_IDATA_HPP +#ifndef TEQ_IDATA_HPP +#define TEQ_IDATA_HPP -namespace ade +namespace teq { /// Interface for unveiling data @@ -38,4 +38,4 @@ struct iData } -#endif // ADE_IDATA_HPP +#endif // TEQ_IDATA_HPP diff --git a/ade/ifunctor.hpp b/teq/ifunctor.hpp similarity index 87% rename from ade/ifunctor.hpp rename to teq/ifunctor.hpp index d721a0669..29d04d677 100644 --- a/ade/ifunctor.hpp +++ b/teq/ifunctor.hpp @@ -1,17 +1,17 @@ /// /// functor.hpp -/// ade +/// teq /// /// Purpose: /// Define functor nodes of an equation graph /// -#include "ade/funcarg.hpp" +#include "teq/funcarg.hpp" -#ifndef ADE_IFUNCTOR_HPP -#define ADE_IFUNCTOR_HPP +#ifndef TEQ_IFUNCTOR_HPP +#define TEQ_IFUNCTOR_HPP -namespace ade +namespace teq { /// Encoding of operation @@ -50,4 +50,4 @@ using FuncptrT = std::shared_ptr; } -#endif // ADE_IFUNCTOR_HPP +#endif // TEQ_IFUNCTOR_HPP diff --git a/ade/ileaf.hpp b/teq/ileaf.hpp similarity index 78% rename from ade/ileaf.hpp rename to teq/ileaf.hpp index 02f47e481..d9773e3e3 100644 --- a/ade/ileaf.hpp +++ b/teq/ileaf.hpp @@ -1,18 +1,18 @@ /// /// ileaf.hpp -/// ade +/// teq /// /// Purpose: /// Define leafs for tensor equation graph /// -#include "ade/itensor.hpp" -#include "ade/idata.hpp" +#include "teq/itensor.hpp" +#include "teq/idata.hpp" -#ifndef ADE_ILEAF_HPP -#define ADE_ILEAF_HPP +#ifndef TEQ_ILEAF_HPP +#define TEQ_ILEAF_HPP -namespace ade +namespace teq { /// Leaf of the graph commonly representing the variable in an equation @@ -35,4 +35,4 @@ using LeafptrT = std::shared_ptr; } -#endif // ADE_ILEAF_HPP +#endif // TEQ_ILEAF_HPP diff --git a/ade/iopfunc.hpp b/teq/iopfunc.hpp similarity index 74% rename from ade/iopfunc.hpp rename to teq/iopfunc.hpp index 23414ef20..725b8c119 100644 --- a/ade/iopfunc.hpp +++ b/teq/iopfunc.hpp @@ -1,19 +1,19 @@ /// /// opfunc.hpp -/// ade +/// teq /// /// Purpose: /// Define functor nodes directly hold/manipulate data /// This differs from Functor which should not directly manipulate data /// -#include "ade/ifunctor.hpp" -#include "ade/idata.hpp" +#include "teq/ifunctor.hpp" +#include "teq/idata.hpp" -#ifndef ADE_OPFUNC_HPP -#define ADE_OPFUNC_HPP +#ifndef TEQ_OPFUNC_HPP +#define TEQ_OPFUNC_HPP -namespace ade +namespace teq { /// A functor node with direct access to evaluated data @@ -27,4 +27,4 @@ struct iOperableFunc : public iFunctor, public iData } -#endif // 
ADE_OPFUNC_HPP +#endif // TEQ_OPFUNC_HPP diff --git a/ade/itensor.hpp b/teq/itensor.hpp similarity index 88% rename from ade/itensor.hpp rename to teq/itensor.hpp index 631351462..3114a683d 100644 --- a/ade/itensor.hpp +++ b/teq/itensor.hpp @@ -1,17 +1,17 @@ /// /// itensor.hpp -/// ade +/// teq /// /// Purpose: /// Define interfaces and building blocks for an equation graph /// -#include "ade/shape.hpp" +#include "teq/shape.hpp" -#ifndef ADE_INTERFACE_HPP -#define ADE_INTERFACE_HPP +#ifndef TEQ_INTERFACE_HPP +#define TEQ_INTERFACE_HPP -namespace ade +namespace teq { struct iLeaf; @@ -53,4 +53,4 @@ using TensrefT = std::weak_ptr; } -#endif // ADE_INTERFACE_HPP +#endif // TEQ_INTERFACE_HPP diff --git a/ade/matops.hpp b/teq/matops.hpp similarity index 82% rename from ade/matops.hpp rename to teq/matops.hpp index 3dcf2b182..5471e4e5b 100644 --- a/ade/matops.hpp +++ b/teq/matops.hpp @@ -1,21 +1,21 @@ /// /// matops.hpp -/// ade +/// teq /// /// Purpose: /// Define matrix operations for coordinate transformation -/// This functions are here to avoid external dependencies in ADE +/// This functions are here to avoid external dependencies in TEQ /// #include #include -#include "ade/shape.hpp" +#include "teq/shape.hpp" -#ifndef ADE_MATOPS_HPP -#define ADE_MATOPS_HPP +#ifndef TEQ_MATOPS_HPP +#define TEQ_MATOPS_HPP -namespace ade +namespace teq { /// Number of rows and columns for the homogeneous matrix @@ -41,4 +41,4 @@ void matmul (MatrixT out, const MatrixT& lhs, const MatrixT& rhs); } -#endif /// ADE_MATOPS_HPP +#endif /// TEQ_MATOPS_HPP diff --git a/ade/shape.hpp b/teq/shape.hpp similarity index 98% rename from ade/shape.hpp rename to teq/shape.hpp index 5817b2b18..b9a926258 100644 --- a/ade/shape.hpp +++ b/teq/shape.hpp @@ -1,6 +1,6 @@ /// /// shape.hpp -/// ade +/// teq /// /// Purpose: /// Define shapes models and coordinate to flattened index mapping @@ -13,10 +13,10 @@ #include "logs/logs.hpp" -#ifndef ADE_SHAPE_HPP -#define ADE_SHAPE_HPP +#ifndef TEQ_SHAPE_HPP +#define TEQ_SHAPE_HPP -namespace ade +namespace teq { /// Type used for shape rank @@ -220,4 +220,4 @@ CoordT coordinate (Shape shape, NElemT idx); } -#endif // ADE_SHAPE_HPP +#endif // TEQ_SHAPE_HPP diff --git a/ade/src/coord.cpp b/teq/src/coord.cpp similarity index 98% rename from ade/src/coord.cpp rename to teq/src/coord.cpp index 3f03eee5f..2990bc460 100644 --- a/ade/src/coord.cpp +++ b/teq/src/coord.cpp @@ -1,8 +1,8 @@ -#include "ade/coord.hpp" +#include "teq/coord.hpp" -#ifdef ADE_COORD_HPP +#ifdef TEQ_COORD_HPP -namespace ade +namespace teq { using WorkArrT = std::array; diff --git a/ade/src/funcarg.cpp b/teq/src/funcarg.cpp similarity index 96% rename from ade/src/funcarg.cpp rename to teq/src/funcarg.cpp index a140c4d5a..c5ad9086b 100644 --- a/ade/src/funcarg.cpp +++ b/teq/src/funcarg.cpp @@ -1,8 +1,8 @@ -#include "ade/funcarg.hpp" +#include "teq/funcarg.hpp" -#ifdef ADE_FUNCARG_HPP +#ifdef TEQ_FUNCARG_HPP -namespace ade +namespace teq { Shape apply_shaper (const CoordptrT& shaper, Shape inshape) diff --git a/ade/src/matops.cpp b/teq/src/matops.cpp similarity index 98% rename from ade/src/matops.cpp rename to teq/src/matops.cpp index 5af39c277..9387c57ab 100644 --- a/ade/src/matops.cpp +++ b/teq/src/matops.cpp @@ -1,8 +1,8 @@ -#include "ade/matops.hpp" +#include "teq/matops.hpp" -#ifdef ADE_MATOPS_HPP +#ifdef TEQ_MATOPS_HPP -namespace ade +namespace teq { using AugMatrixT = double[mat_dim][mat_dim * 2]; diff --git a/ade/src/shape.cpp b/teq/src/shape.cpp similarity index 94% rename from ade/src/shape.cpp rename to 
teq/src/shape.cpp index 9909886fe..c11e5a6c0 100644 --- a/ade/src/shape.cpp +++ b/teq/src/shape.cpp @@ -1,8 +1,8 @@ -#include "ade/shape.hpp" +#include "teq/shape.hpp" -#ifdef ADE_SHAPE_HPP +#ifdef TEQ_SHAPE_HPP -namespace ade +namespace teq { NElemT index (Shape shape, CoordT coord) diff --git a/ade/src/traveler.cpp b/teq/src/traveler.cpp similarity index 90% rename from ade/src/traveler.cpp rename to teq/src/traveler.cpp index e909f492c..f30bac5d7 100644 --- a/ade/src/traveler.cpp +++ b/teq/src/traveler.cpp @@ -1,8 +1,8 @@ -#include "ade/traveler.hpp" +#include "teq/traveler.hpp" -#ifdef ADE_TRAVELER_HPP +#ifdef TEQ_TRAVELER_HPP -namespace ade +namespace teq { struct OwnerTracker final : public OnceTraveler diff --git a/teq/teq.hpp b/teq/teq.hpp new file mode 100644 index 000000000..aaff6acd5 --- /dev/null +++ b/teq/teq.hpp @@ -0,0 +1,11 @@ +/// +/// teq.hpp +/// teq +/// +/// Purpose: +/// Collectively include all teq header files +/// + +#include "teq/functor.hpp" +#include "teq/traveler.hpp" +#include "teq/iopfunc.hpp" diff --git a/ade/test/common.hpp b/teq/test/common.hpp similarity index 67% rename from ade/test/common.hpp rename to teq/test/common.hpp index 5a8a90842..fd0c177c3 100644 --- a/ade/test/common.hpp +++ b/teq/test/common.hpp @@ -1,17 +1,17 @@ -#include "ade/ileaf.hpp" +#include "teq/ileaf.hpp" -#ifndef ADE_TEST_COMMON_HPP -#define ADE_TEST_COMMON_HPP +#ifndef TEQ_TEST_COMMON_HPP +#define TEQ_TEST_COMMON_HPP -struct MockTensor : public ade::iLeaf +struct MockTensor : public teq::iLeaf { MockTensor (void) = default; - MockTensor (ade::Shape shape) : shape_(shape) {} + MockTensor (teq::Shape shape) : shape_(shape) {} virtual ~MockTensor (void) = default; - const ade::Shape& shape (void) const override + const teq::Shape& shape (void) const override { return shape_; } @@ -51,7 +51,7 @@ struct MockTensor : public ade::iLeaf return true; } - ade::Shape shape_; + teq::Shape shape_; }; -#endif // ADE_TEST_COMMON_HPP +#endif // TEQ_TEST_COMMON_HPP diff --git a/pll/test/main.cpp b/teq/test/main.cpp similarity index 100% rename from pll/test/main.cpp rename to teq/test/main.cpp diff --git a/ade/test/test_coord.cpp b/teq/test/test_coord.cpp similarity index 65% rename from ade/test/test_coord.cpp rename to teq/test/test_coord.cpp index 6a7dd3610..2ea01002b 100644 --- a/ade/test/test_coord.cpp +++ b/teq/test/test_coord.cpp @@ -6,7 +6,7 @@ #include "exam/exam.hpp" -#include "ade/coord.hpp" +#include "teq/coord.hpp" TEST(COORD, Forward) @@ -33,47 +33,47 @@ TEST(COORD, Forward) 0.2458311259, 0.6726336808, 0.8680681183, 0.2344609279, 0.2667416547, 0.7905403230, 0.1139956031, 0.7112792746, 0.5421166290, 0.6555476101, 0.7982603464, 0.9427891524, 0.5630265226, 0.0529621550, 0.0767490955, 0.9764540804, 0.0229466953, 0.0357362313, }; - ade::CoordMap lhs([&indata](ade::MatrixT m) + teq::CoordMap lhs([&indata](teq::MatrixT m) { - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { - m[i][j] = indata[i * ade::mat_dim + j]; + m[i][j] = indata[i * teq::mat_dim + j]; } } }); - ade::CoordMap rhs([&indata2](ade::MatrixT m) + teq::CoordMap rhs([&indata2](teq::MatrixT m) { - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { - m[i][j] = indata2[i * ade::mat_dim + j]; + m[i][j] = indata2[i * teq::mat_dim + j]; } } }); - 
ade::MatrixT expected; - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + teq::MatrixT expected; + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { expected[i][j] = 0; - for (ade::RankT k = 0; k < ade::mat_dim; ++k) + for (teq::RankT k = 0; k < teq::mat_dim; ++k) { - expected[i][j] += indata[i * ade::mat_dim + k] * indata2[k * ade::mat_dim + j]; + expected[i][j] += indata[i * teq::mat_dim + k] * indata2[k * teq::mat_dim + j]; } } } - ade::iCoordMap* res = lhs.connect(rhs); - res->access([&expected](const ade::MatrixT& m) + teq::iCoordMap* res = lhs.connect(rhs); + res->access([&expected](const teq::MatrixT& m) { - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { EXPECT_EQ(expected[i][j], m[i][j]); } @@ -97,32 +97,32 @@ TEST(COORD, Reverse) 0.2815274054, 0.4986186274, 0.4177728965, 0.5379782806, 0.9728425173, 0.8384357553, 0.1148654580, 0.6567525656, 0.4295099714, 0.7243125736, 0.5892803635, 0.2597629537, 0.8375568398, 0.8496011347, 0.2944557711, 0.6930016846, 0.7247803680, 0.4856868442, }; - ade::CoordMap fwd([&indata](ade::MatrixT m) + teq::CoordMap fwd([&indata](teq::MatrixT m) { - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { - m[i][j] = indata[i * ade::mat_dim + j]; + m[i][j] = indata[i * teq::mat_dim + j]; } } }); - ade::iCoordMap* rev = fwd.reverse(); + teq::iCoordMap* rev = fwd.reverse(); - ade::MatrixT out; - rev->access([&out, &fwd](const ade::MatrixT& bwd) + teq::MatrixT out; + rev->access([&out, &fwd](const teq::MatrixT& bwd) { - fwd.access([&out, &bwd](const ade::MatrixT& fwd) + fwd.access([&out, &bwd](const teq::MatrixT& fwd) { - ade::matmul(out, fwd, bwd); + teq::matmul(out, fwd, bwd); }); }); // expect matmul is identity - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { if (i == j) { @@ -142,13 +142,13 @@ TEST(COORD, Reverse) TEST(COORD, Identity) { std::string idstr; - ade::identity->access( - [&](const ade::MatrixT& mat) + teq::identity->access( + [&](const teq::MatrixT& mat) { - idstr = ade::to_string(mat); + idstr = teq::to_string(mat); }); - std::string idstr2 = ade::identity->to_string(); + std::string idstr2 = teq::identity->to_string(); EXPECT_STREQ("[[1\\0\\0\\0\\0\\0\\0\\0\\0]\\\n" "[0\\1\\0\\0\\0\\0\\0\\0\\0]\\\n" @@ -161,12 +161,12 @@ TEST(COORD, Identity) "[0\\0\\0\\0\\0\\0\\0\\0\\1]]", idstr.c_str()); EXPECT_STREQ(idstr.c_str(), idstr2.c_str()); - ade::CoordT fwd_out; - ade::CoordT icoord = { + teq::CoordT fwd_out; + teq::CoordT icoord = { 42, 12, 85, 7, 82, 91, 2, 34, }; - ade::identity->forward(fwd_out.begin(), icoord.begin()); + teq::identity->forward(fwd_out.begin(), icoord.begin()); EXPECT_ARREQ(icoord, fwd_out); } @@ -174,11 +174,11 @@ TEST(COORD, Identity) TEST(COORD, Reduce) { size_t rank = 5; - std::vector red = {22, 32, 2}; - ade::CoordptrT reducer = ade::reduce(rank, red); + std::vector red = {22, 32, 2}; + teq::CoordptrT reducer = teq::reduce(rank, red); - ade::CoordT fwd_out; - ade::CoordT icoord = { + teq::CoordT fwd_out; + teq::CoordT icoord = { 211.6172349153, 3.6941314330, 3.3471187148, 24.3511302088, 17.8520169468, 
99.9911659058, 7.2182000783, 6.4776819746 }; @@ -188,63 +188,63 @@ TEST(COORD, Reduce) { EXPECT_EQ(icoord[i], fwd_out[i]) << i; } - for (size_t i = rank; i < ade::rank_cap; ++i) + for (size_t i = rank; i < teq::rank_cap; ++i) { EXPECT_DOUBLE_EQ(icoord[i] / red[i - rank], fwd_out[i]) << "red=" << red[i - rank] << ",i=" << i; } - EXPECT_FATAL(ade::reduce(rank, {0}), "cannot reduce using zero dimensions [0]"); + EXPECT_FATAL(teq::reduce(rank, {0}), "cannot reduce using zero dimensions [0]"); std::string fatalmsg = fmts::sprintf( "cannot reduce shape rank %d beyond rank_cap with n_red %d", rank + 1, red.size()); - EXPECT_FATAL(ade::reduce(rank + 1, red), fatalmsg.c_str()); + EXPECT_FATAL(teq::reduce(rank + 1, red), fatalmsg.c_str()); - EXPECT_WARN(ade::reduce(0, {}), "reducing scalar ... will do nothing"); + EXPECT_WARN(teq::reduce(0, {}), "reducing scalar ... will do nothing"); } TEST(COORD, Extend) { size_t rank = 3; - std::vector ext = {12, 21, 8, 4, 52}; - ade::CoordptrT extender = ade::extend(rank, ext); + std::vector ext = {12, 21, 8, 4, 52}; + teq::CoordptrT extender = teq::extend(rank, ext); - ade::CoordT fwd_out; - ade::CoordT incoord = {142.2, 42.17, 33.292, 33, 231.5, 2.33, 96.4, 1.23}; + teq::CoordT fwd_out; + teq::CoordT incoord = {142.2, 42.17, 33.292, 33, 231.5, 2.33, 96.4, 1.23}; extender->forward(fwd_out.begin(), incoord.begin()); for (size_t i = 0; i < rank; ++i) { EXPECT_EQ(incoord[i], fwd_out[i]) << i; } - for (size_t i = rank; i < ade::rank_cap; ++i) + for (size_t i = rank; i < teq::rank_cap; ++i) { EXPECT_DOUBLE_EQ(incoord[i] * ext[i - rank], fwd_out[i]) << "ext=" << ext[i - rank] << ",i=" << i; } - EXPECT_FATAL(ade::extend(rank, {0}), "cannot extend using zero dimensions [0]"); + EXPECT_FATAL(teq::extend(rank, {0}), "cannot extend using zero dimensions [0]"); std::string fatalmsg = fmts::sprintf( "cannot extend shape rank %d beyond rank_cap with n_ext %d", rank + 1, ext.size()); - EXPECT_FATAL(ade::extend(rank + 1, ext), fatalmsg.c_str()); + EXPECT_FATAL(teq::extend(rank + 1, ext), fatalmsg.c_str()); - EXPECT_WARN(ade::extend(0, {}), "extending with empty vector ... will do nothing"); + EXPECT_WARN(teq::extend(0, {}), "extending with empty vector ... will do nothing"); } TEST(COORD, Permute) { - std::vector perm = {4, 2, 3, 7, 0, 1}; - ade::CoordptrT permuter = ade::permute(perm); - std::array permed; + std::vector perm = {4, 2, 3, 7, 0, 1}; + teq::CoordptrT permuter = teq::permute(perm); + std::array permed; permed.fill(false); - for (ade::DimT p : perm) + for (teq::DimT p : perm) { permed[p] = true; } - for (size_t i = 0; i < ade::rank_cap; ++i) + for (size_t i = 0; i < teq::rank_cap; ++i) { if (false == permed[i]) { @@ -252,70 +252,70 @@ TEST(COORD, Permute) } } - ade::CoordT fwd_out; - ade::CoordT icoord = {12, 82, 20, 31, 49, 1, 1, 1}; + teq::CoordT fwd_out; + teq::CoordT icoord = {12, 82, 20, 31, 49, 1, 1, 1}; permuter->forward(fwd_out.begin(), icoord.begin()); - for (size_t i = 0; i < ade::rank_cap; ++i) + for (size_t i = 0; i < teq::rank_cap; ++i) { EXPECT_EQ(icoord[perm[i]], fwd_out[i]); } - EXPECT_WARN(ade::permute({}), "permuting with same dimensions ... will do nothing"); + EXPECT_WARN(teq::permute({}), "permuting with same dimensions ... 
will do nothing"); } TEST(COORD, Flip) { size_t dim = 3; - ade::CoordptrT flipper = ade::flip(dim); + teq::CoordptrT flipper = teq::flip(dim); - ade::CoordT fwd_out; - ade::CoordT icoord = {23, 66, 72, 83, 91, 1, 31, 21}; + teq::CoordT fwd_out; + teq::CoordT icoord = {23, 66, 72, 83, 91, 1, 31, 21}; flipper->forward(fwd_out.begin(), icoord.begin()); for (size_t i = 0; i < dim; ++i) { EXPECT_EQ(icoord[i], fwd_out[i]) << i; } - for (size_t i = dim + 1; i < ade::rank_cap; ++i) + for (size_t i = dim + 1; i < teq::rank_cap; ++i) { EXPECT_EQ(icoord[i], fwd_out[i]) << i; } EXPECT_EQ(-icoord[dim]-1, fwd_out[dim]); - EXPECT_WARN(ade::flip(ade::rank_cap * 2), "flipping dimension out of rank_cap ... will do nothing"); + EXPECT_WARN(teq::flip(teq::rank_cap * 2), "flipping dimension out of rank_cap ... will do nothing"); } TEST(COORD, Bijection) { - EXPECT_TRUE(ade::identity->is_bijective()); + EXPECT_TRUE(teq::identity->is_bijective()); - ade::CoordptrT reducer = ade::reduce(3, {2}); + teq::CoordptrT reducer = teq::reduce(3, {2}); EXPECT_FALSE(reducer->is_bijective()); } TEST(COORD, IsIdentity) { - EXPECT_TRUE(ade::is_identity(nullptr)); - EXPECT_TRUE(ade::is_identity(ade::identity.get())); + EXPECT_TRUE(teq::is_identity(nullptr)); + EXPECT_TRUE(teq::is_identity(teq::identity.get())); - ade::CoordptrT sample_id = std::make_shared( - *static_cast(ade::identity.get())); // deep copy - EXPECT_TRUE(ade::is_identity(sample_id.get())); + teq::CoordptrT sample_id = std::make_shared( + *static_cast(teq::identity.get())); // deep copy + EXPECT_TRUE(teq::is_identity(sample_id.get())); - ade::CoordptrT bourne(new ade::CoordMap( - [](ade::MatrixT fwd) + teq::CoordptrT bourne(new teq::CoordMap( + [](teq::MatrixT fwd) { // todo: we can randomize this so long as fwd is not identity - for (ade::RankT i = 0; i < ade::rank_cap; ++i) + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { fwd[i][i] = 2; } })); - EXPECT_FALSE(ade::is_identity(bourne.get())); + EXPECT_FALSE(teq::is_identity(bourne.get())); } diff --git a/ade/test/test_funcarg.cpp b/teq/test/test_funcarg.cpp similarity index 53% rename from ade/test/test_funcarg.cpp rename to teq/test/test_funcarg.cpp index 495631c15..7205b55cb 100644 --- a/ade/test/test_funcarg.cpp +++ b/teq/test/test_funcarg.cpp @@ -6,37 +6,37 @@ #include "exam/exam.hpp" -#include "ade/test/common.hpp" +#include "teq/test/common.hpp" -#include "ade/funcarg.hpp" +#include "teq/funcarg.hpp" TEST(FUNCARG, Reduce1d) { size_t rank = 5; - ade::CoordT fwd_out; - ade::CoordT icoord = { + teq::CoordT fwd_out; + teq::CoordT icoord = { 211.6172349153, 3.6941314330, 3.3471187148, 24.3511302088, 17.8520169468, 99.9911659058, 7.2182000783, 6.4776819746 }; - ade::Shape shape({223, 35, 7, 25, 19, 214, 72, 7}); - ade::TensptrT tens = std::make_shared(shape); + teq::Shape shape({223, 35, 7, 25, 19, 214, 72, 7}); + teq::TensptrT tens = std::make_shared(shape); - ade::FuncArg redtens = ade::reduce_1d_map(tens, rank); - ade::Shape rshaped = redtens.shape(); + teq::FuncArg redtens = teq::reduce_1d_map(tens, rank); + teq::Shape rshaped = redtens.shape(); - std::vector expect_shape = {223, 35, 7, 25, + std::vector expect_shape = {223, 35, 7, 25, 19, 72, 7, 1}; EXPECT_ARREQ(expect_shape, rshaped); EXPECT_TRUE(redtens.map_io()); auto cmapped = redtens.get_coorder(); cmapped->forward(fwd_out.begin(), icoord.begin()); - ade::CoordT expect_coord = { + teq::CoordT expect_coord = { 211.6172349153, 3.6941314330, 3.3471187148, 24.3511302088, 17.8520169468, 7.2182000783, 6.4776819746, (99.9911659058 / 214.0) }; - for 
(ade::RankT i = 0; i < ade::rank_cap; ++i) + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { EXPECT_DOUBLE_EQ(expect_coord[i], fwd_out[i]); } @@ -46,29 +46,29 @@ TEST(FUNCARG, Reduce1d) TEST(FUNCARG, Reduce) { size_t rank = 5; - ade::CoordT fwd_out; - ade::CoordT icoord = { + teq::CoordT fwd_out; + teq::CoordT icoord = { 211.6172349153, 3.6941314330, 3.3471187148, 24.3511302088, 17.8520169468, 99.9911659058, 7.2182000783, 6.4776819746 }; - ade::Shape shape({223, 35, 7, 25, 19, 214, 72, 7}); - ade::TensptrT tens = std::make_shared(shape); + teq::Shape shape({223, 35, 7, 25, 19, 214, 72, 7}); + teq::TensptrT tens = std::make_shared(shape); - ade::FuncArg redtens = ade::reduce_map(tens, rank, {2}); - ade::Shape rshaped = redtens.shape(); + teq::FuncArg redtens = teq::reduce_map(tens, rank, {2}); + teq::Shape rshaped = redtens.shape(); - std::vector expect_shape = {223, 35, 7, 25, + std::vector expect_shape = {223, 35, 7, 25, 19, 214 / 2, 72, 7}; EXPECT_ARREQ(expect_shape, rshaped); EXPECT_TRUE(redtens.map_io()); auto cmapped = redtens.get_coorder(); cmapped->forward(fwd_out.begin(), icoord.begin()); - ade::CoordT expect_coord = { + teq::CoordT expect_coord = { 211.6172349153, 3.6941314330, 3.3471187148, 24.3511302088, 17.8520169468, (99.9911659058 / 2), 7.2182000783, 6.4776819746 }; - for (ade::RankT i = 0; i < ade::rank_cap; ++i) + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { EXPECT_DOUBLE_EQ(expect_coord[i], fwd_out[i]); } @@ -78,29 +78,29 @@ TEST(FUNCARG, Reduce) TEST(FUNCARG, Extend) { size_t rank = 5; - ade::CoordT fwd_out; - ade::CoordT icoord = { + teq::CoordT fwd_out; + teq::CoordT icoord = { 211.6172349153, 3.6941314330, 3.3471187148, 24.3511302088, 17.8520169468, 99.9911659058, 7.2182000783, 6.4776819746 }; - ade::Shape shape({223, 35, 7, 25, 19, 214, 72, 7}); - ade::TensptrT tens = std::make_shared(shape); + teq::Shape shape({223, 35, 7, 25, 19, 214, 72, 7}); + teq::TensptrT tens = std::make_shared(shape); - ade::FuncArg extens = ade::extend_map(tens, rank, {2, 3}); - ade::Shape eshaped = extens.shape(); + teq::FuncArg extens = teq::extend_map(tens, rank, {2, 3}); + teq::Shape eshaped = extens.shape(); - std::vector expect_shape = {223, 35, 7, 25, + std::vector expect_shape = {223, 35, 7, 25, 19, 214 * 2, 72 * 3, 7}; EXPECT_ARREQ(expect_shape, eshaped); EXPECT_FALSE(extens.map_io()); auto cmapped = extens.get_coorder(); cmapped->forward(fwd_out.begin(), icoord.begin()); - ade::CoordT expect_coord = { + teq::CoordT expect_coord = { 211.6172349153, 3.6941314330, 3.3471187148, 24.3511302088, 17.8520169468, 99.9911659058 / 2, 7.2182000783 / 3, 6.4776819746 }; - for (ade::RankT i = 0; i < ade::rank_cap; ++i) + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { EXPECT_DOUBLE_EQ(expect_coord[i], fwd_out[i]); } @@ -109,29 +109,29 @@ TEST(FUNCARG, Extend) TEST(FUNCARG, Permute) { - ade::CoordT fwd_out; - ade::CoordT icoord = { + teq::CoordT fwd_out; + teq::CoordT icoord = { 211.6172349153, 3.6941314330, 3.3471187148, 24.3511302088, 17.8520169468, 99.9911659058, 7.2182000783, 6.4776819746 }; - ade::Shape shape({223, 35, 7, 25, 19, 214, 72, 7}); - ade::TensptrT tens = std::make_shared(shape); + teq::Shape shape({223, 35, 7, 25, 19, 214, 72, 7}); + teq::TensptrT tens = std::make_shared(shape); - ade::FuncArg ptens = ade::permute_map(tens, {3, 5, 2}); - ade::Shape pshaped = ptens.shape(); + teq::FuncArg ptens = teq::permute_map(tens, {3, 5, 2}); + teq::Shape pshaped = ptens.shape(); - std::vector expect_shape = {25, 214, 7, 223, + std::vector expect_shape = {25, 214, 7, 223, 35, 19, 72, 7}; 
EXPECT_ARREQ(expect_shape, pshaped); EXPECT_FALSE(ptens.map_io()); auto cmapped = ptens.get_coorder(); cmapped->forward(fwd_out.begin(), icoord.begin()); - ade::CoordT expect_coord = { + teq::CoordT expect_coord = { 24.3511302088, 17.8520169468, 3.3471187148, 211.6172349153, 99.9911659058, 3.6941314330, 7.2182000783, 6.4776819746 }; - for (ade::RankT i = 0; i < ade::rank_cap; ++i) + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { EXPECT_DOUBLE_EQ(expect_coord[i], fwd_out[i]); } @@ -140,28 +140,28 @@ TEST(FUNCARG, Permute) TEST(FUNCARG, ShapeCoordDiff) { - ade::CoordptrT shaper = ade::identity; - ade::CoordptrT coorder = ade::flip(1); - ade::Shape shape({3, 2}); - ade::TensptrT tens = std::make_shared(shape); - - ade::FuncArg farg(tens, shaper, true, coorder); - ade::CoordT fwd_out; - ade::CoordT icoord = { + teq::CoordptrT shaper = teq::identity; + teq::CoordptrT coorder = teq::flip(1); + teq::Shape shape({3, 2}); + teq::TensptrT tens = std::make_shared(shape); + + teq::FuncArg farg(tens, shaper, true, coorder); + teq::CoordT fwd_out; + teq::CoordT icoord = { 1, 1, 0, 0, 0, 0, 0, 0, }; - ade::Shape fshaped = farg.shape(); + teq::Shape fshaped = farg.shape(); - std::vector expect_shape = {3, 2, 1, 1, 1, 1, 1, 1}; + std::vector expect_shape = {3, 2, 1, 1, 1, 1, 1, 1}; EXPECT_ARREQ(expect_shape, fshaped); EXPECT_TRUE(farg.map_io()); auto cmapped = farg.get_coorder(); cmapped->forward(fwd_out.begin(), icoord.begin()); - ade::CoordT expect_coord = { + teq::CoordT expect_coord = { 1, -2, 0, 0, 0, 0, 0, 0, }; - for (ade::RankT i = 0; i < ade::rank_cap; ++i) + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { EXPECT_DOUBLE_EQ(expect_coord[i], fwd_out[i]); } @@ -170,22 +170,22 @@ TEST(FUNCARG, ShapeCoordDiff) TEST(FUNCARG, ToArgs) { - ade::Shape shape({3, 2}); - ade::TensptrT tens = std::make_shared(shape); - ade::TensptrT tens2 = std::make_shared(shape); - auto args = ade::to_args({tens, tens2}); + teq::Shape shape({3, 2}); + teq::TensptrT tens = std::make_shared(shape); + teq::TensptrT tens2 = std::make_shared(shape); + auto args = teq::to_args({tens, tens2}); ASSERT_EQ(2, args.size()); auto arg = args[0]; EXPECT_EQ(tens, arg.get_tensor()); - EXPECT_EQ(ade::identity, arg.get_shaper()); - EXPECT_EQ(ade::identity, arg.get_coorder()); + EXPECT_EQ(teq::identity, arg.get_shaper()); + EXPECT_EQ(teq::identity, arg.get_coorder()); auto arg2 = args[1]; EXPECT_EQ(tens2, arg2.get_tensor()); - EXPECT_EQ(ade::identity, arg2.get_shaper()); - EXPECT_EQ(ade::identity, arg2.get_coorder()); + EXPECT_EQ(teq::identity, arg2.get_shaper()); + EXPECT_EQ(teq::identity, arg2.get_coorder()); } diff --git a/teq/test/test_functor.cpp b/teq/test/test_functor.cpp new file mode 100644 index 000000000..e92d91906 --- /dev/null +++ b/teq/test/test_functor.cpp @@ -0,0 +1,104 @@ + +#ifndef DISABLE_FUNCTOR_TEST + + +#include "gtest/gtest.h" + +#include "exam/exam.hpp" + +#include "teq/test/common.hpp" + +#include "teq/functor.hpp" + + +TEST(FUNCTOR, Shapes) +{ + std::vector slist = {94, 78, 70, 82, 62, 29, 38}; + std::vector bad = {94, 78, 70, 82, 62, 22, 38}; + teq::Shape shape(slist); + teq::Shape badshape(bad); + + teq::TensptrT leaf(new MockTensor(shape)); + teq::TensptrT leaf1(new MockTensor(shape)); + teq::TensptrT badleaf(new MockTensor(badshape)); + + teq::TensptrT func(teq::Functor::get(teq::Opcode{"MOCK", 0}, { + teq::identity_map(leaf), + teq::identity_map(leaf1), + })); + + teq::Shape gotshape = func->shape(); + EXPECT_ARREQ(shape, gotshape); + + EXPECT_FATAL(teq::Functor::get(teq::Opcode{"MOCK", 0}, {}), + "cannot 
perform `MOCK` with no arguments"); + + std::string fatalmsg = fmts::sprintf( + "cannot perform `MOCK` with incompatible shapes %s and %s", + shape.to_string().c_str(), badshape.to_string().c_str()); + EXPECT_FATAL(teq::Functor::get(teq::Opcode{"MOCK", 0}, { + teq::identity_map(leaf), + teq::identity_map(badleaf), + }), fatalmsg.c_str()); +} + + +TEST(FUNCTOR, Opcode) +{ + std::string mockname = "asd123101ksq"; + size_t mockcode = 3247; + teq::TensptrT leaf(new MockTensor()); + + teq::Functor* func = teq::Functor::get(teq::Opcode{mockname, mockcode}, { + teq::identity_map(leaf), + }); + + teq::Opcode op = func->get_opcode(); + EXPECT_STREQ(mockname.c_str(), op.name_.c_str()); + EXPECT_EQ(mockcode, op.code_); + + delete func; +} + + +TEST(FUNCTOR, Children) +{ + teq::TensptrT leaf(new MockTensor()); + teq::TensptrT leaf1(new MockTensor()); + teq::TensptrT leaf2(new MockTensor()); + + teq::FuncptrT func(teq::Functor::get(teq::Opcode{"MOCK", 0}, { + teq::identity_map(leaf), + teq::identity_map(leaf1), + })); + + ASSERT_NE(nullptr, func.get()); + + teq::ArgsT refs = func->get_children(); + + ASSERT_EQ(2, refs.size()); + EXPECT_EQ(leaf.get(), refs[0].get_tensor().get()); + EXPECT_EQ(leaf1.get(), refs[1].get_tensor().get()); + + EXPECT_WARN((func->update_child(teq::identity_map(leaf2), 1)), + "teq::Functor does not allow editing of children"); +} + + +TEST(FUNCTOR, ToString) +{ + teq::TensptrT leaf(new MockTensor()); + teq::TensptrT leaf1(new MockTensor()); + + teq::TensptrT func(teq::Functor::get(teq::Opcode{"MOCK", 0}, { + teq::identity_map(leaf), + teq::identity_map(leaf1), + })); + + ASSERT_NE(nullptr, func.get()); + + EXPECT_STREQ("MOCK", func->to_string().c_str()); +} + + +#endif // DISABLE_FUNCTOR_TEST diff --git a/ade/test/test_grad.cpp b/teq/test/test_grad.cpp similarity index 70% rename from ade/test/test_grad.cpp rename to teq/test/test_grad.cpp index 48662327f..e3b7ce7b0 100644 --- a/ade/test/test_grad.cpp +++ b/teq/test/test_grad.cpp @@ -6,17 +6,17 @@ #include "exam/exam.hpp" -#include "ade/test/common.hpp" +#include "teq/test/common.hpp" -#include "ade/grad_def.hpp" -#include "ade/functor.hpp" +#include "teq/grad_def.hpp" +#include "teq/functor.hpp" #include "testutil/tutil.hpp" struct LabelledMockTensor final : public MockTensor { - LabelledMockTensor (std::string label, ade::Shape shape) : + LabelledMockTensor (std::string label, teq::Shape shape) : MockTensor(shape), label_(label) {} std::string to_string (void) const override @@ -28,9 +28,9 @@ struct LabelledMockTensor final : public MockTensor }; -struct MockGradientBuilder final : public ade::iGradientBuilder +struct MockGradientBuilder final : public teq::iGradientBuilder { - ade::TensptrT local_derivative (ade::FuncptrT op, size_t arg_idx) const override + teq::TensptrT local_derivative (teq::FuncptrT op, size_t arg_idx) const override { std::string label = op->to_string(); if (label == "FUNC") @@ -39,41 +39,41 @@ struct MockGradientBuilder final : public ade::iGradientBuilder } else if (label == "FUNC2") { - return ade::TensptrT(ade::Functor::get(ade::Opcode{"FUNC4", 3}, + return teq::TensptrT(teq::Functor::get(teq::Opcode{"FUNC4", 3}, {op->get_children()[arg_idx]})); } - return ade::TensptrT(new LabelledMockTensor("other", op->shape())); + return teq::TensptrT(new LabelledMockTensor("other", op->shape())); } - ade::TensptrT chain_rule (ade::FuncptrT op, const ade::TensptrT& local_der, - ade::TensptrT supcomp_grad, size_t arg_idx) const override + teq::TensptrT chain_rule (teq::FuncptrT op, const teq::TensptrT& local_der, 
+ teq::TensptrT supcomp_grad, size_t arg_idx) const override { - ade::TensptrT tens(ade::Functor::get(ade::Opcode{"FUNC2", 1}, { - ade::identity_map(op), - ade::identity_map(local_der), + teq::TensptrT tens(teq::Functor::get(teq::Opcode{"FUNC2", 1}, { + teq::identity_map(op), + teq::identity_map(local_der), })); - return ade::TensptrT(ade::Functor::get(ade::Opcode{"FUNC3", 2}, { - ade::identity_map(tens), - ade::identity_map(supcomp_grad), + return teq::TensptrT(teq::Functor::get(teq::Opcode{"FUNC3", 2}, { + teq::identity_map(tens), + teq::identity_map(supcomp_grad), })); } - ade::TensptrT get_const_one (ade::Shape shape) const override + teq::TensptrT get_const_one (teq::Shape shape) const override { - return ade::TensptrT(new LabelledMockTensor("1", shape)); + return teq::TensptrT(new LabelledMockTensor("1", shape)); } - ade::TensptrT get_const_zero (ade::Shape shape) const override + teq::TensptrT get_const_zero (teq::Shape shape) const override { - return ade::TensptrT(new LabelledMockTensor("0", shape)); + return teq::TensptrT(new LabelledMockTensor("0", shape)); } - ade::TensptrT add (ade::TensptrT& lhs, ade::TensptrT& rhs) const override + teq::TensptrT add (teq::TensptrT& lhs, teq::TensptrT& rhs) const override { - return ade::TensptrT(ade::Functor::get(ade::Opcode{"FUNC", 0}, { - ade::identity_map(lhs), - ade::identity_map(rhs), + return teq::TensptrT(teq::Functor::get(teq::Opcode{"FUNC", 0}, { + teq::identity_map(lhs), + teq::identity_map(rhs), })); } }; @@ -83,16 +83,16 @@ TEST(GRAD, OneZero) { MockGradientBuilder builder; - std::vector slist = {94, 78, 70, 82, 62, 29, 38}; - ade::Shape shape(slist); + std::vector slist = {94, 78, 70, 82, 62, 29, 38}; + teq::Shape shape(slist); // standard v - ade::TensptrT leaf(new LabelledMockTensor("leaf", shape)); - ade::TensptrT leaf1(new LabelledMockTensor("leaf2", shape)); - ade::TensptrT leaf2(new LabelledMockTensor("leaf3", shape)); - ade::TensptrT f(ade::Functor::get(ade::Opcode{"FUNC", 0}, { - ade::identity_map(leaf), - ade::identity_map(leaf1), + teq::TensptrT leaf(new LabelledMockTensor("leaf", shape)); + teq::TensptrT leaf1(new LabelledMockTensor("leaf2", shape)); + teq::TensptrT leaf2(new LabelledMockTensor("leaf3", shape)); + teq::TensptrT f(teq::Functor::get(teq::Opcode{"FUNC", 0}, { + teq::identity_map(leaf), + teq::identity_map(leaf1), })); auto wun = builder.derive(f, f); @@ -117,15 +117,15 @@ TEST(GRAD, BuilderStandardV) { MockGradientBuilder builder; - std::vector slist = {94, 78, 70, 82, 62, 29, 38}; - ade::Shape shape(slist); + std::vector slist = {94, 78, 70, 82, 62, 29, 38}; + teq::Shape shape(slist); // standard v - ade::TensptrT leaf(new LabelledMockTensor("leaf", shape)); - ade::TensptrT leaf1(new LabelledMockTensor("leaf2", shape)); - ade::TensptrT f(ade::Functor::get(ade::Opcode{"FUNC", 0}, { - ade::identity_map(leaf), - ade::identity_map(leaf1), + teq::TensptrT leaf(new LabelledMockTensor("leaf", shape)); + teq::TensptrT leaf1(new LabelledMockTensor("leaf2", shape)); + teq::TensptrT f(teq::Functor::get(teq::Opcode{"FUNC", 0}, { + teq::identity_map(leaf), + teq::identity_map(leaf1), })); auto gl = builder.derive(f, leaf); @@ -157,20 +157,20 @@ TEST(GRAD, BuilderDiamond) { MockGradientBuilder builder; - std::vector slist = {94, 78, 70, 82, 62, 29, 38}; - ade::Shape shape(slist); + std::vector slist = {94, 78, 70, 82, 62, 29, 38}; + teq::Shape shape(slist); // diamond - ade::TensptrT leaf(new LabelledMockTensor("leaf", shape)); - ade::TensptrT f(ade::Functor::get(ade::Opcode{"FUNC", 0}, { - ade::identity_map(leaf), + 
teq::TensptrT leaf(new LabelledMockTensor("leaf", shape)); + teq::TensptrT f(teq::Functor::get(teq::Opcode{"FUNC", 0}, { + teq::identity_map(leaf), })); - ade::TensptrT f2(ade::Functor::get(ade::Opcode{"FUNC2", 1}, { - ade::identity_map(leaf), + teq::TensptrT f2(teq::Functor::get(teq::Opcode{"FUNC2", 1}, { + teq::identity_map(leaf), })); - ade::TensptrT f3(ade::Functor::get(ade::Opcode{"FUNC3", 2}, { - ade::identity_map(f), - ade::identity_map(f2), + teq::TensptrT f3(teq::Functor::get(teq::Opcode{"FUNC3", 2}, { + teq::identity_map(f), + teq::identity_map(f2), })); auto gl = builder.derive(f3, leaf); @@ -214,22 +214,22 @@ TEST(GRAD, TadPole) { MockGradientBuilder builder; - std::vector slist = {94, 78, 70, 82, 62, 29, 38}; - ade::Shape shape(slist); + std::vector slist = {94, 78, 70, 82, 62, 29, 38}; + teq::Shape shape(slist); - ade::TensptrT leaf(new LabelledMockTensor("leaf", shape)); - ade::TensptrT f(ade::Functor::get(ade::Opcode{"FUNC", 0}, { - ade::identity_map(leaf), + teq::TensptrT leaf(new LabelledMockTensor("leaf", shape)); + teq::TensptrT f(teq::Functor::get(teq::Opcode{"FUNC", 0}, { + teq::identity_map(leaf), })); - ade::TensptrT f2(ade::Functor::get(ade::Opcode{"FUNC2", 1}, { - ade::identity_map(f), + teq::TensptrT f2(teq::Functor::get(teq::Opcode{"FUNC2", 1}, { + teq::identity_map(f), })); - ade::TensptrT f3(ade::Functor::get(ade::Opcode{"FUNC3", 2}, { - ade::identity_map(f), + teq::TensptrT f3(teq::Functor::get(teq::Opcode{"FUNC3", 2}, { + teq::identity_map(f), })); - ade::TensptrT f4(ade::Functor::get(ade::Opcode{"FUNC4", 3}, { - ade::identity_map(f2), - ade::identity_map(f3), + teq::TensptrT f4(teq::Functor::get(teq::Opcode{"FUNC4", 3}, { + teq::identity_map(f2), + teq::identity_map(f3), })); auto gl = builder.derive(f4, leaf); diff --git a/ade/test/test_matops.cpp b/teq/test/test_matops.cpp similarity index 86% rename from ade/test/test_matops.cpp rename to teq/test/test_matops.cpp index 13dca82b9..1fefeae1d 100644 --- a/ade/test/test_matops.cpp +++ b/teq/test/test_matops.cpp @@ -6,7 +6,7 @@ #include "exam/exam.hpp" -#include "ade/matops.hpp" +#include "teq/matops.hpp" TEST(MATOPS, ToString) @@ -20,21 +20,21 @@ TEST(MATOPS, ToString) "[54\\55\\56\\57\\58\\59\\60\\61\\62]\\\n" "[63\\64\\65\\66\\67\\68\\69\\70\\71]\\\n" "[72\\73\\74\\75\\76\\77\\78\\79\\80]]"; - ade::MatrixT mat; - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + teq::MatrixT mat; + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { - mat[i][j] = i * ade::mat_dim + j; + mat[i][j] = i * teq::mat_dim + j; } } - EXPECT_STREQ(expected.c_str(), ade::to_string(mat).c_str()); + EXPECT_STREQ(expected.c_str(), teq::to_string(mat).c_str()); } TEST(MATOPS, Determinant) { - ade::MatrixT indata = { + teq::MatrixT indata = { {0.6889268247, 0.5182375525, 0.8077819453, 0.6586822856, 0.1064583106, 0.5584794867, 0.7151236734, 0.6955541292, 0.5299556786}, {0.8790203846, 0.7084063896, 0.0299713280, 0.6855684925, 0.1025625016, 0.8161998141, 0.1896595999, 0.1917321581, 0.3208859474}, {0.5316041393, 0.2931242636, 0.1538743813, 0.8752116402, 0.3936777315, 0.2557857831, 0.0779308587, 0.2442668717, 0.0513855996}, @@ -46,9 +46,9 @@ TEST(MATOPS, Determinant) {0.3058592439, 0.9815336218, 0.3364270850, 0.1562163045, 0.5562589952, 0.3769814342, 0.4465301119, 0.6977257625, 0.7664397080}, }; - EXPECT_DOUBLE_EQ(-0.028472153084121096, ade::determinant(indata)); + EXPECT_DOUBLE_EQ(-0.028472153084121096, teq::determinant(indata)); - ade::MatrixT 
determ0 = { + teq::MatrixT determ0 = { {0.6889268247, 0, 0.8077819453, 0.6586822856, 0.1064583106, 0.5584794867, 0.7151236734, 0.6955541292, 0.5299556786}, {0.8790203846, 0, 0.0299713280, 0.6855684925, 0.1025625016, 0.8161998141, 0.1896595999, 0.1917321581, 0.3208859474}, {0.5316041393, 0.2931242636, 0.1538743813, 0.8752116402, 0, 0.2557857831, 0.0779308587, 0.2442668717, 0.0513855996}, @@ -60,15 +60,15 @@ TEST(MATOPS, Determinant) {0.3058592439, 0.9815336218, 0.3364270850, 0.1562163045, 0.5562589952, 0.3769814342, 0.4465301119, 0.6977257625, 0.7664397080}, }; - EXPECT_DOUBLE_EQ(0, ade::determinant(determ0)); + EXPECT_DOUBLE_EQ(0, teq::determinant(determ0)); } TEST(MATOPS, Inverse) { - ade::MatrixT out, in; - ade::MatrixT zout, zin; - ade::MatrixT badout, badin; + teq::MatrixT out, in; + teq::MatrixT zout, zin; + teq::MatrixT badout, badin; std::vector indata = { 0.6889268247, 0.5182375525, 0.8077819453, 0.6586822856, 0.1064583106, 0.5584794867, 0.7151236734, 0.6955541292, 0.5299556786, 0.8790203846, 0.7084063896, 0.0299713280, 0.6855684925, 0.1025625016, 0.8161998141, 0.1896595999, 0.1917321581, 0.3208859474, @@ -102,31 +102,31 @@ TEST(MATOPS, Inverse) 0.2252390787, 0.3721492337, 0.2498504751, 0.3123788027, 0.5537316928, 0.5611137585, 0.2029485252, 0.8579680347, 0.0442760832, 0.3058592439, 0.9815336218, 0.3364270850, 0.1562163045, 0.5562589952, 0.3769814342, 0.4465301119, 0.6977257625, 0.7664397080 }; - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { - in[i][j] = indata[i * ade::mat_dim + j]; - zin[i][j] = zdata[i * ade::mat_dim + j]; - badin[i][j] = baddata[i * ade::mat_dim + j]; + in[i][j] = indata[i * teq::mat_dim + j]; + zin[i][j] = zdata[i * teq::mat_dim + j]; + badin[i][j] = baddata[i * teq::mat_dim + j]; } } - ade::inverse(out, in); - ade::inverse(zout, zin); + teq::inverse(out, in); + teq::inverse(zout, zin); std::string fatalmsg = fmts::sprintf("cannot invert matrix:\n%s", - ade::to_string(badin).c_str()); - EXPECT_FATAL(ade::inverse(badout, badin), fatalmsg.c_str()); + teq::to_string(badin).c_str()); + EXPECT_FATAL(teq::inverse(badout, badin), fatalmsg.c_str()); // expect matmul is identity - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { double val = 0; double zval = 0; - for (ade::RankT k = 0; k < ade::mat_dim; ++k) + for (teq::RankT k = 0; k < teq::mat_dim; ++k) { val += out[i][k] * in[k][j]; zval += zout[i][k] * zin[k][j]; @@ -148,7 +148,7 @@ TEST(MATOPS, Inverse) TEST(MATOPS, Matmul) { - ade::MatrixT expected, out, in, in2; + teq::MatrixT expected, out, in, in2; std::vector indata = { 0.7259523165, 0.0215138058, 0.8619883459, 0.2181517503, 0.4143879487, 0.3798615637, 0.9794909452, 0.8138574826, 0.6938136856, 0.1179236527, 0.8269392333, 0.7848566651, 0.4425554320, 0.2254599610, 0.2829998577, 0.4309979629, 0.4136898807, 0.0993291362, @@ -171,25 +171,25 @@ TEST(MATOPS, Matmul) 0.2479436504, 0.1266563150, 0.5884766852, 0.5278691634, 0.8695170450, 0.6803630612, 0.0420603430, 0.3344162464, 0.8860979790, 0.8208910642, 0.2098140918, 0.3228215729, 0.7025393155, 0.8154172074, 0.4992314192, 0.3955948624, 0.0212939634, 0.1960803287 }; - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j 
= 0; j < teq::mat_dim; ++j) { - in[i][j] = indata[i * ade::mat_dim + j]; - in2[i][j] = indata2[i * ade::mat_dim + j]; + in[i][j] = indata[i * teq::mat_dim + j]; + in2[i][j] = indata2[i * teq::mat_dim + j]; expected[i][j] = 0; - for (ade::RankT k = 0; k < ade::mat_dim; ++k) + for (teq::RankT k = 0; k < teq::mat_dim; ++k) { - expected[i][j] += indata[i * ade::mat_dim + k] * indata2[k * ade::mat_dim + j]; + expected[i][j] += indata[i * teq::mat_dim + k] * indata2[k * teq::mat_dim + j]; } } } - ade::matmul(out, in, in2); + teq::matmul(out, in, in2); - for (ade::RankT i = 0; i < ade::mat_dim; ++i) + for (teq::RankT i = 0; i < teq::mat_dim; ++i) { - for (ade::RankT j = 0; j < ade::mat_dim; ++j) + for (teq::RankT j = 0; j < teq::mat_dim; ++j) { EXPECT_EQ(expected[i][j], out[i][j]); } diff --git a/ade/test/test_shape.cpp b/teq/test/test_shape.cpp similarity index 52% rename from ade/test/test_shape.cpp rename to teq/test/test_shape.cpp index 80eee5067..dffabe0c3 100644 --- a/ade/test/test_shape.cpp +++ b/teq/test/test_shape.cpp @@ -6,64 +6,64 @@ #include "exam/exam.hpp" -#include "ade/shape.hpp" +#include "teq/shape.hpp" TEST(SHAPE, Init) { - ade::Shape scalar; + teq::Shape scalar; - std::vector slist = {12, 43, 56}; - ade::Shape vec(slist); - ade::RankT n = slist.size(); + std::vector slist = {12, 43, 56}; + teq::Shape vec(slist); + teq::RankT n = slist.size(); - std::vector longlist = {4, 23, 44, 52, 19, 92, 12, 2, 5}; - ade::Shape lvec(longlist); + std::vector longlist = {4, 23, 44, 52, 19, 92, 12, 2, 5}; + teq::Shape lvec(longlist); - std::vector zerolist = {43, 2, 5, 33, 0, 2, 7}; + std::vector zerolist = {43, 2, 5, 33, 0, 2, 7}; std::string fatalmsg = "cannot create shape with vector containing zero: " + fmts::to_string(zerolist.begin(), zerolist.end()); - EXPECT_FATAL(ade::Shape junk(zerolist), fatalmsg.c_str()); + EXPECT_FATAL(teq::Shape junk(zerolist), fatalmsg.c_str()); - for (ade::RankT i = 0; i < ade::rank_cap; ++i) + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { EXPECT_EQ(1, scalar.at(i)); } - for (ade::RankT i = 0; i < n; ++i) + for (teq::RankT i = 0; i < n; ++i) { EXPECT_EQ(slist[i], vec.at(i)); } - for (ade::RankT i = n; i < ade::rank_cap; ++i) + for (teq::RankT i = n; i < teq::rank_cap; ++i) { EXPECT_EQ(1, vec.at(i)); } - for (ade::RankT i = 0; i < ade::rank_cap; ++i) + for (teq::RankT i = 0; i < teq::rank_cap; ++i) { EXPECT_EQ(longlist[i], lvec.at(i)); } - EXPECT_FATAL(scalar.at(ade::rank_cap), "cannot access out of bounds index 8"); - EXPECT_FATAL(vec.at(ade::rank_cap), "cannot access out of bounds index 8"); + EXPECT_FATAL(scalar.at(teq::rank_cap), "cannot access out of bounds index 8"); + EXPECT_FATAL(vec.at(teq::rank_cap), "cannot access out of bounds index 8"); } TEST(SHAPE, VecAssign) {\ - std::vector zerolist = {3, 0, 11, 89}; - std::vector slist = {52, 58, 35, 46, 77, 80}; - std::vector junk = {7, 42}; + std::vector zerolist = {3, 0, 11, 89}; + std::vector slist = {52, 58, 35, 46, 77, 80}; + std::vector junk = {7, 42}; - ade::Shape vecassign; - ade::Shape vecassign2(junk); + teq::Shape vecassign; + teq::Shape vecassign2(junk); vecassign = slist; - std::vector vlist(vecassign.begin(), vecassign.end()); + std::vector vlist(vecassign.begin(), vecassign.end()); EXPECT_ARREQ(slist, vlist); vecassign2 = slist; - std::vector vlist2(vecassign2.begin(), vecassign2.end()); + std::vector vlist2(vecassign2.begin(), vecassign2.end()); EXPECT_ARREQ(slist, vlist2); std::string fatalmsg = "cannot create shape with vector containing zero: " + @@ -74,33 +74,33 @@ TEST(SHAPE, VecAssign) 
 TEST(SHAPE, Moves)
 {
-	std::vector<ade::DimT> junk = {8, 51, 73};
-	std::vector<ade::DimT> slist = {24, 11, 12, 16};
+	std::vector<teq::DimT> junk = {8, 51, 73};
+	std::vector<teq::DimT> slist = {24, 11, 12, 16};
 
-	ade::Shape mvassign;
-	ade::Shape mvassign2(junk);
-	ade::Shape orig(slist);
+	teq::Shape mvassign;
+	teq::Shape mvassign2(junk);
+	teq::Shape orig(slist);
 
-	ade::Shape mv(std::move(orig));
-	std::vector<ade::DimT> mlist(mv.begin(), mv.end());
+	teq::Shape mv(std::move(orig));
+	std::vector<teq::DimT> mlist(mv.begin(), mv.end());
 	EXPECT_ARREQ(slist, mlist);
-	for (ade::RankT i = 0; i < ade::rank_cap; ++i)
+	for (teq::RankT i = 0; i < teq::rank_cap; ++i)
 	{
 		EXPECT_EQ(1, orig.at(i));
 	}
 
 	mvassign = std::move(mv);
-	std::vector<ade::DimT> alist(mvassign.begin(), mvassign.end());
+	std::vector<teq::DimT> alist(mvassign.begin(), mvassign.end());
 	EXPECT_ARREQ(slist, alist);
-	for (ade::RankT i = 0; i < ade::rank_cap; ++i)
+	for (teq::RankT i = 0; i < teq::rank_cap; ++i)
 	{
 		EXPECT_EQ(1, mv.at(i));
 	}
 
 	mvassign2 = std::move(mvassign);
-	std::vector<ade::DimT> alist2(mvassign2.begin(), mvassign2.end());
+	std::vector<teq::DimT> alist2(mvassign2.begin(), mvassign2.end());
 	EXPECT_ARREQ(slist, alist2);
-	for (ade::RankT i = 0; i < ade::rank_cap; ++i)
+	for (teq::RankT i = 0; i < teq::rank_cap; ++i)
 	{
 		EXPECT_EQ(1, mvassign.at(i));
 	}
@@ -109,31 +109,31 @@ TEST(SHAPE, Moves)
 
 TEST(SHAPE, NElems)
 {
-	std::vector<ade::DimT> slist = {11, 12, 16};
-	ade::Shape shape(slist);
+	std::vector<teq::DimT> slist = {11, 12, 16};
+	teq::Shape shape(slist);
 
 	size_t expect_nelems = 11 * 12 * 16;
 	EXPECT_EQ(expect_nelems, shape.n_elems());
 
-	std::vector<ade::DimT> biglist(8, 255);
-	ade::Shape bigshape(biglist);
+	std::vector<teq::DimT> biglist(8, 255);
+	teq::Shape bigshape(biglist);
 
 	size_t expect_bignelems = 17878103347812890625ul;
 	EXPECT_EQ(expect_bignelems, bigshape.n_elems());
 
 	// also check the bounds
-	EXPECT_GT(std::numeric_limits<ade::NElemT>::max(),
+	EXPECT_GT(std::numeric_limits<teq::NElemT>::max(),
 		expect_bignelems);
 }
 
 
 TEST(SHAPE, Compatible)
 {
-	std::vector<ade::DimT> slist = {20, 48, 10, 27, 65, 74};
-	ade::Shape shape(slist);
+	std::vector<teq::DimT> slist = {20, 48, 10, 27, 65, 74};
+	teq::Shape shape(slist);
 
 	// shape is compatible with itself regardless of after idx
-	for (ade::RankT idx = 0; idx < ade::rank_cap; ++idx)
+	for (teq::RankT idx = 0; idx < teq::rank_cap; ++idx)
 	{
 		EXPECT_TRUE(shape.compatible_after(shape, idx)) <<
 			"expect " << shape.to_string() <<
@@ -141,10 +141,10 @@ TEST(SHAPE, Compatible)
 	}
 
 	uint32_t insertion_pt = 3;
-	std::vector<ade::DimT> ilist = slist;
+	std::vector<teq::DimT> ilist = slist;
 	ilist.insert(ilist.begin() + insertion_pt, 2);
-	ade::Shape ishape(ilist);
-	for (ade::RankT idx = 0; idx < insertion_pt; ++idx)
+	teq::Shape ishape(ilist);
+	for (teq::RankT idx = 0; idx < insertion_pt; ++idx)
 	{
 		EXPECT_FALSE(shape.compatible_after(ishape, idx)) <<
 			"expect " << shape.to_string() <<
@@ -153,15 +153,15 @@ TEST(SHAPE, Compatible)
 	}
 
 	ilist[insertion_pt] = 3;
-	ade::Shape ishape2(ilist);
-	for (ade::RankT idx = 0; idx <= insertion_pt; ++idx)
+	teq::Shape ishape2(ilist);
+	for (teq::RankT idx = 0; idx <= insertion_pt; ++idx)
 	{
 		EXPECT_FALSE(ishape.compatible_after(ishape2, idx)) <<
 			"expect " << ishape.to_string() <<
 			" to be incompatible with " <<
 			ishape2.to_string() << " after idx " << unsigned(idx);
 	}
-	for (ade::RankT idx = insertion_pt + 1; idx < ade::rank_cap; ++idx)
+	for (teq::RankT idx = insertion_pt + 1; idx < teq::rank_cap; ++idx)
 	{
 		EXPECT_TRUE(ishape.compatible_after(ishape2, idx)) <<
 			"shape " << ishape.to_string() <<
@@ -173,39 +173,39 @@ TEST(SHAPE, Compatible)
 
 TEST(SHAPE, Coordinates)
 {
-	std::vector<ade::DimT> slist = {9, 3, 7, 8, 5};
-	ade::Shape shape(slist);
-	ade::CoordT coord;
-	for (ade::NElemT i = 0, n = shape.n_elems(); i < n; ++i)
+	std::vector<teq::DimT> slist = {9, 3, 7, 8, 5};
+	teq::Shape shape(slist);
+	teq::CoordT coord;
+	for (teq::NElemT i = 0, n = shape.n_elems(); i < n; ++i)
 	{
-		coord = ade::coordinate(shape, i);
-		for (ade::RankT i = 0; i < ade::rank_cap; ++i)
+		coord = teq::coordinate(shape, i);
+		for (teq::RankT i = 0; i < teq::rank_cap; ++i)
 		{
 			EXPECT_GT(shape.at(i), coord[i]);
 		}
-		ade::NElemT idx = ade::index(shape, coord);
+		teq::NElemT idx = teq::index(shape, coord);
 		EXPECT_EQ(i, idx);
 	}
 
-	for (ade::RankT i = 0; i < ade::rank_cap; ++i)
+	for (teq::RankT i = 0; i < teq::rank_cap; ++i)
 	{
 		coord[i] = shape.at(i);
 	}
 
 	std::string shapestr = shape.to_string();
 	std::string fatalmsg = fmts::sprintf("cannot get index of bad coordinate "
 		"%s for shape %s", shapestr.c_str(), shapestr.c_str());
-	EXPECT_FATAL(ade::index(shape, coord), fatalmsg.c_str());
+	EXPECT_FATAL(teq::index(shape, coord), fatalmsg.c_str());
 
 	std::string fatalmsg2 = fmts::sprintf("cannot get coordinate of index %d "
 		"(>= shape %s)", shape.n_elems(), shapestr.c_str());
-	EXPECT_FATAL(ade::coordinate(shape, shape.n_elems()), fatalmsg2.c_str());
+	EXPECT_FATAL(teq::coordinate(shape, shape.n_elems()), fatalmsg2.c_str());
 }
 
 
 TEST(SHAPE, ToString)
 {
-	std::vector<ade::DimT> slist = {24, 11, 12, 16, 7, 71, 1, 1};
-	ade::Shape shape(slist);
+	std::vector<teq::DimT> slist = {24, 11, 12, 16, 7, 71, 1, 1};
+	teq::Shape shape(slist);
 	std::string out = shape.to_string();
 	const char* expect_out = "[24\\11\\12\\16\\7\\71\\1\\1]";
diff --git a/teq/test/test_tensor.cpp b/teq/test/test_tensor.cpp
new file mode 100644
index 000000000..0c604f85d
--- /dev/null
+++ b/teq/test/test_tensor.cpp
@@ -0,0 +1,47 @@
+
+#ifndef DISABLE_TENSOR_TEST
+
+
+#include "gtest/gtest.h"
+
+#include "exam/exam.hpp"
+
+#include "teq/test/common.hpp"
+
+#include "teq/funcarg.hpp"
+
+
+TEST(TENSOR, FuncArg)
+{
+	std::vector<teq::DimT> slist = {2, 81};
+
+	size_t dim = 1;
+	teq::TensptrT tens(new MockTensor(teq::Shape(slist)));
+	teq::FuncArg mt = teq::flip_map(tens, dim);
+
+	teq::Shape shape = mt.shape();
+	EXPECT_ARREQ(slist, shape);
+
+	teq::FuncArg mt2(tens, teq::CoordptrT(new teq::CoordMap(
+		[](teq::MatrixT m)
+		{
+			for (size_t i = 0; i < teq::mat_dim; ++i)
+			{
+				m[i][i] = 1;
+			}
+			m[0][0] = 4;
+		})));
+
+	teq::Shape shape2 = mt2.shape();
+	EXPECT_EQ(4 * slist[0], shape2.at(0));
+
+	EXPECT_FATAL(teq::identity_map(nullptr),
+		"cannot map a null tensor");
+
+	EXPECT_FATAL(teq::FuncArg(nullptr, teq::reduce(3, {4}),
+		false, teq::extend(3, {4})),
+		"cannot map a null tensor");
+}
+
+
+#endif // DISABLE_TENSOR_TEST
diff --git a/ade/test/test_traveler.cpp b/teq/test/test_traveler.cpp
similarity index 63%
rename from ade/test/test_traveler.cpp
rename to teq/test/test_traveler.cpp
index b0faf6912..7aa98098a 100644
--- a/ade/test/test_traveler.cpp
+++ b/teq/test/test_traveler.cpp
@@ -6,29 +6,29 @@
 
 #include "exam/exam.hpp"
 
-#include "ade/test/common.hpp"
+#include "teq/test/common.hpp"
 
-#include "ade/functor.hpp"
-#include "ade/traveler.hpp"
+#include "teq/functor.hpp"
+#include "teq/traveler.hpp"
 
 
 TEST(TRAVELER, GraphStat)
 {
-	ade::TensptrT a(new MockTensor());
-	ade::TensptrT b(new MockTensor());
-	ade::TensptrT c(new MockTensor());
+	teq::TensptrT a(new MockTensor());
+	teq::TensptrT b(new MockTensor());
+	teq::TensptrT c(new MockTensor());
 
-	ade::TensptrT f(ade::Functor::get(ade::Opcode{"MOCK1", 1}, {
-		ade::identity_map(a),
-		ade::identity_map(b),
+	teq::TensptrT f(teq::Functor::get(teq::Opcode{"MOCK1", 1}, {
+		teq::identity_map(a),
+		teq::identity_map(b),
 	}));
-	ade::TensptrT g(ade::Functor::get(ade::Opcode{"MOCK0", 0}, {
-		ade::identity_map(c),
-		ade::identity_map(f),
+	teq::TensptrT g(teq::Functor::get(teq::Opcode{"MOCK0", 0}, {
+		teq::identity_map(c),
+		teq::identity_map(f),
 	}));
 
-	ade::GraphStat stat;
+	teq::GraphStat stat;
 	g->accept(stat);
 	EXPECT_EQ(2, stat.graphsize_[g.get()].upper_);
 	EXPECT_EQ(1, stat.graphsize_[f.get()].upper_);
@@ -40,21 +40,21 @@ TEST(TRAVELER, GraphStat)
 
 TEST(TRAVELER, PathFinder)
 {
-	ade::TensptrT a(new MockTensor());
-	ade::TensptrT b(new MockTensor());
-	ade::TensptrT c(new MockTensor());
+	teq::TensptrT a(new MockTensor());
+	teq::TensptrT b(new MockTensor());
+	teq::TensptrT c(new MockTensor());
 
-	ade::TensptrT f(ade::Functor::get(ade::Opcode{"MOCK1", 1}, {
-		ade::identity_map(a),
-		ade::identity_map(b),
+	teq::TensptrT f(teq::Functor::get(teq::Opcode{"MOCK1", 1}, {
+		teq::identity_map(a),
+		teq::identity_map(b),
 	}));
-	ade::TensptrT g(ade::Functor::get(ade::Opcode{"MOCK1", 1}, {
-		ade::identity_map(c),
-		ade::identity_map(f),
+	teq::TensptrT g(teq::Functor::get(teq::Opcode{"MOCK1", 1}, {
+		teq::identity_map(c),
+		teq::identity_map(f),
 	}));
 
-	ade::PathFinder finder(a.get());
+	teq::PathFinder finder(a.get());
 	g->accept(finder);
 
 	{
@@ -75,7 +75,7 @@ TEST(TRAVELER, PathFinder)
 		EXPECT_ARRHAS(finder.parents_[f.get()], 0);
 	}
 
-	ade::PathFinder finder2(c.get());
+	teq::PathFinder finder2(c.get());
 	g->accept(finder2);
 
 	{
@@ -93,27 +93,27 @@ TEST(TRAVELER, PathFinder)
 
 TEST(TRAVELER, ReverseParentGraph)
 {
-	ade::TensptrT a(new MockTensor());
-	ade::TensptrT b(new MockTensor());
-	ade::TensptrT c(new MockTensor());
+	teq::TensptrT a(new MockTensor());
+	teq::TensptrT b(new MockTensor());
+	teq::TensptrT c(new MockTensor());
 
-	ade::TensptrT f(ade::Functor::get(ade::Opcode{"f", 1}, {
-		ade::identity_map(a),
-		ade::identity_map(b),
+	teq::TensptrT f(teq::Functor::get(teq::Opcode{"f", 1}, {
+		teq::identity_map(a),
+		teq::identity_map(b),
 	}));
-	ade::TensptrT g(ade::Functor::get(ade::Opcode{"g", 2}, {
-		ade::identity_map(f),
-		ade::identity_map(b),
+	teq::TensptrT g(teq::Functor::get(teq::Opcode{"g", 2}, {
+		teq::identity_map(f),
+		teq::identity_map(b),
 	}));
-	ade::TensptrT h(ade::Functor::get(ade::Opcode{"h", 3}, {
-		ade::identity_map(c),
-		ade::identity_map(f),
-		ade::identity_map(g),
+	teq::TensptrT h(teq::Functor::get(teq::Opcode{"h", 3}, {
+		teq::identity_map(c),
+		teq::identity_map(f),
+		teq::identity_map(g),
 	}));
 
-	ade::ParentFinder finder;
+	teq::ParentFinder finder;
 	h->accept(finder);
 
 	// expect: a -> [f], b -> [f, g], c -> [h], f -> [g, h], g -> [h], h -> []
@@ -144,26 +144,26 @@ TEST(TRAVELER, ReverseParentGraph)
 
 TEST(TRAVELER, Owners)
 {
-	ade::OwnerMapT owners;
-	ade::TensptrT a(new MockTensor());
-	ade::TensptrT b(new MockTensor());
-	ade::TensptrT c(new MockTensor());
-	ade::iTensor* fref;
-	ade::iTensor* gref;
+	teq::OwnerMapT owners;
+	teq::TensptrT a(new MockTensor());
+	teq::TensptrT b(new MockTensor());
+	teq::TensptrT c(new MockTensor());
+	teq::iTensor* fref;
+	teq::iTensor* gref;
 	{
-		ade::TensptrT f(ade::Functor::get(ade::Opcode{"f", 1}, {
-			ade::identity_map(a),
-			ade::identity_map(b),
+		teq::TensptrT f(teq::Functor::get(teq::Opcode{"f", 1}, {
+			teq::identity_map(a),
+			teq::identity_map(b),
 		}));
-		ade::TensptrT g(ade::Functor::get(ade::Opcode{"g", 2}, {
-			ade::identity_map(f),
-			ade::identity_map(c),
+		teq::TensptrT g(teq::Functor::get(teq::Opcode{"g", 2}, {
+			teq::identity_map(f),
+			teq::identity_map(c),
 		}));
 		fref = f.get();
 		gref = g.get();
-		owners = ade::track_owners({g});
+		owners = teq::track_owners({g});
 
 		ASSERT_HAS(owners, a.get());
 		ASSERT_HAS(owners, b.get());
 		ASSERT_HAS(owners, c.get());
diff --git a/ade/traveler.hpp b/teq/traveler.hpp
similarity index 90%
rename from ade/traveler.hpp
rename to teq/traveler.hpp
index 183cece2c..bdbe7eb7e 100644
--- a/ade/traveler.hpp
+++ b/teq/traveler.hpp
@@ -1,6 +1,6 @@
 ///
 /// traveler.hpp
-/// ade
+/// teq
 ///
 /// Purpose:
 /// Define common traveler implementations
@@ -9,13 +9,13 @@
 #include "estd/estd.hpp"
 #include "estd/range.hpp"
 
-#include "ade/ileaf.hpp"
-#include "ade/ifunctor.hpp"
+#include "teq/ileaf.hpp"
+#include "teq/ifunctor.hpp"
 
-#ifndef ADE_TRAVELER_HPP
-#define ADE_TRAVELER_HPP
+#ifndef TEQ_TRAVELER_HPP
+#define TEQ_TRAVELER_HPP
 
-namespace ade
+namespace teq
 {
 
 /// Extremely generic traveler that visits every node in the graph once
@@ -200,10 +200,10 @@ OwnerMapT track_owners (TensT roots);
 
 struct HeightMatrix
 {
-	HeightMatrix (const ade::TensT& roots)
+	HeightMatrix (const TensT& roots)
 	{
-		ade::GraphStat stat;
-		for (ade::TensptrT root : roots)
+		GraphStat stat;
+		for (TensptrT root : roots)
 		{
 			root->accept(stat);
 		}
@@ -212,14 +212,14 @@ struct HeightMatrix
 		root_heights.reserve(roots.size());
 		std::transform(roots.begin(), roots.end(),
 			std::back_inserter(root_heights),
-			[&stat](const ade::TensptrT& root)
+			[&stat](const TensptrT& root)
 			{
 				return stat.graphsize_[root.get()].upper_;
 			});
 		// max of the maxheight of roots should be the maxheight of the whole graph
 		size_t maxheight = *std::max_element(
 			root_heights.begin(), root_heights.end());
-		funcs_ = std::vector<std::unordered_set<ade::iFunctor*>>(maxheight);
+		funcs_ = std::vector<std::unordered_set<iFunctor*>>(maxheight);
 
 		for (auto& gpair : stat.graphsize_)
 		{
@@ -227,20 +227,20 @@ struct HeightMatrix
 			size_t height = gpair.second.upper_;
 			if (0 == height)
 			{
-				leaves_.emplace(static_cast<ade::iLeaf*>(tens));
+				leaves_.emplace(static_cast<iLeaf*>(tens));
 			}
 			else
 			{
-				funcs_[height - 1].emplace(static_cast<ade::iFunctor*>(tens));
+				funcs_[height - 1].emplace(static_cast<iFunctor*>(tens));
 			}
 		}
 	}
 
-	std::unordered_set<ade::iLeaf*> leaves_;
+	std::unordered_set<iLeaf*> leaves_;
 
-	std::vector<std::unordered_set<ade::iFunctor*>> funcs_;
+	std::vector<std::unordered_set<iFunctor*>> funcs_;
 };
 
 }
 
-#endif // ADE_TRAVELER_HPP
+#endif // TEQ_TRAVELER_HPP
diff --git a/tests.sh b/tests.sh
index 589c6c7a6..2e0e15b5d 100755
--- a/tests.sh
+++ b/tests.sh
@@ -13,11 +13,11 @@ free -m;
 
 # ===== Run Gtest =====
 echo "===== TESTS =====";
-bazel test --config asan --config gtest --action_env="ASAN_OPTIONS=detect_leaks=0" --define EAD_CFG=MIN \
-//ade:test //tag:test //pbm:test //opt:test //opt/parse:test //ead:ctest //perf:test //pll:test
+bazel test --config asan --config gtest --action_env="ASAN_OPTIONS=detect_leaks=0" --define ETEQ_CFG=MIN \
+//teq:test //tag:test //pbm:test //opt:test //opt/parse:test //eteq:ctest //perf:test //ccur:test
 
-bazel test --run_under='valgrind --leak-check=full' --define EAD_CFG=MIN \
-//ade:test //gen:ptest //tag:test //pbm:test //opt:test //opt/parse:test //ead:ctest //ead:ptest //perf:test //pll:test
+bazel test --run_under='valgrind --leak-check=full' --define ETEQ_CFG=MIN \
+//teq:test //gen:ptest //tag:test //pbm:test //opt:test //opt/parse:test //eteq:ctest //eteq:ptest //perf:test //ccur:test
 
 # ===== Coverage Analysis ======
 echo "===== STARTING COVERAGE ANALYSIS =====";
diff --git a/testutil/src/tutil.cpp b/testutil/src/tutil.cpp
index ca8b584ef..697a92c1f 100644
--- a/testutil/src/tutil.cpp
+++ b/testutil/src/tutil.cpp
@@ -7,7 +7,7 @@
 namespace tutil
 {
 
-std::string compare_graph (std::istream& expectstr, ade::TensptrT root,
+std::string compare_graph (std::istream& expectstr, teq::TensptrT root,
 	bool showshape, LabelsMapT labels)
 {
 	PrettyEquation artist;
diff --git a/testutil/tutil.hpp b/testutil/tutil.hpp
index cfd37f042..140e2d1f9 100644
--- a/testutil/tutil.hpp
+++ b/testutil/tutil.hpp
@@ -1,4 +1,4 @@
-#include "dbg/stream/ade.hpp"
+#include "dbg/stream/teq.hpp"
 
 #ifndef TEST_TUTIL_HPP
 #define TEST_TUTIL_HPP
@@ -6,7 +6,7 @@
 namespace tutil
 {
 
-std::string compare_graph (std::istream& expectstr, ade::TensptrT root,
+std::string compare_graph (std::istream& expectstr, teq::TensptrT root,
 	bool showshape = true, LabelsMapT labels = {});
 
 #define EXPECT_GRAPHEQ(MSG, ROOT) {\
diff --git a/todo b/todo
index da27aa7ce..15613a890 100644
--- a/todo
+++ b/todo
@@ -6,7 +6,6 @@ P4 = "nice to have/need investigating"
 P5 = "get to it eventually"
 
 === feature ===
-P2 - update API to simplify operator with constants e.g.: X + 1 instead of explicitly creating constant 1
 P2 - allow users to create ambiguous nodes holding shapes with ambiguous dimensions (denoted in python by None)
 P2 - make shape dimension size configurable
 P2 - make constant pre-calculation configurable in opt module
@@ -17,7 +16,7 @@ P4 - implement reduce_prod gradient using Eigen cumprod
 
 === performance ===
 general performance:
-P2 - ensure ade is thread-safe
+P2 - ensure teq is thread-safe
 P3 - benchmark test rocnnet against various other frameworks (CPU only)
 P3 - convert as many shared_ptr to unique_ptr
 P4 - remove unordered_map/unordered_set if we're not using them properly