Commit f8ecb67: stash
daquexian committed Aug 20, 2019
1 parent e065c8c
Showing 2 changed files with 4 additions and 0 deletions.
1 change: 1 addition & 0 deletions common/common_bitpack.h
@@ -16,6 +16,7 @@ inline void pack_64_bitset(const float *fptr, uint64_t *buf,
      * In this case, we need pad the tensor to make the
      * channel aligned with 128.
      */
+    BNN_ASSERT(eff_bits == 64, eff_bits);
     const size_t UNIT_LEN = 64;
     BNN_ASSERT(eff_bits <= UNIT_LEN, "The eff_bits ", eff_bits,
                " must be smaller than UNIT_LEN ", UNIT_LEN);
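
For context while reading this diff: below is a minimal sketch of what pack_64_bitset's contract appears to be, based on the hunk above. It assumes the function packs the sign bits of up to 64 floats into one uint64_t, that eff_bits counts the real channels while the remaining bits stay as padding (the 128-channel alignment the comment mentions), and that a set bit means a non-negative value. The _sketch suffix and the sign convention are illustrative assumptions, not the project's actual implementation.

    // Hedged sketch only; dabnn's real pack_64_bitset may differ (e.g. in its
    // sign convention or in using std::bitset internally).
    #include <cstddef>
    #include <cstdint>

    inline void pack_64_bitset_sketch(const float *fptr, std::uint64_t *buf,
                                      std::size_t eff_bits) {
        std::uint64_t bits = 0;
        // Only the first eff_bits entries are real channels; the rest of the
        // 64-bit word is padding and stays 0.
        for (std::size_t k = 0; k < eff_bits && k < 64; ++k) {
            if (fptr[k] >= 0.f) {  // assumed sign convention
                bits |= (std::uint64_t{1} << k);
            }
        }
        *buf = bits;
    }

Under this reading, the added BNN_ASSERT(eff_bits == 64, ...) would only hold when a chunk is fully occupied.
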
3 changes: 3 additions & 0 deletions dabnn/layers/BinConv.cpp
@@ -152,10 +152,13 @@ void BinConv::forward_impl() const {
     // pack_mat(*input_mat, *binarized_mat);
     const auto N = input_mat->n;
     const auto HWC = input_mat->h * input_mat->w * input_mat->elem_c;
+    PNT(HWC, input_mat->h, input_mat->w, input_mat->elem_c);
     auto *bptr = binarized_mat->point<uint64_t>(0, 0, 0);
     FORZ(n, N) {
         FORZS(i, HWC, 128) {
             const size_t eff_bits = std::max<size_t>(HWC - i, 128);
+            // PNT(eff_bits, std::min<size_t>(eff_bits, 64),
+            //     std::min<size_t>(std::max<size_t>(0, eff_bits - 64), 64));
             auto *fptr = ((float *) input_mat->data) + n * HWC + i;
             pack_64_bitset(fptr, bptr++,
                            std::min<size_t>(eff_bits, 64));
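
To clarify the 128-wide stepping in this hunk, here is a hedged sketch of how the loop appears to walk each image's HWC values in 128-channel chunks and emit two 64-bit words per chunk. The second pack_64_bitset call is inferred from the commented-out PNT arguments (eff_bits - 64); the free-function wrapper, the _sketch names, and the use of std::min for eff_bits (the hunk shows std::max) are assumptions for illustration, not the project's code.

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Assumed helper with the contract sketched after the first hunk.
    void pack_64_bitset_sketch(const float *fptr, std::uint64_t *buf,
                               std::size_t eff_bits);

    // Illustrative stand-in for the packing loop in BinConv::forward_impl.
    // Assumes eff_bits is the number of channels left in the current 128-bit
    // chunk and that buffers carry the 128-channel padding described in
    // common/common_bitpack.h.
    void pack_input_sketch(const float *data, std::uint64_t *bptr,
                           std::size_t N, std::size_t HWC) {
        for (std::size_t n = 0; n < N; ++n) {             // FORZ(n, N)
            for (std::size_t i = 0; i < HWC; i += 128) {  // FORZS(i, HWC, 128)
                const std::size_t eff_bits =
                    std::min<std::size_t>(HWC - i, 128);
                const float *fptr = data + n * HWC + i;
                // Low 64 bits of the chunk ...
                pack_64_bitset_sketch(fptr, bptr++,
                                      std::min<std::size_t>(eff_bits, 64));
                // ... then the high 64 bits; 0 effective bits when the chunk
                // ends inside the first word.
                pack_64_bitset_sketch(fptr + 64, bptr++,
                                      eff_bits > 64 ? eff_bits - 64 : 0);
            }
        }
    }
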
