32b dim_t fixes (#3990)

Summary:
These are needed to fix tests with 32b dim_t.

This also sneaks in a fix for some whitespace in a .py script that "format.sh fix" keeps modifying (at least with clang-format-7).
Pull Request resolved: #3990

Test Plan: 32b and 64b dim_t via pocl CPU driver (LLVM 8).

Differential Revision: D19327112

Pulled By: jfix71

fbshipit-source-id: 58bcde09902d159816b4ecf2a678b464bb93adc4
pjaaskel authored and facebook-github-bot committed Jan 9, 2020
1 parent abeb69c commit bfb251d7fddcc6e7323229376ab64f8affe470f0
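
The diffs below all revolve around Glow's configurable index width: dim_t (and its signed counterpart sdim_t) can be built as either 32-bit or 64-bit, so code that hard-codes ElemKind::Int64ITy for index tensors breaks on 32b builds. A minimal, self-contained sketch of the idea, not Glow's actual definitions (the DIM_T_32 flag and the trimmed-down enum here are hypothetical; only dim_t, sdim_t and IndexElemKind appear in the diff):

// Sketch only: pick the index element kind that matches the configured
// dim_t width instead of hard-coding a 64-bit kind at every call site.
#include <cstdint>

#ifdef DIM_T_32                 // hypothetical build flag for 32b dim_t
using dim_t = uint32_t;         // unsigned dimension/index type
using sdim_t = int32_t;         // signed counterpart used by typed handles
#else
using dim_t = uint64_t;
using sdim_t = int64_t;
#endif

enum class ElemKind { Int32ITy, Int64ITy /* ... */ };

// The element kind that index tensors should be created and verified with.
constexpr ElemKind IndexElemKind =
    sizeof(dim_t) == 4 ? ElemKind::Int32ITy : ElemKind::Int64ITy;
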
@@ -1291,7 +1291,7 @@ static bool verifyFusedRowwiseQuantizedSparseLengthsSum(
   // For EmbeddingBagByteRowwiseOffsets lengths are really offsets and should be
   // Int64ITy.
   if (isEmbeddingBagByteRowwiseOffsets) {
-    isValid &= checkType(lengths, ElemKind::Int64ITy, parent);
+    isValid &= checkType(lengths, IndexElemKind, parent);
   } else {
     isValid &= checkType(lengths, ElemKind::Int32ITy, parent);
   }
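
For a concrete failure mode: a 32b dim_t build creates the EmbeddingBagByteRowwiseOffsets offsets tensor as Int32ITy, so the old hard-coded check amounted to the following (sketch, with the checkType usage taken from the diff and the surrounding context assumed):

// Old behaviour on a 32b build: lengths is Int32ITy, the check demands
// Int64ITy, so verification fails even though the graph is fine.
isValid &= checkType(lengths, ElemKind::Int64ITy, parent);

Checking against IndexElemKind instead keeps the verifier in sync with however dim_t was configured.
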
@@ -7866,14 +7866,13 @@ createAndInitRWQSLWSAllSame(glow::PlaceholderBindings &bindings,
       0.2244578, 0.44881952, 0.42696562, 0.33007848, 0.4511249, 0.11568925,
       0.02629679, 0.33864713, 0.42614424};

-  Placeholder *indices =
-      mod.createPlaceholder(ElemKind::Int64ITy, {21}, "indices",
-                            /* isTrainable */ false);
+  Placeholder *indices = mod.createPlaceholder(IndexElemKind, {21}, "indices",
+                                               /* isTrainable */ false);
   Placeholder *lengths =
       mod.createPlaceholder(ElemKind::Int32ITy, {2}, "lengths",
                             /* isTrainable */ false);

-  bindings.allocate(indices)->getHandle<int64_t>() = {
+  bindings.allocate(indices)->getHandle<sdim_t>() = {
       11, 8, 19, 8, 4, 11, 4, 19, 6, 18, 2, 6, 15, 5, 14, 14, 15, 13, 4, 6, 5,
   };
   bindings.allocate(lengths)->getHandle<int32_t>() = {15, 6};
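
In the unit-test fixture above, the typed handle has to match the placeholder's element kind, which is why the indices handle switches from int64_t to sdim_t alongside the IndexElemKind placeholder. A short sketch of the pairing (names taken from the diff; the surrounding setup is assumed):

// Sketch: keep the placeholder's element kind and the handle's C++ type in
// lockstep. On a 64b dim_t build sdim_t is int64_t, on a 32b build int32_t,
// so getHandle<sdim_t>() matches IndexElemKind either way, while the lengths
// tensor stays Int32ITy / int32_t on both.
Placeholder *indices = mod.createPlaceholder(IndexElemKind, {21}, "indices",
                                             /* isTrainable */ false);
bindings.allocate(indices)->getHandle<sdim_t>() = {11, 8, 19 /* ... */};

Mismatching the two (e.g. getHandle<int64_t>() against an Int32ITy placeholder on a 32b build) is what was breaking these tests with 32b dim_t.
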
@@ -541,7 +541,7 @@ static void createSimpleSparseNNModule(Module &mod) {
   for (int table = 0; table < 5; table++) {
     Tensor data(ElemKind::FloatTy, {tableEntries, tableWidth});
     auto indices = mod.createPlaceholder(
-        ElemKind::Int64ITy, {numIndices * batchSize}, "indices", false);
+        IndexElemKind, {numIndices * batchSize}, "indices", false);
     auto weights = mod.createPlaceholder(
         ElemKind::FloatTy, {numIndices * batchSize}, "weights", false);
     auto lengths = mod.createPlaceholder(ElemKind::Int32ITy, {batchSize},
@@ -190,7 +190,9 @@ def compute_gru(forward):
             dir_idx)

         def f(x): return (1 / (1 + np.exp(-x)))
+
         def g(x): return np.tanh(x)
+
         def mm(x, w): return np.matmul(x, w.transpose())
         Ht = np.reshape(initial_h[dir_idx, :, :], [batch_size, hidden_size])

@@ -173,6 +173,7 @@ def compute_rnn(forward):
         Wi, Ri, bWi, bRi = get_weights(dir_idx)

         def f(x): return np.tanh(x)
+
         def mm(x, w): return np.matmul(x, w.transpose())
         Ht = np.reshape(initial_h[dir_idx, :, :], [batch_size, hidden_size])
