-
Notifications
You must be signed in to change notification settings - Fork 254
/
multinomial-logistic-loss.jl
35 lines (27 loc) · 1 KB
/
multinomial-logistic-loss.jl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
# Forward pass of the multinomial logistic loss on the GPU.
#
# inputs[1] holds the predicted class probabilities, inputs[2] the ground-truth
# labels. The per-element losses are accumulated by a CUDA kernel into a
# 1x1x1x1 device blob, copied back to the host, and the normalized scalar is
# stored in `state.loss`.
function forward(backend::GPUBackend, state::MultinomialLogisticLossLayerState, inputs::Vector{Blob})
  predictions = inputs[1]
  labels      = inputs[2]
  data_type   = eltype(predictions)

  # Split the prediction blob around the operating dimension; the channel
  # count along that dimension is the size of each probability vector.
  spatial_dim, channels, num = split_dims(predictions, state.op_dim)
  prob_dim = channels

  # Launch geometry: enough x-blocks to cover `num` samples, one y-block per
  # spatial location.
  x_block = int(ceil(float64(num)/CUDA.THREADS_PER_BLOCK_X))
  y_block = spatial_dim

  # Device-side scalar accumulator for the total loss.
  loss_blob = make_zero_blob(backend, Float32, 1, 1, 1, 1)

  # Pick the precision-matched kernel; any other element type is an error.
  kernel = data_type == Float32 ? backend.mocha.logistic_loss_forward_float :
           data_type == Float64 ? backend.mocha.logistic_loss_forward_double :
           error("Unsupported data type $data_type")

  # A null weights blob is signalled to the kernel as a null device pointer.
  weights = isa(state.weights_blob, NullBlob) ?
      convert(Ptr{data_type}, 0) : state.weights_blob.ptr.p

  CUDA.launch(kernel, (x_block, y_block), (CUDA.THREADS_PER_BLOCK_X, 1),
      (predictions.ptr.p, labels.ptr.p, weights, num, spatial_dim, prob_dim, loss_blob.ptr.p))

  # Bring the accumulated loss back to the host and normalize by the number
  # of (sample, spatial-location) pairs.
  host_loss = Float32[0]
  copy!(host_loss, loss_blob)
  state.loss = host_loss[1] / (spatial_dim * num)
end