-
Notifications
You must be signed in to change notification settings - Fork 0
/
test.py
62 lines (52 loc) · 1.53 KB
/
test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
# -*- coding:utf-8 -*-
# @Author : Michael-Wang
import numpy as np
import tensorflow as tf
from unlikelihood_loss import sequence_unlikelihood_loss
if __name__ == '__main__':
    """
    Manual calculation of the expected unlikelihood loss, to compare against
    the value returned by sequence_unlikelihood_loss.

    targets = [[0, 1, 1, 4, 0]]
    weights = [[1, 1, 1, 1, 0]]
    logits: shape (1, 5, 10), every entry 0.1 (uniform over the vocabulary)

    step 0: no previous tokens           -> loss = 0
    step 1: token 0 should not occur     -> loss = -log(1 - 0.1)
    step 2: token 0 should not occur     -> loss = -log(1 - 0.1)
    step 3: tokens 0 and 1 should not occur
                                         -> loss = -log(1 - 0.1) + -log(1 - 0.1)
    step 4: out of length (weight 0)     -> loss = 0

    total_loss = 4 * -log(1 - 0.1) ~= 0.42144  (NOT 0.5 as an earlier note said;
                                               0.42144 / 4 matches the result below)
    total_step = 4
    candidate_loss = -log(1 - 0.1) * 4 / 4 = -log(1 - 0.1) = 0.10536051565782628
    """
    """
    function result:
    0.10536051565782628
    """
    vocab_size = 10
    sequence_len = 5
    batch_size = 1
    dtype = tf.float32
    # NOTE(review): targets are token indices but are constructed as float32
    # here; int32 is the customary dtype for index tensors — confirm that
    # sequence_unlikelihood_loss genuinely expects floats before changing.
    targets = tf.constant(
        [[0, 1, 1, 4, 0]],
        dtype=dtype
    )
    # Per-step mask: the last step carries weight 0 (padded / out of length).
    weights = tf.constant(
        [[1, 1, 1, 1, 0]],
        dtype=dtype
    )
    # Uniform distribution: every vocabulary entry gets 1/vocab_size = 0.1.
    logits = tf.constant(
        np.ones((batch_size, sequence_len, vocab_size,)) * (1.0 / vocab_size),
        dtype=dtype
    )
    g = sequence_unlikelihood_loss(logits, targets, weights)
    # Use a context manager so the session is closed deterministically; the
    # original created tf.Session() inline and never released it.
    with tf.Session() as sess:
        print(sess.run(g))