Skip to content

Commit

Permalink
Fix infinite loop caused by incorrect timestamp token predictions (#914)
Browse files Browse the repository at this point in the history
* Fix infinite loop caused by incorrect timestamp token predictions

#810

* Update decoding.py

---------

Co-authored-by: Jong Wook Kim <jongwook@openai.com>
  • Loading branch information
andrewchernyh and jongwook committed Feb 1, 2023
1 parent 5c1a8c1 commit 7858aa9
Showing 1 changed file with 7 additions and 1 deletion.
8 changes: 7 additions & 1 deletion whisper/decoding.py
Original file line number Diff line number Diff line change
Expand Up @@ -412,7 +412,8 @@ def apply(self, logits: Tensor, tokens: Tensor):

# timestamps have to appear in pairs, except directly before EOT; mask logits accordingly
for k in range(tokens.shape[0]):
seq = [t for t in tokens[k, self.sample_begin :].tolist()]
sampled_tokens = tokens[k, self.sample_begin :]
seq = [t for t in sampled_tokens.tolist()]
last_was_timestamp = len(seq) >= 1 and seq[-1] >= self.tokenizer.timestamp_begin
penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= self.tokenizer.timestamp_begin

Expand All @@ -422,6 +423,11 @@ def apply(self, logits: Tensor, tokens: Tensor):
else: # cannot be normal text tokens
logits[k, : self.tokenizer.eot] = -np.inf

timestamps = sampled_tokens[sampled_tokens.ge(self.tokenizer.timestamp_begin)]
if timestamps.numel() > 0:
# timestamps shouldn't decrease; forbid timestamp tokens smaller than the last
logits[k, self.tokenizer.timestamp_begin : timestamps[-1]] = -np.inf

if tokens.shape[1] == self.sample_begin:
# suppress generating non-timestamp tokens at the beginning
logits[:, : self.tokenizer.timestamp_begin] = -np.inf
Expand Down

0 comments on commit 7858aa9

Please sign in to comment.