[Minor] Fix typo and remove unused code (vllm-project#2305)
esmeetu committed Jan 3, 2024
1 parent 4c86061 commit 6ab66cb
Showing 2 changed files with 1 addition and 22 deletions.
21 changes: 0 additions & 21 deletions vllm/model_executor/layers/sampler.py
@@ -112,27 +112,6 @@ def _prune_hidden_states(
                                       sampling_metadata.selected_token_indices)
 
 
-def _get_prompt_and_output_tokens(
-    sampling_metadata: SamplingMetadata,
-) -> Tuple[List[List[int]], List[List[int]]]:
-    prompt_tokens: List[List[int]] = []
-    output_tokens: List[List[int]] = []
-    for i, seq_group in enumerate(sampling_metadata.seq_groups):
-        seq_ids, sampling_params = seq_group
-        if (i < sampling_metadata.num_prompts
-                and sampling_params.prompt_logprobs is not None):
-            # NOTE: prompt token positions do not need output tokens to
-            # compute penalties.
-            prompt_len = sampling_metadata.prompt_lens[i]
-            prompt_tokens.extend([] for _ in range(prompt_len - 1))
-            output_tokens.extend([] for _ in range(prompt_len - 1))
-        for seq_id in seq_ids:
-            seq_data = sampling_metadata.seq_data[seq_id]
-            prompt_tokens.append(seq_data.prompt_token_ids)
-            output_tokens.append(seq_data.output_token_ids)
-    return prompt_tokens, output_tokens
-
-
 def _get_bin_counts_and_mask(
     tokens: torch.Tensor,
     vocab_size: int,
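
Note on the deletion: the removed helper materialized per-sequence Python lists of prompt and output token ids for penalty computation, while the sampler's penalty path works directly from token-id tensors via `_get_bin_counts_and_mask` (visible as diff context above), which is what leaves the helper unused. A minimal sketch of that bin-count-and-mask idea follows; the function name matches the one in the diff, but the exact signature, shapes, and padding convention are assumptions, not the repository's verbatim code.

    import torch

    def get_bin_counts_and_mask(
            tokens: torch.Tensor,
            vocab_size: int) -> tuple[torch.Tensor, torch.Tensor]:
        # Sketch (assumed convention): `tokens` is [num_seqs, max_len] of
        # int64 token ids, padded with `vocab_size` so padding lands in an
        # extra bin that is sliced off below.
        num_seqs = tokens.shape[0]
        bin_counts = torch.zeros((num_seqs, vocab_size + 1),
                                 dtype=torch.long,
                                 device=tokens.device)
        # Count occurrences of each token id per sequence.
        bin_counts.scatter_add_(1, tokens, torch.ones_like(tokens))
        bin_counts = bin_counts[:, :vocab_size]  # drop the padding bin
        mask = bin_counts > 0  # which ids occurred at least once
        return bin_counts, mask

Frequency and presence penalties can then be applied in one vectorized step, e.g. `logits -= frequency_penalty * bin_counts + presence_penalty * mask`, with no per-sequence Python lists involved.
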
2 changes: 1 addition & 1 deletion vllm/sampling_params.py
@@ -100,7 +100,7 @@ def __init__(
         temperature: float = 1.0,
         top_p: float = 1.0,
         top_k: int = -1,
-        min_p: int = 0.0,
+        min_p: float = 0.0,
         use_beam_search: bool = False,
         length_penalty: float = 1.0,
         early_stopping: Union[bool, str] = False,
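
On the typo fix itself: `min_p` is a probability fraction — tokens whose probability falls below `min_p` times that of the most likely token are filtered out — so `float` is the correct annotation. A rough sketch of min-p filtering under that reading (illustrative only, not the sampler's verbatim code):

    import torch

    def apply_min_p(logits: torch.Tensor, min_p: float) -> torch.Tensor:
        # logits: [num_seqs, vocab_size]. Keep only tokens whose probability
        # is at least min_p * p(most likely token); mask the rest to -inf.
        probs = torch.softmax(logits, dim=-1)
        top_probs = probs.max(dim=-1, keepdim=True).values
        return logits.masked_fill(probs < min_p * top_probs, float("-inf"))

With the default `min_p=0.0` nothing is filtered, which is why the wrong `int` annotation never caused a runtime error: Python does not enforce annotations, and the default value is valid either way.
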
