Commit 0300194
fixed torch reinit
Robert Shaw committed May 9, 2024
1 parent 4c04122 commit 0300194
Showing 2 changed files with 4 additions and 5 deletions.
7 changes: 3 additions & 4 deletions vllm/model_executor/model_loader/weight_utils.py
@@ -18,13 +18,10 @@

 from vllm.config import LoadConfig, ModelConfig
 from vllm.logger import init_logger
 # UPSTREAM SYNC: needed for sparsity
 from vllm.model_executor.layers.parameters import LazyCompressedParameter
 from vllm.model_executor.layers.quantization import (QuantizationConfig,
                                                      get_quantization_config)
 from vllm.model_executor.layers.quantization.schema import QuantParamSchema
-from vllm.model_executor.layers.sparsity import (SparsityConfig,
-                                                 get_sparsity_config)

 logger = init_logger(__name__)

@@ -119,7 +116,9 @@ def convert_bin_to_safetensor_file(

 # UPSTREAM SYNC: needed for sparsity
 # TODO: (MLE) load compressed models from here
-def get_sparse_config(model_config: ModelConfig) -> SparsityConfig:
+def get_sparse_config(model_config: ModelConfig) -> QuantizationConfig:
+    # Lazy import for optional nm-magic-wand-nightly.
+    from vllm.model_executor.layers.sparsity import get_sparsity_config
     sparsity_cls = get_sparsity_config(model_config.sparsity)
     hf_sparsity_config = getattr(model_config.hf_config, "sparsity_config",
                                  None)
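The substantive change is moving the sparsity import inside get_sparse_config, so the optional nm-magic-wand-nightly backend (which, per the commit message, was re-initializing torch state at import time) is only loaded when sparsity is actually requested. A minimal sketch of this deferred-import pattern, using hypothetical optional_backend / get_config_class names rather than the real vLLM modules:

# Sketch of the lazy-import pattern applied above; the imported names
# are illustrative, not the real vLLM or magic_wand API.
def get_backend_config(name: str):
    # Deferred import: if the optional backend is not installed (or has
    # import-time side effects), merely importing this module is safe;
    # any ImportError or side effect occurs only when this function runs.
    from optional_backend import get_config_class
    return get_config_class(name)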
2 changes: 1 addition & 1 deletion vllm/worker/model_runner.py
@@ -1033,7 +1033,7 @@ def vocab_size(self) -> int:
         return self.model_config.get_vocab_size()


-class CUDAGraphRunner():
+class CUDAGraphRunner:

     def __init__(self, model: nn.Module):
         self.model = model
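The model_runner.py change is purely stylistic: when a class has no base classes, the empty parentheses are redundant, and PEP 8-style code writes class CUDAGraphRunner:. Both forms define exactly the same class:

# Equivalent class definitions; empty parentheses add nothing when
# there are no base classes.
class WithParens():       # legal but redundant
    pass

class WithoutParens:      # idiomatic
    pass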

1 comment on commit 0300194

@github-actions
Benchmark suite: bigger_is_better (Current: 0300194, Previous: df1f1a0)

Shared configuration (VLLM Engine throughput, synthetic):
  model: NousResearch/Llama-2-7b-chat-hf, max_model_len: 4096
  benchmark_throughput args: use-all-available-gpus, input-len 256, output-len 128, num-prompts 1000
  GPU: NVIDIA A10G x 1; vllm_version: 0.2.0; Python: 3.10.12; torch: 2.3.0+cu121

Benchmark             Current (0300194)
request_throughput    3.8340460653484705 prompts/s
token_throughput      1472.2736890938127 tokens/s

This comment was automatically generated by a workflow using github-action-benchmark.
