[Compressed Tensors] Add XPU wNa16 support #29484
Merged · +102 −0
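This PR hooks compressed-tensors wNa16 checkpoints (4-bit weights, 16-bit activations) up to IPEX's weight-only-quantization linear kernel on Intel XPU devices. A minimal usage sketch; the checkpoint name below is a hypothetical placeholder, not something shipped with this PR:

```python
# Sketch: serving a wNa16 compressed-tensors checkpoint on an XPU device.
# The model name is a hypothetical placeholder for a W4A16 checkpoint.
from vllm import LLM, SamplingParams

llm = LLM(model="some-org/Llama-3-8B-W4A16-compressed-tensors")
params = SamplingParams(temperature=0.0, max_tokens=32)
outputs = llm.generate(["What is weight-only quantization?"], params)
print(outputs[0].outputs[0].text)
```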
Commits (16):
- 08a9704 yiliu30: add ipex woq kernel for CT
- 6a95599 yiliu30: format
- fd0c3f8 yiliu30: fix
- a9538e4 yiliu30: Merge branch 'main' into woq-xpu
- 1227dc3 yiliu30: Merge branch 'main' into woq-xpu
- 94c904d yiliu30: correct
- 1600e94 yiliu30: fix input shape
- 53c797b yiliu30: rename file
- 3fcf033 yiliu30: fix
- 1432e93 yiliu30: Merge branch 'main' into woq-xpu
- a46f2cf yiliu30: Merge branch 'main' into woq-xpu
- ff3285f yiliu30: Merge branch 'main' into woq-xpu
- 189a351 yiliu30: Merge branch 'main' into woq-xpu
- af19489 yiliu30: Merge branch 'main' into woq-xpu
- d9e1e2c yiliu30: Merge branch 'main' into woq-xpu
- ea3c42a yiliu30: add e2e test
vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py (97 additions, 0 deletions)
```python
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import torch

from vllm.platforms import current_platform

from .MPLinearKernel import MPLinearKernel, MPLinearLayerConfig


class XPUwNa16LinearKernel(MPLinearKernel):
    @classmethod
    def get_min_capability(cls) -> int:
        return 0

    @classmethod
    def can_implement(cls, c: MPLinearLayerConfig) -> tuple[bool, str | None]:
        if not current_platform.is_xpu():
            return False, "IPEX wNa16 is only supported on XPU devices"

        # TODO: (yiliu30) relax these restrictions in later PRs
        if c.zero_points:
            return False, "Zero points are not supported for now"

        return True, None

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        from packaging import version

        MIN_IPEX_VERSION = "2.6.0"
        bias = layer.bias if not layer.skip_bias_add else None

        try:
            import intel_extension_for_pytorch as ipex

            if version.parse(ipex.__version__) < version.parse(MIN_IPEX_VERSION):
                raise ImportError(
                    "intel_extension_for_pytorch version is too old. "
                    "Please install "
                    f"intel_extension_for_pytorch>={MIN_IPEX_VERSION}."
                )
        except ImportError as err:
            raise ImportError(
                "Please install "
                f"intel_extension_for_pytorch>={MIN_IPEX_VERSION} via "
                f"`pip install intel_extension_for_pytorch>={MIN_IPEX_VERSION}`"
                " to use the IPEX wNa16 linear method."
            ) from err
        # Use INT8 as the compute dtype (lowp_mode) to leverage
        # higher-performance instructions.
        lowp_mode = ipex.quantization.WoqLowpMode.INT8
        # The weight will be de-packed from INT4 to INT8.
        weight_dtype = ipex.quantization.WoqWeightDtype.INT4
        # The float activation will be quantized (dynamic, per-token) to INT8.
        act_quant_mode = ipex.quantization.WoqActQuantMode.PER_BATCH

        qconfig = ipex.quantization.get_weight_only_quant_qconfig_mapping(
            weight_dtype=weight_dtype,
            lowp_mode=lowp_mode,
            act_quant_mode=act_quant_mode,
            group_size=self.config.group_size,
            weight_qscheme=ipex.quantization.WoqWeightQScheme.SYMMETRIC,
        )
        qweight = layer.weight_packed
        g_idx = layer.weight_g_idx if self.config.has_g_idx else None
        scales = layer.weight_scale
        qzeros = None
        if self.config.zero_points:
            qzeros = layer.weight_zero_point.contiguous()
        qweight = qweight.t().contiguous()
        scales = scales.t().contiguous()
        layer.ipex_output_size = self.config.partition_weight_shape[1]
        layer.ipex_qlinear = (
            ipex.llm.quantization.woq_linear.IPEXWeightOnlyQuantizedLinear.from_weight(
                qweight,
                scales,
                qzeros,
                in_features=self.config.partition_weight_shape[0],
                out_features=self.config.partition_weight_shape[1],
                qconfig=qconfig,
                g_idx=g_idx,
                bias=bias,
                group_size=self.config.group_size,
                quant_method=0,  # `0` stands for IPEX's GPTQ quant method
            )
        )

    def apply_weights(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        bias: torch.Tensor | None = None,
    ) -> torch.Tensor:
        reshaped_x = x.reshape(-1, x.shape[-1])
        out = layer.ipex_qlinear(reshaped_x)
        return out.reshape(x.shape[:-1] + (layer.ipex_output_size,))
```
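`apply_weights` flattens all leading activation dimensions into a single batch dimension before calling the IPEX linear, then restores them afterwards, presumably because `ipex_qlinear` expects 2-D input. A self-contained sketch of that reshape round-trip, with a plain `torch.nn.Linear` standing in for `layer.ipex_qlinear`:

```python
import torch

in_features, out_features = 64, 128
# Stand-in for layer.ipex_qlinear: any (batch, in_features) -> (batch, out_features) op.
linear = torch.nn.Linear(in_features, out_features, bias=False)

x = torch.randn(2, 5, in_features)                 # (batch, seq_len, hidden)
reshaped_x = x.reshape(-1, x.shape[-1])            # (10, 64): leading dims flattened
out = linear(reshaped_x)                           # (10, 128)
out = out.reshape(x.shape[:-1] + (out_features,))  # (2, 5, 128): leading dims restored
assert out.shape == (2, 5, out_features)
```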
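For background on the format this kernel consumes (not code from the PR): wNa16 with `WoqWeightQScheme.SYMMETRIC` and no zero points means each group of `group_size` input channels shares one scale and is quantized symmetrically to signed INT4, which is why `can_implement` rejects configs with `zero_points`. A minimal sketch of that quantization step, assuming an INT4 range of [-8, 7]:

```python
import torch

def quantize_w4_symmetric(w: torch.Tensor, group_size: int):
    """Symmetric group-wise INT4 quantization of an (out_features, in_features) weight."""
    out_f, in_f = w.shape
    g = w.reshape(out_f, in_f // group_size, group_size)
    # One scale per group; symmetric, so the max-magnitude value maps to +/-7.
    scales = g.abs().amax(dim=-1, keepdim=True).clamp(min=1e-8) / 7.0
    q = torch.clamp(torch.round(g / scales), -8, 7).to(torch.int8)
    return q.reshape(out_f, in_f), scales.squeeze(-1)

w = torch.randn(128, 64)
q, scales = quantize_w4_symmetric(w, group_size=32)
# Dequantize to check the round trip; the error stays within roughly half a step.
w_hat = (q.float().reshape(128, -1, 32) * scales.unsqueeze(-1)).reshape(128, 64)
print((w - w_hat).abs().max())
```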