Skip to content

Commit 9306d13

Browse files
feat(inference): remove DeploymentQuantization.enabled field (#922)
Co-authored-by: Jonathan R. <jremy@scaleway.com>
1 parent cb9e3cb commit 9306d13

File tree

4 files changed: +0 additions, -24 deletions

scaleway-async/scaleway_async/inference/v1/marshalling.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -167,10 +167,6 @@ def unmarshal_DeploymentQuantization(data: Any) -> DeploymentQuantization:
167167

168168
args: Dict[str, Any] = {}
169169

170-
field = data.get("enabled", None)
171-
if field is not None:
172-
args["enabled"] = field
173-
174170
field = data.get("bits", None)
175171
if field is not None:
176172
args["bits"] = field
@@ -497,9 +493,6 @@ def marshal_DeploymentQuantization(
497493
) -> Dict[str, Any]:
498494
output: Dict[str, Any] = {}
499495

500-
if request.enabled is not None:
501-
output["enabled"] = request.enabled
502-
503496
if request.bits is not None:
504497
output["bits"] = request.bits
505498

scaleway-async/scaleway_async/inference/v1/types.py

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -113,11 +113,6 @@ class ModelSupportedNode:
113113

114114
@dataclass
115115
class DeploymentQuantization:
116-
enabled: bool
117-
"""
118-
Whether to enable quantization for this deployment.
119-
"""
120-
121116
bits: int
122117
"""
123118
The number of bits each model parameter should be quantized to. The quantization method is chosen based on this value.

scaleway/scaleway/inference/v1/marshalling.py

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -167,10 +167,6 @@ def unmarshal_DeploymentQuantization(data: Any) -> DeploymentQuantization:
167167

168168
args: Dict[str, Any] = {}
169169

170-
field = data.get("enabled", None)
171-
if field is not None:
172-
args["enabled"] = field
173-
174170
field = data.get("bits", None)
175171
if field is not None:
176172
args["bits"] = field
@@ -497,9 +493,6 @@ def marshal_DeploymentQuantization(
497493
) -> Dict[str, Any]:
498494
output: Dict[str, Any] = {}
499495

500-
if request.enabled is not None:
501-
output["enabled"] = request.enabled
502-
503496
if request.bits is not None:
504497
output["bits"] = request.bits
505498

scaleway/scaleway/inference/v1/types.py

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -113,11 +113,6 @@ class ModelSupportedNode:
113113

114114
@dataclass
115115
class DeploymentQuantization:
116-
enabled: bool
117-
"""
118-
Whether to enable quantization for this deployment.
119-
"""
120-
121116
bits: int
122117
"""
123118
The number of bits each model parameter should be quantized to. The quantization method is chosen based on this value.

0 commit comments

Comments (0)