4 files changed, 0 additions and 24 deletions, under scaleway-async/scaleway_async/inference/v1 and scaleway/scaleway/inference/v1.

scaleway-async/scaleway_async/inference/v1 (marshalling):

@@ -167,10 +167,6 @@ def unmarshal_DeploymentQuantization(data: Any) -> DeploymentQuantization:
 
     args: Dict[str, Any] = {}
 
-    field = data.get("enabled", None)
-    if field is not None:
-        args["enabled"] = field
-
     field = data.get("bits", None)
     if field is not None:
         args["bits"] = field
@@ -497,9 +493,6 @@ def marshal_DeploymentQuantization(
 ) -> Dict[str, Any]:
     output: Dict[str, Any] = {}
 
-    if request.enabled is not None:
-        output["enabled"] = request.enabled
-
     if request.bits is not None:
         output["bits"] = request.bits
 
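Taken together, the two hunks above leave the generated helpers reading and writing only the `bits` key. A minimal, self-contained sketch of the post-change behaviour (the real generated functions take additional parameters that are not visible in these hunks; this sketch trims them for illustration):

```python
from dataclasses import dataclass
from typing import Any, Dict


@dataclass
class DeploymentQuantization:
    # Only the bit width remains on the dataclass after this change.
    bits: int


def unmarshal_DeploymentQuantization(data: Any) -> DeploymentQuantization:
    # The "enabled" key is no longer read from the API payload.
    args: Dict[str, Any] = {}

    field = data.get("bits", None)
    if field is not None:
        args["bits"] = field

    return DeploymentQuantization(**args)


def marshal_DeploymentQuantization(request: DeploymentQuantization) -> Dict[str, Any]:
    # The "enabled" flag is no longer serialized into the request body.
    output: Dict[str, Any] = {}

    if request.bits is not None:
        output["bits"] = request.bits

    return output
```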
scaleway-async/scaleway_async/inference/v1 (type definitions):

@@ -113,11 +113,6 @@ class ModelSupportedNode:
 
 @dataclass
 class DeploymentQuantization:
-    enabled: bool
-    """
-    Whether to enable quantization for this deployment.
-    """
-
     bits: int
     """
     The number of bits each model parameter should be quantized to. The quantization method is chosen based on this value.
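With `enabled` removed from the dataclass, quantization is configured solely through the bit width. A hypothetical construction by a caller (the import path is assumed from the directory layout, not shown in this diff):

```python
from scaleway_async.inference.v1 import DeploymentQuantization  # assumed re-export path

# Quantize model parameters to 8 bits; there is no separate on/off flag anymore.
quantization = DeploymentQuantization(bits=8)
```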
scaleway/scaleway/inference/v1 (marshalling):

@@ -167,10 +167,6 @@ def unmarshal_DeploymentQuantization(data: Any) -> DeploymentQuantization:
 
     args: Dict[str, Any] = {}
 
-    field = data.get("enabled", None)
-    if field is not None:
-        args["enabled"] = field
-
     field = data.get("bits", None)
     if field is not None:
         args["bits"] = field
@@ -497,9 +493,6 @@ def marshal_DeploymentQuantization(
 ) -> Dict[str, Any]:
     output: Dict[str, Any] = {}
 
-    if request.enabled is not None:
-        output["enabled"] = request.enabled
-
     if request.bits is not None:
         output["bits"] = request.bits
 
scaleway/scaleway/inference/v1 (type definitions):

@@ -113,11 +113,6 @@ class ModelSupportedNode:
 
 @dataclass
 class DeploymentQuantization:
-    enabled: bool
-    """
-    Whether to enable quantization for this deployment.
-    """
-
     bits: int
     """
     The number of bits each model parameter should be quantized to. The quantization method is chosen based on this value.
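The sync `scaleway` package mirrors the async change above. One practical consequence, sketched against the helper functions shown earlier: because the unmarshaller now only looks at `bits`, a payload that still carries an `enabled` key is silently ignored (field names from the diff, values invented):

```python
# Hypothetical API payload with a stale "enabled" key.
payload = {"enabled": True, "bits": 4}

quantization = unmarshal_DeploymentQuantization(payload)
print(quantization)  # DeploymentQuantization(bits=4); the "enabled" key is dropped.
```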