We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 454391f · commit fbf94e0 (Copy full SHA for fbf94e0)
fms_mo/quant_refactor/quantizers_new.py
@@ -2913,6 +2913,7 @@ def __init__(
         self.perCh = perCh
         self.extend_act_range = extend_act_range
         self.perGp = perGp
+        self.recompute_clips = False

         self.set_quantizer()
@@ -2997,7 +2998,7 @@ def forward(self, input):
         if len(clipvaln_new.shape) == 0:
             clipvaln_new = clipvaln_new.unsqueeze(dim=0)
-        if self.Niter == 0 and self.training:
+        if (self.Niter == 0 and self.training) or self.recompute_clips:
             # to avoid unintended bwd ops added to the graph, cause memory leak sometimes
             with torch.no_grad():
                 # similar to fill_(), will not change id(self.clip_val) but update the values
0 commit comments