From d0e36d82ba775e5eed8c3d226e1bd2fca338be76 Mon Sep 17 00:00:00 2001
From: Wenhua Cheng
Date: Mon, 10 Nov 2025 17:12:47 +0800
Subject: [PATCH] fix bug

---
 auto_round/compressors/base.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/auto_round/compressors/base.py b/auto_round/compressors/base.py
index 8f398e7a1..7ab11d0ff 100644
--- a/auto_round/compressors/base.py
+++ b/auto_round/compressors/base.py
@@ -2699,7 +2699,7 @@ def _quantize_block(
         )
         logger.info(dump_info)
         if self.low_gpu_mem_usage:
-            clear_memory(self.device_list)  # clear cached memory during training
+            clear_memory(device_list=self.device_list)  # clear cached memory during training
         if len(unquantized_layer_names) != 0:
            logger.info(f"{unquantized_layer_names} have not been quantized")
         with torch.no_grad():
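
Note: a minimal sketch of why the keyword argument matters. This assumes a `clear_memory` helper whose first positional parameter is a tensor rather than a device list; the actual signature in `auto_round` may differ. Under that assumption, passing `self.device_list` positionally would bind it to the tensor parameter and the per-device cache clearing would never run.

```python
# Hypothetical signature for illustration only; not the real auto_round helper.
import gc

import torch


def clear_memory(tensor=None, device_list=None):
    """Free cached accelerator memory, optionally per device (illustrative)."""
    if tensor is not None:
        del tensor  # drop the local reference before collecting
    gc.collect()
    if torch.cuda.is_available():
        if device_list:
            for device in device_list:
                with torch.cuda.device(device):
                    torch.cuda.empty_cache()
        else:
            torch.cuda.empty_cache()


# Positional call: the device list silently binds to `tensor`,
# so no per-device cache clearing happens.
clear_memory([0, 1])

# Keyword call (the fix in this patch): the device list reaches
# the intended parameter.
clear_memory(device_list=[0, 1])
```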