From 94c67a30887188a1a829b590941584d865b894a1 Mon Sep 17 00:00:00 2001
From: Mr-Neutr0n <64578610+Mr-Neutr0n@users.noreply.github.com>
Date: Wed, 11 Feb 2026 19:35:12 +0530
Subject: [PATCH] Fix LoRA memory leak: delete data_restore after weight
 restoration

In restore_lora(), the data_restore attribute stored on weight tensors
was never cleaned up after restoring the weights. Each merge/restore
cycle therefore kept a cloned tensor copy alive, effectively doubling
GPU memory usage over time.

Changes:
- Delete the data_restore attribute after restoring weights in
  restore_lora(), so the stored tensor copy is freed immediately
- Replace the misleading loop condition `while len(layer_infos) > -1`
  (always true, since len() >= 0) with `while True` in both merge_lora()
  and restore_lora(), relying on the existing explicit break logic

Fixes #589
---
 facechain/merge_lora.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/facechain/merge_lora.py b/facechain/merge_lora.py
index dbd19833..82690df0 100644
--- a/facechain/merge_lora.py
+++ b/facechain/merge_lora.py
@@ -54,7 +54,7 @@ def merge_lora(pipeline,
             curr_layer = pipeline.unet
 
         temp_name = layer_infos.pop(0)
-        while len(layer_infos) > -1:
+        while True:
             try:
                 curr_layer = curr_layer.__getattr__(temp_name)
                 if len(layer_infos) > 0:
@@ -138,7 +138,7 @@ def restore_lora(pipeline,
             curr_layer = pipeline.unet
 
         temp_name = layer_infos.pop(0)
-        while len(layer_infos) > -1:
+        while True:
             try:
                 curr_layer = curr_layer.__getattr__(temp_name)
                 if len(layer_infos) > 0:
@@ -162,5 +162,6 @@ def restore_lora(pipeline,
         curr_layer.weight.data = curr_layer.weight.data.to(device)
 
     curr_layer.weight.data = curr_layer.weight.data_restore.clone()
+    del curr_layer.weight.data_restore
 
     return pipeline
\ No newline at end of file
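
Note for reviewers: below is a minimal, self-contained sketch of the leak
pattern and why the added `del` resolves it. It is illustrative only; the
merge/restore helpers and the torch.nn.Linear layer are stand-ins, not
facechain code.

    import torch

    def merge(layer: torch.nn.Linear, delta: torch.Tensor) -> None:
        # Stash a backup of the original weights on the parameter object,
        # analogous to how merge_lora() stores weight.data_restore.
        layer.weight.data_restore = layer.weight.data.clone()
        layer.weight.data += delta

    def restore(layer: torch.nn.Linear) -> None:
        # Put the original weights back ...
        layer.weight.data = layer.weight.data_restore.clone()
        # ... and drop the stashed copy so it can be garbage-collected.
        # Without this `del`, a full-size clone stays referenced by the
        # parameter after every merge/restore cycle.
        del layer.weight.data_restore

    layer = torch.nn.Linear(4, 4)
    for _ in range(3):
        merge(layer, torch.full((4, 4), 0.01))
        restore(layer)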