From 6abb3d39421ddaf96e4c0b9a0563be9743a3eaa3 Mon Sep 17 00:00:00 2001
From: Tim Bauerle <tim.bauerle@rwth-aachen.de>
Date: Wed, 3 Jul 2024 11:21:51 +0200
Subject: [PATCH] Reduced server GPU memory usage for Parallel Swarm Learning

---
 edml/core/server.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/edml/core/server.py b/edml/core/server.py
index a671ba9..3c9833e 100644
--- a/edml/core/server.py
+++ b/edml/core/server.py
@@ -335,7 +335,14 @@ class DeviceServer:
                 self._lr_scheduler.step(round_no + 1)  # epoch=1
             else:
                 self._lr_scheduler.step()
-
+        # delete references and free GPU memory manually
+        server_batch = None
+        server_labels = None
+        server_gradients = None
+        client_gradients = None
+        concatenated_client_gradients = None
+        mean_tensor = None
+        torch.cuda.empty_cache()
         return (
             self.node_device.client.get_weights(),
             self.get_weights(),
-- 
GitLab
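
The hunk above trims server-side GPU memory by dropping the last Python references to the batch, label, and gradient tensors and then calling torch.cuda.empty_cache() before the weights are returned. The following standalone sketch illustrates that pattern outside the edml codebase; the function name train_step_with_cleanup, the Linear model, and all variable names are illustrative assumptions and are not taken from edml/core/server.py.

    # Minimal sketch of the cleanup pattern used in the patch: release tensor
    # references, then return cached blocks to the CUDA driver. Names here are
    # illustrative only.
    import torch


    def train_step_with_cleanup(model: torch.nn.Module, batch: torch.Tensor) -> float:
        """Run one forward/backward pass, then release intermediate GPU tensors."""
        output = model(batch)
        loss = output.sum()
        loss.backward()
        loss_value = loss.item()

        # Drop the Python references so PyTorch's allocator can reuse the memory ...
        output = None
        loss = None
        # ... and hand the now-unused cached blocks back to the CUDA driver.
        # Without empty_cache() they would stay in PyTorch's caching allocator
        # and still count as reserved GPU memory.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        return loss_value


    if __name__ == "__main__":
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = torch.nn.Linear(16, 4).to(device)
        batch = torch.randn(8, 16, device=device)
        print("loss:", train_step_with_cleanup(model, batch))
        if device == "cuda":
            print("reserved bytes after cleanup:", torch.cuda.memory_reserved())

Note that empty_cache() does not give the calling process more usable memory on its own, since the caching allocator would reuse those blocks anyway; its effect is to lower the memory the driver reports as reserved, which can matter when other processes, such as co-located client devices in parallel swarm learning, share the same GPU.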