diff --git a/config/default.yaml b/config/default.yaml
index dac99d0ff6f6b963c60d9cc8e16b0c82df22d217..cf51f4118495971a2b319f335d6844cc6cb1351f 100644
--- a/config/default.yaml
+++ b/config/default.yaml
@@ -14,8 +14,9 @@ defaults:
   - wandb: default
   - _self_
 
-# If true, controllers will run devices in parallel. If false, they will run sequentially and their runtime is corrected
+# If False, controllers will run devices in parallel. If True, they will run sequentially and their runtime is corrected
 # to account for the parallelism in post-processing.
+# Important Note: In a limited-energy setting, the runtime (i.e. wall time) will not be accounted for correctly if parallelism is only simulated
 simulate_parallelism: False
 own_device_id: "d0"
 num_devices: ${len:${topology.devices}}
diff --git a/edml/controllers/parallel_split_controller.py b/edml/controllers/parallel_split_controller.py
index 82bf519a06dc456d79ba25368914c376236275c9..05e5e49685086fcc47b7af390a64c825118abbe0 100644
--- a/edml/controllers/parallel_split_controller.py
+++ b/edml/controllers/parallel_split_controller.py
@@ -49,13 +49,13 @@ class ParallelSplitController(BaseController):
                 )
 
             # Start parallel training of all client devices.
-            adaptive_threshold = self._adaptive_threshold_fn.invoke(i)
-            self.logger.log({"adaptive-threshold": adaptive_threshold})
+            adaptive_threshold_value = self._adaptive_threshold_fn.invoke(i)
+            self.logger.log({"adaptive-threshold": adaptive_threshold_value})
             training_response = self.request_dispatcher.train_parallel_on_server(
                 server_device_id=server_device_id,
                 epochs=1,
                 round_no=i,
-                adaptive_learning_threshold=adaptive_threshold,
+                adaptive_threshold_value=adaptive_threshold_value,
                 optimizer_state=optimizer_state,
             )
 
diff --git a/edml/controllers/swarm_controller.py b/edml/controllers/swarm_controller.py
index 19378c4aedfbe5b5b1307751ee4cbffe7519fe66..716b73ec20546a83c7b63e34dd9287a113b4a8e4 100644
--- a/edml/controllers/swarm_controller.py
+++ b/edml/controllers/swarm_controller.py
@@ -103,7 +103,7 @@ class SwarmController(BaseController):
             server_device_id,
             epochs=1,
             round_no=round_no,
-            adaptive_learning_threshold=adaptive_threshold,
+            adaptive_threshold_value=adaptive_threshold,
             optimizer_state=optimizer_state,
         )
 
diff --git a/edml/core/device.py b/edml/core/device.py
index 78c76b5cf875333b118e745230d06dccf227ad67..aea1ae95de2735dc4c984a13d49dadd632d66432 100644
--- a/edml/core/device.py
+++ b/edml/core/device.py
@@ -242,13 +242,13 @@ class NetworkDevice(Device):
         self,
         clients: list[str],
         round_no: int,
-        adaptive_learning_threshold: Optional[float] = None,
+        adaptive_threshold_value: Optional[float] = None,
         optimizer_state: dict[str, Any] = None,
     ):
         return self.server.train_parallel_split_learning(
             clients=clients,
             round_no=round_no,
-            adaptive_learning_threshold=adaptive_learning_threshold,
+            adaptive_threshold_value=adaptive_threshold_value,
             optimizer_state=optimizer_state,
         )
 
@@ -306,7 +306,7 @@ class NetworkDevice(Device):
         self,
         epochs: int,
         round_no: int = -1,
-        adaptive_learning_threshold: Optional[float] = None,
+        adaptive_threshold_value: Optional[float] = None,
         optimizer_state: dict[str, Any] = None,
     ) -> Tuple[
         Any, Any, ModelMetricResultContainer, Any, DiagnosticMetricResultContainer
@@ -315,7 +315,7 @@ class NetworkDevice(Device):
             devices=self.__get_device_ids__(),
             epochs=epochs,
             round_no=round_no,
-            adaptive_learning_threshold=adaptive_learning_threshold,
+            adaptive_threshold_value=adaptive_threshold_value,
             optimizer_state=optimizer_state,
         )
 
@@ -456,7 +456,7 @@ class RPCDeviceServicer(DeviceServicer):
             self.device.train_global(
                 request.epochs,
                 request.round_no,
-                request.adaptive_learning_threshold,
+                request.adaptive_threshold_value,
                 proto_to_state_dict(request.optimizer_state),
             )
         )
@@ -577,14 +577,14 @@ class RPCDeviceServicer(DeviceServicer):
         print(f"Starting parallel split learning")
         clients = self.device.__get_device_ids__()
         round_no = request.round_no
-        adaptive_learning_threshold = request.adaptive_learning_threshold
+        adaptive_threshold_value = request.adaptive_threshold_value
         optimizer_state = proto_to_state_dict(request.optimizer_state)
 
         cw, sw, model_metrics, optimizer_state, diagnostic_metrics = (
             self.device.train_parallel_split_learning(
                 clients=clients,
                 round_no=round_no,
-                adaptive_learning_threshold=adaptive_learning_threshold,
+                adaptive_threshold_value=adaptive_threshold_value,
                 optimizer_state=optimizer_state,
             )
         )
@@ -665,10 +665,10 @@ class DeviceRequestDispatcher:
         server_device_id: str,
         epochs: int,
         round_no: int,
-        adaptive_learning_threshold: Optional[float] = None,
+        adaptive_threshold_value: Optional[float] = None,
         optimizer_state: dict[str, Any] = None,
     ):
-        print(f"><><><> {adaptive_learning_threshold}")
+        print(f"><><><> {adaptive_threshold_value}")
 
         try:
             response: TrainGlobalParallelSplitLearningResponse = self._get_connection(
@@ -676,7 +676,7 @@ class DeviceRequestDispatcher:
             ).TrainGlobalParallelSplitLearning(
                 connection_pb2.TrainGlobalParallelSplitLearningRequest(
                     round_no=round_no,
-                    adaptive_learning_threshold=adaptive_learning_threshold,
+                    adaptive_threshold_value=adaptive_threshold_value,
                     optimizer_state=state_dict_to_proto(optimizer_state),
                 )
             )
@@ -773,7 +773,7 @@ class DeviceRequestDispatcher:
         device_id: str,
         epochs: int,
         round_no: int = -1,
-        adaptive_learning_threshold: Optional[float] = None,
+        adaptive_threshold_value: Optional[float] = None,
         optimizer_state: dict[str, Any] = None,
     ) -> Union[
         Tuple[
@@ -790,7 +790,7 @@ class DeviceRequestDispatcher:
                 connection_pb2.TrainGlobalRequest(
                     epochs=epochs,
                     round_no=round_no,
-                    adaptive_learning_threshold=adaptive_learning_threshold,
+                    adaptive_threshold_value=adaptive_threshold_value,
                     optimizer_state=state_dict_to_proto(optimizer_state),
                 )
             )
diff --git a/edml/core/server.py b/edml/core/server.py
index ce4b72dcc3768db067786042cc881e1209e5ed4b..f93f7fb3025095a93d72f08b26151db751032fc0 100644
--- a/edml/core/server.py
+++ b/edml/core/server.py
@@ -5,7 +5,6 @@ from typing import List, Optional, Tuple, Any, TYPE_CHECKING
 
 import torch
 from omegaconf import DictConfig
-from colorama import Fore
 from torch import nn
 from torch.autograd import Variable
 
@@ -51,7 +50,7 @@ class DeviceServer:
         self._cfg = cfg
         self.node_device: Optional[Device] = None
         self.latency_factor = latency_factor
-        self.adaptive_learning_threshold = None
+        self.adaptive_threshold_value = None
 
     def set_device(self, node_device: Device):
         """Sets the device reference for the server."""
@@ -74,7 +73,7 @@ class DeviceServer:
         devices: List[str],
         epochs: int = 1,
         round_no: int = -1,
-        adaptive_learning_threshold: Optional[float] = None,
+        adaptive_threshold_value: Optional[float] = None,
         optimizer_state: dict[str, Any] = None,
     ) -> Tuple[
         Any, Any, ModelMetricResultContainer, Any, DiagnosticMetricResultContainer
@@ -85,7 +84,7 @@ class DeviceServer:
             devices: The devices to train on
             epochs: Optionally, the number of epochs to train.
             round_no: Optionally, the current global epoch number if a learning rate scheduler is used.
-            adaptive_learning_threshold: Optionally, the loss threshold to not send the gradients to the client
+            adaptive_threshold_value: Optionally, the loss threshold below which gradients are not sent back to the client
             optimizer_state: Optionally, the optimizer_state to proceed from
         """
         client_weights = None
@@ -93,8 +92,8 @@ class DeviceServer:
         diagnostic_metric_container = DiagnosticMetricResultContainer()
         if optimizer_state is not None:
             self._optimizer.load_state_dict(optimizer_state)
-        if adaptive_learning_threshold is not None:
-            self.adaptive_learning_threshold = adaptive_learning_threshold
+        if adaptive_threshold_value is not None:
+            self.adaptive_threshold_value = adaptive_threshold_value
         for epoch in range(epochs):
             if self._lr_scheduler is not None:
                 if round_no != -1:
@@ -103,7 +102,7 @@ class DeviceServer:
                     self._lr_scheduler.step()
             for device_id in devices:
                 print(
-                    f"Train epoch {epoch} on client {device_id} with server {self.node_device.device_id} and threshold {self.adaptive_learning_threshold}"
+                    f"Train epoch {epoch} on client {device_id} with server {self.node_device.device_id}"
                 )
                 if client_weights is not None:
                     self.node_device.set_weights_on(
@@ -172,8 +171,8 @@ class DeviceServer:
         else:
             gradients = smashed_data.grad
         if (
-            self.adaptive_learning_threshold
-            and loss_train.item() < self.adaptive_learning_threshold
+            self.adaptive_threshold_value
+            and loss_train.item() < self.adaptive_threshold_value
         ):
             self.node_device.log(
                 {"adaptive_learning_threshold_applied": gradients.size(0)}
@@ -240,7 +239,7 @@ class DeviceServer:
         self,
         clients: List[str],
         round_no: int,
-        adaptive_learning_threshold: Optional[float] = None,
+        adaptive_threshold_value: Optional[float] = None,
         optimizer_state: dict[str, Any] = None,
     ):
         def client_training_job(client_id: str, batch_index: int):
@@ -274,8 +273,8 @@ class DeviceServer:
                 self._lr_scheduler.step(round_no + 1)  # epoch=1
             else:
                 self._lr_scheduler.step()
-        if adaptive_learning_threshold is not None:
-            self.adaptive_learning_threshold = adaptive_learning_threshold
+        if adaptive_threshold_value is not None:
+            self.adaptive_threshold_value = adaptive_threshold_value
         num_threads = len(clients)
         executor = create_executor_with_threads(num_threads)
 
diff --git a/edml/generated/connection_pb2.py b/edml/generated/connection_pb2.py
index 20237aafe29133f1667ddd8a256fd29c2a196d05..3c9a0d7b48b4739017066ae0dfedaa636dd64a62 100644
--- a/edml/generated/connection_pb2.py
+++ b/edml/generated/connection_pb2.py
@@ -14,7 +14,7 @@ _sym_db = _symbol_database.Default()
 import datastructures_pb2 as datastructures__pb2
 
 
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x63onnection.proto\x1a\x14\x64\x61tastructures.proto\"4\n\x13SetGradientsRequest\x12\x1d\n\tgradients\x18\x01 \x01(\x0b\x32\n.Gradients\"5\n\x14UpdateWeightsRequest\x12\x1d\n\tgradients\x18\x01 \x01(\x0b\x32\n.Gradients\";\n\x1aSingleBatchBackwardRequest\x12\x1d\n\tgradients\x18\x01 \x01(\x0b\x32\n.Gradients\"j\n\x1bSingleBatchBackwardResponse\x12\x19\n\x07metrics\x18\x01 \x01(\x0b\x32\x08.Metrics\x12\"\n\tgradients\x18\x02 \x01(\x0b\x32\n.GradientsH\x00\x88\x01\x01\x42\x0c\n\n_gradients\"C\n\x1aSingleBatchTrainingRequest\x12\x13\n\x0b\x62\x61tch_index\x18\x01 \x01(\x05\x12\x10\n\x08round_no\x18\x02 \x01(\x05\"\x80\x01\n\x1bSingleBatchTrainingResponse\x12\'\n\x0csmashed_data\x18\x01 \x01(\x0b\x32\x0c.ActivationsH\x00\x88\x01\x01\x12\x1c\n\x06labels\x18\x02 \x01(\x0b\x32\x07.LabelsH\x01\x88\x01\x01\x42\x0f\n\r_smashed_dataB\t\n\x07_labels\"\xd5\x01\n\'TrainGlobalParallelSplitLearningRequest\x12\x15\n\x08round_no\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12(\n\x1b\x61\x64\x61ptive_learning_threshold\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12(\n\x0foptimizer_state\x18\x03 \x01(\x0b\x32\n.StateDictH\x02\x88\x01\x01\x42\x0b\n\t_round_noB\x1e\n\x1c_adaptive_learning_thresholdB\x12\n\x10_optimizer_state\"\x89\x02\n(TrainGlobalParallelSplitLearningResponse\x12 \n\x0e\x63lient_weights\x18\x01 \x01(\x0b\x32\x08.Weights\x12 \n\x0eserver_weights\x18\x02 \x01(\x0b\x32\x08.Weights\x12\x19\n\x07metrics\x18\x03 \x01(\x0b\x32\x08.Metrics\x12(\n\x0foptimizer_state\x18\x04 \x01(\x0b\x32\n.StateDictH\x00\x88\x01\x01\x12)\n\x12\x64iagnostic_metrics\x18\x05 \x01(\x0b\x32\x08.MetricsH\x01\x88\x01\x01\x42\x12\n\x10_optimizer_stateB\x15\n\x13_diagnostic_metrics\"\xd0\x01\n\x12TrainGlobalRequest\x12\x0e\n\x06\x65pochs\x18\x01 \x01(\x05\x12\x15\n\x08round_no\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12(\n\x1b\x61\x64\x61ptive_learning_threshold\x18\x03 \x01(\x01H\x01\x88\x01\x01\x12(\n\x0foptimizer_state\x18\x04 
\x01(\x0b\x32\n.StateDictH\x02\x88\x01\x01\x42\x0b\n\t_round_noB\x1e\n\x1c_adaptive_learning_thresholdB\x12\n\x10_optimizer_state\"\xf4\x01\n\x13TrainGlobalResponse\x12 \n\x0e\x63lient_weights\x18\x01 \x01(\x0b\x32\x08.Weights\x12 \n\x0eserver_weights\x18\x02 \x01(\x0b\x32\x08.Weights\x12\x19\n\x07metrics\x18\x03 \x01(\x0b\x32\x08.Metrics\x12(\n\x0foptimizer_state\x18\x04 \x01(\x0b\x32\n.StateDictH\x00\x88\x01\x01\x12)\n\x12\x64iagnostic_metrics\x18\x05 \x01(\x0b\x32\x08.MetricsH\x01\x88\x01\x01\x42\x12\n\x10_optimizer_stateB\x15\n\x13_diagnostic_metrics\"A\n\x11SetWeightsRequest\x12\x19\n\x07weights\x18\x01 \x01(\x0b\x32\x08.Weights\x12\x11\n\ton_client\x18\x02 \x01(\x08\"V\n\x12SetWeightsResponse\x12)\n\x12\x64iagnostic_metrics\x18\x01 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"T\n\x11TrainEpochRequest\x12\x1b\n\x06server\x18\x01 \x01(\x0b\x32\x0b.DeviceInfo\x12\x15\n\x08round_no\x18\x02 \x01(\x05H\x00\x88\x01\x01\x42\x0b\n\t_round_no\"q\n\x12TrainEpochResponse\x12\x19\n\x07weights\x18\x01 \x01(\x0b\x32\x08.Weights\x12)\n\x12\x64iagnostic_metrics\x18\x02 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"P\n\x11TrainBatchRequest\x12\"\n\x0csmashed_data\x18\x01 \x01(\x0b\x32\x0c.Activations\x12\x17\n\x06labels\x18\x02 \x01(\x0b\x32\x07.Labels\"\x91\x01\n\x12TrainBatchResponse\x12\x1d\n\tgradients\x18\x01 \x01(\x0b\x32\n.Gradients\x12)\n\x12\x64iagnostic_metrics\x18\x02 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x12\x11\n\x04loss\x18\x03 \x01(\x01H\x01\x88\x01\x01\x42\x15\n\x13_diagnostic_metricsB\x07\n\x05_loss\":\n\x11\x45valGlobalRequest\x12\x12\n\nvalidation\x18\x01 \x01(\x08\x12\x11\n\tfederated\x18\x02 \x01(\x08\"q\n\x12\x45valGlobalResponse\x12\x19\n\x07metrics\x18\x01 \x01(\x0b\x32\x08.Metrics\x12)\n\x12\x64iagnostic_metrics\x18\x02 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\">\n\x0b\x45valRequest\x12\x1b\n\x06server\x18\x01 
\x01(\x0b\x32\x0b.DeviceInfo\x12\x12\n\nvalidation\x18\x02 \x01(\x08\"P\n\x0c\x45valResponse\x12)\n\x12\x64iagnostic_metrics\x18\x01 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"O\n\x10\x45valBatchRequest\x12\"\n\x0csmashed_data\x18\x01 \x01(\x0b\x32\x0c.Activations\x12\x17\n\x06labels\x18\x02 \x01(\x0b\x32\x07.Labels\"p\n\x11\x45valBatchResponse\x12\x19\n\x07metrics\x18\x01 \x01(\x0b\x32\x08.Metrics\x12)\n\x12\x64iagnostic_metrics\x18\x02 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\";\n\x15\x46ullModelTrainRequest\x12\x15\n\x08round_no\x18\x01 \x01(\x05H\x00\x88\x01\x01\x42\x0b\n\t_round_no\"\xce\x01\n\x16\x46ullModelTrainResponse\x12 \n\x0e\x63lient_weights\x18\x01 \x01(\x0b\x32\x08.Weights\x12 \n\x0eserver_weights\x18\x02 \x01(\x0b\x32\x08.Weights\x12\x13\n\x0bnum_samples\x18\x03 \x01(\x05\x12\x19\n\x07metrics\x18\x04 \x01(\x0b\x32\x08.Metrics\x12)\n\x12\x64iagnostic_metrics\x18\x05 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"\x18\n\x16StartExperimentRequest\"[\n\x17StartExperimentResponse\x12)\n\x12\x64iagnostic_metrics\x18\x01 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"\x16\n\x14\x45ndExperimentRequest\"Y\n\x15\x45ndExperimentResponse\x12)\n\x12\x64iagnostic_metrics\x18\x01 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"\x16\n\x14\x42\x61tteryStatusRequest\"y\n\x15\x42\x61tteryStatusResponse\x12\x1e\n\x06status\x18\x01 \x01(\x0b\x32\x0e.BatteryStatus\x12)\n\x12\x64iagnostic_metrics\x18\x02 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"\x19\n\x17\x44\x61tasetModelInfoRequest\"\xa5\x02\n\x18\x44\x61tasetModelInfoResponse\x12\x15\n\rtrain_samples\x18\x01 \x01(\x05\x12\x1a\n\x12validation_samples\x18\x02 \x01(\x05\x12\x17\n\x0f\x63lient_fw_flops\x18\x03 \x01(\x05\x12\x17\n\x0fserver_fw_flops\x18\x04 \x01(\x05\x12\x1c\n\x0f\x63lient_bw_flops\x18\x05 
\x01(\x05H\x00\x88\x01\x01\x12\x1c\n\x0fserver_bw_flops\x18\x06 \x01(\x05H\x01\x88\x01\x01\x12)\n\x12\x64iagnostic_metrics\x18\x07 \x01(\x0b\x32\x08.MetricsH\x02\x88\x01\x01\x42\x12\n\x10_client_bw_flopsB\x12\n\x10_server_bw_flopsB\x15\n\x13_diagnostic_metrics2\xf8\x08\n\x06\x44\x65vice\x12:\n\x0bTrainGlobal\x12\x13.TrainGlobalRequest\x1a\x14.TrainGlobalResponse\"\x00\x12\x37\n\nSetWeights\x12\x12.SetWeightsRequest\x1a\x13.SetWeightsResponse\"\x00\x12\x37\n\nTrainEpoch\x12\x12.TrainEpochRequest\x1a\x13.TrainEpochResponse\"\x00\x12\x37\n\nTrainBatch\x12\x12.TrainBatchRequest\x1a\x13.TrainBatchResponse\"\x00\x12;\n\x0e\x45valuateGlobal\x12\x12.EvalGlobalRequest\x1a\x13.EvalGlobalResponse\"\x00\x12)\n\x08\x45valuate\x12\x0c.EvalRequest\x1a\r.EvalResponse\"\x00\x12\x38\n\rEvaluateBatch\x12\x11.EvalBatchRequest\x1a\x12.EvalBatchResponse\"\x00\x12\x46\n\x11\x46ullModelTraining\x12\x16.FullModelTrainRequest\x1a\x17.FullModelTrainResponse\"\x00\x12\x46\n\x0fStartExperiment\x12\x17.StartExperimentRequest\x1a\x18.StartExperimentResponse\"\x00\x12@\n\rEndExperiment\x12\x15.EndExperimentRequest\x1a\x16.EndExperimentResponse\"\x00\x12\x43\n\x10GetBatteryStatus\x12\x15.BatteryStatusRequest\x1a\x16.BatteryStatusResponse\"\x00\x12L\n\x13GetDatasetModelInfo\x12\x18.DatasetModelInfoRequest\x1a\x19.DatasetModelInfoResponse\"\x00\x12y\n TrainGlobalParallelSplitLearning\x12(.TrainGlobalParallelSplitLearningRequest\x1a).TrainGlobalParallelSplitLearningResponse\"\x00\x12W\n\x18TrainSingleBatchOnClient\x12\x1b.SingleBatchTrainingRequest\x1a\x1c.SingleBatchTrainingResponse\"\x00\x12\x65\n&BackwardPropagationSingleBatchOnClient\x12\x1b.SingleBatchBackwardRequest\x1a\x1c.SingleBatchBackwardResponse\"\x00\x12\x45\n#SetGradientsAndFinalizeTrainingStep\x12\x14.SetGradientsRequest\x1a\x06.Empty\"\x00\x62\x06proto3')
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10\x63onnection.proto\x1a\x14\x64\x61tastructures.proto\"4\n\x13SetGradientsRequest\x12\x1d\n\tgradients\x18\x01 \x01(\x0b\x32\n.Gradients\"5\n\x14UpdateWeightsRequest\x12\x1d\n\tgradients\x18\x01 \x01(\x0b\x32\n.Gradients\";\n\x1aSingleBatchBackwardRequest\x12\x1d\n\tgradients\x18\x01 \x01(\x0b\x32\n.Gradients\"j\n\x1bSingleBatchBackwardResponse\x12\x19\n\x07metrics\x18\x01 \x01(\x0b\x32\x08.Metrics\x12\"\n\tgradients\x18\x02 \x01(\x0b\x32\n.GradientsH\x00\x88\x01\x01\x42\x0c\n\n_gradients\"C\n\x1aSingleBatchTrainingRequest\x12\x13\n\x0b\x62\x61tch_index\x18\x01 \x01(\x05\x12\x10\n\x08round_no\x18\x02 \x01(\x05\"\x80\x01\n\x1bSingleBatchTrainingResponse\x12\'\n\x0csmashed_data\x18\x01 \x01(\x0b\x32\x0c.ActivationsH\x00\x88\x01\x01\x12\x1c\n\x06labels\x18\x02 \x01(\x0b\x32\x07.LabelsH\x01\x88\x01\x01\x42\x0f\n\r_smashed_dataB\t\n\x07_labels\"\xcf\x01\n\'TrainGlobalParallelSplitLearningRequest\x12\x15\n\x08round_no\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12%\n\x18\x61\x64\x61ptive_threshold_value\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12(\n\x0foptimizer_state\x18\x03 \x01(\x0b\x32\n.StateDictH\x02\x88\x01\x01\x42\x0b\n\t_round_noB\x1b\n\x19_adaptive_threshold_valueB\x12\n\x10_optimizer_state\"\x89\x02\n(TrainGlobalParallelSplitLearningResponse\x12 \n\x0e\x63lient_weights\x18\x01 \x01(\x0b\x32\x08.Weights\x12 \n\x0eserver_weights\x18\x02 \x01(\x0b\x32\x08.Weights\x12\x19\n\x07metrics\x18\x03 \x01(\x0b\x32\x08.Metrics\x12(\n\x0foptimizer_state\x18\x04 \x01(\x0b\x32\n.StateDictH\x00\x88\x01\x01\x12)\n\x12\x64iagnostic_metrics\x18\x05 \x01(\x0b\x32\x08.MetricsH\x01\x88\x01\x01\x42\x12\n\x10_optimizer_stateB\x15\n\x13_diagnostic_metrics\"\xca\x01\n\x12TrainGlobalRequest\x12\x0e\n\x06\x65pochs\x18\x01 \x01(\x05\x12\x15\n\x08round_no\x18\x02 \x01(\x05H\x00\x88\x01\x01\x12%\n\x18\x61\x64\x61ptive_threshold_value\x18\x03 \x01(\x01H\x01\x88\x01\x01\x12(\n\x0foptimizer_state\x18\x04 
\x01(\x0b\x32\n.StateDictH\x02\x88\x01\x01\x42\x0b\n\t_round_noB\x1b\n\x19_adaptive_threshold_valueB\x12\n\x10_optimizer_state\"\xf4\x01\n\x13TrainGlobalResponse\x12 \n\x0e\x63lient_weights\x18\x01 \x01(\x0b\x32\x08.Weights\x12 \n\x0eserver_weights\x18\x02 \x01(\x0b\x32\x08.Weights\x12\x19\n\x07metrics\x18\x03 \x01(\x0b\x32\x08.Metrics\x12(\n\x0foptimizer_state\x18\x04 \x01(\x0b\x32\n.StateDictH\x00\x88\x01\x01\x12)\n\x12\x64iagnostic_metrics\x18\x05 \x01(\x0b\x32\x08.MetricsH\x01\x88\x01\x01\x42\x12\n\x10_optimizer_stateB\x15\n\x13_diagnostic_metrics\"A\n\x11SetWeightsRequest\x12\x19\n\x07weights\x18\x01 \x01(\x0b\x32\x08.Weights\x12\x11\n\ton_client\x18\x02 \x01(\x08\"V\n\x12SetWeightsResponse\x12)\n\x12\x64iagnostic_metrics\x18\x01 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"T\n\x11TrainEpochRequest\x12\x1b\n\x06server\x18\x01 \x01(\x0b\x32\x0b.DeviceInfo\x12\x15\n\x08round_no\x18\x02 \x01(\x05H\x00\x88\x01\x01\x42\x0b\n\t_round_no\"q\n\x12TrainEpochResponse\x12\x19\n\x07weights\x18\x01 \x01(\x0b\x32\x08.Weights\x12)\n\x12\x64iagnostic_metrics\x18\x02 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"P\n\x11TrainBatchRequest\x12\"\n\x0csmashed_data\x18\x01 \x01(\x0b\x32\x0c.Activations\x12\x17\n\x06labels\x18\x02 \x01(\x0b\x32\x07.Labels\"\x91\x01\n\x12TrainBatchResponse\x12\x1d\n\tgradients\x18\x01 \x01(\x0b\x32\n.Gradients\x12)\n\x12\x64iagnostic_metrics\x18\x02 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x12\x11\n\x04loss\x18\x03 \x01(\x01H\x01\x88\x01\x01\x42\x15\n\x13_diagnostic_metricsB\x07\n\x05_loss\":\n\x11\x45valGlobalRequest\x12\x12\n\nvalidation\x18\x01 \x01(\x08\x12\x11\n\tfederated\x18\x02 \x01(\x08\"q\n\x12\x45valGlobalResponse\x12\x19\n\x07metrics\x18\x01 \x01(\x0b\x32\x08.Metrics\x12)\n\x12\x64iagnostic_metrics\x18\x02 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\">\n\x0b\x45valRequest\x12\x1b\n\x06server\x18\x01 
\x01(\x0b\x32\x0b.DeviceInfo\x12\x12\n\nvalidation\x18\x02 \x01(\x08\"P\n\x0c\x45valResponse\x12)\n\x12\x64iagnostic_metrics\x18\x01 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"O\n\x10\x45valBatchRequest\x12\"\n\x0csmashed_data\x18\x01 \x01(\x0b\x32\x0c.Activations\x12\x17\n\x06labels\x18\x02 \x01(\x0b\x32\x07.Labels\"p\n\x11\x45valBatchResponse\x12\x19\n\x07metrics\x18\x01 \x01(\x0b\x32\x08.Metrics\x12)\n\x12\x64iagnostic_metrics\x18\x02 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\";\n\x15\x46ullModelTrainRequest\x12\x15\n\x08round_no\x18\x01 \x01(\x05H\x00\x88\x01\x01\x42\x0b\n\t_round_no\"\xce\x01\n\x16\x46ullModelTrainResponse\x12 \n\x0e\x63lient_weights\x18\x01 \x01(\x0b\x32\x08.Weights\x12 \n\x0eserver_weights\x18\x02 \x01(\x0b\x32\x08.Weights\x12\x13\n\x0bnum_samples\x18\x03 \x01(\x05\x12\x19\n\x07metrics\x18\x04 \x01(\x0b\x32\x08.Metrics\x12)\n\x12\x64iagnostic_metrics\x18\x05 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"\x18\n\x16StartExperimentRequest\"[\n\x17StartExperimentResponse\x12)\n\x12\x64iagnostic_metrics\x18\x01 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"\x16\n\x14\x45ndExperimentRequest\"Y\n\x15\x45ndExperimentResponse\x12)\n\x12\x64iagnostic_metrics\x18\x01 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"\x16\n\x14\x42\x61tteryStatusRequest\"y\n\x15\x42\x61tteryStatusResponse\x12\x1e\n\x06status\x18\x01 \x01(\x0b\x32\x0e.BatteryStatus\x12)\n\x12\x64iagnostic_metrics\x18\x02 \x01(\x0b\x32\x08.MetricsH\x00\x88\x01\x01\x42\x15\n\x13_diagnostic_metrics\"\x19\n\x17\x44\x61tasetModelInfoRequest\"\xa5\x02\n\x18\x44\x61tasetModelInfoResponse\x12\x15\n\rtrain_samples\x18\x01 \x01(\x05\x12\x1a\n\x12validation_samples\x18\x02 \x01(\x05\x12\x17\n\x0f\x63lient_fw_flops\x18\x03 \x01(\x05\x12\x17\n\x0fserver_fw_flops\x18\x04 \x01(\x05\x12\x1c\n\x0f\x63lient_bw_flops\x18\x05 
\x01(\x05H\x00\x88\x01\x01\x12\x1c\n\x0fserver_bw_flops\x18\x06 \x01(\x05H\x01\x88\x01\x01\x12)\n\x12\x64iagnostic_metrics\x18\x07 \x01(\x0b\x32\x08.MetricsH\x02\x88\x01\x01\x42\x12\n\x10_client_bw_flopsB\x12\n\x10_server_bw_flopsB\x15\n\x13_diagnostic_metrics2\xf8\x08\n\x06\x44\x65vice\x12:\n\x0bTrainGlobal\x12\x13.TrainGlobalRequest\x1a\x14.TrainGlobalResponse\"\x00\x12\x37\n\nSetWeights\x12\x12.SetWeightsRequest\x1a\x13.SetWeightsResponse\"\x00\x12\x37\n\nTrainEpoch\x12\x12.TrainEpochRequest\x1a\x13.TrainEpochResponse\"\x00\x12\x37\n\nTrainBatch\x12\x12.TrainBatchRequest\x1a\x13.TrainBatchResponse\"\x00\x12;\n\x0e\x45valuateGlobal\x12\x12.EvalGlobalRequest\x1a\x13.EvalGlobalResponse\"\x00\x12)\n\x08\x45valuate\x12\x0c.EvalRequest\x1a\r.EvalResponse\"\x00\x12\x38\n\rEvaluateBatch\x12\x11.EvalBatchRequest\x1a\x12.EvalBatchResponse\"\x00\x12\x46\n\x11\x46ullModelTraining\x12\x16.FullModelTrainRequest\x1a\x17.FullModelTrainResponse\"\x00\x12\x46\n\x0fStartExperiment\x12\x17.StartExperimentRequest\x1a\x18.StartExperimentResponse\"\x00\x12@\n\rEndExperiment\x12\x15.EndExperimentRequest\x1a\x16.EndExperimentResponse\"\x00\x12\x43\n\x10GetBatteryStatus\x12\x15.BatteryStatusRequest\x1a\x16.BatteryStatusResponse\"\x00\x12L\n\x13GetDatasetModelInfo\x12\x18.DatasetModelInfoRequest\x1a\x19.DatasetModelInfoResponse\"\x00\x12y\n TrainGlobalParallelSplitLearning\x12(.TrainGlobalParallelSplitLearningRequest\x1a).TrainGlobalParallelSplitLearningResponse\"\x00\x12W\n\x18TrainSingleBatchOnClient\x12\x1b.SingleBatchTrainingRequest\x1a\x1c.SingleBatchTrainingResponse\"\x00\x12\x65\n&BackwardPropagationSingleBatchOnClient\x12\x1b.SingleBatchBackwardRequest\x1a\x1c.SingleBatchBackwardResponse\"\x00\x12\x45\n#SetGradientsAndFinalizeTrainingStep\x12\x14.SetGradientsRequest\x1a\x06.Empty\"\x00\x62\x06proto3')
 
 _globals = globals()
 _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
@@ -35,57 +35,57 @@ if _descriptor._USE_C_DESCRIPTORS == False:
   _globals['_SINGLEBATCHTRAININGRESPONSE']._serialized_start=390
   _globals['_SINGLEBATCHTRAININGRESPONSE']._serialized_end=518
   _globals['_TRAINGLOBALPARALLELSPLITLEARNINGREQUEST']._serialized_start=521
-  _globals['_TRAINGLOBALPARALLELSPLITLEARNINGREQUEST']._serialized_end=734
-  _globals['_TRAINGLOBALPARALLELSPLITLEARNINGRESPONSE']._serialized_start=737
-  _globals['_TRAINGLOBALPARALLELSPLITLEARNINGRESPONSE']._serialized_end=1002
-  _globals['_TRAINGLOBALREQUEST']._serialized_start=1005
-  _globals['_TRAINGLOBALREQUEST']._serialized_end=1213
-  _globals['_TRAINGLOBALRESPONSE']._serialized_start=1216
-  _globals['_TRAINGLOBALRESPONSE']._serialized_end=1460
-  _globals['_SETWEIGHTSREQUEST']._serialized_start=1462
-  _globals['_SETWEIGHTSREQUEST']._serialized_end=1527
-  _globals['_SETWEIGHTSRESPONSE']._serialized_start=1529
-  _globals['_SETWEIGHTSRESPONSE']._serialized_end=1615
-  _globals['_TRAINEPOCHREQUEST']._serialized_start=1617
-  _globals['_TRAINEPOCHREQUEST']._serialized_end=1701
-  _globals['_TRAINEPOCHRESPONSE']._serialized_start=1703
-  _globals['_TRAINEPOCHRESPONSE']._serialized_end=1816
-  _globals['_TRAINBATCHREQUEST']._serialized_start=1818
-  _globals['_TRAINBATCHREQUEST']._serialized_end=1898
-  _globals['_TRAINBATCHRESPONSE']._serialized_start=1901
-  _globals['_TRAINBATCHRESPONSE']._serialized_end=2046
-  _globals['_EVALGLOBALREQUEST']._serialized_start=2048
-  _globals['_EVALGLOBALREQUEST']._serialized_end=2106
-  _globals['_EVALGLOBALRESPONSE']._serialized_start=2108
-  _globals['_EVALGLOBALRESPONSE']._serialized_end=2221
-  _globals['_EVALREQUEST']._serialized_start=2223
-  _globals['_EVALREQUEST']._serialized_end=2285
-  _globals['_EVALRESPONSE']._serialized_start=2287
-  _globals['_EVALRESPONSE']._serialized_end=2367
-  _globals['_EVALBATCHREQUEST']._serialized_start=2369
-  _globals['_EVALBATCHREQUEST']._serialized_end=2448
-  _globals['_EVALBATCHRESPONSE']._serialized_start=2450
-  _globals['_EVALBATCHRESPONSE']._serialized_end=2562
-  _globals['_FULLMODELTRAINREQUEST']._serialized_start=2564
-  _globals['_FULLMODELTRAINREQUEST']._serialized_end=2623
-  _globals['_FULLMODELTRAINRESPONSE']._serialized_start=2626
-  _globals['_FULLMODELTRAINRESPONSE']._serialized_end=2832
-  _globals['_STARTEXPERIMENTREQUEST']._serialized_start=2834
-  _globals['_STARTEXPERIMENTREQUEST']._serialized_end=2858
-  _globals['_STARTEXPERIMENTRESPONSE']._serialized_start=2860
-  _globals['_STARTEXPERIMENTRESPONSE']._serialized_end=2951
-  _globals['_ENDEXPERIMENTREQUEST']._serialized_start=2953
-  _globals['_ENDEXPERIMENTREQUEST']._serialized_end=2975
-  _globals['_ENDEXPERIMENTRESPONSE']._serialized_start=2977
-  _globals['_ENDEXPERIMENTRESPONSE']._serialized_end=3066
-  _globals['_BATTERYSTATUSREQUEST']._serialized_start=3068
-  _globals['_BATTERYSTATUSREQUEST']._serialized_end=3090
-  _globals['_BATTERYSTATUSRESPONSE']._serialized_start=3092
-  _globals['_BATTERYSTATUSRESPONSE']._serialized_end=3213
-  _globals['_DATASETMODELINFOREQUEST']._serialized_start=3215
-  _globals['_DATASETMODELINFOREQUEST']._serialized_end=3240
-  _globals['_DATASETMODELINFORESPONSE']._serialized_start=3243
-  _globals['_DATASETMODELINFORESPONSE']._serialized_end=3536
-  _globals['_DEVICE']._serialized_start=3539
-  _globals['_DEVICE']._serialized_end=4683
+  _globals['_TRAINGLOBALPARALLELSPLITLEARNINGREQUEST']._serialized_end=728
+  _globals['_TRAINGLOBALPARALLELSPLITLEARNINGRESPONSE']._serialized_start=731
+  _globals['_TRAINGLOBALPARALLELSPLITLEARNINGRESPONSE']._serialized_end=996
+  _globals['_TRAINGLOBALREQUEST']._serialized_start=999
+  _globals['_TRAINGLOBALREQUEST']._serialized_end=1201
+  _globals['_TRAINGLOBALRESPONSE']._serialized_start=1204
+  _globals['_TRAINGLOBALRESPONSE']._serialized_end=1448
+  _globals['_SETWEIGHTSREQUEST']._serialized_start=1450
+  _globals['_SETWEIGHTSREQUEST']._serialized_end=1515
+  _globals['_SETWEIGHTSRESPONSE']._serialized_start=1517
+  _globals['_SETWEIGHTSRESPONSE']._serialized_end=1603
+  _globals['_TRAINEPOCHREQUEST']._serialized_start=1605
+  _globals['_TRAINEPOCHREQUEST']._serialized_end=1689
+  _globals['_TRAINEPOCHRESPONSE']._serialized_start=1691
+  _globals['_TRAINEPOCHRESPONSE']._serialized_end=1804
+  _globals['_TRAINBATCHREQUEST']._serialized_start=1806
+  _globals['_TRAINBATCHREQUEST']._serialized_end=1886
+  _globals['_TRAINBATCHRESPONSE']._serialized_start=1889
+  _globals['_TRAINBATCHRESPONSE']._serialized_end=2034
+  _globals['_EVALGLOBALREQUEST']._serialized_start=2036
+  _globals['_EVALGLOBALREQUEST']._serialized_end=2094
+  _globals['_EVALGLOBALRESPONSE']._serialized_start=2096
+  _globals['_EVALGLOBALRESPONSE']._serialized_end=2209
+  _globals['_EVALREQUEST']._serialized_start=2211
+  _globals['_EVALREQUEST']._serialized_end=2273
+  _globals['_EVALRESPONSE']._serialized_start=2275
+  _globals['_EVALRESPONSE']._serialized_end=2355
+  _globals['_EVALBATCHREQUEST']._serialized_start=2357
+  _globals['_EVALBATCHREQUEST']._serialized_end=2436
+  _globals['_EVALBATCHRESPONSE']._serialized_start=2438
+  _globals['_EVALBATCHRESPONSE']._serialized_end=2550
+  _globals['_FULLMODELTRAINREQUEST']._serialized_start=2552
+  _globals['_FULLMODELTRAINREQUEST']._serialized_end=2611
+  _globals['_FULLMODELTRAINRESPONSE']._serialized_start=2614
+  _globals['_FULLMODELTRAINRESPONSE']._serialized_end=2820
+  _globals['_STARTEXPERIMENTREQUEST']._serialized_start=2822
+  _globals['_STARTEXPERIMENTREQUEST']._serialized_end=2846
+  _globals['_STARTEXPERIMENTRESPONSE']._serialized_start=2848
+  _globals['_STARTEXPERIMENTRESPONSE']._serialized_end=2939
+  _globals['_ENDEXPERIMENTREQUEST']._serialized_start=2941
+  _globals['_ENDEXPERIMENTREQUEST']._serialized_end=2963
+  _globals['_ENDEXPERIMENTRESPONSE']._serialized_start=2965
+  _globals['_ENDEXPERIMENTRESPONSE']._serialized_end=3054
+  _globals['_BATTERYSTATUSREQUEST']._serialized_start=3056
+  _globals['_BATTERYSTATUSREQUEST']._serialized_end=3078
+  _globals['_BATTERYSTATUSRESPONSE']._serialized_start=3080
+  _globals['_BATTERYSTATUSRESPONSE']._serialized_end=3201
+  _globals['_DATASETMODELINFOREQUEST']._serialized_start=3203
+  _globals['_DATASETMODELINFOREQUEST']._serialized_end=3228
+  _globals['_DATASETMODELINFORESPONSE']._serialized_start=3231
+  _globals['_DATASETMODELINFORESPONSE']._serialized_end=3524
+  _globals['_DEVICE']._serialized_start=3527
+  _globals['_DEVICE']._serialized_end=4671
 # @@protoc_insertion_point(module_scope)
diff --git a/edml/generated/connection_pb2.pyi b/edml/generated/connection_pb2.pyi
index cbbaadd8f74a800e77028145de74c967ddec3956..bc0c09189004803b0d556d0c7eaeeecaf82922e0 100644
--- a/edml/generated/connection_pb2.pyi
+++ b/edml/generated/connection_pb2.pyi
@@ -48,14 +48,14 @@ class SingleBatchTrainingResponse(_message.Message):
     def __init__(self, smashed_data: _Optional[_Union[_datastructures_pb2.Activations, _Mapping]] = ..., labels: _Optional[_Union[_datastructures_pb2.Labels, _Mapping]] = ...) -> None: ...
 
 class TrainGlobalParallelSplitLearningRequest(_message.Message):
-    __slots__ = ["round_no", "adaptive_learning_threshold", "optimizer_state"]
+    __slots__ = ["round_no", "adaptive_threshold_value", "optimizer_state"]
     ROUND_NO_FIELD_NUMBER: _ClassVar[int]
-    ADAPTIVE_LEARNING_THRESHOLD_FIELD_NUMBER: _ClassVar[int]
+    ADAPTIVE_THRESHOLD_VALUE_FIELD_NUMBER: _ClassVar[int]
     OPTIMIZER_STATE_FIELD_NUMBER: _ClassVar[int]
     round_no: int
-    adaptive_learning_threshold: float
+    adaptive_threshold_value: float
     optimizer_state: _datastructures_pb2.StateDict
-    def __init__(self, round_no: _Optional[int] = ..., adaptive_learning_threshold: _Optional[float] = ..., optimizer_state: _Optional[_Union[_datastructures_pb2.StateDict, _Mapping]] = ...) -> None: ...
+    def __init__(self, round_no: _Optional[int] = ..., adaptive_threshold_value: _Optional[float] = ..., optimizer_state: _Optional[_Union[_datastructures_pb2.StateDict, _Mapping]] = ...) -> None: ...
 
 class TrainGlobalParallelSplitLearningResponse(_message.Message):
     __slots__ = ["client_weights", "server_weights", "metrics", "optimizer_state", "diagnostic_metrics"]
@@ -72,16 +72,16 @@ class TrainGlobalParallelSplitLearningResponse(_message.Message):
     def __init__(self, client_weights: _Optional[_Union[_datastructures_pb2.Weights, _Mapping]] = ..., server_weights: _Optional[_Union[_datastructures_pb2.Weights, _Mapping]] = ..., metrics: _Optional[_Union[_datastructures_pb2.Metrics, _Mapping]] = ..., optimizer_state: _Optional[_Union[_datastructures_pb2.StateDict, _Mapping]] = ..., diagnostic_metrics: _Optional[_Union[_datastructures_pb2.Metrics, _Mapping]] = ...) -> None: ...
 
 class TrainGlobalRequest(_message.Message):
-    __slots__ = ["epochs", "round_no", "adaptive_learning_threshold", "optimizer_state"]
+    __slots__ = ["epochs", "round_no", "adaptive_threshold_value", "optimizer_state"]
     EPOCHS_FIELD_NUMBER: _ClassVar[int]
     ROUND_NO_FIELD_NUMBER: _ClassVar[int]
-    ADAPTIVE_LEARNING_THRESHOLD_FIELD_NUMBER: _ClassVar[int]
+    ADAPTIVE_THRESHOLD_VALUE_FIELD_NUMBER: _ClassVar[int]
     OPTIMIZER_STATE_FIELD_NUMBER: _ClassVar[int]
     epochs: int
     round_no: int
-    adaptive_learning_threshold: float
+    adaptive_threshold_value: float
     optimizer_state: _datastructures_pb2.StateDict
-    def __init__(self, epochs: _Optional[int] = ..., round_no: _Optional[int] = ..., adaptive_learning_threshold: _Optional[float] = ..., optimizer_state: _Optional[_Union[_datastructures_pb2.StateDict, _Mapping]] = ...) -> None: ...
+    def __init__(self, epochs: _Optional[int] = ..., round_no: _Optional[int] = ..., adaptive_threshold_value: _Optional[float] = ..., optimizer_state: _Optional[_Union[_datastructures_pb2.StateDict, _Mapping]] = ...) -> None: ...
 
 class TrainGlobalResponse(_message.Message):
     __slots__ = ["client_weights", "server_weights", "metrics", "optimizer_state", "diagnostic_metrics"]
diff --git a/edml/proto/connection.proto b/edml/proto/connection.proto
index b0441d99a3397af8b85fc2f4eb99190accb05242..f4c7952a62b9db7225760a9b7596a03e4b9f09f4 100644
--- a/edml/proto/connection.proto
+++ b/edml/proto/connection.proto
@@ -51,7 +51,7 @@ message SingleBatchTrainingResponse {
 
 message TrainGlobalParallelSplitLearningRequest {
   optional int32 round_no = 1;
-  optional double adaptive_learning_threshold = 2;
+  optional double adaptive_threshold_value = 2;
   optional StateDict optimizer_state = 3;
 }
 
@@ -66,7 +66,7 @@ message TrainGlobalParallelSplitLearningResponse {
 message TrainGlobalRequest {
   int32 epochs = 1;
   optional int32 round_no = 2;
-  optional double adaptive_learning_threshold = 3;
+  optional double adaptive_threshold_value = 3;
   optional StateDict optimizer_state = 4;
 
 }
diff --git a/edml/tests/controllers/swarm_controller_test.py b/edml/tests/controllers/swarm_controller_test.py
index 4cde758ebf1199882293e115fb479d25a80fecf6..89ad8aa1fe830bc397ff2831cd037df49ba3b4b7 100644
--- a/edml/tests/controllers/swarm_controller_test.py
+++ b/edml/tests/controllers/swarm_controller_test.py
@@ -57,7 +57,7 @@ class SwarmControllerTest(unittest.TestCase):
             "d1",
             epochs=1,
             round_no=0,
-            adaptive_learning_threshold=0.0,
+            adaptive_threshold_value=0.0,
             optimizer_state={"optimizer_state": 43},
         )
 
@@ -83,7 +83,7 @@ class SwarmControllerTest(unittest.TestCase):
             "d1",
             epochs=1,
             round_no=0,
-            adaptive_learning_threshold=0.0,
+            adaptive_threshold_value=0.0,
             optimizer_state=None,
         )
 
diff --git a/edml/tests/core/device_test.py b/edml/tests/core/device_test.py
index 31371cebea53098c3344cc64b2d2f25614836cb7..85b0e1f7f7204efa6a66b3c95f89a5c392098855 100644
--- a/edml/tests/core/device_test.py
+++ b/edml/tests/core/device_test.py
@@ -134,7 +134,7 @@ class RPCDeviceServicerTest(unittest.TestCase):
         request = connection_pb2.TrainGlobalRequest(
             epochs=42,
             round_no=1,
-            adaptive_learning_threshold=3,
+            adaptive_threshold_value=3,
             optimizer_state=state_dict_to_proto({"optimizer_state": 42}),
         )
 
@@ -526,7 +526,7 @@ class RequestDispatcherTest(unittest.TestCase):
             connection_pb2.TrainGlobalRequest(
                 epochs=42,
                 round_no=43,
-                adaptive_learning_threshold=3,
+                adaptive_threshold_value=3,
                 optimizer_state=state_dict_to_proto({"optimizer_state": 44}),
             )
         )
@@ -538,7 +538,7 @@ class RequestDispatcherTest(unittest.TestCase):
             "1",
             42,
             round_no=43,
-            adaptive_learning_threshold=3,
+            adaptive_threshold_value=3,
             optimizer_state={"optimizer_state": 44},
         )
 
@@ -547,7 +547,7 @@ class RequestDispatcherTest(unittest.TestCase):
             connection_pb2.TrainGlobalRequest(
                 epochs=42,
                 round_no=43,
-                adaptive_learning_threshold=3,
+                adaptive_threshold_value=3,
                 optimizer_state=state_dict_to_proto({"optimizer_state": 44}),
             )
         )