diff --git a/util.py b/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..87bb1dd1ce1bb9028b4cd751ef87d7ac23d24209
--- /dev/null
+++ b/util.py
@@ -0,0 +1,105 @@
+import numpy as np
+import torch
+from torch.utils.data import DataLoader, TensorDataset
+from sklearn.metrics import (accuracy_score, f1_score, precision_score,
+                             recall_score, roc_auc_score)
+
+from model import ECGDataset
+
+def load_data(file="./data/data.npz", device="cuda", dtype=torch.float32):
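+    """Load the train/val/test splits from an .npz archive as torch tensors."""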
+    with np.load(file) as data:
+        X_test = torch.tensor(data['X_test'], dtype=dtype, device=device)
+        X_train = torch.tensor(data["X_train"], dtype=dtype, device=device)
+        X_val = torch.tensor(data['X_val'], dtype=dtype, device=device)
+        y_test = torch.tensor(data['y_test'], dtype=dtype, device=device)
+        y_train = torch.tensor(data['y_train'], dtype=dtype, device=device)
+        y_val = torch.tensor(data['y_val'], dtype=dtype, device=device)
+    return X_train, y_train, X_val, y_val, X_test, y_test
+
+def get_dataloaders(X_train, y_train, X_val, y_val, X_test, y_test, batch_size=32):
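+    """Wrap the data splits in Datasets and return train/val/test DataLoaders."""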
+    train_data = ECGDataset(X_train, y_train)
+    train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
+
+    val_data = ECGDataset(X_val, y_val)
+    val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=False)
+
+    test_data = TensorDataset(X_test, y_test)
+    test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False)
+
+    return train_dataloader, val_dataloader, test_dataloader
+
+def evaluate_on_val(model, val_loader, criterion):
+    """Return the average criterion loss over the validation set."""
+    # Minimal implementation: assumes model(batch) returns logits that the
+    # criterion can compare directly against the label batch.
+    model.eval()
+    total_loss = 0.0
+    with torch.no_grad():
+        for sequences_batch, labels_batch in val_loader:
+            outputs = model(sequences_batch)
+            total_loss += criterion(outputs, labels_batch).item()
+    return total_loss / len(val_loader)
+
+def evaluate_on_test(model, test_loader):
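+    """Evaluate the model on the test set and print multi-label metrics."""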
+    model.eval()  # Set model to evaluation mode
+    all_labels = []
+    all_preds = []
+    with torch.no_grad():
+        for sequences_batch, labels_batch in test_loader:
+            # use sliding window for test
+            outputs = model.predict(sequences_batch)
+            preds = torch.sigmoid(outputs)  # Apply sigmoid to get probabilities
+            
+            # Collect the predicted and true labels
+            all_labels.append(labels_batch.to("cpu").numpy())
+            all_preds.append(preds.to("cpu").numpy())
+
+    # Convert to numpy arrays
+    all_labels = np.vstack(all_labels)
+    all_preds = np.vstack(all_preds)
+    # Binarize the predictions (0.5 threshold)
+    all_preds_binary = (all_preds >= 0.5).astype(int)
+
+    # "Any-hit" accuracy: fraction of samples where at least one predicted
+    # label is also a true label
+    correct_predictions = np.any(np.logical_and(all_preds_binary, all_labels), axis=1)
+    any_hit_accuracy = np.mean(correct_predictions)
+
+    # Standard multi-label metrics (accuracy_score is exact-match subset accuracy)
+    accuracy = accuracy_score(all_labels, all_preds_binary)
+    precision = precision_score(all_labels, all_preds_binary, average='macro', zero_division=0)
+    recall = recall_score(all_labels, all_preds_binary, average='macro', zero_division=0)
+    f1 = f1_score(all_labels, all_preds_binary, average='macro', zero_division=0)
+    auc = roc_auc_score(all_labels, all_preds, average='micro')
+
+    print(f'Test Any-Hit Accuracy: {any_hit_accuracy:.4f}')
+    print(f'Test Subset Accuracy: {accuracy:.4f}')
+    print(f'Test Precision: {precision:.4f}')
+    print(f'Test Recall: {recall:.4f}')
+    print(f'Test F1 Score: {f1:.4f}')
+    print(f'Test AUC: {auc:.4f}')
+
+def export_onnx(model, model_name, batch_size=32, window_size=100):
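+    """Export the model to ONNX with a dynamic batch dimension."""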
+    # Dummy input with the expected (batch, channels, window) shape
+    x = torch.randn(batch_size, 12, window_size, requires_grad=True)
+    # Sanity-check prediction on the dummy input
+    torch_out = model.predict(x)
+
+    # Export the model
+    torch.onnx.export(
+        model,                     # model being run
+        x,                         # model input (or a tuple for multiple inputs)
+        f"{model_name}.onnx",      # where to save the model (file path or file-like object)
+        export_params=True,        # store the trained parameter weights inside the model file
+        opset_version=18,          # the ONNX opset version to export the model to
+        do_constant_folding=True,  # whether to execute constant folding for optimization
+        input_names=['input'],     # the model's input names
+        output_names=['output'],   # the model's output names
+        dynamic_axes={'input': {0: 'batch_size'},      # variable-length batch axis
+                      'output': {0: 'batch_size'}},
+        dynamo=False,              # use the TorchScript-based exporter
+    )