From cf8067a58b878160259e9f9d579f3cc224b0f0a9 Mon Sep 17 00:00:00 2001
From: Pierre LOTTE <pierrelotte.dev@gmail.com>
Date: Sun, 6 Oct 2024 11:10:18 +0200
Subject: [PATCH] Add log correlation

---
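Reviewer note, not part of the commit message: below is a minimal standalone
sketch of the stepping scheme the new generator uses, assuming only NumPy.
The toy series, the ramp endpoints (0.0 and 2.0) and the array sizes are
illustrative stand-ins for self.terms / self.data, not values taken from the
patch. The min() clamp mirrors the generator and keeps a long monotone run
from stepping past the end of the ramp.

    import numpy as np

    # Toy correlated dimension: rises for six steps, then falls back down.
    corr_dim = np.concatenate([np.arange(6), np.arange(6, 0, -1)])
    n = corr_dim.shape[0]

    # Decaying step magnitudes: a flipped exponential ramp, scaled down by 10.
    log_diffs = np.flip(np.exp(np.linspace(0.0, 2.0, n // 5))) / 10

    sign = np.sign(np.diff(corr_dim))                          # direction of the watched dimension
    signchange = ((np.roll(sign, 1) - sign) != 0).astype(int)  # 1 wherever that direction flips

    steps = np.zeros(n)
    idx_log = 0
    for i in range(n - 1):
        if signchange[i] == 1:
            idx_log = 0                                        # restart the ramp on every reversal
        steps[i + 1] = log_diffs[min(idx_log, len(log_diffs) - 1)]
        idx_log += 1

    steps[1:] *= sign                                          # follow the watched dimension up and down
    print(np.cumsum(steps))                                    # log-shaped runs that mirror corr_dim
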
 .../generator/dimension/log_correlation.py    | 58 +++++++++++++++++++
 1 file changed, 58 insertions(+)
 create mode 100644 paradise/generator/dimension/log_correlation.py

diff --git a/paradise/generator/dimension/log_correlation.py b/paradise/generator/dimension/log_correlation.py
new file mode 100644
index 0000000..efddfa5
--- /dev/null
+++ b/paradise/generator/dimension/log_correlation.py
@@ -0,0 +1,58 @@
+"""
+This module defines the logarithmically correlated dimension generator.
+"""
+import numpy as np
+
+from .base import BaseDimension
+
+
+class LogCorrelationDimension(BaseDimension):
+    """
+    This class defines a logarithmically correlated dimension. It follows a logarithmic curve and moves
+    up when the correlated dimension goes up and moves back down when the correlated dimension goes down.
+    """
+    def generate(self) -> np.array:
+        # Pre-compute the step magnitudes: a flipped exponential ramp, scaled down by a factor of 10
+        log_diffs = np.flip(np.exp(np.linspace(self.terms["start"], self.terms["end"], self.data.shape[1] // 5))) / 10
+
+        # Compute testing data
+        # Find the dimension to watch for
+        corr_dim = self.data[self.params["dimension"]]
+        # Check for lag parameter
+        lag = self.terms.get("lag", 0)
+
+        # Compute values
+        sign = np.sign(np.diff(corr_dim))
+        signchange = ((np.roll(sign, 1) - sign) != 0).astype(int)
+        steps = np.zeros(self.data.shape[1])
+
+        idx_log = 0
+        for i in range(self.data.shape[1] - 1):
+            if signchange[i] == 1:
+                idx_log = 0
+            steps[i+1] = log_diffs[min(idx_log, len(log_diffs) - 1)]
+            idx_log += 1
+
+        steps[1:] *= sign * self.terms["sign"]
+        self.data[self.idx, lag:] = np.cumsum(steps)[:-lag] if lag > 0 else np.cumsum(steps)
+
+        # Compute training data
+        # Find the dimension to watch for
+        corr_dim = self.train_data[self.params["dimension"]]
+
+        # Compute values
+        sign = np.sign(np.diff(corr_dim))
+        signchange = ((np.roll(sign, 1) - sign) != 0).astype(int)
+        steps = np.zeros(self.train_data.shape[1])
+
+        idx_log = 0
+        for i in range(self.train_data.shape[1] - 1):
+            if signchange[i] == 1:
+                idx_log = 0
+            steps[i+1] = log_diffs[min(idx_log, len(log_diffs) - 1)]
+            idx_log += 1
+
+        steps[1:] *= sign * self.terms["sign"]
+        self.train_data[self.idx, lag:] = np.cumsum(steps)[:-lag] if lag > 0 else np.cumsum(steps)
+
+        return self.data, self.train_data
-- 
GitLab