diff --git a/.gitignore b/.gitignore
index 7c4ddb3146c91a896fea0c4c6aaa0fdc37094723..766b259382966985d13c1207ef78f24e67d90ff6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,5 @@
 **/__pycache__
 /.idea
 /.vscode
+
+/algorithms
diff --git a/algorithms/deepant/.gitignore b/algorithms/deepant/.gitignore
deleted file mode 100644
index 19ccce36f32f32fe6f6ea8592f0ba02f606cf27f..0000000000000000000000000000000000000000
--- a/algorithms/deepant/.gitignore
+++ /dev/null
@@ -1,147 +0,0 @@
-########################################
-# Python.gitignore from github/gitignore
-########################################
-
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-#  Usually these files are written by a python script from a template
-#  before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
-.pytest_cache/
-cover/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-#   For a library or package, you might want to ignore these files since the code is
-#   intended to run in multiple environments; otherwise, check them in:
-# .python-version
-
-# pipenv
-#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-#   However, in case of collaboration, if having platform-specific dependencies or dependencies
-#   having no cross-platform support, pipenv may install dependencies that don't work, or not
-#   install all needed dependencies.
-#Pipfile.lock
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-########################################
-
-
-**.png
diff --git a/algorithms/deepant/Dockerfile b/algorithms/deepant/Dockerfile
deleted file mode 100644
index 6f8e61d9a9dbf9dc127c593399c0abd372bd7b98..0000000000000000000000000000000000000000
--- a/algorithms/deepant/Dockerfile
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM ghcr.io/timeeval/python3-torch:0.3.0
-
-LABEL maintainer="grit.fessel@student.hpi.de"
-LABEL org.opencontainers.image.licenses=MIT
-
-ENV ALGORITHM_MAIN="/app/algorithm.py"
-
-# install algorithm dependencies
-COPY requirements.txt /app/
-RUN pip install -r /app/requirements.txt
-
-COPY deepant /app/deepant
-COPY helper.py /app/
-COPY manifest.json /app/
-COPY algorithm.py /app/
diff --git a/algorithms/deepant/LICENSE b/algorithms/deepant/LICENSE
deleted file mode 100644
index 04c1b36e8e3bfe31e26b875b4f048b57077a239d..0000000000000000000000000000000000000000
--- a/algorithms/deepant/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2020-2022 Mohsin Munir, Shoaib Ahmed Siddiqui, Andreas Dengel, Sheraz Ahmed, Aadarsh Kumar Singh, Grit Fessel, Phillip Wenig and Sebastian Schmidl
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/algorithms/deepant/README.md b/algorithms/deepant/README.md
deleted file mode 100755
index bd2c1fdf723f824a1c925da20e3b587c282b59f0..0000000000000000000000000000000000000000
--- a/algorithms/deepant/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# DeepAnT
-
-Adapted version of the community implementation of DeepAnT from https://github.com/dev-aadarsh/DeepAnT.
-
-|||
-| :--- | :--- |
-| Citekey | MunirEtAl2019DeepAnT |
-| Source Code | [https://github.com/dev-aadarsh/DeepAnT](https://github.com/dev-aadarsh/DeepAnT) |
-| Input Dimensionality | multivariate |
-| Learning Type | semi-supervised |
-|||
-
-## Dependencies
-
-- python 3
-- numpy
-- pandas
-- pytorch
-
-## Notes
-
-DeepAnT outputs anomaly scores for windows.
-The results therefore require post-processing.
-The score for each point can be obtained by aggregating the anomaly scores of every window that contains the point.
-The window size for this aggregation is computed as `window_size + prediction_window_size`.
-
-You can use the following code snippet for the post-processing step in TimeEval (default parameters directly filled in from the source code):
-
-<!--BEGIN:timeeval-post-->
-```python
-from timeeval.utils.window import ReverseWindowing
-# post-processing for DeepAnT
-def _post_deepant(scores: np.ndarray, args: dict) -> np.ndarray:
-    window_size = args.get("hyper_params", {}).get("window_size", 45)
-    prediction_window_size = args.get("hyper_params", {}).get("prediction_window_size", 1)
-    size = window_size + prediction_window_size
-    return ReverseWindowing(window_size=size).fit_transform(scores)
-```
-<!--END:timeeval-post-->
diff --git a/algorithms/deepant/algorithm.py b/algorithms/deepant/algorithm.py
deleted file mode 100644
index f5a6e635efbcfa12d0b9c44efade23882cd375ee..0000000000000000000000000000000000000000
--- a/algorithms/deepant/algorithm.py
+++ /dev/null
@@ -1,195 +0,0 @@
-#!/usr/bin/env python
-import json, sys
-import torch
-import numpy as np
-import pandas as pd
-from deepant.detector import Detector
-from deepant.predictor import Predictor
-from deepant.dataset import TimeSeries
-from pathlib import Path
-from helper import retrieve_save_path
-
-EPOCHS = 50
-WINDOW = 45
-PRED_WINDOW = 1
-LR = 1e-5
-WEIGHT_DECAY = 1e-6
-TRAIN_SPLIT = 0.75
-VAL_SPLIT = 0.25
-BATCH_SIZE = 45
-EARLY_STOPPING_DELTA = 0.05
-EARLY_STOPPING_PATIENCE = 10
-RANDOM_STATE = 42
-
-
-class Config:
-    dataInput: Path
-    dataOutput: Path
-    modelInput: Path
-    modelOutput: Path
-    executionType: str
-    epochs: int
-    window: int
-    pred_window: int
-    lr: float
-    batch_size: int
-    split: float
-    early_stopping_delta: float
-    early_stopping_patience: int
-    random_state: int
-
-    def __init__(self, params):
-        self.dataInput = Path(params.get("dataInput", "data/dataset.csv"))
-        self.dataOutput = Path(params.get("dataOutput", "results/anomalies.csv"))
-        self.modelInput = Path(params.get("modelInput", "results/model.pt"))
-        self.modelOutput = Path(params.get("modelOutput", "results/model.pt"))
-        self.executionType = params.get("executionType")
-        try:
-            customParameters = params["customParameters"]
-        except KeyError:
-            customParameters = {}
-        self.epochs = customParameters.get("epochs", EPOCHS)
-        self.window = customParameters.get("window_size", WINDOW)
-        self.pred_window = customParameters.get("prediction_window_size", PRED_WINDOW)
-        self.lr = customParameters.get("learning_rate", LR)
-        self.batch_size = customParameters.get("batch_size", BATCH_SIZE)
-        self.split = customParameters.get("split", TRAIN_SPLIT)
-        self.early_stopping_delta = customParameters.get("early_stopping_delta", EARLY_STOPPING_DELTA)
-        self.early_stopping_patience = customParameters.get("early_stopping_patience", EARLY_STOPPING_PATIENCE)
-        self.random_state = customParameters.get("random_state", RANDOM_STATE)
-    
-    def __str__(self):
-        if self.executionType == "train":
-            outputString = f"Config("\
-                f"dataInput={self.dataInput}, modelOutput={self.modelOutput}, executionType={self.executionType}," \
-                f"epochs={self.epochs}, window={self.window}, lr={self.lr}," \
-                f"pred_window={self.pred_window}, batch_size={self.batch_size})"
-        elif self.executionType == "execute":
-            outputString = f"Config("\
-                f"dataInput={self.dataInput}, dataOutput={self.dataOutput}, modelInput={self.modelInput}," \
-                f"executionType={self.executionType}, window={self.window}, pred_window={self.pred_window})"
-        return outputString
-
-
-def get_subsequences(data, window, pred_window, channels):
-    X = []
-    Y = []
-
-    for i in range(len(data) - window - pred_window):
-        X.append(data[i : i + window])
-        Y.append(data[i + window : i + window + pred_window])
-
-    X = np.array(X)
-    Y = np.array(Y)
-    X = np.moveaxis(X, source=2, destination=1)
-    Y = np.reshape(Y, (Y.shape[0], channels*pred_window))
-    return X, Y
-
-
-def preprocess_data(config):
-    """
-    Requirements for dataset:
-    - CSV dataset
-    - 1. column is index (e.g. timestamp)
-    - all other columns are values (float)
-    - there must not be a specific label
-    """
-    ts_data = pd.read_csv(config.dataInput, index_col = 0).iloc[:, :-1]  # remove labels
-    print(f"Dataset {config.dataInput};")
-    print(ts_data)
-
-    c_values = ts_data.columns
-    channels = len(c_values)
-
-    if config.executionType == "train":
-        # define train and validation datasets
-        train_samples = int(config.split * len(ts_data))
-        valid_samples = int((1 - config.split) * len(ts_data))
-        print(f"Training data: {train_samples} ({config.split*100:.0f}%)")
-        print(f"Validation data: {valid_samples} ({(1 - config.split)*100:.0f}%)")
-
-        train_dataset = TimeSeries(ts_data.iloc[:train_samples].values, window_length=config.window, prediction_length=config.pred_window)
-        valid_dataset = TimeSeries(ts_data.iloc[train_samples:].values, window_length=config.window, prediction_length=config.pred_window)
-
-        return {
-            "train": train_dataset,
-            "val": valid_dataset,
-            "n_channels": channels
-        }
-    elif config.executionType == "execute":
-        test_data = ts_data.iloc[:]
-
-        print(f"Creating subsequences with window length {config.window + config.pred_window}")
-        test_dataset = TimeSeries(test_data.values, config.window, config.pred_window)
-
-        return {
-            "test": test_dataset,
-            "n_channels": channels
-        }
-
-    return {}
-
-
-def train(config):
-    print("\nPREPROCESSING ====")
-    data = preprocess_data(config)
-
-    # create components
-    predictor = Predictor(config.window, config.pred_window, config.lr, config.batch_size, in_channels=data["n_channels"])
-    print(predictor.model)
-
-    # train
-    print("\nTRAINING =========")
-    train_dataset = data["train"]
-    valid_dataset = data["val"]
-    predictor.train(train_dataset, valid_dataset, n_epochs=config.epochs, save_path=config.modelOutput,
-                    early_stopping_delta=config.early_stopping_delta, early_stopping_patience=config.early_stopping_patience)
-
-
-def execute(config):
-    data = preprocess_data(config)
-
-    print("\nPREDICTION =======")
-    predictor = Predictor(window=config.window, pred_window=config.pred_window, in_channels=data["n_channels"])
-    predictor.load(config.modelInput)
-    print(predictor.model)
-
-    detector = Detector()
-    
-    test_dataset = data["test"]
-    predictedY = predictor.predict(test_dataset)
-    anomalies = detector.detect(predictedY, test_dataset)
-    result_save_path = retrieve_save_path(config.dataOutput, "anomalies.csv")
-    anomalies.tofile(result_save_path, sep="\n")
-
-
-def parse_args():
-    if len(sys.argv) < 2:
-        print("No arguments supplied, please specify the execution type at least!", file=sys.stderr)
-        exit(1)
-    elif len(sys.argv) > 2:
-        print("Wrong number of arguments supplied! Single JSON-String expected!", file=sys.stderr)
-        exit(1)
-    else:
-        params = json.loads(sys.argv[1])
-    return Config(params)
-
-
-def set_random_state(config: Config) -> None:
-    seed = config.random_state
-    import random
-    random.seed(seed)
-    np.random.seed(seed)
-    torch.manual_seed(seed)
-
-
-if __name__ == "__main__":
-    config = parse_args()
-    set_random_state(config)
-    print(config)
-    if config.executionType == "train":
-        train(config)
-    elif config.executionType == "execute":
-        execute(config)
-    else:
-        raise ValueError(f"No executionType '{config.executionType}' available! Choose either 'train' or 'execute'.")
diff --git a/algorithms/deepant/deepant/__init__.py b/algorithms/deepant/deepant/__init__.py
deleted file mode 100755
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/algorithms/deepant/deepant/dataset.py b/algorithms/deepant/deepant/dataset.py
deleted file mode 100644
index 0e9c05275267a55a70a604861fb08bbb1e42aa46..0000000000000000000000000000000000000000
--- a/algorithms/deepant/deepant/dataset.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import torch
-
-from torch.utils.data import Dataset
-from typing import List, Optional, Tuple
-
-
-class TimeSeries(Dataset):
-    def __init__(self, X, window_length: int, prediction_length: int, output_dims: Optional[List[int]] = None):
-        self.output_dims = output_dims or list(range(X.shape[1]))
-        self.X = torch.from_numpy(X).float()
-        self.window_length = window_length
-        self.prediction_length = prediction_length
-
-    def __len__(self):
-        return self.X.shape[0] - (self.window_length - 1) - self.prediction_length
-
-    def __getitem__(self, index) -> Tuple[torch.Tensor, torch.Tensor]:
-        end_idx = index+self.window_length
-        x = self.X[index:end_idx].reshape(len(self.output_dims), -1)
-        y = self.X[end_idx:end_idx+self.prediction_length, self.output_dims]
-        return x, y
diff --git a/algorithms/deepant/deepant/detector.py b/algorithms/deepant/deepant/detector.py
deleted file mode 100755
index 00c33e73fed6884a686b8d440010cc15769376a5..0000000000000000000000000000000000000000
--- a/algorithms/deepant/deepant/detector.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import numpy as np
-import torch
-from torch.utils.data import Dataset, DataLoader
-import torch.nn.functional as F
-
-
-class Detector():
-    def __init__(self):
-        pass
-
-    def detect(self, predictedY: torch.Tensor, test_dataset: Dataset) -> np.ndarray:
-        _, test_y = next(iter(DataLoader(test_dataset, batch_size=predictedY.shape[0])))
-
-        # calculate the Euclidean distance between prediction and ground truth
-        anomaly_score = torch.sqrt(F.mse_loss(predictedY.detach(), test_y.detach(), reduction="none").sum(dim=[1, 2]))
-        # standardize the error (absolute z-score)
-        anomaly_score = (anomaly_score - anomaly_score.mean()).abs() / anomaly_score.std()
-        return anomaly_score.numpy()
diff --git a/algorithms/deepant/deepant/early_stopping.py b/algorithms/deepant/deepant/early_stopping.py
deleted file mode 100644
index bf1f6f43c3a58bc9e76bcec41f9d7cbe0f8a5a37..0000000000000000000000000000000000000000
--- a/algorithms/deepant/deepant/early_stopping.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from typing import Iterator, Optional, List, Callable
-
-
-class EarlyStopping:
-    def __init__(self, patience: int, delta: float, epochs: int,
-                 callbacks: Optional[List[Callable[[bool, float, int], None]]] = None):
-        self.patience = patience
-        self.delta = delta
-        self.epochs = epochs
-        self.current_epoch = 0
-        self.epochs_without_change = 0
-        self.last_loss: Optional[float] = None
-        self.callbacks = callbacks or []
-
-    def _callback(self, improvement: bool, loss: float):
-        for cb in self.callbacks:
-            cb(improvement, loss, self.epochs_without_change)
-
-    def update(self, loss: float):
-        improvement = False
-        if self.last_loss is None or (1 - (loss / self.last_loss) > self.delta):
-            self.last_loss = loss
-            self.epochs_without_change = 0
-            improvement = True
-        else:
-            self.epochs_without_change += 1
-
-        self._callback(improvement, loss)
-
-    def __iter__(self) -> Iterator[int]:
-        while self.epochs_without_change <= self.patience and self.current_epoch < self.epochs:
-            yield self.current_epoch
-            self.current_epoch += 1
diff --git a/algorithms/deepant/deepant/model.py b/algorithms/deepant/deepant/model.py
deleted file mode 100755
index ead46519b6efd1887d4f1ae20fc2298ecfba2135..0000000000000000000000000000000000000000
--- a/algorithms/deepant/deepant/model.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import torch.nn.functional as F
-from torch.nn import Module, Conv1d, MaxPool1d, Linear, Dropout
-
-
-class DeepAnTCNN(Module):
-    def __init__(self, window, pred_window, in_channels, filter1_size, filter2_size, kernel_size, pool_size, stride):
-        super(DeepAnTCNN, self).__init__()
-
-        # layers
-        self.conv1 = Conv1d(in_channels=in_channels, out_channels=filter1_size, kernel_size=kernel_size, stride=stride, padding = 0)
-
-        self.conv2 = Conv1d(in_channels=filter1_size, out_channels=filter2_size, kernel_size=kernel_size, stride=stride, padding = 0)
-
-        self.maxpool = MaxPool1d(pool_size)
-
-        self.dropout = Dropout(0.25)
-
-        self.pred_window = pred_window
-        self.in_channels = in_channels
-        self.dim1 = int(0.5*(0.5*(window-1)-1)) * filter2_size
-        self.lin1 = Linear(self.dim1, in_channels*pred_window)
-
-    def forward(self, x):
-        # convolution layer 1
-        x = F.relu(self.conv1(x))
-        x = self.maxpool(x)
-
-        # convolution layer 2
-        x = F.relu(self.conv2(x))
-        x = self.maxpool(x)
-
-        x = x.view(-1, self.dim1)
-
-        x = self.dropout(x)
-        x = self.lin1(x)
-
-        return x.view(-1, self.pred_window, self.in_channels)
diff --git a/algorithms/deepant/deepant/predictor.py b/algorithms/deepant/deepant/predictor.py
deleted file mode 100755
index 0228c13fdb96b98c73b63aa6d7cf20283e04a367..0000000000000000000000000000000000000000
--- a/algorithms/deepant/deepant/predictor.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import numpy as np
-import torch
-from torch.nn import L1Loss
-from torch.optim import Adam
-from torch.utils.data import DataLoader, Dataset
-
-from deepant.early_stopping import EarlyStopping
-from deepant.model import DeepAnTCNN
-
-from helper import retrieve_save_path
-
-
-class Predictor():
-    def __init__(self, window, pred_window, lr = 1e-5, batch_size = 45, in_channels=1, filter1_size = 128, filter2_size = 32,
-            kernel_size = 2, pool_size = 2, stride = 1):
-        self.model = DeepAnTCNN(window, pred_window, in_channels, filter1_size, filter2_size, kernel_size, pool_size, stride)
-        self.lr = lr
-        self.batch_size = batch_size
-
-    def train(self, train_dataset: Dataset, valid_dataset: Dataset, n_epochs, save_path, log_freq=10, early_stopping_patience = 5, early_stopping_delta = 1e-2):
-        model_save_name = retrieve_save_path(save_path, "model.pt")
-
-        valid_loss_min = np.Inf
-        train_loss_min = np.Inf
-
-        optimizer = Adam(self.model.parameters(), lr=self.lr)
-        criterion = L1Loss()
-
-        dataloader_train = DataLoader(train_dataset, batch_size=self.batch_size)
-        dataloader_valid = DataLoader(valid_dataset, batch_size=self.batch_size)
-
-        early_stopping = EarlyStopping(early_stopping_patience, early_stopping_delta, n_epochs)
-
-        for epoch in early_stopping:
-            train_losses = []
-            for X, y in dataloader_train:
-                # training
-                self.model.train()
-
-                optimizer.zero_grad()
-                output = self.model(X)
-                loss = criterion(output, y)
-                loss.backward()
-                optimizer.step()
-                train_losses.append(loss.item())
-            train_loss = sum(train_losses)
-
-            valid_losses = []
-            for X, y in dataloader_valid:
-                # validation
-                self.model.eval()
-                output_valid = self.model(X)
-
-                loss_valid = criterion(output_valid, y)
-                valid_losses.append(loss_valid.item())
-            valid_loss = sum(valid_losses)
-
-            early_stopping.update(valid_loss)
-            if epoch % log_freq == 0:
-                print(f"Epoch: {epoch}/{n_epochs} \tTraining Loss: {train_loss:.6f} \tValidation Loss: {valid_loss:.6f}")
-
-            if train_loss < train_loss_min:
-                train_loss_min = train_loss
-
-            # save model if validation loss decreases
-            if valid_loss < valid_loss_min:
-                torch.save(self.model.state_dict(), model_save_name)
-                valid_loss_min = valid_loss
-            last_epoch = epoch
-        if last_epoch < n_epochs - 1:
-            print(f"\nTraining canceled because validation loss has not decreased significantly for {early_stopping_patience} epochs")
-            print(f"Minimal Training Loss: {train_loss_min:.6f} \tMinimal Validation Loss: {valid_loss_min:.6f}")
-            print("Model has been saved")
-
-    def predict(self, test_dataset: Dataset):
-        self.model.eval()
-
-        dataloader = DataLoader(test_dataset, batch_size=self.batch_size)
-        result = []
-        for x, _ in dataloader:
-            out = self.model(x).detach()
-            result.append(out)
-        return torch.cat(result, dim=0)
-
-    def save(self, path):
-        torch.save(self.model.state_dict(), path)
-
-    def load(self, path):
-        self.model.load_state_dict(torch.load(path))
diff --git a/algorithms/deepant/helper.py b/algorithms/deepant/helper.py
deleted file mode 100644
index b2214e99ae50d92868979ca1632919dc3c1fd149..0000000000000000000000000000000000000000
--- a/algorithms/deepant/helper.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import os
-
-def retrieve_save_path(save_path, default_file_name):
-    file_name = default_file_name
-    # splits path into directories without file name and file name only
-    file_path_tuple = os.path.split(save_path)
-    if file_path_tuple[1] != "":
-        file_name = file_path_tuple[1]
-    if file_path_tuple[0] != "" and not os.path.exists(file_path_tuple[0]):
-        os.makedirs(file_path_tuple[0])
-    save_name = os.path.join(file_path_tuple[0], file_name)
-    return save_name
diff --git a/algorithms/deepant/manifest.json b/algorithms/deepant/manifest.json
deleted file mode 100755
index b7809935d66dff3c2d551c252d314a52c15eb530..0000000000000000000000000000000000000000
--- a/algorithms/deepant/manifest.json
+++ /dev/null
@@ -1,105 +0,0 @@
-{
-  "title": "DeepAnT",
-  "description": "Adapted community implementation (https://github.com/dev-aadarsh/DeepAnT)",
-  "inputDimensionality": "multivariate",
-  "version": "0.3.0",
-  "authors": "Mohsin Munir and Shoaib Ahmed Siddiqui and Andreas Dengel and Sheraz Ahmed",
-  "language": "Python",
-  "type": "Detector",
-  "learningType": "semi-supervised",
-  "mainFile": "algorithm.py",
-  "trainingStep": {
-    "parameters": [
-      {
-        "name": "epochs",
-        "type": "int",
-        "description": "Number of training epochs",
-        "defaultValue": 50,
-        "optional": "true"
-      },
-      {
-        "name": "window_size",
-        "type": "int",
-        "description": "History window: Number of time stamps in history, which are taken into account",
-        "defaultValue": 45,
-        "optional": "true"
-      },
-      {
-        "name": "prediction_window_size",
-        "type": "int",
-        "description": "Prediction window: Number of data points that will be predicted from each window",
-        "defaultValue": 1,
-        "optional": "true"
-      },
-      {
-        "name": "learning_rate",
-        "type": "float",
-        "description": "Learning rate",
-        "defaultValue": 1e-05,
-        "optional": "true"
-      },
-      {
-        "name": "batch_size",
-        "type": "int",
-        "description": "Batch size for input data",
-        "defaultValue": 45,
-        "optional": "true"
-      },
-      {
-        "name": "random_state",
-        "type": "int",
-        "defaultValue": 42,
-        "optional": "true",
-        "description": "Seed for the random number generator"
-      },
-      {
-        "name": "split",
-        "type": "float",
-        "defaultValue": 0.8,
-        "optional": "true",
-        "description": "Train-validation split for early stopping"
-      },
-      {
-        "name": "early_stopping_delta",
-        "type": "float",
-        "defaultValue": 0.05,
-        "optional": "true",
-        "description": "If 1 - (loss / last_loss) is less than `delta` for `patience` epochs, stop"
-      },
-      {
-        "name": "early_stopping_patience",
-        "type": "int",
-        "defaultValue": 10,
-        "optional": "true",
-        "description": "If 1 - (loss / last_loss) is less than `delta` for `patience` epochs, stop"
-      }
-    ],
-    "modelInput": "none"
-  },
-  "executionStep": {
-    "parameters": [
-      {
-        "name": "window_size",
-        "type": "int",
-        "description": "History window that was used for the model training",
-        "defaultValue": 45,
-        "optional": "false"
-      },
-      {
-        "name": "prediction_window_size",
-        "type": "int",
-        "description": "Prediction window that was used for the model training",
-        "defaultValue": 1,
-        "optional": "false"
-      },
-      {
-        "name": "random_state",
-        "type": "int",
-        "defaultValue": 42,
-        "optional": "true",
-        "description": "Seed for the random number generator"
-      }
-    ],
-    "modelInput": "optional"
-  }
-}
\ No newline at end of file
diff --git a/algorithms/deepant/requirements.txt b/algorithms/deepant/requirements.txt
deleted file mode 100755
index b469256baab469ac2d29823f10bb502500d9802a..0000000000000000000000000000000000000000
--- a/algorithms/deepant/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-numpy
-pandas
-torch
diff --git a/algorithms/health_esn/Dockerfile b/algorithms/health_esn/Dockerfile
deleted file mode 100644
index 06a534f0a3bdbc140b50a9514634521f3105b318..0000000000000000000000000000000000000000
--- a/algorithms/health_esn/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM ghcr.io/timeeval/python3-base:0.3.0
-
-LABEL maintainer="phillip.wenig@hpi.de"
-LABEL org.opencontainers.image.licenses=MIT
-
-ENV ALGORITHM_MAIN="/app/algorithm.py"
-
-COPY requirements.txt /app/
-RUN pip install -r /app/requirements.txt
-
-COPY health_esn /app/health_esn/
-COPY manifest.json /app/
-COPY algorithm.py /app/
diff --git a/algorithms/health_esn/LICENSE b/algorithms/health_esn/LICENSE
deleted file mode 100644
index e594ec89208b9bcc64e3b455e283d3ce07f1b817..0000000000000000000000000000000000000000
--- a/algorithms/health_esn/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2020-2022 Phillip Wenig and Sebastian Schmidl
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/algorithms/health_esn/README.md b/algorithms/health_esn/README.md
deleted file mode 100644
index 548c602a4445a7bce53ccf12cc07d371e8654c12..0000000000000000000000000000000000000000
--- a/algorithms/health_esn/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# HealthESN
-
-|||
-| :--- | :--- |
-| Citekey | ChenEtAl2020Imbalanced |
-| Source Code | `own` |
-| Learning type | semi-supervised |
-| Input dimensionality | multivariate |
-|||
-
-## Dependencies
-
-- python 3
diff --git a/algorithms/health_esn/algorithm.py b/algorithms/health_esn/algorithm.py
deleted file mode 100644
index b98fe8c99ceafa90b910587337198b6f58fd16da..0000000000000000000000000000000000000000
--- a/algorithms/health_esn/algorithm.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python3
-import argparse
-import json
-import sys
-import numpy as np
-import pandas as pd
-import pickle
-from typing import Callable
-from enum import Enum
-
-from dataclasses import dataclass
-from health_esn.model import HealthESN
-from scipy.special import expit
-
-
-class Activation(Enum):
-    SIGMOID="sigmoid"
-    TANH="tanh"
-
-    def get_fun(self) -> Callable[[np.ndarray], np.ndarray]:
-        if self == Activation.SIGMOID:
-            return expit
-        else: # if self == Activation.TANH
-            return np.tanh
-
-
-@dataclass
-class CustomParameters:
-    linear_hidden_size: int = 500
-    prediction_window_size: int = 20
-    connectivity: float = 0.25
-    spectral_radius: float = 0.6
-    activation: str = Activation.TANH.value
-    random_state: int = 42
-
-
-class AlgorithmArgs(argparse.Namespace):
-    @property
-    def ts(self) -> np.ndarray:
-        return self.df.iloc[:, 1:-1].values
-
-    @property
-    def df(self) -> pd.DataFrame:
-        return pd.read_csv(self.dataInput)
-
-    @staticmethod
-    def from_sys_args() -> 'AlgorithmArgs':
-        args: dict = json.loads(sys.argv[1])
-        custom_parameter_keys = dir(CustomParameters())
-        filtered_parameters = dict(
-            filter(lambda x: x[0] in custom_parameter_keys, args.get("customParameters", {}).items()))
-        args["customParameters"] = CustomParameters(**filtered_parameters)
-        return AlgorithmArgs(**args)
-
-
-def set_random_state(config: AlgorithmArgs) -> None:
-    seed = config.customParameters.random_state
-    import random
-    random.seed(seed)
-    np.random.seed(seed)
-
-
-def save(args: AlgorithmArgs, model: HealthESN):
-    with open(args.modelOutput, "wb") as f:
-        pickle.dump(model, f)
-
-
-def load(args: AlgorithmArgs) -> HealthESN:
-    with open(args.modelInput, "rb") as f:
-        model = pickle.load(f)
-    return model
-
-
-def train(args: AlgorithmArgs):
-    ts = args.ts
-    health_esn = HealthESN(n_dimensions=ts.shape[1],
-                           hidden_units=args.customParameters.linear_hidden_size,
-                           window_size=args.customParameters.prediction_window_size,
-                           connectivity=args.customParameters.connectivity,
-                           spectral_radius=args.customParameters.spectral_radius,
-                           activation=Activation(args.customParameters.activation).get_fun(),
-                           seed=args.customParameters.random_state)
-    health_esn.fit(ts)
-    save(args, health_esn)
-
-
-def execute(args: AlgorithmArgs):
-    ts = args.ts
-    health_esn = load(args)
-    anomaly_scores = health_esn.predict(ts)
-    anomaly_scores.tofile(args.dataOutput, sep="\n")
-
-
-if __name__ == "__main__":
-    if len(sys.argv) != 2:
-        print("Wrong number of arguments specified; expected a single json-string!")
-        exit(1)
-
-    args = AlgorithmArgs.from_sys_args()
-    print(f"AlgorithmArgs: {args}")
-
-    if args.executionType == "train":
-        train(args)
-    elif args.executionType == "execute":
-        execute(args)
-    else:
-        raise ValueError(f"Unknown execution type '{args.executionType}'; expected either 'train' or 'execute'!")
diff --git a/algorithms/health_esn/health_esn/__init__.py b/algorithms/health_esn/health_esn/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/algorithms/health_esn/health_esn/model.py b/algorithms/health_esn/health_esn/model.py
deleted file mode 100644
index 62bf160f3eee3a2637d2918c76b9fc41662e4ab8..0000000000000000000000000000000000000000
--- a/algorithms/health_esn/health_esn/model.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import numpy as np
-from scipy import sparse
-from sklearn.base import BaseEstimator, TransformerMixin, OutlierMixin
-from sklearn.linear_model import LinearRegression
-import matplotlib.pyplot as plt
-import tqdm
-
-from typing import List, Tuple, Callable, Optional
-
-
-class Reservoir(BaseEstimator, TransformerMixin):
-    def __init__(self, input_size: int, output_size: int, hidden_units: int, connectivity: float, spectral_radius: float, activation: Callable[[np.ndarray], np.ndarray]):
-        super().__init__()
-
-        self.hidden_units = hidden_units
-        self.activation = activation
-        self.W_in  = np.random.uniform(-0.1, 0.1, (input_size, hidden_units))
-        self.W_s = self._initialize_internal_weights(hidden_units, connectivity, spectral_radius)
-        self.W_fb = np.random.uniform(-0.1, 0.1, (output_size, hidden_units))
-
-    def _initialize_internal_weights(self, n_internal_units, connectivity, spectral_radius) -> np.ndarray:
-        # Generate sparse, uniformly distributed weights.
-        internal_weights = sparse.rand(n_internal_units,
-                                       n_internal_units,
-                                       density=connectivity).todense()
-
-        # Ensure that the nonzero values are uniformly distributed in [-0.5, 0.5]
-        internal_weights[np.where(internal_weights > 0)] -= 0.5
-
-        # Adjust the spectral radius.
-        E, _ = np.linalg.eig(internal_weights)
-        e_max = np.max(np.abs(E))
-        internal_weights /= np.abs(e_max) / spectral_radius
-
-        return internal_weights
-
-    def _calc_state(self, x: np.ndarray, last_state: np.ndarray, last_output: np.ndarray):
-        state = x.dot(self.W_in) + last_state.dot(self.W_s) + last_output.dot(self.W_fb)
-        state = self.activation(state)
-        return state
-
-    def fit_transform(self, X: Tuple[np.ndarray, Optional[np.ndarray], Optional[np.ndarray]], y=None, **fit_params) -> np.ndarray:
-        """
-        :param X: all Xs need the following shapes (batch_size, n_components)np.ndarray
-                  and are expected to be (input, last_state, last_output)
-        :param y: not needed
-        :param fit_params: not needed
-        :return: `window_size` outputs of the Reservoir
-        """
-
-        current_input, last_state, last_output = X
-
-        if last_state is None and last_output is None:
-            last_state = np.zeros((1, self.hidden_units))
-            last_output = np.zeros_like(current_input)
-
-        state = self._calc_state(current_input, last_state, last_output)
-        return state
-
-
-class HealthESN(BaseEstimator, OutlierMixin):
-    def __init__(self,
-                 n_dimensions: int,
-                 hidden_units: int,
-                 window_size: int,
-                 connectivity: float,
-                 spectral_radius: float,
-                 activation: Callable[[np.ndarray], np.ndarray],
-                 seed: int
-                 ):
-        super().__init__()
-
-        np.random.seed(seed)
-
-        self.esn = Reservoir(n_dimensions, n_dimensions, hidden_units, connectivity, spectral_radius, activation)
-        self.w_out = LinearRegression()
-        self.window_size = window_size
-        sigma = np.arange(self.window_size)[::-1]
-        self.sigma = sigma / sigma.sum()
-
-    def fit(self, X: np.ndarray) -> 'HealthESN':
-        y = X[1:]
-        x = X[:-1]
-
-        last_state = None
-        last_output = None
-        states: List[np.ndarray] = []
-        for t in tqdm.trange(x.shape[0]):
-            x_ = (x[[t]], last_state, last_output)
-            state = self.esn.fit_transform(x_)
-            states.append(state)
-            last_state = state
-            last_output = y[[t]]
-
-        self.w_out.fit(np.asarray(np.concatenate(states, axis=0)), y)
-        return self
-
-    def predict(self, X: np.ndarray) -> np.ndarray:
-        states = []
-        last_state = None
-        last_output = None
-        for i in tqdm.trange(self.window_size, X.shape[0]):
-            for p in reversed(range(1, self.window_size + 1)):
-                x = (X[i-p], last_state, last_output)
-                state = self.esn.fit_transform(x, X[i-p+1])
-                last_state = state
-                last_output = X[i-p+1]
-            states.append(last_state)
-        outputs = self.w_out.predict(np.asarray(np.concatenate(states, axis=0)))
-
-        scores = np.linalg.norm(X[self.window_size:] - outputs, axis=1)
-        scores = np.concatenate([np.zeros(X.shape[0]-scores.shape[0]) + np.nan, scores])
-
-        return scores
-
-    def predict_paper(self, X: np.ndarray) -> np.ndarray:
-        outputs = np.zeros((X.shape[0] - self.window_size + 1, X.shape[1]))
-        for i in tqdm.trange(self.window_size, X.shape[0]-1):
-            d = X[i - self.window_size + 1:i+2].copy()
-            t = np.zeros((self.window_size, X.shape[1]))
-            last_state = None
-            last_output = None
-            for j in range(self.window_size):
-                for p in range(self.window_size):
-                    x = (d[p], last_state, last_output)
-                    y = d[p+1]
-                    state = self.esn.fit_transform(x, y)
-                    last_state = state
-                    last_output = self.w_out.predict(state)
-                t[j] = last_output
-                d[j] = t[j]
-            outputs[i-self.window_size + 1] = self.sigma.dot(t)
-
-        score = np.mean((X[self.window_size - 1:] - outputs)**2, axis=1)
-
-        plt.plot(X[self.window_size:], label="data")
-        plt.plot(outputs, label="predicted")
-        plt.legend()
-        plt.show()
-
-        return score
-
-    def fit_predict(self, X, y=None):
-        self.fit(X)
-        return self.predict(X)
diff --git a/algorithms/health_esn/manifest.json b/algorithms/health_esn/manifest.json
deleted file mode 100644
index 68f765643a1083320b8c795ed5643b4b2e4cbc9b..0000000000000000000000000000000000000000
--- a/algorithms/health_esn/manifest.json
+++ /dev/null
@@ -1,62 +0,0 @@
-{
-    "title": "HealthESN",
-    "description": "Implementation of https://doi.org/10.1007/s00521-018-3747-z",
-    "inputDimensionality": "multivariate",
-    "version": "0.3.0",
-    "authors": "Qing Chen, Anguo Zhang, Tingwen Huang, Qianping He, Yongduan Song",
-    "language": "Python",
-    "type": "Detector",
-    "mainFile": "algorithm.py",
-    "learningType": "semi-supervised",
-    "trainingStep": {
-        "parameters": [
-            {
-                "name": "linear_hidden_size",
-                "type": "int",
-                "defaultValue": 500,
-                "optional": "true",
-                "description": "Hidden units in ESN reservoir."
-            },
-            {
-                "name": "prediction_window_size",
-                "type": "int",
-                "defaultValue": 20,
-                "optional": "true",
-                "description": "Window of predicted points in the future."
-            },
-            {
-                "name": "connectivity",
-                "type": "float",
-                "defaultValue": 0.25,
-                "optional": "true",
-                "description": "How dense the units in the reservoir are connected (= percentage of non-zero weights)"
-            },
-            {
-                "name": "spectral_radius",
-                "type": "float",
-                "defaultValue": 0.6,
-                "optional": "true",
-                "description": "Factor used for random initialization of ESN neural connections."
-            },
-            {
-                "name": "activation",
-                "type": "enum[tanh,sigmoid]",
-                "defaultValue": "tanh",
-                "optional": "true",
-                "description": "Activation function used for the ESN."
-            },
-            {
-                "name": "random_state",
-                "type": "int",
-                "defaultValue": 42,
-                "optional": "true",
-                "description": "Seed for the random number generator"
-            }
-        ],
-        "modelInput": "none"
-    },
-    "executionStep": {
-        "parameters": [],
-        "modelInput": "required"
-    }
-}
diff --git a/algorithms/health_esn/requirements.txt b/algorithms/health_esn/requirements.txt
deleted file mode 100644
index a265258aa90d80796c67a740365cfc2ffc7093fe..0000000000000000000000000000000000000000
--- a/algorithms/health_esn/requirements.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-numpy
-pandas
-scipy
-scikit-learn
-matplotlib
-tqdm
diff --git a/algorithms/if_lof/Dockerfile b/algorithms/if_lof/Dockerfile
deleted file mode 100644
index 67dd034eef14c49c09ebc62fff84008f1934e8e4..0000000000000000000000000000000000000000
--- a/algorithms/if_lof/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM ghcr.io/timeeval/python3-base:0.3.0
-
-LABEL maintainer="rohan.sawahn@student.hpi.de"
-LABEL org.opencontainers.image.licenses=MIT
-
-ENV ALGORITHM_MAIN="/app/algorithm.py"
-
-COPY requirements.txt /app/
-RUN pip install -r requirements.txt
-
-COPY algorithm.py /app/
-COPY manifest.json /app/
diff --git a/algorithms/if_lof/LICENSE b/algorithms/if_lof/LICENSE
deleted file mode 100644
index 2db883d8cabaf3f696cc2eb5ed48661877d41795..0000000000000000000000000000000000000000
--- a/algorithms/if_lof/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2020-2022 Rohan Sawahn, Phillip Wenig and Sebastian Schmidl
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/algorithms/if_lof/README.md b/algorithms/if_lof/README.md
deleted file mode 100644
index 83dd0d7df8da3c830b755ae4b08c6e2d31ff973a..0000000000000000000000000000000000000000
--- a/algorithms/if_lof/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Isolation Forest - Local Outlier Factor (IF-LOF)
-
-|||
-| :--- | :--- |
-| Citekey | ChengEtAl2019Outlier |
-| Source Code | own |
-| Learning type | unsupervised |
-| Input dimensionality | multivariate |
-|||
-
-## Output Format
-
-The output will be an anomaly score for every input data point.
-
-## Dependencies
-
-- python 3
-
-## Copyright notice and citation format
-
-> Zhangyu Cheng, Chengming Zou, and Jianwei Dong. 2019. Outlier detection using isolation forest and local outlier factor. In Proceedings of the Conference on Research in Adaptive and Convergent Systems (RACS '19). Association for Computing Machinery, New York, NY, USA, 161–168. DOI:https://doi.org/10.1145/3338840.3355641
diff --git a/algorithms/if_lof/algorithm.py b/algorithms/if_lof/algorithm.py
deleted file mode 100644
index 2d2374f2c53242c37559aff6ff90dc55aadb7bbc..0000000000000000000000000000000000000000
--- a/algorithms/if_lof/algorithm.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/env python3
-
-import sys
-import json
-import math
-import heapq
-
-import numpy as np
-from typing import Optional
-from pathlib import Path
-from sklearn.neighbors import LocalOutlierFactor
-from sklearn.ensemble import IsolationForest
-
-
-class Config:
-    dataInput: Path
-    dataOutput: Path
-    executionType: str
-    n_trees: int
-    max_samples: Optional[float]
-    n_neighbors: int
-    alpha: float
-    m: int
-    random_state: int
-
-    def __init__(self, params):
-        self.dataInput = Path(params.get('dataInput',
-                                         '/data/dataset.csv'))
-        self.dataOutput = Path(params.get('dataOutput',
-                                          '/results/anomaly_window_scores.ts'))
-        self.executionType = params.get('executionType',
-                                        'execute')
-        try:
-            customParameters = params['customParameters']
-        except KeyError:
-            customParameters = {}
-        self.n_trees = customParameters.get('n_trees', 200)
-        self.max_samples = customParameters.get('max_samples', None)
-        self.n_neighbors = customParameters.get('n_neighbors', 20)
-        self.alpha = customParameters.get('alpha', 1)
-        self.m = customParameters.get('m', None)
-        self.random_state = customParameters.get('random_state', 42)
-
-
-def set_random_state(config) -> None:
-    seed = config.random_state
-    import random
-    random.seed(seed)
-    np.random.seed(seed)
-
-
-def read_data(config: Config):
-    print('Reading Data...')
-    X = np.genfromtxt(config.dataInput, delimiter=',', skip_header=True)
-    # skip first col (index) and last col (label)
-    X = X[:, 1:-1]
-    print('Data')
-    print('  samples:', X.shape[0])
-    print('  dims:', X.shape[1])
-    if config.m is None:
-        config.m = len(X[0])
-    return (X, config)
-
-
-def compute_iforest_scores(X, config: Config):
-    print('Configuring forest...')
-    max_samples = config.max_samples if config.max_samples else "auto"
-    forest = IsolationForest(n_estimators=config.n_trees,
-                             max_samples=max_samples,
-                             random_state=config.random_state)
-    forest.fit(X)
-    print('Computing Forest scores...')
-    scores = forest.decision_function(X)
-    return -scores
-
-
-def save_results(data, path: str):
-    print(f'Saving Results to {path}')
-    np.savetxt(path, data, delimiter=',', fmt='%f')
-    print('Results saved')
-
-
-def outlier_coefficient_for_attribute(attr_index: int, data):
-    ''' The original paper is incorrect and imprecise here.
-    My assumption is that we want to calculate the following:
-    | empirical_standard_deviation(attr) / mean(attr) | '''
-
-    attr = data[:, attr_index]
-    mean = np.mean(attr)
-    esd = np.std(attr)
-    # We take the absolute value in case of a negative mean
-    return np.abs(esd / mean)
-
-
-def prune_data(config: Config, data, anomaly_scores):
-    ''' The original paper is very inaccurate here and it is sometimes hard
-    to grasp the meaning of its variables. Please be aware that
-    this method might not be exactly what the authors intended, but it reflects
-    my best interpretation of what they were trying to do. The pruning is
-    described in section 3.3 of the paper.'''
-
-    print('Pruning data...')
-
-    outlier_coefficients = [outlier_coefficient_for_attribute(attr_index, data)
-                            for attr_index in range(len(data[0]))]
-
-    # assumption: We want to get the m outlier coefficients with highest value
-    outlier_coefficients.sort(reverse=True)
-    top_m = outlier_coefficients[0:config.m]
-    proportion_of_outliers = (config.alpha * sum(top_m)) / config.m
-
-    # now that we know the proportion of outliers, we return the according
-    # amount of data points with the highest anomaly scores
-    num_outliers = math.ceil(len(data) * proportion_of_outliers)
-    print(f'Num of outlier_candidates {num_outliers}')
-
-    # prune the dataset by removing all points except the num_outlier points with
-    # highest anomaly score
-    min_anomaly_score = heapq.nlargest(num_outliers, anomaly_scores)[-1]
-    outlier_candidates_indexes = [i for i in range(len(data))
-                                  if anomaly_scores[i] >= min_anomaly_score]
-    outlier_candidates = [data[i] for i in outlier_candidates_indexes]
-
-    return (outlier_candidates, outlier_candidates_indexes)
-
-
-def compute_lof(config: Config, data, outlier_candidates):
-    print('Computing local outlier factors ...')
-    lof = LocalOutlierFactor(n_neighbors=config.n_neighbors, novelty=True)
-    lof.fit(data)
-    return -lof.score_samples(outlier_candidates)
-
-
-def continuous_scores(outlier_factors, outlier_indexes, original_ds_len):
-    print("Postprocessing")
-    current_outlier_index = 0
-    res = []
-
-    def is_index_of_outlier_candidate(i):
-        return i in outlier_indexes
-
-    for i in range(0, original_ds_len):
-        if is_index_of_outlier_candidate(i):
-            res.append(outlier_factors[current_outlier_index])
-            current_outlier_index += 1
-        else:
-            res.append(0)
-
-    return res
-
-
-def execute(config: Config):
-    data, config = read_data(config=config)
-    iforest_scores = compute_iforest_scores(X=data, config=config)
-    outlier_candidates, outlier_indexes = prune_data(config=config,
-                                                     data=data,
-                                                     anomaly_scores=iforest_scores)
-    outlier_factors = compute_lof(config=config,
-                                  data=data,
-                                  outlier_candidates=outlier_candidates)
-
-    results = continuous_scores(outlier_factors=outlier_factors,
-                               outlier_indexes=outlier_indexes,
-                               original_ds_len=len(data))
-
-    save_results(data=results, path=config.dataOutput)
-
-
-def parse_args():
-    print(sys.argv)
-    if len(sys.argv) < 2:
-        print('No arguments supplied, using default arguments!',
-              file=sys.stderr)
-        params = {}
-    elif len(sys.argv) > 2:
-        print('Wrong number of arguments given! Single JSON-String expected!',
-              file=sys.stderr)
-        exit(1)
-    else:
-        params = json.loads(sys.argv[1])
-    return Config(params)
-
-
-if __name__ == '__main__':
-    config = parse_args()
-    if config.executionType == 'train':
-        print('Nothing to train.')
-    elif config.executionType == 'execute':
-        execute(config)
-    else:
-        raise Exception('Invalid Execution type given')
diff --git a/algorithms/if_lof/manifest.json b/algorithms/if_lof/manifest.json
deleted file mode 100644
index 3dbbd0aa8430f089d5f6d3aba7d8a8493889f86b..0000000000000000000000000000000000000000
--- a/algorithms/if_lof/manifest.json
+++ /dev/null
@@ -1,58 +0,0 @@
-{
-    "title": "IF-LOF",
-    "description": "Isolation Forest - Local Outlier Factor: Uses a 3 step process - Building an isolation forest, pruning the forest with a computed treshhold, and applies local outlier factor to the resulting dataset",
-    "inputDimensionality": "multivariate",
-    "version": "0.3.0",
-    "authors": "Cheng, Zhangyu and Zou, Chengming and Dong, Jianwei",
-    "type": "Detector",
-    "language": "Python",
-    "learningType": "Unsupervised",
-    "mainFile": "algorithm.py",
-    "executionStep": {
-        "parameters": [
-            {
-                "name": "n_trees",
-                "type": "int",
-                "description": "Number of trees in isolation forest",
-                "defaultValue": 200,
-                "optional": "false"
-            },
-            {
-                "name": "max_samples",
-                "type": "float",
-                "description": "The number of samples to draw from X to train each tree: `max_samples * X.shape[0]`. If unspecified (`null`), then `max_samples=min(256, X.shape[0])`.",
-                "defaultValue": null,
-                "optional": "false"
-            },
-            {
-                "name": "n_neighbors",
-                "type": "int",
-                "description": "Number neighbors to look at in local outlier factor calculation",
-                "defaultValue": 10,
-                "optional": "false"
-            },
-            {
-                "name": "alpha",
-                "type": "float",
-                "description": "Scalar that depends on consideration of the dataset and controls the amount of data to be pruned",
-                "defaultValue": 0.5,
-                "optional": "false"
-            },
-            {
-                "name": "m",
-                "type": "int",
-                "description": "m features with highest scores will be used for pruning",
-                "defaultValue": null,
-                "optional": "true"
-            },
-            {
-                "name": "random_state",
-                "type": "int",
-                "description": "Seed for random number generation.",
-                "defaultValue": 42,
-                "optional": "true"
-            }
-        ],
-        "modelInput": "none"
-    }
-}
diff --git a/algorithms/if_lof/requirements.txt b/algorithms/if_lof/requirements.txt
deleted file mode 100644
index 5cddbb17cf94d1ab654eb558d756c8e6ad1f345c..0000000000000000000000000000000000000000
--- a/algorithms/if_lof/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-numpy
-scikit-learn
diff --git a/algorithms/iforest/Dockerfile b/algorithms/iforest/Dockerfile
deleted file mode 100644
index d3af8f3720eda8c56f65240b14b2ceac7a9f35eb..0000000000000000000000000000000000000000
--- a/algorithms/iforest/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM ghcr.io/timeeval/pyod:0.3.1
-
-LABEL maintainer="sebastian.schmidl@hpi.de"
-LABEL org.opencontainers.image.licenses=MIT
-
-ENV ALGORITHM_MAIN="/app/algorithm.py"
-
-# install algorithm dependencies
-COPY requirements.txt /app/
-RUN pip install -r /app/requirements.txt
-
-COPY algorithm.py /app/
-COPY manifest.json /app/
diff --git a/algorithms/iforest/LICENSE b/algorithms/iforest/LICENSE
deleted file mode 100644
index e594ec89208b9bcc64e3b455e283d3ce07f1b817..0000000000000000000000000000000000000000
--- a/algorithms/iforest/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2020-2022 Phillip Wenig and Sebastian Schmidl
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/algorithms/iforest/README.md b/algorithms/iforest/README.md
deleted file mode 100644
index 15b758943ef52a11caf1a67b2f0057632889a632..0000000000000000000000000000000000000000
--- a/algorithms/iforest/README.md
+++ /dev/null
@@ -1,57 +0,0 @@
-# Isolation Forest (iForest)
-
-|||
-| :--- | :--- |
-| Citekey | LiuEtAl2012IsolationBased |
-| Source Code | https://github.com/yzhao062/pyod/blob/master/pyod/models/iforest.py |
-| Learning type | unsupervised |
-| Input dimensionality | multivariate |
-|||
-
-## Parameters
-
-- `n_estimators`: int, optional (default=100)  
-  The number of base estimators in the ensemble.
-
-- `max_samples`: int or float, optional (default="auto")  
-  The number of samples to draw from X to train each base estimator.
-    - If int, then draw `max_samples` samples.
-    - If float, then draw `max_samples * X.shape[0]` samples.
-    - If "auto", then `max_samples=min(256, n_samples)`.
-  If max_samples is larger than the number of samples provided, all samples will be used for all trees (no sampling).
-
-- `max_features`: int or float, optional (default=1.0)  
-  The number of features to draw from X to train each base estimator.
-    - If int, then draw `max_features` features.
-    - If float, then draw `max_features * X.shape[1]` features.
-
-- `contamination`: float in (0., 0.5), optional (default=0.1)  
-  The amount of contamination of the data set, i.e. the proportion of outliers in the data set.
-  Used when fitting to define the threshold on the decision function.
-  **Automatically determined by algorithm script!!**
-
-- `bootstrap`: bool, optional (default=False)  
-  If True, individual trees are fit on random subsets of the training data sampled with replacement.
-  If False, sampling without replacement is performed.
-
-- `behaviour`: str, default='old'  
-  Behaviour of the `decision_function` which can be either 'old' or 'new'.
-  Passing `behaviour='new'` makes the `decision_function` change to match other anomaly detection algorithm API which will be the default behaviour in the future.
-  As explained in details in the `offset_` attribute documentation, the `decision_function` becomes dependent on the contamination parameter, in such a way that 0 becomes its natural threshold to detect outliers.
-  **REMOVED** (old behavior is used per default!).
-
-- `random_state`: int, RandomState instance or None, optional (default=None)  
-  If int, random_state is the seed used by the random number generator;
-  If RandomState instance, random_state is the random number generator;
-  If None, the random number generator is the RandomState instance used by `np.random`.
-
-- `verbose`: int, optional (default=0)  
-  Controls the verbosity of the tree building process.
-
-- `n_jobs`: integer, optional (default=1)  
-  The number of jobs to run in parallel for both `fit` and `predict`.
-  If -1, then the number of jobs is set to the number of cores.
-
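-A minimal, hypothetical usage sketch of PyOD's `IForest` with the parameters listed above (toy data only; the TimeEval entry point remains `algorithm.py`):
-
-```python
-import numpy as np
-from pyod.models.iforest import IForest
-
-rng = np.random.default_rng(42)
-data = rng.normal(size=(1000, 3))  # toy multivariate values, one row per time step
-
-clf = IForest(n_estimators=100, max_samples="auto", max_features=1.0,
-              bootstrap=False, random_state=42, n_jobs=1)
-clf.fit(data)
-scores = clf.decision_scores_  # one score per point; higher means more anomalous
-```
-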
-## Citation format (for source code)
-
-> Zhao, Y., Nasrullah, Z. and Li, Z., 2019. PyOD: A Python Toolbox for Scalable Outlier Detection. Journal of machine learning research (JMLR), 20(96), pp.1-7.
diff --git a/algorithms/iforest/algorithm.py b/algorithms/iforest/algorithm.py
deleted file mode 100755
index fa2de08793ef096c1ae0eb601d96c8f719d2feb8..0000000000000000000000000000000000000000
--- a/algorithms/iforest/algorithm.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python3
-import argparse
-import json
-import sys
-import numpy as np
-import pandas as pd
-
-from typing import Optional
-from dataclasses import dataclass
-from pyod.models.iforest import IForest
-
-
-@dataclass
-class CustomParameters:
-    n_trees: int = 100
-    max_samples: Optional[float] = None
-    max_features: float = 1.
-    bootstrap: bool = False
-    random_state: int = 42
-    verbose: int = 0
-    n_jobs: int = 1
-
-
-class AlgorithmArgs(argparse.Namespace):
-    @staticmethod
-    def from_sys_args() -> 'AlgorithmArgs':
-        args: dict = json.loads(sys.argv[1])
-        custom_parameter_keys = dir(CustomParameters())
-        filtered_parameters = dict(filter(lambda x: x[0] in custom_parameter_keys, args.get("customParameters", {}).items()))
-        args["customParameters"] = CustomParameters(**filtered_parameters)
-        return AlgorithmArgs(**args)
-
-
-def set_random_state(config: AlgorithmArgs) -> None:
-    seed = config.customParameters.random_state
-    import random
-    random.seed(seed)
-    np.random.seed(seed)
-
-
-def load_data(config: AlgorithmArgs) -> np.ndarray:
-    df = pd.read_csv(config.dataInput)
-    data = df.iloc[:, 1:-1].values
-    labels = df.iloc[:, -1].values
-    contamination = labels.sum() / len(labels)
-    # Use smallest positive float as contamination if there are no anomalies in dataset
-    contamination = np.nextafter(0, 1) if contamination == 0. else contamination
-    return data, contamination
-
-
-def main(config: AlgorithmArgs):
-    set_random_state(config)
-    data, contamination = load_data(config)
-
-    clf = IForest(
-        contamination=contamination,
-        n_estimators=config.customParameters.n_trees,
-        max_samples=config.customParameters.max_samples or "auto",
-        max_features=config.customParameters.max_features,
-        bootstrap=config.customParameters.bootstrap,
-        random_state=config.customParameters.random_state,
-        verbose=config.customParameters.verbose,
-        n_jobs=config.customParameters.n_jobs,
-    )
-    clf.fit(data)
-    scores = clf.decision_scores_
-    np.savetxt(config.dataOutput, scores, delimiter=",")
-
-
-if __name__ == "__main__":
-    if len(sys.argv) != 2:
-        print("Wrong number of arguments specified; expected a single json-string!")
-        exit(1)
-
-    config = AlgorithmArgs.from_sys_args()
-    print(f"Config: {config}")
-
-    if config.executionType == "train":
-        print("Nothing to train, finished!")
-    elif config.executionType == "execute":
-        main(config)
-    else:
-        raise ValueError(f"Unknown execution type '{config.executionType}'; expected either 'train' or 'execute'!")
diff --git a/algorithms/iforest/manifest.json b/algorithms/iforest/manifest.json
deleted file mode 100644
index 070ca8779f77d4cd46f7288d787b7a08a5b215ef..0000000000000000000000000000000000000000
--- a/algorithms/iforest/manifest.json
+++ /dev/null
@@ -1,65 +0,0 @@
-{
-    "title": "Isolation Forest (iForest)",
-    "description": "Implementation of https://doi.org/10.1145/2133360.2133363.",
-    "inputDimensionality": "multivariate",
-    "version": "0.3.1",
-    "authors": "Fei Tony Liu and Kai Ming Ting and Zhi-Hua Zhou",
-    "language": "Python",
-    "type": "Detector",
-    "mainFile": "algorithm.py",
-    "learningType": "unsupervised",
-    "executionStep": {
-        "parameters": [
-            {
-                "name": "n_trees",
-                "type": "int",
-                "defaultValue": 100,
-                "optional": "true",
-                "description": "The number of decision trees (base estimators) in the forest (ensemble)."
-            },
-            {
-                "name": "max_samples",
-                "type": "float",
-                "defaultValue": null,
-                "optional": "true",
-                "description": "The number of samples to draw from X to train each base estimator: `max_samples * X.shape[0]`. If unspecified (`null`), then `max_samples=min(256, n_samples)`."
-            },
-            {
-                "name": "max_features",
-                "type": "float",
-                "defaultValue": 1.0,
-                "optional": "true",
-                "description": "The number of features to draw from X to train each base estimator: `max_features * X.shape[1]`."
-            },
-            {
-                "name": "bootstrap",
-                "type": "boolean",
-                "defaultValue": "false",
-                "optional": "true",
-                "description": "If True, individual trees are fit on random subsets of the training data sampled with replacement. If False, sampling without replacement is performed."
-            },
-            {
-                "name": "random_state",
-                "type": "int",
-                "defaultValue": 42,
-                "optional": "true",
-                "description": "Seed for random number generation."
-            },
-            {
-                "name": "verbose",
-                "type": "int",
-                "defaultValue": 0,
-                "optional": "true",
-                "description": "Controls the verbosity of the tree building process logs."
-            },
-            {
-                "name": "n_jobs",
-                "type": "int",
-                "defaultValue": 1,
-                "optional": "true",
-                "description": "The number of jobs to run in parallel. If -1, then the number of jobs is set to the number of cores."
-            }
-        ],
-        "modelInput": "none"
-    }
-}
diff --git a/algorithms/iforest/requirements.txt b/algorithms/iforest/requirements.txt
deleted file mode 100644
index bec81306730ae25959144bbb9867f9c0b191a221..0000000000000000000000000000000000000000
--- a/algorithms/iforest/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-numpy>=1.19.5
-pandas>=1.2.1
-pyod>=0.9.2
diff --git a/algorithms/kmeans/.dockerignore b/algorithms/kmeans/.dockerignore
deleted file mode 100644
index 2d6e6795ca5b5007b9458c986620012864ac2265..0000000000000000000000000000000000000000
--- a/algorithms/kmeans/.dockerignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*.png
-README.md
-.dockerignore
-Dockerfile
-**/__pycache__
diff --git a/algorithms/kmeans/Dockerfile b/algorithms/kmeans/Dockerfile
deleted file mode 100644
index 69c122490d5fc68b3c0ef8cd9497400a7c19d4c5..0000000000000000000000000000000000000000
--- a/algorithms/kmeans/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM ghcr.io/timeeval/python3-base:0.3.0
-
-LABEL maintainer="phillip.wenig@hpi.de"
-LABEL org.opencontainers.image.licenses=MIT
-
-ENV ALGORITHM_MAIN="/app/algorithm.py"
-
-# install algorithm dependencies
-COPY requirements.txt /app/
-
-RUN pip install -r /app/requirements.txt
-
-COPY . /app/
diff --git a/algorithms/kmeans/LICENSE b/algorithms/kmeans/LICENSE
deleted file mode 100644
index 739212c174b572a3094768f1d2aff989b5a9f701..0000000000000000000000000000000000000000
--- a/algorithms/kmeans/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2020-2023 Phillip Wenig and Sebastian Schmidl
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/algorithms/kmeans/README.md b/algorithms/kmeans/README.md
deleted file mode 100644
index 9162c8402c263202b797a32c97c965d9545612d5..0000000000000000000000000000000000000000
--- a/algorithms/kmeans/README.md
+++ /dev/null
@@ -1,52 +0,0 @@
-# K-Means
-
-|||
-| :--- | :--- |
-| Citekey | YairiEtAl2001Fault |
-| Source | `own` |
-| Learning type | unsupervised |
-| Input dimensionality | multivariate |
-|||
-
-## Dependencies
-
-- python 3
-
-## Hyper Parameters
-
-### k (n_clusters)
-
-`k` is the number of clusters to be fitted to the data. The bigger `k` is, the less noisy the anomaly scores are.
-
-Small `k` (k==2)
-![small k](./small-k.png)
-
-Big `k` (k==20)
-![big k](./big-k.png)
-
-### window_size
-
-This parameter defines the number of data points chunked into one window. The bigger `window_size` is, the bigger the anomaly context is. If it is too big, things seem anomalous that are not. If it is too small, the algorithm cannot find anomalous windows and loses its time context.
-If `window_size` (`anomaly_window_size`) is smaller than the anomaly, the algorithm might only detect the transitions between normal data and anomaly.
-
-Small `window_size` (window_size == 5)
-![small p](./small-window-size.png)
-
-Big `window_size` (window_size == 50)
-![big p](./big-window-size.png)
-
-### stride
-
-It is the step size between windows. The larger `stride` is, the noisier the scores get.
-
-Small `stride` (stride == 1)
-![small p](./small-stride.png)
-
-Big `stride` (stride == 20)
-![big p](./big-stride.png)
-
-(Plots were made after post-processing)
-
-## Notes
-
-KMeans automatically computes point-wise anomaly scores.
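-
-A minimal, hypothetical usage sketch of the `KMeansAD` class from `kmeans/model.py` with the hyper parameters described above (toy data only; the TimeEval entry point remains `algorithm.py`):
-
-```python
-import numpy as np
-from kmeans.model import KMeansAD
-
-rng = np.random.default_rng(42)
-ts = rng.normal(size=(1000, 1))  # toy univariate series as a single value column
-
-detector = KMeansAD(k=20, window_size=20, stride=1, n_jobs=1)
-scores = detector.fit_predict(ts)  # point-wise scores after reverse-windowing
-```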
diff --git a/algorithms/kmeans/algorithm.py b/algorithms/kmeans/algorithm.py
deleted file mode 100644
index 556966905e6dfe7ce4c5f577d9e01d58a3ba495f..0000000000000000000000000000000000000000
--- a/algorithms/kmeans/algorithm.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import numpy as np
-import pandas as pd
-import json
-import sys
-from dataclasses import dataclass, asdict
-import argparse
-
-from kmeans.model import KMeansAD
-
-
-@dataclass
-class CustomParameters:
-    n_clusters: int = 20
-    anomaly_window_size: int = 20
-    stride: int = 1
-    n_jobs: int = 1
-    random_state: int = 42
-
-
-class AlgorithmArgs(argparse.Namespace):
-    @property
-    def ts(self) -> np.ndarray:
-        return self.df.iloc[:, 1:-1].values
-
-    @property
-    def df(self) -> pd.DataFrame:
-        return pd.read_csv(self.dataInput)
-
-    @staticmethod
-    def from_sys_args() -> 'AlgorithmArgs':
-        args: dict = json.loads(sys.argv[1])
-        custom_parameter_keys = dir(CustomParameters())
-        filtered_parameters = dict(
-            filter(lambda x: x[0] in custom_parameter_keys, args.get("customParameters", {}).items()))
-        args["customParameters"] = CustomParameters(**filtered_parameters)
-        return AlgorithmArgs(**args)
-
-
-def set_random_state(config: AlgorithmArgs) -> None:
-    seed = config.customParameters.random_state
-    import random
-    random.seed(seed)
-    np.random.seed(seed)
-
-
-def execute(args: AlgorithmArgs):
-    set_random_state(args)
-    data = args.ts
-    params = asdict(args.customParameters)
-    params["k"] = params["n_clusters"]
-    params["window_size"] = params["anomaly_window_size"]
-    del params["n_clusters"]
-    del params["random_state"]
-    del params["anomaly_window_size"]
-    detector = KMeansAD(**params)
-    anomaly_scores = detector.fit_predict(data)
-    anomaly_scores.tofile(args.dataOutput, sep="\n")
-
-
-if __name__ == "__main__":
-    args = AlgorithmArgs.from_sys_args()
-
-    if args.executionType == "train":
-        print("This algorithm does not need to be trained!")
-    elif args.executionType == "execute":
-        execute(args)
-    else:
-        raise ValueError(f"No executionType '{args.executionType}' available! Choose either 'train' or 'execute'.")
diff --git a/algorithms/kmeans/big-k.png b/algorithms/kmeans/big-k.png
deleted file mode 100644
index e0597873ad47ee487da367fdd7ba934c6b4b8bbb..0000000000000000000000000000000000000000
Binary files a/algorithms/kmeans/big-k.png and /dev/null differ
diff --git a/algorithms/kmeans/big-stride.png b/algorithms/kmeans/big-stride.png
deleted file mode 100644
index e8696a7b61db8954510c8e0848e99e847beb09bf..0000000000000000000000000000000000000000
Binary files a/algorithms/kmeans/big-stride.png and /dev/null differ
diff --git a/algorithms/kmeans/big-window-size.png b/algorithms/kmeans/big-window-size.png
deleted file mode 100644
index 07c5859c163a257ae31aa3cc55f1b2d8c491f2a2..0000000000000000000000000000000000000000
Binary files a/algorithms/kmeans/big-window-size.png and /dev/null differ
diff --git a/algorithms/kmeans/kmeans/__init__.py b/algorithms/kmeans/kmeans/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/algorithms/kmeans/kmeans/model.py b/algorithms/kmeans/kmeans/model.py
deleted file mode 100644
index d1fb8b5331d66d552e905b8f443b900ca4d806ed..0000000000000000000000000000000000000000
--- a/algorithms/kmeans/kmeans/model.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from sklearn.base import BaseEstimator, OutlierMixin
-from sklearn.cluster import KMeans
-import numpy as np
-from numpy.lib.stride_tricks import sliding_window_view
-
-
-class KMeansAD(BaseEstimator, OutlierMixin):
-    def __init__(self, k: int, window_size: int, stride: int, n_jobs: int):
-        self.k = k
-        self.window_size = window_size
-        self.stride = stride
-        self.model = KMeans(n_clusters=k)
-        self.padding_length = 0
-
-    def _preprocess_data(self, X: np.ndarray) -> np.ndarray:
-        flat_shape = (X.shape[0] - (self.window_size - 1), -1)  # in case we have a multivariate TS
-        slides = sliding_window_view(X, window_shape=self.window_size, axis=0).reshape(flat_shape)[::self.stride, :]
-        self.padding_length = X.shape[0] - (slides.shape[0] * self.stride + self.window_size - self.stride)
-        print(f"Required padding_length={self.padding_length}")
-        return slides
-
-    def _custom_reverse_windowing(self, scores: np.ndarray) -> np.ndarray:
-        print("Reversing window-based scores to point-based scores:")
-        print(f"Before reverse-windowing: scores.shape={scores.shape}")
-        # compute begin and end indices of windows
-        begins = np.array([i * self.stride for i in range(scores.shape[0])])
-        ends = begins + self.window_size
-
-        # prepare target array
-        unwindowed_length = self.stride * (scores.shape[0] - 1) + self.window_size + self.padding_length
-        mapped = np.full(unwindowed_length, fill_value=np.nan)
-
-        # only iterate over window intersections
-        indices = np.unique(np.r_[begins, ends])
-        for i, j in zip(indices[:-1], indices[1:]):
-            window_indices = np.flatnonzero((begins <= i) & (j-1 < ends))
-            # print(i, j, window_indices)
-            mapped[i:j] = np.nanmean(scores[window_indices])
-
-        # replace untouched indices with 0 (especially for the padding at the end)
-        np.nan_to_num(mapped, copy=False)
-        print(f"After reverse-windowing: scores.shape={mapped.shape}")
-        return mapped
-
-    def fit(self, X: np.ndarray, y=None, preprocess=True) -> 'KMeansAD':
-        if preprocess:
-            X = self._preprocess_data(X)
-        self.model.fit(X)
-        return self
-
-    def predict(self, X: np.ndarray, preprocess=True) -> np.ndarray:
-        if preprocess:
-            X = self._preprocess_data(X)
-        clusters = self.model.predict(X)
-        diffs = np.linalg.norm(X - self.model.cluster_centers_[clusters], axis=1)
-        return self._custom_reverse_windowing(diffs)
-
-    def fit_predict(self, X, y=None) -> np.ndarray:
-        X = self._preprocess_data(X)
-        self.fit(X, y, preprocess=False)
-        return self.predict(X, preprocess=False)
diff --git a/algorithms/kmeans/manifest.json b/algorithms/kmeans/manifest.json
deleted file mode 100644
index ef0e3601ee219e1233c722e90fdb4f3f131197fa..0000000000000000000000000000000000000000
--- a/algorithms/kmeans/manifest.json
+++ /dev/null
@@ -1,51 +0,0 @@
-{
-    "title": "k-Means",
-    "description": "Implementation of http://robotics.estec.esa.int/i-SAIRAS/isairas2001/papers/Paper_AS012.pdf",
-    "inputDimensionality": "multivariate",
-    "version": "0.3.0",
-    "authors": "Takehisa Yairi, Yoshikiyo Kato, Koichi Hori",
-    "language": "Python",
-    "type": "Detector",
-    "mainFile": "algorithm.py",
-    "learningType": "unsupervised",
-    "executionStep": {
-        "parameters": [
-            {
-                "name": "n_clusters",
-                "type": "int",
-                "defaultValue": 20,
-                "optional": "true",
-                "description": "The number of clusters to form as well as the number of centroids to generate. The bigger `n_clusters` (`k`) is, the less noisy the anomaly scores are."
-            },
-            {
-                "name": "anomaly_window_size",
-                "type": "int",
-                "defaultValue": 20,
-                "optional": "true",
-                "description": "Size of sliding windows. The bigger `window_size` is, the bigger the anomaly context is. If it's to big, things seem anomalous that are not. If it's too small, the algorithm is not able to find anomalous windows and looses its time context."
-            },
-            {
-                "name": "stride",
-                "type": "int",
-                "defaultValue": 1,
-                "optional": "true",
-                "description": "Stride of sliding windows. It is the step size between windows. The larger `stride` is, the noisier the scores get. If `stride == window_size`, they are tumbling windows."
-            },
-            {
-                "name": "n_jobs",
-                "type": "int",
-                "defaultValue": 1,
-                "optional": "true",
-                "description": "Internal parallelism used (sample-wise in the main loop which assigns each sample to its closest center). If `-1` or `None`, all available CPUs are used."
-            },
-            {
-                "name": "random_state",
-                "type": "int",
-                "defaultValue": 42,
-                "optional": "true",
-                "description": "Seed for random number generation."
-            }
-        ],
-        "modelInput": "none"
-    }
-}
diff --git a/algorithms/kmeans/requirements.txt b/algorithms/kmeans/requirements.txt
deleted file mode 100644
index ce766d9d884be1af273182104cb162421e7a9e94..0000000000000000000000000000000000000000
--- a/algorithms/kmeans/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-numpy>=1.19.5
-pandas>=1.2.1
-scikit-learn>=0.24.1
diff --git a/algorithms/kmeans/small-k.png b/algorithms/kmeans/small-k.png
deleted file mode 100644
index 72b2f82e697df888e6f46a0012f076b449cf579a..0000000000000000000000000000000000000000
Binary files a/algorithms/kmeans/small-k.png and /dev/null differ
diff --git a/algorithms/kmeans/small-stride.png b/algorithms/kmeans/small-stride.png
deleted file mode 100644
index fde12eaed493fffb45b3c33ba1cea576eb3dc783..0000000000000000000000000000000000000000
Binary files a/algorithms/kmeans/small-stride.png and /dev/null differ
diff --git a/algorithms/kmeans/small-window-size.png b/algorithms/kmeans/small-window-size.png
deleted file mode 100644
index d0553a5c7623d277bf9d14afa981a8c6f46effff..0000000000000000000000000000000000000000
Binary files a/algorithms/kmeans/small-window-size.png and /dev/null differ
diff --git a/algorithms/lof/Dockerfile b/algorithms/lof/Dockerfile
deleted file mode 100644
index 0388a4239b903d931acfc9bb70f09ed1bb2ca468..0000000000000000000000000000000000000000
--- a/algorithms/lof/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM ghcr.io/timeeval/pyod:0.3.1
-
-LABEL maintainer="sebastian.schmidl@hpi.de"
-LABEL org.opencontainers.image.licenses=MIT
-
-ENV ALGORITHM_MAIN="/app/algorithm.py"
-
-# install algorithm dependencies
-COPY requirements.txt /app/
-RUN pip install -r /app/requirements.txt
-
-COPY manifest.json /app/
-COPY algorithm.py /app/
diff --git a/algorithms/lof/LICENSE b/algorithms/lof/LICENSE
deleted file mode 100644
index e594ec89208b9bcc64e3b455e283d3ce07f1b817..0000000000000000000000000000000000000000
--- a/algorithms/lof/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2020-2022 Phillip Wenig and Sebastian Schmidl
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/algorithms/lof/README.md b/algorithms/lof/README.md
deleted file mode 100644
index 5d89f249d65e26d9f43ae82f34f916d51fe651d3..0000000000000000000000000000000000000000
--- a/algorithms/lof/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# Local outlier factor (LOF)
-
-|||
-| :--- | :--- |
-| Citekey | BreunigEtAl2000LOF |
-| Source Code | https://github.com/yzhao062/pyod/blob/master/pyod/models/lof.py |
-| Learning type | unsupervised |
-| Input dimensionality | multivariate |
-|||
-
-## Parameters
-
-- `n_neighbors`: `int`, optional (default=20)  
-  Number of neighbors to use by default for `kneighbors` queries.
-  If n_neighbors is larger than the number of samples provided, all samples will be used.
-
-- `algorithm`: {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional (default 'auto')  
-  Algorithm used to compute the nearest neighbors:
-
-  - 'ball_tree' will use BallTree
-  - 'kd_tree' will use KDTree
-  - 'brute' will use a brute-force search.
-  - 'auto' will attempt to decide the most appropriate algorithm based on the values passed to `fit` method.
-
-  Note: fitting on sparse input will override the setting of this parameter, using brute force.
-  **REMOVED!!**
-
-- `leaf_size`: int, optional (default=30)  
-  Leaf size passed to `BallTree` or `KDTree`.
-  This can affect the speed of the construction and query, as well as the memory required to store the tree.
-  The optimal value depends on the nature of the problem.
-
-- `metric`: string or callable, default 'minkowski'  
-  Metric used for the distance computation.
-  Any metric from scikit-learn or scipy.spatial.distance can be used.
-  If 'precomputed', the training input X is expected to be a distance matrix.
-  If metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded.
-  The callable should take two arrays as input and return one value indicating the distance between them.
-  This works for Scipy's metrics, but is less efficient than passing the metric name as a string.
-  Valid values for metric are:
-
-  - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan']
-  - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
-  
-  See the documentation for scipy.spatial.distance for details on these metrics:
-  http://docs.scipy.org/doc/scipy/reference/spatial.distance.html.
-  **REMOVED!!**
-
-- `p`: integer, optional (default = 2)  
-  Parameter for the Minkowski metric from sklearn.metrics.pairwise.pairwise_distances.
-  When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2.
-  For arbitrary p, minkowski_distance (l_p) is used.
-  See http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.
-  **Renamed to `distance_metric_order`!!**
-
-- `metric_params`: dict, optional (default = None)  
-  Additional keyword arguments for the metric function.
-  **REMOVED!!**
-
-- `contamination`: float in (0., 0.5), optional (default=0.1)  
-  The amount of contamination of the data set, i.e. the proportion of outliers in the data set.
-  When fitting this is used to define the threshold on the decision function.
-  **Automatically determined by algorithm script!!**
-
-- `n_jobs`: int, optional (default = 1)  
-  The number of parallel jobs to run for neighbors search.
-  If ``-1``, then the number of jobs is set to the number of CPU cores.
-  Affects only kneighbors and kneighbors_graph methods.
-
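-A minimal, hypothetical usage sketch of PyOD's `LOF` with the remaining exposed parameters (toy data only; note that `distance_metric_order` is passed to PyOD as `p`):
-
-```python
-import numpy as np
-from pyod.models.lof import LOF
-
-rng = np.random.default_rng(42)
-data = rng.normal(size=(1000, 3))  # toy multivariate values
-
-clf = LOF(n_neighbors=20, leaf_size=30, p=2, n_jobs=1)
-clf.fit(data)
-scores = clf.decision_scores_  # one score per point; higher means more anomalous
-```
-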
-## Citation format (for source code)
-
-> Zhao, Y., Nasrullah, Z. and Li, Z., 2019. PyOD: A Python Toolbox for Scalable Outlier Detection. Journal of machine learning research (JMLR), 20(96), pp.1-7.
diff --git a/algorithms/lof/algorithm.py b/algorithms/lof/algorithm.py
deleted file mode 100755
index b89298ce7476b045fb6fa5d2475f428775073da7..0000000000000000000000000000000000000000
--- a/algorithms/lof/algorithm.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env python3
-import argparse
-import json
-import sys
-import numpy as np
-import pandas as pd
-
-from dataclasses import dataclass
-from pyod.models.lof import LOF
-
-
-@dataclass
-class CustomParameters:
-    n_neighbors: int = 20
-    leaf_size: int = 30
-    distance_metric_order: int = 2
-    n_jobs: int = 1
-    algorithm: str = "auto"  # using default is fine
-    distance_metric: str = "minkowski"  # using default is fine
-    random_state: int = 42
-
-
-class AlgorithmArgs(argparse.Namespace):
-    @staticmethod
-    def from_sys_args() -> 'AlgorithmArgs':
-        args: dict = json.loads(sys.argv[1])
-        custom_parameter_keys = dir(CustomParameters())
-        filtered_parameters = dict(filter(lambda x: x[0] in custom_parameter_keys, args.get("customParameters", {}).items()))
-        args["customParameters"] = CustomParameters(**filtered_parameters)
-        return AlgorithmArgs(**args)
-
-
-def set_random_state(config: AlgorithmArgs) -> None:
-    seed = config.customParameters.random_state
-    import random
-    random.seed(seed)
-    np.random.seed(seed)
-
-
-def load_data(config: AlgorithmArgs) -> np.ndarray:
-    df = pd.read_csv(config.dataInput)
-    data = df.iloc[:, 1:-1].values
-    labels = df.iloc[:, -1].values
-    contamination = labels.sum() / len(labels)
-    # Use smallest positive float as contamination if there are no anomalies in dataset
-    contamination = np.nextafter(0, 1) if contamination == 0. else contamination
-    return data, contamination
-
-
-def main(config: AlgorithmArgs):
-    set_random_state(config)
-    data, contamination = load_data(config)
-
-    clf = LOF(
-        contamination=contamination,
-        n_neighbors=config.customParameters.n_neighbors,
-        leaf_size=config.customParameters.leaf_size,
-        n_jobs=config.customParameters.n_jobs,
-        algorithm=config.customParameters.algorithm,
-        metric=config.customParameters.distance_metric,
-        metric_params=None,
-        p=config.customParameters.distance_metric_order,
-    )
-    clf.fit(data)
-    scores = clf.decision_scores_
-    np.savetxt(config.dataOutput, scores, delimiter=",")
-
-
-if __name__ == "__main__":
-    if len(sys.argv) != 2:
-        print("Wrong number of arguments specified; expected a single json-string!")
-        exit(1)
-
-    config = AlgorithmArgs.from_sys_args()
-    print(f"Config: {config}")
-
-    if config.executionType == "train":
-        print("Nothing to train, finished!")
-    elif config.executionType == "execute":
-        main(config)
-    else:
-        raise ValueError(f"Unknown execution type '{config.executionType}'; expected either 'train' or 'execute'!")
diff --git a/algorithms/lof/manifest.json b/algorithms/lof/manifest.json
deleted file mode 100644
index dbb14eb69f603157038e5c328f2eb780d4ffca0a..0000000000000000000000000000000000000000
--- a/algorithms/lof/manifest.json
+++ /dev/null
@@ -1,48 +0,0 @@
-{
-    "title": "LOF",
-    "description": "Implementation of https://doi.org/10.1145/342009.335388.",
-    "inputDimensionality": "multivariate",
-    "version": "0.3.1",
-    "authors": "Markus M. Breunig, Hans-Peter Kriegel, Raymond T. Ng, Jörg Sander",
-    "language": "Python",
-    "type": "Detector",
-    "mainFile": "algorithm.py",
-    "learningType": "unsupervised",
-    "executionStep": {
-        "parameters": [
-            {
-                "name": "n_neighbors",
-                "type": "int",
-                "defaultValue": 20,
-                "optional": "true",
-                "description": "Number of neighbors to use by default for `kneighbors` queries. If n_neighbors is larger than the number of samples provided, all samples will be used."
-            },{
-                "name": "leaf_size",
-                "type": "int",
-                "defaultValue": 30,
-                "optional": "true",
-                "description": "Leaf size passed to `BallTree` or `KDTree`. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem."
-            },{
-                "name": "distance_metric_order",
-                "type": "int",
-                "defaultValue": 2,
-                "optional": "true",
-                "description": "Parameter for the Minkowski metric from sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. See http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances."
-            },{
-                "name": "n_jobs",
-                "type": "int",
-                "defaultValue": 1,
-                "optional": "true",
-                "description": "The number of parallel jobs to run for neighbors search. If ``-1``, then the number of jobs is set to the number of CPU cores. Affects only kneighbors and kneighbors_graph methods."
-            },
-            {
-                "name": "random_state",
-                "type": "int",
-                "defaultValue": 42,
-                "optional": "true",
-                "description": "Seed for random number generation."
-            }
-        ],
-        "modelInput": "none"
-    }
-}
diff --git a/algorithms/lof/requirements.txt b/algorithms/lof/requirements.txt
deleted file mode 100644
index bec81306730ae25959144bbb9867f9c0b191a221..0000000000000000000000000000000000000000
--- a/algorithms/lof/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-numpy>=1.19.5
-pandas>=1.2.1
-pyod>=0.9.2
diff --git a/algorithms/lstm_ad/Dockerfile b/algorithms/lstm_ad/Dockerfile
deleted file mode 100644
index 373518a21a65c46fad888ddf8a72c41599cee83b..0000000000000000000000000000000000000000
--- a/algorithms/lstm_ad/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM ghcr.io/timeeval/python3-torch:0.3.0
-
-LABEL maintainer="phillip.wenig@hpi.de"
-LABEL org.opencontainers.image.licenses=MIT
-
-ENV ALGORITHM_MAIN="/app/algorithm.py"
-
-# install algorithm dependencies
-COPY requirements.txt /app/
-RUN pip install -r /app/requirements.txt
-
-COPY lstm_ad /app/lstm_ad
-COPY manifest.json /app/
-COPY algorithm.py /app/
diff --git a/algorithms/lstm_ad/LICENSE b/algorithms/lstm_ad/LICENSE
deleted file mode 100644
index e594ec89208b9bcc64e3b455e283d3ce07f1b817..0000000000000000000000000000000000000000
--- a/algorithms/lstm_ad/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2020-2022 Phillip Wenig and Sebastian Schmidl
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/algorithms/lstm_ad/README.md b/algorithms/lstm_ad/README.md
deleted file mode 100644
index f09de770528a4cb33a39d1eeed29f906303a310d..0000000000000000000000000000000000000000
--- a/algorithms/lstm_ad/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# LSTM-AD
-
-|||
-| :--- | :--- |
-| Citekey | MalhotraEtAl2015Long |
-| Source code | `own` |
-| Learning type | semi-supervised |
-| Input dimensionality | multivariate |
-|||
-
-## Dependencies
-
-- python 3
-- pytorch
-
-## Notes
-
-LSTM-AD outputs anomaly scores for windows.
-The results require post-processing.
-The scores for each point can be assigned by aggregating the anomaly scores for each window the point is included in.
-
-You can use the following code snippet for the post-processing step in TimeEval (default parameters directly filled in from the source code):
-
-<!--BEGIN:timeeval-post-->
-```python
-from timeeval.utils.window import ReverseWindowing
-# post-processing for LSTM-AD
-def post_lstm_ad(scores: np.ndarray, args: dict) -> np.ndarray:
-    window_size = args.get("hyper_params", {}).get("window_size", 30)
-    prediction_window_size = args.get("hyper_params", {}).get("prediction_window_size", 1)
-    return ReverseWindowing(window_size=window_size + prediction_window_size).fit_transform(scores)
-```
-<!--END:timeeval-post-->
diff --git a/algorithms/lstm_ad/algorithm.py b/algorithms/lstm_ad/algorithm.py
deleted file mode 100644
index fc39551747cdfb6845125abf7ca1c76832bb89f7..0000000000000000000000000000000000000000
--- a/algorithms/lstm_ad/algorithm.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import argparse
-from dataclasses import dataclass, asdict, field
-import json
-import numpy as np
-import pandas as pd
-import sys
-from typing import List
-
-from lstm_ad.model import LSTMAD
-
-
-@dataclass
-class CustomParameters:
-    lstm_layers: int = 2
-    split: float = 0.9
-    window_size: int = 30
-    prediction_window_size: int = 1
-    output_dims: List[int] = field(default_factory=lambda: [])
-    batch_size: int = 32
-    validation_batch_size: int = 128
-    test_batch_size: int = 128
-    epochs: int = 50  # bigger for smaller datasets, smaller for bigger datasets
-    early_stopping_delta: float = 0.05
-    early_stopping_patience: int = 10
-    optimizer: str = "adam"  # not exposed, always use Adam!
-    learning_rate: float = 1e-3
-    random_state: int = 42
-
-
-class AlgorithmArgs(argparse.Namespace):
-    @property
-    def ts(self) -> np.ndarray:
-        return self.df.iloc[:, 1:-1].values
-
-    @property
-    def df(self) -> pd.DataFrame:
-        return pd.read_csv(self.dataInput)
-
-    @staticmethod
-    def from_sys_args() -> 'AlgorithmArgs':
-        args: dict = json.loads(sys.argv[1])
-        custom_parameter_keys = dir(CustomParameters())
-        filtered_parameters = dict(
-            filter(lambda x: x[0] in custom_parameter_keys, args.get("customParameters", {}).items()))
-        args["customParameters"] = CustomParameters(**filtered_parameters)
-        return AlgorithmArgs(**args)
-
-
-def train(args: AlgorithmArgs):
-    data = args.ts
-    model = LSTMAD(input_size=data.shape[1], **asdict(args.customParameters))
-    model.fit(data, args.modelOutput)
-    model.save(args.modelOutput)
-
-
-def execute(args: AlgorithmArgs):
-    data = args.ts
-    model = LSTMAD.load(args.modelInput, input_size=data.shape[1], **asdict(args.customParameters))
-    anomaly_scores = model.anomaly_detection(data)
-    anomaly_scores.tofile(args.dataOutput, sep="\n")
-
-
-def set_random_state(config: AlgorithmArgs) -> None:
-    seed = config.customParameters.random_state
-    import random, torch
-    random.seed(seed)
-    np.random.seed(seed)
-    torch.manual_seed(seed)
-
-
-if __name__ == "__main__":
-    args = AlgorithmArgs.from_sys_args()
-    set_random_state(args)
-
-    if args.executionType == "train":
-        train(args)
-    elif args.executionType == "execute":
-        execute(args)
-    else:
-        raise ValueError(f"No executionType '{args.executionType}' available! Choose either 'train' or 'execute'.")
diff --git a/algorithms/lstm_ad/lstm_ad/__init__.py b/algorithms/lstm_ad/lstm_ad/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/algorithms/lstm_ad/lstm_ad/dataset.py b/algorithms/lstm_ad/lstm_ad/dataset.py
deleted file mode 100644
index f0c2fc7f8a613f02c2893ebd18459f58d8ba12c6..0000000000000000000000000000000000000000
--- a/algorithms/lstm_ad/lstm_ad/dataset.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import torch
-
-from torch.utils.data import Dataset
-from typing import List, Optional, Tuple
-
-
-class TimeSeries(Dataset):
-    def __init__(self, X, window_length: int, prediction_length: int, output_dims: Optional[List[int]] = None):
-        self.output_dims = output_dims or list(range(X.shape[1]))
-        self.X = torch.from_numpy(X).float()
-        self.window_length = window_length
-        self.prediction_length = prediction_length
-
-    def __len__(self):
-        return self.X.shape[0] - (self.window_length - 1) - self.prediction_length
-
-    def __getitem__(self, index) -> Tuple[torch.Tensor, torch.Tensor]:
-        end_idx = index+self.window_length
-        x = self.X[index:end_idx]
-        y = self.X[end_idx:end_idx+self.prediction_length, self.output_dims]
-        return x, y
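-
-
-if __name__ == "__main__":
-    # Toy sanity check (illustrative only, not part of the TimeEval interface):
-    # x is a window of `window_length` points, y holds the following
-    # `prediction_length` points of the selected output dimensions.
-    import numpy as np
-
-    ts = np.arange(10, dtype=float).reshape(-1, 1)
-    ds = TimeSeries(ts, window_length=3, prediction_length=1)
-    x, y = ds[0]
-    print(x.shape, y.shape)  # torch.Size([3, 1]) torch.Size([1, 1])
-    print(len(ds))           # 10 - (3 - 1) - 1 = 7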
diff --git a/algorithms/lstm_ad/lstm_ad/early_stopping.py b/algorithms/lstm_ad/lstm_ad/early_stopping.py
deleted file mode 100644
index bf1f6f43c3a58bc9e76bcec41f9d7cbe0f8a5a37..0000000000000000000000000000000000000000
--- a/algorithms/lstm_ad/lstm_ad/early_stopping.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from typing import Iterator, Optional, List, Callable
-
-
-class EarlyStopping:
-    def __init__(self, patience: int, delta: float, epochs: int,
-                 callbacks: Optional[List[Callable[[bool, float, int], None]]] = None):
-        self.patience = patience
-        self.delta = delta
-        self.epochs = epochs
-        self.current_epoch = 0
-        self.epochs_without_change = 0
-        self.last_loss: Optional[float] = None
-        self.callbacks = callbacks or []
-
-    def _callback(self, improvement: bool, loss: float):
-        for cb in self.callbacks:
-            cb(improvement, loss, self.epochs_without_change)
-
-    def update(self, loss: float):
-        improvement = False
-        if self.last_loss is None or (1 - (loss / self.last_loss) > self.delta):
-            self.last_loss = loss
-            self.epochs_without_change = 0
-            improvement = True
-        else:
-            self.epochs_without_change += 1
-
-        self._callback(improvement, loss)
-
-    def __iter__(self) -> Iterator[int]:
-        while self.epochs_without_change <= self.patience and self.current_epoch < self.epochs:
-            yield self.current_epoch
-            self.current_epoch += 1
diff --git a/algorithms/lstm_ad/lstm_ad/model.py b/algorithms/lstm_ad/lstm_ad/model.py
deleted file mode 100644
index 8b068e24af5d0270e939d3b6763777579d970ff4..0000000000000000000000000000000000000000
--- a/algorithms/lstm_ad/lstm_ad/model.py
+++ /dev/null
@@ -1,180 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-from torch.optim import Optimizer as BaseOptimizer
-from torch.optim import Adam, SGD, RMSprop
-from torch.utils.data import DataLoader
-from typing import List, Tuple
-from enum import Enum
-import os
-import logging
-
-from .dataset import TimeSeries
-from .early_stopping import EarlyStopping
-
-
-class Optimizer(Enum):
-    ADAM = "adam"
-    SGD = "sgd"
-    RMSPROP = "rmsprop"
-
-    def get(self, params, lr) -> BaseOptimizer:
-        if self == Optimizer.ADAM:
-            return Adam(params, lr=lr)
-        elif self == Optimizer.SGD:
-            return SGD(params, lr=lr)
-        else:  # if self == Optimizer.RMSPROP:
-            return RMSprop(params, lr=lr)
-
-
-class AnomalyScorer:
-    def __init__(self):
-        super().__init__()
-
-        self.mean = torch.tensor(0, dtype=torch.float64)
-        self.var = torch.tensor(1, dtype=torch.float64)
-
-    def forward(self, errors: torch.Tensor) -> torch.Tensor:
-        mean_diff = errors - self.mean
-        return torch.mul(torch.mul(mean_diff, self.var**-1), mean_diff)
-
-    def find_distribution(self, errors: torch.Tensor):
-        self.mean = errors.mean(dim=[0, 1])
-        self.var = errors.var(dim=[0, 1])
-
-
-class LSTMAD(nn.Module):
-    def __init__(self,
-                 input_size: int,
-                 lstm_layers: int,
-                 split: float,
-                 window_size: int,
-                 prediction_window_size: int,
-                 output_dims: List[int],
-                 batch_size: int,
-                 validation_batch_size: int,
-                 test_batch_size: int,
-                 epochs: int,
-                 early_stopping_delta: float,
-                 early_stopping_patience: int,
-                 optimizer: str,
-                 learning_rate: float,
-                 *args, **kwargs):
-        super().__init__()
-
-        self.input_size = input_size
-        self.lstm_layers = lstm_layers
-        self.split = split
-        self.window_size = window_size
-        self.prediction_length = prediction_window_size
-        self.output_dims = output_dims
-        self.batch_size = batch_size
-        self.validation_batch_size = validation_batch_size
-        self.test_batch_size = test_batch_size
-        self.epochs = epochs
-        self.early_stopping_delta = early_stopping_delta
-        self.early_stopping_patience = early_stopping_patience
-        self.optimizer = Optimizer(optimizer)
-        self.lr = learning_rate
-        if len(output_dims) > 0:
-            self.hidden_units = len(output_dims)
-        else:
-            self.hidden_units = input_size
-
-        self.lstms = nn.LSTM(input_size=input_size, hidden_size=self.hidden_units * self.prediction_length, batch_first=True, num_layers=lstm_layers)
-        self.dense = nn.Linear(in_features=self.window_size * self.hidden_units * self.prediction_length, out_features=self.hidden_units * self.prediction_length)
-        self.anomaly_scorer = AnomalyScorer()
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        x, hidden = self.lstms(x)
-        x = x.reshape(-1, self.window_size * self.hidden_units * self.prediction_length)
-        x = self.dense(x)
-        return x
-
-    def fit(self, ts: np.ndarray, model_path: os.PathLike, verbose=True):
-        self.train()
-        logging.basicConfig(level=logging.INFO)
-        logger = logging.getLogger("LSTM-AD")
-        optimizer = self.optimizer.get(self.parameters(), lr=self.lr)
-        criterion = nn.MSELoss()
-
-        train_dl, valid_dl = self._split_data(ts)
-        def cb(i, _l, _e):
-            if i:
-                self._estimate_normal_distribution(valid_dl)
-                self.save(model_path)
-        early_stopping = EarlyStopping(self.early_stopping_patience, self.early_stopping_delta, self.epochs,
-                                       callbacks=[cb])
-
-        for epoch in early_stopping:
-            self.train()
-            losses = []
-            for x, y in train_dl:
-                self.zero_grad()
-                loss = self._predict(x, y, criterion)
-                loss.backward()
-                optimizer.step()
-                losses.append(loss.item())
-
-            self.eval()
-            valid_losses = []
-            for x, y in valid_dl:
-                loss = self._predict(x, y, criterion)
-                valid_losses.append(loss.item())
-            validation_loss = sum(valid_losses)
-            early_stopping.update(validation_loss)
-            if verbose:
-                logger.info(
-                    f"Epoch {epoch}: Training Loss {sum(losses) / len(train_dl)} \t "
-                    f"Validation Loss {validation_loss / len(valid_dl)}"
-                )
-        self._estimate_normal_distribution(valid_dl)
-
-    def _estimate_normal_distribution(self, dl: DataLoader):
-        self.eval()
-        errors = []
-        for x, y in dl:
-            y_hat = self.forward(x)
-            e = torch.abs(y.reshape(*y_hat.shape) - y_hat)
-            errors.append(e)
-        self.anomaly_scorer.find_distribution(torch.cat(errors))
-
-    def _predict(self, x, y, criterion) -> torch.Tensor:
-        y = y.reshape(-1, self.prediction_length * self.hidden_units)
-        y_hat = self.forward(x)
-        loss = criterion(y_hat, y)
-        return loss
-
-    def anomaly_detection(self, ts: np.ndarray) -> np.ndarray:
-        self.eval()
-        dataloader = DataLoader(TimeSeries(ts, window_length=self.window_size, prediction_length=self.prediction_length, output_dims=self.output_dims),
-                                batch_size=self.test_batch_size)
-        errors = []
-        for x, y in dataloader:
-            y_hat = self.forward(x)
-            e = torch.abs(y.reshape(*y_hat.shape) - y_hat)
-            errors.append(e)
-        errors = torch.cat(errors)
-        return self.anomaly_scorer.forward(errors.mean(dim=1)).detach().numpy()
-
-    def _split_data(self, ts: np.array) -> Tuple[DataLoader, DataLoader]:
-        split_at = int(len(ts) * self.split)
-        train_ts = ts[:split_at]
-        valid_ts = ts[split_at:]
-        train_ds = TimeSeries(train_ts, window_length=self.window_size, prediction_length=self.prediction_length, output_dims=self.output_dims)
-        valid_ds = TimeSeries(valid_ts, window_length=self.window_size, prediction_length=self.prediction_length, output_dims=self.output_dims)
-        return DataLoader(train_ds, batch_size=self.batch_size), DataLoader(valid_ds, batch_size=self.validation_batch_size)
-
-    def save(self, path: os.PathLike):
-        torch.save({
-            "model": self.state_dict(),
-            "anomaly_scorer": self.anomaly_scorer
-        }, path)
-
-    @staticmethod
-    def load(path: os.PathLike, **kwargs) -> 'LSTMAD':
-        checkpoint = torch.load(path)
-        model = LSTMAD(**kwargs)
-        model.load_state_dict(checkpoint["model"])
-        model.anomaly_scorer = checkpoint["anomaly_scorer"]
-        return model
diff --git a/algorithms/lstm_ad/manifest.json b/algorithms/lstm_ad/manifest.json
deleted file mode 100644
index fafaf4f712cdca1d40e3e908df8fba68235d54ee..0000000000000000000000000000000000000000
--- a/algorithms/lstm_ad/manifest.json
+++ /dev/null
@@ -1,133 +0,0 @@
-{
-  "title": "LSTM-AD",
-  "description": "Implementation of https://www.elen.ucl.ac.be/Proceedings/esann/esannpdf/es2015-56.pdf",
-  "inputDimensionality": "multivariate",
-  "version": "0.3.0",
-  "authors": "Pankaj Malhotra, Lovekesh Vig, Gautam Shroff, Puneet Agarwal",
-  "language": "Python",
-  "type": "Detector",
-  "mainFile": "algorithm.py",
-  "learningType": "semi-supervised",
-  "trainingStep": {
-    "parameters": [
-      {
-        "name": "lstm_layers",
-        "type": "int",
-        "defaultValue": 2,
-        "optional": "true",
-        "description": "Number of stacked LSTM layers"
-      },
-      {
-        "name": "split",
-        "type": "float",
-        "defaultValue": 0.9,
-        "optional": "true",
-        "description": "Train-validation split for early stopping"
-      },
-      {
-        "name": "window_size",
-        "type": "int",
-        "defaultValue": 30,
-        "optional": "true",
-        "description": ""
-      },
-      {
-        "name": "prediction_window_size",
-        "type": "int",
-        "defaultValue": 1,
-        "optional": "true",
-        "description": "Number of points predicted"
-      },
-      {
-        "name": "batch_size",
-        "type": "int",
-        "defaultValue": 32,
-        "optional": "true",
-        "description": "Number of instances trained at the same time"
-      },
-      {
-        "name": "validation_batch_size",
-        "type": "int",
-        "defaultValue": 128,
-        "optional": "true",
-        "description": "Number of instances used for validation at the same time"
-      },
-      {
-        "name": "epochs",
-        "type": "int",
-        "defaultValue": 50,
-        "optional": "true",
-        "description": "Number of training iterations over entire dataset"
-      },
-      {
-        "name": "early_stopping_delta",
-        "type": "float",
-        "defaultValue": 0.05,
-        "optional": "true",
-        "description": "If 1 - (loss / last_loss) is less than `delta` for `patience` epochs, stop"
-      },
-      {
-        "name": "early_stopping_patience",
-        "type": "int",
-        "defaultValue": 10,
-        "optional": "true",
-        "description": "If 1 - (loss / last_loss) is less than `delta` for `patience` epochs, stop"
-      },
-      {
-        "name": "learning_rate",
-        "type": "float",
-        "defaultValue": 0.001,
-        "optional": "true",
-        "description": "Learning rate for Adam optimizer"
-      },
-      {
-        "name": "random_state",
-        "type": "int",
-        "defaultValue": 42,
-        "optional": "true",
-        "description": "Seed for the random number generator"
-      }
-    ],
-    "modelInput": "none"
-  },
-  "executionStep": {
-    "parameters": [
-      {
-        "name": "lstm_layers",
-        "type": "int",
-        "defaultValue": 2,
-        "optional": "true",
-        "description": "Number of stacked LSTM layers"
-      },
-      {
-        "name": "window_size",
-        "type": "int",
-        "defaultValue": 30,
-        "optional": "true",
-        "description": "Size of the sliding windows"
-      },
-      {
-        "name": "prediction_window_size",
-        "type": "int",
-        "defaultValue": 1,
-        "optional": "true",
-        "description": "Number of points predicted"
-      },
-      {
-        "name": "test_batch_size",
-        "type": "int",
-        "defaultValue": 128,
-        "optional": "true",
-        "description": "Number of instances used for testing at the same time"
-      },
-      {
-        "name": "random_state",
-        "type": "int",
-        "defaultValue": 42,
-        "optional": "true",
-        "description": "Seed for the random number generator"
-      }
-    ],
-    "modelInput": "required"
-  }
-}
\ No newline at end of file
diff --git a/algorithms/lstm_ad/requirements.txt b/algorithms/lstm_ad/requirements.txt
deleted file mode 100644
index 0d499b7deda72a8cdd422b2c562aeeab7661b914..0000000000000000000000000000000000000000
--- a/algorithms/lstm_ad/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-numpy>=1.19.5
-pandas>=1.2.1
-torch==1.7.1
diff --git a/algorithms/random_black_forest/Dockerfile b/algorithms/random_black_forest/Dockerfile
deleted file mode 100644
index 4a6d8ce8386dd43292797cd0f05315fd5ebf6d01..0000000000000000000000000000000000000000
--- a/algorithms/random_black_forest/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM ghcr.io/timeeval/python3-base:0.3.0
-
-LABEL maintainer="sebastian.schmidl@hpi.de"
-LABEL org.opencontainers.image.licenses=MIT
-
-ENV ALGORITHM_MAIN="/app/algorithm.py"
-
-COPY requirements.txt /app/
-RUN pip install -r /app/requirements.txt
-
-COPY model.py /app/
-COPY manifest.json /app/
-COPY algorithm.py /app/
diff --git a/algorithms/random_black_forest/LICENSE b/algorithms/random_black_forest/LICENSE
deleted file mode 100644
index e594ec89208b9bcc64e3b455e283d3ce07f1b817..0000000000000000000000000000000000000000
--- a/algorithms/random_black_forest/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2020-2022 Phillip Wenig and Sebastian Schmidl
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/algorithms/random_black_forest/README.md b/algorithms/random_black_forest/README.md
deleted file mode 100644
index 1b1dae564693abf5c91d8b64893a26a84bea832e..0000000000000000000000000000000000000000
--- a/algorithms/random_black_forest/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# An ensemble detector using multiple random forests on different feature subsets (Random Black Forest)
-
-|||
-| :--- | :--- |
-| Citekey | - |
-| Source Code | own |
-| Learning type | semi-supervised |
-| Input dimensionality | multivariate |
-|||
-
-## Notes
-
-An ensemble windowed multi-output forecasting method using random forest regression and random subspace ensembling (requested by RollsRoyce).
-The forecasting error is used as the anomaly score.
-
-The regressor ensemble is trained on a clean time series to look at a fixed window (`train_window_size` points) and predict the next point.
-On the test series, the predicted values are compared to the observed ones and the prediction error is returned as the anomaly score.
-The first `train_window_size` points of the test series do not receive an anomaly score (they are set to `NaN`) because no predictions are possible for them.
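To make the procedure above concrete, here is a minimal, self-contained sketch of the windowed forecasting idea, assuming a 2-D array of shape (points, channels); the helper name `forecast_error_scores` and its defaults are illustrative only and much simpler than the subspace-ensembled implementation in `model.py` further below.

```python
# Minimal sketch of the forecasting-error scoring idea (illustrative only).
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics.pairwise import paired_distances


def forecast_error_scores(train: np.ndarray, test: np.ndarray, window_size: int = 50) -> np.ndarray:
    """train/test have shape (points, channels); returns one score per test point."""
    def windows(ts):
        X = sliding_window_view(ts, window_shape=window_size, axis=0)[:-1]  # drop last window (no target)
        X = X.reshape(X.shape[0], -1)   # flatten each window for the regressor
        y = ts[window_size:]            # the point following each window
        return X, y

    X_train, y_train = windows(train)
    model = RandomForestRegressor(n_estimators=100, random_state=42).fit(X_train, y_train)

    X_test, y_test = windows(test)
    y_hat = model.predict(X_test).reshape(y_test.shape)
    scores = np.full(test.shape[0], np.nan)            # first window_size points get no score
    scores[window_size:] = paired_distances(y_test, y_hat)
    return scores
```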
diff --git a/algorithms/random_black_forest/algorithm.py b/algorithms/random_black_forest/algorithm.py
deleted file mode 100755
index e95067aedfd9ff88b55fb1ab277b8b6ab4e05a0f..0000000000000000000000000000000000000000
--- a/algorithms/random_black_forest/algorithm.py
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python3
-import argparse
-import json
-import sys
-import numpy as np
-import pandas as pd
-
-from pathlib import Path
-from typing import Optional, Tuple
-from dataclasses import dataclass, asdict
-from model import RandomBlackForestAnomalyDetector
-
-
-@dataclass
-class CustomParameters:
-    train_window_size: int = 50
-    n_estimators: int = 2  # number of forests
-    max_features_per_estimator: float = 0.5  # fraction of features per forest
-    n_trees: int = 100  # number of trees per forest
-    max_features_method: str = "auto"  # "sqrt", "log2"
-    bootstrap: bool = True
-    max_samples: Optional[float] = None  # fraction of all samples
-    # standardize: bool = False  # does not really influence the quality
-    random_state: int = 42
-    verbose: int = 0
-    n_jobs: int = 1
-    # the following parameters control the tree size
-    max_depth: Optional[int] = None
-    min_samples_split: int = 2
-    min_samples_leaf: int = 1
-
-
-class AlgorithmArgs(argparse.Namespace):
-    @staticmethod
-    def from_sys_args() -> 'AlgorithmArgs':
-        args: dict = json.loads(sys.argv[1])
-        custom_parameter_keys = dir(CustomParameters())
-        filtered_parameters = dict(filter(lambda x: x[0] in custom_parameter_keys, args.get("customParameters", {}).items()))
-        args["customParameters"] = CustomParameters(**filtered_parameters)
-        return AlgorithmArgs(**args)
-
-
-def set_random_state(config: AlgorithmArgs) -> None:
-    seed = config.customParameters.random_state
-    import random
-    random.seed(seed)
-    np.random.seed(seed)
-
-
-def load_data(config: AlgorithmArgs) -> Tuple[np.ndarray, np.ndarray]:
-    df = pd.read_csv(config.dataInput)
-    data = df.iloc[:, 1:-1].values
-    labels = df.iloc[:, -1].values
-    return data, labels
-
-
-def train(config: AlgorithmArgs):
-    np.random.seed(config.customParameters.random_state)
-    data, _ = load_data(config)
-
-    print("Training random forest classifier")
-    args = asdict(config.customParameters)
-    model = RandomBlackForestAnomalyDetector(**args).fit(data)
-    print(f"Saving model to {config.modelOutput}")
-    model.save(Path(config.modelOutput))
-
-def execute(config: AlgorithmArgs):
-    np.random.seed(config.customParameters.random_state)
-    data, _ = load_data(config)
-    print(f"Loading model from {config.modelInput}")
-    model = RandomBlackForestAnomalyDetector.load(Path(config.modelInput))
-
-    print("Forecasting and calculating errors")
-    scores = model.detect(data)
-    np.savetxt(config.dataOutput, scores, delimiter=",")
-    print(f"Stored anomaly scores at {config.dataOutput}")
-
-    # predictions = model.predict(data)
-    # plot(data, predictions, scores, _)
-
-
-def plot(data, predictions, scores, labels):
-    import matplotlib.pyplot as plt
-    from sklearn.preprocessing import MinMaxScaler
-
-    # for better visuals, align scores to value range of labels (0, 1)
-    scores = MinMaxScaler().fit_transform(scores.reshape(-1, 1)).reshape(-1)
-
-    fig, axs = plt.subplots(data.shape[1]+1, sharex=True)
-    for i in range(data.shape[1]):
-        axs[i].plot(data[:, i], label="truth")
-        axs[i].plot(predictions[:, i], label="predict")
-        axs[i].legend()
-    axs[-1].plot(labels, label="label")
-    axs[-1].plot(scores, label="score")
-    axs[-1].legend()
-    plt.show()
-
-
-if __name__ == "__main__":
-    if len(sys.argv) != 2:
-        print("Wrong number of arguments specified; expected a single json-string!")
-        exit(1)
-
-    config = AlgorithmArgs.from_sys_args()
-    set_random_state(config)
-    print(f"Config: {config}")
-
-    if config.executionType == "train":
-        train(config)
-    elif config.executionType == "execute":
-        execute(config)
-    else:
-        raise ValueError(f"Unknown execution type '{config.executionType}'; expected either 'train' or 'execute'!")
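As a side note, the filtering in `AlgorithmArgs.from_sys_args` above silently drops any key in `customParameters` that is not a field of `CustomParameters`. A reduced sketch of that behaviour (with only two illustrative fields, not the full parameter set):

```python
# Reduced sketch of the customParameters filtering used above (illustrative fields only).
from dataclasses import dataclass


@dataclass
class CustomParameters:
    train_window_size: int = 50
    n_estimators: int = 2


incoming = {"train_window_size": 100, "not_a_parameter": 1}  # e.g. parsed from the JSON argument
valid = dir(CustomParameters())                              # dataclass fields show up in dir()
params = CustomParameters(**{k: v for k, v in incoming.items() if k in valid})
print(params)  # CustomParameters(train_window_size=100, n_estimators=2)
```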
diff --git a/algorithms/random_black_forest/manifest.json b/algorithms/random_black_forest/manifest.json
deleted file mode 100644
index 658a4243e3d5be8ee7642b1739b0caf4f89f8e25..0000000000000000000000000000000000000000
--- a/algorithms/random_black_forest/manifest.json
+++ /dev/null
@@ -1,119 +0,0 @@
-{
-    "title": "Random Black Forest (RR)",
-    "description": "An ensemble of multiple multi-output random forest regressors based on different feature subsets (requested by RollsRoyce). The forecasting error is used as anomaly score.",
-    "inputDimensionality": "multivariate",
-    "learningType": "semi-supervised",
-    "version": "0.3.0",
-    "authors": "Sebastian Schmidl",
-    "language": "Python",
-    "type": "Detector",
-    "mainFile": "algorithm.py",
-    "trainingStep": {
-        "parameters": [
-            {
-                "name": "train_window_size",
-                "type": "int",
-                "defaultValue": 50,
-                "optional": "true",
-                "description": "Size of the training windows. Always predicts a single point!"
-            },
-            {
-                "name": "n_estimators",
-                "type": "int",
-                "defaultValue": 2,
-                "optional": "true",
-                "description": "The number of forests. Each forest is trained on `max_features` features."
-            },
-            {
-                "name": "max_features_per_estimator",
-                "type": "float",
-                "defaultValue": 0.5,
-                "optional": "true",
-                "description": "Each forest is trained on randomly selected `int(max_features * n_features)` features."
-            },
-            {
-                "name": "n_trees",
-                "type": "int",
-                "defaultValue": 100,
-                "optional": "true",
-                "description": "The number of trees in the forest."
-            },
-            {
-                "name": "max_features_method",
-                "type": "enum[auto,sqrt,log2]",
-                "defaultValue": "auto",
-                "optional": "true",
-                "description": "The number of features to consider when looking for the best split between trees: 'auto': max_features=n_features, 'sqrt': max_features=sqrt(n_features), 'log2': max_features=log2(n_features)."
-            },
-            {
-                "name": "bootstrap",
-                "type": "boolean",
-                "defaultValue": true,
-                "optional": "true",
-                "description": "Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree."
-            },
-            {
-                "name": "max_samples",
-                "type": "float",
-                "defaultValue": null,
-                "optional": "true",
-                "description": "If bootstrap is True, the number of samples to draw from X to train each base estimator."
-            },
-            {
-                "name": "random_state",
-                "type": "int",
-                "defaultValue": 42,
-                "optional": "true",
-                "description": "Seeds the randomness of the bootstrapping and the sampling of the features."
-            },
-            {
-                "name": "verbose",
-                "type": "int",
-                "defaultValue": 0,
-                "optional": "true",
-                "description": "Controls logging verbosity."
-            },
-            {
-                "name": "n_jobs",
-                "type": "int",
-                "defaultValue": 1,
-                "optional": "true",
-                "description": "The number of jobs to run in parallel. `-1` means using all processors"
-            },
-            {
-                "name": "max_depth",
-                "type": "int",
-                "defaultValue": null,
-                "optional": "true",
-                "description": "The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples."
-            },
-            {
-                "name": "min_samples_split",
-                "type": "int",
-                "defaultValue": 2,
-                "optional": "true",
-                "description": "The minimum number of samples required to split an internal node."
-            },
-            {
-                "name": "min_samples_leaf",
-                "type": "int",
-                "defaultValue": 1,
-                "optional": "true",
-                "description": "The minimum number of samples required to be at a leaf node. A split point at any depth will only be considered if it leaves at least `min_samples_leaf` training samples in each of the left and right branches. This may have the effect of smoothing the model, especially in regression."
-            }
-        ],
-        "modelInput": "none"
-    },
-    "executionStep": {
-        "parameters": [
-            {
-                "name": "random_state",
-                "type": "int",
-                "defaultValue": 42,
-                "optional": "true",
-                "description": "Seeds the randomness of the bootstrapping and the sampling of the features."
-            }
-        ],
-        "modelInput": "required"
-    }
-}
diff --git a/algorithms/random_black_forest/model.py b/algorithms/random_black_forest/model.py
deleted file mode 100644
index 1614459af530d78fcd0b05c2b200dfba5d856b4f..0000000000000000000000000000000000000000
--- a/algorithms/random_black_forest/model.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import warnings
-import joblib
-
-import numpy as np
-
-from typing import Optional, Tuple
-from pathlib import Path
-from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin
-from sklearn.ensemble import RandomForestRegressor, BaggingRegressor
-from sklearn.preprocessing import StandardScaler
-from sklearn.metrics.pairwise import paired_distances
-from sklearn.pipeline import make_pipeline
-
-from numpy.lib.stride_tricks import sliding_window_view
-
-
-class SlidingWindowProcessor(BaseEstimator, TransformerMixin):
-
-    def __init__(self, window_size: int, standardize: bool = False):
-        self.window_size = window_size
-        if standardize:
-            self.scaler = StandardScaler()
-        else:
-            self.scaler = None
-
-    def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None, **fit_params) -> 'SlidingWindowProcessor':
-        if self.scaler:
-            self.scaler.fit(X)
-        return self
-
-    def transform(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
-        """
-        y is unused (exists for compatibility)
-        """
-        if self.scaler:
-            print("Standardizing input data")
-            X = self.scaler.transform(X)
-        # the last window would have no target to predict, e.g. for n=10: [[1, 2] -> 3, ..., [8, 9] -> 10, [9, 10] -> ?]
-        new_X = sliding_window_view(X, window_shape=self.window_size, axis=0)[:-1]
-        # reshape to two dimensions (required by regressors)
-        new_X = new_X.reshape(new_X.shape[0], -1)
-        new_y = np.roll(X, -self.window_size, axis=0)[:-self.window_size]
-        return new_X, new_y
-
-    def transform_y(self, X: np.ndarray) -> np.ndarray:
-        if self.scaler:
-            print("Standardizing input data")
-            X = self.scaler.transform(X)
-        return np.roll(X, -self.window_size, axis=0)[:-self.window_size]
-
-    def inverse_transform_y(self, y: np.ndarray) -> np.ndarray:
-        result = np.full(shape=(self.window_size+y.shape[0], y.shape[1]), fill_value=np.nan)
-        result[-len(y):, :] = y
-        if self.scaler:
-            print("Reversing standardization for prediction")
-            result = self.scaler.inverse_transform(result)
-        return result
-
-
-class RandomBlackForestAnomalyDetector(BaseEstimator, RegressorMixin):
-    def __init__(self,
-            train_window_size: int = 50,
-            n_estimators: int = 2,
-            max_features_per_estimator: float = 0.5,
-            n_trees: int = 100,
-            max_features_method: str = "auto",  # "sqrt", "log2"
-            bootstrap: bool = True,
-            max_samples: Optional[float] = None,  # fraction of all samples
-            standardize: bool = False,
-            random_state: int = 42,
-            verbose: int = 0,
-            n_jobs: int = 1,
-            # the following parameters control the tree size
-            max_depth: Optional[int] = None,
-            min_samples_split: int = 2,
-            min_samples_leaf: int = 1):
-        self.preprocessor = SlidingWindowProcessor(train_window_size, standardize)
-        self.clf = BaggingRegressor(
-            estimator=RandomForestRegressor(
-                n_estimators=n_trees,
-                max_features=1.0,
-                bootstrap=bootstrap,
-                max_samples=max_samples,
-                random_state=random_state,
-                verbose=verbose,
-                n_jobs=n_jobs,
-                max_depth=max_depth,
-                min_samples_split=min_samples_split,
-                min_samples_leaf=min_samples_leaf,
-            ),
-            n_estimators=n_estimators,
-            max_features=max_features_per_estimator,
-            bootstrap_features=False, # draw features without replacement
-            max_samples=1.0, # all samples for every base estimator
-            n_jobs=n_jobs,
-        )
-
-    def fit(self, X: np.ndarray, y: np.ndarray = None) -> 'RandomBlackForestAnomalyDetector':
-        if y is not None:
-            warnings.warn(f"y is calculated from X. Please don't pass y to RandomBlackForestAnomalyDetector.fit, it will be ignored!")
-        X, y = self.preprocessor.fit_transform(X)
-        self.clf.fit(X, y)
-        return self
-
-    def predict(self, X: np.ndarray) -> np.ndarray:
-        X, _ = self.preprocessor.transform(X)
-        y_hat = self._predict_internal(X)
-        return self.preprocessor.inverse_transform_y(y_hat)
-
-    def detect(self, X: np.ndarray) -> np.ndarray:
-        result_target_shape = X.shape[0]
-        X, y = self.preprocessor.transform(X)
-        y_hat = self._predict_internal(X)
-        scores = paired_distances(y, y_hat.reshape(y.shape))
-        results = np.full(shape=result_target_shape, fill_value=np.nan)
-        results[-len(scores):] = scores
-        return results
-
-    def _predict_internal(self, X: np.ndarray) -> np.ndarray:
-        return self.clf.predict(X)
-
-    def save(self, path: Path) -> None:
-        joblib.dump(self, path)
-
-    @staticmethod
-    def load(path: Path) -> 'RandomBlackForestAnomalyDetector':
-        return joblib.load(path)
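For orientation, a hypothetical stand-alone use of `RandomBlackForestAnomalyDetector` outside the TimeEval JSON harness; the data shapes, the injected anomaly, and the model path are made up for illustration:

```python
# Hypothetical stand-alone usage of the detector above (shapes, anomaly, and path are examples).
import numpy as np
from pathlib import Path
from model import RandomBlackForestAnomalyDetector

rng = np.random.default_rng(42)
train_ts = rng.normal(size=(1000, 3))   # clean multivariate training series (points, channels)
test_ts = rng.normal(size=(500, 3))
test_ts[250:260] += 5.0                 # inject an anomalous segment

detector = RandomBlackForestAnomalyDetector(train_window_size=50, n_estimators=2)
detector.fit(train_ts)
detector.save(Path("model.pkl"))

restored = RandomBlackForestAnomalyDetector.load(Path("model.pkl"))
scores = restored.detect(test_ts)       # first train_window_size entries are NaN
print(scores.shape, int(np.nanargmax(scores)))
```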
diff --git a/algorithms/random_black_forest/requirements.txt b/algorithms/random_black_forest/requirements.txt
deleted file mode 100644
index c879c5af90b268a1d7397a93bce8da4995cf8af7..0000000000000000000000000000000000000000
--- a/algorithms/random_black_forest/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-numpy>=1.19.5
-pandas>=1.2.1
-scikit-learn>=0.24.1
-matplotlib>=3.3.4
diff --git a/algorithms/random_black_forest/run.sh b/algorithms/random_black_forest/run.sh
deleted file mode 100755
index b07e70ed164a2bda2449db4af1a6a132f965e661..0000000000000000000000000000000000000000
--- a/algorithms/random_black_forest/run.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-custom_params=${1:-'{}'}
-dataset=${2:-dataset.csv}
-echo ""
-echo "=== Training ==="
-python algorithm.py "{ \"executionType\": \"train\", \"dataInput\": \"../data/${dataset}\", \"dataOutput\": \"./scores.csv\", \"modelInput\": \"./model.pkl\", \"modelOutput\": \"./model.pkl\", \"customParameters\": $custom_params }"
-
-
-echo ""
-echo "=== Execution ==="
-python algorithm.py "{ \"executionType\": \"execute\", \"dataInput\": \"../data/${dataset}\", \"dataOutput\": \"./scores.csv\", \"modelInput\": \"./model.pkl\", \"modelOutput\": \"./model.pkl\" }"