diff --git a/configs/parameters.yaml b/configs/parameters.yaml
index 8ad5eac3376da65168a837f4b183f0cfdeeac390..322c2c079c9b67a697f86f1674e3533824de8abf 100644
--- a/configs/parameters.yaml
+++ b/configs/parameters.yaml
@@ -1,13 +1,37 @@
 # PATHS
 wav_path: '../data/wavs/'
+data_path: './data/'
 embedding_path: '../data/embeddings/'
+model_path: '../models/model'
 
-# PARAMETERS
+
+# TRAINING PARAMETERS
 sampling_rate: 16000
 batch_size: 16
 learning_rate: 0.001
+epochs: 50
+dropout: 0.2
+
+training_set_file: 'TRAINING.txt'
+test_set_file: 'TEST.txt'
+
+
+
+# MODEL PARAMETERS
+
+first_layer: 512
+second_layer: 128
+third_layer: 62
+
+
 
 # Types of embeddings supported: 'ecapa_tdnn' or 'x-vector'
-# ecapa_tdnn: dim = 192
-# x-vector: dim = 512
+# ecapa_tdnn: dim = 192 (set first_layer to 192)
+# x-vector:   dim = 512 (set first_layer to 512)
 embedding_type: x-vector
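+
+# Example (illustrative, not part of the original config): switching to
+# ecapa_tdnn embeddings would change both settings together, since the
+# model reads first_layer as its input dimension:
+#   embedding_type: ecapa_tdnn
+#   first_layer: 192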
diff --git a/data/TRAINING.txt b/data/TRAINING.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1aceb9e789cac5b19b5027426c00c07a7c290a14
--- /dev/null
+++ b/data/TRAINING.txt
@@ -0,0 +1,8 @@
+ANC150_LEC_seg_1_ecapa_emb.pickle,7.0,2.3,1.2,4.5,2.0,2.1
+ANC150_LEC_seg_2_ecapa_emb.pickle,5.0,3.3,2.2,4.1,1.2,8.3
+ANC150_LEC_seg_3_ecapa_emb.pickle,4.0,2.5,4.2,3.5,3.0,8.3
+ANC150_LEC_seg_4_ecapa_emb.pickle,3.0,2.3,1.2,4.1,3.4,5.4
+ANC150_LEC_seg_5_ecapa_emb.pickle,1.2,2.5,6.2,4.1,1.0,2.1
+ANC150_LEC_seg_6_ecapa_emb.pickle,7.3,2.4,1.2,3.5,2.5,2.4
+ANC150_LEC_seg_7_ecapa_emb.pickle,4.0,2.3,3.2,4.3,1.0,3.6
+ANC150_LEC_seg_8_ecapa_emb.pickle,2.0,2.3,2.2,4.2,1.6,2.4
diff --git a/dataloader/load.py b/dataloader/load.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d5b1afa0477e3cc959f02ed571f7a88ec104227
--- /dev/null
+++ b/dataloader/load.py
@@ -0,0 +1,42 @@
+import pandas as pd
+import torch
+from torch.utils.data import Dataset, DataLoader
+
+class load_data(Dataset):
+    """Dataset mapping each embedding file to its six perceptual scores."""
+    def __init__(self, filename, datadir):
+
+        self.filename = filename
+        self.datadir = datadir
+        xy = pd.read_csv(filename, header=None)
+        self.n_samples = len(xy)
+
+        # column 0: embedding pickle file; columns 1-6: scores
+        self.file = xy.values[:, 0]
+        self.INT = xy.values[:, 1]
+        self.SEV = xy.values[:, 2]
+        self.V  =  xy.values[:, 3]
+        self.R  =  xy.values[:, 4]
+        self.P  =  xy.values[:, 5]
+        self.PD =  xy.values[:, 6]
+
+    def __len__(self):
+        return self.n_samples
+
+    def __getitem__(self, idx):
+        # return the embedding filename together with its six target scores
+        labels = torch.tensor([self.INT[idx], self.SEV[idx], self.V[idx],
+                               self.R[idx], self.P[idx], self.PD[idx]],
+                              dtype=torch.float32)
+        return self.file[idx], labels
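+
+
+if __name__ == "__main__":
+    # Minimal usage sketch (illustrative only). The paths and batch size are
+    # taken from configs/parameters.yaml (data_path, training_set_file,
+    # batch_size) and assume the script is run from the repository root.
+    dataset = load_data('./data/TRAINING.txt', './data/')
+    loader = DataLoader(dataset, batch_size=16, shuffle=True)
+    for files, labels in loader:
+        print(files[0], labels.shape)
+        break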
diff --git a/models/model.py b/models/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..107e7a0470325ef16e89d5e862213c4718ca1b51
--- /dev/null
+++ b/models/model.py
@@ -0,0 +1,67 @@
+import yaml
+import torch
+import torch.nn as nn
+import torch.nn.functional as F 
+
+with open("./configs/parameters.yaml", "r") as ymlfile:
+    cfg = yaml.safe_load(ymlfile)
+
+
+class model_embedding_snn(nn.Module):
+    def __init__(self):
+        super(model_embedding_snn,self).__init__()
+
+        self.relu = nn.ReLU()
+        self.dropout = nn.Dropout(cfg['dropout'])  # inputs are 1-D feature vectors, so plain Dropout
+
+        self.batch_norm1 = nn.BatchNorm1d(cfg['first_layer'])
+        self.batch_norm2 = nn.BatchNorm1d(cfg['second_layer'])
+        self.batch_norm3 = nn.BatchNorm1d(cfg['third_layer'])
+
+        self.fc1 = nn.Linear(cfg['first_layer'],cfg['second_layer'])
+        self.fc2 = nn.Linear(cfg['second_layer'],cfg['third_layer'])
+        
+        self.fc_voix = nn.Linear(cfg['third_layer'],1)
+        self.fc_res = nn.Linear(cfg['third_layer'],1)
+        self.fc_pros = nn.Linear(cfg['third_layer'],1)
+        self.fc_pd = nn.Linear(cfg['third_layer'],1)
+
+        self.fc_int = nn.Linear(cfg['third_layer'],1)
+
+
+    def forward(self, input_embs):
+
+        x = self.batch_norm1(input_embs)
+        x = self.fc1(x)
+        x = self.dropout(x)
+        x = self.relu(x)
+        x = self.batch_norm2(x)
+        x = self.fc2(x)
+        x = self.dropout(x)
+        x = self.relu(x)
+        x = self.batch_norm3(x)
+
+        v = self.fc_voix(x)
+        v = self.relu(v)
+        r = self.fc_res(x)
+        r = self.relu(r)
+        p = self.fc_pros(x)
+        p = self.relu(p)
+        pd = self.fc_pd(x)
+        pd = self.relu(pd)
+
+        INT = self.fc_int(x)
+        INT = self.relu(INT)
+
+        return INT, v, r, p, pd
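+
+
+if __name__ == "__main__":
+    # Shape-check sketch (illustrative only): a random batch of
+    # first_layer-dimensional vectors, matching the x-vector setting (512)
+    # in configs/parameters.yaml; each head returns a [batch, 1] score.
+    net = model_embedding_snn()
+    net.eval()
+    dummy = torch.randn(4, cfg['first_layer'])
+    INT, v, r, p, pd = net(dummy)
+    print(INT.shape, v.shape, r.shape, p.shape, pd.shape)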
diff --git a/train.py b/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8c300e99cee561f730e1eb6e6c13e5ee03beb58
--- /dev/null
+++ b/train.py
@@ -0,0 +1,48 @@
+import sys
+import torch
+import torch.nn as nn
+import yaml
+import models.model
+import dataloader.load
+
+def load_config(config_path):
+    try:
+        with open(config_path, 'r') as file:
+            config = yaml.safe_load(file)
+        return config
+    except Exception as e:
+        print('Error reading the config file:', e)
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+    config_path = './configs/parameters.yaml'
+    cfg = load_config(config_path)
+
+    model_snn = models.model.model_embedding_snn().to(device)
+    criterion = nn.MSELoss()
+    optimizer = torch.optim.Adam(model_snn.parameters(), lr=cfg['learning_rate'])
+
+    train_filename = cfg['data_path']+cfg['training_set_file']
+    print(train_filename)
+
+    for ep in range(cfg['epochs']):
+        print(ep)
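+        # --- Sketch of a full training step (commented out; illustrative
+        # only, not part of the original script). It assumes a DataLoader
+        # named train_loader built on dataloader.load.load_data, and a
+        # hypothetical load_embeddings() helper that reads the pickled
+        # vectors from cfg['embedding_path']; neither exists in this commit.
+        # Only the intelligibility target (label column 0) is shown.
+        #
+        # for files, labels in train_loader:
+        #     embs = load_embeddings(files).to(device)   # hypothetical helper
+        #     labels = labels.to(device)
+        #     optimizer.zero_grad()
+        #     INT, v, r, p, pd = model_snn(embs)
+        #     loss = criterion(INT.squeeze(1), labels[:, 0])
+        #     loss.backward()
+        #     optimizer.step()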