diff --git a/Linker/Linker.py b/Linker/Linker.py
index 396639195b6381ad699457dbfe78899af4777e9c..c986c0e1bf3b96cf479a864d4e952a6cf47fa6af 100644
--- a/Linker/Linker.py
+++ b/Linker/Linker.py
@@ -9,6 +9,7 @@ import sys
 
 from torch.optim import AdamW
 from torch.utils.data import TensorDataset, random_split
+from torch.utils.tensorboard import SummaryWriter
 from transformers import get_cosine_schedule_with_warmup
 
 from Configuration import Configuration
@@ -23,6 +24,19 @@ from Linker.eval import mesure_accuracy, SinkhornLoss
 from utils import pad_sequence
 
 
+def output_create_dir():
+    """
+    Create the output directory for TensorBoard logs and checkpoints.
+    @return: output dir, TensorBoard writer
+    """
+    from datetime import datetime
+    output_path = 'TensorBoard'
+    training_dir = os.path.join(output_path, 'Training_' + datetime.today().strftime('%d-%m_%H-%M'))
+    logs_dir = os.path.join(training_dir, 'logs')
+    writer = SummaryWriter(log_dir=logs_dir)
+    return training_dir, writer
+
+
 class Linker(Module):
     def __init__(self, supertagger):
         super(Linker, self).__init__()
@@ -73,18 +87,15 @@ class Linker(Module):
         Args:
             batch_size : int
             df_axiom_links pandas DataFrame
-            sentences_tokens
-            sentences_mask
             validation_rate
         Returns:
             the training dataloader and the validation dataloader. They contains the list of atoms, their polarities, the axiom links, the sentences tokenized, sentence mask
         """
         sentences_batch = df_axiom_links["Sentences"].tolist()
-        sentences_tokens, sentences_mask = self.supertagger.sent_tokenizer.fit_transform_tensors(sentences_batch)
+        sentences_tokens, sentences_mask = self.Supertagger.sent_tokenizer.fit_transform_tensors(sentences_batch)
 
         atoms_batch = get_atoms_batch(df_axiom_links["sub_tree"])
-        atom_tokenizer = AtomTokenizer(atom_map, self.max_atoms_in_sentence)
-        atoms_batch_tokenized = atom_tokenizer.convert_batchs_to_ids(atoms_batch)
+        atoms_batch_tokenized = self.atoms_tokenizer.convert_batchs_to_ids(atoms_batch)
 
         atoms_polarity_batch = find_pos_neg_idexes(self.max_atoms_in_sentence, df_axiom_links["sub_tree"])
 
@@ -158,27 +169,55 @@ class Linker(Module):
         return F.log_softmax(link_weights_per_batch, dim=3)
 
     def train_linker(self, df_axiom_links, validation_rate=0.1, epochs=20,
-                     batch_size=32, checkpoint=True, validate=True):
+                     batch_size=32, checkpoint=True, tensorboard=False):
         r"""
         Args:
             df_axiom_links : pandas dataFrame containing the atoms anoted with _i
-            sentences_tokens : sentences tokenized by BERT
-            sentences_mask : mask of tokens
             validation_rate : float
             epochs : int
             batch_size : int
             checkpoint : boolean
-            validate : boolean
+            tensorboard : boolean
         Returns:
             Final accuracy and final loss
         """
         training_dataloader, validation_dataloader = self.__preprocess_data(batch_size, df_axiom_links,
                                                                             validation_rate)
         self.to(self.device)
-        for epoch_i in range(0, epochs):
-            epoch_acc, epoch_loss = self.train_epoch(training_dataloader, validation_dataloader, checkpoint, validate)
 
-    def train_epoch(self, training_dataloader, validation_dataloader, checkpoint=True, validate=True):
+        if checkpoint or tensorboard:
+            checkpoint_dir, writer = output_create_dir()
+
+        for epoch_i in range(epochs):
+            epoch_acc, epoch_loss = self.train_epoch(training_dataloader)
+
+            print("Average Loss on train dataset : ", epoch_loss)
+            print("Average Accuracy on train dataset : ", epoch_acc)
+
+            if checkpoint:
+                self.__checkpoint_save(
+                    path=os.path.join(checkpoint_dir, 'linker' + datetime.today().strftime('%d-%m_%H-%M') + '.pt'))
+
+            if validation_rate > 0.0:
+                with torch.no_grad():
+                    accuracy_test, average_test_loss = self.eval_epoch(validation_dataloader, self.cross_entropy_loss)
+                    print("Average Loss on test dataset : ", average_test_loss)
+                    print("Average Accuracy on test dataset : ", accuracy_test)
+
+            if tensorboard:
+                writer.add_scalars('Accuracy', {
+                    'Train': epoch_acc}, epoch_i)
+                writer.add_scalars('Loss', {
+                    'Train': epoch_loss}, epoch_i)
+                if validation_rate > 0.0:
+                    writer.add_scalars('Accuracy', {
+                        'Validation': accuracy_test}, epoch_i)
+                    writer.add_scalars('Loss', {
+                        'Validation': average_test_loss}, epoch_i)
+
+            print('\n')
+
+    def train_epoch(self, training_dataloader):
         r""" Train epoch
 
         Args:
@@ -191,6 +230,7 @@ class Linker(Module):
 
         # Reset the total loss for this epoch.
         epoch_loss = 0
+        accuracy_train = 0
 
         self.train()
 
@@ -223,22 +263,13 @@ class Linker(Module):
             self.optimizer.step()
             self.scheduler.step()
 
-        avg_train_loss = epoch_loss / len(training_dataloader)
-        print("Average Loss on train dataset : ", avg_train_loss)
-
-        if checkpoint:
-            self.__checkpoint_save(
-                path=os.path.join("Output", 'linker' + datetime.today().strftime('%d-%m_%H-%M') + '.pt'))
-
-        if validate:
-            with torch.no_grad():
-                accuracy, average_test_loss = self.eval_epoch(validation_dataloader, self.cross_entropy_loss)
-                print("Average Loss on test dataset : ", average_test_loss)
-                print("Average Accuracy on test dataset : ", accuracy)
+            pred_axiom_links = torch.argmax(logits_predictions, dim=3)
+            accuracy_train += mesure_accuracy(batch_true_links, pred_axiom_links)
 
-        print('\n')
+        avg_train_loss = epoch_loss / len(training_dataloader)
+        avg_accuracy_train = accuracy_train / len(training_dataloader)
 
-        return accuracy, avg_train_loss
+        return avg_accuracy_train, avg_train_loss
 
     def predict(self, categories, sents_embedding, sents_mask=None):
         r"""Prediction from categories output by BERT and hidden_state from BERT
@@ -315,14 +346,12 @@ class Linker(Module):
         """
         accuracy_average = 0
         loss_average = 0
-        compt = 0
         for step, batch in enumerate(dataloader):
-            compt += 1
             accuracy, loss = self.eval_batch(batch, cross_entropy_loss)
             accuracy_average += accuracy
             loss_average += loss
 
-        return accuracy_average / compt, loss_average / compt
+        return accuracy_average / len(dataloader), loss_average / len(dataloader)
 
     def load_weights(self, model_file):
         print("#" * 15)
diff --git a/train.py b/train.py
index 4684d3b7fb816ceb1b04b37aaa40add606db6871..571777ac750eef16c30044cddd4a203abf57e522 100644
--- a/train.py
+++ b/train.py
@@ -21,4 +21,4 @@ print("Linker")
 linker = Linker(supertagger)
 linker = linker.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
 print("Linker Training")
-linker.train_linker(df_axiom_links, validation_rate=0.1, epochs=epochs, batch_size=batch_size, checkpoint=True, validate=True)
+linker.train_linker(df_axiom_links, validation_rate=0.1, epochs=epochs, batch_size=batch_size, checkpoint=True)
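A minimal usage sketch of the new tensorboard flag; the supertagger and df_axiom_links objects are assumed to be prepared as in train.py, and the import path and log-directory layout follow output_create_dir above:

    import torch
    from Linker.Linker import Linker

    linker = Linker(supertagger)
    linker = linker.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))

    # With validation_rate > 0, a 'Validation' curve is logged next to 'Train'
    # in the same Accuracy and Loss plots.
    linker.train_linker(df_axiom_links, validation_rate=0.1, epochs=20, batch_size=32,
                        checkpoint=True, tensorboard=True)

    # Events are written under TensorBoard/Training_<day-month_hour-minute>/logs
    # and can be inspected with:  tensorboard --logdir TensorBoard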