diff --git a/.gitignore b/.gitignore
index ac5aa4b67363e07a02541935c18acf2a302b4141..72fcf7413a2fe552e69233366f5ce86070330bf2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
-deepgrail_Tagger
+SuperTagger
 Utils/silver
 Utils/gold
 .idea
diff --git a/Linker/Linker.py b/Linker/Linker.py
index 816b5893e45c3eddae25f9b99601482ab0a1b909..ff37ef83fdeec183faf6c6e24fb110c552de9d43 100644
--- a/Linker/Linker.py
+++ b/Linker/Linker.py
@@ -6,7 +6,6 @@ import time
 
 import torch
 import torch.nn.functional as F
-from torch import Module
 from torch.nn import Sequential, LayerNorm, Dropout
 from torch.optim import AdamW
 from torch.utils.data import TensorDataset, random_split
@@ -23,7 +22,7 @@ from Linker.atom_map import atom_map
 from Linker.eval import mesure_accuracy, SinkhornLoss
 from Linker.utils_linker import find_pos_neg_idexes, get_atoms_batch, FFN, get_axiom_links, get_pos_encoding_for_s_idx, \
     get_neg_encoding_for_s_idx
-from Supertagger import *
+from SuperTagger import *
 from utils import pad_sequence
 
 def format_time(elapsed):
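
A side note on the import dropped from Linker/Linker.py above: `Module` is not re-exported from the top-level `torch` namespace, so `from torch import Module` raises an ImportError before any Linker code runs; the base class lives in `torch.nn`, which the file's remaining `torch.nn` imports already use. A minimal sketch of the distinction, assuming only a standard PyTorch install (`TinyFFN` is a hypothetical example class, not part of this repository):

```python
import torch
from torch.nn import Linear, Module  # where the base class actually lives

# The line this diff removes would fail at import time:
#   from torch import Module  ->  ImportError: cannot import name 'Module'

class TinyFFN(Module):
    """Hypothetical one-layer network, only to show the correct import."""
    def __init__(self, dim: int):
        super().__init__()
        self.proj = Linear(dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(self.proj(x))

print(TinyFFN(8)(torch.zeros(2, 8)).shape)  # torch.Size([2, 8])
```
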
diff --git a/train.py b/train.py
index 4c6645ea9b8e93faa610e8e10a9b6387f7ea1f02..50354a06864fa0665d0b63927af58c976c090dd8 100644
--- a/train.py
+++ b/train.py
@@ -5,13 +5,13 @@ from utils import read_csv_pgbar
 
 torch.cuda.empty_cache()
 batch_size = int(Configuration.modelTrainingConfig['batch_size'])
-nb_sentences = batch_size * 40
+nb_sentences = batch_size * 2
 epochs = int(Configuration.modelTrainingConfig['epoch'])
 
 file_path_axiom_links = 'Datasets/gold_dataset_links.csv'
 df_axiom_links = read_csv_pgbar(file_path_axiom_links, nb_sentences)
 
 print("Linker")
-linker = Linker("models/model_supertagger.pt")
-print("Linker Training")
+linker = Linker("models/flaubert_super_98%_V2_50e.pt")
+print("\nLinker Training\n\n")
 linker.train_linker(df_axiom_links, validation_rate=0.1, epochs=epochs, batch_size=batch_size, checkpoint=False, tensorboard=True)
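
For scale, the train.py change shrinks the slice read from gold_dataset_links.csv from 40 batches' worth of sentences to 2, a smoke-test-sized run, and with the `validation_rate=0.1` passed to `train_linker` above only a handful of sentences are held out. A back-of-the-envelope sketch, using a hypothetical `batch_size` of 32 (the real value comes from `Configuration.modelTrainingConfig`):

```python
# Hypothetical numbers; the real batch_size is read from the config file.
batch_size = 32

old_nb_sentences = batch_size * 40   # 1280 sentences before this diff
new_nb_sentences = batch_size * 2    # 64 sentences after it

validation_rate = 0.1                # as passed to train_linker above
nb_validation = int(new_nb_sentences * validation_rate)  # 6 held out
nb_training = new_nb_sentences - nb_validation           # 58 for training

print(old_nb_sentences, new_nb_sentences, nb_training, nb_validation)
```
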