diff --git a/Configuration/Configuration.py b/Configuration/Configuration.py
index 9be78296b0829ec5cd284268e9be8fc865ff988f..12a4b5f29755681ec929e73496d9bd3921fc9fc4 100644
--- a/Configuration/Configuration.py
+++ b/Configuration/Configuration.py
@@ -13,7 +13,6 @@ version = config["VERSION"]
 
 datasetConfig = config["DATASET_PARAMS"]
 modelEncoderConfig = config["MODEL_ENCODER"]
-modelDecoderConfig = config["MODEL_DECODER"]
 modelLinkerConfig = config["MODEL_LINKER"]
 modelTrainingConfig = config["MODEL_TRAINING"]
 
diff --git a/Configuration/config.ini b/Configuration/config.ini
index ea8dd6979eeed3bf6cbd6f77724329ce65273c9f..ed41d1ea5f9e24f830c2a9e77e5e4f7cda1ece12 100644
--- a/Configuration/config.ini
+++ b/Configuration/config.ini
@@ -4,24 +4,17 @@ transformers = 4.16.2
 [DATASET_PARAMS]
 symbols_vocab_size=26
 atom_vocab_size=17
-max_len_sentence=109
+max_len_sentence=266
 max_atoms_in_sentence=1250
 max_atoms_in_one_type=510
 
 [MODEL_ENCODER]
 dim_encoder = 768
 
-[MODEL_DECODER]
-nhead=4
-num_layers=1
-dropout=0.1
-dim_feedforward=512
-layer_norm_eps=1e-5
-
 [MODEL_LINKER]
-dim_embedding_atoms=256
+dim_cat_out=256
+dim_intermediate_FFN=128
 dim_pre_sinkhorn_transfo=32
-dim_polarity_transfo=256
 dropout=0.1
 sinkhorn_iters=3
 
diff --git a/Linker/Linker.py b/Linker/Linker.py
index 611575d7140c48516b4ce3f2cbd55bf995e04610..1b3db270743dcc528a5631f062589eafa10e9c90 100644
--- a/Linker/Linker.py
+++ b/Linker/Linker.py
@@ -5,8 +5,9 @@ import datetime
 
 import time
 
+import torch
 import torch.nn.functional as F
-from torch.nn import Sequential, LayerNorm, Dropout, Embedding
+from torch.nn import Sequential, LayerNorm, Module, Linear
 from torch.optim import AdamW
 from torch.utils.data import TensorDataset, random_split
 from torch.utils.tensorboard import SummaryWriter
@@ -14,12 +15,13 @@ from tqdm import tqdm
 
 from Configuration import Configuration
 from Linker.AtomTokenizer import AtomTokenizer
-from Linker.MHA import AttentionDecoderLayer
+from Linker.PositionEncoding import PositionalEncoding
 from Linker.Sinkhorn import sinkhorn_fn_no_exp as sinkhorn
 from Linker.atom_map import atom_map, atom_map_redux
 from Linker.eval import mesure_accuracy, SinkhornLoss
-from Linker.utils_linker import FFN, get_axiom_links, get_GOAL, get_pos_idx
-from Supertagger import *
+from Linker.utils_linker import FFN, get_axiom_links, get_GOAL, get_pos_idx, get_neg_idx, get_num_atoms_batch
+from Supertagger import SuperTagger
+from utils import pad_sequence
 
 
 def format_time(elapsed):
@@ -50,16 +52,15 @@ class Linker(Module):
     def __init__(self, supertagger_path_model):
         super(Linker, self).__init__()
 
-        self.dim_embedding_atoms = int(Configuration.modelLinkerConfig['dim_embedding_atoms'])
-        self.nhead = int(Configuration.modelDecoderConfig['nhead'])
+        dim_encoder = int(Configuration.modelEncoderConfig['dim_encoder'])
+        self.dim_cat_out = int(Configuration.modelLinkerConfig['dim_cat_out'])
         dim_pre_sinkhorn_transfo = int(Configuration.modelLinkerConfig['dim_pre_sinkhorn_transfo'])
-        dim_polarity_transfo = int(Configuration.modelLinkerConfig['dim_polarity_transfo'])
+        dim_intermediate_FFN = int(Configuration.modelLinkerConfig['dim_intermediate_FFN'])
         self.sinkhorn_iters = int(Configuration.modelLinkerConfig['sinkhorn_iters'])
+        self.max_len_sentence = int(Configuration.datasetConfig['max_len_sentence'])
         self.max_atoms_in_sentence = int(Configuration.datasetConfig['max_atoms_in_sentence'])
         self.max_atoms_in_one_type = int(Configuration.datasetConfig['max_atoms_in_one_type'])
-        atom_vocab_size = int(Configuration.datasetConfig['atom_vocab_size'])
         learning_rate = float(Configuration.modelTrainingConfig['learning_rate'])
-        self.dropout = Dropout(0.1)
         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
         supertagger = SuperTagger()
@@ -71,18 +72,18 @@ class Linker(Module):
         self.padding_id = self.atom_map['[PAD]']
         self.atoms_tokenizer = AtomTokenizer(atom_map, self.max_atoms_in_sentence)
         self.inverse_map = self.atoms_tokenizer.inverse_atom_map
-        self.atoms_embedding = Embedding(num_embeddings=atom_vocab_size, embedding_dim=self.dim_embedding_atoms,
-                                         padding_idx=self.padding_id,
-                                         scale_grad_by_freq=True)
 
-        self.linker_encoder = AttentionDecoderLayer()
+        self.position_encoding = PositionalEncoding(dim_encoder, max_len=self.max_atoms_in_sentence)
+
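+        # the word embedding and the positionally-encoded category embedding are concatenated
+        # (2 * dim_encoder) and projected down to dim_cat_out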
+        dim_cat = dim_encoder * 2
+        self.linker_encoder = Linear(dim_cat, self.dim_cat_out, bias=False)
 
         self.pos_transformation = Sequential(
-            FFN(self.dim_embedding_atoms, dim_polarity_transfo, 0.1, d_out=dim_pre_sinkhorn_transfo),
+            FFN(self.dim_cat_out, dim_intermediate_FFN, 0.1, d_out=dim_pre_sinkhorn_transfo),
             LayerNorm(dim_pre_sinkhorn_transfo, eps=1e-12)
         )
         self.neg_transformation = Sequential(
-            FFN(self.dim_embedding_atoms, dim_polarity_transfo, 0.1, d_out=dim_pre_sinkhorn_transfo),
+            FFN(self.dim_cat_out, dim_intermediate_FFN, 0.1, d_out=dim_pre_sinkhorn_transfo),
             LayerNorm(dim_pre_sinkhorn_transfo, eps=1e-12)
         )
 
@@ -108,6 +109,8 @@ class Linker(Module):
         atoms_batch, atoms_polarity_batch = get_GOAL(self.max_atoms_in_sentence, df_axiom_links["Z"])
         atoms_batch_tokenized = self.atoms_tokenizer.convert_batchs_to_ids(atoms_batch)
 
+        num_atoms_per_word = get_num_atoms_batch(df_axiom_links["Z"], self.max_len_sentence)
+
         pos_idx = get_pos_idx(atoms_batch_tokenized, atoms_polarity_batch, self.max_atoms_in_one_type)
         neg_idx = get_neg_idx(atoms_batch_tokenized, atoms_polarity_batch, self.max_atoms_in_one_type)
 
@@ -116,7 +119,7 @@ class Linker(Module):
         truth_links_batch = truth_links_batch.permute(1, 0, 2)
 
         # Construction tensor dataset
-        dataset = TensorDataset(atoms_batch_tokenized, pos_idx, neg_idx, truth_links_batch, sentences_tokens,
+        dataset = TensorDataset(num_atoms_per_word, pos_idx, neg_idx, truth_links_batch, sentences_tokens,
                                 sentences_mask)
 
         if validation_rate > 0.0:
@@ -132,31 +135,33 @@ class Linker(Module):
         print("End preprocess Data")
         return training_dataloader, validation_dataloader
 
-    def make_decoder_mask(self, atoms_token):
-        decoder_attn_mask = torch.ones_like(atoms_token, dtype=torch.float64, device=self.device)
-        decoder_attn_mask[atoms_token.eq(self.padding_id)] = 0.0
-        return decoder_attn_mask.unsqueeze(1).repeat(1, atoms_token.shape[1], 1).repeat(self.nhead, 1, 1)
-
-    def forward(self, atoms_batch_tokenized, batch_pos_idx, batch_neg_idx, sents_embedding, sents_mask=None):
+    def forward(self, batch_num_atoms_per_word, batch_pos_idx, batch_neg_idx, sents_embedding, cat_embedding):
         r"""
         Args:
-            atoms_batch_tokenized : (batch_size, max_atoms_in_one_sentence) flattened categories
+            batch_num_atoms_per_word : (batch_size, len_sentence) number of atoms in each word's category
             batch_pos_idx : (batch_size, atom_vocab_size, max atoms in one type) indices of the positive atoms for each atom type
-            batch_neg_idx :
+            batch_neg_idx : (batch_size, atom_vocab_size, max atoms in one type) indices of the negative atoms for each atom type
             sents_embedding : (batch_size, len_sentence, dim_encoder) output of BERT for context
-            sents_mask : mask from BERT tokenizer
+            cat_embedding : (batch_size, len_sentence, dim_encoder) output of BERT for cat embedding
         Returns:
             link_weights : (atom_vocab_size, batch_size, max_atoms_in_one_cat, max_atoms_in_one_cat) log probabilities
         """
-
-        # atoms embedding
-        atoms_embedding = self.atoms_embedding(atoms_batch_tokenized)
-
-        # MHA ou LSTM avec sortie de BERT
-        sents_mask = sents_mask.unsqueeze(1).repeat(self.nhead, self.max_atoms_in_sentence, 1).to(torch.float64)
-        atoms_encoding = self.linker_encoder(atoms_embedding, sents_embedding, sents_mask,
-                                             self.make_decoder_mask(atoms_batch_tokenized))
-
+        # repeat each word embedding once per atom in that word's category
+        sents_embedding_repeat = pad_sequence(
+            [torch.repeat_interleave(input=sents_embedding[i], repeats=batch_num_atoms_per_word[i], dim=0)
+             for i in range(len(sents_embedding))], max_len=self.max_atoms_in_sentence, padding_value=0)
+        cat_embedding_repeat = pad_sequence(
+            [torch.repeat_interleave(input=cat_embedding[i], repeats=batch_num_atoms_per_word[i], dim=0)
+             for i in range(len(cat_embedding))], max_len=self.max_atoms_in_sentence, padding_value=0)
+
+        # positional encoding applied to the repeated category embeddings (one position per atom)
+        position_encoding = self.position_encoding(cat_embedding_repeat)
+
+        # concatenate word and positional category encodings, then project to dim_cat_out
+        atoms_sentences_encoding = torch.cat([sents_embedding_repeat, position_encoding], dim=2)
+        atoms_encoding = self.linker_encoder(atoms_sentences_encoding)
+
+        # linking per atom type
         link_weights = []
         for atom_type in self.sub_atoms_type_list:
             pos_encoding = self.make_sinkhorn_inputs(atoms_encoding, batch_pos_idx, atom_type)
@@ -241,7 +246,7 @@ class Linker(Module):
         with tqdm(training_dataloader, unit="batch") as tepoch:
             for batch in tepoch:
                 # Unpack this training batch from our dataloader
-                batch_atoms = batch[0].to(self.device)
+                batch_num_atoms = batch[0].to(self.device)
                 batch_pos_idx = batch[1].to(self.device)
                 batch_neg_idx = batch[2].to(self.device)
                 batch_true_links = batch[3].to(self.device)
@@ -251,11 +256,11 @@ class Linker(Module):
                 self.optimizer.zero_grad()
 
                 # get sentence embedding from BERT which is already trained
-                logits, sentences_embedding = self.Supertagger.forward(batch_sentences_tokens, batch_sentences_mask)
+                output = self.Supertagger.forward(batch_sentences_tokens, batch_sentences_mask)
 
                 # Run the linker on the category predictions
-                logits_predictions = self(batch_atoms, batch_pos_idx, batch_neg_idx, sentences_embedding,
-                                          batch_sentences_mask)
+                logits_predictions = self(batch_num_atoms, batch_pos_idx, batch_neg_idx, output['word_embeding'],
+                                          output['last_hidden_state'])
 
                 linker_loss = self.cross_entropy_loss(logits_predictions, batch_true_links)
                 # Perform a backward pass to calculate the gradients.
@@ -279,21 +284,20 @@ class Linker(Module):
         return avg_train_loss, avg_accuracy_train, training_time
 
     def eval_batch(self, batch):
-        batch_atoms = batch[0].to(self.device)
+        batch_num_atoms = batch[0].to(self.device)
         batch_pos_idx = batch[1].to(self.device)
         batch_neg_idx = batch[2].to(self.device)
         batch_true_links = batch[3].to(self.device)
         batch_sentences_tokens = batch[4].to(self.device)
         batch_sentences_mask = batch[5].to(self.device)
 
-        logits, sentences_embedding = self.Supertagger.forward(batch_sentences_tokens, batch_sentences_mask)
-        logits_axiom_links_pred = self(batch_atoms, batch_pos_idx, batch_neg_idx, sentences_embedding,
-                                       batch_sentences_mask)
-        axiom_links_pred = torch.argmax(logits_axiom_links_pred, dim=3)
+        output = self.Supertagger.forward(batch_sentences_tokens, batch_sentences_mask)
+        logits_predictions = self(batch_num_atoms, batch_pos_idx, batch_neg_idx, output['word_embeding'],
+                                  output['last_hidden_state'])
+        axiom_links_pred = torch.argmax(logits_predictions, dim=3)
 
         print('\n')
         print("Tokens de la phrase : ", batch_sentences_tokens[1])
-        print("Atoms dans la phrase : ", (batch_atoms[1][:50]))
         print("Polarités + des atoms de la phrase : ", batch_pos_idx[1][:50])
         print("Polarités - des atoms de la phrase : ", batch_neg_idx[1][:50])
         print("Les vrais liens de la catégorie n : ", batch_true_links[1][2][:100])
@@ -301,7 +305,7 @@ class Linker(Module):
         print('\n')
 
         accuracy = mesure_accuracy(batch_true_links, axiom_links_pred)
-        loss = self.cross_entropy_loss(logits_axiom_links_pred, batch_true_links)
+        loss = self.cross_entropy_loss(logits_predictions, batch_true_links)
 
         return loss, accuracy
 
@@ -369,6 +373,6 @@ class Linker(Module):
         """
 
         return torch.stack([torch.stack([bsd_tensor.select(0, index=i).select(0, index=int(atom)).to(self.device)
-                                         if atom != -1 else torch.zeros(self.dim_embedding_atoms, device=self.device)
+                                         if atom != -1 else torch.zeros(self.dim_cat_out, device=self.device)
                                          for atom in sentence])
                             for i, sentence in enumerate(positional_ids[:, atom_map_redux[atom_type], :])])
diff --git a/Linker/MHA.py b/Linker/MHA.py
deleted file mode 100644
index efc9e75cada3fd926853ade18fbb4ea1c61419d7..0000000000000000000000000000000000000000
--- a/Linker/MHA.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from torch import Tensor
-from torch.nn import (Dropout, LayerNorm, Module, MultiheadAttention)
-
-from Configuration import Configuration
-from Linker.utils_linker import FFN
-
-
-class AttentionDecoderLayer(Module):
-    r"""TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
-    This standard decoder layer is based on the paper "Attention Is All You Need".
-    Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
-    Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
-    Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
-    in a different way during application.
-
-    Args:
-        dim_model: the number of expected features in the input (required).
-        nhead: the number of heads in the multiheadattention models (required).
-        dim_feedforward: the dimension of the feedforward network model (default=2048).
-        dropout: the dropout value (default=0.1).
-        activation: the activation function of the intermediate layer, can be a string
-            ("relu" or "gelu") or a unary callable. Default: relu
-        layer_norm_eps: the eps value in layer normalization components (default=1e-5).
-        batch_first: If ``True``, then the input and output tensors are provided
-            as (batch, seq, feature). Default: ``False``.
-        norm_first: if ``True``, layer norm is done prior to self attention, multihead
-            attention and feedforward operations, respectivaly. Otherwise it's done after.
-            Default: ``False`` (after).
-    """
-
-    def __init__(self):
-        super(AttentionDecoderLayer, self).__init__()
-
-        # init params
-        dim_encoder = int(Configuration.modelEncoderConfig['dim_encoder'])
-        dim_decoder = int(Configuration.modelLinkerConfig['dim_embedding_atoms'])
-        nhead = int(Configuration.modelDecoderConfig['nhead'])
-        dropout = float(Configuration.modelDecoderConfig['dropout'])
-        dim_feedforward = int(Configuration.modelDecoderConfig['dim_feedforward'])
-        layer_norm_eps = float(Configuration.modelDecoderConfig['layer_norm_eps'])
-
-        # layers
-        self.dropout = Dropout(dropout)
-        self.self_attn = MultiheadAttention(dim_decoder, nhead, dropout=dropout,
-                                            kdim=dim_decoder, vdim=dim_decoder)
-        self.norm1 = LayerNorm(dim_decoder, eps=layer_norm_eps)
-        self.multihead_attn = MultiheadAttention(dim_decoder, nhead, dropout=dropout,
-                                                 kdim=dim_encoder, vdim=dim_encoder)
-        self.norm2 = LayerNorm(dim_decoder, eps=layer_norm_eps)
-        self.ffn = FFN(d_model=dim_decoder, d_ff=dim_feedforward, dropout=dropout)
-        self.norm3 = LayerNorm(dim_decoder, eps=layer_norm_eps)
-
-    def forward(self, atoms_embedding, sents_embedding, encoder_mask,
-                decoder_mask):
-        r"""Pass the inputs through the decoder layer.
-
-        Args:
-            atoms_embedding: the sequence to the decoder layer (required).
-            sents_embedding: the sequence from the last layer of the encoder (required)
-            encoder_mask
-            decoder_mask
-        """
-        atoms_embedding = atoms_embedding.permute(1, 0, 2)
-        sents_embedding = sents_embedding.permute(1, 0, 2)
-
-        x = atoms_embedding
-        x = self.norm1(x + self._mask_mha_block(atoms_embedding, decoder_mask))
-        x = self.norm2(x + self._mha_block(x, sents_embedding, encoder_mask))
-        x = self.norm3(x + self._ff_block(x))
-
-        return x.permute(1, 0, 2)
-
-    # self-attention block
-    def _mask_mha_block(self, x, decoder_mask):
-        x = self.self_attn(x, x, x, attn_mask=decoder_mask)[0]
-        return x
-
-    # multihead attention block
-    def _mha_block(self, x, sents_embs, encoder_mask):
-        x = self.multihead_attn(x, sents_embs, sents_embs, attn_mask=encoder_mask)[0]
-        return x
-
-    # feed forward block
-    def _ff_block(self, x):
-        x = self.ffn.forward(x)
-        return x
diff --git a/Linker/PositionEncoding.py b/Linker/PositionEncoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c7d11c4fd4cfacad0a9cc422349e421f624f9c2
--- /dev/null
+++ b/Linker/PositionEncoding.py
@@ -0,0 +1,25 @@
+import torch
+from torch import nn
+import math
+
+
+class PositionalEncoding(nn.Module):
+
+    def __init__(self, d_model, dropout=0.1, max_len=5000):
+        super().__init__()
+        self.dropout = nn.Dropout(p=dropout)
+
+        position = torch.arange(max_len).unsqueeze(1)
+        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
+        # table of sinusoidal encodings, stored batch-first: (1, max_len, d_model)
+        pe = torch.zeros(1, max_len, d_model)
+        pe[0, :, 0::2] = torch.sin(position * div_term)
+        pe[0, :, 1::2] = torch.cos(position * div_term)
+        self.register_buffer('pe', pe)
+
+    def forward(self, x):
+        """
+        Args:
+            x: Tensor, shape [batch_size, seq_len, embedding_dim]
+        """
+        # select one encoding per sequence position; inputs are batch-first
+        x = x + self.pe[:, :x.size(1)]
+        return self.dropout(x)
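+
+
+# Shape sanity check (comment-only sketch): Linker instantiates this module with
+# d_model = dim_encoder (768 in config.ini) and max_len = max_atoms_in_sentence (1250),
+# and feeds it batch-first tensors, e.g.
+#     pe = PositionalEncoding(d_model=768, max_len=1250)
+#     out = pe(torch.zeros(2, 10, 768))   # output keeps the input shape (2, 10, 768)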
diff --git a/Linker/__init__.py b/Linker/__init__.py
index 92c67b3fcaa9d1121107b979ba57a5bbeba043ea..3dee6a7ab702ee2ae8df692ab8d544a6a12afe8f 100644
--- a/Linker/__init__.py
+++ b/Linker/__init__.py
@@ -1,3 +1,4 @@
 from .Linker import Linker
 from .atom_map import atom_map
-from .AtomTokenizer import AtomTokenizer
\ No newline at end of file
+from .AtomTokenizer import AtomTokenizer
+from .PositionEncoding import PositionalEncoding
\ No newline at end of file
diff --git a/Linker/utils_linker.py b/Linker/utils_linker.py
index a5f0ff261ed94a2cb79908592051afc1e5c9ec27..c34c6bfc26b7090721f980ac55d25e8d04fda046 100644
--- a/Linker/utils_linker.py
+++ b/Linker/utils_linker.py
@@ -23,19 +23,12 @@ class FFN(Module):
         return self.ffn(x)
 
 
-#########################################################################################
 ################################ Regex ########################################
-#########################################################################################
-
 regex_categories_axiom_links = r'\w+\(\d+,(?:((?R))|(\w+))*,?(?:((?R))|(\w+))*\)'
 regex_categories = r'\w+\(\d+,(?:((?R))|(\w+))*,?(?:((?R))|(\w+))*\)'
 
 
-#########################################################################################
 ################################ Liste des atoms avec _i ########################################
-#########################################################################################
-
-
 def get_axiom_links(max_atoms_in_one_type, sub_atoms_type_list, atoms_polarity, batch_axiom_links):
     r"""
     Args:
@@ -107,9 +100,7 @@ def get_atoms_links_batch(category_batch):
     return batch
 
 
-#########################################################################################
 ################################ Liste des atoms ########################################
-#########################################################################################
 
 def category_to_atoms(category, categories_to_atoms):
     r"""
@@ -151,9 +142,50 @@ def get_atoms_batch(category_batch):
     return batch
 
 
-#########################################################################################
+################################ Nombre d'atomes par mot ########################################
+
+def category_to_num_atoms(category, categories_to_atoms):
+    r"""
+    Args:
+        category : str of kind AtomCat | CategoryCat(dr or dl)
+        categories_to_atoms : int, running count of atoms (accumulator)
+    Returns:
+        int, the number of atoms inside the category
+    """
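+    # flags whether the category string is itself one of the atomic types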
+    res = [(category == atom_type) for atom_type in atom_map.keys()]
+    if category.startswith("GOAL:"):
+        word, cat = category.split(':')
+        return category_to_num_atoms(cat, 0)
+    elif category == "let":
+        return 0
+    elif True in res:
+        return 1
+    else:
+        category_cut = regex.match(regex_categories, category).groups()
+        category_cut = [cat for cat in category_cut if cat is not None]
+        for cat in category_cut:
+            categories_to_atoms += category_to_num_atoms(cat, 0)
+        return categories_to_atoms
+
+
+def get_num_atoms_batch(category_batch, max_len_sentence):
+    r"""
+    Args:
+        category_batch : (batch_size, len_sentence) categories of the words of each sentence
+        max_len_sentence : maximum sentence length, used for padding
+    Returns:
+        (batch_size, max_len_sentence) tensor with the number of atoms of each word's category
+    """
+    batch = []
+    for sentence in category_batch:
+        num_atoms_sentence = []
+        for category in sentence:
+            num_atoms_sentence.append(category_to_num_atoms(category, 0))
+        batch.append(torch.as_tensor(num_atoms_sentence))
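+    # pad with 0 so every sentence yields exactly max_len_sentence counts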
+    return pad_sequence(batch, max_len=max_len_sentence, padding_value=0)
+
+
 ################################ Polarity ###############################################
-#########################################################################################
 
 def category_to_atoms_polarity(category, polarity):
     r"""
@@ -242,10 +274,7 @@ def find_pos_neg_idexes(atoms_batch):
     return list_batch
 
 
-#########################################################################################
 ################################ GOAL ###############################################
-#########################################################################################
-
 
 def get_GOAL(max_atoms_in_sentence, categories_batch):
     polarities = find_pos_neg_idexes(categories_batch)
@@ -272,23 +301,25 @@ def get_GOAL(max_atoms_in_sentence, categories_batch):
                                      max_len=max_atoms_in_sentence, padding_value=0)
 
 
-#########################################################################################
 ################################ Prepare encoding ###############################################
-#########################################################################################
 
 def get_pos_idx(atoms_batch_tokenized, atoms_polarity_batch, max_atoms_in_one_type):
     inverse_atom_map = {v: k for k, v in atom_map.items()}
-    pos_idx = [pad_sequence([torch.as_tensor([i for i, x in enumerate(sentence) if bool(re.match(r"" + atom_type + "_?\w*", inverse_atom_map[int(atoms_batch_tokenized[s_idx][i])])) and
-                                                     atoms_polarity_batch[s_idx][i]]) for s_idx, sentence in enumerate(atoms_batch_tokenized)], max_len=max_atoms_in_one_type//2, padding_value=-1)
-                for atom_type in list(atom_map_redux.keys())]
+    pos_idx = [pad_sequence([torch.as_tensor([i for i, x in enumerate(sentence)
+                                              if bool(re.match(atom_type + r"_?\w*", inverse_atom_map[int(x)]))
+                                              and atoms_polarity_batch[s_idx][i]])
+                             for s_idx, sentence in enumerate(atoms_batch_tokenized)],
+                            max_len=max_atoms_in_one_type // 2, padding_value=-1)
+               for atom_type in list(atom_map_redux.keys())]
 
     return torch.stack(pos_idx).permute(1, 0, 2)
 
 
 def get_neg_idx(atoms_batch_tokenized, atoms_polarity_batch, max_atoms_in_one_type):
     inverse_atom_map = {v: k for k, v in atom_map.items()}
-    neg_idx = [pad_sequence([torch.as_tensor([i for i, x in enumerate(sentence) if bool(re.match(r"" + atom_type + "_?\w*", inverse_atom_map[int(atoms_batch_tokenized[s_idx][i])])) and
-                                                    not atoms_polarity_batch[s_idx][i]]) for s_idx, sentence in enumerate(atoms_batch_tokenized)], max_len=max_atoms_in_one_type//2, padding_value=-1)
-                for atom_type in list(atom_map_redux.keys())]
+    neg_idx = [pad_sequence([torch.as_tensor([i for i, x in enumerate(sentence)
+                                              if bool(re.match(atom_type + r"_?\w*", inverse_atom_map[int(x)]))
+                                              and not atoms_polarity_batch[s_idx][i]])
+                             for s_idx, sentence in enumerate(atoms_batch_tokenized)],
+                            max_len=max_atoms_in_one_type // 2, padding_value=-1)
+               for atom_type in list(atom_map_redux.keys())]
 
     return torch.stack(neg_idx).permute(1, 0, 2)
diff --git a/Supertagger b/Supertagger
index eeb4774c071e03f460f48798ab8d6820395825c9..7b10151214babc2c3f1bc474eb9bec25458a8347 160000
--- a/Supertagger
+++ b/Supertagger
@@ -1 +1 @@
-Subproject commit eeb4774c071e03f460f48798ab8d6820395825c9
+Subproject commit 7b10151214babc2c3f1bc474eb9bec25458a8347