From f10db47016d40b7b5fabb3964c407b2e4a60b24f Mon Sep 17 00:00:00 2001
From: Caroline DE POURTALES <cdepourt@montana.irit.fr>
Date: Tue, 31 May 2022 16:14:47 +0200
Subject: [PATCH] concatenation works

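Raise max_len_sentence from 266 to 290, move the supertagger model onto the
Linker's device, strip surrounding whitespace from input sentences before
tokenization, and delete the standalone main.py example script.
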
---
 Configuration/config.ini |  2 +-
 Linker/Linker.py         |  3 ++-
 main.py                  | 23 -----------------------
 3 files changed, 3 insertions(+), 25 deletions(-)
 delete mode 100644 main.py

diff --git a/Configuration/config.ini b/Configuration/config.ini
index 53d2d9a..249f474 100644
--- a/Configuration/config.ini
+++ b/Configuration/config.ini
@@ -3,7 +3,7 @@ transformers = 4.16.2
 
 [DATASET_PARAMS]
 symbols_vocab_size=26
-max_len_sentence=266
+max_len_sentence=290
 max_atoms_in_sentence=1250
 max_atoms_in_one_type=510
 
diff --git a/Linker/Linker.py b/Linker/Linker.py
index 1b3db27..4b298f8 100644
--- a/Linker/Linker.py
+++ b/Linker/Linker.py
@@ -66,6 +66,7 @@ class Linker(Module):
         supertagger = SuperTagger()
         supertagger.load_weights(supertagger_path_model)
         self.Supertagger = supertagger
+        self.Supertagger.model.to(self.device)
 
         self.atom_map = atom_map
         self.sub_atoms_type_list = list(atom_map_redux.keys())
@@ -103,7 +104,7 @@ class Linker(Module):
             the training dataloader and the validation dataloader. They contain the list of atoms, their polarities, the axiom links, the tokenized sentences, and the sentence masks
         """
         print("Start preprocess Data")
-        sentences_batch = df_axiom_links["X"].tolist()
+        sentences_batch = df_axiom_links["X"].str.strip().tolist()
         sentences_tokens, sentences_mask = self.Supertagger.sent_tokenizer.fit_transform_tensors(sentences_batch)
 
         atoms_batch, atoms_polarity_batch = get_GOAL(self.max_atoms_in_sentence, df_axiom_links["Z"])
diff --git a/main.py b/main.py
deleted file mode 100644
index 14d3fc0..0000000
--- a/main.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import torch.nn.functional as F
-import torch
-from Configuration import Configuration
-from Linker import *
-from Supertagger import *
-
-max_atoms_in_sentence = int(Configuration.datasetConfig['max_atoms_in_sentence'])
-
-# categories tagger
-supertagger = SuperTagger()
-supertagger.load_weights("models/model_supertagger.pt")
-
-# axiom linker
-linker = Linker(supertagger)
-linker.load_weights("models/linker.pt")
-
-# predict categories and links for this sentence
-sentence = ["le chat est noir"]
-sents_tokenized, sents_mask = supertagger.sent_tokenizer.fit_transform_tensors(sentence)
-logits, sentence_embedding = supertagger.foward(sents_tokenized, sents_mask)
-categories = torch.argmax(F.softmax(logits, dim=2), dim=2)
-
-axiom_links = linker.predict(categories, sentence_embedding, sents_mask)
-- 
GitLab