Commit f10db470 authored by Caroline DE POURTALES

concatenation works

parent 92f65c05
2 merge requests: !6 Linker with transformer, !5 Linker with transformer
@@ -3,7 +3,7 @@ transformers = 4.16.2
 [DATASET_PARAMS]
 symbols_vocab_size=26
-max_len_sentence=266
+max_len_sentence=290
 max_atoms_in_sentence=1250
 max_atoms_in_one_type=510
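These dataset parameters are read through the project's Configuration module, as the prediction script added later in this commit does for max_atoms_in_sentence. A minimal sketch using the same access pattern (that max_len_sentence is exposed under Configuration.datasetConfig is an assumption based on the [DATASET_PARAMS] section above):

from Configuration import Configuration

# Assumed: the [DATASET_PARAMS] values above are exposed via Configuration.datasetConfig,
# mirroring the max_atoms_in_sentence lookup in the example script below.
max_len_sentence = int(Configuration.datasetConfig['max_len_sentence'])
max_atoms_in_sentence = int(Configuration.datasetConfig['max_atoms_in_sentence'])
print(max_len_sentence, max_atoms_in_sentence)  # 290 1250 with the values above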
@@ -66,6 +66,7 @@ class Linker(Module):
         supertagger = SuperTagger()
         supertagger.load_weights(supertagger_path_model)
         self.Supertagger = supertagger
+        self.Supertagger.model.to(self.device)
 
         self.atom_map = atom_map
         self.sub_atoms_type_list = list(atom_map_redux.keys())
@@ -103,7 +104,7 @@ class Linker(Module):
             the training dataloader and the validation dataloader. They contains the list of atoms, their polarities, the axiom links, the sentences tokenized, sentence mask
         """
         print("Start preprocess Data")
 
-        sentences_batch = df_axiom_links["X"].tolist()
+        sentences_batch = df_axiom_links["X"].str.strip().tolist()
         sentences_tokens, sentences_mask = self.Supertagger.sent_tokenizer.fit_transform_tensors(sentences_batch)
         atoms_batch, atoms_polarity_batch = get_GOAL(self.max_atoms_in_sentence, df_axiom_links["Z"])
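The .str.strip() added here removes leading and trailing whitespace from each sentence before it reaches the tokenizer. A minimal standalone illustration of the difference, using made-up data rather than the real axiom-links dataframe:

import pandas as pd

# Hypothetical dataframe standing in for df_axiom_links; only the "X" column matters here.
df_axiom_links = pd.DataFrame({"X": [" le chat est noir ", "le chien dort"]})

# Previous behaviour: surrounding whitespace is kept and reaches the tokenizer
print(df_axiom_links["X"].tolist())              # [' le chat est noir ', 'le chien dort']

# Behaviour after this commit: sentences are stripped first
print(df_axiom_links["X"].str.strip().tolist())  # ['le chat est noir', 'le chien dort']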
import torch
import torch.nn.functional as F

from Configuration import Configuration
from Linker import *
from Supertagger import *

max_atoms_in_sentence = int(Configuration.datasetConfig['max_atoms_in_sentence'])

# categories tagger
supertagger = SuperTagger()
supertagger.load_weights("models/model_supertagger.pt")

# axiom linker
linker = Linker(supertagger)
linker.load_weights("models/linker.pt")

# predict categories and links for this sentence
sentence = ["le chat est noir"]
sents_tokenized, sents_mask = supertagger.sent_tokenizer.fit_transform_tensors(sentence)
logits, sentence_embedding = supertagger.foward(sents_tokenized, sents_mask)
# pick the most probable category for each token
categories = torch.argmax(F.softmax(logits, dim=2), dim=2)
# link positive and negative atoms from the predicted categories
axiom_links = linker.predict(categories, sentence_embedding, sents_mask)