Commit 40106882 authored by Julien Breton

update work on rule based approach

parent a86d6056
Showing with 613532 additions and 33 deletions
class MarkersList:
    def getLocationMarkers(self):
        return ["voirie", "voie", "territoire", "atelier", "luxembourg", "fourrière", "centre", "emplacement", "route",
                "itinéraire", "voies", "emplacements", "itinéraires", "chemins", "centres", "agglomérations",
                "endroits",
                "tronçons", "places"]
...
Source diff could not be displayed: it is too large.
Previous file content (removed by this commit):

import Levenshtein

phrase2 = "very old blue car"
phrase1 = "blue car"

distance = Levenshtein.distance(phrase1, phrase2)
print("La distance de Levenshtein est :", distance)

distance = Levenshtein.distance(phrase1, phrase2) / max(len(phrase1), len(phrase2))
print("La distance de Levenshtein normalisée est :", distance)

New file content:

from transformers import BertTokenizer, BertModel
import torch

# Load tokenizer and model
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased', output_attentions=True)

# Encode input text
inputs = tokenizer("Example sentence for BERT attention visualization.", return_tensors="pt")

# Forward pass, get attentions
outputs = model(**inputs)
attentions = outputs.attentions  # Tuple of attention tensors for each layer

# Get the token index for a word of interest, e.g., "attention"
token_id = tokenizer.convert_tokens_to_ids("attention")

# Find the positions of this token in the input sequence
token_positions = (inputs['input_ids'][0] == token_id).nonzero(as_tuple=True)[0]

# Access the attention from one of these positions, e.g., first layer (all heads)
attention_layer_head = attentions[0][0, :, token_positions[0], :]

# `attention_layer_head` now contains the attention weights from the word "attention" to all other tokens in this layer
print(attention_layer_head)
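As a quick way to read the tensor printed above, a minimal follow-up sketch (not part of the commit) that averages the selected attention row over the heads of the first layer and pairs each weight with its token string. It only reuses the tokenizer, inputs, and attention_layer_head variables defined above.

# Hedged sketch: per-token mean attention received from the "attention" position (layer 0, averaged over heads)
tokens = tokenizer.convert_ids_to_tokens(inputs['input_ids'][0].tolist())
mean_attention = attention_layer_head.mean(dim=0).detach()  # shape: (seq_len,)
for token, weight in zip(tokens, mean_attention.tolist()):
    print(f"{token}\t{weight:.3f}")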
import plotly.graph_objects as go

# Example data
x = [7, 46.7, 70, 1800]  # X axis
x_rev = x[::-1]
y = [0.448, 0.526, 0.6, 0.69]  # Y axis
@@ -14,32 +14,36 @@ fig = go.Figure()

# Create the line chart
# fig.add_trace(go.Scatter(
#     x=x+x_rev,
#     y=y+y_lower,
#     fill='toself',
#     fillcolor='rgba(231,107,243,0.2)',
#     line_color='rgba(255,255,255,0)',
#     showlegend=False,
# ))

fig.add_trace(go.Scatter(
    x=x, y=y,
    line_color='rgb(231,107,243)',
    name='Max F1 Score',
    mode='lines+markers',
    showlegend=True,
))

fig.add_annotation(x=x[0], y=y[0], text=names[0], showarrow=False, yshift=-10, xshift=40)
fig.add_annotation(x=x[1], y=y[1], text=names[1], showarrow=False, yshift=0, xshift=40)
fig.add_annotation(x=x[2], y=y[2], text=names[2], showarrow=False, yshift=20, xshift=20)
fig.add_annotation(x=x[3], y=y[3], text=names[3], showarrow=False, yshift=20, xshift=-20)

fig.update_yaxes(range=[0, 1])
fig.update_xaxes(range=[0, None])

fig.update_layout(
    xaxis=dict(
        range=[-50, 2000]  # Adjust the axis limits with a margin
    ),
    xaxis_title="Model size in billions of parameters",
    yaxis_title="F1 score"
)
...
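If the chart is meant to be exported rather than shown interactively, a minimal sketch using the same kaleido engine the evaluator below relies on; pairing this script with the fig_f1_on_size.png file touched by this commit is only an assumption.

# Hypothetical export step; the path mirrors results/LLM/fig_f1_on_size.png updated in this commit
fig.write_image("results/LLM/fig_f1_on_size.png", format='png', engine='kaleido')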
from itertools import combinations
import plotly.graph_objects as go
from math import comb
import random


class RuleBasedExtractionEvaluator:

    def evaluate(self, E_augmented = {}, E_goldStandard = {}, fileName = "test", sampling_trigger = 50000):
        E_goldStandard = set(E_goldStandard)
        data = []
@@ -16,7 +18,19 @@ class RuleBasedExtractionEvaluator:
            currentData_precision = []
            currentData_recall = []

            if comb(len(E_augmented), i) < sampling_trigger * 100:
                combinaisons = list(combinations(E_augmented.keys(), i))
                if len(combinaisons) > sampling_trigger:
                    combinaisons = random.sample(combinaisons, sampling_trigger)
            else:
                combinaisons_uniques = set()
                while len(combinaisons_uniques) < sampling_trigger:
                    combinaison = tuple(random.sample(list(E_augmented.keys()), i))
                    combinaisons_uniques.add(combinaison)

                # Convert to a list for display
                combinaisons = list(combinaisons_uniques)

            for combinaison in combinaisons:
                merge = set()
@@ -83,9 +97,17 @@ class RuleBasedExtractionEvaluator:
            showlegend=False
        )

        # fig.write_image(f"./results/{fileName}.png", format='png', engine='kaleido')
        fig_recall.write_image(f"{fileName}_recall.png", format='png', engine='kaleido')
        fig_precision.write_image(f"{fileName}_precision.png", format='png', engine='kaleido')

        # elements not found
        merge = set()
        for element in E_augmented:
            merge = merge | set(E_augmented[element])

        not_found = E_goldStandard - merge
        print(not_found)

        return
...
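To illustrate the new sampling_trigger logic, a small standalone worked example (not part of the commit) showing when full enumeration is kept and when random sampling of unique subsets takes over. With the default sampling_trigger of 50000, the cutoff is 50000 * 100 = 5 000 000 combinations.

from math import comb

# Worked example of the sampling trigger (marker counts are illustrative)
print(comb(10, 3))    # 120        -> below the cutoff: all combinations are enumerated
print(comb(30, 15))   # 155117520  -> above the cutoff: 50 000 unique subsets are drawn at random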
@@ -62,7 +62,7 @@ def fine_tune(base_model, new_model):
    training_arguments = TrainingArguments(
        per_device_train_batch_size=2,
        gradient_accumulation_steps=2,
        num_train_epochs=6,
        learning_rate=1e-4,
        logging_steps=2,
        optim="adamw_torch",
...
import pandas as pd
from numpy.core._multiarray_umath import empty


class MarkerEnhancerSemanticRessources:
    fullMarkers = []
    augmentedMarkers = {}
@@ -6,7 +10,29 @@ class MarkerEnhancerSemanticRessources:
        self.fullMarkers = fullMarkers
        self.augmentedMarkers = {element: set() for element in fullMarkers}

    def exec(self, lemma_setting=False, top_setting=5):
        if lemma_setting:
            return self.execWithLemma(top_setting)
        else:
            return self.execWithoutLemma(top_setting)

    def execWithoutLemma(self, top_setting):
        scores = {element: {} for element in self.fullMarkers}

        with open("data/20230731-LEXICALNET-JEUXDEMOTS-R5.txt", "r", encoding='ISO-8859-1') as fichier:
            for ligne in fichier:
                if ligne.startswith(";"):
                    ligne = ligne.split(";")
                    if ligne[1] in self.fullMarkers:
                        scores[ligne[1]][ligne[2]] = ligne[3]

        for key, element in scores.items():
            top = sorted(element.items(), key=lambda x: x[1], reverse=True)[:top_setting]
            for new_marker, value in top:
                self.augmentedMarkers[key].add(new_marker)

        return self

    def execWithLemma(self, top_setting):
        scores = {element: {} for element in self.fullMarkers}

        with open("data/20230731-LEXICALNET-JEUXDEMOTS-R5.txt", "r", encoding='ISO-8859-1') as fichier:
            for ligne in fichier:
@@ -15,9 +41,23 @@ class MarkerEnhancerSemanticRessources:
                    if ligne[1] in self.fullMarkers:
                        scores[ligne[1]][ligne[2]] = ligne[3]

        df1 = pd.read_csv('data/lefff-3.4.csv', sep='\t', header=None)
        df2 = pd.read_csv('data/lefff-3.4-addition.csv', sep='\t', header=None)
        df = pd.concat([df1, df2])

        for key, element in scores.items():
            top = [x[0] for x in (sorted(element.items(), key=lambda x: x[1], reverse=True)[:5])]

            mergingList = []
            for new_marker in top:
                result = self.getOtherLemma(new_marker, df)
                if result is not None and result:
                    mergingList = mergingList + result

            top = top + mergingList
            top = top + self.getOtherLemma(key, df)

            for new_marker in top:
                self.augmentedMarkers[key].add(new_marker)

        return self
@@ -27,3 +67,20 @@ class MarkerEnhancerSemanticRessources:

    def getFullMarkers(self):
        return self.fullMarkers

    def getOtherLemma(self, lemma, df):
        resultats = df.loc[df[0] == lemma]
        mySet = set()
        for resultat in resultats.itertuples():
            mySet.add(resultat[3])

        mySet2 = set()
        for el in mySet:
            resultats = df.loc[df[2] == el]
            for resultat in resultats.itertuples():
                mySet2.add(resultat[1])

        if lemma in mySet2:
            mySet2.remove(lemma)

        return list(mySet2)
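A hypothetical invocation of the enhancer with the new lemma-aware path; the constructor argument is inferred from the assignments above, and importing MarkersList here is an assumption.

# Hedged usage sketch: enrich the location markers via JeuxDeMots, then add Lefff lemma variants
enhancer = MarkerEnhancerSemanticRessources(MarkersList().getLocationMarkers())
enhancer.exec(lemma_setting=True, top_setting=5)
print(enhancer.augmentedMarkers["voirie"])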
@@ -10,7 +10,7 @@ class PatternsRequest:
password = "password"
self.driver = GraphDatabase.driver(uri, auth=(username, password))
def location(self, markers):
output = {}
for key in markers:
nodes = self.driver.execute_query(
@@ -18,17 +18,243 @@ class PatternsRequest:
match (c:Constituent)-[:CONSREL*..2]->(w:Word)
where c.type = "NP"
and toInteger(split(w.id, '.')[0]) > 1302
and toLower(w.text) in $array
with c as constituent
match (constituent)-[:CONSREL*..]->(w:Word)
return w.id
''',
array=list(markers[key] | {key})
)
output[key] = [record[0] for record in nodes.records]
return output
def modality(self, markers):
output = {}
for key in markers:
nodes = self.driver.execute_query(
'''
match (c:Constituent)-[:CONSREL]->(w:Word)
where c.type = "VN"
and toInteger(split(w.id, '.')[0]) > 1302
and toLower(w.text) in $array
with c as constituent
match (constituent)-[:CONSREL*..]->(w:Word)
return w.id
UNION
match (c:Constituent)-[:CONSREL]->(w:Word)
where c.type = "SENT"
and toInteger(split(w.id, '.')[0]) > 1302
and toLower(w.text) in $array
return w.id
''',
array=list(markers[key] | {key})
)
output[key] = [record[0] for record in nodes.records]
return output
def reason(self, markers):
output = {}
for key in markers:
simpleToken, multiToken = self.splitMultiTokens(list(markers[key] | {key}))
listMultiToken = self.driver.execute_query(
'''
WITH $array AS liste
unwind liste as words
MATCH path = (start:Word)-[:NEXT*]->(end:Word)
where size(words) - 1 = size(relationships(path))
and all(
idx IN range(0, size(words)-2)
WHERE (
toLower(words[idx]) = toLower((nodes(path)[idx]).text)
AND toLower(words[idx+1]) = toLower((nodes(path)[idx + 1]).text))
)
and start.text = words[0]
and end.text = words[size(words) - 1]
with nodes(path) as result
unwind result as results
return collect(results.id) as liste
''',
array=multiToken
)
listMultiToken = listMultiToken.records[0][0]
multiTokenResult1 = set()
if len(listMultiToken) > 0:
multiTokenResult1 = self.driver.execute_query(
'''
match (c:Constituent)-[:CONSREL*..]->(w:Word)
where w.id in $array
and c.type in ["Srel", "Ssub", "PP"]
and toInteger(split(w.id, '.')[0]) > 1302
with c as constituent
match (constituent)-[:CONSREL*..]->(w:Word)
return w.id
UNION
match (c:Constituent{type:"NP"})-[:CONSREL*..]->(c1:Constituent{type:"P+"})-[:CONSREL]->(w:Word)
match (c1)<-[:CONSREL]-(:Constituent)-[:CONSREL]->(:Constituent{type: "VPinf"})
where w.id in $array
and toInteger(split(w.id, '.')[0]) > 1302
with c as constituent
match (constituent)-[:CONSREL*..]->(w:Word)
return w.id
UNION
match (c:Constituent{type:"NP"})-[:CONSREL]->(c1:Constituent{type:"VPpart"})-[:CONSREL*..]->(w:Word)
where w.id in $array
and toInteger(split(w.id, '.')[0]) > 1302
with c as constituent
match (constituent)-[:CONSREL*..]->(w:Word)
return w.id
''',
array=multiToken
)
multiTokenResult1 = set([record[0] for record in multiTokenResult1.records])
simpleTokenResult1 = set()
if len(simpleToken) > 0:
simpleTokenResult1 = self.driver.execute_query(
'''
match (c:Constituent)-[:CONSREL*..]->(w:Word)
where toLower(w.text) in $array
and c.type in ["Srel", "Ssub", "PP"]
and toInteger(split(w.id, '.')[0]) > 1302
with c as constituent
match (constituent)-[:CONSREL*..]->(w:Word)
return w.id
UNION
match (c:Constituent{type:"NP"})-[:CONSREL*..]->(c1:Constituent{type:"P+"})-[:CONSREL]->(w:Word)
match (c1)<-[:CONSREL]-(:Constituent)-[:CONSREL]->(:Constituent{type: "VPinf"})
where toLower(w.text) in $array
and toInteger(split(w.id, '.')[0]) > 1302
with c as constituent
match (constituent)-[:CONSREL*..]->(w:Word)
return w.id
UNION
match (c:Constituent{type:"NP"})-[:CONSREL]->(c1:Constituent{type:"VPpart"})-[:CONSREL*..]->(w:Word)
where toLower(w.text) in $array
and toInteger(split(w.id, '.')[0]) > 1302
with c as constituent
match (constituent)-[:CONSREL*..]->(w:Word)
return w.id
''',
array=list(simpleToken)
)
simpleTokenResult1 = set([record[0] for record in simpleTokenResult1.records])
output[key] = list(multiTokenResult1 | simpleTokenResult1)
return output
def time(self, markers):
output = {}
for key in markers:
simpleToken, multiToken = self.splitMultiTokens(list(markers[key] | {key}))
multiTokenResult1 = self.driver.execute_query(
'''
WITH $array AS liste
unwind liste as words
MATCH path = (start:Word)-[:NEXT*]->(end:Word)
where size(words) - 1 = size(relationships(path))
and all(
idx IN range(0, size(words)-2)
WHERE (
toLower(words[idx]) = toLower((nodes(path)[idx]).text)
AND toLower(words[idx+1]) = toLower((nodes(path)[idx + 1]).text))
)
and start.text = words[0]
and end.text = words[size(words) - 1]
with nodes(path) as result
unwind result as results
with collect(results) as liste
match (c:Constituent)-[:CONSREL]->(w:Word)
where c.type = "NP"
and toInteger(split(w.id, '.')[0]) > 1302
and w in liste
with c as constituent
match (constituent)-[:CONSREL*..]->(w:Word)
return w.id
''',
array=multiToken
)
multiTokenResult1 = set([record[0] for record in multiTokenResult1.records])
simpleTokenResult1 = self.driver.execute_query(
'''
match (c:Constituent)-[:CONSREL]->(w:Word)
where c.type = "NP"
and toInteger(split(w.id, '.')[0]) > 1302
and w.text in $array
with c as constituent
match (constituent)-[:CONSREL*..]->(w:Word)
return w.id
''',
array=list(simpleToken)
)
simpleTokenResult1 = set([record[0] for record in simpleTokenResult1.records])
multiTokenResult2 = self.driver.execute_query(
'''
WITH $array AS liste
unwind liste as words
MATCH path = (start:Word)-[:NEXT*]->(end:Word)
where size(words) - 1 = size(relationships(path))
and all(
idx IN range(0, size(words)-2)
WHERE (
toLower(words[idx]) = toLower((nodes(path)[idx]).text)
AND toLower(words[idx+1]) = toLower((nodes(path)[idx + 1]).text))
)
and start.text = words[0]
and end.text = words[size(words) - 1]
with nodes(path) as result
unwind result as results
with collect(results) as liste
match (c1:Constituent {type: "PP"})-[:CONSREL]->(c2:Constituent {type: "P+"})-[:CONSREL]->(w:Word)
match (c1)<-[:CONSREL]-(:Constituent)-[:CONSREL]->(:Constituent {type: "NP"})
where w in liste
and toInteger(split(w.id, '.')[0]) > 1302
with c1 as c1
match (c1)-[:CONSREL*..]->(w:Word)
return w.id
''',
array=multiToken
)
multiTokenResult2 = set([record[0] for record in multiTokenResult2.records])
simpleTokenResult2 = self.driver.execute_query(
'''
match (c1:Constituent {type: "PP"})-[:CONSREL]->(c2:Constituent {type: "P+"})-[:CONSREL]->(w:Word)
match (c2)<-[:CONSREL]-(:Constituent)-[:CONSREL]->(:Constituent {type: "NP"})
where w.text in $array
and toInteger(split(w.id, '.')[0]) > 1302
with c1 as c1
match (c1)-[:CONSREL*..]->(w:Word)
return w.id
''',
array=list(simpleToken)
)
simpleTokenResult2 = set([record[0] for record in simpleTokenResult2.records])
output[key] = list(multiTokenResult1 | simpleTokenResult1 | multiTokenResult2 | simpleTokenResult2)
return output
def getGoldStandard(self, type="location"):
nodes = self.driver.execute_query(
'''
@@ -41,3 +267,15 @@ class PatternsRequest:
)
return [record[0] for record in nodes.records]
def splitMultiTokens(self, liste):
simpleToken = set()
multiToken = []
for token in liste:
if " " in token:
multiToken.append(token.split(" "))
else:
simpleToken.add(token)
return (simpleToken, multiToken)
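Putting the pieces of this commit together, a hedged end-to-end sketch; the no-argument PatternsRequest constructor and the output prefix are assumptions, only the method names shown in the diffs are used.

# Hypothetical pipeline: markers -> semantic enrichment -> graph extraction -> evaluation
markers = MarkersList().getLocationMarkers()
augmented = MarkerEnhancerSemanticRessources(markers).exec(lemma_setting=False).augmentedMarkers

requests = PatternsRequest()
E_augmented = requests.location(augmented)                  # node ids extracted per marker
E_goldStandard = requests.getGoldStandard(type="location")  # annotated node ids

RuleBasedExtractionEvaluator().evaluate(
    E_augmented,
    E_goldStandard,
    fileName="results/S1/MarkerEnhancer/Location/Result",   # would yield the Result_*.png files listed below
    sampling_trigger=50000,
)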
results/LLM/fig_f1_on_size.png: binary image updated (198 KiB → 188 KiB)
Legal Concept; Ground Truth; Extracted; Perfect Match (TP); Partial Match (TP); Misclassified (FP); Missed (FN); Precision; Recall; F1; F2
Action;157;157;91;64;2;2;0,987;0,987;0,987;0,987
Actor;138;133;110;19;4;9;0,970;0,935;0,952;0,942
Artifact;252;241;182;56;3;14;0,988;0,944;0,966;0,953
Condition;172;179;148;18;13;6;0,927;0,965;0,946;0,957
Location;35;34;27;7;0;1;1,000;0,971;0,986;0,977
Modality;80;81;74;4;3;2;0,963;0,975;0,969;0,973
Reference;72;;;;;;;;;
Time;67;63;56;7;0;4;1,000;0,940;0,969;0,952
Total;973;888;688;175;25;38;0,972;0,958;0,965;0,961
;;;;;;;;;;
;;;;;;;;;;
;;;;;;;;;;
;;;;;;;;;;
;;;;;;;;;;
;;;;;;;;;;
Legal Concept; Ground Truth; Extracted; Perfect Match (TP); Partial Match (TP); Misclassified (FP); Missed (FN); Precision; Recall; F1; F2
Action;157;;6;124;20;27;0,867;0,828;0,847;0,835
Actor;138;;73;47;12;18;0,909;0,870;0,889;0,877
Artifact;252;;58;92;23;102;0,867;0,595;0,706;0,635
Condition;172;;50;52;14;70;0,879;0,593;0,708;0,634
Location;35;;4;18;0;13;1,000;0,629;0,772;0,679
Modality;80;;46;24;1;10;0,986;0,875;0,927;0,895
Reference;72;;15;34;0;23;1,000;0,681;0,810;0,727
Time;67;;18;24;0;25;1,000;0,627;0,771;0,677
Total;973;;270;415;70;288;0,907;0,704;0,793;0,737
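The F1 and F2 columns appear to follow the standard F-beta definitions; as a quick standalone check against the Actor row of the first table (not a script from the repository):

# F-beta sanity check for the Actor row: precision 0.970, recall 0.935
precision, recall = 0.970, 0.935
f1 = 2 * precision * recall / (precision + recall)        # ≈ 0.952
f2 = 5 * precision * recall / (4 * precision + recall)    # ≈ 0.942 (beta = 2 weights recall higher)
print(round(f1, 3), round(f2, 3))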
results/S1/MarkerEnhancer/Actor/heatmap.png: binary image updated (634 KiB → 635 KiB)
results/S1/MarkerEnhancer/Location/Result_precision.png: binary image (43.7 KiB)
results/S1/MarkerEnhancer/Location/Result_recall.png: binary image (39.8 KiB)
results/S1/MarkerEnhancer/Location/ViaSemanticRessources_precision.png: binary image updated (19.5 KiB → 20.4 KiB)
results/S1/MarkerEnhancer/Location/ViaSemanticRessources_recall.png: binary image updated (17.4 KiB → 22.2 KiB)
results/S1/MarkerEnhancer/Location/heatmap.png: binary image updated (288 KiB → 288 KiB)
results/S1/MarkerEnhancer/Modality/Result_precision.png: binary image (24.8 KiB)
results/S1/MarkerEnhancer/Modality/Result_recall.png: binary image (22.9 KiB)