diff --git a/AMAKFX/src/fr/irit/smac/amak/Agent.java b/AMAKFX/src/fr/irit/smac/amak/Agent.java
index 9685e065123e3ae277e7482e37e39789ce88c9f1..1de07ab93413f5fb8d9581c27fe6955f3862d71d 100644
--- a/AMAKFX/src/fr/irit/smac/amak/Agent.java
+++ b/AMAKFX/src/fr/irit/smac/amak/Agent.java
@@ -1,496 +1,517 @@
-package fr.irit.smac.amak;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import fr.irit.smac.amak.Amas.ExecutionPolicy;
-import fr.irit.smac.amak.tools.Log;
-
-/**
- * This class must be overridden by all agents
- * 
- * @author Alexandre Perles
- *
- * @param <A>
- *            The kind of Amas the agent refers to
- * @param <E>
- *            The kind of Environment the agent AND the Amas refer to
- */
-public abstract class Agent<A extends Amas<E>, E extends Environment> implements Runnable {
-	/**
-	 * Neighborhood of the agent (must refer to the same couple amas, environment
-	 */
-	protected final List<Agent<A, E>> neighborhood;
-	/**
-	 * Criticalities of the neighbors (and it self) as perceived at the beginning of
-	 * the agent's cycle
-	 */
-	protected final Map<Agent<A, E>, Double> criticalities = new HashMap<>();
-	/**
-	 * Last calculated criticality of the agent
-	 */
-	private double criticality;
-	/**
-	 * Amas the agent belongs to
-	 */
-	protected final A amas;
-	/**
-	 * Unique index to give unique id to each agent
-	 */
-	private static int uniqueIndex;
-
-	/**
-	 * The id of the agent
-	 */
-	private final int id;
-	/**
-	 * The order of execution of the agent as computed by
-	 * {@link Agent#_computeExecutionOrder()}
-	 */
-	private int executionOrder;
-	/**
-	 * The parameters that can be user in the initialization process
-	 * {@link Agent#onInitialization()}
-	 */
-	protected Object[] params;
-
-	/**
-	 * These phases are used to synchronize agents on phase
-	 * 
-	 * @see fr.irit.smac.amak.Amas.ExecutionPolicy
-	 * @author perles
-	 *
-	 */
-	public enum Phase {
-		/**
-		 * Agent is perceiving
-		 */
-		PERCEPTION,
-		/**
-		 * Agent is deciding and acting
-		 */
-		DECISION_AND_ACTION,
-		/**
-		 * Agent haven't started to perceive, decide or act
-		 */
-		INITIALIZING,
-		/**
-		 * Agent is ready to decide
-		 */
-		PERCEPTION_DONE,
-		/**
-		 * Agent is ready to perceive or die
-		 */
-		DECISION_AND_ACTION_DONE
-	}
-
-	/**
-	 * The current phase of the agent {@link Phase}
-	 */
-	protected Phase currentPhase = Phase.INITIALIZING;
-	
-	private boolean synchronous = true;
-
-	/**
-	 * The constructor automatically add the agent to the corresponding amas and
-	 * initialize the agent
-	 * 
-	 * @param amas
-	 *            Amas the agent belongs to
-	 * @param params
-	 *            The params to initialize the agent
-	 */
-	public Agent(A amas, Object... params) {
-		this.id = uniqueIndex++;
-		this.params = params;
-		this.amas = amas;
-		neighborhood = new ArrayList<>();
-		neighborhood.add(this);
-		onInitialization();
-		if (!Configuration.commandLineMode)
-			onRenderingInitialization();
-		
-
-		if (amas != null) {
-			this.amas._addAgent(this);
-		}
-	}
-
-	/**
-	 * Add neighbors to the agent
-	 * 
-	 * @param agents
-	 *            The list of agent that should be considered as neighbor
-	 */
-	@SafeVarargs
-	public final void addNeighbor(Agent<A, E>... agents) {
-		for (Agent<A, E> agent : agents) {
-			if (agent != null) {
-				neighborhood.add(agent);
-				criticalities.put(agent, Double.NEGATIVE_INFINITY);
-			}
-		}
-	}
-
-	/**
-	 * This method must be overridden by the agents. This method shouldn't make any
-	 * calls to internal representation an agent has on its environment because
-	 * these information maybe outdated.
-	 * 
-	 * @return the criticality at a given moment
-	 */
-	protected double computeCriticality() {
-		return Double.NEGATIVE_INFINITY;
-	}
-
-	protected void setAsynchronous() {
-		if (currentPhase != Phase.INITIALIZING)
-			Log.defaultLog.fatal("AMAK", "Asynchronous mode must be set during the initialization");
-		this.synchronous = false;
-	}
-	/**
-	 * This method must be overriden if you need to specify an execution order layer
-	 * 
-	 * @return the execution order layer
-	 */
-	protected int computeExecutionOrderLayer() {
-		return 0;
-	}
-
-	/**
-	 * This method is called at the beginning of an agent's cycle
-	 */
-	protected void onAgentCycleBegin() {
-
-	}
-
-	/**
-	 * This method is called at the end of an agent's cycle
-	 */
-	protected void onAgentCycleEnd() {
-
-	}
-
-	/**
-	 * This method corresponds to the perception phase of the agents and must be
-	 * overridden
-	 */
-	protected void onPerceive() {
-
-	}
-
-	/**
-	 * This method corresponds to the decision phase of the agents and must be
-	 * overridden
-	 */
-	protected void onDecide() {
-
-	}
-
-	/**
-	 * This method corresponds to the action phase of the agents and must be
-	 * overridden
-	 */
-	protected void onAct() {
-
-	}
-
-	/**
-	 * In this method the agent should expose some variables with its neighbor
-	 */
-	protected void onExpose() {
-
-	}
-
-	/**
-	 * This method should be used to update the representation of the agent for
-	 * example in a VUI
-	 */
-	public void onUpdateRender() {
-
-	}
-
-	/**
-	 * This method is now deprecated and should be replaced by onUpdateRender
-	 * 
-	 * @deprecated Must be replaced by {@link #onUpdateRender()}
-	 */
-	@Deprecated
-	protected final void onDraw() {
-
-	}
-
-	/**
-	 * Called when all initial agents have been created and are ready to be started
-	 */
-	protected void onReady() {
-
-	}
-
-	/**
-	 * Called by the framework when all initial agents have been created and are
-	 * almost ready to be started
-	 */
-	protected final void _onBeforeReady() {
-		criticality = computeCriticality();
-		executionOrder = _computeExecutionOrder();
-	}
-
-	/**
-	 * Called before all agents are created
-	 */
-	protected void onInitialization() {
-
-	}
-
-	/**
-	 * Replaced by onInitialization
-	 * 
-	 * @deprecated Must be replaced by {@link #onInitialization()}
-	 */
-	@Deprecated
-	protected final void onInitialize() {
-
-	}
-
-	/**
-	 * Called to initialize the rendering of the agent
-	 */
-	protected void onRenderingInitialization() {
-
-	}
-
-	/**
-	 * @deprecated This method is useless because the state of the agent is not
-	 *             supposed to evolve before or after its cycle. Use
-	 *             OnAgentCycleBegin/End instead.
-	 * 
-	 *             This method is final because it must not be implemented.
-	 *             Implement it will have no effect.
-	 */
-	@Deprecated
-	protected final void onSystemCycleBegin() {
-
-	}
-
-	/**
-	 * @deprecated This method is useless because the state of the agent is not
-	 *             supposed to evolve before or after its cycle. Use
-	 *             OnAgentCycleBegin/End instead.
-	 * 
-	 *             This method is final because it must not be implemented.
-	 *             Implement it will have no effect.
-	 */
-	@Deprecated
-	protected final void onSystemCycleEnd() {
-
-	}
-
-	/**
-	 * This method is called automatically and corresponds to a full cycle of an
-	 * agent
-	 */
-	@Override
-	public void run() {
-		ExecutionPolicy executionPolicy = amas.getExecutionPolicy();
-		if (executionPolicy == ExecutionPolicy.TWO_PHASES) {
-
-			currentPhase = nextPhase();
-			switch (currentPhase) {
-			case PERCEPTION:
-				phase1();
-				amas.informThatAgentPerceptionIsFinished();
-				break;
-			case DECISION_AND_ACTION:
-				phase2();
-				amas.informThatAgentDecisionAndActionAreFinished();
-				break;
-			default:
-				Log.defaultLog.fatal("AMAK", "An agent is being run in an invalid phase (%s)", currentPhase);
-			}
-		} else if (executionPolicy == ExecutionPolicy.ONE_PHASE) {
-			onePhaseCycle();
-			amas.informThatAgentPerceptionIsFinished();
-			amas.informThatAgentDecisionAndActionAreFinished();
-		}
-	}
-	public void onePhaseCycle() {
-		currentPhase = Phase.PERCEPTION;
-		phase1();
-		currentPhase = Phase.DECISION_AND_ACTION;
-		phase2();
-	}
-	/**
-	 * This method represents the perception phase of the agent
-	 */
-	protected final void phase1() {
-		onAgentCycleBegin();
-		perceive();
-		currentPhase = Phase.PERCEPTION_DONE;
-	}
-
-	/**
-	 * This method represents the decisionAndAction phase of the agent
-	 */
-	protected final void phase2() {
-		decideAndAct();
-		executionOrder = _computeExecutionOrder();
-		onExpose();
-		if (!Configuration.commandLineMode)
-			onUpdateRender();
-		onAgentCycleEnd();
-		currentPhase = Phase.DECISION_AND_ACTION_DONE;
-	}
-
-	/**
-	 * Determine which phase comes after another
-	 * 
-	 * @return the next phase
-	 */
-	private Phase nextPhase() {
-		switch (currentPhase) {
-		case PERCEPTION_DONE:
-			return Phase.DECISION_AND_ACTION;
-		case INITIALIZING:
-		case DECISION_AND_ACTION_DONE:
-		default:
-			return Phase.PERCEPTION;
-		}
-	}
-
-	/**
-	 * Compute the execution order from the layer and a random value. This method
-	 * shouldn't be overridden.
-	 * 
-	 * @return A number used by amak to determine which agent executes first
-	 */
-	protected int _computeExecutionOrder() {
-		return computeExecutionOrderLayer() * 10000 + amas.getEnvironment().getRandom().nextInt(10000);
-	}
-
-	/**
-	 * Perceive, decide and act
-	 */
-	void perceive() {
-		for (Agent<A, E> agent : neighborhood) {
-			criticalities.put(agent, agent.criticality);
-		}
-		onPerceive();
-		// Criticality of agent should be updated after perception AND after action
-		criticality = computeCriticality();
-		criticalities.put(this, criticality);
-	}
-
-	/**
-	 * A combination of decision and action as called by the framework
-	 */
-	private final void decideAndAct() {
-		onDecideAndAct();
-
-		criticality = computeCriticality();
-	}
-
-	/**
-	 * Decide and act These two phases can often be grouped
-	 */
-	protected void onDecideAndAct() {
-		onDecide();
-		onAct();
-	}
-
-	/**
-	 * Convenient method giving the most critical neighbor at a given moment
-	 * 
-	 * @param includingMe
-	 *            Should the agent also consider its own criticality
-	 * @return the most critical agent
-	 */
-	protected final Agent<A, E> getMostCriticalNeighbor(boolean includingMe) {
-		List<Agent<A, E>> criticalest = new ArrayList<>();
-		double maxCriticality = Double.NEGATIVE_INFINITY;
-
-		if (includingMe) {
-			criticalest.add(this);
-			maxCriticality = criticalities.getOrDefault(criticalest, Double.NEGATIVE_INFINITY);
-		}
-		for (Entry<Agent<A, E>, Double> e : criticalities.entrySet()) {
-			if (e.getValue() > maxCriticality) {
-				criticalest.clear();
-				maxCriticality = e.getValue();
-				criticalest.add(e.getKey());
-			} else if (e.getValue() == maxCriticality) {
-				criticalest.add(e.getKey());
-			}
-		}
-		if (criticalest.isEmpty())
-			return null;
-		return criticalest.get(getEnvironment().getRandom().nextInt(criticalest.size()));
-	}
-
-	/**
-	 * Get the latest computed execution order
-	 * 
-	 * @return the execution order
-	 */
-	public int getExecutionOrder() {
-		return executionOrder;
-	}
-
-	/**
-	 * Getter for the AMAS
-	 * 
-	 * @return the amas
-	 */
-	public A getAmas() {
-		return amas;
-	}
-
-	/**
-	 * Remove the agent from the system
-	 */
-	public void destroy() {
-		getAmas()._removeAgent(this);
-	}
-
-	/**
-	 * Agent toString
-	 */
-	@Override
-	public String toString() {
-		return String.format("Agent #%d", id);
-	}
-
-	/**
-	 * Getter for the current phase of the agent
-	 * 
-	 * @return the current phase
-	 */
-	public Phase getCurrentPhase() {
-		return currentPhase;
-	}
-
-	/**
-	 * Return the id of the agent
-	 * 
-	 * @return the id of the agent
-	 */
-	public int getId() {
-		return id;
-	}
-
-	/**
-	 * Getter for the environment
-	 * 
-	 * @return the environment
-	 */
-	public E getEnvironment() {
-		return getAmas().getEnvironment();
-	}
-
-	public boolean isSynchronous() {
-		return synchronous ;
-	}
-}
+package fr.irit.smac.amak;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import fr.irit.smac.amak.Amas.ExecutionPolicy;
+import fr.irit.smac.amak.tools.Log;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+
+/**
+ * This class must be overridden by all agents
+ * 
+ * @author Alexandre Perles
+ *
+ * @param <A>
+ *            The kind of Amas the agent refers to
+ * @param <E>
+ *            The kind of Environment the agent AND the Amas refer to
+ */
+public abstract class Agent<A extends Amas<E>, E extends Environment> implements Runnable {
+	
+	
+	/**
+	 * Neighborhood of the agent (must refer to the same couple amas, environment)
+	 */
+	protected final List<Agent<A, E>> neighborhood;
+	/**
+	 * Criticalities of the neighbors (and itself) as perceived at the beginning of
+	 * the agent's cycle
+	 */
+	protected final Map<Agent<A, E>, Double> criticalities = new HashMap<>();
+	/**
+	 * Last calculated criticality of the agent
+	 */
+	private double criticality;
+	/**
+	 * Amas the agent belongs to
+	 */
+	protected final A amas;
+	/**
+	 * Unique index to give unique id to each agent
+	 */
+	private static int uniqueIndex;
+
+	/**
+	 * The id of the agent
+	 */
+	private final int id;
+	/**
+	 * The order of execution of the agent as computed by
+	 * {@link Agent#_computeExecutionOrder()}
+	 */
+	private int executionOrder;
+	/**
+	 * The parameters that can be used in the initialization process
+	 * {@link Agent#onInitialization()}
+	 */
+	protected Object[] params;
+
+	/**
+	 * These phases are used to synchronize agents on phase
+	 * 
+	 * @see fr.irit.smac.amak.Amas.ExecutionPolicy
+	 * @author perles
+	 *
+	 */
+	public enum Phase {
+		/**
+		 * Agent is perceiving
+		 */
+		PERCEPTION,
+		/**
+		 * Agent is deciding and acting
+		 */
+		DECISION_AND_ACTION,
+		/**
+		 * Agent hasn't started to perceive, decide or act
+		 */
+		INITIALIZING,
+		/**
+		 * Agent is ready to decide
+		 */
+		PERCEPTION_DONE,
+		/**
+		 * Agent is ready to perceive or die
+		 */
+		DECISION_AND_ACTION_DONE
+	}
+
+	/**
+	 * The current phase of the agent {@link Phase}
+	 */
+	protected Phase currentPhase = Phase.INITIALIZING;
+	
+	private boolean synchronous = true;
+
+	/**
+	 * The constructor automatically add the agent to the corresponding amas and
+	 * initialize the agent
+	 * 
+	 * @param amas
+	 *            Amas the agent belongs to
+	 * @param params
+	 *            The params to initialize the agent
+	 */
+	public Agent(A amas, Object... params) {
+		this.id = uniqueIndex++;
+		this.params = params;
+		this.amas = amas;
+		neighborhood = new ArrayList<>();
+		neighborhood.add(this);
+		onInitialization();
+		if (!Configuration.commandLineMode)
+			onRenderingInitialization();
+		
+
+		if (amas != null) {
+			this.amas._addAgent(this);
+		}
+	}
+	
+	public Agent(AmasMultiUIWindow window, A amas, Object... params) {
+		
+		this.id = uniqueIndex++;
+		this.params = params;
+		this.amas = amas;
+		neighborhood = new ArrayList<>();
+		neighborhood.add(this);
+		onInitialization();
+		if (!Configuration.commandLineMode)
+			onRenderingInitialization();
+		
+
+		if (amas != null) {
+			this.amas._addAgent(this);
+		}
+	}
+
+	/**
+	 * Add neighbors to the agent
+	 * 
+	 * @param agents
+	 *            The list of agent that should be considered as neighbor
+	 */
+	@SafeVarargs
+	public final void addNeighbor(Agent<A, E>... agents) {
+		for (Agent<A, E> agent : agents) {
+			if (agent != null) {
+				neighborhood.add(agent);
+				criticalities.put(agent, Double.NEGATIVE_INFINITY);
+			}
+		}
+	}
+
+	/**
+	 * This method must be overridden by the agents. This method shouldn't make any
+	 * calls to the internal representation an agent has of its environment because
+	 * this information may be outdated.
+	 * 
+	 * @return the criticality at a given moment
+	 */
+	protected double computeCriticality() {
+		return Double.NEGATIVE_INFINITY;
+	}
+
+	protected void setAsynchronous() {
+		if (currentPhase != Phase.INITIALIZING)
+			Log.defaultLog.fatal("AMAK", "Asynchronous mode must be set during the initialization");
+		this.synchronous = false;
+	}
+	/**
+	 * This method must be overridden if you need to specify an execution order layer
+	 * 
+	 * @return the execution order layer
+	 */
+	protected int computeExecutionOrderLayer() {
+		return 0;
+	}
+
+	/**
+	 * This method is called at the beginning of an agent's cycle
+	 */
+	protected void onAgentCycleBegin() {
+
+	}
+
+	/**
+	 * This method is called at the end of an agent's cycle
+	 */
+	protected void onAgentCycleEnd() {
+
+	}
+
+	/**
+	 * This method corresponds to the perception phase of the agents and must be
+	 * overridden
+	 */
+	protected void onPerceive() {
+
+	}
+
+	/**
+	 * This method corresponds to the decision phase of the agents and must be
+	 * overridden
+	 */
+	protected void onDecide() {
+
+	}
+
+	/**
+	 * This method corresponds to the action phase of the agents and must be
+	 * overridden
+	 */
+	protected void onAct() {
+
+	}
+
+	/**
+	 * In this method the agent should expose some variables with its neighbor
+	 */
+	protected void onExpose() {
+
+	}
+
+	/**
+	 * This method should be used to update the representation of the agent for
+	 * example in a VUI
+	 */
+	public void onUpdateRender() {
+
+	}
+
+	/**
+	 * This method is now deprecated and should be replaced by onUpdateRender
+	 * 
+	 * @deprecated Must be replaced by {@link #onUpdateRender()}
+	 */
+	@Deprecated
+	protected final void onDraw() {
+
+	}
+
+	/**
+	 * Called when all initial agents have been created and are ready to be started
+	 */
+	protected void onReady() {
+
+	}
+
+	/**
+	 * Called by the framework when all initial agents have been created and are
+	 * almost ready to be started
+	 */
+	protected final void _onBeforeReady() {
+		criticality = computeCriticality();
+		executionOrder = _computeExecutionOrder();
+	}
+
+	/**
+	 * Called before all agents are created
+	 */
+	protected void onInitialization() {
+
+	}
+
+	/**
+	 * Replaced by onInitialization
+	 * 
+	 * @deprecated Must be replaced by {@link #onInitialization()}
+	 */
+	@Deprecated
+	protected final void onInitialize() {
+
+	}
+
+	/**
+	 * Called to initialize the rendering of the agent
+	 */
+	protected void onRenderingInitialization() {
+
+	}
+
+	/**
+	 * @deprecated This method is useless because the state of the agent is not
+	 *             supposed to evolve before or after its cycle. Use
+	 *             OnAgentCycleBegin/End instead.
+	 * 
+	 *             This method is final because it must not be implemented.
+	 *             Implement it will have no effect.
+	 */
+	@Deprecated
+	protected final void onSystemCycleBegin() {
+
+	}
+
+	/**
+	 * @deprecated This method is useless because the state of the agent is not
+	 *             supposed to evolve before or after its cycle. Use
+	 *             OnAgentCycleBegin/End instead.
+	 * 
+	 *             This method is final because it must not be implemented.
+	 *             Implement it will have no effect.
+	 */
+	@Deprecated
+	protected final void onSystemCycleEnd() {
+
+	}
+
+	/**
+	 * This method is called automatically and corresponds to a full cycle of an
+	 * agent
+	 */
+	@Override
+	public void run() {
+		
+		ExecutionPolicy executionPolicy = amas.getExecutionPolicy();
+		if (executionPolicy == ExecutionPolicy.TWO_PHASES) {
+
+			currentPhase = nextPhase();
+			switch (currentPhase) {
+			case PERCEPTION:
+				phase1();
+				amas.informThatAgentPerceptionIsFinished();
+				break;
+			case DECISION_AND_ACTION:
+				phase2();
+				amas.informThatAgentDecisionAndActionAreFinished();
+				break;
+			default:
+				Log.defaultLog.fatal("AMAK", "An agent is being run in an invalid phase (%s)", currentPhase);
+			}
+		} else if (executionPolicy == ExecutionPolicy.ONE_PHASE) {
+			onePhaseCycle();
+			amas.informThatAgentPerceptionIsFinished();
+			amas.informThatAgentDecisionAndActionAreFinished();
+		}
+	}
+	public void onePhaseCycle() {
+		currentPhase = Phase.PERCEPTION;
+		phase1();
+		currentPhase = Phase.DECISION_AND_ACTION;
+		phase2();
+	}
+	/**
+	 * This method represents the perception phase of the agent
+	 */
+	protected final void phase1() {
+		onAgentCycleBegin();
+		perceive();
+		currentPhase = Phase.PERCEPTION_DONE;
+	}
+
+	/**
+	 * This method represents the decisionAndAction phase of the agent
+	 */
+	protected final void phase2() {
+		decideAndAct();
+		executionOrder = _computeExecutionOrder();
+		onExpose();
+		if (!Configuration.commandLineMode)
+			onUpdateRender();
+		onAgentCycleEnd();
+		currentPhase = Phase.DECISION_AND_ACTION_DONE;
+	}
+
+	/**
+	 * Determine which phase comes after another
+	 * 
+	 * @return the next phase
+	 */
+	private Phase nextPhase() {
+		switch (currentPhase) {
+		case PERCEPTION_DONE:
+			return Phase.DECISION_AND_ACTION;
+		case INITIALIZING:
+		case DECISION_AND_ACTION_DONE:
+		default:
+			return Phase.PERCEPTION;
+		}
+	}
+
+	/**
+	 * Compute the execution order from the layer and a random value. This method
+	 * shouldn't be overridden.
+	 * 
+	 * @return A number used by amak to determine which agent executes first
+	 */
+	protected int _computeExecutionOrder() {
+		return computeExecutionOrderLayer() * 10000 + amas.getEnvironment().getRandom().nextInt(10000);
+	}
+
+	/**
+	 * Perceive, decide and act
+	 */
+	void perceive() {
+		for (Agent<A, E> agent : neighborhood) {
+			criticalities.put(agent, agent.criticality);
+		}
+		onPerceive();
+		// Criticality of agent should be updated after perception AND after action
+		criticality = computeCriticality();
+		criticalities.put(this, criticality);
+	}
+
+	/**
+	 * A combination of decision and action as called by the framework
+	 */
+	private final void decideAndAct() {
+		onDecideAndAct();
+
+		criticality = computeCriticality();
+	}
+
+	/**
+	 * Decide and act. These two phases can often be grouped
+	 */
+	protected void onDecideAndAct() {
+		onDecide();
+		onAct();
+	}
+
+	/**
+	 * Convenient method giving the most critical neighbor at a given moment
+	 * 
+	 * @param includingMe
+	 *            Should the agent also consider its own criticality
+	 * @return the most critical agent
+	 */
+	protected final Agent<A, E> getMostCriticalNeighbor(boolean includingMe) {
+		List<Agent<A, E>> criticalest = new ArrayList<>();
+		double maxCriticality = Double.NEGATIVE_INFINITY;
+
+		if (includingMe) {
+			criticalest.add(this);
+			maxCriticality = criticalities.getOrDefault(criticalest, Double.NEGATIVE_INFINITY);
+		}
+		for (Entry<Agent<A, E>, Double> e : criticalities.entrySet()) {
+			if (e.getValue() > maxCriticality) {
+				criticalest.clear();
+				maxCriticality = e.getValue();
+				criticalest.add(e.getKey());
+			} else if (e.getValue() == maxCriticality) {
+				criticalest.add(e.getKey());
+			}
+		}
+		if (criticalest.isEmpty())
+			return null;
+		return criticalest.get(getEnvironment().getRandom().nextInt(criticalest.size()));
+	}
+
+	/**
+	 * Get the latest computed execution order
+	 * 
+	 * @return the execution order
+	 */
+	public int getExecutionOrder() {
+		return executionOrder;
+	}
+
+	/**
+	 * Getter for the AMAS
+	 * 
+	 * @return the amas
+	 */
+	public A getAmas() {
+		return amas;
+	}
+
+	/**
+	 * Remove the agent from the system
+	 */
+	public void destroy() {
+		getAmas()._removeAgent(this);
+	}
+
+	/**
+	 * Agent toString
+	 */
+	@Override
+	public String toString() {
+		return String.format("Agent #%d", id);
+	}
+
+	/**
+	 * Getter for the current phase of the agent
+	 * 
+	 * @return the current phase
+	 */
+	public Phase getCurrentPhase() {
+		return currentPhase;
+	}
+
+	/**
+	 * Return the id of the agent
+	 * 
+	 * @return the id of the agent
+	 */
+	public int getId() {
+		return id;
+	}
+
+	/**
+	 * Getter for the environment
+	 * 
+	 * @return the environment
+	 */
+	public E getEnvironment() {
+		return getAmas().getEnvironment();
+	}
+
+	public boolean isSynchronous() {
+		return synchronous ;
+	}
+}
diff --git a/AMAKFX/src/fr/irit/smac/amak/Amas.java b/AMAKFX/src/fr/irit/smac/amak/Amas.java
index a73d0e5107ef13fc638c7e17037858d93ae44c5d..965306ca9e864c7cab05e8149012192f0e71e70c 100644
--- a/AMAKFX/src/fr/irit/smac/amak/Amas.java
+++ b/AMAKFX/src/fr/irit/smac/amak/Amas.java
@@ -13,9 +13,11 @@ import java.util.stream.Collectors;
 
 import fr.irit.smac.amak.tools.Log;
 import fr.irit.smac.amak.tools.RunLaterHelper;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
 import fr.irit.smac.amak.ui.MainWindow;
 import fr.irit.smac.amak.ui.SchedulerToolbar;
 import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.VUIMulti;
 
 /**
  * This class must be overridden by multi-agent systems
@@ -26,6 +28,10 @@ import fr.irit.smac.amak.ui.VUI;
  *            The environment of the MAS
  */
 public class Amas<E extends Environment> implements Schedulable {
+	
+	public AmasMultiUIWindow amasMultiUIWindow;
+	public VUIMulti vuiMulti;
+	
 	/**
 	 * List of agents present in the system
 	 */
@@ -155,6 +161,43 @@ public class Amas<E extends Environment> implements Schedulable {
 			this.onRenderingInitialization();
 		this.scheduler.unlock();
 	}
+	
+	public Amas(AmasMultiUIWindow window, VUIMulti vui, E environment, Scheduling scheduling, Object... params) {
+		
+		if(!Configuration.commandLineMode) {
+			amasMultiUIWindow = window;
+			vuiMulti = vui;
+			amasMultiUIWindow.addTabbedPanel(vuiMulti.title, vuiMulti.getPanel());
+		}
+		
+		
+		executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(Configuration.allowedSimultaneousAgentsExecution);
+		
+		//this.scheduler = environment.getScheduler();
+		if (scheduling == Scheduling.DEFAULT) {
+
+			this.scheduler = Scheduler.getDefaultMultiUIScheduler(window);
+			this.scheduler.add(this);
+		} else {
+			this.scheduler = new Scheduler(this);
+			if (scheduling == Scheduling.UI && !Configuration.commandLineMode) {
+				amasMultiUIWindow.addToolbar(new SchedulerToolbar("Amas #" + id, getScheduler()));
+			}
+		}
+		
+		this.scheduler.lock();
+		this.params = params;
+		this.environment = environment;
+		this.onInitialConfiguration();
+		executionPolicy = Configuration.executionPolicy;
+		this.onInitialAgentsCreation();
+
+		addPendingAgents();
+		this.onReady();
+		if (!Configuration.commandLineMode)
+			this.onRenderingInitialization();
+		this.scheduler.unlock();
+	}
 
 	/**
 	 * The method in which the rendering initialization should be made. For example,
@@ -371,7 +414,19 @@ public class Amas<E extends Environment> implements Schedulable {
 	 * {@link Amas#onRenderingInitialization}
 	 */
 	protected void onUpdateRender() {
-		VUI.get().updateCanvas();
+		if(Configuration.multiUI) {
+			vuiMulti.updateCanvas();
+		}else {
+			VUI.get().updateCanvas();
+		}
+		
+	}
+	
+	
+
+	
+	public VUIMulti getVUIMulti() {
+		return vuiMulti;
 	}
 
 	/**
diff --git a/AMAKFX/src/fr/irit/smac/amak/Configuration.java b/AMAKFX/src/fr/irit/smac/amak/Configuration.java
index bc8050f8b87e296f7497acf1932764c02508c4f2..34c301cc8ae3334a7c8f49476db23e17af8a6b65 100644
--- a/AMAKFX/src/fr/irit/smac/amak/Configuration.java
+++ b/AMAKFX/src/fr/irit/smac/amak/Configuration.java
@@ -47,4 +47,6 @@ public class Configuration {
 	 * By default AMAK will wait for 1 sec before updating the plots
 	 */
 	public static double plotMilliSecondsUpdate = 1000;
+	
+	public static boolean multiUI = false;
 }
diff --git a/AMAKFX/src/fr/irit/smac/amak/Environment.java b/AMAKFX/src/fr/irit/smac/amak/Environment.java
index 68433e456f0c7b95e24b646a47e8806d67dd4666..e048ea299ca9fb0433762c93d30b570cca6d19b8 100644
--- a/AMAKFX/src/fr/irit/smac/amak/Environment.java
+++ b/AMAKFX/src/fr/irit/smac/amak/Environment.java
@@ -1,145 +1,170 @@
-package fr.irit.smac.amak;
-
-import java.util.Random;
-
-import fr.irit.smac.amak.ui.MainWindow;
-import fr.irit.smac.amak.ui.SchedulerToolbar;
-
-/**
- * This class must be overridden by environments
- * 
- * @author Alexandre Perles
- *
- */
-public abstract class Environment implements Schedulable {
-
-	/**
-	 * Unique index to give unique id to each environment
-	 */
-	private static int uniqueIndex;
-
-	/**
-	 * The id of the environment
-	 */
-	private final int id = uniqueIndex++;
-
-	/**
-	 * The parameters that are passed to {@link Environment#onInitialization()}
-	 */
-	protected Object[] params;
-	/**
-	 * Random object common to the amas
-	 */
-	private Random random = new Random();
-	/**
-	 * The scheduler of the environment
-	 */
-	private Scheduler scheduler;
-
-	/**
-	 * Constructor
-	 * 
-	 * @param _scheduling
-	 *            The scheduling of the environment
-	 * @param params
-	 *            The parameters to initialize the environment
-	 */
-	public Environment(Scheduling _scheduling, Object... params) {
-		if (_scheduling == Scheduling.DEFAULT) {
-			this.scheduler = Scheduler.getDefaultScheduler();
-			this.scheduler.add(this);
-		} else {
-			this.scheduler = new Scheduler(this);
-			if (_scheduling == Scheduling.UI && !Configuration.commandLineMode)
-				MainWindow.addToolbar(new SchedulerToolbar("Environment #" + id, getScheduler()));
-		}
-		this.scheduler.lock();
-		this.params = params;
-		onInitialization();
-		onInitialEntitiesCreation();
-		if (!Configuration.commandLineMode)
-			onRenderingInitialization();
-		this.scheduler.unlock();
-	}
-
-	/**
-	 * Override this method is you wish to render environment. For example, you can
-	 * use this method to create a VUI drawable object.
-	 */
-	private void onRenderingInitialization() {
-	}
-
-	/**
-	 * Getter for the scheduler
-	 * 
-	 * @return the scheduler
-	 */
-	public Scheduler getScheduler() {
-		return scheduler;
-	}
-
-	/**
-	 * Set the seed for the common random object. This method should be called at
-	 * the very beginning of the initialization process
-	 * 
-	 * @param _seed
-	 *            The seed to initialize the random object
-	 */
-	public void setSeed(long _seed) {
-		random = new Random(_seed);
-	}
-
-	/**
-	 * This method is called during the initialization process of the environment
-	 */
-	public void onInitialization() {
-	}
-
-	/**
-	 * This method is called after the initialization process of the environment to
-	 * create entities
-	 */
-	public void onInitialEntitiesCreation() {
-	}
-
-	/**
-	 * This method is called at each cycle of the environment
-	 */
-	public void onCycle() {
-	}
-
-	@Override
-	public boolean stopCondition() {
-		return false;
-	}
-
-	@Override
-	public final void cycle() {
-		onCycle();
-		if (!Configuration.commandLineMode)
-			onUpdateRender();
-	}
-
-	/**
-	 * Override this method to update rendering related to the environment
-	 */
-	protected void onUpdateRender() {
-	}
-
-	/**
-	 * Getter for the random object
-	 * 
-	 * @return the random object
-	 */
-	public Random getRandom() {
-		return random;
-	}
-
-	@Override
-	public void onSchedulingStarts() {
-	}
-
-	@Override
-	public void onSchedulingStops() {
-	}
-}
+package fr.irit.smac.amak;
+
+import java.util.Random;
+
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+import fr.irit.smac.amak.ui.MainWindow;
+import fr.irit.smac.amak.ui.SchedulerToolbar;
+
+/**
+ * This class must be overridden by environments
+ * 
+ * @author Alexandre Perles
+ *
+ */
+public abstract class Environment implements Schedulable {
+	
+	public AmasMultiUIWindow amasMultiUIWindow;
+
+	/**
+	 * Unique index to give unique id to each environment
+	 */
+	private static int uniqueIndex;
+
+	/**
+	 * The id of the environment
+	 */
+	private final int id = uniqueIndex++;
+
+	/**
+	 * The parameters that are passed to {@link Environment#onInitialization()}
+	 */
+	protected Object[] params;
+	/**
+	 * Random object common to the amas
+	 */
+	private Random random = new Random();
+	/**
+	 * The scheduler of the environment
+	 */
+	private Scheduler scheduler;
+
+	/**
+	 * Constructor
+	 * 
+	 * @param _scheduling
+	 *            The scheduling of the environment
+	 * @param params
+	 *            The parameters to initialize the environment
+	 */
+	public Environment(Scheduling _scheduling, Object... params) {
+		if (_scheduling == Scheduling.DEFAULT) {
+			this.scheduler = Scheduler.getDefaultScheduler();
+			this.scheduler.add(this);
+		} else {
+			this.scheduler = new Scheduler(this);
+			if (_scheduling == Scheduling.UI && !Configuration.commandLineMode)
+				MainWindow.addToolbar(new SchedulerToolbar("Environment #" + id, getScheduler()));
+		}
+		this.scheduler.lock();
+		this.params = params;
+		onInitialization();
+		onInitialEntitiesCreation();
+		if (!Configuration.commandLineMode)
+			onRenderingInitialization();
+		this.scheduler.unlock();
+	}
+	
+	public Environment(AmasMultiUIWindow window, Scheduling _scheduling, Object... params) {
+		amasMultiUIWindow = window;
+//		if (_scheduling == Scheduling.DEFAULT) {
+//			this.scheduler = Scheduler.getDefaultMultiUIScheduler(window);
+//			this.scheduler.add(this);
+//		} else {
+//		this.scheduler = new Scheduler(this);
+//		if (_scheduling == Scheduling.UI && !Configuration.commandLineMode)
+//			amasMultiUIWindow.addToolbar(new SchedulerToolbar("Environment #" + id, getScheduler()));
+//		}
+//
+//		this.scheduler.lock();
+		this.params = params;
+		onInitialization();
+		onInitialEntitiesCreation();
+		if (!Configuration.commandLineMode)
+			onRenderingInitialization();
+//		this.scheduler.unlock();
+	}
+
+	/**
+	 * Override this method if you wish to render the environment. For example, you can
+	 * use this method to create a VUI drawable object.
+	 */
+	private void onRenderingInitialization() {
+	}
+
+	/**
+	 * Getter for the scheduler
+	 * 
+	 * @return the scheduler
+	 */
+	public Scheduler getScheduler() {
+		return scheduler;
+	}
+
+	/**
+	 * Set the seed for the common random object. This method should be called at
+	 * the very beginning of the initialization process
+	 * 
+	 * @param _seed
+	 *            The seed to initialize the random object
+	 */
+	public void setSeed(long _seed) {
+		random = new Random(_seed);
+	}
+
+	/**
+	 * This method is called during the initialization process of the environment
+	 */
+	public void onInitialization() {
+	}
+
+	/**
+	 * This method is called after the initialization process of the environment to
+	 * create entities
+	 */
+	public void onInitialEntitiesCreation() {
+	}
+
+	/**
+	 * This method is called at each cycle of the environment
+	 */
+	public void onCycle() {
+	}
+
+	@Override
+	public boolean stopCondition() {
+		return false;
+	}
+
+	@Override
+	public final void cycle() {
+		onCycle();
+		if (!Configuration.commandLineMode)
+			onUpdateRender();
+	}
+
+	/**
+	 * Override this method to update rendering related to the environment
+	 */
+	protected void onUpdateRender() {
+	}
+
+	/**
+	 * Getter for the random object
+	 * 
+	 * @return the random object
+	 */
+	public Random getRandom() {
+		return random;
+	}
+
+	@Override
+	public void onSchedulingStarts() {
+	}
+
+	@Override
+	public void onSchedulingStops() {
+	}
+	
+
+}
diff --git a/AMAKFX/src/fr/irit/smac/amak/Scheduler.java b/AMAKFX/src/fr/irit/smac/amak/Scheduler.java
index bffeeb2ce6a8d7e7a2422d165b41817e2e8b44c6..2c6a1a18701474de8807f168378aca26f69b8ee8 100644
--- a/AMAKFX/src/fr/irit/smac/amak/Scheduler.java
+++ b/AMAKFX/src/fr/irit/smac/amak/Scheduler.java
@@ -1,341 +1,357 @@
-package fr.irit.smac.amak;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.ConcurrentModificationException;
-import java.util.LinkedHashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Queue;
-import java.util.Set;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.function.Consumer;
-
-import fr.irit.smac.amak.ui.MainWindow;
-import fr.irit.smac.amak.ui.SchedulerToolbar;
-
-/**
- * A scheduler associated to a MAS
- * 
- * @author Alexandre Perles
- *
- */
-public class Scheduler implements Runnable, Serializable {
-	/**
-	 * Unique ID meant to handle serialization correctly
-	 */
-	private static final long serialVersionUID = -4765899565369100376L;
-	/**
-	 * The schedulables object handled by the scheduler
-	 */
-	private final Set<Schedulable> schedulables = new LinkedHashSet<>();
-	/**
-	 * The state of the scheduler {@link State}
-	 */
-	private State state;
-	/**
-	 * The sleep time in ms between each cycle
-	 */
-	private int sleep;
-	/**
-	 * A lock to protect the state
-	 */
-	private final ReentrantLock stateLock = new ReentrantLock();
-	/**
-	 * Method that is called when the scheduler stops
-	 */
-	private Consumer<Scheduler> onStop;
-	/**
-	 * The methods called when the speed is changed. Useful to change the value of
-	 * the GUI slider of {@link SchedulerToolbar}
-	 */
-	private List<Consumer<Scheduler>> onChange = new ArrayList<>();
-	/**
-	 * The idea is to prevent scheduler from launching if the schedulables are not
-	 * yet fully ready
-	 */
-	private int locked = 0;
-	/**
-	 * The default scheduler
-	 */
-	private static Scheduler defaultScheduler;
-	/**
-	 * The schedulables that must be added
-	 */
-	private Queue<Schedulable> pendingAdditionSchedulables = new LinkedList<>();
-	/**
-	 * The schedulables that must be removed
-	 */
-	private Queue<Schedulable> pendingRemovalSchedulables = new LinkedList<>();
-
-	/**
-	 * State of the scheduler
-	 *
-	 */
-	public enum State {
-		/**
-		 * The scheduler is running
-		 */
-		RUNNING,
-		/**
-		 * The scheduler is paused
-		 */
-		IDLE,
-		/**
-		 * The scheduler is expected to stop at the end at the current cycle
-		 */
-		PENDING_STOP
-
-	}
-
-	/**
-	 * Constructor which set the initial state and auto start if requested
-	 * 
-	 * @param _schedulables
-	 *            the corresponding schedulables
-	 */
-	public Scheduler(Schedulable... _schedulables) {
-
-		for (Schedulable schedulable : _schedulables) {
-			this.add(schedulable);
-		}
-		this.state = State.IDLE;
-	}
-
-	/**
-	 * Create or return the default scheduler
-	 * 
-	 * @return The default scheduler
-	 */
-	public static Scheduler getDefaultScheduler() {
-		if (defaultScheduler == null) {
-			defaultScheduler = new Scheduler();
-			if (!Configuration.commandLineMode) {
-				MainWindow.instance();
-				SchedulerToolbar st = new SchedulerToolbar("Default", defaultScheduler);
-				MainWindow.addToolbar(st);
-			}
-		}
-		return defaultScheduler;
-	}
-
-	/**
-	 * Set the delay between two cycles and launch the scheduler if it is not
-	 * running
-	 * 
-	 * @param i
-	 *            the delay between two cycles
-	 */
-	public void startWithSleep(int i) {
-		if (locked > 0) {
-
-			synchronized (onChange) {
-				onChange.forEach(c -> c.accept(this));
-			}
-			return;
-		}
-		setSleep(i);
-		stateLock.lock();
-		switch (state) {
-		case IDLE:
-			state = State.RUNNING;
-			new Thread(this).start();
-			break;
-		default:
-			break;
-		}
-		stateLock.unlock();
-		synchronized (onChange) {
-			onChange.forEach(c -> c.accept(this));
-		}
-	}
-
-	/**
-	 * Start (or continue) with no delay between cycles
-	 */
-	public void start() {
-		startWithSleep(Schedulable.DEFAULT_SLEEP);
-	}
-
-	/**
-	 * Execute one cycle
-	 */
-	public void step() {
-		if (locked > 0) {
-			synchronized (onChange) {
-				onChange.forEach(c -> c.accept(this));
-			}
-			return;
-		}
-		this.setSleep(0);
-		stateLock.lock();
-		switch (state) {
-		case IDLE:
-			state = State.PENDING_STOP;
-			new Thread(this).start();
-			break;
-		default:
-			break;
-
-		}
-		stateLock.unlock();
-		synchronized (onChange) {
-			onChange.forEach(c -> c.accept(this));
-		}
-	}
-
-	/**
-	 * Stop the scheduler if it is running
-	 */
-	public void stop() {
-		stateLock.lock();
-		switch (state) {
-		case RUNNING:
-			state = State.PENDING_STOP;
-			break;
-		default:
-			break;
-
-		}
-		stateLock.unlock();
-		synchronized (onChange) {
-			onChange.forEach(c -> c.accept(this));
-		}
-	}
-
-	/**
-	 * Threaded run method
-	 */
-	@Override
-	public void run() {
-		treatPendingSchedulables();
-		for (Schedulable schedulable : schedulables) {
-			schedulable.onSchedulingStarts();
-		}
-		boolean mustStop;
-		do {
-			for (Schedulable schedulable : schedulables) {
-				schedulable.cycle();
-			}
-			if (getSleep() != 0) {
-				try {
-					Thread.sleep(getSleep());
-				} catch (final InterruptedException e) {
-					e.printStackTrace();
-				}
-			}
-			mustStop = false;
-			for (Schedulable schedulable : schedulables) {
-				mustStop |= schedulable.stopCondition();
-			}
-		} while (state == State.RUNNING && !mustStop);
-		stateLock.lock();
-		state = State.IDLE;
-		stateLock.unlock();
-
-		for (Schedulable schedulable : schedulables) {
-			schedulable.onSchedulingStops();
-		}
-		treatPendingSchedulables();
-		if (onStop != null)
-			onStop.accept(this);
-	}
-
-	/**
-	 * Effectively Add or Remove the schedulables that were added or removed during
-	 * a cycle to avoid {@link ConcurrentModificationException}
-	 */
-	private void treatPendingSchedulables() {
-		while (!pendingAdditionSchedulables.isEmpty())
-			schedulables.add(pendingAdditionSchedulables.poll());
-		while (!pendingRemovalSchedulables.isEmpty())
-			schedulables.remove(pendingRemovalSchedulables.poll());
-
-	}
-
-	/**
-	 * Set the method that must be executed when the system is stopped
-	 * 
-	 * @param _onStop
-	 *            Consumer method
-	 */
-	public final void setOnStop(Consumer<Scheduler> _onStop) {
-		this.onStop = _onStop;
-	}
-
-	/**
-	 * Add a method that must be executed when the scheduler speed is changed
-	 * 
-	 * @param _onChange
-	 *            Consumer method
-	 */
-	public final void addOnChange(Consumer<Scheduler> _onChange) {
-		synchronized (onChange) {
-			this.onChange.add(_onChange);
-		}
-	}
-
-	/**
-	 * Is the scheduler running ?
-	 * 
-	 * @return true if the scheduler is running
-	 */
-	public boolean isRunning() {
-		return state == State.RUNNING;
-	}
-
-	/**
-	 * Getter for the sleep time
-	 * 
-	 * @return the sleep time
-	 */
-
-	public int getSleep() {
-		return sleep;
-	}
-
-	/**
-	 * Setter for the sleep time
-	 * 
-	 * @param sleep
-	 *            The time between each cycle
-	 */
-	public void setSleep(int sleep) {
-		this.sleep = sleep;
-	}
-
-	/**
-	 * Plan to add a schedulable
-	 * 
-	 * @param _schedulable
-	 *            the schedulable to add
-	 */
-	public void add(Schedulable _schedulable) {
-		this.pendingAdditionSchedulables.add(_schedulable);
-	}
-
-	/**
-	 * Plan to remove a schedulable
-	 * 
-	 * @param _schedulable
-	 *            the schedulable to remove
-	 */
-	public void remove(Schedulable _schedulable) {
-		this.pendingRemovalSchedulables.add(_schedulable);
-	}
-
-	/**
-	 * Soft lock the scheduler to avoid a too early running
-	 */
-	public void lock() {
-		locked++;
-	}
-
-	/**
-	 * Soft unlock the scheduler to avoid a too early running
-	 */
-	public void unlock() {
-		locked--;
-	}
-
-}
+package fr.irit.smac.amak;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.ConcurrentModificationException;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Consumer;
+
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+import fr.irit.smac.amak.ui.MainWindow;
+import fr.irit.smac.amak.ui.SchedulerToolbar;
+
+/**
+ * A scheduler associated to a MAS
+ * 
+ * @author Alexandre Perles
+ *
+ */
+public class Scheduler implements Runnable, Serializable {
+	/**
+	 * Unique ID meant to handle serialization correctly
+	 */
+	private static final long serialVersionUID = -4765899565369100376L;
+	/**
+	 * The schedulables object handled by the scheduler
+	 */
+	private final Set<Schedulable> schedulables = new LinkedHashSet<>();
+	/**
+	 * The state of the scheduler {@link State}
+	 */
+	private State state;
+	/**
+	 * The sleep time in ms between each cycle
+	 */
+	private int sleep;
+	/**
+	 * A lock to protect the state
+	 */
+	private final ReentrantLock stateLock = new ReentrantLock();
+	/**
+	 * Method that is called when the scheduler stops
+	 */
+	private Consumer<Scheduler> onStop;
+	/**
+	 * The methods called when the speed is changed. Useful to change the value of
+	 * the GUI slider of {@link SchedulerToolbar}
+	 */
+	private List<Consumer<Scheduler>> onChange = new ArrayList<>();
+	/**
+	 * The idea is to prevent the scheduler from launching if the schedulables are not
+	 * yet fully ready
+	 */
+	private int locked = 0;
+	/**
+	 * The default scheduler
+	 */
+	private static Scheduler defaultScheduler;
+	
+
+	/**
+	 * The schedulables that must be added
+	 */
+	private Queue<Schedulable> pendingAdditionSchedulables = new LinkedList<>();
+	/**
+	 * The schedulables that must be removed
+	 */
+	private Queue<Schedulable> pendingRemovalSchedulables = new LinkedList<>();
+
+	/**
+	 * State of the scheduler
+	 *
+	 */
+	public enum State {
+		/**
+		 * The scheduler is running
+		 */
+		RUNNING,
+		/**
+		 * The scheduler is paused
+		 */
+		IDLE,
+		/**
+		 * The scheduler is expected to stop at the end of the current cycle
+		 */
+		PENDING_STOP
+
+	}
+
+	/**
+	 * Constructor which sets the initial state and auto starts if requested
+	 * 
+	 * @param _schedulables
+	 *            the corresponding schedulables
+	 */
+	public Scheduler(Schedulable... _schedulables) {
+
+		for (Schedulable schedulable : _schedulables) {
+			this.add(schedulable);
+		}
+		this.state = State.IDLE;
+	}
+
+	/**
+	 * Create or return the default scheduler
+	 * 
+	 * @return The default scheduler
+	 */
+	public static Scheduler getDefaultScheduler() {
+		if (defaultScheduler == null) {
+			defaultScheduler = new Scheduler();
+			if (!Configuration.commandLineMode) {
+				MainWindow.instance();
+				SchedulerToolbar st = new SchedulerToolbar("Default", defaultScheduler);
+				MainWindow.addToolbar(st);
+			}
+		}
+		return defaultScheduler;
+	}
+	
+	
+	
+	
+	
+	public static Scheduler getDefaultMultiUIScheduler(AmasMultiUIWindow window) {
+		Scheduler multiUIScheduler = new Scheduler();
+			if (!Configuration.commandLineMode) {
+				SchedulerToolbar st = new SchedulerToolbar("Default", multiUIScheduler);
+				window.addToolbar(st);
+			}
+		return multiUIScheduler;
+	}
+
+	/**
+	 * Set the delay between two cycles and launch the scheduler if it is not
+	 * running
+	 * 
+	 * @param i
+	 *            the delay between two cycles
+	 */
+	public void startWithSleep(int i) {
+		if (locked > 0) {
+
+			synchronized (onChange) {
+				onChange.forEach(c -> c.accept(this));
+			}
+			return;
+		}
+		setSleep(i);
+		stateLock.lock();
+		switch (state) {
+		case IDLE:
+			state = State.RUNNING;
+			new Thread(this).start();
+			break;
+		default:
+			break;
+		}
+		stateLock.unlock();
+		synchronized (onChange) {
+			onChange.forEach(c -> c.accept(this));
+		}
+	}
+
+	/**
+	 * Start (or continue) with no delay between cycles
+	 */
+	public void start() {
+		startWithSleep(Schedulable.DEFAULT_SLEEP);
+	}
+
+	/**
+	 * Execute one cycle
+	 */
+	public void step() {
+		if (locked > 0) {
+			synchronized (onChange) {
+				onChange.forEach(c -> c.accept(this));
+			}
+			return;
+		}
+		this.setSleep(0);
+		stateLock.lock();
+		switch (state) {
+		case IDLE:
+			state = State.PENDING_STOP;
+			new Thread(this).start();
+			break;
+		default:
+			break;
+
+		}
+		stateLock.unlock();
+		synchronized (onChange) {
+			onChange.forEach(c -> c.accept(this));
+		}
+	}
+
+	/**
+	 * Stop the scheduler if it is running
+	 */
+	public void stop() {
+		stateLock.lock();
+		switch (state) {
+		case RUNNING:
+			state = State.PENDING_STOP;
+			break;
+		default:
+			break;
+
+		}
+		stateLock.unlock();
+		synchronized (onChange) {
+			onChange.forEach(c -> c.accept(this));
+		}
+	}
+
+	/**
+	 * Threaded run method
+	 */
+	@Override
+	public void run() {
+		treatPendingSchedulables();
+		for (Schedulable schedulable : schedulables) {
+			schedulable.onSchedulingStarts();
+		}
+		boolean mustStop;
+		do {
+			for (Schedulable schedulable : schedulables) {
+				schedulable.cycle();
+			}
+			if (getSleep() != 0) {
+				try {
+					Thread.sleep(getSleep());
+				} catch (final InterruptedException e) {
+					e.printStackTrace();
+				}
+			}
+			mustStop = false;
+			for (Schedulable schedulable : schedulables) {
+				mustStop |= schedulable.stopCondition();
+			}
+		} while (state == State.RUNNING && !mustStop);
+		stateLock.lock();
+		state = State.IDLE;
+		stateLock.unlock();
+
+		for (Schedulable schedulable : schedulables) {
+			schedulable.onSchedulingStops();
+		}
+		treatPendingSchedulables();
+		if (onStop != null)
+			onStop.accept(this);
+	}
+
+	/**
+	 * Effectively Add or Remove the schedulables that were added or removed during
+	 * a cycle to avoid {@link ConcurrentModificationException}
+	 */
+	private void treatPendingSchedulables() {
+		while (!pendingAdditionSchedulables.isEmpty())
+			schedulables.add(pendingAdditionSchedulables.poll());
+		while (!pendingRemovalSchedulables.isEmpty())
+			schedulables.remove(pendingRemovalSchedulables.poll());
+
+	}
+
+	/**
+	 * Set the method that must be executed when the system is stopped
+	 * 
+	 * @param _onStop
+	 *            Consumer method
+	 */
+	public final void setOnStop(Consumer<Scheduler> _onStop) {
+		this.onStop = _onStop;
+	}
+
+	/**
+	 * Add a method that must be executed when the scheduler speed is changed
+	 * 
+	 * @param _onChange
+	 *            Consumer method
+	 */
+	public final void addOnChange(Consumer<Scheduler> _onChange) {
+		synchronized (onChange) {
+			this.onChange.add(_onChange);
+		}
+	}
+
+	/**
+	 * Is the scheduler running ?
+	 * 
+	 * @return true if the scheduler is running
+	 */
+	public boolean isRunning() {
+		return state == State.RUNNING;
+	}
+
+	/**
+	 * Getter for the sleep time
+	 * 
+	 * @return the sleep time
+	 */
+
+	public int getSleep() {
+		return sleep;
+	}
+
+	/**
+	 * Setter for the sleep time
+	 * 
+	 * @param sleep
+	 *            The time between each cycle
+	 */
+	public void setSleep(int sleep) {
+		this.sleep = sleep;
+	}
+
+	/**
+	 * Plan to add a schedulable
+	 * 
+	 * @param _schedulable
+	 *            the schedulable to add
+	 */
+	public void add(Schedulable _schedulable) {
+		this.pendingAdditionSchedulables.add(_schedulable);
+	}
+
+	/**
+	 * Plan to remove a schedulable
+	 * 
+	 * @param _schedulable
+	 *            the schedulable to remove
+	 */
+	public void remove(Schedulable _schedulable) {
+		this.pendingRemovalSchedulables.add(_schedulable);
+	}
+
+	/**
+	 * Soft lock the scheduler to avoid a too early running
+	 */
+	public void lock() {
+		locked++;
+	}
+
+	/**
+	 * Soft unlock the scheduler to avoid a too early running
+	 */
+	public void unlock() {
+		locked--;
+	}
+
+}
diff --git a/AMAKFX/src/fr/irit/smac/amak/examples/randomants/AntExample.java b/AMAKFX/src/fr/irit/smac/amak/examples/randomants/AntExample.java
index 8186b0d4b27f736e337391f0110cb3340d7b2d2d..9a847c638d781660d25ca8c7f246d6fea72d7116 100644
--- a/AMAKFX/src/fr/irit/smac/amak/examples/randomants/AntExample.java
+++ b/AMAKFX/src/fr/irit/smac/amak/examples/randomants/AntExample.java
@@ -1,88 +1,88 @@
-package fr.irit.smac.amak.examples.randomants;
-
-import fr.irit.smac.amak.Agent;
-import fr.irit.smac.amak.ui.VUI;
-import fr.irit.smac.amak.ui.drawables.DrawableImage;
-
-public class AntExample extends Agent<AntHillExample, WorldExample> {
-	
-	private boolean dead = false; 
-	
-	/**
-	 * X coordinate of the ant in the world
-	 */
-	public double dx;
-	/**
-	 * Y coordinate of the ant in the world
-	 */
-	public double dy;
-	/**
-	 * Angle in radians
-	 */
-	private double angle = Math.random() * Math.PI * 2;
-	private DrawableImage image;
-
-	/**
-	 * Constructor of the ant
-	 * 
-	 * @param amas
-	 *            the amas the ant belongs to
-	 * @param startX
-	 *            Initial X coordinate
-	 * @param startY
-	 *            Initial Y coordinate
-	 */
-	public AntExample(AntHillExample amas, double startX, double startY) {
-		super(amas, startX, startY);
-	}
-	@Override
-	public void onInitialization() {
-		dx = (double) params[0];
-		dy = (double) params[1];
-	}
-
-	@Override
-	protected void onRenderingInitialization() {
-		image = VUI.get().createAndAddImage(dx, dy, "file:resources/ant.png");
-		image.setName("Ant "+getId());
-	}
-
-	/**
-	 * Move in a random direction
-	 */
-	@Override
-	protected void onDecideAndAct() {
-		double random = amas.getEnvironment().getRandom().nextGaussian();
-		angle += random * 0.1;
-		dx += Math.cos(angle);
-		dy += Math.sin(angle);
-		while (dx >= getAmas().getEnvironment().getWidth() / 2)
-			dx -= getAmas().getEnvironment().getWidth();
-		while (dy >= getAmas().getEnvironment().getHeight() / 2)
-			dy -= getAmas().getEnvironment().getHeight();
-		while (dx < -getAmas().getEnvironment().getWidth() / 2)
-			dx += getAmas().getEnvironment().getWidth();
-		while (dy < -getAmas().getEnvironment().getHeight() / 2)
-			dy += getAmas().getEnvironment().getHeight();
-
-		if (amas.getEnvironment().getRandom().nextDouble() < 0.001) {
-			dead = true;
-			destroy();
-		}
-
-		if (amas.getEnvironment().getRandom().nextDouble() < 0.001) {
-			new AntExample(getAmas(), dx, dy);
-		}
-	}
-
-	@Override
-	public void onUpdateRender() {
-		image.move(dx, dy);
-		image.setAngle(angle);
-		image.setInfo("Ant "+getId()+"\nPosition "+dx+" "+dy+"\nAngle "+angle);
-		if(dead) {
-			image.setFilename("file:Resources/ant_dead.png");
-			image.setInfo("Ant "+getId()+"\nPosition "+dx+" "+dy+"\nAngle "+angle+"\nDead");
-		}
-	}
-}
+package fr.irit.smac.amak.examples.randomants;
+
+import fr.irit.smac.amak.Agent;
+import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.drawables.DrawableImage;
+
+public class AntExample extends Agent<AntHillExample, WorldExample> {
+	
+	private boolean dead = false; 
+	
+	/**
+	 * X coordinate of the ant in the world
+	 */
+	public double dx;
+	/**
+	 * Y coordinate of the ant in the world
+	 */
+	public double dy;
+	/**
+	 * Angle in radians
+	 */
+	private double angle = Math.random() * Math.PI * 2;
+	private DrawableImage image;
+
+	/**
+	 * Constructor of the ant
+	 * 
+	 * @param amas
+	 *            the amas the ant belongs to
+	 * @param startX
+	 *            Initial X coordinate
+	 * @param startY
+	 *            Initial Y coordinate
+	 */
+	public AntExample(AntHillExample amas, double startX, double startY) {
+		super(amas, startX, startY);
+	}
+	@Override
+	public void onInitialization() {
+		dx = (double) params[0];
+		dy = (double) params[1];
+	}
+
+	@Override
+	protected void onRenderingInitialization() {
+		image = VUI.get().createAndAddImage(dx, dy, "file:resources/ant.png");
+		image.setName("Ant "+getId());
+	}
+
+	/**
+	 * Move in a random direction
+	 */
+	@Override
+	protected void onDecideAndAct() {
+		double random = amas.getEnvironment().getRandom().nextGaussian();
+		angle += random * 0.1;
+		dx += Math.cos(angle);
+		dy += Math.sin(angle);
+		while (dx >= getAmas().getEnvironment().getWidth() / 2)
+			dx -= getAmas().getEnvironment().getWidth();
+		while (dy >= getAmas().getEnvironment().getHeight() / 2)
+			dy -= getAmas().getEnvironment().getHeight();
+		while (dx < -getAmas().getEnvironment().getWidth() / 2)
+			dx += getAmas().getEnvironment().getWidth();
+		while (dy < -getAmas().getEnvironment().getHeight() / 2)
+			dy += getAmas().getEnvironment().getHeight();
+
+		if (amas.getEnvironment().getRandom().nextDouble() < 0.001) {
+			dead = true;
+			destroy();
+		}
+
+		if (amas.getEnvironment().getRandom().nextDouble() < 0.001) {
+			new AntExample(getAmas(), dx, dy);
+		}
+	}
+
+	@Override
+	public void onUpdateRender() {
+		image.move(dx, dy);
+		image.setAngle(angle);
+		image.setInfo("Ant "+getId()+"\nPosition "+dx+" "+dy+"\nAngle "+angle);
+		if(dead) {
+			image.setFilename("file:Resources/ant_dead.png");
+			image.setInfo("Ant "+getId()+"\nPosition "+dx+" "+dy+"\nAngle "+angle+"\nDead");
+		}
+	}
+}
diff --git a/AMAKFX/src/fr/irit/smac/amak/examples/randomants/WorldExample.java b/AMAKFX/src/fr/irit/smac/amak/examples/randomants/WorldExample.java
index 2c6ea56a9ef181a9cf191d87669c578c3dd7e24b..88156754ae2aaa969ed1b24c99ad7143f3854987 100644
--- a/AMAKFX/src/fr/irit/smac/amak/examples/randomants/WorldExample.java
+++ b/AMAKFX/src/fr/irit/smac/amak/examples/randomants/WorldExample.java
@@ -1,28 +1,28 @@
-package fr.irit.smac.amak.examples.randomants;
-
-import fr.irit.smac.amak.Environment;
-import fr.irit.smac.amak.Scheduling;
-
-public class WorldExample extends Environment {
-	public WorldExample(Object...params) {
-		super(Scheduling.DEFAULT, params);
-	}
-
-	private int width;
-	private int height;
-
-	public int getWidth() {
-		return width;
-	}
-
-	public int getHeight() {
-		return height;
-	}
-
-	@Override
-	public void onInitialization() {
-		this.width = 800;
-		this.height = 600;
-	}
-
-}
+package fr.irit.smac.amak.examples.randomants;
+
+import fr.irit.smac.amak.Environment;
+import fr.irit.smac.amak.Scheduling;
+
+public class WorldExample extends Environment {
+	public WorldExample(Object...params) {
+		super(Scheduling.DEFAULT, params);
+	}
+
+	private int width;
+	private int height;
+
+	public int getWidth() {
+		return width;
+	}
+
+	public int getHeight() {
+		return height;
+	}
+
+	@Override
+	public void onInitialization() {
+		this.width = 800;
+		this.height = 600;
+	}
+
+}
diff --git a/AMAKFX/src/fr/irit/smac/amak/examples/randomantsMultiUi/AntExampleMutliUI.java b/AMAKFX/src/fr/irit/smac/amak/examples/randomantsMultiUi/AntExampleMutliUI.java
new file mode 100644
index 0000000000000000000000000000000000000000..a8b731ca2be36cc141828375e17721adcf89ea4e
--- /dev/null
+++ b/AMAKFX/src/fr/irit/smac/amak/examples/randomantsMultiUi/AntExampleMutliUI.java
@@ -0,0 +1,92 @@
+package fr.irit.smac.amak.examples.randomantsMultiUi;
+
+import fr.irit.smac.amak.Agent;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.drawables.DrawableImage;
+
+public class AntExampleMutliUI extends Agent<AntHillExampleMultiUI, WorldExampleMultiUI> {
+	
+	
+	
+	private boolean dead = false; 
+	
+	/**
+	 * X coordinate of the ant in the world
+	 */
+	public double dx;
+	/**
+	 * Y coordinate of the ant in the world
+	 */
+	public double dy;
+	/**
+	 * Angle in radians
+	 */
+	private double angle = Math.random() * Math.PI * 2;
+	private DrawableImage image;
+
+	/**
+	 * Constructor of the ant
+	 * 
+	 * @param amas
+	 *            the amas the ant belongs to
+	 * @param startX
+	 *            Initial X coordinate
+	 * @param startY
+	 *            Initial Y coordinate
+	 */
+	public AntExampleMutliUI(AmasMultiUIWindow window, AntHillExampleMultiUI amas, double startX, double startY) {
+		super(window, amas, startX, startY);
+	}
+	@Override
+	public void onInitialization() {
+		dx = (double) params[0];
+		dy = (double) params[1];
+	}
+
+	@Override
+	protected void onRenderingInitialization() {
+		image =  getAmas().getVUIMulti().createAndAddImage(dx, dy, "file:resources/ant.png");
+		image.setName("Ant "+getId());
+	}
+
+	/**
+	 * Move in a random direction
+	 */
+	@Override
+	protected void onDecideAndAct() {
+
+		double random = amas.getEnvironment().getRandom().nextGaussian();
+		angle += random * 0.1;
+		dx += Math.cos(angle);
+		dy += Math.sin(angle);
+		while (dx >= getAmas().getEnvironment().getWidth() / 2)
+			dx -= getAmas().getEnvironment().getWidth();
+		while (dy >= getAmas().getEnvironment().getHeight() / 2)
+			dy -= getAmas().getEnvironment().getHeight();
+		while (dx < -getAmas().getEnvironment().getWidth() / 2)
+			dx += getAmas().getEnvironment().getWidth();
+		while (dy < -getAmas().getEnvironment().getHeight() / 2)
+			dy += getAmas().getEnvironment().getHeight();
+
+		if (amas.getEnvironment().getRandom().nextDouble() < 0.001) {
+			dead = true;
+			destroy();
+		}
+
+		if (amas.getEnvironment().getRandom().nextDouble() < 0.001) {
+			new AntExampleMutliUI(getAmas().amasMultiUIWindow, getAmas(), dx, dy);
+		}
+	}
+
+	@Override
+	public void onUpdateRender() {
+		image.move(dx, dy);
+		image.setAngle(angle);
+		image.setInfo("Ant "+getId()+"\nPosition "+dx+" "+dy+"\nAngle "+angle);
+		if(dead) {
+			image.setFilename("file:Resources/ant_dead.png");
+			image.setInfo("Ant "+getId()+"\nPosition "+dx+" "+dy+"\nAngle "+angle+"\nDead");
+		}
+	}
+}
diff --git a/AMAKFX/src/fr/irit/smac/amak/examples/randomantsMultiUi/AntHillExampleMultiUI.java b/AMAKFX/src/fr/irit/smac/amak/examples/randomantsMultiUi/AntHillExampleMultiUI.java
new file mode 100644
index 0000000000000000000000000000000000000000..344561afb24ba67df1e4fa1031eab902844ff01f
--- /dev/null
+++ b/AMAKFX/src/fr/irit/smac/amak/examples/randomantsMultiUi/AntHillExampleMultiUI.java
@@ -0,0 +1,37 @@
+package fr.irit.smac.amak.examples.randomantsMultiUi;
+
+import fr.irit.smac.amak.Amas;
+import fr.irit.smac.amak.Scheduling;
+import fr.irit.smac.amak.tools.RunLaterHelper;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.VUIMulti;
+import fr.irit.smac.amak.ui.drawables.DrawableString;
+
+public class AntHillExampleMultiUI extends Amas<WorldExampleMultiUI> {
+
+	private DrawableString antsCountLabel;
+
+	public AntHillExampleMultiUI(AmasMultiUIWindow window, VUIMulti vui, WorldExampleMultiUI env) {
+		super(window, vui, env, Scheduling.DEFAULT);
+	}
+
+	@Override
+	protected void onRenderingInitialization() {
+		vuiMulti.createAndAddImage(20, 20, "file:Resources/ant.png").setFixed().setLayer(10).setShowInExplorer(false);
+		antsCountLabel = (DrawableString) vuiMulti.createAndAddString(45, 25, "Ants count").setFixed().setLayer(10).setShowInExplorer(false);
+	}
+
+	@Override
+	protected void onInitialAgentsCreation() {
+		for (int i = 0; i < 50; i++) {
+			new AntExampleMutliUI(amasMultiUIWindow, this, 0, 0);
+		}
+			
+	}
+
+	@Override
+	protected void onSystemCycleEnd() {
+		RunLaterHelper.runLater(()->antsCountLabel.setText("Ants count: " + getAgents().size()));
+	}
+}
diff --git a/AMAKFX/src/fr/irit/smac/amak/examples/randomantsMultiUi/AntsLaunchExampleMultiUI.java b/AMAKFX/src/fr/irit/smac/amak/examples/randomantsMultiUi/AntsLaunchExampleMultiUI.java
new file mode 100644
index 0000000000000000000000000000000000000000..9fec860bf0a96823e3bebd1851b17bf05cad77a6
--- /dev/null
+++ b/AMAKFX/src/fr/irit/smac/amak/examples/randomantsMultiUi/AntsLaunchExampleMultiUI.java
@@ -0,0 +1,111 @@
+package fr.irit.smac.amak.examples.randomantsMultiUi;
+
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+import fr.irit.smac.amak.ui.MainWindow;
+import fr.irit.smac.amak.ui.VUIMulti;
+import javafx.application.Application;
+import javafx.application.Platform;
+import javafx.beans.value.ChangeListener;
+import javafx.beans.value.ObservableValue;
+import javafx.event.ActionEvent;
+import javafx.event.EventHandler;
+import javafx.scene.control.Button;
+import javafx.scene.control.Label;
+import javafx.scene.control.Slider;
+import javafx.scene.layout.Pane;
+import javafx.stage.Stage;
+
+public class AntsLaunchExampleMultiUI extends Application{
+
+	
+	public static void main (String[] args) {
+		
+		
+		Application.launch(args);
+		
+	
+	}
+
+	@Override
+	public void start(Stage primaryStage) throws Exception {
+		
+		Configuration.multiUI=true;
+		Configuration.commandLineMode =false;
+		
+		
+		AmasMultiUIWindow window = new AmasMultiUIWindow("Random Ants Multi UI 1");
+		AmasMultiUIWindow window2 = new AmasMultiUIWindow("Random Ants Multi UI 2");
+		
+		
+		WorldExampleMultiUI env = new WorldExampleMultiUI(window);
+		WorldExampleMultiUI env2 = new WorldExampleMultiUI(window2);
+		
+
+		AntHillExampleMultiUI ants = new AntHillExampleMultiUI(window, new VUIMulti("Ants VUI 1"), env);
+		AntHillExampleMultiUI ants2 = new AntHillExampleMultiUI(window2, new VUIMulti("Ants VUI 2"), env2);
+		
+		startTask(ants, 500, 10);
+		
+		startTask(ants2, 250, 30);
+		
+		
+		
+			
+	}
+	
+	public void startTask(AntHillExampleMultiUI amas, long wait, int cycles) 
+    {
+        // Create a Runnable
+        Runnable task = new Runnable()
+        {
+            public void run()
+            {
+                runTask(amas, wait, cycles);
+            }
+        };
+ 
+        // Run the task in a background thread
+        Thread backgroundThread = new Thread(task);
+        // Terminate the running thread if the application exits
+        backgroundThread.setDaemon(true);
+        // Start the thread
+        backgroundThread.start();
+    }
+	
+	public void runTask(AntHillExampleMultiUI amas, long wait, int cycles) 
+    {
+        for(int i = 0; i < cycles; i++) 
+        {
+            try
+            {
+                // Get the Status
+                final String status = "Processing " + i + " of " + cycles;
+                 
+                // Update the Label on the JavaFx Application Thread        
+                Platform.runLater(new Runnable() 
+                {
+                    @Override
+                    public void run() 
+                    {
+                    	amas.cycle();
+                    	System.out.println(status);
+                    }
+                });
+         
+                Thread.sleep(wait);
+            }
+            catch (InterruptedException e) 
+            {
+                e.printStackTrace();
+            }
+        }
+    }   
+
+	
+	@Override
+	public void stop() throws Exception {
+		super.stop();
+		System.exit(0);
+	}
+}
diff --git a/AMAKFX/src/fr/irit/smac/amak/examples/randomantsMultiUi/WorldExampleMultiUI.java b/AMAKFX/src/fr/irit/smac/amak/examples/randomantsMultiUi/WorldExampleMultiUI.java
new file mode 100644
index 0000000000000000000000000000000000000000..ffb5c0f1f034f6826152b0bcdd10c42356c5a186
--- /dev/null
+++ b/AMAKFX/src/fr/irit/smac/amak/examples/randomantsMultiUi/WorldExampleMultiUI.java
@@ -0,0 +1,29 @@
+package fr.irit.smac.amak.examples.randomantsMultiUi;
+
+import fr.irit.smac.amak.Environment;
+import fr.irit.smac.amak.Scheduling;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+
+public class WorldExampleMultiUI extends Environment {
+	public WorldExampleMultiUI(AmasMultiUIWindow window, Object...params) {
+		super(window, Scheduling.DEFAULT, params);
+	}
+
+	private int width;
+	private int height;
+
+	public int getWidth() {
+		return width;
+	}
+
+	public int getHeight() {
+		return height;
+	}
+
+	@Override
+	public void onInitialization() {
+		this.width = 800;
+		this.height = 600;
+	}
+
+}
diff --git a/AMAKFX/src/fr/irit/smac/amak/ui/AmakPlot.java b/AMAKFX/src/fr/irit/smac/amak/ui/AmakPlot.java
index 60a58286244634a53702a0cea0126b6a792c29aa..a50dcea052955e308ffca135ce65c6d0998898a2 100644
--- a/AMAKFX/src/fr/irit/smac/amak/ui/AmakPlot.java
+++ b/AMAKFX/src/fr/irit/smac/amak/ui/AmakPlot.java
@@ -36,6 +36,10 @@ public class AmakPlot {
 	public static void add(AmakPlot chart) {
 		MainWindow.addTabbedPanel(chart.name, new ChartViewer(chart.chart));
 	}
+	
+	public static void add(AmasMultiUIWindow window, AmakPlot chart) {
+		window.addTabbedPanel(chart.name, new ChartViewer(chart.chart));
+	}
 	/* ----- */
 	
 	private String name;
@@ -119,6 +123,79 @@ public class AmakPlot {
 		this(name, chart, true);
 	}
 	
+	
+	/** Create a chart shown in the given window.
+	 * @param window the {@link AmasMultiUIWindow} the chart tab is added to (when autoAdd is true).
+	 * @param name the name of the chart, used as the tab name.
+	 * @param chartType {@link ChartType#LINE} or {@link ChartType#BAR}
+	 * @param xAxisLabel label for the x (horizontal) axis 
+	 * @param yAxisLabel label for the y (vertical) axis
+	 * @param autoAdd automatically make an {@link AmakPlot#add(AmasMultiUIWindow, AmakPlot)} call ?
+	 */
+	public AmakPlot(AmasMultiUIWindow window, String name, ChartType chartType, String xAxisLabel, String yAxisLabel, boolean autoAdd) {
+		this.name = name;
+		seriesCollection = new XYSeriesCollection();
+		switch (chartType) {
+		case BAR:
+			chart = ChartFactory.createXYBarChart(name, xAxisLabel, false, yAxisLabel, seriesCollection);
+			break;
+		case LINE:
+			chart = ChartFactory.createXYLineChart(name, xAxisLabel, yAxisLabel, seriesCollection);
+			if(useSamplingRenderer) {
+				chart.getXYPlot().setRenderer(new SamplingXYLineRenderer());
+			}
+			XYPlot plot = (XYPlot)chart.getPlot();
+			plot.setDomainGridlinesVisible(true);
+	        plot.setDomainGridlinePaint(Color.lightGray);
+	        plot.setRangeGridlinePaint(Color.lightGray);
+			break;
+		default:
+			System.err.println("AmakPlot : unknown ChartType \""+chartType+"\".");
+			break;
+		}
+		chart.setAntiAlias(false);
+		chart.getPlot().setBackgroundPaint(Color.WHITE);
+		if(autoAdd) {
+			add(window, this);
+		}
+	}
+	
+	/** Create a chart and add it to the given window.
+	 * @param window the window the chart tab is added to.
+	 * @param name the name of the chart, used as the tab name.
+	 * @param chartType {@link ChartType#LINE} or {@link ChartType#BAR}
+	 * @param xAxisLabel label for the x (horizontal) axis 
+	 * @param yAxisLabel label for the y (vertical) axis
+	 */
+	public AmakPlot(AmasMultiUIWindow window, String name, ChartType chartType, String xAxisLabel, String yAxisLabel) {
+		this(window, name, chartType, xAxisLabel, yAxisLabel, true);
+	}
+	
+	
+	/**
+	 * Create a chart out of a JFreeChart.
+	 * Make sure that your chart uses an {@link XYSeriesCollection} as dataset.
+	 * @param name the name of the chart, used as the tab name.
+	 * @param chart the {@link JFreeChart} using a {@link XYSeriesCollection} for dataset.
+	 * @param autoAdd automatically make an {@link AmakPlot#add(AmasMultiUIWindow, AmakPlot)} call ?
+	 */
+	public AmakPlot(AmasMultiUIWindow window, String name, JFreeChart chart, boolean autoAdd) {
+		this.name = name;
+		this.seriesCollection = (XYSeriesCollection) chart.getXYPlot().getDataset();
+		this.chart = chart;
+		if(autoAdd) { add(window, this); } // honor autoAdd like the sibling constructor (it was previously ignored and the chart always added)
+	}
+	
+	/**
+	 * Create a chart out of a JFreeChart and add it to the given window.
+	 * Make sure that your chart uses an {@link XYSeriesCollection} as dataset.
+	 * @param name the name of the chart, used as the tab name.
+	 * @param chart the {@link JFreeChart} using a {@link XYSeriesCollection} for dataset.
+	 */
+	public AmakPlot(AmasMultiUIWindow window, String name, JFreeChart chart) {
+		this(window, name, chart, true);
+	}
+	
 	public String getName() {
 		return name;
 	}
diff --git a/AMAKFX/src/fr/irit/smac/amak/ui/AmasMultiUIWindow.java b/AMAKFX/src/fr/irit/smac/amak/ui/AmasMultiUIWindow.java
new file mode 100644
index 0000000000000000000000000000000000000000..c9691c2fa2efb8fd78d00a77096548c2dc26a642
--- /dev/null
+++ b/AMAKFX/src/fr/irit/smac/amak/ui/AmasMultiUIWindow.java
@@ -0,0 +1,252 @@
+package fr.irit.smac.amak.ui;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.concurrent.locks.ReentrantLock;
+
+import javax.management.InstanceAlreadyExistsException;
+
+import fr.irit.smac.amak.Information;
+import fr.irit.smac.amak.tools.RunLaterHelper;
+import javafx.application.Application;
+import javafx.application.Platform;
+import javafx.event.ActionEvent;
+import javafx.event.EventHandler;
+import javafx.scene.Node;
+import javafx.scene.Scene;
+import javafx.scene.control.Menu;
+import javafx.scene.control.MenuBar;
+import javafx.scene.control.MenuItem;
+import javafx.scene.control.Tab;
+import javafx.scene.control.TabPane;
+import javafx.scene.control.ToolBar;
+import javafx.scene.image.Image;
+import javafx.scene.layout.BorderPane;
+import javafx.scene.layout.Priority;
+import javafx.scene.layout.VBox;
+import javafx.stage.Stage;
+import javafx.stage.WindowEvent;
+
+/**
+ * This window is the main one of an AMAS developed using AMAK. It contains a
+ * toolbar panel and various spaces for panels
+ * 
+ * @author of the original version (the Swing one) Alexandre Perles, Marcillaud
+ *         Guilhem
+ *
+ */
+public class AmasMultiUIWindow extends Stage{
+//	/**
+//	 * The window itself
+//	 */
+//	public Stage stage;
+	/**
+	 * The panel which contains the toolbar
+	 */
+	public ToolBar toolbarPanel;
+
+	/**
+	 * The main pane of AMAK
+	 */
+	public BorderPane organizationPane;
+	
+	/**
+	 * The menu bar of the window
+	 */
+	public MenuBar menuBar;
+	/**
+	 * The menus
+	 */
+	public HashMap<String, Menu> menus = new HashMap<String, Menu>();
+	/**
+	 * The panel in which panels with tab can be added
+	 */
+	public TabPane tabbedPanel;
+
+	
+
+	/**
+	 * Create the window: the actual UI construction is posted to the JavaFX
+	 * thread via RunLaterHelper and the window shows itself once built.
+	 * Unlike MainWindow there is no singleton check here, so several instances
+	 * may coexist (typically one per AMAS).
+	 * 
+	 * @param title the initial title of the window
+	 */
+	public AmasMultiUIWindow(String title) {
+
+		RunLaterHelper.runLater(() -> {	
+			
+		VBox root = new VBox();
+		
+		// Creation of the menu bar (Top)
+		menuBar = new MenuBar();
+		root.getChildren().add(menuBar);
+		
+		// Border organization
+		organizationPane = new BorderPane();
+		organizationPane.setMinSize(200, 200); //that way we avoid 0 size, which can cause problems
+		root.getChildren().add(organizationPane);
+		VBox.setVgrow(organizationPane, Priority.ALWAYS);
+		
+		// Creation of scene
+		this.setTitle(title);
+		Scene scene = new Scene(root, 450, 300);
+		//stage = primaryStage;
+		this.setScene(scene);
+		this.setOnCloseRequest(new EventHandler<WindowEvent>() {
+			@Override
+			public void handle(WindowEvent event) {
+				Platform.exit();
+			}
+		});
+
+		// Creation of the toolbar (Bottom)
+		toolbarPanel = new ToolBar();
+		organizationPane.setBottom(toolbarPanel);
+
+		// Creation of the right part of the split pane (Center Right)
+		tabbedPanel = new TabPane();
+		organizationPane.setCenter(tabbedPanel);
+
+		// Creation of the close menu item
+		MenuItem menuItem = new MenuItem("Close");
+		menuItem.setOnAction(new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				System.exit(0); // NOTE(review): hard-exits the whole JVM, closing every window, not just this one
+			}
+		});
+		addToMenu("Options", menuItem);
+
+		menuBar.getMenus().add(new Menu("AMAKFX v" + Information.VERSION));
+
+		this.show();
+			
+		});	
+
+	}
+
+//	@Override
+//	public void start(Stage primaryStage) throws Exception {
+//		
+//	}
+
+	/**
+	 * Add an action to run when the JVM shuts down (registered as a shutdown
+	 * hook, so it also runs on System.exit).
+	 * 
+	 * @param onClose
+	 *            The action to be executed when the window is closed
+	 */
+	public void addOnCloseAction(Runnable onClose) {
+		Runtime.getRuntime().addShutdownHook(new Thread() {
+		    public void run() { onClose.run(); }
+		});
+	}
+	
+//	@Override
+//	public void stop() throws Exception {
+//		super.stop();
+//		System.exit(0);
+//	}
+
+	/**
+	 * Change the icon of the window
+	 * 
+	 * @param filename
+	 *            The filename of the icon
+	 */
+	public  void setWindowIcon(String filename) {
+		RunLaterHelper.runLater(() -> this.getIcons().add(new Image(filename)));
+	}
+
+	/**
+	 * Change the title of the main window
+	 * 
+	 * @param title
+	 *            The new title
+	 */
+	public void setWindowTitle(String title) {
+		RunLaterHelper.runLater(() -> this.setTitle(title));
+	}
+	
+	/**
+	 * Add a button in the menu options
+	 * 
+	 * @param title
+	 *            The title of the button
+	 * @param event
+	 *            The action to be executed
+	 */
+	public  void addOptionsItem(String title, EventHandler<ActionEvent> event) {
+		MenuItem menuItem = new MenuItem(title);
+		menuItem.setOnAction(event);
+		RunLaterHelper.runLater(() -> addToMenu("Options", menuItem));
+	}
+
+	/**
+	 * Add a tool in the toolbar.
+	 * 
+	 * @param tool
+	 */
+	public  void addToolbar(Node tool) {
+		RunLaterHelper.runLater(() -> toolbarPanel.getItems().add(tool));
+	}
+
+	/**
+	 * Set a panel to the left
+	 * 
+	 * @param panel
+	 *            The panel
+	 */
+	public void setLeftPanel(Node panel) {
+		RunLaterHelper.runLater(() -> organizationPane.setLeft(panel));
+	}
+
+	/**
+	 * Set a panel to the right
+	 * 
+	 * @param panel
+	 *            The panel
+	 */
+	public void setRightPanel(Node panel) {
+		RunLaterHelper.runLater(() -> organizationPane.setRight(panel));
+	}
+
+	/*
+	 * NOTE(review): MainWindow's singleton accessor ("instance()") has no
+	 * equivalent here; AmasMultiUIWindow instances are created directly.
+	 * 
+	 */
+	
+	
+
+
+	/**
+	 * Add a panel with a tab
+	 * 
+	 * @param title
+	 *            The title of the tab
+	 * @param panel
+	 *            The panel to add
+	 */
+	public void addTabbedPanel(String title, Node panel) {
+		Tab t = new DraggableTab(title, panel);
+		RunLaterHelper.runLater(() -> tabbedPanel.getTabs().add(t));
+	}
+	
+	/**
+	 * Add a {@link MenuItem} to a {@link Menu}. May create the menu and add it to the menu bar.
+	 * @param menuName the name of the menu where the item will be added.
+	 * @param item the item to be added.
+	 */
+	public void addToMenu(String menuName, MenuItem item) {
+		//instance();
+		if( !menus.containsKey(menuName) ) {
+			Menu m = new Menu(menuName);
+			menus.put(menuName,m);
+			RunLaterHelper.runLater(() -> menuBar.getMenus().add(m));
+		}
+		RunLaterHelper.runLater(() -> menus.get(menuName).getItems().add(item));
+	}
+}
\ No newline at end of file
diff --git a/AMAKFX/src/fr/irit/smac/amak/ui/VUI.java b/AMAKFX/src/fr/irit/smac/amak/ui/VUI.java
index 136cb472ac33fbffb9e2f7c7ddda551fe2384d81..2037681065b7736b2c48bb4eea8348a843194faf 100644
--- a/AMAKFX/src/fr/irit/smac/amak/ui/VUI.java
+++ b/AMAKFX/src/fr/irit/smac/amak/ui/VUI.java
@@ -1,577 +1,579 @@
-package fr.irit.smac.amak.ui;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.locks.ReentrantLock;
-
-import fr.irit.smac.amak.tools.RunLaterHelper;
-import fr.irit.smac.amak.ui.drawables.Drawable;
-import fr.irit.smac.amak.ui.drawables.DrawableImage;
-import fr.irit.smac.amak.ui.drawables.DrawablePoint;
-import fr.irit.smac.amak.ui.drawables.DrawableRectangle;
-import fr.irit.smac.amak.ui.drawables.DrawableString;
-import javafx.event.ActionEvent;
-import javafx.event.EventHandler;
-import javafx.geometry.Insets;
-import javafx.scene.control.Button;
-import javafx.scene.control.Label;
-import javafx.scene.control.ToolBar;
-import javafx.scene.control.Tooltip;
-import javafx.scene.input.MouseEvent;
-import javafx.scene.input.ScrollEvent;
-import javafx.scene.layout.Background;
-import javafx.scene.layout.BackgroundFill;
-import javafx.scene.layout.BorderPane;
-import javafx.scene.layout.CornerRadii;
-import javafx.scene.layout.Pane;
-import javafx.scene.paint.Color;
-import javafx.scene.shape.Rectangle;
-import javafx.scene.text.TextAlignment;
-
-/**
- * 
- * Vectorial UI: This class allows to create dynamic rendering with zoom and
- * move capacities
- * 
- * @author of original version (the Swing one) perles
- *
- */
-public class VUI {
-	/**
-	 * The toolbar of the VUI.
-	 */
-	public ToolBar toolbar;
-	
-	/**
-	 * The VUI explorer.
-	 * @see VuiExplorer
-	 */
-	private VuiExplorer vuiExplorer;
-	
-	/**
-	 * List of objects currently being drawn by the VUI
-	 */
-	private List<Drawable> drawables = new LinkedList<>();
-	/**
-	 * Lock to avoid concurrent modification on the list {@link #drawables}
-	 */
-	private ReentrantLock drawablesLock = new ReentrantLock();
-
-	/**
-	 * A static map to facilitate access to different instances of VUI
-	 */
-	private static Map<String, VUI> instances = new HashMap<>();
-
-	/**
-	 * The horizontal offset of the drawing zone. Used to allow the user to move the
-	 * view.
-	 */
-	private double worldOffsetX;
-
-	/**
-	 * The vertical offset of the drawing zone. Used to allow the user to move the
-	 * view.
-	 */
-	private double worldOffsetY;
-
-	/**
-	 * The last horizontal position of the mouse when dragging
-	 */
-	protected Double lastDragX;
-
-	/**
-	 * The last vertical position of the mouse when dragging
-	 */
-	protected Double lastDragY;
-
-	/**
-	 * The main panel of the VUI
-	 */
-	private BorderPane panel;
-
-	/**
-	 * The canvas on which all is drawn
-	 */
-	private Pane canvas;
-
-	/**
-	 * Label aiming at showing information about the VUI (zoom and offset)
-	 */
-	private Label statusLabel;
-
-	/**
-	 * The default value of the {@link #zoom}
-	 */
-	private double defaultZoom = 100;
-	/**
-	 * The default horizontal position of the view
-	 */
-	private double defaultWorldCenterX = 0;
-	/**
-	 * The default vertical position of the view
-	 */
-	private double defaultWorldCenterY = 0;
-	/**
-	 * The value of the zoom. 100 means 1/1 scale
-	 */
-	protected double zoom = defaultZoom;
-
-	/**
-	 * The horizontal position of the view
-	 */
-	private double worldCenterX = defaultWorldCenterX;
-
-	/**
-	 * The vertical position of the view
-	 */
-	private double worldCenterY = defaultWorldCenterY;
-
-	/**
-	 * Used to be sure that only one thread at the same time create a VUI
-	 */
-	private static ReentrantLock instanceLock = new ReentrantLock();
-
-	/**
-	 * Get the default VUI
-	 * 
-	 * @return the default VUI
-	 */
-	public static VUI get() {
-		if(!instances.containsKey("Default"))
-			MainWindow.addTabbedPanel("Default VUI", get("Default").getPanel());
-		return get("Default");
-	}
-
-	/**
-	 * Create or get a VUI.<br/>
-	 * You have add its panel to the MainWindow yourself.
-	 * 
-	 * @param id
-	 *            The unique id of the VUI
-	 * @return The VUI with id "id"
-	 */
-	public static VUI get(String id) {
-		instanceLock.lock();
-		if (!instances.containsKey(id)) {
-			VUI value = new VUI(id);
-			instances.put(id, value);
-			instanceLock.unlock();
-			return value;
-		}
-		instanceLock.unlock();
-		return instances.get(id);
-	}
-
-	/**
-	 * Constructor of the VUI. This one is private as it can only be created through
-	 * static method.
-	 * 
-	 * @param title
-	 *            The title used for the vui
-	 */
-	private VUI(String title) {
-		Semaphore done = new Semaphore(0);
-		RunLaterHelper.runLater(() -> {
-			panel = new BorderPane();
-
-			toolbar = new ToolBar();
-			statusLabel = new Label("status");
-			statusLabel.setTextAlignment(TextAlignment.LEFT);
-			toolbar.getItems().add(statusLabel);
-			panel.setBottom(toolbar);
-
-			Button resetButton = new Button("Reset");
-			resetButton.setOnAction(new EventHandler<ActionEvent>() {
-				@Override
-				public void handle(ActionEvent event) {
-					zoom = defaultZoom;
-					worldCenterX = defaultWorldCenterX;
-					worldCenterY = defaultWorldCenterY;
-					updateCanvas();
-				}
-			});
-			toolbar.getItems().add(resetButton);
-
-			canvas = new Pane();
-			canvas.setBackground(new Background(new BackgroundFill(Color.WHITE, CornerRadii.EMPTY, Insets.EMPTY)));
-			// clip the canvas (avoid drawing outside of it)
-			Rectangle clip = new Rectangle(0, 0, 0, 0);
-			clip.widthProperty().bind(canvas.widthProperty());
-			clip.heightProperty().bind(canvas.heightProperty());
-			canvas.setClip(clip);
-			
-			canvas.setOnMousePressed(new EventHandler<MouseEvent>() {
-				@Override
-				public void handle(MouseEvent event) {
-					lastDragX = event.getX();
-					lastDragY = event.getY();
-				}
-			});
-			canvas.setOnMouseExited(new EventHandler<MouseEvent>() {
-				@Override
-				public void handle(MouseEvent event) {
-					lastDragX = null;
-					lastDragY = null;
-				}
-			});
-			canvas.setOnMouseDragged(new EventHandler<MouseEvent>() {
-				@Override
-				public void handle(MouseEvent event) {
-					try {
-						double transX = screenToWorldDistance(event.getX() - lastDragX);
-						double transY = screenToWorldDistance(event.getY() - lastDragY);
-						worldCenterX += transX;
-						worldCenterY += transY;
-						worldOffsetX += transX;
-						worldOffsetY += transY;
-						lastDragX = event.getX();
-						lastDragY = event.getY();
-						updateCanvas();
-					} catch (Exception ez) {
-						// Catch exception occurring when mouse is out of the canvas
-					}
-				}
-			});
-
-			canvas.setOnScroll(new EventHandler<ScrollEvent>() {
-				@Override
-				public void handle(ScrollEvent event) {
-					double wdx = screenToWorldDistance(canvas.getWidth() / 2 - event.getX());
-					double wdy = screenToWorldDistance(canvas.getHeight() / 2 - event.getY());
-					zoom += event.getDeltaY() / event.getMultiplierY() * 10;
-					if (zoom < 10)
-						zoom = 10;
-
-					double wdx2 = screenToWorldDistance(canvas.getWidth() / 2 - event.getX());
-					double wdy2 = screenToWorldDistance(canvas.getHeight() / 2 - event.getY());
-					worldCenterX -= wdx2 - wdx;
-					worldCenterY -= wdy2 - wdy;
-					updateCanvas();
-				}
-			});
-
-			panel.setCenter(canvas);
-			
-			//add VuiExplorer
-			vuiExplorer = new VuiExplorer(this);
-			panel.setLeft(vuiExplorer);
-			Button veButton = new Button("VUI explorer");
-			veButton.setOnAction(new EventHandler<ActionEvent>() {
-				@Override
-				public void handle(ActionEvent event) {
-					panel.setLeft(vuiExplorer);
-				}
-			});
-			veButton.setTooltip(new Tooltip("Show the VUI explorer if it was hidden."));
-			toolbar.getItems().add(veButton);
-			
-			done.release();
-		});
-		try {
-			done.acquire();
-		} catch (InterruptedException e) {
-			System.err.println("Failed to make sure that the VUI is correctly initialized.");
-			e.printStackTrace();
-		}
-	}
-
-	/**
-	 * Convert a distance in the world to its equivalent on the screen
-	 * 
-	 * @param d
-	 *            the in world distance
-	 * @return the on screen distance
-	 */
-	public double worldToScreenDistance(double d) {
-		return d * getZoomFactor();
-	}
-
-	/**
-	 * Convert a distance on the screen to its equivalent in the world
-	 * 
-	 * @param d
-	 *            the on screen distance
-	 * @return the in world distance
-	 */
-	public double screenToWorldDistance(double d) {
-		return d / getZoomFactor();
-	}
-
-	/**
-	 * Convert a X in the world to its equivalent on the screen
-	 * 
-	 * @param x
-	 *            the X in world
-	 *
-	 * @return the X on screen distance
-	 */
-	public double worldToScreenX(double x) {
-		return (x + getWorldOffsetX()) * getZoomFactor();
-	}
-
-	/**
-	 * A value that must be multiplied to scale objects
-	 * 
-	 * @return the zoom factor
-	 */
-	public double getZoomFactor() {
-		return zoom / 100;
-	}
-
-	/**
-	 * Convert a Y in the world to its equivalent on the screen
-	 * 
-	 * @param y
-	 *            the Y in world
-	 *
-	 * @return the Y on screen distance
-	 */
-	public double worldToScreenY(double y) {
-		return (-y + getWorldOffsetY()) * getZoomFactor();
-	}
-
-	/**
-	 * Convert a X on the screen to its equivalent in the world
-	 * 
-	 * @param x
-	 *            the X on screen
-	 *
-	 * @return the X in the world distance
-	 */
-	public double screenToWorldX(double x) {
-		return x / getZoomFactor() - getWorldOffsetX();
-	}
-
-	/**
-	 * Convert a Y on the screen to its equivalent in the world
-	 * 
-	 * @param y
-	 *            the Y on screen
-	 *
-	 * @return the Y in the world distance
-	 */
-	public double screenToWorldY(double y) {
-		return -y / getZoomFactor() + getWorldOffsetY();
-	}
-
-	/**
-	 * Add a drawable to the VUI.
-	 * 
-	 * @param d
-	 *            the new drawable
-	 */
-	public void add(Drawable d) {
-		d.setVUI(this);
-		RunLaterHelper.runLater(()-> canvas.getChildren().add(d.getNode()));
-		drawablesLock.lock();
-		drawables.add(d);
-		drawablesLock.unlock();
-		updateCanvas();
-	}
-	
-	/**
-	 * Remove a drawable from the VUI.
-	 * 
-	 * @param d
-	 *            the new drawable
-	 */
-	public void remove(Drawable d) {
-		drawablesLock.lock();
-		drawables.remove(d);
-		drawablesLock.unlock();
-		RunLaterHelper.runLater(()-> canvas.getChildren().remove(d.getNode()));
-		updateCanvas();
-	}
-	
-	/**
-	 * Remove all drawables from the VUI.
-	 */
-	public void clear() {
-		drawablesLock.lock();
-		drawables.clear();
-		RunLaterHelper.runLater(()->canvas.getChildren().clear());
-		drawablesLock.unlock();
-	}
-
-	/**
-	 * Refresh the canvas
-	 */
-	public void updateCanvas() {
-		final double w = canvas.getWidth();
-		final double h = canvas.getHeight();
-
-		setWorldOffsetX(worldCenterX + screenToWorldDistance(w / 2));
-		setWorldOffsetY(worldCenterY + screenToWorldDistance(h / 2));
-
-		drawablesLock.lock();
-		Collections.sort(drawables, (o1, o2) -> o1.getLayer() - o2.getLayer());
-		for (Drawable d : drawables)
-			RunLaterHelper.runLater(()-> d.onDraw());
-		drawablesLock.unlock();
-
-		RunLaterHelper.runLater(() -> {
-			statusLabel.setText(String.format("Zoom: %.2f Center: (%.2f,%.2f)", zoom, worldCenterX, worldCenterY));
-		});
-		
-		RunLaterHelper.runLater(()-> vuiExplorer.update(true));
-	}
-
-	/**
-	 * Get the width of the canvas
-	 * 
-	 * @return the canvas width
-	 */
-	public double getCanvasWidth() {
-		return canvas.getWidth();
-	}
-
-	/**
-	 * Get the height of the canvas
-	 * 
-	 * @return the canvas height
-	 */
-	public double getCanvasHeight() {
-		return canvas.getHeight();
-	}
-
-	/**
-	 * Get the value that must be added to the X coordinate of in world object
-	 * 
-	 * @return the X offset
-	 */
-	public double getWorldOffsetX() {
-		return worldOffsetX;
-	}
-
-	/**
-	 * Set the value that must be added to the X coordinate of in world object
-	 * 
-	 * @param offsetX
-	 *            the X offset
-	 */
-	public void setWorldOffsetX(double offsetX) {
-		this.worldOffsetX = offsetX;
-	}
-
-	/**
-	 * Get the value that must be added to the Y coordinate of in world object
-	 * 
-	 * @return the Y offset
-	 */
-	public double getWorldOffsetY() {
-		return worldOffsetY;
-	}
-
-	/**
-	 * Set the value that must be added to the Y coordinate of in world object
-	 * 
-	 * @param offsetY
-	 *            the Y offset
-	 */
-	public void setWorldOffsetY(double offsetY) {
-		this.worldOffsetY = offsetY;
-	}
-
-	/**
-	 * Create a point and start rendering it
-	 * 
-	 * @param dx
-	 *            the x coordinate
-	 * @param dy
-	 *            the y coordinate
-	 * @return the point object
-	 */
-	public DrawablePoint createAndAddPoint(double dx, double dy) {
-		DrawablePoint drawablePoint = new DrawablePoint(dx, dy);
-		add(drawablePoint);
-		return drawablePoint;
-	}
-
-	/**
-	 * Create a rectangle and start rendering it
-	 * 
-	 * @param x
-	 *            the x coordinate
-	 * @param y
-	 *            the y coordinate
-	 * @param w
-	 *            the width
-	 * @param h
-	 *            the height
-	 * @return the rectangle object
-	 */
-	public DrawableRectangle createAndAddRectangle(double x, double y, double w, double h) {
-		DrawableRectangle d = new DrawableRectangle(x, y, w, h);
-		add(d);
-		return d;
-	}
-
-	/**
-	 * Set the default configuration of the view
-	 * 
-	 * @param zoom
-	 *            the initial zoom value
-	 * @param worldCenterX
-	 *            the initial X center value
-	 * @param worldCenterY
-	 *            the initial Y center value
-	 */
-	public void setDefaultView(double zoom, double worldCenterX, double worldCenterY) {
-		this.zoom = zoom;
-		this.worldCenterX = worldCenterX;
-		this.worldCenterY = worldCenterY;
-		this.defaultZoom = zoom;
-		this.defaultWorldCenterX = worldCenterX;
-		this.defaultWorldCenterY = worldCenterY;
-	}
-
-	/**
-	 * Create an image and start rendering it
-	 * 
-	 * @param dx
-	 *            the x coordinate
-	 * @param dy
-	 *            the y coordinate
-	 * @param filename
-	 *            the filename of the image
-	 * @return the created image
-	 */
-	public DrawableImage createAndAddImage(double dx, double dy, String filename) {
-		DrawableImage image = new DrawableImage(dx, dy, filename);
-		add(image);
-		return image;
-	}
-
-	/**
-	 * Create a string and start rendering it
-	 * 
-	 * @param dx
-	 *            the x coordinate
-	 * @param dy
-	 *            the y coordinate
-	 * @param text
-	 *            the text to display
-	 * @return the created string
-	 */
-	public DrawableString createAndAddString(int dx, int dy, String text) {
-		DrawableString ds = new DrawableString(dx, dy, text);
-		add(ds);
-		return ds;
-	}
-
-	public Pane getCanvas() {
-		return canvas;
-	}
-	
-	public BorderPane getPanel() {
-		return panel;
-	}
-	
-	public List<Drawable> getDrawables() {
-		return drawables;
-	}
-}
+package fr.irit.smac.amak.ui;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.locks.ReentrantLock;
+
+import fr.irit.smac.amak.tools.RunLaterHelper;
+import fr.irit.smac.amak.ui.drawables.Drawable;
+import fr.irit.smac.amak.ui.drawables.DrawableImage;
+import fr.irit.smac.amak.ui.drawables.DrawablePoint;
+import fr.irit.smac.amak.ui.drawables.DrawableRectangle;
+import fr.irit.smac.amak.ui.drawables.DrawableString;
+import javafx.event.ActionEvent;
+import javafx.event.EventHandler;
+import javafx.geometry.Insets;
+import javafx.scene.control.Button;
+import javafx.scene.control.Label;
+import javafx.scene.control.ToolBar;
+import javafx.scene.control.Tooltip;
+import javafx.scene.input.MouseEvent;
+import javafx.scene.input.ScrollEvent;
+import javafx.scene.layout.Background;
+import javafx.scene.layout.BackgroundFill;
+import javafx.scene.layout.BorderPane;
+import javafx.scene.layout.CornerRadii;
+import javafx.scene.layout.Pane;
+import javafx.scene.paint.Color;
+import javafx.scene.shape.Rectangle;
+import javafx.scene.text.TextAlignment;
+
+/**
+ * 
+ * Vectorial UI: This class allows to create dynamic rendering with zoom and
+ * move capacities
+ * 
+ * @author of original version (the Swing one) perles
+ *
+ */
+public class VUI {
+	/**
+	 * The toolbar of the VUI.
+	 */
+	public ToolBar toolbar;
+	
+	/**
+	 * The VUI explorer.
+	 * @see VuiExplorer
+	 */
+	private VuiExplorer vuiExplorer;
+	
+	/**
+	 * List of objects currently being drawn by the VUI
+	 */
+	private List<Drawable> drawables = new LinkedList<>();
+	/**
+	 * Lock to avoid concurrent modification on the list {@link #drawables}
+	 */
+	private ReentrantLock drawablesLock = new ReentrantLock();
+
+	/**
+	 * A static map to facilitate access to different instances of VUI
+	 */
+	private static Map<String, VUI> instances = new HashMap<>();
+
+	/**
+	 * The horizontal offset of the drawing zone. Used to allow the user to move the
+	 * view.
+	 */
+	private double worldOffsetX;
+
+	/**
+	 * The vertical offset of the drawing zone. Used to allow the user to move the
+	 * view.
+	 */
+	private double worldOffsetY;
+
+	/**
+	 * The last horizontal position of the mouse when dragging
+	 */
+	protected Double lastDragX;
+
+	/**
+	 * The last vertical position of the mouse when dragging
+	 */
+	protected Double lastDragY;
+
+	/**
+	 * The main panel of the VUI
+	 */
+	private BorderPane panel;
+
+	/**
+	 * The canvas on which all is drawn
+	 */
+	private Pane canvas;
+
+	/**
+	 * Label aiming at showing information about the VUI (zoom and offset)
+	 */
+	private Label statusLabel;
+
+	/**
+	 * The default value of the {@link #zoom}
+	 */
+	private double defaultZoom = 100;
+	/**
+	 * The default horizontal position of the view
+	 */
+	private double defaultWorldCenterX = 0;
+	/**
+	 * The default vertical position of the view
+	 */
+	private double defaultWorldCenterY = 0;
+	/**
+	 * The value of the zoom. 100 means 1/1 scale
+	 */
+	protected double zoom = defaultZoom;
+
+	/**
+	 * The horizontal position of the view
+	 */
+	private double worldCenterX = defaultWorldCenterX;
+
+	/**
+	 * The vertical position of the view
+	 */
+	private double worldCenterY = defaultWorldCenterY;
+
+	/**
+	 * Used to be sure that only one thread at the same time create a VUI
+	 */
+	private static ReentrantLock instanceLock = new ReentrantLock();
+
+	/**
+	 * Get the default VUI; on first call its panel is also registered as the "Default VUI" tab of the MainWindow.
+	 * 
+	 * @return the default VUI
+	 */
+	public static VUI get() {
+		if(!instances.containsKey("Default"))
+			MainWindow.addTabbedPanel("Default VUI", get("Default").getPanel());
+		return get("Default");
+	}
+	
+	
+
+	/**
+	 * Create or get a VUI.<br/>
+	 * You have to add its panel to the MainWindow yourself.
+	 * 
+	 * @param id
+	 *            The unique id of the VUI
+	 * @return The VUI with id "id"
+	 */
+	public static VUI get(String id) {
+		instanceLock.lock(); // NOTE(review): not released in a finally block; an exception in new VUI(...) would leave the lock held
+		if (!instances.containsKey(id)) {
+			VUI value = new VUI(id);
+			instances.put(id, value);
+			instanceLock.unlock();
+			return value;
+		}
+		instanceLock.unlock();
+		return instances.get(id);
+	}
+
+	/**
+	 * Constructor of the VUI. This one is private as it can only be created through
+	 * static method.
+	 * 
+	 * @param title
+	 *            The title used for the vui (note: not referenced by the current implementation)
+	 */
+	private VUI(String title) {
+		Semaphore done = new Semaphore(0); // released once the FX thread has finished building the UI
+		RunLaterHelper.runLater(() -> {
+			panel = new BorderPane();
+
+			toolbar = new ToolBar();
+			statusLabel = new Label("status");
+			statusLabel.setTextAlignment(TextAlignment.LEFT);
+			toolbar.getItems().add(statusLabel);
+			panel.setBottom(toolbar);
+
+			Button resetButton = new Button("Reset");
+			resetButton.setOnAction(new EventHandler<ActionEvent>() {
+				@Override
+				public void handle(ActionEvent event) {
+					zoom = defaultZoom;
+					worldCenterX = defaultWorldCenterX;
+					worldCenterY = defaultWorldCenterY;
+					updateCanvas();
+				}
+			});
+			toolbar.getItems().add(resetButton);
+
+			canvas = new Pane();
+			canvas.setBackground(new Background(new BackgroundFill(Color.WHITE, CornerRadii.EMPTY, Insets.EMPTY)));
+			// clip the canvas (avoid drawing outside of it)
+			Rectangle clip = new Rectangle(0, 0, 0, 0);
+			clip.widthProperty().bind(canvas.widthProperty());
+			clip.heightProperty().bind(canvas.heightProperty());
+			canvas.setClip(clip);
+			
+			canvas.setOnMousePressed(new EventHandler<MouseEvent>() {
+				@Override
+				public void handle(MouseEvent event) {
+					lastDragX = event.getX();
+					lastDragY = event.getY();
+				}
+			});
+			canvas.setOnMouseExited(new EventHandler<MouseEvent>() {
+				@Override
+				public void handle(MouseEvent event) {
+					lastDragX = null;
+					lastDragY = null;
+				}
+			});
+			canvas.setOnMouseDragged(new EventHandler<MouseEvent>() {
+				@Override
+				public void handle(MouseEvent event) {
+					try {
+						double transX = screenToWorldDistance(event.getX() - lastDragX);
+						double transY = screenToWorldDistance(event.getY() - lastDragY);
+						worldCenterX += transX;
+						worldCenterY += transY;
+						worldOffsetX += transX;
+						worldOffsetY += transY;
+						lastDragX = event.getX();
+						lastDragY = event.getY();
+						updateCanvas();
+					} catch (Exception ez) {
+						// Catch exception occurring when mouse is out of the canvas
+					}
+				}
+			});
+
+			canvas.setOnScroll(new EventHandler<ScrollEvent>() {
+				@Override
+				public void handle(ScrollEvent event) {
+					double wdx = screenToWorldDistance(canvas.getWidth() / 2 - event.getX());
+					double wdy = screenToWorldDistance(canvas.getHeight() / 2 - event.getY());
+					zoom += event.getDeltaY() / event.getMultiplierY() * 10;
+					if (zoom < 10)
+						zoom = 10;
+
+					double wdx2 = screenToWorldDistance(canvas.getWidth() / 2 - event.getX());
+					double wdy2 = screenToWorldDistance(canvas.getHeight() / 2 - event.getY());
+					worldCenterX -= wdx2 - wdx;
+					worldCenterY -= wdy2 - wdy;
+					updateCanvas();
+				}
+			});
+
+			panel.setCenter(canvas);
+			
+			//add VuiExplorer
+			vuiExplorer = new VuiExplorer(this);
+			panel.setLeft(vuiExplorer);
+			Button veButton = new Button("VUI explorer");
+			veButton.setOnAction(new EventHandler<ActionEvent>() {
+				@Override
+				public void handle(ActionEvent event) {
+					panel.setLeft(vuiExplorer);
+				}
+			});
+			veButton.setTooltip(new Tooltip("Show the VUI explorer if it was hidden."));
+			toolbar.getItems().add(veButton);
+			
+			done.release();
+		});
+		try {
+			done.acquire(); // block the calling thread until the UI exists
+		} catch (InterruptedException e) {
+			System.err.println("Failed to make sure that the VUI is correctly initialized.");
+			e.printStackTrace(); // NOTE(review): the thread's interrupt status is not restored here
+		}
+	}
+
+	/**
+	 * Convert a distance in the world to its equivalent on the screen
+	 * 
+	 * @param d
+	 *            the in world distance
+	 * @return the on screen distance
+	 */
+	public double worldToScreenDistance(double d) {
+		return d * getZoomFactor();
+	}
+
+	/**
+	 * Convert a distance on the screen to its equivalent in the world
+	 * 
+	 * @param d
+	 *            the on screen distance
+	 * @return the in world distance
+	 */
+	public double screenToWorldDistance(double d) {
+		return d / getZoomFactor();
+	}
+
+	/**
+	 * Convert a X in the world to its equivalent on the screen
+	 * 
+	 * @param x
+	 *            the X in world
+	 *
+	 * @return the X on screen distance
+	 */
+	public double worldToScreenX(double x) {
+		return (x + getWorldOffsetX()) * getZoomFactor();
+	}
+
+	/**
+	 * A value that must be multiplied to scale objects
+	 * 
+	 * @return the zoom factor
+	 */
+	public double getZoomFactor() {
+		return zoom / 100;
+	}
+
+	/**
+	 * Convert a Y in the world to its equivalent on the screen
+	 * 
+	 * @param y
+	 *            the Y in world
+	 *
+	 * @return the Y on screen distance
+	 */
+	public double worldToScreenY(double y) {
+		return (-y + getWorldOffsetY()) * getZoomFactor();
+	}
+
+	/**
+	 * Convert a X on the screen to its equivalent in the world
+	 * 
+	 * @param x
+	 *            the X on screen
+	 *
+	 * @return the X in the world distance
+	 */
+	public double screenToWorldX(double x) {
+		return x / getZoomFactor() - getWorldOffsetX();
+	}
+
+	/**
+	 * Convert a Y on the screen to its equivalent in the world
+	 * 
+	 * @param y
+	 *            the Y on screen
+	 *
+	 * @return the Y in the world distance
+	 */
+	public double screenToWorldY(double y) {
+		return -y / getZoomFactor() + getWorldOffsetY();
+	}
+
+	/**
+	 * Add a drawable to the VUI.
+	 * 
+	 * @param d
+	 *            the new drawable
+	 */
+	public void add(Drawable d) {
+		d.setVUI(this);
+		RunLaterHelper.runLater(()-> canvas.getChildren().add(d.getNode()));
+		drawablesLock.lock();
+		drawables.add(d);
+		drawablesLock.unlock();
+		updateCanvas();
+	}
+	
+	/**
+	 * Remove a drawable from the VUI.
+	 * 
+	 * @param d
+	 *            the new drawable
+	 */
+	public void remove(Drawable d) {
+		drawablesLock.lock();
+		drawables.remove(d);
+		drawablesLock.unlock();
+		RunLaterHelper.runLater(()-> canvas.getChildren().remove(d.getNode()));
+		updateCanvas();
+	}
+	
+	/**
+	 * Remove all drawables from the VUI.
+	 */
+	public void clear() {
+		drawablesLock.lock();
+		drawables.clear();
+		RunLaterHelper.runLater(()->canvas.getChildren().clear());
+		drawablesLock.unlock();
+	}
+
+	/**
+	 * Refresh the canvas
+	 */
+	public void updateCanvas() {
+		final double w = canvas.getWidth();
+		final double h = canvas.getHeight();
+
+		setWorldOffsetX(worldCenterX + screenToWorldDistance(w / 2));
+		setWorldOffsetY(worldCenterY + screenToWorldDistance(h / 2));
+
+		drawablesLock.lock();
+		Collections.sort(drawables, (o1, o2) -> o1.getLayer() - o2.getLayer());
+		for (Drawable d : drawables)
+			RunLaterHelper.runLater(()-> d.onDraw());
+		drawablesLock.unlock();
+
+		RunLaterHelper.runLater(() -> {
+			statusLabel.setText(String.format("Zoom: %.2f Center: (%.2f,%.2f)", zoom, worldCenterX, worldCenterY));
+		});
+		
+		RunLaterHelper.runLater(()-> vuiExplorer.update(true));
+	}
+
+	/**
+	 * Get the width of the canvas
+	 * 
+	 * @return the canvas width
+	 */
+	public double getCanvasWidth() {
+		return canvas.getWidth();
+	}
+
+	/**
+	 * Get the height of the canvas
+	 * 
+	 * @return the canvas height
+	 */
+	public double getCanvasHeight() {
+		return canvas.getHeight();
+	}
+
+	/**
+	 * Get the value that must be added to the X coordinate of in world object
+	 * 
+	 * @return the X offset
+	 */
+	public double getWorldOffsetX() {
+		return worldOffsetX;
+	}
+
+	/**
+	 * Set the value that must be added to the X coordinate of in world object
+	 * 
+	 * @param offsetX
+	 *            the X offset
+	 */
+	public void setWorldOffsetX(double offsetX) {
+		this.worldOffsetX = offsetX;
+	}
+
+	/**
+	 * Get the value that must be added to the Y coordinate of in world object
+	 * 
+	 * @return the Y offset
+	 */
+	public double getWorldOffsetY() {
+		return worldOffsetY;
+	}
+
+	/**
+	 * Set the value that must be added to the Y coordinate of in world object
+	 * 
+	 * @param offsetY
+	 *            the Y offset
+	 */
+	public void setWorldOffsetY(double offsetY) {
+		this.worldOffsetY = offsetY;
+	}
+
+	/**
+	 * Create a point and start rendering it
+	 * 
+	 * @param dx
+	 *            the x coordinate
+	 * @param dy
+	 *            the y coordinate
+	 * @return the point object
+	 */
+	public DrawablePoint createAndAddPoint(double dx, double dy) {
+		DrawablePoint drawablePoint = new DrawablePoint(dx, dy);
+		add(drawablePoint);
+		return drawablePoint;
+	}
+
+	/**
+	 * Create a rectangle and start rendering it
+	 * 
+	 * @param x
+	 *            the x coordinate
+	 * @param y
+	 *            the y coordinate
+	 * @param w
+	 *            the width
+	 * @param h
+	 *            the height
+	 * @return the rectangle object
+	 */
+	public DrawableRectangle createAndAddRectangle(double x, double y, double w, double h) {
+		DrawableRectangle d = new DrawableRectangle(x, y, w, h);
+		add(d);
+		return d;
+	}
+
+	/**
+	 * Set the default configuration of the view
+	 * 
+	 * @param zoom
+	 *            the initial zoom value
+	 * @param worldCenterX
+	 *            the initial X center value
+	 * @param worldCenterY
+	 *            the initial Y center value
+	 */
+	public void setDefaultView(double zoom, double worldCenterX, double worldCenterY) {
+		this.zoom = zoom;
+		this.worldCenterX = worldCenterX;
+		this.worldCenterY = worldCenterY;
+		this.defaultZoom = zoom;
+		this.defaultWorldCenterX = worldCenterX;
+		this.defaultWorldCenterY = worldCenterY;
+	}
+
+	/**
+	 * Create an image and start rendering it
+	 * 
+	 * @param dx
+	 *            the x coordinate
+	 * @param dy
+	 *            the y coordinate
+	 * @param filename
+	 *            the filename of the image
+	 * @return the created image
+	 */
+	public DrawableImage createAndAddImage(double dx, double dy, String filename) {
+		DrawableImage image = new DrawableImage(dx, dy, filename);
+		add(image);
+		return image;
+	}
+
+	/**
+	 * Create a string and start rendering it
+	 * 
+	 * @param dx
+	 *            the x coordinate
+	 * @param dy
+	 *            the y coordinate
+	 * @param text
+	 *            the text to display
+	 * @return the created string
+	 */
+	public DrawableString createAndAddString(int dx, int dy, String text) {
+		DrawableString ds = new DrawableString(dx, dy, text);
+		add(ds);
+		return ds;
+	}
+
+	public Pane getCanvas() {
+		return canvas;
+	}
+	
+	public BorderPane getPanel() {
+		return panel;
+	}
+	
+	public List<Drawable> getDrawables() {
+		return drawables;
+	}
+}
diff --git a/AMAKFX/src/fr/irit/smac/amak/ui/VUIMulti.java b/AMAKFX/src/fr/irit/smac/amak/ui/VUIMulti.java
new file mode 100644
index 0000000000000000000000000000000000000000..eeeb4862787055d93d93e4eb772108c06dab4e44
--- /dev/null
+++ b/AMAKFX/src/fr/irit/smac/amak/ui/VUIMulti.java
@@ -0,0 +1,546 @@
+package fr.irit.smac.amak.ui;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.locks.ReentrantLock;
+
+import fr.irit.smac.amak.tools.RunLaterHelper;
+import fr.irit.smac.amak.ui.drawables.Drawable;
+import fr.irit.smac.amak.ui.drawables.DrawableImage;
+import fr.irit.smac.amak.ui.drawables.DrawablePoint;
+import fr.irit.smac.amak.ui.drawables.DrawableRectangle;
+import fr.irit.smac.amak.ui.drawables.DrawableString;
+import javafx.event.ActionEvent;
+import javafx.event.EventHandler;
+import javafx.geometry.Insets;
+import javafx.scene.control.Button;
+import javafx.scene.control.Label;
+import javafx.scene.control.ToolBar;
+import javafx.scene.control.Tooltip;
+import javafx.scene.input.MouseEvent;
+import javafx.scene.input.ScrollEvent;
+import javafx.scene.layout.Background;
+import javafx.scene.layout.BackgroundFill;
+import javafx.scene.layout.BorderPane;
+import javafx.scene.layout.CornerRadii;
+import javafx.scene.layout.Pane;
+import javafx.scene.paint.Color;
+import javafx.scene.shape.Rectangle;
+import javafx.scene.text.TextAlignment;
+
+/**
+ * 
+ * Vectorial UI: This class allows to create dynamic rendering with zoom and
+ * move capacities
+ * 
+ * @author of original version (the Swing one) perles
+ *
+ */
+public class VUIMulti {
+	
+	public String title;
+	
+	/**
+	 * The toolbar of the VUI.
+	 */
+	public ToolBar toolbar;
+	
+	/**
+	 * The VUI explorer.
+	 * @see VuiExplorer
+	 */
+	private VuiExplorer vuiExplorer;
+	
+	/**
+	 * List of objects currently being drawn by the VUI
+	 */
+	private List<Drawable> drawables = new LinkedList<>();
+	/**
+	 * Lock to avoid concurrent modification on the list {@link #drawables}
+	 */
+	private ReentrantLock drawablesLock = new ReentrantLock();
+
+	/**
+	 * A static map to facilitate access to different instances of VUI
+	 */
+	//private static Map<String, VUIMulti> instances = new HashMap<>();
+
+	/**
+	 * The horizontal offset of the drawing zone. Used to allow the user to move the
+	 * view.
+	 */
+	private double worldOffsetX;
+
+	/**
+	 * The vertical offset of the drawing zone. Used to allow the user to move the
+	 * view.
+	 */
+	private double worldOffsetY;
+
+	/**
+	 * The last horizontal position of the mouse when dragging
+	 */
+	protected Double lastDragX;
+
+	/**
+	 * The last vertical position of the mouse when dragging
+	 */
+	protected Double lastDragY;
+
+	/**
+	 * The main panel of the VUI
+	 */
+	private BorderPane panel;
+
+	/**
+	 * The canvas on which all is drawn
+	 */
+	private Pane canvas;
+
+	/**
+	 * Label aiming at showing information about the VUI (zoom and offset)
+	 */
+	private Label statusLabel;
+
+	/**
+	 * The default value of the {@link #zoom}
+	 */
+	private double defaultZoom = 100;
+	/**
+	 * The default horizontal position of the view
+	 */
+	private double defaultWorldCenterX = 0;
+	/**
+	 * The default vertical position of the view
+	 */
+	private double defaultWorldCenterY = 0;
+	/**
+	 * The value of the zoom. 100 means 1/1 scale
+	 */
+	protected double zoom = defaultZoom;
+
+	/**
+	 * The horizontal position of the view
+	 */
+	private double worldCenterX = defaultWorldCenterX;
+
+	/**
+	 * The vertical position of the view
+	 */
+	private double worldCenterY = defaultWorldCenterY;
+
+	
+	
+
+	
+	/**
+	 * Constructor of the VUI. This one is private as it can only be created through
+	 * static method.
+	 * 
+	 * @param title
+	 *            The title used for the vui
+	 */
+	public VUIMulti(String titleValue) {
+		
+		RunLaterHelper.runLater(() -> {
+		
+		this.title = titleValue;
+		panel = new BorderPane();
+
+		toolbar = new ToolBar();
+		statusLabel = new Label("status");
+		statusLabel.setTextAlignment(TextAlignment.LEFT);
+		toolbar.getItems().add(statusLabel);
+		panel.setBottom(toolbar);
+
+		Button resetButton = new Button("Reset");
+		resetButton.setOnAction(new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				zoom = defaultZoom;
+				worldCenterX = defaultWorldCenterX;
+				worldCenterY = defaultWorldCenterY;
+				updateCanvas();
+			}
+		});
+		toolbar.getItems().add(resetButton);
+
+		canvas = new Pane();
+		canvas.setBackground(new Background(new BackgroundFill(Color.WHITE, CornerRadii.EMPTY, Insets.EMPTY)));
+		// clip the canvas (avoid drawing outside of it)
+		Rectangle clip = new Rectangle(0, 0, 0, 0);
+		clip.widthProperty().bind(canvas.widthProperty());
+		clip.heightProperty().bind(canvas.heightProperty());
+		canvas.setClip(clip);
+		
+		canvas.setOnMousePressed(new EventHandler<MouseEvent>() {
+			@Override
+			public void handle(MouseEvent event) {
+				lastDragX = event.getX();
+				lastDragY = event.getY();
+			}
+		});
+		canvas.setOnMouseExited(new EventHandler<MouseEvent>() {
+			@Override
+			public void handle(MouseEvent event) {
+				lastDragX = null;
+				lastDragY = null;
+			}
+		});
+		canvas.setOnMouseDragged(new EventHandler<MouseEvent>() {
+			@Override
+			public void handle(MouseEvent event) {
+				try {
+					double transX = screenToWorldDistance(event.getX() - lastDragX);
+					double transY = screenToWorldDistance(event.getY() - lastDragY);
+					worldCenterX += transX;
+					worldCenterY += transY;
+					worldOffsetX += transX;
+					worldOffsetY += transY;
+					lastDragX = event.getX();
+					lastDragY = event.getY();
+					updateCanvas();
+				} catch (Exception ez) {
+					// Catch exception occurring when mouse is out of the canvas
+				}
+			}
+		});
+
+		canvas.setOnScroll(new EventHandler<ScrollEvent>() {
+			@Override
+			public void handle(ScrollEvent event) {
+				double wdx = screenToWorldDistance(canvas.getWidth() / 2 - event.getX());
+				double wdy = screenToWorldDistance(canvas.getHeight() / 2 - event.getY());
+				zoom += event.getDeltaY() / event.getMultiplierY() * 10;
+				if (zoom < 10)
+					zoom = 10;
+
+				double wdx2 = screenToWorldDistance(canvas.getWidth() / 2 - event.getX());
+				double wdy2 = screenToWorldDistance(canvas.getHeight() / 2 - event.getY());
+				worldCenterX -= wdx2 - wdx;
+				worldCenterY -= wdy2 - wdy;
+				updateCanvas();
+			}
+		});
+
+		panel.setCenter(canvas);
+		
+		//add VuiExplorer
+		vuiExplorer = new VuiExplorer(this);
+		panel.setLeft(vuiExplorer);
+		Button veButton = new Button("VUI explorer");
+		veButton.setOnAction(new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				panel.setLeft(vuiExplorer);
+			}
+		});
+		veButton.setTooltip(new Tooltip("Show the VUI explorer if it was hidden."));
+		toolbar.getItems().add(veButton);
+			
+
+		});	
+		
+	}
+
+	/**
+	 * Convert a distance in the world to its equivalent on the screen
+	 * 
+	 * @param d
+	 *            the in world distance
+	 * @return the on screen distance
+	 */
+	public double worldToScreenDistance(double d) {
+		return d * getZoomFactor();
+	}
+
+	/**
+	 * Convert a distance on the screen to its equivalent in the world
+	 * 
+	 * @param d
+	 *            the on screen distance
+	 * @return the in world distance
+	 */
+	public double screenToWorldDistance(double d) {
+		return d / getZoomFactor();
+	}
+
+	/**
+	 * Convert a X in the world to its equivalent on the screen
+	 * 
+	 * @param x
+	 *            the X in world
+	 *
+	 * @return the X on screen distance
+	 */
+	public double worldToScreenX(double x) {
+		return (x + getWorldOffsetX()) * getZoomFactor();
+	}
+
+	/**
+	 * A value that must be multiplied to scale objects
+	 * 
+	 * @return the zoom factor
+	 */
+	public double getZoomFactor() {
+		return zoom / 100;
+	}
+
+	/**
+	 * Convert a Y in the world to its equivalent on the screen
+	 * 
+	 * @param y
+	 *            the Y in world
+	 *
+	 * @return the Y on screen distance
+	 */
+	public double worldToScreenY(double y) {
+		return (-y + getWorldOffsetY()) * getZoomFactor();
+	}
+
+	/**
+	 * Convert a X on the screen to its equivalent in the world
+	 * 
+	 * @param x
+	 *            the X on screen
+	 *
+	 * @return the X in the world distance
+	 */
+	public double screenToWorldX(double x) {
+		return x / getZoomFactor() - getWorldOffsetX();
+	}
+
+	/**
+	 * Convert a Y on the screen to its equivalent in the world
+	 * 
+	 * @param y
+	 *            the Y on screen
+	 *
+	 * @return the Y in the world distance
+	 */
+	public double screenToWorldY(double y) {
+		return -y / getZoomFactor() + getWorldOffsetY();
+	}
+
+	/**
+	 * Add a drawable to the VUI.
+	 * 
+	 * @param d
+	 *            the new drawable
+	 */
+	public void add(Drawable d) {
+		
+		d.setVUIMulti(this);
+		RunLaterHelper.runLater(()-> canvas.getChildren().add(d.getNode()));
+		drawablesLock.lock();
+		drawables.add(d);
+		drawablesLock.unlock();
+		updateCanvas();
+	}
+	
+	/**
+	 * Remove a drawable from the VUI.
+	 * 
+	 * @param d
+	 *            the new drawable
+	 */
+	public void remove(Drawable d) {
+		drawablesLock.lock();
+		drawables.remove(d);
+		drawablesLock.unlock();
+		RunLaterHelper.runLater(()-> canvas.getChildren().remove(d.getNode()));
+		updateCanvas();
+	}
+	
+	/**
+	 * Remove all drawables from the VUI.
+	 */
+	public void clear() {
+		drawablesLock.lock();
+		drawables.clear();
+		RunLaterHelper.runLater(()->canvas.getChildren().clear());
+		drawablesLock.unlock();
+	}
+
+	/**
+	 * Refresh the canvas
+	 */
+	public void updateCanvas() {
+		final double w = canvas.getWidth();
+		final double h = canvas.getHeight();
+
+		setWorldOffsetX(worldCenterX + screenToWorldDistance(w / 2));
+		setWorldOffsetY(worldCenterY + screenToWorldDistance(h / 2));
+
+		drawablesLock.lock();
+		Collections.sort(drawables, (o1, o2) -> o1.getLayer() - o2.getLayer());
+		for (Drawable d : drawables)
+			RunLaterHelper.runLater(()-> d.onDraw());
+		drawablesLock.unlock();
+
+		RunLaterHelper.runLater(() -> {
+			statusLabel.setText(String.format("Zoom: %.2f Center: (%.2f,%.2f)", zoom, worldCenterX, worldCenterY));
+		});
+		
+		RunLaterHelper.runLater(()-> vuiExplorer.update(true));
+	}
+
+	/**
+	 * Get the width of the canvas
+	 * 
+	 * @return the canvas width
+	 */
+	public double getCanvasWidth() {
+		return canvas.getWidth();
+	}
+
+	/**
+	 * Get the height of the canvas
+	 * 
+	 * @return the canvas height
+	 */
+	public double getCanvasHeight() {
+		return canvas.getHeight();
+	}
+
+	/**
+	 * Get the value that must be added to the X coordinate of in world object
+	 * 
+	 * @return the X offset
+	 */
+	public double getWorldOffsetX() {
+		return worldOffsetX;
+	}
+
+	/**
+	 * Set the value that must be added to the X coordinate of in world object
+	 * 
+	 * @param offsetX
+	 *            the X offset
+	 */
+	public void setWorldOffsetX(double offsetX) {
+		this.worldOffsetX = offsetX;
+	}
+
+	/**
+	 * Get the value that must be added to the Y coordinate of in world object
+	 * 
+	 * @return the Y offset
+	 */
+	public double getWorldOffsetY() {
+		return worldOffsetY;
+	}
+
+	/**
+	 * Set the value that must be added to the Y coordinate of in world object
+	 * 
+	 * @param offsetY
+	 *            the Y offset
+	 */
+	public void setWorldOffsetY(double offsetY) {
+		this.worldOffsetY = offsetY;
+	}
+
+	/**
+	 * Create a point and start rendering it
+	 * 
+	 * @param dx
+	 *            the x coordinate
+	 * @param dy
+	 *            the y coordinate
+	 * @return the point object
+	 */
+	public DrawablePoint createAndAddPoint(double dx, double dy) {
+		DrawablePoint drawablePoint = new DrawablePoint(dx, dy);
+		add(drawablePoint);
+		return drawablePoint;
+	}
+
+	/**
+	 * Create a rectangle and start rendering it
+	 * 
+	 * @param x
+	 *            the x coordinate
+	 * @param y
+	 *            the y coordinate
+	 * @param w
+	 *            the width
+	 * @param h
+	 *            the height
+	 * @return the rectangle object
+	 */
+	public DrawableRectangle createAndAddRectangle(double x, double y, double w, double h) {
+		DrawableRectangle d = new DrawableRectangle(x, y, w, h);
+		add(d);
+		return d;
+	}
+
+	/**
+	 * Set the default configuration of the view
+	 * 
+	 * @param zoom
+	 *            the initial zoom value
+	 * @param worldCenterX
+	 *            the initial X center value
+	 * @param worldCenterY
+	 *            the initial Y center value
+	 */
+	public void setDefaultView(double zoom, double worldCenterX, double worldCenterY) {
+		this.zoom = zoom;
+		this.worldCenterX = worldCenterX;
+		this.worldCenterY = worldCenterY;
+		this.defaultZoom = zoom;
+		this.defaultWorldCenterX = worldCenterX;
+		this.defaultWorldCenterY = worldCenterY;
+	}
+
+	/**
+	 * Create an image and start rendering it
+	 * 
+	 * @param dx
+	 *            the x coordinate
+	 * @param dy
+	 *            the y coordinate
+	 * @param filename
+	 *            the filename of the image
+	 * @return the created image
+	 */
+	public DrawableImage createAndAddImage(double dx, double dy, String filename) {
+		DrawableImage image = new DrawableImage(dx, dy, filename);
+		add(image);
+		return image;
+	}
+
+	/**
+	 * Create a string and start rendering it
+	 * 
+	 * @param dx
+	 *            the x coordinate
+	 * @param dy
+	 *            the y coordinate
+	 * @param text
+	 *            the text to display
+	 * @return the created string
+	 */
+	public DrawableString createAndAddString(int dx, int dy, String text) {
+		DrawableString ds = new DrawableString(dx, dy, text);
+		add(ds);
+		return ds;
+	}
+
+	public Pane getCanvas() {
+		return canvas;
+	}
+	
+	public BorderPane getPanel() {
+		return panel;
+	}
+	
+	public List<Drawable> getDrawables() {
+		return drawables;
+	}
+}
diff --git a/AMAKFX/src/fr/irit/smac/amak/ui/VuiExplorer.java b/AMAKFX/src/fr/irit/smac/amak/ui/VuiExplorer.java
index 957e6a8ae9c4507c27f9832abb99d7023ab19883..dbb94088b1911433c82eccdd7a3c5c2a5170c067 100644
--- a/AMAKFX/src/fr/irit/smac/amak/ui/VuiExplorer.java
+++ b/AMAKFX/src/fr/irit/smac/amak/ui/VuiExplorer.java
@@ -23,13 +23,14 @@ import javafx.scene.layout.VBox;
 import javafx.scene.paint.Color;
 
 /**
- * A piece of GUI allowing to see and look for contexts.
+ * A piece of GUI allowing to see and look for agents.
  * @author Hugo
  *
  */
 public class VuiExplorer extends ScrollPane {
 
-	private VUI vui;
+	private VUI vui = null;
+	private VUIMulti vuiMulti = null;
 	
 	private VBox vbox;
 	private TitledPane contextsPane;
@@ -102,6 +103,74 @@ public class VuiExplorer extends ScrollPane {
 		RunLaterHelper.runLater(()->vui.getPanel().setLeft(this));
 
 	}
+	
+	
+	public VuiExplorer(VUIMulti vuiMlt) {
+		this.vuiMulti = vuiMlt;
+
+		this.setMaxWidth(Double.MAX_VALUE);
+		this.setMaxHeight(Double.MAX_VALUE);
+
+		vbox = new VBox();
+		vbox.setFillWidth(true);
+		this.setContent(vbox);
+
+		// refresh, close, and collapseAll button
+		HBox hboxButtons = new HBox();
+		Button refresh = new Button("Refresh");
+		refresh.setOnAction(new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				update();
+			}
+		});
+		Button close = new Button("Close");
+		close.setOnAction(new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				vuiMulti.getPanel().setLeft(null);
+			}
+		});
+		Button collapseAll = new Button("Collapse all");
+		collapseAll.setOnAction(new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				collapseAll();
+			}
+		});
+		hboxButtons.getChildren().addAll(refresh, close, collapseAll);
+		
+		// check box
+		autoRefresh = new CheckBox("Auto refresh");
+		autoRefresh.setTooltip(new Tooltip("Try to automatically refresh the VUI explorer when the VUI is updated."));
+		
+		// search bar
+		search = new TextField();
+		search.setPromptText("regular expression");
+		// update list on change
+		search.textProperty().addListener(new ChangeListener<String>() {
+			@Override
+			public void changed(ObservableValue<? extends String> observable, String oldValue, String newValue) {
+				search.setStyle(null);
+				try {
+					update();
+				} catch (PatternSyntaxException ex) {
+					search.setStyle("-fx-border-color: red;");
+				}
+			}
+		});
+
+		cpVBox = new VBox();
+		contextsPane = new TitledPane("Drawables", cpVBox);
+
+		vbox.getChildren().addAll(hboxButtons, autoRefresh, search, contextsPane);
+		update();
+		
+		// Add to vui
+		RunLaterHelper.runLater(()->vuiMulti.getPanel().setLeft(this));
+
+	}
+	
 
 	public void update(boolean auto) {
 		if(auto && autoRefresh.isSelected()) {
@@ -113,7 +182,13 @@ public class VuiExplorer extends ScrollPane {
 	 * Update the list of context
 	 */
 	public void update() {
-		List<Drawable> drawableList = vui.getDrawables();
+		List<Drawable> drawableList = null;
+		if(vui != null) {
+			drawableList = vui.getDrawables();
+		}
+		if(vuiMulti != null) {
+			drawableList = vuiMulti.getDrawables();
+		}
 		// crude color sort
 		drawableList.sort(new Comparator<Drawable>() {
 			@Override
@@ -142,7 +217,13 @@ public class VuiExplorer extends ScrollPane {
 	}
 	
 	private void collapseAll() {
-		List<Drawable> drawableList = vui.getDrawables();
+		List<Drawable> drawableList = null;
+		if(vui != null) {
+			drawableList = vui.getDrawables();
+		}
+		if(vuiMulti != null) {
+			drawableList = vuiMulti.getDrawables();
+		}
 		for(Drawable d : drawableList) {
 			if(d.showInExplorer && d.isVisible()) {
 				Drawable mini = d.getLinkedDrawable("mini");
diff --git a/AMAKFX/src/fr/irit/smac/amak/ui/drawables/Drawable.java b/AMAKFX/src/fr/irit/smac/amak/ui/drawables/Drawable.java
index ec5ce1fe94b59134da30c00edbd7368c6c8470f3..377377c5904d07114052daeb12d959a6664a725d 100644
--- a/AMAKFX/src/fr/irit/smac/amak/ui/drawables/Drawable.java
+++ b/AMAKFX/src/fr/irit/smac/amak/ui/drawables/Drawable.java
@@ -1,605 +1,618 @@
-package fr.irit.smac.amak.ui.drawables;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-
-import fr.irit.smac.amak.tools.RunLaterHelper;
-import fr.irit.smac.amak.ui.VUI;
-import javafx.event.Event;
-import javafx.event.EventHandler;
-import javafx.scene.Node;
-import javafx.scene.input.MouseEvent;
-import javafx.scene.paint.Color;
-
-/**
- * A drawable is an object that can be drawn by the {@link VUI} system
- * 
- * @author of original version (the Swing one) Alexandre Perles
- *
- */
-public abstract class Drawable {
-	
-	/**
-	 * If this drawable should be shown in the vui explorer.
-	 */
-	public boolean showInExplorer = true;
-
-	/**
-	 * Default style applied to drawable node.
-	 */
-	protected static String defaultStyle = "-fx-stroke: black; -fx-stroke-width: 1;";
-	
-	/**
-	 * Linked drawables will receive the same event when using dispatchEvent.
-	 */
-	private HashMap<String, Drawable> linkedDrawable = new HashMap<String, Drawable>();
-	
-	/**
-	 * The horizontal position of the object
-	 */
-	protected double x;
-	/**
-	 * The vertical position of the object
-	 */
-	private double y;
-	/**
-	 * The width of the object
-	 */
-	private double width;
-	
-	/**
-	 * The real height
-	 */
-	protected double height;
-
-	/**
-	 * Does only the border must be displayed ?
-	 */
-	protected boolean strokeMode = false;
-
-	/**
-	 * The color of the object
-	 */
-	protected Color color = Color.BLACK;
-	
-	/**
-	 * The VUI on which the object is drawn
-	 */
-	protected VUI vui;
-	
-	/**
-	 * The order of drawing. An higher layer is drawn on top of the other.
-	 */
-	protected int layer = 0;
-	
-	/**
-	 * The angle of rotation of the object
-	 */
-	private double angle;
-	
-	/**
-	 * A fixed object doesn't move with the view. It can be used for HUD
-	 */
-	private boolean fixed = false;
-	
-	/**
-	 * Must the object be drawn ?
-	 */
-	private boolean visible = true;
-	
-	/**
-	 * Is the drawable expanded ?
-	 * @see Drawable#onMouseClick(MouseEvent)
-	 * @see Drawable#expand()
-	 * @see Drawable#collapse()
-	 */
-	private boolean expanded = false;
-	
-	/**
-	 * If relevant, the name of the drawable, usually it's the name
-	 * of the agent represented by this drawable.
-	 */
-	private String name;
-	
-	/**
-	 * If relevant, additional info on the drawable, usually it's the
-	 * state of the agent represented by this drawable.
-	 */
-	private String info;
-
-	/**
-	 * Constructor of the object
-	 * 
-	 * @param vui
-	 *            the VUI on which the object must be drawn
-	 * @param dx
-	 *            the x real position
-	 * @param dy
-	 *            the y real position
-	 * @param width
-	 *            the real width
-	 * @param height
-	 *            the real height
-	 */
-	protected Drawable(double dx, double dy, double width, double height) {
-		move(dx, dy);
-		setWidth(width);
-		setHeight(height);
-	}
-	
-	/**
-	 * If you wish to use some default settings for your drawable.<br/>
-	 * Must be called AFTER the node for your drawable has been created.
-	 */
-	protected void defaultInit() {
-		getNode().setStyle(defaultStyle);
-		
-		getNode().addEventHandler(MouseEvent.ANY, new EventHandler<MouseEvent>() {
-			@Override
-			public void handle(MouseEvent event) {
-				dispatchEvent(event);
-			}
-		});
-	}
-	
-	/**
-	 * Compute the width as it must be displayed on screen. Given the zoom factor,
-	 * the width displayed can be different than the real width.
-	 * 
-	 * @return the width
-	 */
-	public double getRenderedWidth() {
-		if (isFixed())
-			return width;
-		else
-			return vui.worldToScreenDistance(width);
-	}
-
-	/**
-	 * Set the real width of the object
-	 * 
-	 * @param width
-	 *            The new width
-	 */
-	public void setWidth(double width) {
-		this.width = width;
-	}
-
-	/**
-	 * Compute the height as it must be displayed on screen. Given the zoom factor,
-	 * the height displayed can be different than the real height.
-	 * 
-	 * @return the width
-	 */
-	public double getRenderedHeight() {
-		if (isFixed())
-			return height;
-		else
-			return vui.worldToScreenDistance(height);
-	}
-
-	/**
-	 * Set the real height of the object
-	 * 
-	 * @param height
-	 *            The new height
-	 */
-	public void setHeight(double height) {
-		this.height = height;
-	}
-
-	/**
-	 * Get the real width
-	 * 
-	 * @return the real width
-	 */
-	public double getWidth() {
-		return width;
-	}
-
-	/**
-	 * Get the real height
-	 * 
-	 * @return the real height
-	 */
-	public double getHeight() {
-		return height;
-	}
-
-	
-
-	/**
-	 * Getter for the fixed attribute
-	 * 
-	 * @return if the obejct is fixed
-	 */
-	public boolean isFixed() {
-		return fixed;
-	}
-
-	/**
-	 * Getter for the angle attribute
-	 * 
-	 * @return the angle
-	 */
-	public double getAngle() {
-		return angle;
-	}
-
-	/**
-	 * Getter for the layer attribute
-	 * 
-	 * @return the layer
-	 */
-	public int getLayer() {
-		return layer;
-	}
-
-	/**
-	 * Set the layer and update
-	 * 
-	 * @param layer
-	 *            the new layer
-	 * @return the object for chained methods
-	 */
-	public Drawable setLayer(int layer) {
-		this.layer = layer;
-		return this;
-	}
-
-	/**
-	 * Set the new angle
-	 * 
-	 * @param angle2
-	 *            the new angle
-	 * @return the object for chained methods
-	 */
-	public Drawable setAngle(double angle2) {
-		this.angle = angle2;
-		return this;
-	}
-
-	/**
-	 * Draw the object if visible and if on screen
-	 * 
-	 */
-	public void onDraw() {
-		if (isVisible()) {
-			_onDraw();
-		}
-	}
-
-	/**
-	 * Method that must be overrided to draw
-	 */
-	public abstract void _onDraw();
-
-	/**
-	 * Set the associated VUI
-	 * 
-	 * @param vectorialUI
-	 */
-	public void setVUI(VUI vectorialUI) {
-		vui = vectorialUI;
-	}
-
-	/**
-	 * Get the top y coordinate
-	 * 
-	 * @return the top y coordinate
-	 */
-	public double top() {
-		if (isFixed())
-			return y - height / 2;
-		else
-			return vui.worldToScreenY(y - height / 2);
-	}
-
-	/**
-	 * Get the left x coordinate
-	 * 
-	 * @return the left x coordinate
-	 */
-	public double left() {
-		if (isFixed())
-			return x - width / 2;
-		else
-			return vui.worldToScreenX(x - width / 2);
-	}
-
-	/**
-	 * Get the bottom y coordinate
-	 * 
-	 * @return the bottom y coordinate
-	 */
-	public double bottom() {
-		if (isFixed())
-			return y + height / 2;
-		else
-			return vui.worldToScreenY(y + height / 2);
-	}
-
-	/**
-	 * Get the right x coordinate
-	 * 
-	 * @return the right x coordinate
-	 */
-	public double right() {
-		if (isFixed())
-			return x + width / 2;
-		else
-			return vui.worldToScreenX(x + width / 2);
-	}
-
-	/**
-	 * Only draw the border of the object
-	 * 
-	 * @return the object for chained methods
-	 */
-	public Drawable setStrokeOnly() {
-		strokeMode = true;
-		return this;
-	}
-
-	/**
-	 * 
-	 * @param color
-	 * @return the object for chained methods
-	 */
-	public Drawable setColor(Color color) {
-		if (color == this.color)
-			return this;
-		this.color = color;
-		return this;
-	}
-	
-	/**
-	 * The color of the drawable.
-	 * @return
-	 */
-	public Color getColor() {
-		return color;
-	}
-
-	/**
-	 * 
-	 * @param dx
-	 * @param dy
-	 * @return the object for chained methods
-	 */
-	public Drawable move(double dx, double dy) {
-		if (x == dx && y == dy)
-			return this;
-		this.x = dx;
-		this.y = dy;
-		return this;
-	}
-
-	/**
-	 * 
-	 * @return the object for chained methods
-	 */
-	public Drawable setFixed() {
-		this.fixed = true;
-		return this;
-	}
-
-	/**
-	 * 
-	 * @return the object for chained methods
-	 */
-	public Drawable show() {
-		return this.setVisible(true);
-	}
-	
-	protected abstract void _hide();
-
-	/**
-	 * 
-	 * @return
-	 */
-	public Drawable hide() {
-		_hide();
-		return this.setVisible(false);
-	}
-
-	/**
-	 * 
-	 * @return
-	 */
-	public boolean isVisible() {
-		return visible;
-	}
-	
-	public abstract void _show();
-
-	/**
-	 * 
-	 * @param visible
-	 * @return the object for chained methods
-	 */
-	public Drawable setVisible(boolean visible) {
-		this.visible = visible;
-		if (visible)
-			_show();
-		else
-			_hide();
-		return this;
-	}
-	
-	/**
-	 * The graphical element that is displayed
-	 * @return
-	 */
-	public abstract Node getNode();
-	
-	/**
-	 * Remove the drawable from its VUI
-	 */
-	public void delete() {
-		vui.remove(this);
-	}
-
-	/**
-	 * Get the linked drawable or null if it does not exist.
-	 * @param name name of the linked drawable
-	 * @return the linked drawable or null
-	 */
-	public Drawable getLinkedDrawable(String name) {
-		Drawable ret = null;
-		if(linkedDrawable.containsKey(name)) {
-			ret = linkedDrawable.get(name);
-		}
-		return ret;
-	}
-	
-	/**
-	 * Add a drawable to the list of linked drawables.<br/>
-	 * The relation is not symmetrical.
-	 * @param name
-	 * @param drawable
-	 */
-	public void addLinkedDrawable(String name, Drawable drawable) {
-		linkedDrawable.put(name, drawable);
-	}
-	
-	/**
-	 * Return the list of linked drawables. <br/>
-	 * Linked drawables will receive the same event when using dispatchEvent.
-	 */
-	public List<Drawable> getLinkedDrawables(){
-		return new ArrayList<Drawable>(linkedDrawable.values());
-	}
-	
-	/**
-	 * Used by dispatchEvent. Override if you want to register more event with the dispatchEvent
-	 * @param event
-	 */
-	protected void onEvent(Event event) {
-		switch (event.getEventType().getName()) {
-		case "MOUSE_CLICKED":
-			onMouseClick((MouseEvent)event);
-			break;
-		case "MOUSE_ENTERED":
-			onMouseEntered((MouseEvent)event);
-			break;
-		case "MOUSE_EXITED":
-			onMouseExited((MouseEvent)event);
-			break;
-		default:
-			break;
-		}
-	}
-	
-	/**
-	 * Called when onEvent receive a MOUSE_EXITED event.
-	 * @param event
-	 */
-	protected void onMouseExited(MouseEvent event) {
-		getNode().setStyle(defaultStyle);
-	}
-
-	/**
-	 * Called when onEvent receive a MOUSE_ENTERED event.
-	 * @param event
-	 */
-	protected void onMouseEntered(MouseEvent event) {
-		getNode().setStyle("-fx-stroke: black; -fx-stroke-width: 3;");
-	}
-
-	/**
-	 * Called when onEvent receive a MOUSE_CLICKED event.
-	 * @param event
-	 */
-	protected void onMouseClick(MouseEvent event) {
-		if(expanded) {
-			collapse();
-		} else {
-			expand();
-		}
-	}
-
-	/**
-	 * Dispatch an event to all linked drawable, and this drawable.
-	 * @param event
-	 */
-	public void dispatchEvent(Event event) {
-		for(Drawable d : getLinkedDrawables()) {
-			d.onEvent(event);
-		}
-		onEvent(event);
-	}
-	
-	/**
-	 * If this drawable should be shown in the vui explorer.
-	 */
-	public Drawable setShowInExplorer(boolean showInExplorer) {
-		this.showInExplorer = showInExplorer;
-		return this;
-	}
-	
-	/**
-	 * If relevant, the name of the drawable, usually it's the name
-	 * of the agent represented by this drawable.
-	 */
-	public String getName() {
-		return name == null ? toString() : name;
-	}
-	
-	/**
-	 * If relevant, additional info on the drawable, usually it's the
-	 * state of the agent represented by this drawable.
-	 */
-	public String getInfo() {
-		return info == null ? toString() : info;
-	}
-	
-	/**
-	 * If relevant, the name of the drawable, usually it's the name
-	 * of the agent represented by this drawable.
-	 */
-	public Drawable setName(String name) {
-		this.name = name;
-		return this;
-	}
-	
-	/**
-	 * If relevant, additional info on the drawable, usually it's the
-	 * state of the agent represented by this drawable.
-	 */
-	public Drawable setInfo(String info) {
-		this.info = info;
-		return this;
-	}
-	
-	/**
-	 * Action performed if drawable is clicked while collapsed.<br/>
-	 * By default do nothing
-	 * @see Drawable#collapse()
-	 */
-	public void expand() {
-		expanded = true;
-	}
-	
-	/**
-	 * Action performed if drawable is clicked while expanded.
-	 * @see Drawable#expand()
-	 */
-	public void collapse() {
-		expanded = false;
-	}
-	
-	public boolean isExpanded() {
-		return expanded;
-	}
-	
-	/**
-	 * Set the drawable on top of all others
-	 * @return
-	 */
-	public Drawable toFront() {
-		RunLaterHelper.runLater(()-> getNode().toFront());
-		return this;
-	}
-}
+package fr.irit.smac.amak.ui.drawables;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+import fr.irit.smac.amak.tools.RunLaterHelper;
+import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.VUIMulti;
+import javafx.event.Event;
+import javafx.event.EventHandler;
+import javafx.scene.Node;
+import javafx.scene.input.MouseEvent;
+import javafx.scene.paint.Color;
+
+/**
+ * A drawable is an object that can be drawn by the {@link VUI} system
+ * 
+ * @author of original version (the Swing one) Alexandre Perles
+ *
+ */
+public abstract class Drawable {
+	
+	/**
+	 * If this drawable should be shown in the vui explorer.
+	 */
+	public boolean showInExplorer = true;
+
+	/**
+	 * Default style applied to drawable node.
+	 */
+	protected static String defaultStyle = "-fx-stroke: black; -fx-stroke-width: 1;";
+	
+	/**
+	 * Linked drawables will receive the same event when using dispatchEvent.
+	 */
+	private HashMap<String, Drawable> linkedDrawable = new HashMap<String, Drawable>();
+	
+	/**
+	 * The horizontal position of the object
+	 */
+	protected double x;
+	/**
+	 * The vertical position of the object
+	 */
+	private double y;
+	/**
+	 * The width of the object
+	 */
+	private double width;
+	
+	/**
+	 * The real height
+	 */
+	protected double height;
+
+	/**
+	 * Must only the border be displayed?
+	 */
+	protected boolean strokeMode = false;
+
+	/**
+	 * The color of the object
+	 */
+	protected Color color = Color.BLACK;
+	
+	/**
+	 * The VUI on which the object is drawn
+	 */
+	protected VUI vui;
+	
+	protected VUIMulti vuiMulti;
+	
+	/**
+	 * The order of drawing. A higher layer is drawn on top of the others.
+	 */
+	protected int layer = 0;
+	
+	/**
+	 * The angle of rotation of the object
+	 */
+	private double angle;
+	
+	/**
+	 * A fixed object doesn't move with the view. It can be used for HUD
+	 */
+	private boolean fixed = false;
+	
+	/**
+	 * Must the object be drawn ?
+	 */
+	private boolean visible = true;
+	
+	/**
+	 * Is the drawable expanded ?
+	 * @see Drawable#onMouseClick(MouseEvent)
+	 * @see Drawable#expand()
+	 * @see Drawable#collapse()
+	 */
+	private boolean expanded = false;
+	
+	/**
+	 * If relevant, the name of the drawable, usually it's the name
+	 * of the agent represented by this drawable.
+	 */
+	private String name;
+	
+	/**
+	 * If relevant, additional info on the drawable, usually it's the
+	 * state of the agent represented by this drawable.
+	 */
+	private String info;
+
+	/**
+	 * Constructor of the object
+	 * 
+	 * Note: the VUI on which the object is drawn is not a constructor
+	 * parameter; it is set later via setVUI or setVUIMulti.
+	 * @param dx
+	 *            the x real position
+	 * @param dy
+	 *            the y real position
+	 * @param width
+	 *            the real width
+	 * @param height
+	 *            the real height
+	 */
+	protected Drawable(double dx, double dy, double width, double height) {
+		move(dx, dy);
+		setWidth(width);
+		setHeight(height);
+	}
+	
+	/**
+	 * If you wish to use some default settings for your drawable.<br/>
+	 * Must be called AFTER the node for your drawable has been created.
+	 */
+	protected void defaultInit() {
+		getNode().setStyle(defaultStyle);
+		
+		getNode().addEventHandler(MouseEvent.ANY, new EventHandler<MouseEvent>() {
+			@Override
+			public void handle(MouseEvent event) {
+				dispatchEvent(event);
+			}
+		});
+	}
+	
+	/**
+	 * Compute the width as it must be displayed on screen. Given the zoom factor,
+	 * the width displayed can be different than the real width.
+	 * 
+	 * @return the width
+	 */
+	public double getRenderedWidth() {
+		if (isFixed())
+			return width;
+		else
+			return vui != null ? vui.worldToScreenDistance(width) : vuiMulti.worldToScreenDistance(width);
+		
+	}
+
+	/**
+	 * Set the real width of the object
+	 * 
+	 * @param width
+	 *            The new width
+	 */
+	public void setWidth(double width) {
+		this.width = width;
+	}
+
+	/**
+	 * Compute the height as it must be displayed on screen. Given the zoom factor,
+	 * the height displayed can be different than the real height.
+	 * 
+	 * @return the height
+	 */
+	public double getRenderedHeight() {
+		if (isFixed())
+			return height;
+		else
+			return vui != null ? vui.worldToScreenDistance(height) : vuiMulti.worldToScreenDistance(height);
+	}
+
+	/**
+	 * Set the real height of the object
+	 * 
+	 * @param height
+	 *            The new height
+	 */
+	public void setHeight(double height) {
+		this.height = height;
+	}
+
+	/**
+	 * Get the real width
+	 * 
+	 * @return the real width
+	 */
+	public double getWidth() {
+		return width;
+	}
+
+	/**
+	 * Get the real height
+	 * 
+	 * @return the real height
+	 */
+	public double getHeight() {
+		return height;
+	}
+
+	
+
+	/**
+	 * Getter for the fixed attribute
+	 * 
+	 * @return whether the object is fixed
+	 */
+	public boolean isFixed() {
+		return fixed;
+	}
+
+	/**
+	 * Getter for the angle attribute
+	 * 
+	 * @return the angle
+	 */
+	public double getAngle() {
+		return angle;
+	}
+
+	/**
+	 * Getter for the layer attribute
+	 * 
+	 * @return the layer
+	 */
+	public int getLayer() {
+		return layer;
+	}
+
+	/**
+	 * Set the layer and update
+	 * 
+	 * @param layer
+	 *            the new layer
+	 * @return the object for chained methods
+	 */
+	public Drawable setLayer(int layer) {
+		this.layer = layer;
+		return this;
+	}
+
+	/**
+	 * Set the new angle
+	 * 
+	 * @param angle2
+	 *            the new angle
+	 * @return the object for chained methods
+	 */
+	public Drawable setAngle(double angle2) {
+		this.angle = angle2;
+		return this;
+	}
+
+	/**
+	 * Draw the object if visible and if on screen
+	 * 
+	 */
+	public void onDraw() {
+		if (isVisible()) {
+			_onDraw();
+		}
+	}
+
+	/**
+	 * Method that must be overridden to draw
+	 */
+	public abstract void _onDraw();
+
+	/**
+	 * Set the associated VUI
+	 * 
+	 * @param vectorialUI
+	 */
+	public void setVUI(VUI vectorialUI) {
+		vui = vectorialUI;
+	}
+	
+	public void setVUIMulti(VUIMulti vectorialUI) {
+		vuiMulti = vectorialUI;
+	}
+
+	/**
+	 * Get the top y coordinate
+	 * 
+	 * @return the top y coordinate
+	 */
+	public double top() {
+		if (isFixed())
+			return y - height / 2;
+		else
+			return vui != null ? vui.worldToScreenY(y - height / 2) : vuiMulti.worldToScreenY(y - height / 2);
+	}
+
+	/**
+	 * Get the left x coordinate
+	 * 
+	 * @return the left x coordinate
+	 */
+	public double left() {
+		if (isFixed())
+			return x - width / 2;
+		else
+			return vui != null ? vui.worldToScreenX(x - width / 2) : vuiMulti.worldToScreenX(x - width / 2);
+		
+	}
+
+	/**
+	 * Get the bottom y coordinate
+	 * 
+	 * @return the bottom y coordinate
+	 */
+	public double bottom() {
+		if (isFixed())
+			return y + height / 2;
+		else
+			return vui != null ? vui.worldToScreenY(y + height / 2) : vuiMulti.worldToScreenY(y + height / 2);
+	}
+
+	/**
+	 * Get the right x coordinate
+	 * 
+	 * @return the right x coordinate
+	 */
+	public double right() {
+		if (isFixed())
+			return x + width / 2;
+		else
+			return vui != null ? vui.worldToScreenX(x + width / 2) : vuiMulti.worldToScreenX(x + width / 2);
+	}
+
+	/**
+	 * Only draw the border of the object
+	 * 
+	 * @return the object for chained methods
+	 */
+	public Drawable setStrokeOnly() {
+		strokeMode = true;
+		return this;
+	}
+
+	/**
+	 * 
+	 * @param color
+	 * @return the object for chained methods
+	 */
+	public Drawable setColor(Color color) {
+		if (color == this.color)
+			return this;
+		this.color = color;
+		return this;
+	}
+	
+	/**
+	 * The color of the drawable.
+	 * @return
+	 */
+	public Color getColor() {
+		return color;
+	}
+
+	/**
+	 * 
+	 * @param dx
+	 * @param dy
+	 * @return the object for chained methods
+	 */
+	public Drawable move(double dx, double dy) {
+		if (x == dx && y == dy)
+			return this;
+		this.x = dx;
+		this.y = dy;
+		return this;
+	}
+
+	/**
+	 * 
+	 * @return the object for chained methods
+	 */
+	public Drawable setFixed() {
+		this.fixed = true;
+		return this;
+	}
+
+	/**
+	 * 
+	 * @return the object for chained methods
+	 */
+	public Drawable show() {
+		return this.setVisible(true);
+	}
+	
+	protected abstract void _hide();
+
+	/**
+	 * 
+	 * @return
+	 */
+	public Drawable hide() {
+		_hide();
+		return this.setVisible(false);
+	}
+
+	/**
+	 * 
+	 * @return
+	 */
+	public boolean isVisible() {
+		return visible;
+	}
+	
+	public abstract void _show();
+
+	/**
+	 * 
+	 * @param visible
+	 * @return the object for chained methods
+	 */
+	public Drawable setVisible(boolean visible) {
+		this.visible = visible;
+		if (visible)
+			_show();
+		else
+			_hide();
+		return this;
+	}
+	
+	/**
+	 * The graphical element that is displayed
+	 * @return
+	 */
+	public abstract Node getNode();
+	
+	/**
+	 * Remove the drawable from its VUI
+	 */
+	public void delete() {
+		if(vui != null)
+			vui.remove(this);
+		
+		if(vuiMulti != null)
+			vuiMulti.remove(this);
+	}
+
+	/**
+	 * Get the linked drawable or null if it does not exist.
+	 * @param name name of the linked drawable
+	 * @return the linked drawable or null
+	 */
+	public Drawable getLinkedDrawable(String name) {
+		Drawable ret = null;
+		if(linkedDrawable.containsKey(name)) {
+			ret = linkedDrawable.get(name);
+		}
+		return ret;
+	}
+	
+	/**
+	 * Add a drawable to the list of linked drawables.<br/>
+	 * The relation is not symmetrical.
+	 * @param name
+	 * @param drawable
+	 */
+	public void addLinkedDrawable(String name, Drawable drawable) {
+		linkedDrawable.put(name, drawable);
+	}
+	
+	/**
+	 * Return the list of linked drawables. <br/>
+	 * Linked drawables will receive the same event when using dispatchEvent.
+	 */
+	public List<Drawable> getLinkedDrawables(){
+		return new ArrayList<Drawable>(linkedDrawable.values());
+	}
+	
+	/**
+	 * Used by dispatchEvent. Override if you want to register more event with the dispatchEvent
+	 * @param event
+	 */
+	protected void onEvent(Event event) {
+		switch (event.getEventType().getName()) {
+		case "MOUSE_CLICKED":
+			onMouseClick((MouseEvent)event);
+			break;
+		case "MOUSE_ENTERED":
+			onMouseEntered((MouseEvent)event);
+			break;
+		case "MOUSE_EXITED":
+			onMouseExited((MouseEvent)event);
+			break;
+		default:
+			break;
+		}
+	}
+	
+	/**
+	 * Called when onEvent receive a MOUSE_EXITED event.
+	 * @param event
+	 */
+	protected void onMouseExited(MouseEvent event) {
+		getNode().setStyle(defaultStyle);
+	}
+
+	/**
+	 * Called when onEvent receive a MOUSE_ENTERED event.
+	 * @param event
+	 */
+	protected void onMouseEntered(MouseEvent event) {
+		getNode().setStyle("-fx-stroke: black; -fx-stroke-width: 3;");
+	}
+
+	/**
+	 * Called when onEvent receive a MOUSE_CLICKED event.
+	 * @param event
+	 */
+	protected void onMouseClick(MouseEvent event) {
+		if(expanded) {
+			collapse();
+		} else {
+			expand();
+		}
+	}
+
+	/**
+	 * Dispatch an event to all linked drawables, and this drawable.
+	 * @param event
+	 */
+	public void dispatchEvent(Event event) {
+		for(Drawable d : getLinkedDrawables()) {
+			d.onEvent(event);
+		}
+		onEvent(event);
+	}
+	
+	/**
+	 * If this drawable should be shown in the vui explorer.
+	 */
+	public Drawable setShowInExplorer(boolean showInExplorer) {
+		this.showInExplorer = showInExplorer;
+		return this;
+	}
+	
+	/**
+	 * If relevant, the name of the drawable, usually it's the name
+	 * of the agent represented by this drawable.
+	 */
+	public String getName() {
+		return name == null ? toString() : name;
+	}
+	
+	/**
+	 * If relevant, additional info on the drawable, usually it's the
+	 * state of the agent represented by this drawable.
+	 */
+	public String getInfo() {
+		return info == null ? toString() : info;
+	}
+	
+	/**
+	 * If relevant, the name of the drawable, usually it's the name
+	 * of the agent represented by this drawable.
+	 */
+	public Drawable setName(String name) {
+		this.name = name;
+		return this;
+	}
+	
+	/**
+	 * If relevant, additional info on the drawable, usually it's the
+	 * state of the agent represented by this drawable.
+	 */
+	public Drawable setInfo(String info) {
+		this.info = info;
+		return this;
+	}
+	
+	/**
+	 * Action performed if drawable is clicked while collapsed.<br/>
+	 * By default do nothing
+	 * @see Drawable#collapse()
+	 */
+	public void expand() {
+		expanded = true;
+	}
+	
+	/**
+	 * Action performed if drawable is clicked while expanded.
+	 * @see Drawable#expand()
+	 */
+	public void collapse() {
+		expanded = false;
+	}
+	
+	public boolean isExpanded() {
+		return expanded;
+	}
+	
+	/**
+	 * Set the drawable on top of all others
+	 * @return
+	 */
+	public Drawable toFront() {
+		RunLaterHelper.runLater(()-> getNode().toFront());
+		return this;
+	}
+}
diff --git a/AMAKFX/src/fr/irit/smac/amak/ui/drawables/DrawablePoint.java b/AMAKFX/src/fr/irit/smac/amak/ui/drawables/DrawablePoint.java
index f17758a89e8b780e74d300884c1e325ea36f70ae..13145dff91cdde22924968cb7d21fe59d8953aae 100644
--- a/AMAKFX/src/fr/irit/smac/amak/ui/drawables/DrawablePoint.java
+++ b/AMAKFX/src/fr/irit/smac/amak/ui/drawables/DrawablePoint.java
@@ -1,65 +1,65 @@
-package fr.irit.smac.amak.ui.drawables;
-
-import javafx.event.EventHandler;
-import javafx.scene.Node;
-import javafx.scene.input.MouseEvent;
-import javafx.scene.shape.SVGPath;
-
-/**
- * Drawable to point things on the VUI, use a '+' icon as graphical representation.
- * @author Hugo
- *
- */
-public class DrawablePoint extends Drawable {
-
-	private SVGPath svg = new SVGPath();
-	
-	public DrawablePoint(double dx, double dy) {
-		super(dx, dy, 0.5, 0.5);
-		svg.setContent("M24 10h-10v-10h-4v10h-10v4h10v10h4v-10h10z");
-		getNode().addEventHandler(MouseEvent.ANY, new EventHandler<MouseEvent>() {
-			@Override
-			public void handle(MouseEvent event) {
-				dispatchEvent(event);
-			}
-		});
-	}
-
-	@Override
-	public void _onDraw() {
-		svg.setFill(color);
-		svg.setScaleX(getRenderedWidth());
-		svg.setScaleY(getRenderedHeight());
-		// the render has an offset, 10 look like a good value 
-		svg.setTranslateX(left()-10);
-		svg.setTranslateY(top()-10);
-	}
-
-	@Override
-	protected void _hide() {
-		svg.setVisible(false);
-	}
-
-	@Override
-	public void _show() {
-		svg.setVisible(true);
-	}
-
-	@Override
-	public Node getNode() {
-		return svg;
-	}
-	
-	@Override
-	protected void onMouseExited(MouseEvent event) {
-		svg.setScaleX(getRenderedWidth());
-		svg.setScaleY(getRenderedHeight());
-	}
-
-	@Override
-	protected void onMouseEntered(MouseEvent event) {
-		svg.setScaleX(getRenderedWidth()*1.5);
-		svg.setScaleY(getRenderedHeight()*1.5);
-	}
-
-}
+package fr.irit.smac.amak.ui.drawables;
+
+import javafx.event.EventHandler;
+import javafx.scene.Node;
+import javafx.scene.input.MouseEvent;
+import javafx.scene.shape.SVGPath;
+
+/**
+ * Drawable to point things on the VUI; uses a '+' icon as graphical representation.
+ * @author Hugo
+ *
+ */
+public class DrawablePoint extends Drawable {
+
+	private SVGPath svg = new SVGPath();
+	
+	public DrawablePoint(double dx, double dy) {
+		super(dx, dy, 0.1, 0.1);
+		svg.setContent("M24 10h-10v-10h-4v10h-10v4h10v10h4v-10h10z");
+		getNode().addEventHandler(MouseEvent.ANY, new EventHandler<MouseEvent>() {
+			@Override
+			public void handle(MouseEvent event) {
+				dispatchEvent(event);
+			}
+		});
+	}
+
+	@Override
+	public void _onDraw() {
+		svg.setFill(color);
+		svg.setScaleX(getRenderedWidth());
+		svg.setScaleY(getRenderedHeight());
+		// the render has an offset, 10 look like a good value 
+		svg.setTranslateX(left()-10);
+		svg.setTranslateY(top()-10);
+	}
+
+	@Override
+	protected void _hide() {
+		svg.setVisible(false);
+	}
+
+	@Override
+	public void _show() {
+		svg.setVisible(true);
+	}
+
+	@Override
+	public Node getNode() {
+		return svg;
+	}
+	
+	@Override
+	protected void onMouseExited(MouseEvent event) {
+		svg.setScaleX(getRenderedWidth());
+		svg.setScaleY(getRenderedHeight());
+	}
+
+	@Override
+	protected void onMouseEntered(MouseEvent event) {
+		svg.setScaleX(getRenderedWidth()*1.5);
+		svg.setScaleY(getRenderedHeight()*1.5);
+	}
+
+}
diff --git a/AMOEBAonAMAK/resources/simpleReinManualTrained.xml b/AMOEBAonAMAK/resources/simpleReinManualTrained.xml
new file mode 100644
index 0000000000000000000000000000000000000000..41c1737bbd847dfb282b76954bf82ae900985887
--- /dev/null
+++ b/AMOEBAonAMAK/resources/simpleReinManualTrained.xml
@@ -0,0 +1,185 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<System>
+    <Configuration>
+        <Learning creationOfNewContext="true" loadPresetContext="true" />
+    </Configuration>
+    <StartingAgents>
+        <Controller Name="Controller">
+            <ErrorMargin ErrorAllowed="1.0" AugmentationFactorError="0.5" DiminutionFactorError="0.5" MinErrorAllowed="0.01" NConflictBeforeAugmentation="50" NSuccessBeforeDiminution="50" />
+        </Controller>
+        <Sensor Name="p1" Enum="false" Max="50.0" Min="-50.0" />
+        <Sensor Name="a1" Enum="false" Max="1" Min="-1" />
+    </StartingAgents>
+    <PresetContexts>
+        <LastPerceptionsAndActionState oracle="-1" p1="10" a1="-1" />
+        <!-- Nb contexts = 8 -->
+        <Context Name="101" Confidence="100.0" ActionsProposal="100.0" Activated="false">
+            <Ranges>
+                <Range Sensor="p1" Start="0.0" End="1.0" />
+                <Range Sensor="a1" Start="-1" End="-1" />
+            </Ranges>
+            <LocalModel Type="MILLER_REGRESSION">
+                <Coefs>
+                    <Value v="100.0" />
+                    <Value v="0.0" />
+                    <Value v="0.0" />
+                </Coefs>
+                <Experiments>
+                    <Experiment Proposition="100.0">
+                        <Values>
+                            <Value Sensor="p1" Value="1.0" />
+                            <Value Sensor="a1" Value="-1.0" />
+                        </Values>
+                    </Experiment>
+                </Experiments>
+            </LocalModel>
+        </Context>
+        <Context Name="102" Confidence="100.0" ActionsProposal="-1" Activated="true">
+            <Ranges>
+                <Range Sensor="p1" Start="1.0" End="50.0" />
+                <Range Sensor="a1" Start="-1" End="-1" />
+            </Ranges>
+            <LocalModel Type="MILLER_REGRESSION">
+                <Coefs>
+                    <Value v="-1.0" />
+                    <Value v="0.0" />
+                    <Value v="0.0" />
+                </Coefs>
+                <Experiments>
+                    <Experiment Proposition="-1.0">
+                        <Values>
+                            <Value Sensor="p1" Value="10.0" />
+                            <Value Sensor="a1" Value="-1.0" />
+                        </Values>
+                    </Experiment>
+                </Experiments>
+            </LocalModel>
+        </Context>
+        <Context Name="103" Confidence="100.0" ActionsProposal="-1" Activated="false">
+            <Ranges>
+                <Range Sensor="p1" Start="0.0" End="49.0" />
+                <Range Sensor="a1" Start="1" End="1" />
+            </Ranges>
+            <LocalModel Type="MILLER_REGRESSION">
+                <Coefs>
+                    <Value v="-1.0" />
+                    <Value v="0.0" />
+                    <Value v="0.0" />
+                </Coefs>
+                <Experiments>
+                    <Experiment Proposition="-1.0">
+                        <Values>
+                            <Value Sensor="p1" Value="10.0" />
+                            <Value Sensor="a1" Value="1.0" />
+                        </Values>
+                    </Experiment>
+                </Experiments>
+            </LocalModel>
+        </Context>
+        <Context Name="104" Confidence="100.0" ActionsProposal="-100" Activated="false">
+            <Ranges>
+                <Range Sensor="p1" Start="49.0" End="50.0" />
+                <Range Sensor="a1" Start="1" End="1" />
+            </Ranges>
+            <LocalModel Type="MILLER_REGRESSION">
+                <Coefs>
+                    <Value v="-100.0" />
+                    <Value v="0.0" />
+                    <Value v="0.0" />
+                </Coefs>
+                <Experiments>
+                    <Experiment Proposition="-100.0">
+                        <Values>
+                            <Value Sensor="p1" Value="49.0" />
+                            <Value Sensor="a1" Value="1.0" />
+                        </Values>
+                    </Experiment>
+                </Experiments>
+            </LocalModel>
+        </Context>
+        <Context Name="105" Confidence="100.0" ActionsProposal="-1" Activated="false">
+            <Ranges>
+                <Range Sensor="p1" Start="-49.0" End="-0.0" />
+                <Range Sensor="a1" Start="-1" End="-1" />
+            </Ranges>
+            <LocalModel Type="MILLER_REGRESSION">
+                <Coefs>
+                    <Value v="-1.0" />
+                    <Value v="0.0" />
+                    <Value v="0.0" />
+                </Coefs>
+                <Experiments>
+                    <Experiment Proposition="-1.0">
+                        <Values>
+                            <Value Sensor="p1" Value="-10.0" />
+                            <Value Sensor="a1" Value="-1.0" />
+                        </Values>
+                    </Experiment>
+                </Experiments>
+            </LocalModel>
+        </Context>
+        <Context Name="106" Confidence="100.0" ActionsProposal="-100" Activated="false">
+            <Ranges>
+                <Range Sensor="p1" Start="-50.0" End="-49.0" />
+                <Range Sensor="a1" Start="-1" End="-1" />
+            </Ranges>
+            <LocalModel Type="MILLER_REGRESSION">
+                <Coefs>
+                    <Value v="-100.0" />
+                    <Value v="0.0" />
+                    <Value v="0.0" />
+                </Coefs>
+                <Experiments>
+                    <Experiment Proposition="-100.0">
+                        <Values>
+                            <Value Sensor="p1" Value="-49.0" />
+                            <Value Sensor="a1" Value="-1.0" />
+                        </Values>
+                    </Experiment>
+                </Experiments>
+            </LocalModel>
+        </Context>
+        <Context Name="107" Confidence="100.0" ActionsProposal="-1" Activated="false">
+            <Ranges>
+                <Range Sensor="p1" Start="-50.0" End="-1.0" />
+                <Range Sensor="a1" Start="1" End="1" />
+            </Ranges>
+            <LocalModel Type="MILLER_REGRESSION">
+                <Coefs>
+                    <Value v="-1.0" />
+                    <Value v="0.0" />
+                    <Value v="0.0" />
+                </Coefs>
+                <Experiments>
+                    <Experiment Proposition="-1.0">
+                        <Values>
+                            <Value Sensor="p1" Value="-10.0" />
+                            <Value Sensor="a1" Value="1.0" />
+                        </Values>
+                    </Experiment>
+                </Experiments>
+            </LocalModel>
+        </Context>
+        <Context Name="108" Confidence="100.0" ActionsProposal="100" Activated="false">
+            <Ranges>
+                <Range Sensor="p1" Start="-1.0" End="-0.0" />
+                <Range Sensor="a1" Start="1" End="1" />
+            </Ranges>
+            <LocalModel Type="MILLER_REGRESSION">
+                <Coefs>
+                    <Value v="100.0" />
+                    <Value v="0.0" />
+                    <Value v="0.0" />
+                </Coefs>
+                <Experiments>
+                    <Experiment Proposition="100.0">
+                        <Values>
+                            <Value Sensor="p1" Value="-1.0" />
+                            <Value Sensor="a1" Value="1.0" />
+                        </Values>
+                    </Experiment>
+                </Experiments>
+            </LocalModel>
+        </Context>
+    </PresetContexts>
+</System>
\ No newline at end of file
diff --git a/AMOEBAonAMAK/src/agents/AmoebaAgent.java b/AMOEBAonAMAK/src/agents/AmoebaAgent.java
index 45a2c6c1ba8e1985d2d4a11c0c2075cfeff62fb5..8ae90bc81b63ebea93836dfe2a4d0c0d07bab4fc 100644
--- a/AMOEBAonAMAK/src/agents/AmoebaAgent.java
+++ b/AMOEBAonAMAK/src/agents/AmoebaAgent.java
@@ -1,109 +1,110 @@
-package agents;
-
-import agents.percept.Percept;
-import fr.irit.smac.amak.Agent;
-import fr.irit.smac.amak.tools.Loggable;
-import gui.RenderStrategy;
-import kernel.AMOEBA;
-import kernel.World;
-
-/**
- * The base class for all AMOEBA agents
- */
-public abstract class AmoebaAgent extends Agent<AMOEBA, World> implements Loggable {
-	// Attributes
-	protected String name;
-	private boolean dying;
-	
-	protected RenderStrategy renderStrategy;
-
-	/**
-	 * Instantiate a new agent attached to an amoeba
-	 * @param the amoeba
-	 */
-	public AmoebaAgent(AMOEBA amas, Object... params) {
-		super(amas, params);
-		this.dying = false;
-	}
-	
-	@Override
-	protected void onReady() {
-		super.onReady();
-		logger().debug("CYCLE "+getAmas().getCycle(), "Agent %s ready.", toString());
-	}
-
-	@Override
-	protected void onDecide() {
-	}
-
-	@Override
-	protected void onRenderingInitialization() {
-		if(renderStrategy != null) {
-			renderStrategy.initialize();
-		}
-	}
-	
-	@Override
-	public void onUpdateRender() {
-		amas.getEnvironment().incrementNbActivatedAgent();
-		if(renderStrategy != null && !isDying()) {
-			if (amas.isRenderUpdate()) {
-				renderStrategy.render();
-			}
-		}
-	}
-
-	/**
-	 * Set the name of the agent. Useful for visualization, and essential for {@link Percept}.
-	 * @param name
-	 */
-	public void setName(String name) {
-		this.name = name;
-	}
-	
-	@Override
-	public void destroy() {
-		dying = true;
-		if(renderStrategy != null) {
-			renderStrategy.delete();
-		}
-		super.destroy();
-		logger().debug("CYCLE "+getAmas().getCycle(), "Agent %s destroyed.", toString());
-	}
-
-	/**
-	 * Get the name of the agent. Useful for visualization, and essential for {@link Percept}.
-	 * @param name
-	 */
-	public String getName() {
-		return name;
-	}
-
-	/**
-	 * Tell if the agent is dying. A dying agent no longer perform any useful action, but is not yet removed from its system.
-	 * @return
-	 */
-	public boolean isDying() {
-		return dying;
-	}
-	
-	/**
-	 * Set the render strategy of an agent.<br/>
-	 * {@link RenderStrategy#delete()} the old one, and {@link RenderStrategy#initialize()} the new one.
-	 * @param renderStrategy
-	 * @see RenderStrategy
-	 */
-	public void setRenderStrategy(RenderStrategy renderStrategy) {
-		if(this.renderStrategy != null) this.renderStrategy.delete();
-		this.renderStrategy = renderStrategy;
-		if(this.renderStrategy != null) this.renderStrategy.initialize();
-	}
-	
-	/**
-	 * Get the render strategy of an agent.
-	 * @return
-	 */
-	public RenderStrategy getRenderStrategy() {
-		return renderStrategy;
-	}
-}
+package agents;
+
+import agents.percept.Percept;
+import fr.irit.smac.amak.Agent;
+import fr.irit.smac.amak.tools.Loggable;
+import gui.RenderStrategy;
+import kernel.AMOEBA;
+import kernel.World;
+
+/**
+ * The base class for all AMOEBA agents
+ */
+public abstract class AmoebaAgent extends Agent<AMOEBA, World> implements Loggable {
+	// Attributes
+	protected String name;
+	private boolean dying;
+	
+	protected RenderStrategy renderStrategy;
+
+	/**
+	 * Instantiate a new agent attached to an amoeba
+	 * @param the amoeba
+	 */
+	public AmoebaAgent(AMOEBA amas, Object... params) {
+		super(amas, params);
+		this.dying = false;
+	}
+	
+	@Override
+	protected void onReady() {
+		super.onReady();
+		logger().debug("CYCLE "+getAmas().getCycle(), "Agent %s ready.", toString());
+	}
+
+	@Override
+	protected void onDecide() {
+	}
+
+	@Override
+	protected void onRenderingInitialization() {
+		if(renderStrategy != null) {
+			renderStrategy.initialize(getAmas().getVUIMulti());
+			
+		}
+	}
+	
+	@Override
+	public void onUpdateRender() {
+		amas.getEnvironment().incrementNbActivatedAgent();
+		if(renderStrategy != null && !isDying()) {
+			if (amas.isRenderUpdate()) {
+				renderStrategy.render();
+			}
+		}
+	}
+
+	/**
+	 * Set the name of the agent. Useful for visualization, and essential for {@link Percept}.
+	 * @param name
+	 */
+	public void setName(String name) {
+		this.name = name;
+	}
+	
+	@Override
+	public void destroy() {
+		dying = true;
+		if(renderStrategy != null) {
+			renderStrategy.delete();
+		}
+		super.destroy();
+		logger().debug("CYCLE "+getAmas().getCycle(), "Agent %s destroyed.", toString());
+	}
+
+	/**
+	 * Get the name of the agent. Useful for visualization, and essential for {@link Percept}.
+	 * @param name
+	 */
+	public String getName() {
+		return name;
+	}
+
+	/**
+	 * Tell if the agent is dying. A dying agent no longer perform any useful action, but is not yet removed from its system.
+	 * @return
+	 */
+	public boolean isDying() {
+		return dying;
+	}
+	
+	/**
+	 * Set the render strategy of an agent.<br/>
+	 * {@link RenderStrategy#delete()} the old one, and {@link RenderStrategy#initialize()} the new one.
+	 * @param renderStrategy
+	 * @see RenderStrategy
+	 */
+	public void setRenderStrategy(RenderStrategy renderStrategy) {
+		if(this.renderStrategy != null) this.renderStrategy.delete();
+		this.renderStrategy = renderStrategy;
+		if(this.renderStrategy != null) this.renderStrategy.initialize(getAmas().getVUIMulti());
+	}
+	
+	/**
+	 * Get the render strategy of an agent.
+	 * @return
+	 */
+	public RenderStrategy getRenderStrategy() {
+		return renderStrategy;
+	}
+}
diff --git a/AMOEBAonAMAK/src/agents/context/Context.java b/AMOEBAonAMAK/src/agents/context/Context.java
index 9cf57cf509fc3d7f4aad60f2750594701635e15f..758792156bdb0e3ae40f72cee8d5f301e2cdd4a6 100644
--- a/AMOEBAonAMAK/src/agents/context/Context.java
+++ b/AMOEBAonAMAK/src/agents/context/Context.java
@@ -23,6 +23,10 @@ import ncs.NCS;
 import utils.Pair;
 import utils.TRACE_LEVEL;
 
+
+
+
+
 /**
  * The core agent of AMOEBA.
  * 
@@ -47,6 +51,8 @@ public class Context extends AmoebaAgent {
 	private double action;
 
 	private Double actionProposition = null;
+	public Double lastPrediction = null;
+	//public Double smoothedPrediction = null;
 
 	//private boolean valid;
 
@@ -63,6 +69,12 @@ public class Context extends AmoebaAgent {
 	public static final int successesBeforeDiminution = 5;
 	public static final int errorsBeforeAugmentation = 5;
 	
+	public boolean fusionned = false;
+	public boolean isInNeighborhood = false;
+	
+	static final int VOID_CYCLE_START = 0;
+	static final int OVERLAP_CYCLE_START = 0;
+	
 	public Context(AMOEBA amoeba) {
 		super(amoeba);
 		buildContext();
@@ -106,12 +118,47 @@ public class Context extends AmoebaAgent {
 		Experiment firstPoint = new Experiment(this);
 		ArrayList<Percept> var = getAmas().getPercepts();
 		for (Percept p : var) {
-			Range r;
+			Range r = null;
 
 			//Pair<Double, Double> radiuses = getAmas().getHeadAgent().getMaxRadiusesForContextCreation(p);
+			//TODO use neihbors sizes to define radiuses for creation !!!!!!!!!!!
 			Pair<Double, Double> radiuses = getAmas().getHeadAgent().getRadiusesForContextCreation(p);
 
-			r = new Range(this, p.getValue() - radiuses.getA(), p.getValue() + radiuses.getB(), 0, true, true, p);
+			
+					
+			
+			if(getAmas().getHeadAgent().activatedNeighborsContexts.size()>0 && getAmas().data.isActiveLearning) {
+				
+				if(getAmas().getHeadAgent().lastEndogenousRequest != null) {
+					if(getAmas().getHeadAgent().lastEndogenousRequest.getType() == REQUEST.VOID) {
+						double startRange = getAmas().getHeadAgent().lastEndogenousRequest.getBounds().get(p).getA();
+						double endRange = getAmas().getHeadAgent().lastEndogenousRequest.getBounds().get(p).getB();
+						
+						getAmas().getEnvironment()
+						.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("Range creation by VOID", this.getName(), p.getName(), getAmas().getHeadAgent().meanNeighborhoodRaduises.get(p).toString())));
+						r = new Range(this, startRange, endRange, 0, true, true, p, getAmas().getHeadAgent().minMeanNeighborhoodStartIncrements, getAmas().getHeadAgent().minMeanNeighborhoodEndIncrements);
+					
+					}
+				}
+				if(r==null) {
+					double radiusCreation = getAmas().getHeadAgent().minNeighborhoodRadius;
+					//double radiusCreation = getAmas().getHeadAgent().meanNeighborhoodRaduises.get(p);
+					//double radiusCreation = getAmas().getHeadAgent().minMeanNeighborhoodRaduises;
+					getAmas().getEnvironment()
+					.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("Range creation by mean", this.getName(), p.getName(), getAmas().getHeadAgent().meanNeighborhoodRaduises.get(p).toString())));
+					r = new Range(this, p.getValue() - radiusCreation, p.getValue() + radiusCreation, 0, true, true, p, getAmas().getHeadAgent().minMeanNeighborhoodStartIncrements, getAmas().getHeadAgent().minMeanNeighborhoodEndIncrements);
+				
+				}
+				
+			
+			
+			}
+			if(r==null){
+				r = new Range(this, p.getValue() - radiuses.getA(), p.getValue() + radiuses.getB(), 0, true, true, p);
+				getAmas().getEnvironment()
+				.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("Range creation by init", this.getName(), p.getName(), radiuses.getA().toString())));
+			}
+			
 
 			// r = new Range(this, v.getValue() - radius, v.getValue() + radius, 0, true,
 			// true, v, world);
@@ -157,16 +204,13 @@ public class Context extends AmoebaAgent {
 		// expand();
 
 		this.confidence = fatherContext.confidence;
-		if (fatherContext.getLocalModel().getType() == TypeLocalModel.MILLER_REGRESSION) {
-
-			this.localModel = new LocalModelMillerRegression(this);
-			// this.formulaLocalModel = ((LocalModelMillerRegression)
-			// bestNearestContext.localModel).getFormula(bestNearestContext);
-			Double[] coef = ((LocalModelMillerRegression) fatherContext.localModel).getCoef();
-			((LocalModelMillerRegression) this.localModel).setCoef(coef);
-			this.actionProposition = ((LocalModelMillerRegression) fatherContext.localModel)
-					.getProposition();
-		}
+	
+		this.localModel = getAmas().buildLocalModel(this);
+		// this.formulaLocalModel = ((LocalModelMillerRegression)
+		// bestNearestContext.localModel).getFormula(bestNearestContext);
+		Double[] coef = fatherContext.localModel.getCoef();
+		this.localModel.setCoef(coef);
+		this.actionProposition = fatherContext.localModel.getProposition();
 
 		getAmas().addAlteredContext(this);
 		this.setName(String.valueOf(this.hashCode()));
@@ -180,35 +224,63 @@ public class Context extends AmoebaAgent {
 
 		Experiment firstPoint = new Experiment(this);
 		ArrayList<Percept> var = getAmas().getPercepts();
-		for (Percept v : var) {
-			Range r;
+		for (Percept p : var) {
+			Range r = null;
 			//Pair<Double, Double> radiuses = getAmas().getHeadAgent().getMaxRadiusesForContextCreation(v);
-			Pair<Double, Double> radiuses = getAmas().getHeadAgent().getRadiusesForContextCreation(v);
+			//TODO use neihbors sizes to define radiuses for creation !!!!!!!!!!!
+			Pair<Double, Double> radiuses = getAmas().getHeadAgent().getRadiusesForContextCreation(p);
 			
 
-			r = new Range(this, v.getValue() - radiuses.getA(), v.getValue() + radiuses.getB(), 0, true, true, v);
+			if(getAmas().getHeadAgent().activatedNeighborsContexts.size()>0 && getAmas().data.isActiveLearning) {
+				
+				
+				
+				if(getAmas().getHeadAgent().lastEndogenousRequest != null) {
+					if(getAmas().getHeadAgent().lastEndogenousRequest.getType() == REQUEST.VOID) {
+						double startRange = getAmas().getHeadAgent().lastEndogenousRequest.getBounds().get(p).getA();
+						double endRange = getAmas().getHeadAgent().lastEndogenousRequest.getBounds().get(p).getB();
+						//System.out.println(startRange + "  " + endRange);
+						getAmas().getEnvironment()
+						.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("Range creation by VOID", this.getName(), p.getName(), getAmas().getHeadAgent().meanNeighborhoodRaduises.get(p).toString())));
+						r = new Range(this, startRange, endRange, 0, true, true, p, getAmas().getHeadAgent().minMeanNeighborhoodStartIncrements, getAmas().getHeadAgent().minMeanNeighborhoodEndIncrements);
+					
+					}
+				}
+				if(r==null) {
+					double radiusCreation = getAmas().getHeadAgent().minNeighborhoodRadius;
+					//double radiusCreation = getAmas().getHeadAgent().meanNeighborhoodRaduises.get(p);
+					//double radiusCreation = getAmas().getHeadAgent().minMeanNeighborhoodRaduises;
+					getAmas().getEnvironment()
+					.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("Range creation by mean", this.getName(), p.getName(), getAmas().getHeadAgent().meanNeighborhoodRaduises.get(p).toString())));
+					r = new Range(this, p.getValue() - radiusCreation, p.getValue() + radiusCreation, 0, true, true, p, getAmas().getHeadAgent().minMeanNeighborhoodStartIncrements, getAmas().getHeadAgent().minMeanNeighborhoodEndIncrements);
+				
+				}
+				
+			}
+			if(r==null) {
+				r = new Range(this, p.getValue() - radiuses.getA(), p.getValue() + radiuses.getB(), 0, true, true, p);
+				getAmas().getEnvironment()
+				.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("Range creation by init", this.getName(), p.getName(), radiuses.getA().toString())));
+			}
+
 
-			ranges.put(v, r);
-			ranges.get(v).setValue(v.getValue());
+			ranges.put(p, r);
+			ranges.get(p).setValue(p.getValue());
 
-			firstPoint.addDimension(v, v.getValue());
+			firstPoint.addDimension(p, p.getValue());
 
-			v.addContextProjection(this);;
+			p.addContextProjection(this);;
 		}
 
 		//expand();
 
-		this.confidence = bestNearestContext.confidence;
-		if (bestNearestContext.getLocalModel().getType() == TypeLocalModel.MILLER_REGRESSION) {
-
-			this.localModel = new LocalModelMillerRegression(this);
-			// this.formulaLocalModel = ((LocalModelMillerRegression)
-			// bestNearestContext.localModel).getFormula(bestNearestContext);
-			Double[] coef = ((LocalModelMillerRegression) bestNearestContext.localModel).getCoef();
-			((LocalModelMillerRegression) this.localModel).setCoef(coef);
-			this.actionProposition = ((LocalModelMillerRegression) bestNearestContext.localModel)
-					.getProposition();
-		}
+		//this.confidence = bestNearestContext.confidence;
+		this.localModel = getAmas().buildLocalModel(this);
+		// this.formulaLocalModel = ((LocalModelMillerRegression)
+		// bestNearestContext.localModel).getFormula(bestNearestContext);
+		Double[] coef = bestNearestContext.localModel.getCoef();
+		this.localModel.setCoef(coef);
+		this.actionProposition = bestNearestContext.localModel.getProposition();
 		
 		localModel.setFirstExperiments(new ArrayList<Experiment>(bestNearestContext.getLocalModel().getFirstExperiments()));
 
@@ -565,7 +637,7 @@ public class Context extends AmoebaAgent {
 		for (Percept pct : getAmas().getPercepts()) {
 			currentDistance = this.distance(ctxt, pct);
 			
-			if(currentDistance<-pct.getMappingErrorAllowedMin() && getAmas().getCycle()>500) {
+			if(currentDistance<-pct.getMappingErrorAllowedMin() && getAmas().getCycle()>OVERLAP_CYCLE_START) {
 				getEnvironment().trace(TRACE_LEVEL.DEBUG,new ArrayList<String>(Arrays.asList("OVERLAP",pct.getName(), ""+this,""+ctxt)) );
 				overlapCounts+=1;
 				overlapDistances.put(pct, Math.abs(currentDistance));
@@ -575,7 +647,7 @@ public class Context extends AmoebaAgent {
 			}
 			
 
-			if (currentDistance > pct.getMappingErrorAllowedMin() && getAmas().getCycle()>1000) {
+			if (currentDistance > pct.getMappingErrorAllowedMin() && getAmas().getCycle()>VOID_CYCLE_START) {
 				getEnvironment().trace(TRACE_LEVEL.DEBUG,new ArrayList<String>(Arrays.asList("VOID",pct.getName(), ""+this,""+ctxt, "distance", ""+currentDistance)) );
 				voidDistances.put(pct, currentDistance);
 				bounds.put(pct, this.voidBounds(ctxt, pct));
@@ -587,7 +659,7 @@ public class Context extends AmoebaAgent {
 
 		}
 
-		if (overlapCounts == getAmas().getPercepts().size() && getAmas().getCycle() > 500) {
+		if (overlapCounts == getAmas().getPercepts().size() && getAmas().getCycle() > OVERLAP_CYCLE_START) {
 			
 			getEnvironment().trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList(getAmas().getPercepts().size() + "OVERLAPS", ""+this,""+ctxt)) );
 			
@@ -613,11 +685,14 @@ public class Context extends AmoebaAgent {
 				
 			}		
 		}
-		else if(overlapCounts == getAmas().getPercepts().size()-1 && voidDistances.size() == 1 && getAmas().getCycle() > 750) {
+		else if(overlapCounts == getAmas().getPercepts().size()-1 && voidDistances.size() == 1 && getAmas().getCycle() > VOID_CYCLE_START) {
 			
 			getEnvironment().trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("VOID", ""+this,""+ctxt)) );
 			
+			updateBoundsWithNeighborhood(bounds);
+			
 			HashMap<Percept, Double> request = boundsToRequest(bounds);
+			
 			if(request != null) {
 				
 				if(getAmas().getHeadAgent().isRealVoid(request)) {
@@ -631,6 +706,30 @@ public class Context extends AmoebaAgent {
 	
 		return null;	
 	}
+	
+	private void updateBoundsWithNeighborhood(HashMap<Percept, Pair<Double, Double>> voidBounds) {
+
+		
+		
+		for (HashMap.Entry<Percept,  Pair<Double, Double>> entry : voidBounds.entrySet()) {
+			
+			double neighborhoodRadius = entry.getKey().getRadiusContextForCreation()*2;
+			
+			if(entry.getValue().getA()<entry.getKey().getValue()-neighborhoodRadius) {
+				entry.getValue().setA(entry.getKey().getValue()-neighborhoodRadius);
+			}
+			if(entry.getKey().getValue()+neighborhoodRadius < entry.getValue().getB()) {
+				entry.getValue().setB(entry.getKey().getValue()+neighborhoodRadius);
+			}
+			
+		    
+		    
+		}
+		
+
+		
+	}
+	
 
 	public double distanceAsVolume(Context ctxt) {
 		double totalDistanceAsVolume = 1.0;
@@ -769,11 +868,12 @@ public class Context extends AmoebaAgent {
 			this.getRanges().get(pct).setStart(Math.min(this.getRanges().get(pct).getStart(), fusionContext.getRanges().get(pct).getStart()));
 		}
 		
-		this.setConfidence(Math.max(this.getConfidence(), fusionContext.getConfidence()));
+		this.setConfidence(2*Math.max(this.getConfidence(), fusionContext.getConfidence()));
 		regressionPerformance.setPerformanceIndicator(Math.max(this.regressionPerformance.getPerformanceIndicator(), fusionContext.regressionPerformance.getPerformanceIndicator()));
 		
 		
 		fusionContext.destroy();
+		fusionned =  true;
 		getAmas().getHeadAgent().setBadCurrentCriticalityMapping();
 	}
 	
@@ -783,7 +883,8 @@ public class Context extends AmoebaAgent {
 			request.put(pct, getRandomValueInRange(pct));
 		}
 		getEnvironment().trace(TRACE_LEVEL.EVENT,new ArrayList<String>(Arrays.asList("NEW ENDO REQUEST","10", ""+request, ""+this.getName())));
-		getAmas().getHeadAgent().addSelfRequest(request, 10,this);
+		getAmas().getHeadAgent().addChildRequest(request, 10,this);
+		
 	}
 	
 	private Double getRandomValueInRange(Percept pct) {
@@ -800,6 +901,17 @@ public class Context extends AmoebaAgent {
 
 		return exp;
 	}
+	
+	public Experiment getArtificialExperiment() {
+		ArrayList<Percept> percepts = getAmas().getPercepts();
+		Experiment exp = new Experiment(this);
+		for (Percept pct : percepts) {
+			exp.addDimension(pct, this.getRanges().get(pct).getCenter());
+		}
+		
+
+		return exp;
+	}
 
 	public double sumOfRangesLengths() {
 		double sum = 0;
@@ -861,7 +973,7 @@ public class Context extends AmoebaAgent {
 //			ranges.get(p).adapt(p.getValue()); 
 //		}
 
-		ranges.get(p).adapt(p.getValue());
+		ranges.get(p).adapt(p.getValue(), false, null);
 
 //		if(perceptForAdapatationAndOverlapingContext.getB()!=null) {
 //			if(testIfOtherContextShouldFinalyShrink(perceptForAdapatationAndOverlapingContext.getB(), perceptForAdapatationAndOverlapingContext.getA())){
@@ -904,6 +1016,34 @@ public class Context extends AmoebaAgent {
 			}
 		}
 	}
+	
+	
+	private Percept getPerceptWithLesserImpactOnContext(ArrayList<Percept> percepts) {
+		
+		Percept perceptForAdapation = null;
+		double minDistanceToFrontier = Double.MAX_VALUE;
+		double distanceToFrontier;
+		
+		for (Percept pct : percepts) {
+			if (!ranges.get(pct).isPerceptEnum()) {
+
+				distanceToFrontier = Math.min(ranges.get(pct).startDistance(pct.getValue()),
+						ranges.get(pct).endDistance(pct.getValue()));
+				
+				for(Percept otherPct : percepts) {
+					if(otherPct != pct) {
+						distanceToFrontier*= this.getRanges().get(otherPct).getLenght();
+					}
+				}
+
+				if (distanceToFrontier < minDistanceToFrontier) {
+					minDistanceToFrontier = distanceToFrontier;
+					perceptForAdapation = pct;
+				}
+			}
+		}
+		return perceptForAdapation;
+	}
 
 	private Percept getPerceptWithBiggerImpactOnOverlap(ArrayList<Percept> percepts, Context bestContext) {
 		Percept perceptWithBiggerImpact = null;
@@ -961,7 +1101,8 @@ public class Context extends AmoebaAgent {
 	}
 
 	private Pair<Percept, Context> getPerceptForAdaptationWithOverlapingContext(ArrayList<Percept> percepts) {
-		Percept perceptForAdapation = null;
+		Percept perceptForBigerImpactOnOverlap = null;
+		Percept perceptWithLesserImpactOnContext = null;
 		Context overlapingContext = null;
 		double minDistanceToFrontier = Double.MAX_VALUE;
 		double distanceToFrontier;
@@ -977,29 +1118,34 @@ public class Context extends AmoebaAgent {
 					} else {
 						overlappingVolume = this.getOverlappingVolume(ctxt);
 						if (overlappingVolume > maxOverlappingVolume) {
-							perceptForAdapation = getPerceptWithBiggerImpactOnOverlap(percepts, ctxt);
+							
 							overlapingContext = ctxt;
 						}
 					}
 				}
 			}
+			
+			if(overlapingContext != null) {
+				perceptForBigerImpactOnOverlap = getPerceptWithBiggerImpactOnOverlap(percepts, overlapingContext);
+				
+			}
+			
 		}
-		if (perceptForAdapation == null) {
-			for (Percept pct : percepts) {
-				if (!ranges.get(pct).isPerceptEnum()) {
-
-					distanceToFrontier = Math.min(ranges.get(pct).startDistance(pct.getValue()),
-							ranges.get(pct).endDistance(pct.getValue()));
-
-					if (distanceToFrontier < minDistanceToFrontier) {
-						minDistanceToFrontier = distanceToFrontier;
-						perceptForAdapation = pct;
-					}
-				}
+		
+		perceptWithLesserImpactOnContext = getPerceptWithLesserImpactOnContext(percepts);
+		if(perceptForBigerImpactOnOverlap != null) {
+			
+			if(perceptForBigerImpactOnOverlap == perceptWithLesserImpactOnContext) {
+				return new Pair<Percept, Context>(perceptForBigerImpactOnOverlap, overlapingContext);
 			}
+			
 		}
+		
+		return new Pair<Percept, Context>(perceptWithLesserImpactOnContext, overlapingContext);
+		
+		
 
-		return new Pair<Percept, Context>(perceptForAdapation, overlapingContext);
+		
 	}
 
 	public boolean containedBy(Context ctxt) {
@@ -1185,14 +1331,14 @@ public class Context extends AmoebaAgent {
 	 * @param head the head
 	 */
 	public void growRanges() {
-
+		
 		ArrayList<Percept> allPercepts = getAmas().getPercepts();
 		for (Percept pct : allPercepts) {
 			boolean contain = ranges.get(pct).contains(pct.getValue()) == 0 ;
-	
-			if (!contain) {
+			getEnvironment().trace(TRACE_LEVEL.NCS, new ArrayList<String>(Arrays.asList(this.getName(), "CONTAINED", ""+contain)));
+			if (!contain && !fusionned) {
 				if(ranges.get(pct).getLenght()<pct.getMappingErrorAllowedMax()) {
-					ranges.get(pct).adapt(pct.getValue());
+					ranges.get(pct).adapt(pct.getValue(), false, null);
 				}
 				
 				//ranges.get(pct).extend(pct.getValue(), pct);
@@ -1210,14 +1356,21 @@ public class Context extends AmoebaAgent {
 	public void shrinkRangesToJoinBorders(Context bestContext) {
 		Percept perceptWithBiggerImpactOnOverlap = getPerceptWithBiggerImpactOnOverlap(getAmas().getPercepts(),
 				bestContext);
+		
+		Percept perceptWithLesserImpactOnContext = getPerceptWithLesserImpactOnContext(getAmas().getPercepts());
 
 
 		if (perceptWithBiggerImpactOnOverlap == null) {
 			this.destroy();
 		} else {
 
+				if(perceptWithBiggerImpactOnOverlap == perceptWithLesserImpactOnContext) {
+					ranges.get(perceptWithBiggerImpactOnOverlap).adapt(perceptWithBiggerImpactOnOverlap.getValue(), true, bestContext);
+				}else {
+					ranges.get(perceptWithLesserImpactOnContext).adapt(perceptWithLesserImpactOnContext.getValue(), true, bestContext);
+				}
 
-			ranges.get(perceptWithBiggerImpactOnOverlap).adapt(perceptWithBiggerImpactOnOverlap.getValue());
+			
 
 		}
 	}
@@ -1418,12 +1571,23 @@ public class Context extends AmoebaAgent {
 	public String toStringFull() {
 		String s = "";
 		s += "Context : " + getName() + "\n";
-		s += "creation tick : " + tickCreation +"\n";
+		s += "Creation tick : " + tickCreation +"\n";
+		s += "Confidence : " + confidence + "\n";
 		s += "\n";
 
-		s += "Model : ";
+		s += "Model "+this.localModel.getType()+" :";
 		s += this.localModel.getCoefsFormula() + "\n";
-
+		s += "Last Predicition " + lastPrediction  +"\n";
+		//s += "Smoothed Predicition " + smoothedPrediction  +"\n";
+		
+		s += "\n";
+		s += "Ranges :\n";
+		for(Percept p : getRanges().keySet()) {
+			s += p + " : " + getRangeByPercept(p)+"\n"; 
+			s += p + " : " + getRangeByPercept(p).getStartIncrement()+"\n"; 
+			s += p + " : " + getRangeByPercept(p).getEndIncrement()+"\n"; 
+			
+		}
 		s += "\n";
 		
 		s += "Last Distance to Regression " + lastDistanceToModel + "\n";
@@ -1447,7 +1611,7 @@ public class Context extends AmoebaAgent {
 			s += "Action proposed : " + this.getActionProposal() + "\n";
 		}
 
-		s += "Confidence : " + confidence + "\n";
+		
 
 		s += "\n";
 
@@ -1507,6 +1671,8 @@ public class Context extends AmoebaAgent {
 	}
 
 	public double getActionProposal() {
+		
+		
 		return localModel.getProposition();
 	}
 
diff --git a/AMOEBAonAMAK/src/agents/context/Range.java b/AMOEBAonAMAK/src/agents/context/Range.java
index 5529a9ac87ad02a18553d7e6df475900d45f6517..5f48de97a246b5b257caae938623f4d95c9e4878 100644
--- a/AMOEBAonAMAK/src/agents/context/Range.java
+++ b/AMOEBAonAMAK/src/agents/context/Range.java
@@ -102,8 +102,8 @@ public class Range implements Serializable, Comparable, Cloneable {
 		if (isPerceptEnum()) {
 			this.setStart_inclu(start_inclu);
 			this.setEnd_inclu(end_inclu);
-			this.setStart(Math.round(p.getValue()));
-			this.setEnd(Math.round(p.getValue()));
+			this.setStart(Math.round(start));
+			this.setEnd(Math.round(end));
 		} else {
 			this.setStart_inclu(start_inclu);
 			this.setEnd_inclu(end_inclu);
@@ -128,6 +128,52 @@ public class Range implements Serializable, Comparable, Cloneable {
 
 		startIncrement = 0.25 * world.getMappingErrorAllowed() * percept.getMinMaxDistance();
 		endIncrement = startIncrement;
+		world
+		.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList(context.getName(), p.getName(), "Init start increment " + startIncrement)));
+		world
+		.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList(context.getName(), p.getName(), "Init end increment " + endIncrement)));
+	}
+	
+	public Range(Context context, double start, double end, double extendedrangeatcreation, boolean start_inclu,
+			boolean end_inclu, Percept p, double startIncr, double endIncr) {
+		super();
+
+		world = context.getAmas().getEnvironment();
+
+		AVT_deceleration = world.getAVT_deceleration();
+		AVT_acceleration = world.getAVT_acceleration();
+		AVT_minRatio = world.getAVT_percentAtStart();
+
+		this.percept = p;
+		if (isPerceptEnum()) {
+			this.setStart_inclu(start_inclu);
+			this.setEnd_inclu(end_inclu);
+			this.setStart(Math.round(start));
+			this.setEnd(Math.round(end));
+		} else {
+			this.setStart_inclu(start_inclu);
+			this.setEnd_inclu(end_inclu);
+			this.setStart(start - Math.abs(extendedrangeatcreation * start));
+			this.setEnd(end + Math.abs(extendedrangeatcreation * end));
+		}
+		this.context = context;
+		id = maxid;
+		maxid++;
+
+		/* Initialization of AVT : a better way to do that should be developped */
+//		this.AVT_deltaStart = (end - start) * AVT_minRatio + 0.0001;
+//		this.AVT_deltaEnd = (end - start) * AVT_minRatio + 0.0001;
+		this.AVT_deltaStart = getLenght() * 0.2 + 0.0001;
+		this.AVT_deltaEnd = getLenght() * 0.2 + 0.0001;
+		////// System.out.println(world.getScheduler().getTick() + "\t" +
+		////// context.getName() + "\t" + percept.getName()+ "\t" + "Creation" + "\t" +
+		////// "START" + "\t" + AVT_deltaStart);
+		////// System.out.println(world.getScheduler().getTick() + "\t" +
+		////// context.getName() + "\t" + percept.getName()+ "\t" + "Creation" + "\t" +
+		////// "END" + "\t" + AVT_deltaEnd);
+
+		startIncrement =startIncr;
+		endIncrement = endIncr;
 	}
 
 	/**
@@ -171,12 +217,12 @@ public class Range implements Serializable, Comparable, Cloneable {
 	 * @param oracleValue the oracle value
 	 * @param p           the p
 	 */
-	public void adapt(Double oracleValue, double increment) {
+	public void adapt(Double oracleValue, double increment, boolean isOverlap, Context bestContext) {
 		if (!isPerceptEnum()) {
 
 			double minIncrement = Math.min(increment, getIncrement());
 
-			staticAdapt(oracleValue, minIncrement);
+			staticAdapt(oracleValue, minIncrement, isOverlap, bestContext);
 
 			// adaptUsingAVT(c, oracleValue);
 			// adaptWithoutAVT(c, oracleValue);
@@ -189,10 +235,10 @@ public class Range implements Serializable, Comparable, Cloneable {
 		}
 	}
 
-	public void adapt(Double oracleValue) {
+	public void adapt(Double oracleValue, boolean isOverlap, Context bestContext) {
 		if (!isPerceptEnum()) {
 
-			staticAdapt(oracleValue, getIncrement());
+			staticAdapt(oracleValue, getIncrement(), isOverlap, bestContext);
 
 			// adaptUsingAVT(c, oracleValue);
 			// adaptWithoutAVT(c, oracleValue);
@@ -204,6 +250,8 @@ public class Range implements Serializable, Comparable, Cloneable {
 //			}
 		}
 	}
+	
+	
 
 	/**
 	 * Adapt without AVT.
@@ -264,11 +312,11 @@ public class Range implements Serializable, Comparable, Cloneable {
 
 	}
 
-	private void staticAdapt(double oracleValue, double increment) {
+	private void staticAdapt(double oracleValue, double increment, boolean isOverlap, Context bestContext) {
 		if (Math.abs(end - oracleValue) < Math.abs(oracleValue - start)) {
-			adaptEnd(oracleValue, increment);
+			adaptEnd(oracleValue, increment, isOverlap, bestContext);
 		} else {
-			adaptStart(oracleValue, increment);
+			adaptStart(oracleValue, increment, isOverlap, bestContext);
 		}
 	}
 
@@ -353,15 +401,15 @@ public class Range implements Serializable, Comparable, Cloneable {
 
 	}
 
-	private void adaptEnd(double oracleValue, double increment) {
+	private void adaptEnd(double oracleValue, double increment, boolean isOverlap, Context bestContext) {
 		world.trace(TRACE_LEVEL.STATE, new ArrayList<String>(Arrays.asList("INCREMENT ON END ADAPT", context.getName(), percept.getName(), "" + increment )));
 
-		classicEndAdapt(oracleValue, increment);
+		classicEndAdapt(oracleValue, increment, isOverlap, bestContext);
 		// adaptEndWithSplitting(oracleValue, increment);
 
 	}
 
-	private void classicEndAdapt(double oracleValue, double increment) {
+	private void classicEndAdapt(double oracleValue, double increment, boolean isOverlap, Context bestContext) {
 		if (!(contains(oracleValue) == 0.0)) { // value not contained --> end range will grow (growing direction = 1)
 
 			if (lastEndDirection == -1) { // opposite direction -> negative feedback
@@ -378,7 +426,10 @@ public class Range implements Serializable, Comparable, Cloneable {
 				// endIncrement *=2;
 			}
 
+			
 			this.setEnd(end + endIncrement);
+			
+			
 		} else { // value contained --> end range will shrink (shrinking direction = -1)
 
 			if (lastEndDirection == 1) { // opposite direction -> negative feedback
@@ -389,13 +440,18 @@ public class Range implements Serializable, Comparable, Cloneable {
 			lastEndDirection = -1; // shrinking direction
 
 			if (endCriticality == 1) { // negative feedback -> increment decreases
-				endIncrement /= 2;
+				endIncrement /= 3;
 			} else if (endCriticality == 0) { // positive feedback -> increment increases
 				endIncrement = Math.min(percept.getRadiusContextForCreation(), endIncrement * 2);
 				// endIncrement *=2;
 			}
 
-			this.setEnd(end - endIncrement);
+			if(isOverlap) {
+				this.setEnd(bestContext.getRanges().get(this.percept).getStart());
+			}else {
+				this.setEnd(end - endIncrement);
+			}
+			
 
 		}
 
@@ -524,16 +580,16 @@ public class Range implements Serializable, Comparable, Cloneable {
 
 	}
 
-	private void adaptStart(double oracleValue, double increment) {
+	private void adaptStart(double oracleValue, double increment, boolean isOverlap, Context bestContext) {
 		world.trace(TRACE_LEVEL.STATE, new ArrayList<String>(Arrays.asList("INCREMENT ON END ADAPT", context.getName(), percept.getName(), "" + increment )));
 
 
-		classicStartAdapt(oracleValue, increment);
+		classicStartAdapt(oracleValue, increment, isOverlap, bestContext);
 		// adaptStartWithSplitting(oracleValue, increment);
 
 	}
 
-	private void classicStartAdapt(double oracleValue, double increment) {
+	private void classicStartAdapt(double oracleValue, double increment, boolean isOverlap, Context bestContext) {
 		if (!(contains(oracleValue) == 0.0)) {
 
 			if (lastStartDirection == -1) {
@@ -574,7 +630,7 @@ public class Range implements Serializable, Comparable, Cloneable {
 			lastStartDirection = -1;
 
 			if (startCriticality == 1) {
-				startIncrement /= 2;
+				startIncrement /= 3;
 			} else if (startCriticality == 0) {
 				startIncrement = Math.min(percept.getRadiusContextForCreation(), startIncrement * 2);
 				// startIncrement *=2;
@@ -589,7 +645,12 @@ public class Range implements Serializable, Comparable, Cloneable {
 //					startIncrement 
 //					);
 
-			this.setStart(start + startIncrement);
+			
+			if(isOverlap) {
+				this.setStart(bestContext.getRanges().get(this.percept).getEnd());
+			}else {
+				this.setStart(start + startIncrement);
+			}
 		}
 
 		// this.setStart(start + getIncrementDependingOnNeighboorDistances("start"));
@@ -976,16 +1037,16 @@ public class Range implements Serializable, Comparable, Cloneable {
 		if (overlapDistance(overlappingContextRanges) > nonOverlapDistance(overlappingContextRanges)) {
 
 			if (Math.abs(end - border) > Math.abs(border - start)) {
-				adaptEnd(border, increment);
+				adaptEnd(border, increment, false, null);
 			} else {
-				adaptStart(border, increment);
+				adaptStart(border, increment, false, null);
 			}
 
 		} else {
 			if (Math.abs(end - border) < Math.abs(border - start)) {
-				adaptEnd(border, increment);
+				adaptEnd(border, increment, false, null);
 			} else {
-				adaptStart(border, increment);
+				adaptStart(border, increment, false, null);
 			}
 		}
 
@@ -1447,18 +1508,18 @@ public class Range implements Serializable, Comparable, Cloneable {
 			////// " + (Math.abs(newStartValue-this.start)));
 		}
 
-//		if ((Double) newStartValue != null) {
-//			if (newStartValue < percept.getMin()) {
-//				this.start = percept.getMin();
-//
-//			} else {
-//				this.start = newStartValue;
-//			}
-//		} else {
-//			this.start = newStartValue;
-//		}
+		if ((Double) newStartValue != null) {
+			if (newStartValue < percept.getMin()) {
+				this.start = percept.getMin();
+
+			} else {
+				this.start = newStartValue;
+			}
+		} else {
+			this.start = newStartValue;
+		}
 		
-		this.start = newStartValue;
+		//this.start = newStartValue;
 		
 		
 
@@ -1486,17 +1547,17 @@ public class Range implements Serializable, Comparable, Cloneable {
 			////// System.out.println(context.getName() + " " + percept.getName() + " END "
 			////// + (Math.abs(newEndValue-this.end)));
 		}
-//		if ((Double) newEndValue != null) {
-//			if (newEndValue > percept.getMax()) {
-//				this.end = percept.getMax();
-//			} else {
-//				this.end = newEndValue;
-//			}
-//		} else {
-//			this.end = newEndValue;
-//		}
+		if ((Double) newEndValue != null) {
+			if (newEndValue > percept.getMax()) {
+				this.end = percept.getMax();
+			} else {
+				this.end = newEndValue;
+			}
+		} else {
+			this.end = newEndValue;
+		}
 		
-		this.end = newEndValue;
+		//this.end = newEndValue;
 		
 
 		if (this.context != null) {
@@ -1636,5 +1697,13 @@ public class Range implements Serializable, Comparable, Cloneable {
 		return this.contains(percept.getValue(), context.getEnvironment().getContextCreationNeighborhood(context, percept))
 				|| this.contains(percept.getValue(), context.getEnvironment().getContextCreationNeighborhood(context, percept));
 	}
+	
+	public double getStartIncrement() {
+		return startIncrement;
+	}
+	
+	public double getEndIncrement() {
+		return endIncrement;
+	}
 
 }
diff --git a/AMOEBAonAMAK/src/agents/context/localModel/LocalModel.java b/AMOEBAonAMAK/src/agents/context/localModel/LocalModel.java
index 2aab80c87bae8e396c3e7eaec30e4073612fb3b9..befa9dfb30016c48f16f53bf1a459bba5d4cb05c 100644
--- a/AMOEBAonAMAK/src/agents/context/localModel/LocalModel.java
+++ b/AMOEBAonAMAK/src/agents/context/localModel/LocalModel.java
@@ -9,102 +9,176 @@ import agents.context.Experiment;
 /**
  * A LocalModel is used by a Context to store information and generate prediction.
  */
-public interface LocalModel {
-
+public abstract class LocalModel {
+	
+	protected LocalModel modifier = null;
+	protected LocalModel modified = null; // Be careful ! One letter and it's totally a different thing !
+	
+	
 	/**
 	 * Sets the context that use the LocalModel
 	 * @param context
 	 */
-	public void setContext(Context context);
+	public abstract void setContext(Context context);
 	
 	/**
 	 * gets the context that use the LocalModel
 	 * @return
 	 */
-	public Context getContext();
+	public abstract Context getContext();
 	
 	/**
 	 * Gets the proposition.
 	 *
 	 * @return the proposition
 	 */
-	public double getProposition();
+	public abstract double getProposition();
 	
 	/**
 	 * Gets the proposition with the highest value possible
 	 * @return
 	 */
-	public double getMaxProposition();
+	public abstract double getMaxProposition();
 	
 	/**
 	 * Return the point (percept value) that produce the max proposition, considering some percepts are fixed.
 	 * @return a HashMap with percept names as key, and their corresponding value. The oracle is the max proposition
 	 * @see LocalModel#getMaxProposition(Context)  
 	 */
-	public HashMap<String, Double> getMaxWithConstraint(HashMap<String, Double> fixedPercepts);;
+	public abstract HashMap<String, Double> getMaxWithConstraint(HashMap<String, Double> fixedPercepts);
 	
 	/**
 	 * Gets the proposition with the lowest value possible
 	 * @return
 	 */
-	public double getMinProposition();
+	public abstract double getMinProposition();
 
 	/**
 	 * Gets the formula of the model
 	 * @return
 	 */
-	public String getCoefsFormula();
+	public String getCoefsFormula() {
+		Double[] coefs = getCoef();
+		String result = "" +coefs[0];
+		if (Double.isNaN(coefs[0])) System.exit(0);
+		
+		for (int i = 1 ; i < coefs.length ; i++) {
+			if (Double.isNaN(coefs[i])) coefs[i] = 0.0;
+			
+			result += "\t" + coefs[i] + " (" + getContext().getAmas().getPercepts().get(i-1) +")";
+			
+		}
+		
+		return result;
+	}
 	
 	/**
 	 * Update the model with a new experiment.
 	 * @param newExperiment
 	 * @param weight the weight of the new experiment in the compute of the model
 	 */
-	public void updateModel(Experiment newExperiment, double weight);
+	public abstract void updateModel(Experiment newExperiment, double weight);
 	
-	public String coefsToString();
+	public String coefsToString() {
+		String coefsString = "";
+		Double[] coefs = getCoef();
+		if(coefs != null) {
+			for(int i=0; i<coefs.length; i ++) {
+				coefsString += coefs[i]  + "\t";
+			}
+		}
+		return coefsString;
+	}
 	
 	/**
 	 * The distance between an experiment and the model.
 	 * @param experiment
 	 * @return
 	 */
-	public double distance(Experiment experiment);
+	public abstract double distance(Experiment experiment);
 	
 	/**
 	 * Gets the experiments used to properly initialize the model.
 	 * @return
 	 */
-	public ArrayList<Experiment> getFirstExperiments();
+	public abstract ArrayList<Experiment> getFirstExperiments();
 	
 	/**
 	 * Sets the experiments used to properly initialize the model.
 	 * This may not trigger an update of the model.
 	 * @param frstExp
 	 */
-	public void setFirstExperiments( ArrayList<Experiment> frstExp);
+	public abstract void setFirstExperiments( ArrayList<Experiment> frstExp);
 	
 	/**
 	 * Tells if the model has enough experiments to produce a good prediction.
 	 * For example, a regression need a number of experiments equals or superior to the number of dimension.
 	 * @return
 	 */
-	public boolean finishedFirstExperiments();
+	public abstract boolean finishedFirstExperiments();
 	
 	/**
 	 * Gets coefficients of the model
 	 * @return
 	 */
-	public Double[] getCoef();
+	public abstract Double[] getCoef();
 	
 	/**
 	 * Sets coefficients of the model
 	 * @return
 	 */
-	public void setCoef(Double[] coef);
+	public abstract void setCoef(Double[] coef);
 
 	/**
 	 * Gets the {@link TypeLocalModel} corresponding to this LocalModel
 	 */
-	public TypeLocalModel getType();
+	public abstract TypeLocalModel getType();
+	
+	/**
+	 * Sets the type of the LocalModel, if it ever has to change.
+	 */
+	public abstract void setType(TypeLocalModel type);
+	
+	/**
+	 * Set an LocalModel that modify the behavior of the current LocalModel.<br/>
+	 * The modifier can then be used by calling {@link LocalModel#getModifier()} on the modified LocalModel.
+	 * @param modifier a LocalModel
+	 * @see LocalModel#getModifier()
+	 * @see LocalModel#hasModifier()
+	 */
+	public void setModifier(LocalModel modifier) {
+		this.modifier = modifier;
+		modifier.modified = this;
+	}
+	
+	/**
+	 * @return true if the LocalModel has an usable modifier.
+	 */
+	public boolean hasModifier() {
+		return modifier != null;
+	}
+	
+	/**
+	 * Get the LocalModel that modify the behavior of the current LocalModel.
+	 * @return a LocalModel or null
+	 */
+	public LocalModel getModifier() {
+		return modifier;
+	}
+	
+	/**
+	 * 
+	 * @return true if the LocalModel is a modifier, it means that {@link LocalModel#getModified()} is not null
+	 */
+	public boolean hasModified() {
+		return modified != null;
+	}
+	
+	/**
+	 * If the LocalModel is a modifier, return the modified LocalModel
+	 * @return a LocalModel or null
+	 */
+	public LocalModel getModified() {
+		return modified;
+	}
 }
diff --git a/AMOEBAonAMAK/src/agents/context/localModel/LocalModelCoopModifier.java b/AMOEBAonAMAK/src/agents/context/localModel/LocalModelCoopModifier.java
new file mode 100644
index 0000000000000000000000000000000000000000..fd860bc0c2a5f938afd9035b79bf45e3108c9d96
--- /dev/null
+++ b/AMOEBAonAMAK/src/agents/context/localModel/LocalModelCoopModifier.java
@@ -0,0 +1,204 @@
+package agents.context.localModel;
+
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Set;
+
+import agents.context.Context;
+import agents.context.Experiment;
+import agents.percept.Percept;
+import utils.Pair;
+
+public class LocalModelCoopModifier extends LocalModel {
+	private LocalModel localModel;
+	private TypeLocalModel type;
+	
+	public LocalModelCoopModifier(LocalModel localModel, TypeLocalModel type) {
+		this.localModel = localModel;
+		localModel.setModifier(this);
+		setType(type);
+	}
+	
+	@Override
+	public Context getContext() {
+		return localModel.getContext();
+	}
+	
+	@Override
+	public void setContext(Context context) {
+		localModel.setContext(context);
+	}
+
+	@Override
+	public double getProposition() {
+		return localModel.getProposition();
+	}
+	
+	public double getPropositionCoop() {
+		
+		return localModel.getProposition();
+	}
+
+	@Override
+	public double getMaxProposition() {
+		return localModel.getMaxProposition();
+	}
+
+	@Override
+	public HashMap<String, Double> getMaxWithConstraint(HashMap<String, Double> fixedPercepts) {
+		ArrayList<Percept> percepts = getContext().getAmas().getPercepts();
+		
+		HashMap<String, Double> result = new HashMap<String, Double>();
+		Double[] coefs = getCoefCoop();
+		result.put("oracle", coefs[0]);
+
+		if (Double.isNaN(coefs[0]))
+			throw new ArithmeticException("First coeficient of model cannot be NaN");
+		
+		for (int i = 1 ; i < coefs.length ; i++) {
+			double coef = coefs[i];
+			if (Double.isNaN(coef)) coef = 0.0;
+			double pos;
+			Percept p = percepts.get(i-1);
+			if(fixedPercepts.containsKey(p.getName())) {
+				pos = fixedPercepts.get(p.getName());
+			} else {
+				if(coef>0) {
+					pos = getContext().getRanges().get(p).getEnd();
+				}
+				else {
+					pos = getContext().getRanges().get(p).getStart();
+				}
+			}
+			double value = coef * pos;
+			result.put("oracle", result.get("oracle") + value);
+			result.put(p.getName(), pos);
+		}
+		
+		return result;
+	}
+
+	@Override
+	public double getMinProposition() {
+		return localModel.getMinProposition();
+	}
+
+	@Override
+	public void updateModel(Experiment newExperiment, double weight) {
+		localModel.updateModel(newExperiment, weight);
+	}
+
+	@Override
+	public String coefsToString() {
+		return localModel.coefsToString();
+	}
+
+	@Override
+	public double distance(Experiment experiment) {
+		return localModel.distance(experiment);
+	}
+
+	@Override
+	public ArrayList<Experiment> getFirstExperiments() {
+		return localModel.getFirstExperiments();
+	}
+
+	@Override
+	public void setFirstExperiments(ArrayList<Experiment> frstExp) {
+		localModel.setFirstExperiments(frstExp);
+	}
+
+	@Override
+	public boolean finishedFirstExperiments() {
+		return localModel.finishedFirstExperiments();
+	}
+	
+	@Override
+	public Double[] getCoef() {
+		return localModel.getCoef();
+	}
+	
+	@Override
+	public String getCoefsFormula() {
+		Double[] coefs = getCoefCoop();
+		String result = "" +coefs[0];
+		if (Double.isNaN(coefs[0])) System.exit(0);
+		
+		for (int i = 1 ; i < coefs.length ; i++) {
+			if (Double.isNaN(coefs[i])) coefs[i] = 0.0;
+			
+			result += "\t" + coefs[i] + " (" + getContext().getAmas().getPercepts().get(i-1) +")";
+			
+		}
+		result += "\nFrom " +localModel.getType()+" : "+localModel.getCoefsFormula(); 
+		
+		return result;
+	}
+	
+	public Double[] getCoefCoop() {
+		Set<Context> neighbors = getNeighbors();
+		Double[] coef = localModel.getCoef().clone();
+		int i = 0;
+		for(Percept p : getContext().getRanges().keySet()) {
+			for(Context c : neighbors) {
+				LocalModel model = c.getLocalModel();
+				while(model.hasModified()) {
+					model = model.getModified();
+				}
+				Double[] coef2 = model.getCoef();
+				coef[i] += coef2[i]/neighbors.size()*getCommonFrontierCoef(p, getContext(), c);
+			}
+			i++;
+		}
+		return coef;
+	}
+	
+	private Set<Context> getNeighbors() {
+		Set<Percept> percepts = getContext().getRanges().keySet();
+		Set<Context> contexts = new HashSet<>(getContext().getAmas().getContexts());
+		
+		for(Percept p : percepts) {
+			contexts.removeIf(c -> !(p.inNeighborhood(c, getContext().getRangeByPercept(p).getStart()) || p.inNeighborhood(c, getContext().getRangeByPercept(p).getEnd())));
+		}
+		return contexts;
+	}
+	
+	private double getCommonFrontierCoef(Percept p, Context c1, Context c2) {
+		ArrayList<Pair<Context, Double>> sorted = new ArrayList<>();
+		sorted.add(new Pair<Context, Double>(c1, c1.getRangeByPercept(p).getStart()));
+		sorted.add(new Pair<Context, Double>(c2, c2.getRangeByPercept(p).getStart()));
+		sorted.add(new Pair<Context, Double>(c1, c1.getRangeByPercept(p).getEnd()));
+		sorted.add(new Pair<Context, Double>(c2, c2.getRangeByPercept(p).getEnd()));
+		
+		sorted.sort(new Comparator<Pair<Context, Double>>() {
+			@Override
+			public int compare(Pair<Context, Double> o1, Pair<Context, Double> o2) {
+				return Double.compare(o1.b, o2.b);
+			}
+		});
+		
+		if(sorted.get(0).a == sorted.get(1).a ) {
+			return 0.0;
+		} else {
+			return (Math.abs(sorted.get(2).b-sorted.get(1).b)+1)/(Math.abs(sorted.get(3).b-sorted.get(0).b)+1);
+		}
+	}
+
+	@Override
+	public void setCoef(Double[] coef) {
+		localModel.setCoef(coef);
+	}
+
+	@Override
+	public TypeLocalModel getType() {
+		return type;
+	}
+
+	@Override
+	public void setType(TypeLocalModel type) {
+		this.type = type;
+	}
+
+}
diff --git a/AMOEBAonAMAK/src/agents/context/localModel/LocalModelMillerRegression.java b/AMOEBAonAMAK/src/agents/context/localModel/LocalModelMillerRegression.java
index 09e8863819aa2a36fcb0d8d9d4896a2bc36979e3..6de7f0d3cfa97447976f90373cca51bd4fdb5a96 100644
--- a/AMOEBAonAMAK/src/agents/context/localModel/LocalModelMillerRegression.java
+++ b/AMOEBAonAMAK/src/agents/context/localModel/LocalModelMillerRegression.java
@@ -14,9 +14,7 @@ import utils.TRACE_LEVEL;
 /**
  * The Class LocalModelMillerRegression.
  */
-public class LocalModelMillerRegression implements LocalModel{
-	
-	private Context context;
+public class LocalModelMillerRegression extends LocalModel{
 	
 	/** The n parameters. */
 	private int nParameters;
@@ -24,11 +22,14 @@ public class LocalModelMillerRegression implements LocalModel{
 	/** The regression. */
 	transient private Regression regression;
 
+	private Context context;
 	
 	/** The coef. */
 	private Double[] coefs;
 	
 	private ArrayList<Experiment> firstExperiments;
+	
+	public boolean isReinforcement = false;
 
 	/**
 	 * Instantiates a new local model miller regression.
@@ -36,15 +37,17 @@ public class LocalModelMillerRegression implements LocalModel{
 	 * @param world the world
 	 */
 	public LocalModelMillerRegression(Context associatedContext) {
-		context = associatedContext;
+		this.context = associatedContext;
 		ArrayList<Percept> var = associatedContext.getAmas().getPercepts();
 		this.nParameters = var.size();
 		regression = new Regression(nParameters,true);
 		firstExperiments = new ArrayList<Experiment>();
+		
+		isReinforcement = associatedContext.getAmas().isReinforcement();
 	}
 	
 	public LocalModelMillerRegression(Context associatedContext, Double[] coefsCopy, List<Experiment> fstExperiments) {
-		context = associatedContext;
+		this.context = associatedContext;
 		ArrayList<Percept> var = associatedContext.getAmas().getPercepts();
 		this.nParameters = var.size();
 		regression = new Regression(nParameters,true);
@@ -53,30 +56,20 @@ public class LocalModelMillerRegression implements LocalModel{
 	}
 	
 	@Override
-	public void setContext(Context context) {
-		this.context = context;
+	public Context getContext() {
+		return context;
 	}
 	
 	@Override
-	public Context getContext() {
-		return context;
+	public void setContext(Context context) {
+		this.context = context;
 	}
 	
-	/**
-	 * Sets the coef.
-	 *
-	 * @param coef the new coef
-	 */
 	@Override
 	public void setCoef(Double[] coef) {
 		this.coefs = coef.clone();
 	}
 	
-	/**
-	 * Gets the coef.
-	 *
-	 * @return the coef
-	 */
 	@Override
 	public Double[] getCoef() {
 		return coefs;
@@ -159,6 +152,7 @@ public class LocalModelMillerRegression implements LocalModel{
 			if (Double.isNaN(coef)) coef = 0.0;
 			double pos;
 			Percept p = percepts.get(i-1);
+			
 			if(fixedPercepts.containsKey(p.getName())) {
 				pos = fixedPercepts.get(p.getName());
 			} else {
@@ -169,6 +163,7 @@ public class LocalModelMillerRegression implements LocalModel{
 					pos = context.getRanges().get(p).getStart();
 				}
 			}
+			
 			double value = coef * pos;
 			result.put("oracle", result.get("oracle") + value);
 			result.put(p.getName(), pos);
@@ -215,27 +210,14 @@ public class LocalModelMillerRegression implements LocalModel{
 			
 	}
 	
-	@Override
-	public String getCoefsFormula() {
-		String result = "" +coefs[0];
-	//	//System.out.println("Result 0" + " : " + result);
-		if (coefs[0] == Double.NaN) System.exit(0);
-		
-		for (int i = 1 ; i < coefs.length ; i++) {
-			if (Double.isNaN(coefs[i])) coefs[i] = 0.0;
-			
-			result += "\t" + coefs[i] + " (" + context.getAmas().getPercepts().get(i-1) +")";
-			
-		}
-		
-		return result;
-
-}
 	@Override
 	public void updateModel(Experiment newExperiment, double weight) {
 		context.getAmas().getEnvironment().trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList(context.getName(),"NEW POINT REGRESSION", "FIRST POINTS :", ""+firstExperiments.size(), "OLD MODEL :", coefsToString()))); 
 		
-		if(firstExperiments.size()< (nParameters + 2)) {
+		if(isReinforcement) {
+			updateModelReinforcement(newExperiment, weight);
+		}
+		else if(firstExperiments.size()< (nParameters + 2)) {
 			firstExperiments.add(newExperiment); 
 			updateModel();
 			
@@ -247,9 +229,54 @@ public class LocalModelMillerRegression implements LocalModel{
 		context.getAmas().getEnvironment().trace(TRACE_LEVEL.INFORM,new ArrayList<String>(Arrays.asList(context.getName(),"NEW POINT REGRESSION", "FIRST POINTS :", ""+firstExperiments.size(), "MODEL :", coefsToString()))); 
 	}
 	
-	public void updateModel() {
+	
+	
+	
+	
+	
+	
+	public void updateModelReinforcement(Experiment newExperiment, double weight) {
+		
+		context.getAmas().getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList(context.getName(),"REINFORCEMENT")));
+		
+		
+		double weightedNewProposition;
+		
+		if(coefs != null) {
+			weightedNewProposition = (newExperiment.getOracleProposition() * weight) + ((1-weight) * this.getProposition());
+			context.getAmas().getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList(context.getName(),weight+ " " + newExperiment.getOracleProposition(),(1-weight)+  " " + this.getProposition())));
+		}
+		else {
+			weightedNewProposition = newExperiment.getOracleProposition();
+			context.getAmas().getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList(context.getName(), "NEW CTXT " + newExperiment.getOracleProposition())));
+		}
+		
+		
+		regression = new Regression(nParameters,true);
+		
+		int i = 0;
+		while (regression.getN() < nParameters + 2) { 
+			
+			//context.getAmas().getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList(context.getName(),i+"", ""+firstExperiments.get(i%firstExperiments.size()).getValuesAsArray(), firstExperiments.get(i%firstExperiments.size()).getOracleProposition()+"" )));
+			regression.addObservation(newExperiment.getValuesAsArray(), weightedNewProposition);
+			i++;
+		}
 		
+
+		double[] coef = regression.regress().getParameterEstimates();
+		coefs = new Double[coef.length];
+		for(int j = 0; j < coef.length; j++) {
+			coefs[j] = coef[j];
+		}
 		
+	}
+	
+	
+	
+	
+	public void updateModel() {
+		
+		context.getAmas().getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList(context.getName(),"FIRST EXPERIMENTS")));
 		regression = new Regression(nParameters,true);
 		
 		for (Experiment exp : firstExperiments) {
@@ -261,6 +288,7 @@ public class LocalModelMillerRegression implements LocalModel{
 		int i = 0;
 		while (regression.getN() < nParameters + 2) { 
 			
+			//context.getAmas().getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList(context.getName(),i+"", ""+firstExperiments.get(i%firstExperiments.size()).getValuesAsArray(), firstExperiments.get(i%firstExperiments.size()).getOracleProposition()+"" )));
 			regression.addObservation(firstExperiments.get(i%firstExperiments.size()).getValuesAsArray(), firstExperiments.get(i%firstExperiments.size()).getOracleProposition());
 			i++;
 		}
@@ -277,6 +305,8 @@ public class LocalModelMillerRegression implements LocalModel{
 	
 	public void updateModelWithExperimentAndWeight(Experiment newExperiment, double weight, int numberOfPoints) {
 		
+		context.getAmas().getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList(context.getName(),"EXPERIMENTS WITH WEIGHT")));
+		
 		regression = new Regression(nParameters,true);
 
 		
@@ -288,10 +318,12 @@ public class LocalModelMillerRegression implements LocalModel{
 		Pair<double[][], double[]> artificialSituations = getRandomlyDistributedArtificialExperiments((int)(numberOfPointsForRegression - (numberOfPointsForRegression*weight)));
 		//Pair<double[][], double[]> artificialSituations = getEquallyDistributedArtificialExperiments((int)(numberOfPointsForRegression - (numberOfPointsForRegression*weight)));
 		
+		context.getAmas().getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList(context.getName(),"ARTIFICIAL" )));
 
 		int numberOfArtificialPoints = artificialSituations.getB().length;
 		for (int i =0;i<numberOfArtificialPoints;i++) {
 			
+			context.getAmas().getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList(context.getName(),i+"", ""+artificialSituations.getA()[i].toString(), artificialSituations.getB()[i]+"" )));
 			regression.addObservation(artificialSituations.getA()[i], artificialSituations.getB()[i]);	
 		}
 		
@@ -303,9 +335,11 @@ public class LocalModelMillerRegression implements LocalModel{
 		else {
 			numberOfXPPoints = (int)(numberOfPointsForRegression*weight);
 		}
-
+		
+		context.getAmas().getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList(context.getName(),"XP")));
 		for (int i =0;i<numberOfXPPoints;i++) {
 			
+			context.getAmas().getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList(context.getName(),i+"", ""+newExperiment.getValuesAsArray(), newExperiment.getOracleProposition()+"" )));
 			regression.addObservation(newExperiment.getValuesAsArray(), newExperiment.getOracleProposition());
 			
 			
@@ -500,4 +534,8 @@ public class LocalModelMillerRegression implements LocalModel{
 	public TypeLocalModel getType() {
 		return TypeLocalModel.MILLER_REGRESSION;
 	}
+
+	@Override
+	public void setType(TypeLocalModel type) {
+	}
 }
diff --git a/AMOEBAonAMAK/src/agents/context/localModel/TypeLocalModel.java b/AMOEBAonAMAK/src/agents/context/localModel/TypeLocalModel.java
index 8d52a78c587b3de8eb38f1a35d03e55199a4bfac..bc05c3fd1d0c892af001dcddff6bedf8f61ad2d0 100644
--- a/AMOEBAonAMAK/src/agents/context/localModel/TypeLocalModel.java
+++ b/AMOEBAonAMAK/src/agents/context/localModel/TypeLocalModel.java
@@ -1,6 +1,12 @@
 package agents.context.localModel;
 
 import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+
+import agents.context.localModel.factories.LocalModelCoopFactory;
+import agents.context.localModel.factories.LocalModelFactory;
+import agents.context.localModel.factories.LocalModelMillerRegressionFactory;
 
 /**
  * Defines the different implemented local model. Each local model is associated
@@ -9,5 +15,24 @@ import java.io.Serializable;
  */
 public enum TypeLocalModel implements Serializable {
 	/** The miller regression. */
-	MILLER_REGRESSION
+	MILLER_REGRESSION(new LocalModelMillerRegressionFactory()),
+	
+	COOP_MILLER_REGRESSION(new LocalModelCoopFactory(MILLER_REGRESSION.factory));
+	
+	public final LocalModelFactory factory;
+	private static final Map<LocalModelFactory, TypeLocalModel> BY_FACTORY = new HashMap<>();
+	
+	static {
+		for (TypeLocalModel t : values()) {
+			BY_FACTORY.put(t.factory, t);
+		}
+	}
+	
+	private TypeLocalModel(LocalModelFactory factory) {
+		this.factory = factory;
+	}
+	
+	public static TypeLocalModel valueOf(LocalModelFactory factory) {
+		return BY_FACTORY.get(factory);
+	}
 }
diff --git a/AMOEBAonAMAK/src/agents/context/localModel/factories/LocalModelCoopFactory.java b/AMOEBAonAMAK/src/agents/context/localModel/factories/LocalModelCoopFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..d604befe3df6cf5784a96023c07207ff9a428028
--- /dev/null
+++ b/AMOEBAonAMAK/src/agents/context/localModel/factories/LocalModelCoopFactory.java
@@ -0,0 +1,41 @@
+package agents.context.localModel.factories;
+
+import agents.context.localModel.LocalModel;
+import agents.context.localModel.LocalModelCoopModifier;
+import agents.context.localModel.TypeLocalModel;
+
+/**
+ * A factory for creating {@link LocalModelCoopModifier}. Take a {@link LocalModel} as param, 
+ * or a {@link LocalModelFactory} with all param to build a LocalModel.
+ * @author Hugo
+ *
+ */
+public class LocalModelCoopFactory implements LocalModelFactory {
+	private LocalModelFactory factory;
+	
+	public LocalModelCoopFactory(LocalModelFactory factory) {
+		this.factory = factory;
+	}
+	
+	public LocalModelCoopFactory() {
+		this.factory = null;
+	}
+	
+	@Override
+	public LocalModel buildLocalModel(Object... params) {
+		if(factory != null) {
+			return new LocalModelCoopModifier(factory.buildLocalModel(params), TypeLocalModel.valueOf(this));
+		} else {
+			if(params.length != 1) {
+				throw new IllegalArgumentException("Expected one "+LocalModel.class+", got "+params.length+" arguments");
+			}
+			if(!(params[0] instanceof LocalModel)) {
+				throw new IllegalArgumentException("Expected "+LocalModel.class+", got "+params[0].getClass());
+			}
+			
+			LocalModel lm = (LocalModel) params[0];
+			return new LocalModelCoopModifier(lm, TypeLocalModel.valueOf(this));
+		}
+	}
+	
+}
diff --git a/AMOEBAonAMAK/src/agents/context/localModel/factories/LocalModelFactory.java b/AMOEBAonAMAK/src/agents/context/localModel/factories/LocalModelFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..e220acc63f04b00ec4c2134640e887c186811b66
--- /dev/null
+++ b/AMOEBAonAMAK/src/agents/context/localModel/factories/LocalModelFactory.java
@@ -0,0 +1,7 @@
+package agents.context.localModel.factories;
+
+import agents.context.localModel.LocalModel;
+
+public interface LocalModelFactory {
+	public LocalModel buildLocalModel(Object ...params);
+}
diff --git a/AMOEBAonAMAK/src/agents/context/localModel/factories/LocalModelMillerRegressionFactory.java b/AMOEBAonAMAK/src/agents/context/localModel/factories/LocalModelMillerRegressionFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..9cbb9279ebb39343a009df6f846bc00d49ef4b9a
--- /dev/null
+++ b/AMOEBAonAMAK/src/agents/context/localModel/factories/LocalModelMillerRegressionFactory.java
@@ -0,0 +1,26 @@
+package agents.context.localModel.factories;
+
+import agents.context.Context;
+import agents.context.localModel.LocalModel;
+import agents.context.localModel.LocalModelMillerRegression;
+
+/**
+ * A factory for creating {@link LocalModelMillerRegression}. Take a {@link Context} as param.
+ * @author Hugo
+ *
+ */
+public class LocalModelMillerRegressionFactory implements LocalModelFactory {
+
+	@Override
+	public LocalModel buildLocalModel(Object... params) {
+		if(params.length != 1) {
+			throw new IllegalArgumentException("Expected one "+Context.class+", got "+params.length+" arguments");
+		}
+		if(!(params[0] instanceof Context)) {
+			throw new IllegalArgumentException("Expected "+Context.class+", got "+params[0].getClass());
+		}
+		Context c = (Context) params[0];
+		return new LocalModelMillerRegression(c);
+	}
+
+}
diff --git a/AMOEBAonAMAK/src/agents/head/EndogenousRequest.java b/AMOEBAonAMAK/src/agents/head/EndogenousRequest.java
index e6c7ea98015042a6cc07c5e84d199b729cb16343..599f41ebe8967811363e03d149e0fef7cf5b1153 100644
--- a/AMOEBAonAMAK/src/agents/head/EndogenousRequest.java
+++ b/AMOEBAonAMAK/src/agents/head/EndogenousRequest.java
@@ -97,6 +97,10 @@ public class EndogenousRequest {
 		return requestType;
 	}
 	
+	public HashMap<Percept, Pair<Double, Double>> getBounds(){
+		return bounds;
+	}
+	
 	
 	
 }
diff --git a/AMOEBAonAMAK/src/agents/head/Head.java b/AMOEBAonAMAK/src/agents/head/Head.java
index 261df1a11146fb554e063c0d72abb80bddd11757..e9cd39f20ecde87beb29684b399bce65a89941e4 100644
--- a/AMOEBAonAMAK/src/agents/head/Head.java
+++ b/AMOEBAonAMAK/src/agents/head/Head.java
@@ -13,8 +13,10 @@ import java.util.Queue;
 import agents.AmoebaAgent;
 import agents.context.Context;
 import agents.context.CustomComparator;
+import agents.context.Experiment;
 import agents.percept.Percept;
 import kernel.AMOEBA;
+import kernel.AmoebaData;
 import ncs.NCS;
 import utils.Pair;
 import utils.PrintOnce;
@@ -38,7 +40,22 @@ public class Head extends AmoebaAgent {
 	public Criticalities endogenousCriticalities;
 
 	private ArrayList<Context> activatedContexts = new ArrayList<Context>();
-	private ArrayList<Context> activatedNeighborsContexts = new ArrayList<Context>();
+	public ArrayList<Context> activatedNeighborsContexts = new ArrayList<Context>();
+	
+	public Double meanNeighborhoodVolume;
+	public HashMap<Percept, Double> meanNeighborhoodRaduises;
+	public HashMap<Percept, Double> meanNeighborhoodStartIncrements;
+	public HashMap<Percept, Double> meanNeighborhoodEndIncrements;
+	
+	public Double minMeanNeighborhoodRaduises = null;
+	public Double minMeanNeighborhoodStartIncrements = null;
+	public Double minMeanNeighborhoodEndIncrements = null;
+	
+	public Double minNeighborhoodRadius = null;
+	public Double minNeighborhoodStartIncrement = null;
+	public Double minNeighborhoodEndIncrement = null;
+	
+	public EndogenousRequest lastEndogenousRequest = null;
 
 	Queue<EndogenousRequest> endogenousRequests = new PriorityQueue<EndogenousRequest>(new Comparator<EndogenousRequest>(){
 		   public int compare(EndogenousRequest r1, EndogenousRequest r2) {
@@ -46,6 +63,12 @@ public class Head extends AmoebaAgent {
 			   }
 			});
 	
+	Queue<EndogenousRequest> endogenousChildRequests = new PriorityQueue<EndogenousRequest>(new Comparator<EndogenousRequest>(){
+		   public int compare(EndogenousRequest r1, EndogenousRequest r2) {
+			      return r2.getPriority().compareTo(r1.getPriority());
+			   }
+			});
+	
 	static double lembda = 0.99;
 	// -----------------------------
 	
@@ -76,6 +99,20 @@ public class Head extends AmoebaAgent {
 	@Override
 	public void onAct() {
 		
+		
+		meanNeighborhoodVolume = null;
+		meanNeighborhoodRaduises = null; 
+		meanNeighborhoodEndIncrements = null; 
+		meanNeighborhoodStartIncrements = null; 
+		
+		minMeanNeighborhoodRaduises = Double.POSITIVE_INFINITY;
+		minMeanNeighborhoodStartIncrements = Double.POSITIVE_INFINITY;
+		minMeanNeighborhoodEndIncrements = Double.POSITIVE_INFINITY;
+		
+		minNeighborhoodRadius = Double.POSITIVE_INFINITY;
+		minNeighborhoodStartIncrement = Double.POSITIVE_INFINITY;
+		minNeighborhoodEndIncrement = Double.POSITIVE_INFINITY;
+		
 		getAmas().data.currentCriticalityPrediction = 0;
 		getAmas().data.currentCriticalityMapping = 0;
 		getAmas().data.currentCriticalityConfidence = 0;
@@ -94,6 +131,120 @@ public class Head extends AmoebaAgent {
 		/* The head memorize last used context agent */
 		lastUsedContext = bestContext;
 		bestContext = null;
+		
+		/* Neighbors */
+
+				
+		double neighborhoodVolumesSum = 0;
+		HashMap<Percept,Double> neighborhoodRangesSums = new HashMap<Percept,Double>();
+		HashMap<Percept,Double> neighborhoodStartIncrementSums = new HashMap<Percept,Double>();
+		HashMap<Percept,Double> neighborhoodEndIncrementSums = new HashMap<Percept,Double>();
+		
+		
+		
+		for (Percept pct : getAmas().getPercepts()) {
+			neighborhoodRangesSums.put(pct, 0.0);
+			neighborhoodStartIncrementSums.put(pct, 0.0);
+			neighborhoodEndIncrementSums.put(pct, 0.0);
+		}
+		
+		
+		
+		if(activatedNeighborsContexts.size()>0) {
+			
+			getAmas().getEnvironment()
+			.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("NEIGHBORDBOOD", ""+activatedNeighborsContexts)));
+			
+			
+			
+			for (Context ctxt : activatedNeighborsContexts) {
+				
+				ctxt.isInNeighborhood = true;
+				neighborhoodVolumesSum += ctxt.getVolume();
+				
+				for (Percept pct : ctxt.getRanges().keySet()) {
+					Double oldRadiusSum = neighborhoodRangesSums.get(pct);
+					Double oldStartIncrSum = neighborhoodStartIncrementSums.get(pct);
+					Double oldEndIncrSum = neighborhoodEndIncrementSums.get(pct);
+					
+					if( ctxt.getRanges().get(pct).getRadius() < minNeighborhoodRadius) {
+						minNeighborhoodRadius = ctxt.getRanges().get(pct).getRadius();
+					}
+					
+					if( ctxt.getRanges().get(pct).getStartIncrement() < minNeighborhoodStartIncrement) {
+						minNeighborhoodStartIncrement = ctxt.getRanges().get(pct).getStartIncrement();
+					}
+					
+					if( ctxt.getRanges().get(pct).getEndIncrement() < minNeighborhoodEndIncrement) {
+						minNeighborhoodEndIncrement = ctxt.getRanges().get(pct).getEndIncrement();
+					}
+					
+					
+					
+					
+					neighborhoodRangesSums.put(pct, oldRadiusSum + ctxt.getRanges().get(pct).getRadius());
+					neighborhoodStartIncrementSums.put(pct, oldStartIncrSum + ctxt.getRanges().get(pct).getStartIncrement());
+					neighborhoodEndIncrementSums.put(pct, oldEndIncrSum + ctxt.getRanges().get(pct).getEndIncrement());
+				}
+
+				
+			}
+	
+		meanNeighborhoodVolume = neighborhoodVolumesSum / activatedNeighborsContexts.size();
+		
+		meanNeighborhoodRaduises = new HashMap<Percept, Double>();
+		meanNeighborhoodStartIncrements = new HashMap<Percept, Double>();
+		meanNeighborhoodEndIncrements = new HashMap<Percept, Double>();
+		
+		
+		for (Percept pct : getAmas().getPercepts()) {
+			
+			
+			double meanRadius = neighborhoodRangesSums.get(pct)/activatedNeighborsContexts.size();
+			double meanStartIncrement = neighborhoodStartIncrementSums.get(pct)/activatedNeighborsContexts.size();
+			double meanEndIncrement = neighborhoodEndIncrementSums.get(pct)/activatedNeighborsContexts.size();
+			meanNeighborhoodRaduises.put(pct, meanRadius);
+			meanNeighborhoodStartIncrements.put(pct, meanStartIncrement);
+			meanNeighborhoodEndIncrements.put(pct, meanEndIncrement);
+			
+			if(meanRadius < minMeanNeighborhoodRaduises) {
+				minMeanNeighborhoodRaduises = meanRadius;
+			}
+			if(meanStartIncrement < minMeanNeighborhoodStartIncrements) {
+				minMeanNeighborhoodStartIncrements = meanStartIncrement;
+			}
+			if(meanEndIncrement < minMeanNeighborhoodEndIncrements) {
+				minMeanNeighborhoodEndIncrements = meanEndIncrement;
+			}
+			
+			
+		}
+			
+		
+			
+			
+			getAmas().getEnvironment()
+			.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("NEIGHBORDBOOD", "size", ""+activatedNeighborsContexts.size())));
+			getAmas().getEnvironment()
+			.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("NEIGHBORDBOOD", "meanNeighborhoodVolume", ""+meanNeighborhoodVolume)));
+			getAmas().getEnvironment()
+			.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("NEIGHBORDBOOD", "meanNeighborhoodRaduises", ""+meanNeighborhoodRaduises)));
+			getAmas().getEnvironment()
+			.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("NEIGHBORDBOOD", "meanNeighborhoodStartIncrements", ""+meanNeighborhoodStartIncrements)));
+			getAmas().getEnvironment()
+			.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("NEIGHBORDBOOD", "meanNeighborhoodEndIncrements", ""+meanNeighborhoodEndIncrements)));
+			getAmas().getEnvironment()
+			.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("NEIGHBORDBOOD", "minMeanNeighborhoodRaduises", ""+minMeanNeighborhoodRaduises)));
+			getAmas().getEnvironment()
+			.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("NEIGHBORDBOOD", "minMeanNeighborhoodStartIncrements", ""+minMeanNeighborhoodStartIncrements)));
+			getAmas().getEnvironment()
+			.trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("NEIGHBORDBOOD", "minMeanNeighborhoodEndIncrements", ""+minMeanNeighborhoodEndIncrements)));
+		}
+			
+			
+			
+			
+
 
 		if (getAmas().data.useOracle) {
 			playWithOracle();
@@ -101,6 +252,19 @@ public class Head extends AmoebaAgent {
 			playWithoutOracle();
 		}
 
+		
+		if(isSelfRequest() ) {
+			if(getAmas().data.isSelfLearning) {
+				getAmas().data.selfLearning = true;
+			}else if(getAmas().data.isActiveLearning) {
+				getAmas().data.activeLearning = true;
+			}
+		}else if(isSelfChildRequest()) {
+			if(getAmas().data.isActiveLearning) {
+				getAmas().data.activeLearning = true;
+			}
+		}
+		
 		updateStatisticalInformations(); /// regarder dans le détail, possible que ce pas trop utile
 
 		newContext = null;
@@ -108,15 +272,89 @@ public class Head extends AmoebaAgent {
 	}
 
 	private void playWithOracle() {
+		
+		
+		if(getAmas().isReinforcement()) {
+			
+			int nb=0;
+			Double meanNeighborsLastPredictions = null;
+			
+			
+			
+			
+			
+			ArrayList<Context> usedNeighbors = new ArrayList<Context>();
+			
+			if(activatedNeighborsContexts.size()>0) { 
+				
+				meanNeighborsLastPredictions = 0.0;
+				for (Context ctxt : activatedNeighborsContexts) {
+					
+					
+
+					if(ctxt.lastPrediction != null) {
+						usedNeighbors.add(ctxt);
+						meanNeighborsLastPredictions += ctxt.lastPrediction;
+						nb++;
+					}
+				}
+				if(nb>0) {
+					meanNeighborsLastPredictions /= nb;	
+				}
+				else {
+					meanNeighborsLastPredictions = null;
+				}
+				
+				
+				
+				
+				
+				
+			}
+			
+			
+			
+			
+			if(meanNeighborsLastPredictions != null) {
+//				System.out.println("####################### NEIGHBORS #############################");
+//				System.out.println("ORACLE BEFORE" + getAmas().data.oracleValue);
+				
+				getAmas().data.oracleValue = (getAmas().data.oracleValue + meanNeighborsLastPredictions)/2;
+				
+				
+					
+				
+//				System.out.println("PCT " + getAmas().getPerceptionsAndActionState());
+//				System.out.println("ORACLE AFTER " +getAmas().data.oracleValue);
+//				for(Context ctxt : usedNeighbors) {
+//					System.out.println(ctxt.getName() + " " + ctxt.lastPrediction);
+//				}
+//				System.out.println(usedNeighbors.size() + " " + nb);
+					
+					
+
+			}
+			
+			
+			
+		}
+		
+		
+		
+		
+		
+		
+		
+		
 		getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList("\n\n")));
 		getAmas().data.executionTimes[0]=System.currentTimeMillis();
-		getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList("------------------------------------------------------------------------------------"
+		getEnvironment().trace(TRACE_LEVEL.CYCLE, new ArrayList<String>(Arrays.asList("------------------------------------------------------------------------------------"
 				+ "---------------------------------------- PLAY WITH ORACLE")));
 		
 		if (activatedContexts.size() > 0) {
-			//selectBestContext(); // using highest confidence
-			selectBestContextWithDistanceToModel(); // using closest distance
-			// selectBestContextWithDistanceToModel();
+			//selectBestContextWithConfidenceAndVolume(); // using highest confidence and volume
+			selectBestContextWithDistanceToModelAndVolume(); // using closest distance and volume
+			
 		} else {
 			bestContext = lastUsedContext;
 		}
@@ -148,7 +386,9 @@ public class Head extends AmoebaAgent {
 		getAmas().data.executionTimes[1]=System.currentTimeMillis()- getAmas().data.executionTimes[1];
 
 		getAmas().data.executionTimes[2]=System.currentTimeMillis();
+
 		selfAnalysationOfContexts4();
+
 		getAmas().data.executionTimes[2]=System.currentTimeMillis()- getAmas().data.executionTimes[2];
 		
 		getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList("bestContext != null 2", "" + (bestContext != null))));
@@ -188,6 +428,9 @@ public class Head extends AmoebaAgent {
 		
 		getAmas().data.executionTimes[7]=System.currentTimeMillis();
 		
+		
+		
+		
 		criticalities.addCriticality("spatialCriticality",
 				(getMinMaxVolume() - getVolumeOfAllContexts()) / getMinMaxVolume());
 
@@ -200,12 +443,31 @@ public class Head extends AmoebaAgent {
 		}
 		globalConfidence = globalConfidence / getAmas().getContexts().size();
 
+		
 		if (activatedNeighborsContexts.size() > 1) {
 
-
+			
+			double bestNeighborLastPrediction = Double.NEGATIVE_INFINITY;
+			Context bestNeighbor = null;
 
 			int i = 1;
 			for (Context ctxt : activatedNeighborsContexts) {
+				
+//				if(getAmas().isReinforcement()) {
+//					System.out.println("####################### NEIGHBORS #############################");
+//					System.out.println(ctxt.getName()  + " " + ctxt.lastPrediction);
+//					if(ctxt.lastPrediction> bestNeighborLastPrediction) {
+//						
+//						
+//						bestNeighborLastPrediction = ctxt.lastPrediction;
+//						bestNeighbor = ctxt;
+//					}
+//				}
+				
+
+				
+				
+				
 
 				for (Context otherCtxt : activatedNeighborsContexts.subList(i, activatedNeighborsContexts.size())) {
 
@@ -226,8 +488,22 @@ public class Head extends AmoebaAgent {
 
 				}
 				i++;
+				
+				
+				
 
 			}
+			
+//			if(getAmas().isReinforcement()) {
+//				System.out.println(bestNeighbor.getName() );
+//				getAmas().data.higherNeighborLastPredictionPercepts = new HashMap<String, Double>();
+//				for(Percept pct : getAmas().getPercepts()) {
+//					getAmas().data.higherNeighborLastPredictionPercepts.put(pct.getName(),bestNeighbor.getRanges().get(pct).getCenter());
+//				}
+//				System.out.println(getAmas().data.higherNeighborLastPredictionPercepts );
+//			}
+			
+			
 
 		}
 		
@@ -276,10 +552,13 @@ public class Head extends AmoebaAgent {
 	 * Play without oracle.
 	 */
 	private void playWithoutOracle() {
+		
+		getEnvironment().trace(TRACE_LEVEL.CYCLE, new ArrayList<String>(Arrays.asList("------------------------------------------------------------------------------------"
+				+ "---------------------------------------- PLAY WITHOUT ORACLE")));
 
 		logger().debug("HEAD without oracle", "Nombre de contextes activés: " + activatedContexts.size());
 
-		selectBestContext();
+		selectBestContextWithConfidenceAndVolume();
 		if (bestContext != null) {
 			getAmas().data.noBestContext = false;
 			getAmas().data.prediction = bestContext.getActionProposal();
@@ -317,9 +596,44 @@ public class Head extends AmoebaAgent {
 			logger().debug("HEAD without oracle", "no Best context selected ");
 		}
 		
-		getAmas().data.criticity = Math.abs(getAmas().data.oracleValue - getAmas().data.prediction);
+		//getAmas().data.criticity = Math.abs(getAmas().data.oracleValue - getAmas().data.prediction);
+
+		if(getAmas().isReinforcement()) {
+			if (activatedNeighborsContexts.size() > 1) {
 
-		endogenousPlay();
+				double bestNeighborLastPrediction = Double.NEGATIVE_INFINITY;
+				Context bestNeighbor = null;
+
+				int i = 1;
+				System.out.println("####################### NEIGHBORS ############################# " +  activatedNeighborsContexts.size());
+				for (Context ctxt : activatedNeighborsContexts) {
+					
+		
+					
+					System.out.println(ctxt.getName()  + " " + ctxt.lastPrediction);
+					if(ctxt.lastPrediction> bestNeighborLastPrediction) {
+						
+						
+						bestNeighborLastPrediction = ctxt.lastPrediction;
+						bestNeighbor = ctxt;
+					}
+
+
+				}
+				
+		
+				System.out.println(bestNeighbor.getName() );
+				getAmas().data.higherNeighborLastPredictionPercepts = new HashMap<String, Double>();
+				for(Percept pct : getAmas().getPercepts()) {
+					getAmas().data.higherNeighborLastPredictionPercepts.put(pct.getName(),bestNeighbor.getRanges().get(pct).getCenter());
+				}
+				System.out.println(getAmas().data.higherNeighborLastPredictionPercepts );
+
+			}
+		}
+		
+		
+		//endogenousPlay();
 	}
 
 	private void endogenousPlay() {
@@ -688,22 +1002,34 @@ public class Head extends AmoebaAgent {
 	
 	private void NCSDetection_ChildContext() {
 		
-		getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList("------------------------------------------------------------------------------------"
-				+ "---------------------------------------- NCS DETECTION CHILD CONTEXT")));
-		
-		if(bestContext!=null) {
-			if(!bestContext.getLocalModel().finishedFirstExperiments() && getAmas().data.firstContext && getAmas().getCycle()>0 && !bestContext.isDying()) {
-				bestContext.solveNCS_ChildContext();
-				
-				
+		if(getAmas().data.isActiveLearning) {
+			getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList("------------------------------------------------------------------------------------"
+					+ "---------------------------------------- NCS DETECTION CHILD CONTEXT")));
+			
+			if(bestContext!=null) {
+				if(!bestContext.getLocalModel().finishedFirstExperiments() && getAmas().data.firstContext && getAmas().getCycle()>0 && !bestContext.isDying()) {
+					bestContext.solveNCS_ChildContext();
+					
+					
+				}
 			}
 		}
 		
 		
 		
+		
+		
 	}
 	
 	
+		
+		
+		
+		
+		
+	
+	
+	
 
 	private Double compareClosestContextPairModels(ContextPair<Context, Context> closestContexts) {
 		Double difference = 0.0;
@@ -759,6 +1085,30 @@ public class Head extends AmoebaAgent {
 			newContext = context;
 			newContextCreated = true;
 			
+			newContext.lastPrediction = newContext.getActionProposal();
+			
+			double maxCoef = 0.0;
+			for(Double coef : newContext.getLocalModel().getCoef()) {
+				if(Math.abs(coef)> maxCoef) {
+					maxCoef = Math.abs(coef);
+				}
+			}
+			
+			
+			if(newContext.lastPrediction>0 || maxCoef>10000) {
+//				System.out.println("##################################################################################################################");
+//				System.out.println(getAverageRegressionPerformanceIndicator());
+//				System.out.println(newContext.getName());
+//				System.out.println(newContext.getLocalModel().coefsToString());
+//				System.out.println(newContext.lastPrediction);
+//				System.out.println(getOracleValue());
+				
+				
+				
+				//System.exit(0);
+			}
+			
+
 			
 		}
 		getAmas().data.executionTimes[9]=System.currentTimeMillis()- getAmas().data.executionTimes[9];
@@ -868,33 +1218,42 @@ public class Head extends AmoebaAgent {
 	
 	private void NCSDetection_PotentialRequest() {
 		
-		getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList("------------------------------------------------------------------------------------"
-				+ "---------------------------------------- NCS DETECTION POTENTIAL REQUESTS")));
-		
-		if (activatedNeighborsContexts.size() > 1) {
-			int i = 1;
-			for (Context ctxt : activatedNeighborsContexts) {
-				for (Context otherCtxt : activatedNeighborsContexts.subList(i, activatedNeighborsContexts.size())) {
-					if(!this.isDying() && !ctxt.isDying()) {
-						EndogenousRequest potentialRequest = ctxt.endogenousRequest(otherCtxt);
-						if(potentialRequest != null) {
-							addEndogenousRequest(potentialRequest);
+		if(getAmas().data.isActiveLearning) {
+			getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList("------------------------------------------------------------------------------------"
+					+ "---------------------------------------- NCS DETECTION POTENTIAL REQUESTS")));
+			
+			if (activatedNeighborsContexts.size() > 1) {
+				int i = 1;
+				for (Context ctxt : activatedNeighborsContexts) {
+					for (Context otherCtxt : activatedNeighborsContexts.subList(i, activatedNeighborsContexts.size())) {
+						if(!this.isDying() && !ctxt.isDying()) {
+							EndogenousRequest potentialRequest = ctxt.endogenousRequest(otherCtxt);
+							if(potentialRequest != null) {
+								
+								
+								addEndogenousRequest(potentialRequest, endogenousRequests);
+							}
 						}
 					}
+					i++;
 				}
-				i++;
 			}
+			
+			
 		}
 		
-		getEnvironment().trace(TRACE_LEVEL.STATE, new ArrayList<String>(Arrays.asList("ENDO REQUESTS", ""+endogenousRequests.size())));
-		for(EndogenousRequest endoRequest : endogenousRequests) {
-			getEnvironment().trace(TRACE_LEVEL.STATE, new ArrayList<String>(Arrays.asList("" + endoRequest)));
-		}
+		
 		
 	}
+	
+	
+	
 
 	private void selfAnalysationOfContexts4() {
 
+		
+		
+		
 		getEnvironment().trace(TRACE_LEVEL.DEBUG, new ArrayList<String>(Arrays.asList("------------------------------------------------------------------------------------"
 				+ "---------------------------------------- SELF ANALYSIS OF CTXT")));
 		
@@ -919,6 +1278,12 @@ public class Head extends AmoebaAgent {
 				
 				activatedContext.getLocalModel().updateModel(activatedContext.getCurrentExperiment(), getAmas().data.learningSpeed);
 
+				if(getAmas().data.oracleValue>0) {
+					
+					//System.out.println(activatedContext.getName()); REINFORCEMENT ?
+					
+					
+				}
 			}
 
 			if (currentDistanceToOraclePrediction < minDistanceToOraclePrediction) {
@@ -934,6 +1299,28 @@ public class Head extends AmoebaAgent {
 			activatedContext.criticalities.addCriticality("distanceToRegression", currentDistanceToOraclePrediction);
 			//getEnvironment().trace(new ArrayList<String>(Arrays.asList("ADD CRITICALITY TO CTXT", ""+activatedContext.getName(), ""+criticalities.getLastValues().get("distanceToRegression").size())));
 
+			activatedContext.lastPrediction = activatedContext.getActionProposal();
+			
+			double maxCoef = 0.0;
+			for(Double coef : activatedContext.getLocalModel().getCoef()) {
+				if(Math.abs(coef)> maxCoef) {
+					maxCoef = Math.abs(coef);
+				}
+			}
+			
+			if(activatedContext.lastPrediction>0 || maxCoef>10000) {
+//				System.out.println("##################################################################################################################");
+//				System.out.println(getAverageRegressionPerformanceIndicator());
+//				System.out.println(activatedContext.getName());
+//				System.out.println(activatedContext.getLocalModel().coefsToString());
+//				System.out.println(activatedContext.lastPrediction);
+//				System.out.println(getOracleValue());
+				
+				
+				
+				//System.exit(0);
+			}
+			
 		}
 
 		
@@ -1146,6 +1533,7 @@ public class Head extends AmoebaAgent {
 			getAmas().data.firstContext = true;
 		}
 
+		resetLastEndogenousRequest();
 		return context;
 	}
 
@@ -1154,47 +1542,49 @@ public class Head extends AmoebaAgent {
 	 */
 	private void updateStatisticalInformations() {
 
-		
-		if(Math.abs(getAmas().data.oracleValue)>getAmas().data.maxPrediction) {
-			getAmas().data.maxPrediction = Math.abs(getAmas().data.oracleValue);
-		}
-		
-
-		getAmas().data.normalizedCriticality = getAmas().data.criticity/getAmas().data.maxPrediction;
-		criticalities.addCriticality("predictionCriticality", getAmas().data.normalizedCriticality);
-		
-		criticalities.updateMeans();
-
-		if (severalActivatedContexts()) {
-
-			endogenousCriticalities.addCriticality("predictionCriticality", getAmas().data.criticity);
-			endogenousCriticalities.addCriticality("endogenousPredictionActivatedContextsOverlapspredictionCriticality",
-					Math.abs(getAmas().data.oracleValue - getAmas().data.endogenousPredictionActivatedContextsOverlaps));
-			endogenousCriticalities.addCriticality(
-					"endogenousPredictionActivatedContextsOverlapsWorstDimInfluencepredictionCriticality",
-					Math.abs(getAmas().data.oracleValue - getAmas().data.endogenousPredictionActivatedContextsOverlapsWorstDimInfluence));
-			endogenousCriticalities.addCriticality(
-					"endogenousPredictionActivatedContextsOverlapsInfluenceWithoutConfidencepredictionCriticality",
-					Math.abs(getAmas().data.oracleValue - getAmas().data.endogenousPredictionActivatedContextsOverlapsInfluenceWithoutConfidence));
-			endogenousCriticalities.addCriticality(
-					"endogenousPredictionActivatedContextsOverlapsWorstDimInfluenceWithoutConfidencepredictionCriticality",
-					Math.abs(getAmas().data.oracleValue
-							- getAmas().data.endogenousPredictionActivatedContextsOverlapsWorstDimInfluenceWithoutConfidence));
-			endogenousCriticalities.addCriticality(
-					"endogenousPredictionActivatedContextsOverlapsWorstDimInfluenceWithVolumepredictionCriticality",
-					Math.abs(getAmas().data.oracleValue - getAmas().data.endogenousPredictionActivatedContextsOverlapsWorstDimInfluenceWithVolume));
-			endogenousCriticalities.addCriticality(
-					"endogenousPredictionActivatedContextsSharedIncompetencepredictionCriticality",
-					Math.abs(getAmas().data.oracleValue - getAmas().data.endogenousPredictionActivatedContextsSharedIncompetence));
+		if(getAmas().data.oracleValue != null) {
+			if(Math.abs(getAmas().data.oracleValue)>getAmas().data.maxPrediction) {
+				getAmas().data.maxPrediction = Math.abs(getAmas().data.oracleValue);
+			}
+			
 
-			endogenousCriticalities.updateMeans();
+			getAmas().data.normalizedCriticality = getAmas().data.criticity/getAmas().data.maxPrediction;
+			criticalities.addCriticality("predictionCriticality", getAmas().data.normalizedCriticality);
+			
+			criticalities.updateMeans();
+
+			if (severalActivatedContexts()) {
+
+				endogenousCriticalities.addCriticality("predictionCriticality", getAmas().data.criticity);
+				endogenousCriticalities.addCriticality("endogenousPredictionActivatedContextsOverlapspredictionCriticality",
+						Math.abs(getAmas().data.oracleValue - getAmas().data.endogenousPredictionActivatedContextsOverlaps));
+				endogenousCriticalities.addCriticality(
+						"endogenousPredictionActivatedContextsOverlapsWorstDimInfluencepredictionCriticality",
+						Math.abs(getAmas().data.oracleValue - getAmas().data.endogenousPredictionActivatedContextsOverlapsWorstDimInfluence));
+				endogenousCriticalities.addCriticality(
+						"endogenousPredictionActivatedContextsOverlapsInfluenceWithoutConfidencepredictionCriticality",
+						Math.abs(getAmas().data.oracleValue - getAmas().data.endogenousPredictionActivatedContextsOverlapsInfluenceWithoutConfidence));
+				endogenousCriticalities.addCriticality(
+						"endogenousPredictionActivatedContextsOverlapsWorstDimInfluenceWithoutConfidencepredictionCriticality",
+						Math.abs(getAmas().data.oracleValue
+								- getAmas().data.endogenousPredictionActivatedContextsOverlapsWorstDimInfluenceWithoutConfidence));
+				endogenousCriticalities.addCriticality(
+						"endogenousPredictionActivatedContextsOverlapsWorstDimInfluenceWithVolumepredictionCriticality",
+						Math.abs(getAmas().data.oracleValue - getAmas().data.endogenousPredictionActivatedContextsOverlapsWorstDimInfluenceWithVolume));
+				endogenousCriticalities.addCriticality(
+						"endogenousPredictionActivatedContextsSharedIncompetencepredictionCriticality",
+						Math.abs(getAmas().data.oracleValue - getAmas().data.endogenousPredictionActivatedContextsSharedIncompetence));
+
+				endogenousCriticalities.updateMeans();
 
-		}
+			}
 
-		getAmas().data.predictionPerformance.update(criticalities.getCriticalityMean("predictionCriticality"));
-		if (criticalities.getCriticalityMean("distanceToRegression") != null) {
-			getAmas().data.regressionPerformance.update(criticalities.getCriticalityMean("distanceToRegression"));
+			getAmas().data.predictionPerformance.update(criticalities.getCriticalityMean("predictionCriticality"));
+			if (criticalities.getCriticalityMean("distanceToRegression") != null) {
+				getAmas().data.regressionPerformance.update(criticalities.getCriticalityMean("distanceToRegression"));
+			}
 		}
+		
 
 		// getAmas().data.mappingPerformance.update(?);
 	}
@@ -1220,7 +1610,7 @@ public class Head extends AmoebaAgent {
 	/**
 	 * Select best context.
 	 */
-	private void selectBestContext() {
+	private void selectBestContextWithConfidenceAndVolume() {
 		if(activatedContexts != null && !activatedContexts.isEmpty()) {
 			Context bc;
 	
@@ -1228,9 +1618,10 @@ public class Head extends AmoebaAgent {
 			double currentConfidence = bc.getConfidence();
 
 			for (Context context : activatedContexts) {
-				if (context.getConfidence() > currentConfidence) {
+				double confidenceWithVolume = context.getConfidence()*context.getVolume();
+				if (confidenceWithVolume > currentConfidence) {
 					bc = context;
-					currentConfidence = bc.getConfidence();
+					currentConfidence = confidenceWithVolume;
 				}
 			}
 			bestContext = bc;
@@ -1241,20 +1632,21 @@ public class Head extends AmoebaAgent {
 	
 	
 
-	private void selectBestContextWithDistanceToModel() {
+	private void selectBestContextWithDistanceToModelAndVolume() {
 
 		Context bc;
 
 		bc = activatedContexts.get(0);
 		double distanceToModel = bc.getLocalModel().distance(bc.getCurrentExperiment());
-		double currentDistanceToModel;
+		double currentDistanceToModelWithVolume;
 
 		for (Context context : activatedContexts) {
 
-			currentDistanceToModel = context.getLocalModel().distance(context.getCurrentExperiment());
-			if (currentDistanceToModel < distanceToModel) {
+			currentDistanceToModelWithVolume = context.getLocalModel().distance(context.getCurrentExperiment())/context.getVolume();
+			getEnvironment().trace(TRACE_LEVEL.INFORM, new ArrayList<String>(Arrays.asList("DISTANCE / VOLUME ", context.getName(), ""+currentDistanceToModelWithVolume)));
+			if (currentDistanceToModelWithVolume < distanceToModel) {
 				bc = context;
-				distanceToModel = currentDistanceToModel;
+				distanceToModel = currentDistanceToModelWithVolume;
 			}
 		}
 		bestContext = bc;
@@ -1319,6 +1711,12 @@ public class Head extends AmoebaAgent {
 		this.getAmas().data.criticity = criticity;
 	}
 
+	
+	public HashMap<String, Double> getHigherNeighborLastPredictionPercepts() {
+		return getAmas().data.higherNeighborLastPredictionPercepts;
+	}
+
+	
 	/**
 	 * Gets the action.
 	 *
@@ -1684,6 +2082,9 @@ public class Head extends AmoebaAgent {
 
 	public void clearAllUseableContextLists() {
 		activatedContexts.clear();
+		for (Context ctxt : activatedNeighborsContexts) {
+			ctxt.isInNeighborhood = false;
+		}
 		activatedNeighborsContexts.clear();
 	}
 
@@ -1809,40 +2210,84 @@ public class Head extends AmoebaAgent {
 	
 	
 	public boolean isActiveLearning() {
-		return isSelfRequest();
+		return isSelfRequest() && getAmas().data.activeLearning;
+	}
+	
+	public boolean isSelfLearning() {
+		return isSelfRequest() && getAmas().data.selfLearning;
 	}
 	
 	
 	
 	public HashMap<Percept, Double> getSelfRequest(){
-		getEnvironment().trace(TRACE_LEVEL.EVENT, new ArrayList<String>(Arrays.asList("FUTURE ACTIVE LEARNING", ""+endogenousRequests.element())));
+		getEnvironment().trace(TRACE_LEVEL.EVENT, new ArrayList<String>(Arrays.asList("FUTURE SELF LEARNING", ""+endogenousRequests.element())));
 		EndogenousRequest futureRequest = endogenousRequests.poll();
+		lastEndogenousRequest = futureRequest;
+		for(Context ctxt : futureRequest.getAskingContexts()) {
+			ctxt.deleteWaitingRequest(futureRequest);
+		}
+		
+		return futureRequest.getRequest();
+	}
+	
+	public HashMap<Percept, Double> getActiveRequest(){
+		EndogenousRequest futureRequest = null;
+		if(endogenousChildRequests.size()>0) {
+			futureRequest = endogenousChildRequests.poll();
+		}else if(endogenousRequests.size()>0) {
+			futureRequest = endogenousRequests.poll();
+		}
+		getEnvironment().trace(TRACE_LEVEL.EVENT, new ArrayList<String>(Arrays.asList("FUTURE ACTIVE LEARNING", ""+futureRequest)));
+		
+		lastEndogenousRequest = futureRequest;
 		for(Context ctxt : futureRequest.getAskingContexts()) {
 			ctxt.deleteWaitingRequest(futureRequest);
 		}
+		
 		return futureRequest.getRequest();
 	}
 	
+	public EndogenousRequest getLastEndogenousRequest() {
+		return lastEndogenousRequest;
+	}
+	
+	public void resetLastEndogenousRequest() {
+		lastEndogenousRequest = null;
+	}
+	
 	public void deleteRequest(Context ctxt) {
 		
 	}
 	
+	public boolean isSelfChildRequest(){
+		getEnvironment().trace(TRACE_LEVEL.STATE, new ArrayList<String>(Arrays.asList("ENDO CHILD REQUESTS", ""+endogenousChildRequests.size())));
+		for(EndogenousRequest endoRequest : endogenousChildRequests) {
+			getEnvironment().trace(TRACE_LEVEL.STATE, new ArrayList<String>(Arrays.asList("" + endoRequest)));
+		}
+		return endogenousChildRequests.size()>0;
+	}
+	
 	public boolean isSelfRequest(){
+		getEnvironment().trace(TRACE_LEVEL.STATE, new ArrayList<String>(Arrays.asList("ENDO REQUESTS", ""+endogenousRequests.size())));
+		for(EndogenousRequest endoRequest : endogenousRequests) {
+			getEnvironment().trace(TRACE_LEVEL.STATE, new ArrayList<String>(Arrays.asList("" + endoRequest)));
+		}
 		return endogenousRequests.size()>0;
 	}
 	
-	public void addSelfRequest(HashMap<Percept, Double> request, int priority, Context ctxt){		
+	public void addChildRequest(HashMap<Percept, Double> request, int priority, Context ctxt){		
 		
-		addEndogenousRequest(new EndogenousRequest(request, null, priority,new ArrayList<Context>(Arrays.asList(ctxt)), REQUEST.SELF));
+		getAmas().data.activeLearning = true;
+		addEndogenousRequest(new EndogenousRequest(request, null, priority,new ArrayList<Context>(Arrays.asList(ctxt)), REQUEST.SELF), endogenousChildRequests);
 	}
 	
-	public void addEndogenousRequest(EndogenousRequest request) {
+	public void addEndogenousRequest(EndogenousRequest request, Queue<EndogenousRequest> endogenousRequestsList) {
 		
 		boolean existingRequestTest = false;
 		
 		if(request.getAskingContexts().size()>1) {
 			
-			Iterator<EndogenousRequest> itr = endogenousRequests.iterator();
+			Iterator<EndogenousRequest> itr = endogenousRequestsList.iterator();
 			while(!existingRequestTest && itr.hasNext()) {
 				
 				EndogenousRequest currentRequest = itr.next();
@@ -1860,12 +2305,12 @@ public class Head extends AmoebaAgent {
 				for(Context ctxt : request.getAskingContexts()) {
 					ctxt.addWaitingRequest(request);
 				}
-				endogenousRequests.add(request);
+				endogenousRequestsList.add(request);
 				getEnvironment().trace(TRACE_LEVEL.EVENT, new ArrayList<String>(Arrays.asList("NEW ADDED ENDO REQUEST", ""+request)));
 			}
 		}else {
 			request.getAskingContexts().get(0).addWaitingRequest(request);
-			endogenousRequests.add(request);
+			endogenousRequestsList.add(request);
 			getEnvironment().trace(TRACE_LEVEL.EVENT, new ArrayList<String>(Arrays.asList("NEW ADDED ENDO REQUEST", ""+request)));
 		}
 		
diff --git a/AMOEBAonAMAK/src/agents/percept/ContextProjection.java b/AMOEBAonAMAK/src/agents/percept/ContextProjection.java
index 82556e3e5c520ab1500a64191f06ed397fb1bd52..988083df54769aebabca2000783c3be502dc9131 100644
--- a/AMOEBAonAMAK/src/agents/percept/ContextProjection.java
+++ b/AMOEBAonAMAK/src/agents/percept/ContextProjection.java
@@ -111,9 +111,9 @@ public class ContextProjection implements Serializable {
 		return getRadius();
 	}
 
-	public boolean inNeighborhood() {
-		return this.contains(percept.getValue(), context.getEnvironment().getContextCreationNeighborhood(context, percept))
-				|| this.contains(percept.getValue(), this.getRadius() / 2);
+	public boolean inNeighborhood(double value) {
+		return this.contains(value, context.getEnvironment().getContextCreationNeighborhood(context, percept))
+				|| this.contains(value, this.getRadius() / 2);
 	}
 	
 	public double getStart() {
diff --git a/AMOEBAonAMAK/src/agents/percept/Percept.java b/AMOEBAonAMAK/src/agents/percept/Percept.java
index 2820e695eecb8c41accb4e9fb7d4b2a1d2ed68ee..7bb940b25a64554e3644babd5ff984a21880baab 100644
--- a/AMOEBAonAMAK/src/agents/percept/Percept.java
+++ b/AMOEBAonAMAK/src/agents/percept/Percept.java
@@ -3,6 +3,7 @@ package agents.percept;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.function.Function;
 
 import agents.AmoebaAgent;
 import agents.context.Context;
@@ -20,8 +21,6 @@ public class Percept extends AmoebaAgent {
 	protected ArrayList<Context> activatedContext = new ArrayList<>();
 
 	public HashMap<Context, ContextProjection> contextProjections = new HashMap<Context, ContextProjection>();
-	private HashSet<Context> validContextProjection = new HashSet<Context>();
-	private HashSet<Context> neighborContextProjection = new HashSet<Context>();
 
 	private double min = Double.POSITIVE_INFINITY;
 	private double max = Double.NEGATIVE_INFINITY;
@@ -81,38 +80,25 @@ public class Percept extends AmoebaAgent {
 		 * 
 		 */
 		
-		validContextProjection = new HashSet<Context>();
-		neighborContextProjection = new HashSet<Context>();
-		
 		// To avoid unnecessary tests, we only compute validity on context
 		// validated by percepts that have finished before us
-		HashSet<Context> contexts = amas.getValidContexts();
-		if(contexts == null) {
+		HashSet<Context> activatedContexts = amas.getValidContexts();
+		if(activatedContexts == null) {
 			// If we are one of the first percept to run, we compute validity on all contexts
-			contexts = new HashSet<>(amas.getContexts());
+			activatedContexts = new HashSet<>(amas.getContexts());
 		}
-		
-		for (Context c : contexts) {
-			if (activateContext(c)) {
-				validContextProjection.add(c);
-			}
-		} 
-		amas.updateValidContexts(validContextProjection);
+		activatedContexts.removeIf(c -> !activateContext(c));
+		amas.updateValidContexts(activatedContexts);
 		
 		HashSet<Context> neighborsContexts = amas.getNeighborContexts();
 		if(neighborsContexts == null) {
 			// If we are one of the first percept to run, we compute validity on all contexts
 			neighborsContexts = new HashSet<>(amas.getContexts());
 		}
+		neighborsContexts.removeIf(c -> !inNeighborhood(c));
+		amas.updateNeighborContexts(neighborsContexts);
 		
-		for (Context c : neighborsContexts) {
-			if(inNeighborhood(c)) {
-				neighborContextProjection.add(c);
-			}
-		} 
-		amas.updateNeighborContexts(neighborContextProjection);
-		
-		logger().debug("CYCLE "+getAmas().getCycle(), "%s's valid contexts : %s", toString(), validContextProjection.toString());
+		logger().debug("CYCLE "+getAmas().getCycle(), "%s's valid contexts : %s", toString(), activatedContexts.toString());
 	}
 	
 	/**
@@ -125,12 +111,22 @@ public class Percept extends AmoebaAgent {
 	}
 	
 	/**
-	 * Return true if the context is in the neighborhood of this percept.
+	 * Return true if the context is in the neighborhood of this percept's current value.
 	 * @param context
 	 * @return
 	 */
 	public boolean inNeighborhood(Context context) {
-		return contextProjections.get(context).inNeighborhood();
+		return contextProjections.get(context).inNeighborhood(this.value);
+	}
+	
+	/**
+	 * Return true if the context is in the neighborhood of this percept at a given value.
+	 * @param context
+	 * @param value
+	 * @return
+	 */
+	public boolean inNeighborhood(Context context, double value) {
+		return contextProjections.get(context).inNeighborhood(value);
 	}
 
 
@@ -396,7 +392,7 @@ public class Percept extends AmoebaAgent {
 	}
 	
 	public double getMappingErrorAllowedMin() {
-		return getMinMaxDistance() * getEnvironment().getMappingErrorAllowed() * 0.4;
+		return getMinMaxDistance() * getEnvironment().getMappingErrorAllowed() * 0.25;
 	}
 	
 	public double getMappingErrorAllowedOverMapping() {
@@ -404,7 +400,7 @@ public class Percept extends AmoebaAgent {
 	}
 	
 	public double getMappingErrorAllowedMax() {
-		return getMinMaxDistance() * getEnvironment().getMappingErrorAllowed() * 1.5;
+		return getMinMaxDistance() * getEnvironment().getMappingErrorAllowed() * 2.0;
 	}
 
 	// -----------------------
diff --git a/AMOEBAonAMAK/src/experiments/AdvancedMain.java b/AMOEBAonAMAK/src/experiments/AdvancedMain.java
index d0267bb169fd4cb920fb0d8ae0dd01ab69ffec4c..39be228a80022f9765d872c9d0e3e7d2362a76c7 100644
--- a/AMOEBAonAMAK/src/experiments/AdvancedMain.java
+++ b/AMOEBAonAMAK/src/experiments/AdvancedMain.java
@@ -1,122 +1,152 @@
-package experiments;
-
-import java.io.File;
-import java.io.IOException;
-
-import fr.irit.smac.amak.Configuration;
-import fr.irit.smac.amak.tools.Log;
-import gui.AmoebaWindow;
-import javafx.beans.value.ChangeListener;
-import javafx.beans.value.ObservableValue;
-import javafx.scene.control.Slider;
-import kernel.AMOEBA;
-import kernel.StudiedSystem;
-import kernel.backup.BackupSystem;
-import kernel.backup.IBackupSystem;
-import kernel.backup.SaveHelperImpl;
-
-/**
- * A more advanced and complete main.
- * @author Hugo
- *
- */
-public class AdvancedMain {
-
-	public static void main(String[] args) throws IOException {
-		// Instantiating the MainWindow before usage.
-		// It also allows you to change some of its behavior before creating an AMOEBA.
-		// If you use Configuration.commandLineMode = True , then you should skip it. 
-		AmoebaWindow.instance();
-		example();
-	}
-
-	private static void example() throws IOException {
-
-		// Set AMAK configuration before creating an AMOEBA
-		Configuration.commandLineMode = false;
-		Configuration.allowedSimultaneousAgentsExecution = 1;
-		Configuration.waitForGUI = true;
-
-		// Create an AMOEBA
-		AMOEBA amoeba = new AMOEBA();
-		// Create a studied system and add it to the amoeba.
-		// Adding a studied system to an amoeba allow you to control the learning speed (the simulation : how many cycles per second)
-		// with amoeba's scheduler, graphically or programmatically.
-		StudiedSystem studiedSystem = new F_XY_System(50.0);
-		amoeba.setStudiedSystem(studiedSystem);
-		// A window appeared, allowing to control the simulation, but if you try to run it
-		// it will crash (there's no percepts !). We need to load a configuration :
-		
-		// Change how new Context are rendered.
-		//Context.defaultRenderStrategy = NoneRenderer.class;
-		
-		// Create a backup system for the AMOEBA
-		IBackupSystem backupSystem = new BackupSystem(amoeba);
-		// Load a configuration matching the studied system
-		File file = new File("resources/twoDimensionsLauncher.xml");
-		backupSystem.load(file);
-		// Note : if you intend to use a SaveHelper, you can use SaveHelper.load instead of a BackupSystem
-		
-		// We add an optional saver, allowing us to autosave the amoeba at each cycle.
-		// The SaveHelper also add graphical tools to save and load AMOEBA's state.
-		amoeba.saver = new SaveHelperImpl(amoeba);
-		// Autosave slow execution, if you want fast training, set saver to null,
-		// or saver.autoSave = false.
-
-		// The amoeba is ready to be used.
-		// Next we show how to control it with code :
-
-		// We deny the possibility to change simulation speed with the UI
-		amoeba.allowGraphicalScheduler(false);
-		// We allow rendering
-		amoeba.setRenderUpdate(true);
-		long start = System.currentTimeMillis();
-		// We run some learning cycles
-		int nbCycle = 1000;
-		for (int i = 0; i < nbCycle; ++i) {
-			studiedSystem.playOneStep();
-			amoeba.learn(studiedSystem.getOutput());
-		}
-		long end = System.currentTimeMillis();
-		System.out.println("Done in : " + (end - start) / 1000.0);
-		
-		// We create a manual save point
-		amoeba.saver.newManualSave("TestManualSave");
-		
-		// We set the log level to INFORM, to avoid debug logs that slow down simulation
-		Log.defaultMinLevel = Log.Level.INFORM;
-		
-		// We deactivate rendering
-		amoeba.setRenderUpdate(false);
-		// Do some more learning
-		start = System.currentTimeMillis();
-		for (int i = 0; i < nbCycle; ++i) {
-			studiedSystem.playOneStep();
-			amoeba.learn(studiedSystem.getOutput());
-		}
-		end = System.currentTimeMillis();
-		System.out.println("Done in : " + (end - start) / 1000.0);
-		
-		
-		// Activate rendering back
-		amoeba.setRenderUpdate(true);
-		// After activating rendering we need to update agent's visualization
-		amoeba.updateAgentsVisualisation();
-		// We allow simulation control with the UI
-		amoeba.allowGraphicalScheduler(true);
-		
-		// Exemple for adding a tool in the toolbar
-		Slider slider = new Slider(0, 10, 0);
-		slider.setShowTickLabels(true);
-		slider.setShowTickMarks(true);
-		slider.valueProperty().addListener(new ChangeListener<Number>() {
-			@Override
-			public void changed(ObservableValue<? extends Number> observable, Number oldValue, Number newValue) {
-				System.out.println("new Value "+newValue);
-			}
-		});
-		AmoebaWindow.addToolbar(slider);
-
-		System.out.println("End main");
-	}
-}
+package experiments;
+
+import java.io.File;
+import java.io.IOException;
+
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.tools.Log;
+import fr.irit.smac.amak.ui.VUIMulti;
+import gui.AmoebaMultiUIWindow;
+import gui.AmoebaWindow;
+import javafx.application.Application;
+import javafx.beans.value.ChangeListener;
+import javafx.beans.value.ObservableValue;
+import javafx.scene.control.Slider;
+import javafx.stage.Stage;
+import kernel.AMOEBA;
+import kernel.StudiedSystem;
+import kernel.backup.BackupSystem;
+import kernel.backup.IBackupSystem;
+import kernel.backup.SaveHelperImpl;
+
+/**
+ * A more advanced and complete main.
+ * @author Hugo
+ *
+ */
+public class AdvancedMain extends Application{
+	
+	
+	public static void main(String[] args) throws IOException {
+		
+		// Application.launch(args) launches JavaFX process 
+		// It also allows you to change some of its behavior before creating an AMOEBA.
+		// If you use Configuration.commandLineMode = True , then you should skip it. 
+		Application.launch(args);
+
+
+	}
+	
+	@Override
+	public void start(Stage primaryStage) throws Exception, IOException {
+
+		example();
+		
+	}
+
+	
+
+	private static void example() throws IOException {
+
+		// Set AMAK configuration before creating an AMOEBA
+		Configuration.commandLineMode = false;
+		Configuration.allowedSimultaneousAgentsExecution = 1;
+		Configuration.waitForGUI = true;
+		Configuration.multiUI = true;
+
+		
+		VUIMulti amoebaVUI = new VUIMulti("2D");
+		AmoebaMultiUIWindow amoebaUI = new AmoebaMultiUIWindow("ELLSA", amoebaVUI);
+		
+		// Create an AMOEBA
+		AMOEBA amoeba = new AMOEBA(amoebaUI, amoebaVUI);
+		// Create a studied system and add it to the amoeba.
+		// Adding a studied system to an amoeba allow you to control the learning speed (the simulation : how many cycles per second)
+		// with amoeba's scheduler, graphically or programmatically.
+		StudiedSystem studiedSystem = new F_XY_System(50.0);
+		amoeba.setStudiedSystem(studiedSystem);
+		// A window appeared, allowing to control the simulation, but if you try to run it
+		// it will crash (there's no percepts !). We need to load a configuration :
+		
+		// Change how new Context are rendered.
+		//Context.defaultRenderStrategy = NoneRenderer.class;
+		
+		// Create a backup system for the AMOEBA
+		IBackupSystem backupSystem = new BackupSystem(amoeba);
+		// Load a configuration matching the studied system
+		File file = new File("resources/twoDimensionsLauncher.xml");
+		backupSystem.load(file);
+		// Note : if you intend to use a SaveHelper, you can use SaveHelper.load instead of a BackupSystem
+		
+		// We add an optional saver, allowing us to autosave the amoeba at each cycle.
+		// The SaveHelper also add graphical tools to save and load AMOEBA's state.
+		amoeba.saver = new SaveHelperImpl(amoeba);
+		// Autosave slow execution, if you want fast training, set saver to null,
+		// or saver.autoSave = false.
+
+		// The amoeba is ready to be used.
+		// Next we show how to control it with code :
+
+		// We deny the possibility to change simulation speed with the UI
+		amoeba.allowGraphicalScheduler(false);
+		// We allow rendering
+		amoeba.setRenderUpdate(true);
+		long start = System.currentTimeMillis();
+		// We run some learning cycles
+		int nbCycle = 100;
+		for (int i = 0; i < nbCycle; ++i) {
+			System.out.println(i);
+			studiedSystem.playOneStep();
+			amoeba.learn(studiedSystem.getOutput());
+		}
+		long end = System.currentTimeMillis();
+		System.out.println("Done in : " + (end - start) / 1000.0);
+		
+		// We create a manual save point
+		amoeba.saver.newManualSave("TestManualSave");
+		
+		// We set the log level to INFORM, to avoid debug logs that slow down simulation
+		Log.defaultMinLevel = Log.Level.INFORM;
+		
+		// We deactivate rendering
+		amoeba.setRenderUpdate(false);
+		// Do some more learning
+		start = System.currentTimeMillis();
+		for (int i = 0; i < nbCycle; ++i) {
+			studiedSystem.playOneStep();
+			amoeba.learn(studiedSystem.getOutput());
+		}
+		end = System.currentTimeMillis();
+		System.out.println("Done in : " + (end - start) / 1000.0);
+		
+		
+		// Activate rendering back
+		amoeba.setRenderUpdate(true);
+		// After activating rendering we need to update agent's visualization
+		amoeba.updateAgentsVisualisation();
+		// We allow simulation control with the UI
+		amoeba.allowGraphicalScheduler(true);
+		
+		// Example of adding a tool in the toolbar
+		Slider slider = new Slider(0, 10, 0);
+		slider.setShowTickLabels(true);
+		slider.setShowTickMarks(true);
+		slider.valueProperty().addListener(new ChangeListener<Number>() {
+			@Override
+			public void changed(ObservableValue<? extends Number> observable, Number oldValue, Number newValue) {
+				System.out.println("new Value "+newValue);
+			}
+		});
+		amoebaUI.addToolbar(slider);
+
+		System.out.println("End main");
+	}
+	
+	@Override
+	public void stop() throws Exception {
+		super.stop();
+		System.exit(0);
+	}
+
+	
+}
diff --git a/AMOEBAonAMAK/src/experiments/F_XY_System.java b/AMOEBAonAMAK/src/experiments/F_XY_System.java
index 8aba6d446a4bb86d6aea058b234028ff40b8adac..d2ebc1a8fc27319bb7cb2de89416d8d16c686682 100644
--- a/AMOEBAonAMAK/src/experiments/F_XY_System.java
+++ b/AMOEBAonAMAK/src/experiments/F_XY_System.java
@@ -4,6 +4,7 @@ import java.util.HashMap;
 import java.util.Random;
 
 import agents.percept.Percept;
+import kernel.AMOEBA;
 import kernel.StudiedSystem;
 
 /**
@@ -31,13 +32,15 @@ public class F_XY_System implements StudiedSystem {
 	}
 	
 	@Override
-	public void playOneStep() {
+	public HashMap<String, Double> playOneStep() {
 		if (generator == null) {
 			generator = new Random(29);
 		}
 
 		x = (generator.nextDouble() - 0.5) * spaceSize * 4;
 		y = (generator.nextDouble() - 0.5) * spaceSize * 4;
+		
+		return null;
 	}
 
 	@Override
@@ -48,7 +51,7 @@ public class F_XY_System implements StudiedSystem {
 
 		out.put("px0", x);
 		out.put("px1", y);
-		out.put("oracle", 0.0);
+		out.put("oracle", result);
 		return out;
 	}
 
@@ -80,4 +83,46 @@ public class F_XY_System implements StudiedSystem {
 	public HashMap<String, Double> getIntput() {
 		return null;
 	}
+
+	@Override
+	public HashMap<String, Double> playOneStepWithControlModel() {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	@Override
+	public void setControlModels(HashMap<String, AMOEBA> controlModels) {
+		// TODO Auto-generated method stub
+		
+	}
+
+	@Override
+	public void setControl(boolean value) {
+		// TODO Auto-generated method stub
+		
+	}
+
+	@Override
+	public void setSelfLearning(boolean value) {
+		// TODO Auto-generated method stub
+		
+	}
+
+	@Override
+	public Double getActiveRequestCounts() {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	@Override
+	public Double getSelfRequestCounts() {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	@Override
+	public Double getRandomRequestCounts() {
+		// TODO Auto-generated method stub
+		return null;
+	}
 }
\ No newline at end of file
diff --git a/AMOEBAonAMAK/src/experiments/Main.java b/AMOEBAonAMAK/src/experiments/Main.java
index 4cd5643ea4f16c72567fe716d54b72cd9223787b..721e209ebdd996c13a773954808eb66aa9d84c38 100644
--- a/AMOEBAonAMAK/src/experiments/Main.java
+++ b/AMOEBAonAMAK/src/experiments/Main.java
@@ -39,7 +39,7 @@ public class Main {
 		}
 		
 		System.out.println("Creating the amoeba");
-		AMOEBA amoeba = new AMOEBA(configFile, ss);
+		AMOEBA amoeba = new AMOEBA(null,null,configFile, ss);
 		
 		synchronized (Thread.currentThread()){
 			try {
diff --git a/AMOEBAonAMAK/src/experiments/MinimalMain.java b/AMOEBAonAMAK/src/experiments/MinimalMain.java
deleted file mode 100644
index b9b0bde35821831dfe658b762a50be32ceb0540a..0000000000000000000000000000000000000000
--- a/AMOEBAonAMAK/src/experiments/MinimalMain.java
+++ /dev/null
@@ -1,22 +0,0 @@
-package experiments;
-
-import kernel.AMOEBA;
-import kernel.StudiedSystem;
-
-/**
- * The most minimal main possible producing a functioning amoeba.
- * @author Hugo
- *
- */
-public class MinimalMain {
-
-	public static void main(String[] args) throws InterruptedException {
-		// create a system to be studied
-		StudiedSystem studiedSystem = new F_XY_System(50.0);
-		// create the amoeba
-		// Make sure the path to the config file is correct.
-		AMOEBA amoeba = new AMOEBA("resources/twoDimensionsLauncher.xml", studiedSystem);
-		// a window should have appeared, allowing you to control and visualize the amoeba.
-	}
-
-}
diff --git a/AMOEBAonAMAK/src/experiments/MinimalMainCommandLineMode.java b/AMOEBAonAMAK/src/experiments/MinimalMainCommandLineMode.java
new file mode 100644
index 0000000000000000000000000000000000000000..d57dfb6fab68c4228f1c088e2d2c2cc19b06b0bf
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/MinimalMainCommandLineMode.java
@@ -0,0 +1,40 @@
+package experiments;
+
+import fr.irit.smac.amak.Configuration;
+import kernel.AMOEBA;
+import kernel.StudiedSystem;
+
+/**
+ * The most minimal main possible producing a functioning amoeba.
+ * @author Hugo
+ *
+ */
+public class MinimalMainCommandLineMode {
+
+	public static void main(String[] args) throws InterruptedException {
+		
+		Configuration.commandLineMode = true;
+		
+		// create a system to be studied
+		StudiedSystem studiedSystem = new F_XY_System(50.0);
+		// create the amoeba
+		// Make sure the path to the config file is correct.
+		AMOEBA amoeba = new AMOEBA(null,null,"resources/twoDimensionsLauncher.xml", studiedSystem);
+		// a window should have appeared, allowing you to control and visualize the amoeba.
+		
+		// Learning and Request example
+		long start = System.currentTimeMillis();
+		for (int i = 0; i < 1001; ++i) {
+			studiedSystem.playOneStep();
+			amoeba.learn(studiedSystem.getOutput());
+		}
+		long end = System.currentTimeMillis();
+		System.out.println("Done in : " + (end - start)  + " ms");
+		
+		for (int i = 0; i < 10; ++i) {
+			studiedSystem.playOneStep();
+			System.out.println(amoeba.request(studiedSystem.getOutput()));
+		}
+	}
+
+}
diff --git a/AMOEBAonAMAK/src/experiments/MinimalMainUI.java b/AMOEBAonAMAK/src/experiments/MinimalMainUI.java
new file mode 100644
index 0000000000000000000000000000000000000000..074596944882d8c54c4c104060d81602d3365698
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/MinimalMainUI.java
@@ -0,0 +1,40 @@
+package experiments;
+
+import fr.irit.smac.amak.Configuration;
+import kernel.AMOEBA;
+import kernel.StudiedSystem;
+
+/**
+ * The most minimal main possible producing a functioning amoeba.
+ * @author Hugo
+ *
+ */
+public class MinimalMainUI {//TODO
+
+	public static void main(String[] args) throws InterruptedException {
+		
+		Configuration.commandLineMode = false;
+		
+		// create a system to be studied
+		StudiedSystem studiedSystem = new F_XY_System(50.0);
+		// create the amoeba
+		// Make sure the path to the config file is correct.
+		AMOEBA amoeba = new AMOEBA(null,null,"resources/twoDimensionsLauncher.xml", studiedSystem);
+		// a window should have appeared, allowing you to control and visualize the amoeba.
+		
+		// Learning and Request example
+		long start = System.currentTimeMillis();
+		for (int i = 0; i < 1001; ++i) {
+			studiedSystem.playOneStep();
+			amoeba.learn(studiedSystem.getOutput());
+		}
+		long end = System.currentTimeMillis();
+		System.out.println("Done in : " + (end - start)  + " ms");
+		
+		for (int i = 0; i < 10; ++i) {
+			studiedSystem.playOneStep();
+			System.out.println(amoeba.request(studiedSystem.getOutput()));
+		}
+	}
+
+}
diff --git a/AMOEBAonAMAK/src/experiments/SimpleReinforcement.java b/AMOEBAonAMAK/src/experiments/SimpleReinforcement.java
deleted file mode 100644
index 85be42842f749fcb26b09c7ae377c9c3a386bf30..0000000000000000000000000000000000000000
--- a/AMOEBAonAMAK/src/experiments/SimpleReinforcement.java
+++ /dev/null
@@ -1,169 +0,0 @@
-package experiments;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Deque;
-import java.util.HashMap;
-import java.util.Random;
-
-import fr.irit.smac.amak.Configuration;
-import fr.irit.smac.amak.ui.drawables.Drawable;
-import fr.irit.smac.amak.ui.drawables.DrawableOval;
-import gui.AmoebaWindow;
-import javafx.scene.paint.Color;
-import kernel.AMOEBA;
-import kernel.backup.SaveHelperDummy;
-import utils.Pair;
-import utils.RandomUtils;
-import utils.XmlConfigGenerator;
-
-/**
- * Train an amoeba on a simple reinforcement task.
- * The goal of the task is to get to the center. When the position of the agent cross 0, it gets a reward of 100.
- * The agent can only moves in 2 directions, of a distance of 1. Moving give a reward of -1.
- * If the agent moves outside of the allowed range, it gets a reward of -100. 
- * @author Hugo
- *
- */
-public class SimpleReinforcement {
-	
-	private Random rand = new Random();
-	private double x = 0;
-	private double reward = 0;
-	private Drawable pos;
-
-	public static void main(String[] args) {
-		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
-		sensors.add(new Pair<String, Boolean>("p1", false));
-		sensors.add(new Pair<String, Boolean>("a1", true));
-		File config;
-		try {
-			config = File.createTempFile("config", "xml");
-			XmlConfigGenerator.makeXML(config, sensors);
-		} catch (IOException e) {
-			e.printStackTrace();
-			System.exit(1);
-			return; // now compilator know config is initialized
-		}
-		
-		Configuration.commandLineMode = true;
-		AMOEBA amoeba = new AMOEBA(config.getAbsolutePath(), null);
-		amoeba.saver = new SaveHelperDummy();
-		SimpleReinforcement env = new SimpleReinforcement();
-		
-		Random r = new Random();
-		HashMap<String, Double> state = env.reset();
-		HashMap<String, Double> state2;
-		double explo = 0.5;
-		for(int i = 0; i < 100; i++) {
-			boolean done = false;
-			Deque<HashMap<String, Double>> actions = new ArrayDeque<>();
-			//System.out.println("Explore "+i);
-			int nbStep = 0;
-			state = env.reset();
-			while(!done) {
-				nbStep++;
-				if(nbStep > 500) {
-					done = true;
-				}
-				state.remove("oracle");
-				state.remove("a1");
-				HashMap<String, Double> action = amoeba.maximize(state);
-				if(r.nextDouble() < 0.5 || action.get("oracle").equals(Double.NEGATIVE_INFINITY) ) {
-					//System.out.println("Random action");
-					action.put("a1", (r.nextBoolean() ? 10.0 : -10.0));
-				}
-				state2 = env.step(action.get("a1"));
-				
-				if(state2.get("oracle") != -1.0) {
-					done = true;
-				}
-				
-				action.put("p1", state.get("p1"));
-				action.put("oracle", state2.get("oracle"));
-				//System.out.println(action);
-				actions.add(action);
-				
-				state = state2;
-			}
-			
-			//System.out.println("Learn "+i);
-			HashMap<String, Double> action = actions.pop();
-			double reward = action.get("oracle");
-			amoeba.learn(action);
-			
-			while(!actions.isEmpty()) {
-				action = actions.pop();
-				reward += action.get("oracle");
-				action.put("oracle", reward);
-				amoeba.learn(action);
-			}
-			
-			if(explo > 0.1) {
-				explo -= 0.01;
-				if(explo < 0.1)
-					explo = 0.1;
-			}
-			
-			System.out.println("Episode "+i+"  reward : "+reward+"  explo : "+explo);
-		}
-	}
-	
-	/**
-	 * Must be called AFTER an AMOEBA with GUI
-	 */
-	public SimpleReinforcement() {
-		//Configuration.commandLineMode = false;
-		//AmoebaWindow instance = AmoebaWindow.instance();
-		//pos = new DrawableOval(0.5, 0.5, 1, 1);
-		//pos.setColor(new Color(0.5, 0.0, 0.0, 0.5));
-		//instance.mainVUI.add(pos);
-		//instance.mainVUI.createAndAddRectangle(-50, -0.25, 100, 0.5);
-		//instance.mainVUI.createAndAddRectangle(-0.25, -1, 0.5, 2);
-		
-		
-		
-	}
-	
-	public HashMap<String, Double> step(double action){
-		if(action == 0.0) action = rand.nextDouble();
-		if(action > 0.0) action = Math.ceil(action);
-		if(action < 0.0 ) action = Math.floor(action);
-		if(action > 1.0) action = 1.0;
-		if(action < -1.0) action = -1.0;
-		double oldX = x;
-		x = x + action;
-		if(x < -50.0 || x > 50.0) {
-			x = RandomUtils.nextDouble(rand, -50.0, Math.nextUp(50.0));
-			reward = -100.0;
-		} else if(x == 0.0 || sign(oldX) != sign(x)) {
-			// win !
-			reward = 1000.0;
-			x = RandomUtils.nextDouble(rand, -50.0, Math.nextUp(50.0));
-		} else {
-			reward = -1.0;
-		}
-		HashMap<String, Double> ret = new HashMap<>();
-		ret.put("p1", x);
-		ret.put("oracle", reward);
-		//pos.move(x+0.5, 0.5);
-		return ret;
-	}
-	
-	public HashMap<String, Double> reset(){
-		x = RandomUtils.nextDouble(rand, -50.0, Math.nextUp(50.0));
-		reward = 0.0;
-		
-		HashMap<String, Double> ret = new HashMap<>();
-		ret.put("p1", x);
-		ret.put("oracle", reward);
-		return ret;
-	}
-	
-	private int sign(double x) {
-		return x < 0 ? -1 : 1;
-	}
-
-}
diff --git a/AMOEBAonAMAK/src/experiments/TestingMain.java b/AMOEBAonAMAK/src/experiments/TestingMain.java
index 55bfd61be6a785e91185c2cf2eff2c396e810a15..afd007ed60898b49643f02550a46fe1ebce31cdc 100644
--- a/AMOEBAonAMAK/src/experiments/TestingMain.java
+++ b/AMOEBAonAMAK/src/experiments/TestingMain.java
@@ -2,6 +2,7 @@ package experiments;
 
 import java.util.HashMap;
 
+import experiments.benchmark.NDimCube;
 import fr.irit.smac.amak.Configuration;
 import fr.irit.smac.amak.tools.Log;
 import kernel.AMOEBA;
@@ -22,7 +23,7 @@ public class TestingMain {
 		StudiedSystem studiedSystem = new NDimCube(50.0, 3);
 		// create the amoeba
 		// Make sure the path to the config file is correct.
-		AMOEBA amoeba = new AMOEBA("resources/threeDimensionsLauncher.xml", studiedSystem);
+		AMOEBA amoeba = new AMOEBA(null,null,"resources/threeDimensionsLauncher.xml", studiedSystem);
 		amoeba.saver = new SaveHelperDummy();
 		// a window should have appeared, allowing you to control and visualize the amoeba.
 		
diff --git a/AMOEBAonAMAK/src/experiments/UnityLauncher/Main.java b/AMOEBAonAMAK/src/experiments/UnityLauncher/Main.java
index 099acfb8fc051ddd4d8bdc9e921b199dc79844f7..b8100b80089a1fcbd2a0432375bcf8cfd9a2c342 100644
--- a/AMOEBAonAMAK/src/experiments/UnityLauncher/Main.java
+++ b/AMOEBAonAMAK/src/experiments/UnityLauncher/Main.java
@@ -76,7 +76,7 @@ public class Main implements Runnable {
 		Configuration.waitForGUI = true;
 		Configuration.plotMilliSecondsUpdate = 20000;
 		
-		AMOEBA amoeba = new AMOEBA();
+		AMOEBA amoeba = new AMOEBA(null,null);
 		StudiedSystem studiedSystem = new F_N_Manager(spaceSize, dimension, nbOfModels, normType, randomExploration, explorationIncrement,explorationWidht,limitedToSpaceZone, oracleNoiseRange);
 		amoeba.setStudiedSystem(studiedSystem);
 		IBackupSystem backupSystem = new BackupSystem(amoeba);
@@ -111,7 +111,7 @@ public class Main implements Runnable {
 			if(amoeba.getHeadAgent().isActiveLearning()) {
 			
 				studiedSystem.setActiveLearning(true);
-				studiedSystem.setSelfRequest(amoeba.getHeadAgent().getSelfRequest());
+				studiedSystem.setSelfRequest(amoeba.getHeadAgent().getSelfRequest()); //TODO self active ...
 			 
 			}
 			
diff --git a/AMOEBAonAMAK/src/experiments/Benchmark.java b/AMOEBAonAMAK/src/experiments/benchmark/Benchmark.java
similarity index 92%
rename from AMOEBAonAMAK/src/experiments/Benchmark.java
rename to AMOEBAonAMAK/src/experiments/benchmark/Benchmark.java
index 17c5376ea16351c8290ce2ca1e704656b83408ba..7eb0eeb7e16366c07be8ebff5cd9af53d2d14a2b 100644
--- a/AMOEBAonAMAK/src/experiments/Benchmark.java
+++ b/AMOEBAonAMAK/src/experiments/benchmark/Benchmark.java
@@ -1,4 +1,4 @@
-package experiments;
+package experiments.benchmark;
 
 import java.io.File;
 import java.util.ArrayList;
@@ -43,7 +43,7 @@ public class Benchmark {
 	private static void execLearn(int nbCycle, String configFile) {
 		System.out.println("Start "+nbCycle+" learning cycles.");
 		
-		AMOEBA amoeba = new AMOEBA();
+		AMOEBA amoeba = new AMOEBA(null,null);
 		BackupSystem bs = new BackupSystem(amoeba);
 		bs.load(new File(configFile));
 		StudiedSystem ss = new NDimCube(50, amoeba.getPercepts().size());
@@ -67,7 +67,7 @@ public class Benchmark {
 	private static void execRequest(int nbCycle, String configFile) {
 		System.out.println("Start "+nbCycle+" request cycles.");
 		
-		AMOEBA amoeba = new AMOEBA();
+		AMOEBA amoeba = new AMOEBA(null,null);
 		BackupSystem bs = new BackupSystem(amoeba);
 		bs.load(new File(configFile));
 		StudiedSystem ss = new NDimCube(50, amoeba.getPercepts().size());
diff --git a/AMOEBAonAMAK/src/experiments/BenchmarkThreading.java b/AMOEBAonAMAK/src/experiments/benchmark/BenchmarkThreading.java
similarity index 95%
rename from AMOEBAonAMAK/src/experiments/BenchmarkThreading.java
rename to AMOEBAonAMAK/src/experiments/benchmark/BenchmarkThreading.java
index 58529462bf7b4a9ae7054e3d2860d7408c912bea..23bce11e9f8b3c719377ad461f8364d73ee74cf7 100644
--- a/AMOEBAonAMAK/src/experiments/BenchmarkThreading.java
+++ b/AMOEBAonAMAK/src/experiments/benchmark/BenchmarkThreading.java
@@ -1,4 +1,4 @@
-package experiments;
+package experiments.benchmark;
 
 import java.io.File;
 import java.util.ArrayList;
@@ -116,7 +116,7 @@ public class BenchmarkThreading {
 		// setup cache --- (very important to reduce impact of the 1st measure)
 		Configuration.allowedSimultaneousAgentsExecution = 1;
 		StudiedSystem learnSystem = new NDimCube(50.0, 100);
-		AMOEBA amoeba = new AMOEBA();
+		AMOEBA amoeba = new AMOEBA(null,null);
 		amoeba.setStudiedSystem(learnSystem);
 		IBackupSystem backupSystem = new BackupSystem(amoeba);
 		backupSystem.load(file);
@@ -127,7 +127,7 @@ public class BenchmarkThreading {
 		for(int thd = 1; thd <= 8; thd *= 2) {
 			Configuration.allowedSimultaneousAgentsExecution = thd;
 			learnSystem = new NDimCube(50.0, 100);
-			amoeba = new AMOEBA();
+			amoeba = new AMOEBA(null,null);
 			backupSystem = new BackupSystem(amoeba);
 			backupSystem.load(file);
 			List<List<Double>> bench = benchmark(amoeba, learnSystem, learnSystem, 0, 10000, 1000, null);
diff --git a/AMOEBAonAMAK/src/experiments/NDimCube.java b/AMOEBAonAMAK/src/experiments/benchmark/NDimCube.java
similarity index 69%
rename from AMOEBAonAMAK/src/experiments/NDimCube.java
rename to AMOEBAonAMAK/src/experiments/benchmark/NDimCube.java
index fcb7ceb617403a236849ff2f00d7b0b545c27ac3..88e5e7075c9ea2dc43233895433ddaeeb96f73cb 100644
--- a/AMOEBAonAMAK/src/experiments/NDimCube.java
+++ b/AMOEBAonAMAK/src/experiments/benchmark/NDimCube.java
@@ -1,10 +1,11 @@
-package experiments;
+package experiments.benchmark;
 
 import java.util.HashMap;
 import java.util.Random;
 
 import agents.percept.Percept;
 import fr.irit.smac.amak.tools.Log;
+import kernel.AMOEBA;
 import kernel.StudiedSystem;
 
 /**
@@ -50,10 +51,12 @@ public class NDimCube implements StudiedSystem{
 	}
 	
 	@Override
-	public void playOneStep() {
+	public HashMap<String, Double> playOneStep() {
 		for(int i = 0; i<dim; i++) {
 			x[i] = (generator.nextDouble() - 0.5) * spaceSize * 4;
 		}
+		
+		return null;
 	}
 	
 	@Override
@@ -127,4 +130,46 @@ public class NDimCube implements StudiedSystem{
 	public HashMap<String, Double> getIntput() {
 		return null;
 	}
+
+	@Override
+	public HashMap<String, Double> playOneStepWithControlModel() {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	@Override
+	public void setControlModels(HashMap<String, AMOEBA> controlModels) {
+		// TODO Auto-generated method stub
+		
+	}
+
+	@Override
+	public void setControl(boolean value) {
+		// TODO Auto-generated method stub
+		
+	}
+
+	@Override
+	public void setSelfLearning(boolean value) {
+		// TODO Auto-generated method stub
+		
+	}
+
+	@Override
+	public Double getActiveRequestCounts() {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	@Override
+	public Double getSelfRequestCounts() {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	@Override
+	public Double getRandomRequestCounts() {
+		// TODO Auto-generated method stub
+		return null;
+	}
 }
\ No newline at end of file
diff --git a/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_Launcher.java b/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_Launcher.java
index ffd17fe59ad4e7b8d2bcd7b5023e7057099d6127..2bb7df9c576c824ef81c488f80701aed001cff63 100644
--- a/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_Launcher.java
+++ b/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_Launcher.java
@@ -1,27 +1,38 @@
 package experiments.nDimensionsLaunchers;
 
+
 import java.io.File;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.OptionalDouble;
 
 import experiments.FILE;
 import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.ui.VUIMulti;
+import gui.AmoebaMultiUIWindow;
 import gui.AmoebaWindow;
+import javafx.application.Application;
 import javafx.beans.value.ChangeListener;
 import javafx.beans.value.ObservableValue;
 import javafx.scene.control.Slider;
+import javafx.stage.Stage;
 import kernel.AMOEBA;
 import kernel.StudiedSystem;
+import kernel.World;
 import kernel.backup.BackupSystem;
 import kernel.backup.IBackupSystem;
 import kernel.backup.SaveHelperImpl;
+import utils.TRACE_LEVEL;
 
 
 /**
  * The Class BadContextLauncherEasy.
  */
-public class F_N_Launcher implements Serializable {
+public class F_N_Launcher  extends Application implements Serializable {
 
 
 	public static final double oracleNoiseRange = 0.5;
@@ -29,126 +40,109 @@ public class F_N_Launcher implements Serializable {
 	public static final int regressionPoints = 100;
 	public static final int dimension = 2;
 	public static final double spaceSize = 50.0	;
-	public static final int nbOfModels = 3	;
+	public static final int nbOfModels = 2	;
 	public static final int normType = 2	;
 	public static final boolean randomExploration = true;
 	public static final boolean limitedToSpaceZone = true;
 	//public static final double mappingErrorAllowed = 0.07; // BIG SQUARE
-	public static double mappingErrorAllowed = 0.03; // MULTI
+	public static double mappingErrorAllowed = 0.05; // MULTI
 	public static final double explorationIncrement = 1.0	;
 	public static final double explorationWidht = 0.5	;
-	
+	public static final boolean setActiveLearning = true	;
+	public static final boolean setSelfLearning = false	;
 	public static final int nbCycle = 1000;
+	public static final int nbTest = 10;
 	
 
 	
 	public static void main(String[] args) throws IOException {
-		// Instantiating the MainWindow before usage.
-		// It also allows you to change some of its behavior before creating an AMOEBA.
-		// If you use Configuration.commandLineMode = True , then you should skip it. 
-		AmoebaWindow.instance();
-		launch();
+		
+		
+		Application.launch(args);
+
+
 	}
 	
 
+	@Override
+	public void start(Stage arg0) throws Exception {
 
-	public static void launch() throws IOException{
+
+		// Set AMAK configuration before creating an AMOEBA
+		Configuration.multiUI=true;
+		Configuration.commandLineMode = true;
+		Configuration.allowedSimultaneousAgentsExecution = 1;
+		Configuration.waitForGUI = false;
+		Configuration.plotMilliSecondsUpdate = 20000;
+		
+		HashMap<String, ArrayList<Double>> data = new HashMap<String, ArrayList<Double>>();
 		
+		List<String> dataStrings = Arrays.asList("mappingScore", "randomRequests", "activeRequests","nbAgents");
 		
+		for (String dataName : dataStrings){
+			data.put(dataName, new ArrayList<Double>());
+		}
 		
+		for (int i = 0; i < nbTest; ++i) {
+			System.out.print(i + " ");
+			ellsaTest( data);
+		}
+		System.out.println("");
 		
-		// Set AMAK configuration before creating an AMOEBA
-		Configuration.commandLineMode = false;
-		Configuration.allowedSimultaneousAgentsExecution = 1;
-		Configuration.waitForGUI = true;
-		Configuration.plotMilliSecondsUpdate = 20000;
 		
-		AMOEBA amoeba = new AMOEBA();
+		
+		
+		
+		
+		for (String dataName : dataStrings){
+			OptionalDouble averageScore = data.get(dataName).stream().mapToDouble(a->a).average();
+			Double deviationScore = data.get(dataName).stream().mapToDouble(a->Math.pow((a-averageScore.getAsDouble()),2)).sum();
+			System.out.println("[" + dataName +" AVERAGE] " + averageScore.getAsDouble());
+			System.out.println("[" + dataName +" DEVIATION] " +Math.sqrt(deviationScore/data.get(dataName).size()));
+		}
+		
+		
+		
+	}
+
+
+	private void ellsaTest(HashMap<String, ArrayList<Double>> data) {
+		AMOEBA amoeba = new AMOEBA(null,  null);
 		StudiedSystem studiedSystem = new F_N_Manager(spaceSize, dimension, nbOfModels, normType, randomExploration, explorationIncrement,explorationWidht,limitedToSpaceZone, oracleNoiseRange);
 		amoeba.setStudiedSystem(studiedSystem);
 		IBackupSystem backupSystem = new BackupSystem(amoeba);
 		File file = new File("resources/twoDimensionsLauncher.xml");
 		backupSystem.load(file);
 		
-		amoeba.saver = new SaveHelperImpl(amoeba);
-		amoeba.allowGraphicalScheduler(true);
-		amoeba.setRenderUpdate(true);		
+		
+		amoeba.allowGraphicalScheduler(false);
+		amoeba.setRenderUpdate(false);		
 		amoeba.data.learningSpeed = learningSpeed;
 		amoeba.data.numberOfPointsForRegression = regressionPoints;
+		amoeba.data.isActiveLearning = setActiveLearning;
+		amoeba.data.isSelfLearning = setSelfLearning;
 		amoeba.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
 		
-		// Exemple for adding a tool in the toolbar
-		Slider slider = new Slider(0.01, 0.1, mappingErrorAllowed);
-		slider.setShowTickLabels(true);
-		slider.setShowTickMarks(true);
-		
-		slider.valueProperty().addListener(new ChangeListener<Number>() {
-			@Override
-			public void changed(ObservableValue<? extends Number> observable, Number oldValue, Number newValue) {
-				System.out.println("new Value "+newValue);
-				mappingErrorAllowed = (double)newValue;
-				amoeba.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
-			}
-		});
-		AmoebaWindow.addToolbar(slider);
-		
-		studiedSystem.playOneStep();
-		amoeba.learn(studiedSystem.getOutput());
-		
-		/* AUTOMATIC */
-//		long start = System.currentTimeMillis();
-//		for (int i = 0; i < nbCycle; ++i) {
-//			studiedSystem.playOneStep();
-//			amoeba.learn(studiedSystem.getOutput());
-//		}
-//		long end = System.currentTimeMillis();
-//		System.out.println("Done in : " + (end - start) );
-//		
-//		start = System.currentTimeMillis();
-//		for (int i = 0; i < nbCycle; ++i) {
-//			studiedSystem.playOneStep();
-//			amoeba.request(studiedSystem.getOutput());
-//		}
-//		end = System.currentTimeMillis();
-//		System.out.println("Done in : " + (end - start) );
-		
-		
-//		/* XP PIERRE */
-//		
-//		String fileName = fileName(new ArrayList<String>(Arrays.asList("GaussiennePierre")));
-//		
-//		FILE Pierrefile = new FILE("Pierre",fileName);
-//		for (int i = 0; i < nbCycle; ++i) {
-//			studiedSystem.playOneStep();
-//			amoeba.learn(studiedSystem.getOutput());
-//			if(amoeba.getHeadAgent().isActiveLearning()) {
-//				studiedSystem.setActiveLearning(true);
-//				studiedSystem.setSelfRequest(amoeba.getHeadAgent().getSelfRequest());
-//				 
-//			}
-//		}
-//		
-//		for (int i = 0; i < 10; ++i) {
-//			studiedSystem.playOneStep();
-//			System.out.println(studiedSystem.getOutput());
-//			System.out.println(amoeba.request(studiedSystem.getOutput()));
-//			
-//			
-//		}
-//		
-//		Pierrefile.write(new ArrayList<String>(Arrays.asList("ID contexte","Coeff Cte","Coeff X0","Coeff X1","Min Value","Max Value")));
-//		
-//		for(Context ctxt : amoeba.getContexts()) {
-//			
-//			writeMessage(Pierrefile, ctxt.toStringArrayPierre());
-//
-//		}
-//		
-//		
-//		Pierrefile.close();
+		amoeba.setRenderUpdate(false);
 		
-	
+		World.minLevel = TRACE_LEVEL.ERROR;
+		
+		
+
+		
+		
+		for (int i = 0; i < nbCycle; ++i) {
+			amoeba.cycle();
+		}
+		
+		
+		data.get("mappingScore").add(amoeba.getHeadAgent().criticalities.getCriticality("spatialCriticality"));
+		data.get("randomRequests").add(studiedSystem.getRandomRequestCounts());
+		data.get("activeRequests").add(studiedSystem.getActiveRequestCounts());
+		data.get("nbAgents").add((double)amoeba.getContexts().size());
 	}
+
+	
 	
 	public static String fileName(ArrayList<String> infos) {
 		String fileName = "";
@@ -171,4 +165,8 @@ public class F_N_Launcher implements Serializable {
 		file.sendManualMessage();
 		
 	}
+
+
+
+	
 }
diff --git a/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_LauncherMultiUI.java b/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_LauncherMultiUI.java
new file mode 100644
index 0000000000000000000000000000000000000000..903eda8e1960f044089dfbfb1357e4e5d631a917
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_LauncherMultiUI.java
@@ -0,0 +1,458 @@
+package experiments.nDimensionsLaunchers;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+
+import experiments.FILE;
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.examples.randomantsMultiUi.AntHillExampleMultiUI;
+import fr.irit.smac.amak.examples.randomantsMultiUi.WorldExampleMultiUI;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.VUIMulti;
+import gui.AmoebaMultiUIWindow;
+import gui.AmoebaWindow;
+import javafx.application.Application;
+import javafx.application.Platform;
+import javafx.beans.value.ChangeListener;
+import javafx.beans.value.ObservableValue;
+import javafx.scene.control.Slider;
+import javafx.stage.Stage;
+import kernel.AMOEBA;
+import kernel.StudiedSystem;
+import kernel.backup.BackupSystem;
+import kernel.backup.IBackupSystem;
+import kernel.backup.SaveHelperImpl;
+import utils.TRACE_LEVEL;
+
+
+/**
+ * The Class BadContextLauncherEasy.
+ */
+public class F_N_LauncherMultiUI extends Application implements Serializable {
+
+
+	public static final double oracleNoiseRange = 0.5;
+	public static final double learningSpeed = 0.01;
+	public static final int regressionPoints = 100;
+	public static final int dimension = 2;
+	public static final double spaceSize = 50.0	;
+	public static final int nbOfModels = 3	;
+	public static final int normType = 2	;
+	public static final boolean randomExploration = true;
+	public static final boolean limitedToSpaceZone = true;
+	//public static final double mappingErrorAllowed = 0.07; // BIG SQUARE
+	public static double mappingErrorAllowed = 0.03; // MULTI
+	public static final double explorationIncrement = 1.0	;
+	public static final double explorationWidht = 0.5	;
+	
+	public static final int nbCycle = 1000;
+	
+	AMOEBA amoeba;
+	StudiedSystem studiedSystem;
+	VUIMulti amoebaVUI;
+	AmoebaMultiUIWindow amoebaUI;
+	
+	AMOEBA amoeba2;
+	StudiedSystem studiedSystem2;
+	VUIMulti amoebaVUI2;
+	AmoebaMultiUIWindow amoebaUI2;
+	
+	public static void main(String[] args) throws IOException {
+		
+		
+		Application.launch(args);
+
+
+	}
+	
+	@Override
+	public void start(Stage arg0) throws Exception, IOException {
+
+		Configuration.multiUI=true;
+		Configuration.commandLineMode = false;
+		Configuration.allowedSimultaneousAgentsExecution = 1;
+		Configuration.waitForGUI = true;
+		Configuration.plotMilliSecondsUpdate = 20000;
+		
+		amoebaVUI = new VUIMulti("2D");
+		amoebaUI = new AmoebaMultiUIWindow("ELLSA", amoebaVUI);
+		
+		
+		// Example for adding a tool in the toolbar
+		Slider slider = new Slider(0.01, 0.1, mappingErrorAllowed);
+		slider.setShowTickLabels(true);
+		slider.setShowTickMarks(true);
+		
+		slider.valueProperty().addListener(new ChangeListener<Number>() {
+			@Override
+			public void changed(ObservableValue<? extends Number> observable, Number oldValue, Number newValue) {
+				System.out.println("new Value "+newValue);
+				mappingErrorAllowed = (double)newValue;
+				amoeba.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
+			}
+		});
+		amoebaUI.addToolbar(slider);
+		
+		
+		amoebaVUI2 = new VUIMulti("2D");
+		amoebaUI2 = new AmoebaMultiUIWindow("ELLSA", amoebaVUI2);
+		
+		
+		// Example for adding a tool in the toolbar
+		Slider slider2 = new Slider(0.01, 0.1, mappingErrorAllowed);
+		slider2.setShowTickLabels(true);
+		slider2.setShowTickMarks(true);
+		
+		slider2.valueProperty().addListener(new ChangeListener<Number>() {
+			@Override
+			public void changed(ObservableValue<? extends Number> observable, Number oldValue, Number newValue) {
+				System.out.println("new Value "+newValue);
+				mappingErrorAllowed = (double)newValue;
+				amoeba2.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
+			}
+		});
+		amoebaUI2.addToolbar(slider2);
+		
+		
+		
+		startTask(100, 1000);
+		startTask2(500, 100);
+
+
+		
+//		VUIMulti amoebaVUI2 = VUIMulti.get("2D");
+//		AmoebaMultiUIWindow amoebaUI2 = new AmoebaMultiUIWindow("ELLSA", amoebaVUI2);
+//		AMOEBA amoeba2 = new AMOEBA(amoebaUI2,  amoebaVUI2);
+//		
+//		StudiedSystem studiedSystem2 = new F_N_Manager(spaceSize, dimension, nbOfModels, normType, randomExploration, explorationIncrement,explorationWidht,limitedToSpaceZone, oracleNoiseRange);
+//		amoeba2.setStudiedSystem(studiedSystem2);
+//		IBackupSystem backupSystem2 = new BackupSystem(amoeba2);
+//		File file2 = new File("resources/twoDimensionsLauncher.xml");
+//		backupSystem2.load(file2);
+//		
+//		amoeba2.saver = new SaveHelperImpl(amoeba2);
+//		amoeba2.allowGraphicalScheduler(true);
+//		amoeba2.setRenderUpdate(true);		
+//		amoeba2.data.learningSpeed = learningSpeed;
+//		amoeba2.data.numberOfPointsForRegression = regressionPoints;
+//		amoeba2.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
+//		
+//		// Example for adding a tool in the toolbar
+//		Slider slider2 = new Slider(0.01, 0.1, mappingErrorAllowed);
+//		slider2.setShowTickLabels(true);
+//		slider2.setShowTickMarks(true);
+//		
+//		slider2.valueProperty().addListener(new ChangeListener<Number>() {
+//			@Override
+//			public void changed(ObservableValue<? extends Number> observable, Number oldValue, Number newValue) {
+//				System.out.println("new Value "+newValue);
+//				mappingErrorAllowed = (double)newValue;
+//				amoeba2.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
+//			}
+//		});
+//		amoebaUI2.addToolbar(slider2);
+//		
+//		studiedSystem2.playOneStep();
+//		amoeba2.learn(studiedSystem2.getOutput());
+		
+//		try {
+//			   Thread.sleep(2000) ;
+//			}  catch (InterruptedException e) {
+//			    // error handling
+//			}
+//		
+//		long start = System.currentTimeMillis();
+//		for (int i = 0; i < nbCycle; ++i) {
+//			studiedSystem.playOneStep();
+//			amoeba.learn(studiedSystem.getOutput());
+//		}
+//		long end = System.currentTimeMillis();
+//		System.out.println("Done in : " + (end - start) );
+		
+
+		
+	}
+	
+	public void startTask(long wait, int cycles) 
+    {
+        // Create a Runnable
+        Runnable task = new Runnable()
+        {
+            public void run()
+            {
+                runTask(wait, cycles);
+            }
+        };
+ 
+        // Run the task in a background thread
+        Thread backgroundThread = new Thread(task);
+        // Terminate the running thread if the application exits
+        backgroundThread.setDaemon(true);
+        // Start the thread
+        backgroundThread.start();
+        
+     
+    }
+	
+	public void startTask2(long wait, int cycles) 
+    {
+        // Create a Runnable
+        Runnable task = new Runnable()
+        {
+            public void run()
+            {
+                runTask2(wait, cycles);
+            }
+        };
+ 
+        // Run the task in a background thread
+        Thread backgroundThread = new Thread(task);
+        // Terminate the running thread if the application exits
+        backgroundThread.setDaemon(true);
+        // Start the thread
+        backgroundThread.start();
+        
+     
+    }
+	
+	public void runTask(long wait, int cycles) 
+    {
+		
+		try
+        {
+             
+            // Update the Label on the JavaFx Application Thread        
+            Platform.runLater(new Runnable() 
+            {
+                @Override
+                public void run() 
+                {
+                	amoeba = new AMOEBA(amoebaUI,  amoebaVUI);
+            		studiedSystem = new F_N_Manager(spaceSize, dimension, nbOfModels, normType, randomExploration, explorationIncrement,explorationWidht,limitedToSpaceZone, oracleNoiseRange);
+            		amoeba.setStudiedSystem(studiedSystem);
+            		IBackupSystem backupSystem = new BackupSystem(amoeba);
+            		File file = new File("resources/twoDimensionsLauncher.xml");
+            		backupSystem.load(file);
+            		
+            		amoeba.saver = new SaveHelperImpl(amoeba);
+            		amoeba.allowGraphicalScheduler(true);
+            		amoeba.setRenderUpdate(true);		
+            		amoeba.data.learningSpeed = learningSpeed;
+            		amoeba.data.numberOfPointsForRegression = regressionPoints;
+            		amoeba.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
+            		
+                }
+            });
+     
+            Thread.sleep(wait);
+        }
+        catch (InterruptedException e) 
+        {
+            e.printStackTrace();
+        }
+		
+		
+		
+        for(int i = 0; i < cycles; i++) 
+        {
+            try
+            {
+                // Get the Status
+                final String status = "Processing " + i + " of " + cycles;
+                 
+                // Update the Label on the JavaFx Application Thread        
+                Platform.runLater(new Runnable() 
+                {
+                    @Override
+                    public void run() 
+                    {
+                    	studiedSystem.playOneStep();
+                    	amoeba.learn(studiedSystem.getOutput());
+                    	if(amoeba.getHeadAgent().isActiveLearning()) {
+                    		studiedSystem.setActiveLearning(true);
+                    		studiedSystem.setSelfRequest(amoeba.getHeadAgent().getSelfRequest()); //TODO self active ...
+    						 
+    					}
+                    	System.out.println(status);
+                    }
+                });
+         
+                Thread.sleep(wait);
+            }
+            catch (InterruptedException e) 
+            {
+                e.printStackTrace();
+            }
+        }
+    }   
+	
+	public void runTask2(long wait, int cycles) 
+    {
+		
+		try
+        {
+             
+            // Update the Label on the JavaFx Application Thread        
+            Platform.runLater(new Runnable() 
+            {
+                @Override
+                public void run() 
+                {
+                	amoeba2 = new AMOEBA(amoebaUI2,  amoebaVUI2);
+            		studiedSystem2 = new F_N_Manager(spaceSize, dimension, nbOfModels, normType, randomExploration, explorationIncrement,explorationWidht,limitedToSpaceZone, oracleNoiseRange);
+            		amoeba2.setStudiedSystem(studiedSystem2);
+            		IBackupSystem backupSystem2 = new BackupSystem(amoeba2);
+            		File file2 = new File("resources/twoDimensionsLauncher.xml");
+            		backupSystem2.load(file2);
+            		
+            		amoeba2.saver = new SaveHelperImpl(amoeba2);
+            		amoeba2.allowGraphicalScheduler(true);
+            		amoeba2.setRenderUpdate(true);		
+            		amoeba2.data.learningSpeed = learningSpeed;
+            		amoeba2.data.numberOfPointsForRegression = regressionPoints;
+            		amoeba2.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
+            		
+                }
+            });
+     
+            Thread.sleep(wait);
+        }
+        catch (InterruptedException e) 
+        {
+            e.printStackTrace();
+        }
+		
+		
+		
+        for(int i = 0; i < cycles; i++) 
+        {
+            try
+            {
+                // Get the Status
+                final String status = "Processing " + i + " of " + cycles;
+                 
+                // Update the Label on the JavaFx Application Thread        
+                Platform.runLater(new Runnable() 
+                {
+                    @Override
+                    public void run() 
+                    {
+                    	studiedSystem2.playOneStep();
+                    	amoeba2.learn(studiedSystem2.getOutput());
+                    	if(amoeba2.getHeadAgent().isActiveLearning()) {
+                    		studiedSystem2.setActiveLearning(true);
+                    		studiedSystem2.setSelfRequest(amoeba2.getHeadAgent().getSelfRequest()); //TODO self active ...
+    						 
+    					}
+                    	System.out.println(status);
+                    }
+                });
+         
+                Thread.sleep(wait);
+            }
+            catch (InterruptedException e) 
+            {
+                e.printStackTrace();
+            }
+        }
+    }   
+	
+	@Override
+	public void stop() throws Exception {
+		super.stop();
+		System.exit(0);
+	}
+
+	public static void launch() throws IOException{
+		
+		
+	
+		
+		
+		
+
+		
+		/* AUTOMATIC */
+//		long start = System.currentTimeMillis();
+//		for (int i = 0; i < nbCycle; ++i) {
+//			studiedSystem.playOneStep();
+//			amoeba.learn(studiedSystem.getOutput());
+//		}
+//		long end = System.currentTimeMillis();
+//		System.out.println("Done in : " + (end - start) );
+//		
+//		start = System.currentTimeMillis();
+//		for (int i = 0; i < nbCycle; ++i) {
+//			studiedSystem.playOneStep();
+//			amoeba.request(studiedSystem.getOutput());
+//		}
+//		end = System.currentTimeMillis();
+//		System.out.println("Done in : " + (end - start) );
+		
+		
+//		/* XP PIERRE */
+//		
+//		String fileName = fileName(new ArrayList<String>(Arrays.asList("GaussiennePierre")));
+//		
+//		FILE Pierrefile = new FILE("Pierre",fileName);
+//		for (int i = 0; i < nbCycle; ++i) {
+//			studiedSystem.playOneStep();
+//			amoeba.learn(studiedSystem.getOutput());
+//			if(amoeba.getHeadAgent().isActiveLearning()) {
+//				studiedSystem.setActiveLearning(true);
+//				studiedSystem.setSelfRequest(amoeba.getHeadAgent().getSelfRequest());
+//				 
+//			}
+//		}
+//		
+//		for (int i = 0; i < 10; ++i) {
+//			studiedSystem.playOneStep();
+//			System.out.println(studiedSystem.getOutput());
+//			System.out.println(amoeba.request(studiedSystem.getOutput()));
+//			
+//			
+//		}
+//		
+//		Pierrefile.write(new ArrayList<String>(Arrays.asList("ID contexte","Coeff Cte","Coeff X0","Coeff X1","Min Value","Max Value")));
+//		
+//		for(Context ctxt : amoeba.getContexts()) {
+//			
+//			writeMessage(Pierrefile, ctxt.toStringArrayPierre());
+//
+//		}
+//		
+//		
+//		Pierrefile.close();
+		
+	
+	}
+	
+	public static String fileName(ArrayList<String> infos) {
+		String fileName = "";
+		
+		for(String info : infos) {
+			fileName += info + "_";
+		}
+		
+		return fileName;
+	}
+	
+	public static void writeMessage(FILE file, ArrayList<String> message) {
+		
+		file.initManualMessage();
+		
+		for(String m : message) {
+			file.addManualMessage(m);
+		}
+		
+		file.sendManualMessage();
+		
+	}
+
+
+
+	
+}
diff --git a/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_LauncherUI.java b/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_LauncherUI.java
new file mode 100644
index 0000000000000000000000000000000000000000..c161535948db343636f1f8dd03eb486dfde5e043
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_LauncherUI.java
@@ -0,0 +1,196 @@
+package experiments.nDimensionsLaunchers;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+
+import experiments.FILE;
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.ui.VUIMulti;
+import gui.AmoebaMultiUIWindow;
+import gui.AmoebaWindow;
+import javafx.application.Application;
+import javafx.beans.value.ChangeListener;
+import javafx.beans.value.ObservableValue;
+import javafx.scene.control.Slider;
+import javafx.stage.Stage;
+import kernel.AMOEBA;
+import kernel.StudiedSystem;
+import kernel.backup.BackupSystem;
+import kernel.backup.IBackupSystem;
+import kernel.backup.SaveHelperImpl;
+
+
+/**
+ * The Class BadContextLauncherEasy.
+ */
+public class F_N_LauncherUI  extends Application implements Serializable {
+
+
+	public static final double oracleNoiseRange = 0.5;
+	public static final double learningSpeed = 0.01;
+	public static final int regressionPoints = 100;
+	public static final int dimension = 2;
+	public static final double spaceSize = 50.0	;
+	public static final int nbOfModels = 2	;
+	public static final int normType = 2	;
+	public static final boolean randomExploration = true;
+	public static final boolean limitedToSpaceZone = true;
+	//public static final double mappingErrorAllowed = 0.07; // BIG SQUARE
+	public static double mappingErrorAllowed = 0.05; // MULTI
+	public static final double explorationIncrement = 1.0	;
+	public static final double explorationWidht = 0.5	;
+	public static final boolean setActiveLearning = true	;
+	public static final boolean setSelfLearning = false	;
+	public static final int nbCycle = 1000;
+	
+
+	
+	public static void main(String[] args) throws IOException {
+		
+		
+		Application.launch(args);
+
+
+	}
+	
+
+	@Override
+	public void start(Stage arg0) throws Exception {
+
+
+		// Set AMAK configuration before creating an AMOEBA
+		Configuration.multiUI=true;
+		Configuration.commandLineMode = false;
+		Configuration.allowedSimultaneousAgentsExecution = 1;
+		Configuration.waitForGUI = true;
+		Configuration.plotMilliSecondsUpdate = 20000;
+		
+		VUIMulti amoebaVUI = new VUIMulti("2D");
+		AmoebaMultiUIWindow amoebaUI = new AmoebaMultiUIWindow("ELLSA", amoebaVUI);
+		AMOEBA amoeba = new AMOEBA(amoebaUI,  amoebaVUI);
+		StudiedSystem studiedSystem = new F_N_Manager(spaceSize, dimension, nbOfModels, normType, randomExploration, explorationIncrement,explorationWidht,limitedToSpaceZone, oracleNoiseRange);
+		amoeba.setStudiedSystem(studiedSystem);
+		IBackupSystem backupSystem = new BackupSystem(amoeba);
+		File file = new File("resources/twoDimensionsLauncher.xml");
+		backupSystem.load(file);
+		
+		amoeba.saver = new SaveHelperImpl(amoeba, amoebaUI);
+		
+		amoeba.allowGraphicalScheduler(true);
+		amoeba.setRenderUpdate(true);		
+		amoeba.data.learningSpeed = learningSpeed;
+		amoeba.data.numberOfPointsForRegression = regressionPoints;
+		amoeba.data.isActiveLearning = setActiveLearning;
+		amoeba.data.isSelfLearning = setSelfLearning;
+		amoeba.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
+		
+		for (int i = 0; i < nbCycle; ++i) {
+			amoeba.cycle();
+		}
+		
+		
+		// Example for adding a tool in the toolbar
+//		Slider slider = new Slider(0.01, 0.1, mappingErrorAllowed);
+//		slider.setShowTickLabels(true);
+//		slider.setShowTickMarks(true);
+//		
+//		slider.valueProperty().addListener(new ChangeListener<Number>() {
+//			@Override
+//			public void changed(ObservableValue<? extends Number> observable, Number oldValue, Number newValue) {
+//				System.out.println("new Value "+newValue);
+//				mappingErrorAllowed = (double)newValue;
+//				amoeba.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
+//			}
+//		});
+//		amoebaUI.addToolbar(slider);
+		
+		//studiedSystem.playOneStep();
+		//amoeba.learn(studiedSystem.getOutput());
+		
+		
+		
+		
+		/* AUTOMATIC */
+//				long start = System.currentTimeMillis();
+//				for (int i = 0; i < nbCycle; ++i) {
+//					studiedSystem.playOneStep();
+//					amoeba.learn(studiedSystem.getOutput());
+//				}
+//				long end = System.currentTimeMillis();
+//				System.out.println("Done in : " + (end - start) );
+//				
+//				start = System.currentTimeMillis();
+//				for (int i = 0; i < nbCycle; ++i) {
+//					studiedSystem.playOneStep();
+//					amoeba.request(studiedSystem.getOutput());
+//				}
+//				end = System.currentTimeMillis();
+//				System.out.println("Done in : " + (end - start) );
+		
+		
+//				/* XP PIERRE */
+//				
+//				String fileName = fileName(new ArrayList<String>(Arrays.asList("GaussiennePierre")));
+//				
+//				FILE Pierrefile = new FILE("Pierre",fileName);
+//				for (int i = 0; i < nbCycle; ++i) {
+//					studiedSystem.playOneStep();
+//					amoeba.learn(studiedSystem.getOutput());
+//					if(amoeba.getHeadAgent().isActiveLearning()) {
+//						studiedSystem.setActiveLearning(true);
+//						studiedSystem.setSelfRequest(amoeba.getHeadAgent().getSelfRequest());
+//						 
+//					}
+//				}
+//				
+//				for (int i = 0; i < 10; ++i) {
+//					studiedSystem.playOneStep();
+//					System.out.println(studiedSystem.getOutput());
+//					System.out.println(amoeba.request(studiedSystem.getOutput()));
+//					
+//					
+//				}
+//				
+//				Pierrefile.write(new ArrayList<String>(Arrays.asList("ID contexte","Coeff Cte","Coeff X0","Coeff X1","Min Value","Max Value")));
+//				
+//				for(Context ctxt : amoeba.getContexts()) {
+//					
+//					writeMessage(Pierrefile, ctxt.toStringArrayPierre());
+//
+//				}
+//				
+//				
+//				Pierrefile.close();
+		
+	}
+
+	
+	
+	public static String fileName(ArrayList<String> infos) {
+		String fileName = "";
+		
+		for(String info : infos) {
+			fileName += info + "_";
+		}
+		
+		return fileName;
+	}
+	
+	public static void writeMessage(FILE file, ArrayList<String> message) {
+		
+		file.initManualMessage();
+		
+		for(String m : message) {
+			file.addManualMessage(m);
+		}
+		
+		file.sendManualMessage();
+		
+	}
+
+
+
+	
+}
diff --git a/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_Manager.java b/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_Manager.java
index a5c51923a19dec1f5678edd58df31573c7561f4f..4bfe1f86c2d5e9b384d58736ea2eed8c4bb63b46 100644
--- a/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_Manager.java
+++ b/AMOEBAonAMAK/src/experiments/nDimensionsLaunchers/F_N_Manager.java
@@ -5,6 +5,7 @@ import java.util.HashMap;
 import java.util.Random;
 
 import agents.percept.Percept;
+import kernel.AMOEBA;
 import kernel.StudiedSystem;
 
 
@@ -40,6 +41,7 @@ public class F_N_Manager implements StudiedSystem{
 	
 	HashMap<String,Double> selfRequest;
 	boolean activeLearning = false;
+	boolean selfLearning = false;
 	
 	double noiseRange;
 	
@@ -49,7 +51,31 @@ public class F_N_Manager implements StudiedSystem{
 	double explorationIncrement;
 	double explorationMaxVariation;
 	
+	private Double activeRequestCounts = 0.0;
+	private Double selfRequestCounts = 0.0;
+	private Double randomRequestCounts = 0.0;
 	
+	public Double getActiveRequestCounts() {
+		return activeRequestCounts;
+	}
+
+
+
+
+	public Double getSelfRequestCounts() {
+		return selfRequestCounts;
+	}
+
+
+
+
+	public Double getRandomRequestCounts() {
+		return randomRequestCounts;
+	}
+
+
+
+
 	/* Parameters */
 	private static final double gaussianCoef = 1000;
 	private static final double gaussianVariance = 10;
@@ -95,6 +121,28 @@ public class F_N_Manager implements StudiedSystem{
 		modelCoefs2[dimension] = (int) (Math.random() * 500 - 255);
 		
 		
+		//printModels(nbOfModels);
+		
+		
+		
+		randomExploration= rndExploration;
+		
+		explorationVector = new double[dimension];	
+		for(int i = 0 ; i < dimension ; i++) {
+			explorationVector[i] = Math.random() - 0.5;
+		}
+		double vectorNorm = normeP(explorationVector, 2);
+		for(int i = 0 ; i < dimension ; i++) {
+			explorationVector[i] /= vectorNorm;
+		}
+		
+		
+		explorationIncrement = explIncrement;
+		explorationMaxVariation = explnVariation;
+	}
+
+
+	private void printModels(int nbOfModels) {
 		System.out.println("ZONE 1 DISKS");
 		for(int nb = 0; nb<nbOfModels; nb++) {
 			System.out.print(modelCoefs[nb][dimension] + "\t");
@@ -121,23 +169,6 @@ public class F_N_Manager implements StudiedSystem{
 		}
 		System.out.println("");
 		System.out.println("");
-		
-		
-		
-		randomExploration= rndExploration;
-		
-		explorationVector = new double[dimension];	
-		for(int i = 0 ; i < dimension ; i++) {
-			explorationVector[i] = Math.random() - 0.5;
-		}
-		double vectorNorm = normeP(explorationVector, 2);
-		for(int i = 0 ; i < dimension ; i++) {
-			explorationVector[i] /= vectorNorm;
-		}
-		
-		
-		explorationIncrement = explIncrement;
-		explorationMaxVariation = explnVariation;
 	}
 	
 	
@@ -145,7 +176,7 @@ public class F_N_Manager implements StudiedSystem{
 	 * @see kernel.StudiedSystem#playOneStep(double)
 	 */
 	@Override
-	public void playOneStep() {
+	public HashMap<String, Double> playOneStep() {
 		
 
 		if(!randomExploration) {
@@ -153,17 +184,19 @@ public class F_N_Manager implements StudiedSystem{
 			nonRandomExplorationStep();
 			
 		}
+		else if(selfLearning) {
+					
+			for(int i = 0 ; i < dimension ; i++) {
+				x[i] = selfRequest.get("px" + i);
+			}
+			selfRequestCounts++;
+		}
 		else if(activeLearning) {
 			
-			
-			
-			activeLearning = false;
-			
-			
-			
 			for(int i = 0 ; i < dimension ; i++) {
 				x[i] = selfRequest.get("px" + i);
 			}
+			activeRequestCounts ++;
 		}
 
 		else {
@@ -172,9 +205,12 @@ public class F_N_Manager implements StudiedSystem{
 			for(int i = 0 ; i < dimension ; i++) {
 				x[i] = (generator.nextDouble() - 0.5) * spaceSize * 4;
 			}
+			randomRequestCounts++;
 		}
 		
+		//System.out.println("[PLAY ONE STEP] " + "selfLearning " + selfLearning + " activeLearning " + activeLearning);
 		
+		return null;
 	}
 	
 	
@@ -277,37 +313,21 @@ public class F_N_Manager implements StudiedSystem{
 		
 		int subzone = subzone2D(xRequest);
 		
-		if(subzone == 1) {
-			/* Disques */
-			return modelN(xRequest) ;
-		}else if (subzone == 2) {
-			/* Gaussian model */
-			return gaussianModel(xRequest, subZoneCenter3D(2), gaussianCoef, gaussianVariance);
-			
-		}else if (subzone == 3) {
-			/* Square */
-			return square2DModel(xRequest, subZoneCenter3D(3));
-			
-		}else if (subzone == 4) {
-			/* Exp */
-			return gaussianMapping2D(xRequest);
-		}
-		
-		return model1();
+		/* Multi */
+		//return multiModel(xRequest, subzone);
 		
 		
 		/* Disc */
-		//return (y*y + x*x < spaceSize*spaceSize ) ? 2*x + y : 5*x - 8*y;
+		return (xRequest[0]*xRequest[0] + xRequest[1]*xRequest[1] < spaceSize*spaceSize ) ? model1(xRequest[0],xRequest[1]) : model2(xRequest[0],xRequest[1]);
 		
 		/* Square */
-		//return (x1 > -spaceSize && x1 < spaceSize && x0 < spaceSize && x0 > -spaceSize) ? model1(x0,x1) : model2(x0,x1) ;
-		//return model1();
+	//return (xRequest[1] > -spaceSize && xRequest[1] < spaceSize && xRequest[0] < spaceSize && xRequest[0] > -spaceSize) ? model1(xRequest[0],xRequest[1]) : model2(xRequest[0],xRequest[1]) ;
 		
 		/* Triangle */
-		//return (y > x) ? 2*x + y : 5*x - 8*y;
+	//return (xRequest[1] > xRequest[0]) ? model1(xRequest[0],xRequest[1]) : model2(xRequest[0],xRequest[1]);
 		
 		/* Split */
-		//return ( x <= 0 ) ? 2*x + y : 5*x - 8*y;
+		//return ( xRequest[0] <= 0 ) ? model1(xRequest[0],xRequest[1]) : model2(xRequest[0],xRequest[1]);
 		
 		
 		
@@ -321,6 +341,27 @@ public class F_N_Manager implements StudiedSystem{
 		
 		
 		
+	}
+
+
+	private double multiModel(Double[] xRequest, int subzone) {
+		if(subzone == 1) {
+			/* Disques */
+			return modelN(xRequest) ;
+		}else if (subzone == 2) {
+			/* Gaussian model */
+			return gaussianModel(xRequest, subZoneCenter3D(2), gaussianCoef, gaussianVariance);
+			
+		}else if (subzone == 3) {
+			/* Square */
+			return square2DModel(xRequest, subZoneCenter3D(3));
+			
+		}else if (subzone == 4) {
+			/* Exp */
+			return gaussianMapping2D(xRequest);
+		}
+		
+		return model1();
 	}
 	
 	
@@ -565,7 +606,18 @@ private double[] subZoneCenter3D(int nb) {
 			out.put("px" + i,x[i]);
 			
 		}
-		out.put("oracle",result);
+		if(selfLearning) {
+			selfLearning = false;
+			out.put("oracle",null);
+		}else {
+			out.put("oracle",result);
+		}
+		if(activeLearning) {
+			activeLearning=false;
+		}
+		//out.put("oracle",result);
+		//System.out.println("[GET OUTPUT] " +out);
+		
 		return out;
 	}
 	
@@ -689,10 +741,17 @@ private double[] subZoneCenter3D(int nb) {
 		activeLearning = value;
 	}
 	
+	@Override
+	public void setSelfLearning(boolean value) {
+		selfLearning = value;
+	}
+	
 	@Override
 	public void setSelfRequest(HashMap<Percept, Double> request){
 		HashMap<String,Double> newRequest = new HashMap<String,Double>();
 		
+		//System.out.println("[SET SELF REQUEST] " +request);
+		
 		for(Percept pct : request.keySet()) {
 			newRequest.put(pct.getName(), request.get(pct));
 		}
@@ -701,5 +760,26 @@ private double[] subZoneCenter3D(int nb) {
 	}
 
 
+	@Override
+	public HashMap<String, Double> playOneStepWithControlModel() {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+
+	@Override
+	public void setControlModels(HashMap<String, AMOEBA> controlModels) {
+		// TODO Auto-generated method stub
+		
+	}
+
+
+	@Override
+	public void setControl(boolean value) {
+		// TODO Auto-generated method stub
+		
+	}
+
+
 
 }
\ No newline at end of file
diff --git a/AMOEBAonAMAK/src/experiments/reinforcement/ReinforcementMultiUI.java b/AMOEBAonAMAK/src/experiments/reinforcement/ReinforcementMultiUI.java
new file mode 100644
index 0000000000000000000000000000000000000000..a0cf4f059de05312393273f24da50d3f10009b97
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/reinforcement/ReinforcementMultiUI.java
@@ -0,0 +1,617 @@
+package experiments.reinforcement;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Random;
+
+import agents.context.localModel.TypeLocalModel;
+import experiments.FILE;
+import experiments.reinforcement.SimpleReinforcement1DSpatialRewardAndActionMltiUI.Environment;
+import experiments.reinforcement.SimpleReinforcement1DSpatialRewardAndActionMltiUI.LearningAgent;
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.tools.Log;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.VUIMulti;
+import fr.irit.smac.amak.ui.drawables.Drawable;
+import gui.AmoebaMultiUIWindow;
+import gui.AmoebaWindow;
+import javafx.application.Application;
+import javafx.application.Platform;
+import javafx.beans.value.ChangeListener;
+import javafx.beans.value.ObservableValue;
+import javafx.scene.control.Slider;
+import javafx.stage.Stage;
+import kernel.AMOEBA;
+import kernel.StudiedSystem;
+import kernel.World;
+import kernel.backup.BackupSystem;
+import kernel.backup.IBackupSystem;
+import kernel.backup.SaveHelperDummy;
+import kernel.backup.SaveHelperImpl;
+import utils.Pair;
+import utils.RandomUtils;
+import utils.TRACE_LEVEL;
+import utils.XmlConfigGenerator;
+
+
+/**
+ * The Class BadContextLauncherEasy.
+ */
+public class ReinforcementMultiUI extends Application implements Serializable {
+
+
+	public static final double oracleNoiseRange = 0.5;
+	public static final double learningSpeed = 0.01;
+	public static final int regressionPoints = 100;
+	public static final int dimension = 2;
+	public static final double spaceSize = 50.0	;
+	public static final int nbOfModels = 3	;
+	public static final int normType = 2	;
+	public static final boolean randomExploration = true;
+	public static final boolean limitedToSpaceZone = true;
+	//public static final double mappingErrorAllowed = 0.07; // BIG SQUARE
+	public static double mappingErrorAllowed = 0.03; // MULTI
+	public static final double explorationIncrement = 1.0	;
+	public static final double explorationWidht = 0.5	;
+	
+	public static final int nbCycle = 1000;
+	
+	/* Learn and Test */
+	public static final int MAX_STEP_PER_EPISODE = 200;
+	public static final int N_LEARN = 1000;//400
+	public static final int N_TEST = 100;
+	
+	/* Exploration */
+	public static final double MIN_EXPLO_RATE = 0.02;
+	public static final double EXPLO_RATE_DIMINUTION_FACTOR = 0.01;
+	public static final double EXPLO_RATE_BASE = 1;
+	
+	AMOEBA amoebaSpatialReward;
+	VUIMulti amoebaSpatialRewardVUI;
+	AmoebaMultiUIWindow amoebaSpatialRewardUI;
+	
+	AMOEBA amoebaControlModel;
+	VUIMulti amoebaControlModelVUI;
+	AmoebaMultiUIWindow amoebaControlModelUI;
+	
+	LearningAgent agent;
+	Environment env;
+	
+	int nbStep;
+	boolean done;
+	boolean invalid;
+	HashMap<String, Double> action;
+	HashMap<String, Double> state ;
+	HashMap<String, Double> state2;
+	double totReward;
+	
+	public static void main(String[] args) throws IOException {
+		
+		
+		Application.launch(args);
+
+
+	}
+	
+	@Override
+	public void start(Stage arg0) throws Exception, IOException {
+
+		Configuration.multiUI=true;
+		Configuration.commandLineMode = false;
+		Configuration.allowedSimultaneousAgentsExecution = 1;
+		Configuration.waitForGUI = true;
+		Configuration.plotMilliSecondsUpdate = 20000;
+		
+		amoebaSpatialRewardVUI = new VUIMulti("2D");
+		amoebaSpatialRewardUI = new AmoebaMultiUIWindow("SPATIAL REWARD", amoebaSpatialRewardVUI);
+		
+		
+		amoebaControlModelVUI = new VUIMulti("2D");
+		amoebaControlModelUI = new AmoebaMultiUIWindow("CONTROL MODEL", amoebaControlModelVUI);
+		
+		startTask(100, 0);		
+	}
+	
+	public void startTask(long wait, int cycles) 
+    {
+        // Create a Runnable
+        Runnable task = new Runnable()
+        {
+            public void run()
+            {
+                runTask(wait, cycles);
+            }
+        };
+ 
+        // Run the task in a background thread
+        Thread backgroundThread = new Thread(task);
+        // Terminate the running thread if the application exits
+        backgroundThread.setDaemon(true);
+        // Start the thread
+        backgroundThread.start();
+        
+     
+    }
+	
+	
+	public void runTask(long wait, int cycles) 
+    {
+		
+//		try
+//        {
+//             
+//            // Update the Label on the JavaFx Application Thread        
+//            Platform.runLater(new Runnable() 
+//            {
+//                @Override
+//                public void run() 
+//                {
+//                	agent = new AmoebaRewardAndControl();
+//                	env = new OneDimensionEnv(10);
+//                }
+//            });
+//     
+//            Thread.sleep(wait);
+//        }
+//        catch (InterruptedException e) 
+//        {
+//            e.printStackTrace();
+//        }
+		
+		
+		
+		agent = new AmoebaRewardAndControl();
+    	env = new OneDimensionEnv(10);
+		
+		state = env.reset(); // FIXME(review): known bug around this initial reset — confirm env/agent initialization order before trusting results
+		double explo = EXPLO_RATE_BASE;
+		for(int i = 0; i < N_LEARN; i++) {
+			nbStep = 0;
+			state = env.reset();
+			action = new HashMap<String, Double>();
+			totReward = 0.0;
+			
+			// execute simulation cycles
+			done = false;
+			invalid = false;
+			
+			
+			while(!done && !invalid) {
+				
+				 try
+		            {
+		                 
+		                // Update the Label on the JavaFx Application Thread        
+		                Platform.runLater(new Runnable() 
+		                {
+		                    @Override
+		                    public void run() 
+		                    {
+		                    	nbStep++;
+		        				if(nbStep > MAX_STEP_PER_EPISODE) {
+		        					invalid = true;
+		        				}
+		        				state.remove("oracle");
+		        				
+		        				action = new HashMap<String, Double>();
+		        				
+		        				action = agent.explore(state, env);
+//		        				if(rand.nextDouble() < explo) {
+//		        					action = agent.explore(state, env);
+//		        				} else {
+//		        					action = agent.choose(state, env);
+//		        				}
+		        				
+		        				
+		        				state2 = env.step(action);  // new position with associated reward
+		        				
+		        				if(state2.get("oracle") != -1.0) { //if goal or end of world
+		        					done = true;
+		        				}
+		        				action.put("p1", state.get("p1")); //add previous state to action
+		        				
+		        				action.put("oracle", state2.get("oracle")); //add current reward to action
+		        				
+		        				// state : previous position and associated reward
+		        				// state2 : new position with current reward
+		        				// action : previous state, current action and current reward
+		        				
+		        				agent.learn(state, state2, action, done);
+		        				totReward += action.get("oracle");
+		        				
+		        				state = state2;
+		                    }
+		                });
+		         
+		                Thread.sleep(wait);
+		            }
+		            catch (InterruptedException e) 
+		            {
+		                e.printStackTrace();
+		            }
+				
+				
+				
+			}
+			
+			System.out.println("-----------------------------------------------------------------------");
+			
+			// update exploration rate
+			if(explo > MIN_EXPLO_RATE) {
+				explo -= EXPLO_RATE_DIMINUTION_FACTOR;
+				if(explo < MIN_EXPLO_RATE)
+					explo = MIN_EXPLO_RATE;
+			}
+			
+			System.out.println("Episode "+i+"  reward : "+totReward+"  explo : "+explo);
+			//double testAR = test(agent, env, r, N_TEST);
+			//averageRewards.add(testAR);
+			
+			//Scanner scan = new Scanner(System.in);
+			//scan.nextLine();
+		}
+		
+        
+    }   
+	
+	
+	
+	@Override
+	public void stop() throws Exception {
+		super.stop();
+		System.exit(0);
+	}
+
+	
+	
+	public static String fileName(ArrayList<String> infos) {
+		String fileName = "";
+		
+		for(String info : infos) {
+			fileName += info + "_";
+		}
+		
+		return fileName;
+	}
+	
+	public static void writeMessage(FILE file, ArrayList<String> message) {
+		
+		file.initManualMessage();
+		
+		for(String m : message) {
+			file.addManualMessage(m);
+		}
+		
+		file.sendManualMessage();
+		
+	}
+	
+	/**
+	 * An environment in which a LearningAgent reside
+	 * @author Hugo
+	 *
+	 */
+	public interface Environment {
+		public List<String> actionSpace();
+		public List<String> perceptionSpace();
+		public HashMap<String, Double> reset();
+		public HashMap<String, Double> step(HashMap<String, Double> action);
+		public HashMap<String, Double> randomAction();
+	}
+	
+	/**
+	 * Wrapper for any kind of learning agent
+	 * @author Hugo
+	 *
+	 */
+	public interface LearningAgent {
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env);
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env);
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2, HashMap<String, Double> action, boolean done);
+	}
+	
+	public class AmoebaRewardAndControl implements LearningAgent {
+		public double lr = 0.8;
+		public double gamma = 0.9;
+		private Random rand = new Random();
+		
+		public AmoebaRewardAndControl() {
+			amoebaSpatialReward = setupSpatialReward();
+			amoebaControlModel = setupControlModel();
+		}
+		
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			
+//			HashMap<String, Double> stateWithVizuAdded = new HashMap<String, Double>(state);
+//			stateWithVizuAdded.put("p2", 0.0);
+//			stateWithVizuAdded.put("oracle", 0.0);
+//			HashMap<String, Double> bestFuturePosition =  amoebaSpatialReward.reinforcementRequest(stateWithVizuAdded);
+//			
+//			HashMap<String, Double> action = new HashMap<String, Double>();
+//			if(bestFuturePosition!=null) {
+//				HashMap<String, Double> requestForControlModel = new HashMap<String, Double>();
+//				requestForControlModel.put("pCurrent", state.get("p1"));
+//				requestForControlModel.put("pGoal", bestFuturePosition.get("p1"));
+//				
+//				double bestAction = amoebaControlModel.request(requestForControlModel);
+//				
+//				
+//				action.put("a1", bestAction);
+//			}
+//			action = env.randomAction();
+//			
+//			return action;
+			return null;
+		}
+
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> positionAndReward,
+				HashMap<String, Double> action, boolean done) {
+			
+			// state : previous position and associated reward
+			// state2 : new position with current reward
+			// action : previous state, current actions and current reward
+			
+			HashMap<String, Double> previousStateCurrentStateAction = new HashMap<>();
+			previousStateCurrentStateAction.put("pCurrent", action.get("p1"));
+			previousStateCurrentStateAction.put("pGoal", positionAndReward.get("p1"));
+			previousStateCurrentStateAction.put("oracle", action.get("a1"));
+			
+
+			
+			//System.out.println("ControlModel " + previousStateCurrentStateAction + "                  ---------------- SIMPLE REIN XP 149");
+			//System.out.println("SpatialReward " + positionAndReward + "                  ---------------- SIMPLE REIN XP 149");
+			
+			amoebaSpatialReward.learn(positionAndReward);
+			amoebaControlModel.learn(previousStateCurrentStateAction);
+			
+		}
+
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+	}
+	
+	
+	
+	
+	
+	public static class OneDimensionEnv implements Environment {
+		private Random rand = new Random();
+		private double x = 0;
+		private double reward = 0;
+		private double size;
+		private Drawable pos;
+		
+		public OneDimensionEnv(double envSize) {
+			
+			size = envSize;
+			
+			
+		}
+		
+		@Override
+		public HashMap<String, Double> reset(){
+			x = RandomUtils.nextDouble(rand, -size, Math.nextUp(size));
+			x = Math.round(x);
+			reward = 0.0;
+			//pos.move(x+0.5, 0.5);
+			
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("oracle", reward);
+			return ret;
+		}
+		
+		@Override
+		public HashMap<String, Double> step(HashMap<String, Double> actionMap){
+			double action = actionMap.get("a1");
+			//if(action == 0.0) action = rand.nextDouble();
+			if(action > 0.0) action = Math.ceil(action);
+			if(action < 0.0 ) action = Math.floor(action);
+			if(action > 1.0) action = 1.0;
+			if(action < -1.0) action = -1.0;
+			double oldX = x;
+			x = x + action;
+			
+			
+			//System.out.println("ACTIONS " + " a1 " +action + " " + " a2 " + action2);
+			if(x < -size || x > size) {
+				reward = -1000.0;
+			} else if((x == 0.0) || (sign(oldX) != sign(x) )) {
+				// win !
+				reward = 1000.0;
+			} else {
+				reward = -1.0;
+			}
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("oracle", reward);
+			//pos.move(x+0.5, 0.5);
+			return ret;
+		}
+
+		@Override
+		public List<String> actionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("a1 enum:true {-1, 0, 1}");
+			return l;
+		}
+
+		@Override
+		public List<String> perceptionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("p1 enum:false [-"+size+", "+size+"]");
+			return l;
+		}
+
+		@Override
+		public HashMap<String, Double> randomAction() {
+			double a1 = rand.nextBoolean() ? -1 : 1;
+			
+						
+
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a1);
+			return action;
+			}
+		
+	}
+	
+	
+	
+	
+	
+	private AMOEBA setupSpatialReward() {
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("p1", false));
+		File config;
+		try {
+			config = File.createTempFile("configSpatialReward", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable after System.exit(1); satisfies the compiler's definite-assignment check for config
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(amoebaSpatialRewardUI, amoebaSpatialRewardVUI, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		
+		
+
+		
+		amoeba.setLocalModel(TypeLocalModel.MILLER_REGRESSION);
+		amoeba.getEnvironment().setMappingErrorAllowed(0.025);
+		amoeba.setReinforcement(true);
+		
+		
+		return amoeba;
+	}
+	
+
+	private AMOEBA setupControlModel() {
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("pCurrent", false));
+		sensors.add(new Pair<String, Boolean>("pGoal", false));
+		File config;
+		try {
+			config = File.createTempFile("configControlModel", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable after System.exit(1); satisfies the compiler's definite-assignment check for config
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(amoebaControlModelUI, amoebaControlModelVUI, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		
+		
+		
+		
+		amoeba.setLocalModel(TypeLocalModel.MILLER_REGRESSION);
+		amoeba.getEnvironment().setMappingErrorAllowed(0.025);
+		
+		return amoeba;
+	}
+
+
+	private static int sign(double x) {
+		return x < 0 ? -1 : 1;
+	}
+	
+	/**
+	 * Teach a learning agent on the SimpleReinforcement problem
+	 * @param agent
+	 * @return
+	 */
+	public static ArrayList<Double> learning(LearningAgent agent, Environment env){
+		ArrayList<Double> averageRewards = new ArrayList<Double>();
+		Random rand = new Random();
+		
+		Random r = new Random();
+		HashMap<String, Double> state = env.reset();
+		HashMap<String, Double> state2;
+		double explo = EXPLO_RATE_BASE;
+		for(int i = 0; i < N_LEARN; i++) {
+			int nbStep = 0;
+			state = env.reset();
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			double totReward = 0.0;
+			
+			// execute simulation cycles
+			boolean done = false;
+			boolean invalid = false;
+			
+			
+			while(!done && !invalid) {
+				nbStep++;
+				if(nbStep > MAX_STEP_PER_EPISODE) {
+					invalid = true;
+				}
+				state.remove("oracle");
+				
+				action = new HashMap<String, Double>();
+				
+				action = agent.explore(state, env);
+//				if(rand.nextDouble() < explo) {
+//					action = agent.explore(state, env);
+//				} else {
+//					action = agent.choose(state, env);
+//				}
+				
+				
+				state2 = env.step(action);  // new position with associated reward
+				
+				if(state2.get("oracle") != -1.0) { //if goal or end of world
+					done = true;
+				}
+				action.put("p1", state.get("p1")); //add previous state to action
+				
+				action.put("oracle", state2.get("oracle")); //add current reward to action
+				
+				// state : previous position and associated reward
+				// state2 : new position with current reward
+				// action : previous state, current action and current reward
+				
+				agent.learn(state, state2, action, done);
+				totReward += action.get("oracle");
+				
+				state = state2;
+			}
+			
+			System.out.println("-----------------------------------------------------------------------");
+			
+			// update exploration rate
+			if(explo > MIN_EXPLO_RATE) {
+				explo -= EXPLO_RATE_DIMINUTION_FACTOR;
+				if(explo < MIN_EXPLO_RATE)
+					explo = MIN_EXPLO_RATE;
+			}
+			
+			System.out.println("Episode "+i+"  reward : "+totReward+"  explo : "+explo);
+			//double testAR = test(agent, env, r, N_TEST);
+			//averageRewards.add(testAR);
+			
+			//Scanner scan = new Scanner(System.in);
+			//scan.nextLine();
+		}
+		
+		return averageRewards;
+	}
+	
+}
diff --git a/AMOEBAonAMAK/src/experiments/reinforcement/ReinforcementMultiUI2D.java b/AMOEBAonAMAK/src/experiments/reinforcement/ReinforcementMultiUI2D.java
new file mode 100644
index 0000000000000000000000000000000000000000..65a84c3634fc6b413bfe954b72c91fa4c7567041
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/reinforcement/ReinforcementMultiUI2D.java
@@ -0,0 +1,553 @@
+package experiments.reinforcement;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Random;
+
+import agents.context.localModel.TypeLocalModel;
+import experiments.FILE;
+import experiments.reinforcement.SimpleReinforcement1DSpatialRewardAndActionMltiUI.Environment;
+import experiments.reinforcement.SimpleReinforcement1DSpatialRewardAndActionMltiUI.LearningAgent;
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.tools.Log;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.VUIMulti;
+import fr.irit.smac.amak.ui.drawables.Drawable;
+import gui.AmoebaMultiUIWindow;
+import gui.AmoebaWindow;
+import javafx.application.Application;
+import javafx.application.Platform;
+import javafx.beans.value.ChangeListener;
+import javafx.beans.value.ObservableValue;
+import javafx.scene.control.Slider;
+import javafx.stage.Stage;
+import kernel.AMOEBA;
+import kernel.StudiedSystem;
+import kernel.World;
+import kernel.backup.BackupSystem;
+import kernel.backup.IBackupSystem;
+import kernel.backup.SaveHelperDummy;
+import kernel.backup.SaveHelperImpl;
+import utils.Pair;
+import utils.RandomUtils;
+import utils.TRACE_LEVEL;
+import utils.XmlConfigGenerator;
+
+
+/**
+ * The Class BadContextLauncherEasy.
+ */
+public class ReinforcementMultiUI2D extends Application implements Serializable {
+
+
+	// --- AMOEBA experiment tuning constants ---
+	public static final double oracleNoiseRange = 0.5;
+	public static final double learningSpeed = 0.01;
+	public static final int regressionPoints = 100;
+	public static final int dimension = 2;
+	public static final double spaceSize = 50.0	;
+	public static final int nbOfModels = 3	;
+	public static final int normType = 2	;
+	public static final boolean randomExploration = true;
+	public static final boolean limitedToSpaceZone = true;
+	//public static final double mappingErrorAllowed = 0.07; // BIG SQUARE
+	public static double mappingErrorAllowed = 0.03; // MULTI
+	public static final double explorationIncrement = 1.0	;
+	public static final double explorationWidht = 0.5	;
+	
+	public static final int nbCycle = 1000;
+	
+	/* Learn and Test */
+	// An episode is marked invalid once it exceeds this many steps.
+	public static final int MAX_STEP_PER_EPISODE = 200;
+	public static final int N_LEARN = 1000;//400
+	public static final int N_TEST = 100;
+	
+	/* Exploration */
+	// Epsilon-greedy schedule: starts at BASE, decays linearly down to MIN.
+	public static final double MIN_EXPLO_RATE = 0.02;
+	public static final double EXPLO_RATE_DIMINUTION_FACTOR = 0.01;
+	public static final double EXPLO_RATE_BASE = 1;
+	
+	// Three AMOEBA instances, each paired with its own multi-UI window and VUI
+	// canvas: one learns the spatial reward map, the other two (currently
+	// disabled, see AmoebaRewardAndControl) would learn per-axis action models.
+	AMOEBA amoebaSpatialReward;
+	VUIMulti amoebaSpatialRewardVUI;
+	AmoebaMultiUIWindow amoebaSpatialRewardUI;
+	
+	AMOEBA amoebaActionModel1;
+	VUIMulti amoebaActionModel1VUI;
+	AmoebaMultiUIWindow amoebaActionModel1UI;
+	
+	AMOEBA amoebaActionModel2;
+	VUIMulti amoebaActionModel2VUI;
+	AmoebaMultiUIWindow amoebaActionModel2UI;
+	
+	LearningAgent agent;
+	Environment env;
+	
+	// Per-episode simulation state, shared between threads.
+	// NOTE(review): these fields are mutated on the JavaFX thread (Platform.runLater
+	// in runTask) but read from a background thread without synchronization or
+	// volatile — visibility across threads is not guaranteed; confirm intended.
+	int nbStep;
+	boolean done;
+	boolean invalid;
+	HashMap<String, Double> action;
+	HashMap<String, Double> state ;
+	HashMap<String, Double> state2;
+	double totReward;
+	
+	/**
+	 * Entry point: hands control to the JavaFX runtime, which instantiates this
+	 * Application and invokes {@link #start(Stage)} on the FX application thread.
+	 */
+	public static void main(String[] args) throws IOException {
+		// launch() blocks until the JavaFX application exits.
+		Application.launch(args);
+	}
+	
+	/**
+	 * JavaFX entry point: configures AMAK for multi-UI operation, creates the
+	 * three AMOEBA windows, then launches the simulation loop on a background
+	 * thread via {@link #startTask(long, int)}.
+	 */
+	@Override
+	public void start(Stage arg0) throws Exception, IOException {
+
+		Configuration.multiUI=true;
+		Configuration.commandLineMode = false;
+		Configuration.allowedSimultaneousAgentsExecution = 1;
+		Configuration.waitForGUI = true;
+		Configuration.plotMilliSecondsUpdate = 20000;
+		
+		amoebaSpatialRewardVUI = new VUIMulti("2D");
+		amoebaSpatialRewardUI = new AmoebaMultiUIWindow("SPATIAL REWARD", amoebaSpatialRewardVUI);
+		
+		amoebaActionModel1VUI = new VUIMulti("2D");
+		amoebaActionModel1UI = new AmoebaMultiUIWindow("ACTION 1 MODEL", amoebaActionModel1VUI);
+		
+		amoebaActionModel2VUI = new VUIMulti("2D");
+		amoebaActionModel2UI = new AmoebaMultiUIWindow("ACTION 2 MODEL", amoebaActionModel2VUI);
+		
+		// 100 ms pause between simulation steps; the cycles argument is unused by runTask.
+		startTask(100, 0);		
+	}
+	
+	/**
+	 * Runs {@link #runTask(long, int)} on a background daemon thread so the
+	 * JavaFX application thread stays free to render the UI.
+	 *
+	 * @param wait   delay in milliseconds between two simulation steps
+	 * @param cycles forwarded to runTask (currently unused there)
+	 */
+	public void startTask(long wait, int cycles) 
+    {
+        // Daemon thread: terminates automatically when the application exits.
+        Thread backgroundThread = new Thread(() -> runTask(wait, cycles));
+        backgroundThread.setDaemon(true);
+        backgroundThread.start();
+    }
+	
+	
+	/**
+	 * Background simulation loop: runs N_LEARN episodes of the 2D reinforcement
+	 * task, executing each step on the JavaFX thread via Platform.runLater and
+	 * sleeping {@code wait} ms between steps.
+	 *
+	 * NOTE(review): the while-loop below reads {@code done}/{@code invalid} on
+	 * this background thread while the runLater body mutates them on the FX
+	 * thread, with no synchronization or volatile — the loop may observe stale
+	 * values. Confirm this is acceptable or guard the shared fields.
+	 *
+	 * @param wait   delay in milliseconds between two simulation steps
+	 * @param cycles unused
+	 */
+	public void runTask(long wait, int cycles) 
+    {
+				
+		
+		agent = new AmoebaRewardAndControl();
+    	env = new TwoDimensionEnv(10);
+		
+		state = env.reset(); 
+		double explo = EXPLO_RATE_BASE;
+		for(int i = 0; i < N_LEARN; i++) {
+			nbStep = 0;
+			state = env.reset();
+			action = new HashMap<String, Double>();
+			totReward = 0.0;
+			
+			// execute simulation cycles
+			done = false;
+			invalid = false;
+			
+			
+			while(!done && !invalid) {
+				
+				 try
+		            {
+		                 
+		                // Run one simulation step on the JavaFX Application Thread
+		                Platform.runLater(new Runnable() 
+		                {
+		                    @Override
+		                    public void run() 
+		                    {
+		                    	nbStep++;
+		        				if(nbStep > MAX_STEP_PER_EPISODE) {
+		        					invalid = true;
+		        				}
+		        				state.remove("oracle");
+		        				
+		        				action = new HashMap<String, Double>();
+		        				
+		        				// Pure exploration; the epsilon-greedy variant below is disabled.
+		        				action = agent.explore(state, env);
+//		        				if(rand.nextDouble() < explo) {
+//		        					action = agent.explore(state, env);
+//		        				} else {
+//		        					action = agent.choose(state, env);
+//		        				}
+		        				
+		        				
+		        				state2 = env.step(action);  // new position with associated reward
+		        				
+		        				if(state2.get("oracle") != -1.0) { //if goal or end of world
+		        					done = true;
+		        				}
+		        				action.put("p1", state.get("p1")); //add previous state to action
+		        				action.put("p2", state.get("p2")); //add previous state to action
+		        				
+		        				action.put("oracle", state2.get("oracle")); //add current reward to action
+		        				
+		        				// state : previous position and associated reward
+		        				// state2 : new position with current reward
+		        				// action : previous state, current action and current reward
+		        				
+		        				agent.learn(state, state2, action, done);
+		        				
+		        				totReward += action.get("oracle");
+		        				
+		        				state = state2;
+		                    }
+		                });
+		         
+		                Thread.sleep(wait);
+		            }
+		            catch (InterruptedException e) 
+		            {
+		                // NOTE(review): interrupt is swallowed; consider re-interrupting
+		                // with Thread.currentThread().interrupt().
+		                e.printStackTrace();
+		            }
+				
+				
+				
+			}
+			
+			System.out.println("-----------------------------------------------------------------------");
+			
+			// update exploration rate (linear decay, clamped at MIN_EXPLO_RATE;
+			// currently only logged since exploitation is disabled above)
+			if(explo > MIN_EXPLO_RATE) {
+				explo -= EXPLO_RATE_DIMINUTION_FACTOR;
+				if(explo < MIN_EXPLO_RATE)
+					explo = MIN_EXPLO_RATE;
+			}
+			
+			System.out.println("Episode "+i+"  reward : "+totReward+"  explo : "+explo);
+			//double testAR = test(agent, env, r, N_TEST);
+			//averageRewards.add(testAR);
+			
+			//Scanner scan = new Scanner(System.in);
+			//scan.nextLine();
+		}
+		
+        
+    }   
+	
+	
+	
+	/** Called by JavaFX on shutdown; terminates the whole JVM, including the background task. */
+	@Override
+	public void stop() throws Exception {
+		super.stop();
+		System.exit(0);
+	}
+
+	
+	
+	/**
+	 * Builds a file name by concatenating every fragment, each followed by an
+	 * underscore (e.g. ["a", "b"] yields "a_b_"; an empty list yields "").
+	 *
+	 * @param infos ordered name fragments
+	 * @return the underscore-terminated concatenation of all fragments
+	 */
+	public static String fileName(ArrayList<String> infos) {
+		// StringBuilder avoids the O(n^2) cost of repeated String += in a loop.
+		StringBuilder fileName = new StringBuilder();
+		
+		for(String info : infos) {
+			fileName.append(info).append('_');
+		}
+		
+		return fileName.toString();
+	}
+	
+	/**
+	 * Sends every string of {@code message} to {@code file} as a single manual
+	 * message: init, add each line, then send.
+	 */
+	public static void writeMessage(FILE file, ArrayList<String> message) {
+		
+		file.initManualMessage();
+		
+		for(String m : message) {
+			file.addManualMessage(m);
+		}
+		
+		file.sendManualMessage();
+		
+	}
+	
+	/**
+	 * An environment in which a LearningAgent resides.
+	 * @author Hugo
+	 *
+	 */
+	public interface Environment {
+		/** Textual description of the action dimensions (AMOEBA sensor-config format). */
+		public List<String> actionSpace();
+		/** Textual description of the perception dimensions (AMOEBA sensor-config format). */
+		public List<String> perceptionSpace();
+		/** Resets the environment; returns the initial state including its "oracle" reward. */
+		public HashMap<String, Double> reset();
+		/** Applies an action; returns the resulting state including its "oracle" reward. */
+		public HashMap<String, Double> step(HashMap<String, Double> action);
+		/** Returns a random valid action. */
+		public HashMap<String, Double> randomAction();
+	}
+	
+	/**
+	 * Wrapper for any kind of learning agent.
+	 * @author Hugo
+	 *
+	 */
+	public interface LearningAgent {
+		/** Exploitation: pick the best-known action for {@code state}. */
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env);
+		/** Exploration: pick an action for {@code state} without exploiting current knowledge. */
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env);
+		/** Update the agent from one transition (state, action) -> state2; {@code done} marks episode end. */
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2, HashMap<String, Double> action, boolean done);
+	}
+	
+	/**
+	 * LearningAgent backed by AMOEBA instances: one learns the spatial reward
+	 * map; the two action-model AMOEBAs are currently disabled (commented out).
+	 */
+	public class AmoebaRewardAndControl implements LearningAgent {
+		// NOTE(review): lr, gamma and rand are never read in the visible code.
+		public double lr = 0.8;
+		public double gamma = 0.9;
+		private Random rand = new Random();
+		
+		public AmoebaRewardAndControl() {
+			amoebaSpatialReward = setupSpatialReward();
+			//amoebaActionModel1 = setupControlModel("1", amoebaActionModel1UI, amoebaActionModel1VUI);
+			//amoebaActionModel2 = setupControlModel("2", amoebaActionModel2UI, amoebaActionModel2VUI);
+		}
+		
+		/**
+		 * Exploitation is disabled: always returns null. The intended logic
+		 * (reinforcementRequest + control model) is kept commented out below.
+		 * Callers must not rely on this method (runTask only calls explore()).
+		 */
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			
+//			HashMap<String, Double> stateWithVizuAdded = new HashMap<String, Double>(state);
+//			stateWithVizuAdded.put("p2", 0.0);
+//			stateWithVizuAdded.put("oracle", 0.0);
+//			HashMap<String, Double> bestFuturePosition =  amoebaSpatialReward.reinforcementRequest(stateWithVizuAdded);
+//			
+//			HashMap<String, Double> action = new HashMap<String, Double>();
+//			if(bestFuturePosition!=null) {
+//				HashMap<String, Double> requestForControlModel = new HashMap<String, Double>();
+//				requestForControlModel.put("pCurrent", state.get("p1"));
+//				requestForControlModel.put("pGoal", bestFuturePosition.get("p1"));
+//				
+//				double bestAction = amoebaControlModel.request(requestForControlModel);
+//				
+//				
+//				action.put("a1", bestAction);
+//			}
+//			action = env.randomAction();
+//			
+//			return action;
+			return null;
+		}
+
+		/**
+		 * Feeds the spatial-reward AMOEBA with the new position and its reward.
+		 * The per-axis control-model samples are built but not learned (disabled).
+		 */
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> positionAndReward,
+				HashMap<String, Double> action, boolean done) {
+			
+			// state : previous position and associated reward
+			// state2 : new position with current reward
+			// action : previous state, current actions and current reward
+			
+			HashMap<String, Double> previousStateCurrentStateAction1 = new HashMap<>();
+			previousStateCurrentStateAction1.put("p1Current", action.get("p1"));
+			previousStateCurrentStateAction1.put("p2Current", action.get("p2"));
+			previousStateCurrentStateAction1.put("p1Goal", positionAndReward.get("p1"));
+			previousStateCurrentStateAction1.put("oracle", action.get("a1"));
+			
+			HashMap<String, Double> previousStateCurrentStateAction2 = new HashMap<>();
+			previousStateCurrentStateAction2.put("p1Current", action.get("p1"));
+			previousStateCurrentStateAction2.put("p2Current", action.get("p2"));
+			previousStateCurrentStateAction2.put("p2Goal", positionAndReward.get("p2"));
+			previousStateCurrentStateAction2.put("oracle", action.get("a2"));
+			
+
+			
+			//System.out.println("ControlModel " + previousStateCurrentStateAction + "                  ---------------- SIMPLE REIN XP 149");
+			//System.out.println("SpatialReward " + positionAndReward + "                  ---------------- SIMPLE REIN XP 149");
+			
+			amoebaSpatialReward.learn(positionAndReward);
+			//amoebaActionModel1.learn(previousStateCurrentStateAction1);
+			//amoebaActionModel2.learn(previousStateCurrentStateAction2);
+			
+		}
+
+		/** Exploration delegates to the environment's random action. */
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+	}
+	
+	
+	
+	
+	
+	/**
+	 * 2D grid environment: the agent moves one cell per step along each axis
+	 * inside [-size, size]^2. Rewards: -1000 for leaving the zone, +1000 for
+	 * reaching/crossing the origin, -1 otherwise.
+	 */
+	public static class TwoDimensionEnv implements Environment {
+		private Random rand = new Random();
+		private double x = 0;
+		private double y = 0;
+		private double reward = 0;
+		private double size;
+		// NOTE(review): never assigned; the pos.move(...) calls are commented out.
+		private Drawable pos;
+		
+		public TwoDimensionEnv(double envSize) {
+			
+			size = envSize;
+			
+			
+		}
+		
+		/** Places the agent at a uniformly random integer cell; reward reset to 0. */
+		@Override
+		public HashMap<String, Double> reset(){
+			x = RandomUtils.nextDouble(rand, -size, Math.nextUp(size));
+			x = Math.round(x);
+			y = RandomUtils.nextDouble(rand, -size, Math.nextUp(size));
+			y = Math.round(y);
+			reward = 0.0;
+			//pos.move(x+0.5, 0.5);
+			
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("p2", y);
+			ret.put("oracle", reward);
+			return ret;
+		}
+		
+		/**
+		 * Moves by one step per axis: each action value is rounded away from
+		 * zero then clamped to [-1, 1] before being added to the position.
+		 */
+		@Override
+		public HashMap<String, Double> step(HashMap<String, Double> actionMap){
+			double action = actionMap.get("a1");
+			//if(action == 0.0) action = rand.nextDouble();
+			if(action > 0.0) action = Math.ceil(action);
+			if(action < 0.0 ) action = Math.floor(action);
+			if(action > 1.0) action = 1.0;
+			if(action < -1.0) action = -1.0;
+			double oldX = x;
+			x = x + action;
+			
+			double action2 = actionMap.get("a2");
+			//if(action2 == 0.0) action2 = rand.nextDouble();
+			if(action2 > 0.0) action2 = Math.ceil(action2);
+			if(action2 < 0.0 ) action2 = Math.floor(action2);
+			if(action2 > 1.0) action2 = 1.0;
+			if(action2 < -1.0) action2 = -1.0;
+			double oldY = y;
+			y = y + action2;
+			
+			//System.out.println("ACTIONS " + " a1 " +action + " " + " a2 " + action2);
+			if(x < -size || x > size || y < -size || y > size) {
+				reward = -1000.0;
+			} else if((x == 0.0 && y == 0.0) || (sign(oldX) != sign(x) && sign(oldY) != sign(y) )) {
+				// win ! NOTE(review): the sign test requires crossing BOTH axes in
+				// the same step (&&), unlike the 1D env where one crossing wins —
+				// confirm this is intended rather than ||.
+				reward = 1000.0;
+			} else {
+				reward = -1.0;
+			}
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("p2", y);
+			ret.put("oracle", reward);
+			//pos.move(x+0.5, 0.5);
+			return ret;
+		}
+
+		@Override
+		public List<String> actionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("a1 enum:true {-1, 0, 1}");
+			l.add("a2 enum:true {-1, 0, 1}");
+			return l;
+		}
+
+		@Override
+		public List<String> perceptionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("p1 enum:false [-"+size+", "+size+"]");
+			l.add("p2 enum:false [-"+size+", "+size+"]");
+			return l;
+		}
+
+		/**
+		 * Random move: a1 uniform over {-1, 0, 1}; when a1 is 0, a2 is forced
+		 * to +/-1 so the agent never stays in place.
+		 */
+		@Override
+		public HashMap<String, Double> randomAction() {
+			double a1 = rand.nextInt(3) - 1;
+			double a2 = (a1 == 0.0) ? (rand.nextBoolean() ? -1 : 1) : (rand.nextInt(3) - 1);
+						
+//			double a1 =  rand.nextBoolean() ? -1 : 1;
+//			double a2 =  rand.nextBoolean() ? -1 : 1;
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a1);
+			action.put("a2", a2);
+			return action;
+			}
+		
+	}
+	
+	
+	
+	
+	
+	/**
+	 * Builds the AMOEBA that learns the spatial reward map over (p1, p2),
+	 * attached to the "SPATIAL REWARD" window, with reinforcement mode enabled.
+	 */
+	private AMOEBA setupSpatialReward() {
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("p1", false));
+		sensors.add(new Pair<String, Boolean>("p2", false));
+		File config;
+		try {
+			// Generate a throwaway XML configuration for the declared sensors.
+			config = File.createTempFile("configSpatialReward", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable; lets the compiler know config is initialized
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.DEBUG;
+		AMOEBA amoeba = new AMOEBA(amoebaSpatialRewardUI, amoebaSpatialRewardVUI, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		
+		
+
+		
+		amoeba.setLocalModel(TypeLocalModel.MILLER_REGRESSION);
+		amoeba.getEnvironment().setMappingErrorAllowed(0.025);
+		amoeba.setReinforcement(true);
+		
+		
+		return amoeba;
+	}
+	
+
+	/**
+	 * Builds an AMOEBA learning a per-axis control model: given the current
+	 * (p1, p2) position and the goal coordinate on axis {@code action}, it
+	 * learns which action reaches that goal.
+	 *
+	 * @param action axis number ("1" or "2") used to name the goal sensor
+	 * @param window UI window hosting this AMOEBA
+	 * @param VUI    VUI canvas for this AMOEBA
+	 */
+	private AMOEBA setupControlModel(String action, AmoebaMultiUIWindow window, VUIMulti VUI) {
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("p1Current", false));
+		sensors.add(new Pair<String, Boolean>("p2Current", false));
+		sensors.add(new Pair<String, Boolean>("p"+action+"Goal", false));
+		File config;
+		try {
+			// Generate a throwaway XML configuration for the declared sensors.
+			config = File.createTempFile("configControlModel", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable; lets the compiler know config is initialized
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(window, VUI, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		
+		
+		
+		
+		amoeba.setLocalModel(TypeLocalModel.MILLER_REGRESSION);
+		amoeba.getEnvironment().setMappingErrorAllowed(0.025);
+		
+		return amoeba;
+	}
+
+
+	/** Sign of x with the convention sign(0) == +1 (never returns 0). */
+	private static int sign(double x) {
+		if (x < 0) {
+			return -1;
+		}
+		return 1;
+	}
+	
+	
+	
+}
diff --git a/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement.java b/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement.java
new file mode 100644
index 0000000000000000000000000000000000000000..59db4ad33e5cf2107baac0bd96c17d0fcf7e2398
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement.java
@@ -0,0 +1,590 @@
+package experiments.reinforcement;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Random;
+
+import agents.context.localModel.TypeLocalModel;
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.tools.Log;
+import fr.irit.smac.amak.ui.drawables.Drawable;
+import gui.AmoebaWindow;
+import kernel.AMOEBA;
+import kernel.World;
+import kernel.backup.SaveHelperDummy;
+import utils.Pair;
+import utils.RandomUtils;
+import utils.TRACE_LEVEL;
+import utils.XmlConfigGenerator;
+
+/**
+ * Train an amoeba on a simple reinforcement task.
+ * The goal of the task is to get to the center. When the position of the agent cross 0, it gets a reward of 100.
+ * The agent can only moves in 2 directions, of a distance of 1. Moving give a reward of -1.
+ * If the agent moves outside of the allowed range, it gets a reward of -100. 
+ * @author Hugo
+ *
+ */
+public abstract class SimpleReinforcement {
+	/* Learn and Test */
+	// An episode is aborted (counted invalid) past this many steps.
+	public static final int MAX_STEP_PER_EPISODE = 200;
+	public static final int N_LEARN = 100;
+	public static final int N_TEST = 100;
+	
+	/* Exploration */
+	// Epsilon-greedy schedule: starts at BASE, decays linearly down to MIN.
+	public static final double MIN_EXPLO_RATE = 0.02;
+	public static final double EXPLO_RATE_DIMINUTION_FACTOR = 0.01;
+	public static final double EXPLO_RATE_BASE = 1;
+	
+	/**
+	 * Runs the learning experiment once with an AmoebaQL agent on a
+	 * OneDimensionEnv, then prints the per-episode average test reward.
+	 */
+	public static void main(String[] args) {
+		//poc(true);
+		Configuration.commandLineMode = false;
+		Configuration.plotMilliSecondsUpdate = 20000;
+		/*System.out.println("----- AMOEBA -----");
+		learning(new QLearning(), new OneDimensionEnv());
+		System.out.println("----- END AMOEBA -----");
+		System.out.println("\n\n----- QLEARNING -----");
+		learning(new QLearning());
+		System.out.println("----- END QLEARNING -----");*/
+		ArrayList<ArrayList<Double>> results = new ArrayList<>();
+		// Single run; raise the loop bound to average over several runs.
+		for(int i = 0; i < 1; i++) {
+			//LearningAgent agent = new QLearning();
+			LearningAgent agent = new AmoebaQL();
+			//LearningAgent agent = new AmoebaCoop();
+			Environment env = new OneDimensionEnv();
+			results.add(learning(agent, env));
+			System.out.println(i);
+		}
+		
+		// Average episode i's test reward across runs; one line per episode.
+		int nbEpisodes = results.get(0).size();
+		for(int i = 0; i < nbEpisodes; i++) {
+			double average = 0;
+			for(int j = 0; j < results.size(); j++) {
+				average += results.get(j).get(i);
+			}
+			average /= results.size();
+			System.out.println(""+i+"\t"+average);
+		}
+		
+		//System.exit(0);
+	}
+	
+	/**
+	 * An environment in which a LearningAgent resides.
+	 * @author Hugo
+	 *
+	 */
+	public interface Environment {
+		/** Textual description of the action dimensions (AMOEBA sensor-config format). */
+		public List<String> actionSpace();
+		/** Textual description of the perception dimensions (AMOEBA sensor-config format). */
+		public List<String> perceptionSpace();
+		/** Resets the environment; returns the initial state including its "oracle" reward. */
+		public HashMap<String, Double> reset();
+		/** Applies an action; returns the resulting state including its "oracle" reward. */
+		public HashMap<String, Double> step(HashMap<String, Double> action);
+		/** Returns a random valid action. */
+		public HashMap<String, Double> randomAction();
+	}
+	
+	/**
+	 * Wrapper for any kind of learning agent.
+	 * @author Hugo
+	 *
+	 */
+	public interface LearningAgent {
+		/** Exploitation: pick the best-known action for {@code state}. */
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env);
+		/** Exploration: pick an action for {@code state} without exploiting current knowledge. */
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env);
+		/** Update the agent from one transition (state, action) -> state2; {@code done} marks episode end. */
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2, HashMap<String, Double> action, boolean done);
+	}
+	
+	/**
+	 * Q-learning-style agent backed by an AMOEBA model.
+	 * Compatible only with OneDimensionEnv.
+	 * @author Hugo
+	 *
+	 */
+	public static class AmoebaQL implements LearningAgent {
+		public AMOEBA amoeba;
+		public double lr = 0.8;
+		public double gamma = 0.9;
+		private Random rand = new Random();
+		
+		public AmoebaQL() {
+			amoeba = setup();
+			amoeba.setLocalModel(TypeLocalModel.MILLER_REGRESSION);
+			amoeba.getEnvironment().setMappingErrorAllowed(0.02);
+		}
+		
+		/**
+		 * Asks AMOEBA for the reward-maximizing action; falls back to a random
+		 * direction when AMOEBA proposes 0. The returned action is scaled down
+		 * by 20 (learn() scales it back up before learning).
+		 */
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			double a = amoeba.maximize(state).getOrDefault("a1", 0.0);
+			if(a == 0.0) {
+				a = rand.nextBoolean() ? -1 : 1;
+			}
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a/20);
+			return action;
+		}
+
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2,
+				HashMap<String, Double> action, boolean done) {
+			
+			// state : previous position and associated reward
+			// state2 : new position with current reward
+			// action : previous state, current action and current reward
+			
+			HashMap<String, Double> state2Copy = new HashMap<>(state2);
+			state2Copy.remove("oracle"); //reward
+			
+			double reward = state2.get("oracle");
+			double q;
+			if(!done) {
+				
+				// TD error: reward + gamma * estimated future reward - expected reward.
+				double expectedReward = amoeba.request(action);
+				HashMap<String, Double> futureState = this.choose(state2Copy, null);
+				futureState.putAll(state2);
+				double futureReward = amoeba.request(futureState);
+				//double futureAction = this.choose(state2Copy, null).get("a1")/20;
+				
+				q = reward + gamma * futureReward - expectedReward;
+			} else {
+				q = reward;
+			}
+			HashMap<String, Double> learn = new HashMap<>(action);
+			
+			// Undo the /20 scaling applied by choose() before learning.
+			learn.put("a1", learn.get("a1")*20);
+			//learn.put("oracle", lr * q);
+			// NOTE(review): the TD error q is computed but unused — the raw reward
+			// is learned instead (the lr*q variant is commented out). Confirm intended.
+			learn.put("oracle", reward);
+			
+			// learn : previous state, current action and current Q learning reward
+			
+			amoeba.learn(learn);
+		}
+
+		/** Exploration delegates to the environment's random action. */
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+	}
+	
+	/**
+	 * Wrapper for AMOEBA: learns the (state, action) -> reward mapping directly
+	 * and exploits it via maximize().
+	 * @author Hugo
+	 *
+	 */
+	public static class AmoebaCoop implements LearningAgent {
+		public AMOEBA amoeba;
+		
+		public AmoebaCoop() {
+			amoeba = setup();
+			amoeba.setLocalModel(TypeLocalModel.COOP_MILLER_REGRESSION);
+			amoeba.getEnvironment().setMappingErrorAllowed(0.009);
+		}
+		
+		/** Picks AMOEBA's reward-maximizing action; random fallback when AMOEBA has no proposition. */
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			HashMap<String, Double> action = amoeba.maximize(state);
+			if(action.get("oracle") == Double.NEGATIVE_INFINITY) {
+				action = env.randomAction();
+			}
+			return action;
+		}
+
+		/** Learns the action sample directly (it carries previous state + reward). */
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2,
+				HashMap<String, Double> action, boolean done) {
+			amoeba.learn(action);
+		}
+
+		/** Exploration delegates to the environment's random action. */
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+		
+	}
+	
+	/**
+	 * Compatible only with OneDimensionEnv.<br/>
+	 * An extremely crude and quick implementation of Q learning.
+	 * Not expected to perform well, but should be better than random.
+	 * @author Hugo
+	 *
+	 */
+	public static class QLearning implements LearningAgent {
+		// Q[position + 50][action]: actions are mapped {-1 -> 0, +1 -> 1}.
+		public double[][] Q = new double[102][2];
+		public double lr = 0.8;
+		public double gamma = 0.9;
+		private Random rand = new Random();
+		
+		/** Greedy action for the current position; ties broken at random. */
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			int p = state.get("p1").intValue()+50;
+			double a;
+			if(Q[p][0] == Q[p][1]) {
+				a = rand.nextBoolean() ? -1 : 1;
+			} else {
+				a = Q[p][0] > Q[p][1] ? -1 : 1;
+			}
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a);
+			return action;
+		}
+
+		/** Tabular Q-learning update: Q[s][a] += lr * (r + gamma * max_a' Q[s'][a'] - Q[s][a]). */
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2,
+				HashMap<String, Double> action, boolean done) {
+			int p = state.get("p1").intValue()+50;
+			int p2 = state2.get("p1").intValue()+50;
+			int a = action.get("a1").intValue() == -1 ? 0 : 1;
+			double reward = state2.get("oracle");
+			double max = Double.NEGATIVE_INFINITY;
+			if(!done) {
+				// Primitive double avoids the accidental autoboxing of 'for (Double v : ...)'.
+				for(double v : Q[p2]) {
+					max = Math.max(max, v);
+				}
+			} else {
+				// Terminal transition: no future value, bootstrap on the reward itself.
+				max = reward;
+			}
+			double q = reward + gamma * max - Q[p][a];
+			Q[p][a] += lr * q;
+		}
+
+		/** Exploration delegates to the environment's random action. */
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+		
+	}
+	
+	/**
+	 * 1D line environment: the agent moves one cell per step inside [-10, 10].
+	 * Rewards: -1000 for leaving the range, +1000 for reaching/crossing 0,
+	 * -1 otherwise.
+	 */
+	public static class OneDimensionEnv implements Environment {
+		private Random rand = new Random();
+		private double x = 0;
+		private double reward = 0;
+		// NOTE(review): never assigned; all uses below are commented out.
+		private Drawable pos;
+		
+		public OneDimensionEnv() {
+			// When a GUI exists, hide the window's default point marker.
+			if(!Configuration.commandLineMode) {
+				AmoebaWindow instance = AmoebaWindow.instance();
+				//pos = new DrawableOval(0.5, 0.5, 1, 1);
+				//pos.setColor(new Color(0.5, 0.0, 0.0, 0.5));
+				//instance.mainVUI.add(pos);
+				//instance.mainVUI.createAndAddRectangle(-50, -0.25, 100, 0.5);
+				//instance.mainVUI.createAndAddRectangle(-0.25, -1, 0.5, 2);
+				instance.point.hide();
+				//instance.rectangle.hide();
+			}
+		}
+		
+		/** Places the agent at a uniformly random integer position in [-10, 10]; reward reset to 0. */
+		@Override
+		public HashMap<String, Double> reset(){
+			x = RandomUtils.nextDouble(rand, -10.0, Math.nextUp(10.0));
+			x = Math.round(x);
+			reward = 0.0;
+			//pos.move(x+0.5, 0.5);
+			
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("oracle", reward);
+			return ret;
+		}
+		
+		/**
+		 * Moves by one step: the action is rounded away from zero then clamped
+		 * to [-1, 1]. A zero action is first replaced by a random draw in (0, 1),
+		 * which the ceil then snaps to +1 (almost surely).
+		 */
+		@Override
+		public HashMap<String, Double> step(HashMap<String, Double> actionMap){
+			double action = actionMap.get("a1");
+			if(action == 0.0) action = rand.nextDouble();
+			if(action > 0.0) action = Math.ceil(action);
+			if(action < 0.0 ) action = Math.floor(action);
+			if(action > 1.0) action = 1.0;
+			if(action < -1.0) action = -1.0;
+			double oldX = x;
+			x = x + action;
+			if(x < -10.0 || x > 10.0) {
+				reward = -1000.0;
+			} else if(x == 0.0 || sign(oldX) != sign(x)) {
+				// win !
+				reward = 1000.0;
+			} else {
+				reward = -1.0;
+			}
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("oracle", reward);
+			//pos.move(x+0.5, 0.5);
+			return ret;
+		}
+
+		@Override
+		public List<String> actionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("a1 enum:true {-1, 1}");
+			return l;
+		}
+
+		@Override
+		public List<String> perceptionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("p1 enum:false [-10, 10]");
+			return l;
+		}
+
+		/** Uniformly random direction: -1 or +1. */
+		@Override
+		public HashMap<String, Double> randomAction() {
+			double a = rand.nextBoolean() ? -1 : 1;
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a);
+			return action;
+			}
+		
+	}
+	
+	/**
+	 * Setup an amoeba for the SimpleReinforcement problem: one perception
+	 * sensor (p1) and one action sensor (a1), configured through a generated
+	 * temporary XML file.
+	 * @return the configured AMOEBA (headless: no window, no VUI)
+	 */
+	private static AMOEBA setup() {
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("p1", false));
+		sensors.add(new Pair<String, Boolean>("a1", true));
+		File config;
+		try {
+			config = File.createTempFile("config", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable; lets the compiler know config is initialized
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(null, null, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		return amoeba;
+	}
+	
+	/**
+	 * Teach a learning agent on the SimpleReinforcement problem: runs N_LEARN
+	 * training episodes, evaluating the greedy policy after each one.
+	 * @param agent the agent to train
+	 * @param env   the environment to train on
+	 * @return one average test reward per training episode
+	 */
+	public static ArrayList<Double> learning(LearningAgent agent, Environment env){
+		ArrayList<Double> averageRewards = new ArrayList<Double>();
+		// NOTE(review): rand is unused — epsilon-greedy selection is disabled below.
+		Random rand = new Random();
+		
+		Random r = new Random();
+		HashMap<String, Double> state = env.reset();
+		HashMap<String, Double> state2;
+		double explo = EXPLO_RATE_BASE;
+		for(int i = 0; i < N_LEARN; i++) {
+			int nbStep = 0;
+			state = env.reset();
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			double totReward = 0.0;
+			
+			// execute simulation cycles
+			boolean done = false;
+			boolean invalid = false;
+			
+			
+			while(!done && !invalid) {
+				nbStep++;
+				if(nbStep > MAX_STEP_PER_EPISODE) {
+					invalid = true;
+				}
+				state.remove("oracle");
+				
+				action = new HashMap<String, Double>();
+				
+				// Pure exploration; the epsilon-greedy variant below is disabled.
+				action = agent.explore(state, env);
+//				if(rand.nextDouble() < explo) {
+//					action = agent.explore(state, env);
+//				} else {
+//					action = agent.choose(state, env);
+//				}
+				
+				
+				state2 = env.step(action);  // new position with associated reward
+				
+				if(state2.get("oracle") != -1.0) { //if goal or end of world
+					done = true;
+				}
+				action.put("p1", state.get("p1")); //add previous state to action
+				
+				action.put("oracle", state2.get("oracle")); //add current reward to action
+				
+				// state : previous position and associated reward
+				// state2 : new position with current reward
+				// action : previous state, current action and current reward
+				
+				agent.learn(state, state2, action, done);
+				totReward += action.get("oracle");
+				
+				state = state2;
+			}
+			
+			// update exploration rate (linear decay, clamped at MIN_EXPLO_RATE)
+			if(explo > MIN_EXPLO_RATE) {
+				explo -= EXPLO_RATE_DIMINUTION_FACTOR;
+				if(explo < MIN_EXPLO_RATE)
+					explo = MIN_EXPLO_RATE;
+			}
+			
+			System.out.println("Episode "+i+"  reward : "+totReward+"  explo : "+explo);
+			// Evaluate the greedy policy after each training episode.
+			double testAR = test(agent, env, r, N_TEST);
+			averageRewards.add(testAR);
+			
+			//Scanner scan = new Scanner(System.in);
+			//scan.nextLine();
+		}
+		
+		return averageRewards;
+	}
+
+	/**
+	 * Evaluates the agent's greedy policy (choose only, no learning) over
+	 * {@code nbTest} episodes of at most 200 steps each.
+	 *
+	 * @param agent  the agent under evaluation
+	 * @param env    the environment to run episodes in
+	 * @param r      unused here; kept for signature compatibility
+	 * @param nbTest number of evaluation episodes
+	 * @return the average total reward per episode
+	 */
+	private static double test(LearningAgent agent, Environment env, Random r, int nbTest) {
+		HashMap<String, Double> state;
+		HashMap<String, Double> state2;
+		double nbPositiveReward = 0.0;
+		double tot_reward = 0.0;
+		for(int i = 0; i < nbTest; i++) {
+			double reward = 0.0;
+			state = env.reset();
+			
+			// execute simulation cycles
+			boolean done = false;
+			int nbStep = 0;
+			while(!done) {
+				nbStep++;
+				if(nbStep > 200) {
+					done = true;
+				}
+				state.remove("oracle");
+				 HashMap<String, Double> a = agent.choose(state, env);
+				
+				state2 = env.step(a);
+				
+				if(state2.get("oracle") != -1.0) {
+					done = true;
+				}
+				
+				reward += state2.get("oracle");
+				
+				state = state2;
+			}
+			if(reward > 0) {
+				nbPositiveReward += 1.0;
+			}
+			tot_reward += reward;
+		}
+		double averageReward = tot_reward/nbTest;
+		System.out.println("Test average reward : "+averageReward+"  Positive reward %: "+(nbPositiveReward/nbTest));
+		
+		return averageReward;
+	}
+	
+	/**
+	 * This is a proof of concept, showing that if amoeba learn the correct model of the reward,
+	 * it can produce a good solution.
+	 * The expected average reward for the optimal solution is 75.
+	 * The main cause of negative reward is infinite loop (usually near the objective). In such case, the reward is -200
+	 *
+	 * @param learnMalus when true, also teach a penalty for moving away from the goal
+	 */
+	public static void poc(boolean learnMalus) {
+		AMOEBA amoeba = setup();
+		Environment env = new OneDimensionEnv();
+		
+		// train: hand-feed the ideal reward model (reward = 100 - |pos| when
+		// moving toward the origin), sampled at several fractional offsets n
+		for(double n = 0.0; n < 0.5; n+=0.1) {
+			// positive side: correct action is -1 (move toward 0)
+			double pos = 50.0-n;
+			for(int i = 0; i < 49; i++) {
+				double reward = 100 - Math.abs(pos);
+				HashMap<String, Double> action = new HashMap<String, Double>();
+				action.put("p1", pos);
+				action.put("a1", -1.0);
+				action.put("oracle", reward);
+				amoeba.learn(action);
+				
+				if(learnMalus) {
+					// penalize the opposite (wrong-direction) action
+					reward = -150 + Math.abs(pos);
+					action.put("a1", 1.0);
+					action.put("oracle", reward);
+					amoeba.learn(action);
+				}
+				
+				pos -= 1.0;
+			}
+			
+			// negative side: correct action is +1 (move toward 0)
+			pos = -50.0-n;
+			for(int i = 0; i < 49; i++) {
+				double reward = 100 - Math.abs(pos);
+				HashMap<String, Double> action = new HashMap<String, Double>();
+				action.put("p1", pos);
+				action.put("a1", 1.0);
+				action.put("oracle", reward);
+				amoeba.learn(action);
+				
+				if(learnMalus) {
+					reward = -150 + Math.abs(pos);
+					action.put("a1", -1.0);
+					action.put("oracle", reward);
+					amoeba.learn(action);
+				}
+				
+				pos += 1.0;
+			}
+		}
+		
+		// tests: run the greedy policy (amoeba.maximize) and report average reward
+		Random r = new Random();
+		HashMap<String, Double> state = env.reset();
+		HashMap<String, Double> state2;
+		double tot_reward = 0.0;
+		int nbTest = 100;
+		double nbPositiveReward = 0;
+		for(int i = 0; i < nbTest; i++) {
+			double reward = 0.0;
+			state = env.reset();
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			
+			// execute simulation cycles
+			boolean done = false;
+			int nbStep = 0;
+			while(!done) {
+				nbStep++;
+				if(nbStep > 200) {
+					done = true;
+				}
+				state.remove("oracle");
+				action = amoeba.maximize(state);
+				// random action if no proposition from amoeba
+				if(action.get("oracle").equals(Double.NEGATIVE_INFINITY) ) {
+					action.put("a1", (r.nextBoolean() ? 1.0 : -1.0));
+				}
+				//System.out.println("action "+action);
+				
+				state2 = env.step(action);
+				
+				if(state2.get("oracle") != -1.0) {
+					done = true;
+				}
+				
+				reward += state2.get("oracle");
+				
+				//System.out.println("state2 "+state2+"  reward "+reward);
+				
+				state = state2;
+			}
+			if(reward > 0) {
+				nbPositiveReward += 1.0;
+			}
+			tot_reward += reward;
+			//System.out.println("-----------------------------\nTot reward "+tot_reward+"\n-----------------------------");
+		}
+		System.out.println("Average reward : "+tot_reward/nbTest+"  Positive reward %: "+(nbPositiveReward/nbTest));
+	}
+	
+	/** Sign of x with the convention sign(0) == +1 (never returns 0). */
+	private static int sign(double x) {
+		if (x < 0) {
+			return -1;
+		}
+		return 1;
+	}
+
+}
diff --git a/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement1DSpatialRewardAndAction.java b/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement1DSpatialRewardAndAction.java
new file mode 100644
index 0000000000000000000000000000000000000000..136d983eeec81949faaac309442eee3dd22aae22
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement1DSpatialRewardAndAction.java
@@ -0,0 +1,671 @@
+package experiments.reinforcement;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Random;
+
+import agents.context.localModel.TypeLocalModel;
+import agents.percept.Percept;
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.tools.Log;
+import fr.irit.smac.amak.ui.drawables.Drawable;
+import gui.AmoebaWindow;
+import kernel.AMOEBA;
+import kernel.World;
+import kernel.backup.SaveHelperDummy;
+import utils.Pair;
+import utils.RandomUtils;
+import utils.TRACE_LEVEL;
+import utils.XmlConfigGenerator;
+
+/**
+ * Train an amoeba on a simple reinforcement task.
+ * The goal of the task is to get to the center. When the agent's position crosses 0, it gets a reward of 1000.
+ * The agent can only move in 2 directions, by a distance of 1. Moving gives a reward of -1.
+ * If the agent moves outside of the allowed range, it gets a reward of -1000.
+ * @author Hugo
+ *
+ */
+public abstract class SimpleReinforcement1DSpatialRewardAndAction {
+	/* Learn and Test */
+	public static final int MAX_STEP_PER_EPISODE = 200;
+	public static final int N_LEARN = 100;//400
+	public static final int N_TEST = 100;
+	
+	/* Exploration */
+	public static final double MIN_EXPLO_RATE = 0.02;
+	public static final double EXPLO_RATE_DIMINUTION_FACTOR = 0.01;
+	public static final double EXPLO_RATE_BASE = 1;
+	
+	public static void main(String[] args) {
+		//poc(true);
+		Configuration.commandLineMode = false;
+		Configuration.plotMilliSecondsUpdate = 20000;
+		/*System.out.println("----- AMOEBA -----");
+		learning(new QLearning(), new OneDimensionEnv());
+		System.out.println("----- END AMOEBA -----");
+		System.out.println("\n\n----- QLEARNING -----");
+		learning(new QLearning());
+		System.out.println("----- END QLEARNING -----");*/
+		ArrayList<ArrayList<Double>> results = new ArrayList<>();
+		for(int i = 0; i < 1; i++) {
+			//LearningAgent agent = new QLearning();
+			LearningAgent agent = new AmoebaQL();
+			//LearningAgent agent = new AmoebaCoop();
+			Environment env = new OneDimensionEnv(10);
+			results.add(learning(agent, env));
+			System.out.println(i);
+		}
+		
+		int nbEpisodes = results.get(0).size();
+		for(int i = 0; i < nbEpisodes; i++) {
+			double average = 0;
+			for(int j = 0; j < results.size(); j++) {
+				average += results.get(j).get(i);
+			}
+			average /= results.size();
+			System.out.println(""+i+"\t"+average);
+		}
+		
+		//System.exit(0);
+	}
+	
+	/**
+	 * An environment in which a LearningAgent resides.
+	 * @author Hugo
+	 *
+	 */
+	public interface Environment {
+		public List<String> actionSpace();
+		public List<String> perceptionSpace();
+		public HashMap<String, Double> reset();
+		public HashMap<String, Double> step(HashMap<String, Double> action);
+		public HashMap<String, Double> randomAction();
+	}
+	
+	/**
+	 * Wrapper for any kind of learning agent
+	 * @author Hugo
+	 *
+	 */
+	public interface LearningAgent {
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env);
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env);
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2, HashMap<String, Double> action, boolean done);
+	}
+	
+	/**
+	 * Compatible only with OneDimensionEnv 
+	 * @author Hugo
+	 *
+	 */
+	public static class AmoebaQL implements LearningAgent {
+		public AMOEBA amoebaSpatialReward;
+		//public AMOEBA amoebaControlModel;
+		public double lr = 0.8;
+		public double gamma = 0.9;
+		private Random rand = new Random();
+		
+		public AmoebaQL() {
+			amoebaSpatialReward = setupSpatialReward();
+			//amoebaControlModel = setupControlModel();
+		}
+		
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			
+//			HashMap<String, Double> stateWithVizuAdded = new HashMap<String, Double>(state);
+//			stateWithVizuAdded.put("p2", 0.0);
+//			stateWithVizuAdded.put("oracle", 0.0);
+//			HashMap<String, Double> bestFuturePosition =  amoebaSpatialReward.reinforcementRequest(stateWithVizuAdded);
+//			
+//			HashMap<String, Double> action = new HashMap<String, Double>();
+//			if(bestFuturePosition!=null) {
+//				HashMap<String, Double> requestForControlModel = new HashMap<String, Double>();
+//				requestForControlModel.put("pCurrent", state.get("p1"));
+//				requestForControlModel.put("pGoal", bestFuturePosition.get("p1"));
+//				
+//				double bestAction = amoebaControlModel.request(requestForControlModel);
+//				
+//				
+//				action.put("a1", bestAction);
+//			}
+//			action = env.randomAction();
+//			
+//			return action;
+			return null;
+		}
+
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> positionAndReward,
+				HashMap<String, Double> action, boolean done) {
+			
+			// state : previous position and associated reward
+			// state2 : new position with current reward
+			// action : previous state, current actions and current reward
+			
+			HashMap<String, Double> previousStateCurrentStateAction = new HashMap<>();
+			previousStateCurrentStateAction.put("pCurrent", action.get("p1"));
+			previousStateCurrentStateAction.put("pGoal", positionAndReward.get("p1"));
+			previousStateCurrentStateAction.put("oracle", action.get("a1"));
+			
+
+			
+			//System.out.println("ControlModel " + previousStateCurrentStateAction + "                  ---------------- SIMPLE REIN XP 149");
+			//System.out.println("SpatialReward " + positionAndReward + "                  ---------------- SIMPLE REIN XP 149");
+			
+			amoebaSpatialReward.learn(positionAndReward);
+			//amoebaControlModel.learn(previousStateCurrentStateAction);
+			
+		}
+
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+	}
+	
+	/**
+	 * Wrapper for AMOEBA
+	 * @author Hugo
+	 *
+	 */
+	public static class AmoebaCoop implements LearningAgent {
+		public AMOEBA amoeba;
+		
+		public AmoebaCoop() {
+			amoeba = setup();
+			amoeba.setLocalModel(TypeLocalModel.COOP_MILLER_REGRESSION);
+			amoeba.getEnvironment().setMappingErrorAllowed(0.009);
+		}
+		
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			HashMap<String, Double> action = amoeba.maximize(state);
+			if(action.get("oracle") == Double.NEGATIVE_INFINITY) {
+				action = env.randomAction();
+			}
+			return action;
+		}
+
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2,
+				HashMap<String, Double> action, boolean done) {
+			amoeba.learn(action);
+		}
+
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+		
+	}
+	
+	/**
+	 * Compatible only with OneDimensionEnv.<br/>
+	 * An extremely crude and quick implementation of Q learning.
+	 * Not expected to perform well, but should be better than random.
+	 * @author Hugo
+	 *
+	 */
+	public static class QLearning implements LearningAgent {
+		public double[][] Q = new double[102][2];
+		public double lr = 0.8;
+		public double gamma = 0.9;
+		private Random rand = new Random();
+		
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			int p = state.get("p1").intValue()+50;
+			double a;
+			if(Q[p][0] == Q[p][1]) {
+				a = rand.nextBoolean() ? -1 : 1;
+			} else {
+				a = Q[p][0] > Q[p][1] ? -1 : 1;
+			}
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a);
+			return action;
+		}
+
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2,
+				HashMap<String, Double> action, boolean done) {
+			int p = state.get("p1").intValue()+50;
+			int p2 = state2.get("p1").intValue()+50;
+			int a = action.get("a1").intValue() == -1 ? 0 : 1;
+			double reward = state2.get("oracle");
+			double max = Double.NEGATIVE_INFINITY;
+			if(!done) {
+				for(Double v : Q[p2]) {
+					max = Math.max(max, v);
+				}
+			} else {
+				max = reward;
+			}
+			// 
+			double q = reward + gamma * max - Q[p][a];
+			Q[p][a] += lr * q;
+		}
+
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+		
+	}
+	
+	public static class OneDimensionEnv implements Environment {
+		private Random rand = new Random();
+		private double x = 0;
+		private double reward = 0;
+		private double size;
+		private Drawable pos;
+		
+		public OneDimensionEnv(double envSize) {
+			
+			size = envSize;
+			
+			if(!Configuration.commandLineMode) {
+				AmoebaWindow instance = AmoebaWindow.instance();
+				//pos = new DrawableOval(0.5, 0.5, 1, 1);
+				//pos.setColor(new Color(0.5, 0.0, 0.0, 0.5));
+				//instance.mainVUI.add(pos);
+				//instance.mainVUI.createAndAddRectangle(-50, -0.25, 100, 0.5);
+				//instance.mainVUI.createAndAddRectangle(-0.25, -1, 0.5, 2);
+				instance.point.hide();
+				//instance.rectangle.hide();
+			}
+		}
+		
+		@Override
+		public HashMap<String, Double> reset(){
+			x = RandomUtils.nextDouble(rand, -size, Math.nextUp(size));
+			x = Math.round(x);
+			reward = 0.0;
+			//pos.move(x+0.5, 0.5);
+			
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("oracle", reward);
+			return ret;
+		}
+		
+		@Override
+		public HashMap<String, Double> step(HashMap<String, Double> actionMap){
+			double action = actionMap.get("a1");
+			//if(action == 0.0) action = rand.nextDouble();
+			if(action > 0.0) action = Math.ceil(action);
+			if(action < 0.0 ) action = Math.floor(action);
+			if(action > 1.0) action = 1.0;
+			if(action < -1.0) action = -1.0;
+			double oldX = x;
+			x = x + action;
+			
+			
+			//System.out.println("ACTIONS " + " a1 " +action + " " + " a2 " + action2);
+			if(x < -size || x > size) {
+				reward = -1000.0;
+			} else if((x == 0.0) || (sign(oldX) != sign(x) )) {
+				// win !
+				reward = 1000.0;
+			} else {
+				reward = -1.0;
+			}
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("oracle", reward);
+			//pos.move(x+0.5, 0.5);
+			return ret;
+		}
+
+		@Override
+		public List<String> actionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("a1 enum:true {-1, 0, 1}");
+			return l;
+		}
+
+		@Override
+		public List<String> perceptionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("p1 enum:false [-"+size+", "+size+"]");
+			return l;
+		}
+
+		@Override
+		public HashMap<String, Double> randomAction() {
+			double a1 = rand.nextBoolean() ? -1 : 1;
+			
+						
+
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a1);
+			return action;
+			}
+		
+	}
+	
+	/**
+	 * Setup an amoeba for the SimpleReinforcement problem
+	 * @return
+	 */
+	private static AMOEBA setup() {
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("p1", false));
+		File config;
+		try {
+			config = File.createTempFile("config", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable after System.exit, but lets the compiler know config is initialized
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(null, null, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		
+	
+
+		
+		return amoeba;
+	}
+	
+	
+	
+	private static AMOEBA setupSpatialReward() {
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("p1", false));
+		File config;
+		try {
+			config = File.createTempFile("configSpatialReward", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable after System.exit, but lets the compiler know config is initialized
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(null, null, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		
+		
+//		for(Percept pct : amoeba.getPercepts()) {
+//			pct.setMax(10);
+//			pct.setMin(-10);
+//		}
+		
+		amoeba.setLocalModel(TypeLocalModel.MILLER_REGRESSION);
+		amoeba.getEnvironment().setMappingErrorAllowed(0.025);
+		amoeba.setReinforcement(true);
+		
+		
+		return amoeba;
+	}
+	
+
+	private static AMOEBA setupControlModel() {
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("pCurrent", false));
+		sensors.add(new Pair<String, Boolean>("pGoal", false));
+		File config;
+		try {
+			config = File.createTempFile("configControlModel", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable after System.exit, but lets the compiler know config is initialized
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(null, null, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		
+		
+		
+		
+		amoeba.setLocalModel(TypeLocalModel.MILLER_REGRESSION);
+		amoeba.getEnvironment().setMappingErrorAllowed(0.025);
+		
+		return amoeba;
+	}
+	
+	/**
+	 * Teach a learning agent on the SimpleReinforcement problem
+	 * @param agent
+	 * @return
+	 */
+	public static ArrayList<Double> learning(LearningAgent agent, Environment env){
+		ArrayList<Double> averageRewards = new ArrayList<Double>();
+		Random rand = new Random();
+		
+		Random r = new Random();
+		HashMap<String, Double> state = env.reset();
+		HashMap<String, Double> state2;
+		double explo = EXPLO_RATE_BASE;
+		for(int i = 0; i < N_LEARN; i++) {
+			int nbStep = 0;
+			state = env.reset();
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			double totReward = 0.0;
+			
+			// execute simulation cycles
+			boolean done = false;
+			boolean invalid = false;
+			
+			
+			while(!done && !invalid) {
+				nbStep++;
+				if(nbStep > MAX_STEP_PER_EPISODE) {
+					invalid = true;
+				}
+				state.remove("oracle");
+				
+				action = new HashMap<String, Double>();
+				
+				action = agent.explore(state, env);
+//				if(rand.nextDouble() < explo) {
+//					action = agent.explore(state, env);
+//				} else {
+//					action = agent.choose(state, env);
+//				}
+				
+				
+				state2 = env.step(action);  // new position with associated reward
+				
+				if(state2.get("oracle") != -1.0) { //if goal or end of world
+					done = true;
+				}
+				action.put("p1", state.get("p1")); //add previous state to action
+				
+				action.put("oracle", state2.get("oracle")); //add current reward to action
+				
+				// state : previous position and associated reward
+				// state2 : new position with current reward
+				// action : previous state, current action and current reward
+				
+				agent.learn(state, state2, action, done);
+				totReward += action.get("oracle");
+				
+				state = state2;
+			}
+			
+			System.out.println("-----------------------------------------------------------------------");
+			
+			// update exploration rate
+			if(explo > MIN_EXPLO_RATE) {
+				explo -= EXPLO_RATE_DIMINUTION_FACTOR;
+				if(explo < MIN_EXPLO_RATE)
+					explo = MIN_EXPLO_RATE;
+			}
+			
+			System.out.println("Episode "+i+"  reward : "+totReward+"  explo : "+explo);
+			//double testAR = test(agent, env, r, N_TEST);
+			//averageRewards.add(testAR);
+			
+			//Scanner scan = new Scanner(System.in);
+			//scan.nextLine();
+		}
+		
+		return averageRewards;
+	}
+
+	private static double test(LearningAgent agent, Environment env, Random r, int nbTest) {
+		HashMap<String, Double> state;
+		HashMap<String, Double> state2;
+		double nbPositiveReward = 0.0;
+		double tot_reward = 0.0;
+		for(int i = 0; i < nbTest; i++) {
+			double reward = 0.0;
+			state = env.reset();
+			
+			// execute simulation cycles
+			boolean done = false;
+			int nbStep = 0;
+			while(!done) {
+				nbStep++;
+				if(nbStep > 200) {
+					done = true;
+				}
+				state.remove("oracle");
+				 HashMap<String, Double> a = agent.choose(state, env);
+				
+				state2 = env.step(a);
+				
+				if(state2.get("oracle") != -1.0) {
+					done = true;
+				}
+				
+				reward += state2.get("oracle");
+				
+				state = state2;
+			}
+			if(reward > 0) {
+				nbPositiveReward += 1.0;
+			}
+			tot_reward += reward;
+		}
+		double averageReward = tot_reward/nbTest;
+		System.out.println("Test average reward : "+averageReward+"  Positive reward %: "+(nbPositiveReward/nbTest));
+		
+		return averageReward;
+	}
+	
+	/**
+	 * This is a proof of concept, showing that if amoeba learns the correct model of the reward,
+	 * it can produce a good solution.
+	 * The expected average reward for the optimal solution is 75.
+	 * The main cause of negative reward is an infinite loop (usually near the objective). In such a case, the reward is -200.
+	 */
+	public static void poc(boolean learnMalus) {
+		AMOEBA amoeba = setup();
+		Environment env = new OneDimensionEnv(50);
+		
+		// train
+		for(double n = 0.0; n < 0.5; n+=0.1) {
+			double pos = 50.0-n;
+			for(int i = 0; i < 49; i++) {
+				double reward = 100 - Math.abs(pos);
+				HashMap<String, Double> action = new HashMap<String, Double>();
+				action.put("p1", pos);
+				action.put("a1", -1.0);
+				action.put("oracle", reward);
+				amoeba.learn(action);
+				
+				if(learnMalus) {
+					reward = -150 + Math.abs(pos);
+					action.put("a1", 1.0);
+					action.put("oracle", reward);
+					amoeba.learn(action);
+				}
+				
+				pos -= 1.0;
+			}
+			
+			pos = -50.0-n;
+			for(int i = 0; i < 49; i++) {
+				double reward = 100 - Math.abs(pos);
+				HashMap<String, Double> action = new HashMap<String, Double>();
+				action.put("p1", pos);
+				action.put("a1", 1.0);
+				action.put("oracle", reward);
+				amoeba.learn(action);
+				
+				if(learnMalus) {
+					reward = -150 + Math.abs(pos);
+					action.put("a1", -1.0);
+					action.put("oracle", reward);
+					amoeba.learn(action);
+				}
+				
+				pos += 1.0;
+			}
+		}
+		
+		// tests
+		Random r = new Random();
+		HashMap<String, Double> state = env.reset();
+		HashMap<String, Double> state2;
+		double tot_reward = 0.0;
+		int nbTest = 100;
+		double nbPositiveReward = 0;
+		for(int i = 0; i < nbTest; i++) {
+			double reward = 0.0;
+			state = env.reset();
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			
+			// execute simulation cycles
+			boolean done = false;
+			int nbStep = 0;
+			while(!done) {
+				nbStep++;
+				if(nbStep > 200) {
+					done = true;
+				}
+				state.remove("oracle");
+				action = amoeba.maximize(state);
+				// random action if no proposition from amoeba
+				if(action.get("oracle").equals(Double.NEGATIVE_INFINITY) ) {
+					action.put("a1", (r.nextBoolean() ? 1.0 : -1.0));
+				}
+				//System.out.println("action "+action);
+				
+				state2 = env.step(action);
+				
+				if(state2.get("oracle") != -1.0) {
+					done = true;
+				}
+				
+				reward += state2.get("oracle");
+				
+				//System.out.println("state2 "+state2+"  reward "+reward);
+				
+				state = state2;
+			}
+			if(reward > 0) {
+				nbPositiveReward += 1.0;
+			}
+			tot_reward += reward;
+			//System.out.println("-----------------------------\nTot reward "+tot_reward+"\n-----------------------------");
+		}
+		System.out.println("Average reward : "+tot_reward/nbTest+"  Positive reward %: "+(nbPositiveReward/nbTest));
+	}
+	
+	private static int sign(double x) {
+		return x < 0 ? -1 : 1;
+	}
+
+}
diff --git a/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement1DSpatialRewardAndActionMltiUI.java b/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement1DSpatialRewardAndActionMltiUI.java
new file mode 100644
index 0000000000000000000000000000000000000000..db5db34acf6a1951f30419533edc72b467bf3344
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement1DSpatialRewardAndActionMltiUI.java
@@ -0,0 +1,693 @@
+package experiments.reinforcement;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Random;
+
+import agents.context.localModel.TypeLocalModel;
+import agents.percept.Percept;
+import experiments.nDimensionsLaunchers.F_N_Manager;
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.tools.Log;
+import fr.irit.smac.amak.ui.VUIMulti;
+import fr.irit.smac.amak.ui.drawables.Drawable;
+import gui.AmoebaMultiUIWindow;
+import gui.AmoebaWindow;
+import javafx.application.Application;
+import javafx.application.Platform;
+import javafx.beans.value.ChangeListener;
+import javafx.beans.value.ObservableValue;
+import javafx.scene.control.Slider;
+import javafx.stage.Stage;
+import kernel.AMOEBA;
+import kernel.StudiedSystem;
+import kernel.World;
+import kernel.backup.BackupSystem;
+import kernel.backup.IBackupSystem;
+import kernel.backup.SaveHelperDummy;
+import kernel.backup.SaveHelperImpl;
+import utils.Pair;
+import utils.RandomUtils;
+import utils.TRACE_LEVEL;
+import utils.XmlConfigGenerator;
+
+/**
+ * Train an amoeba on a simple reinforcement task.
+ * The goal of the task is to get to the center. When the agent's position crosses 0, it gets a reward of 1000.
+ * The agent can only move in 2 directions, by a distance of 1. Moving gives a reward of -1.
+ * If the agent moves outside of the allowed range, it gets a reward of -1000.
+ * @author Hugo
+ *
+ */
+public abstract class SimpleReinforcement1DSpatialRewardAndActionMltiUI extends Application implements Serializable{
+	
+	/* Learn and Test */
+	public static final int MAX_STEP_PER_EPISODE = 200;
+	public static final int N_LEARN = 1000;//400
+	public static final int N_TEST = 100;
+	
+	/* Exploration */
+	public static final double MIN_EXPLO_RATE = 0.02;
+	public static final double EXPLO_RATE_DIMINUTION_FACTOR = 0.01;
+	public static final double EXPLO_RATE_BASE = 1;
+	
+	AMOEBA amoebaSpatialReward;
+	VUIMulti amoebaSpatialRewardVUI;
+	AmoebaMultiUIWindow amoebaSpatialRewardUI;
+	
+	AMOEBA amoebaControlModel;
+	VUIMulti amoebaControlModelVUI;
+	AmoebaMultiUIWindow amoebaControlModelUI;
+	
+	AMOEBA amoeba;
+	StudiedSystem studiedSystem;
+	VUIMulti amoebaVUI;
+	AmoebaMultiUIWindow amoebaUI;
+	
+	
+	
+	public static void main(String[] args) throws IOException {
+		
+		
+		Application.launch(args);
+		
+	}
+	
+	@Override
+	public void start(Stage arg0) throws Exception, IOException {
+
+		
+		Configuration.multiUI=true;
+		Configuration.commandLineMode = false;
+		Configuration.allowedSimultaneousAgentsExecution = 1;
+		Configuration.waitForGUI = true;
+		Configuration.plotMilliSecondsUpdate = 20000;
+		
+		amoebaVUI = new VUIMulti("2D");
+		amoebaUI = new AmoebaMultiUIWindow("ELLSA", amoebaVUI);
+		
+//		amoebaSpatialRewardVUI = new VUIMulti("2D");
+//		amoebaSpatialRewardUI = new AmoebaMultiUIWindow("SPATIAL_REWARD", amoebaSpatialRewardVUI);
+//		
+//		amoebaControlModelVUI = new VUIMulti("2D");
+//		amoebaControlModelUI = new AmoebaMultiUIWindow("CONTROL_MODEL", amoebaControlModelVUI);
+//		
+//	
+		
+		//startTask(100, 1000);
+
+
+		
+	}
+	
+	public void startTask(long wait, int cycles) 
+    {
+        // Create a Runnable
+        Runnable task = new Runnable()
+        {
+            public void run()
+            {
+                runTask(wait, cycles);
+            }
+        };
+ 
+        // Run the task in a background thread
+        Thread backgroundThread = new Thread(task);
+        // Terminate the running thread if the application exits
+        backgroundThread.setDaemon(true);
+        // Start the thread
+        backgroundThread.start();
+        
+     
+    }
+	
+	public void startTask2(long wait, int cycles) 
+    {
+        // Create a Runnable
+        Runnable task = new Runnable()
+        {
+            public void run()
+            {
+                runTask2(wait, cycles);
+            }
+        };
+ 
+        // Run the task in a background thread
+        Thread backgroundThread = new Thread(task);
+        // Terminate the running thread if the application exits
+        backgroundThread.setDaemon(true);
+        // Start the thread
+        backgroundThread.start();
+        
+     
+    }
+	
+	public void runTask(long wait, int cycles) 
+    {
+		
+		try
+        {
+             
+            // Update the Label on the JavaFx Application Thread        
+            Platform.runLater(new Runnable() 
+            {
+                @Override
+                public void run() 
+                {
+                	
+                	ArrayList<ArrayList<Double>> results = new ArrayList<>();
+        			//LearningAgent agent = new QLearning();
+        			LearningAgent agent = new AmoebaQL();
+        			//LearningAgent agent = new AmoebaCoop();
+        			Environment env = new OneDimensionEnv(10);
+        			results.add(learning(agent, env));
+ 
+            		
+            		int nbEpisodes = results.get(0).size();
+            		for(int i = 0; i < nbEpisodes; i++) {
+            			double average = 0;
+            			for(int j = 0; j < results.size(); j++) {
+            				average += results.get(j).get(i);
+            			}
+            			average /= results.size();
+            			System.out.println(""+i+"\t"+average);
+            		}
+                }
+            });
+     
+            Thread.sleep(wait);
+        }
+        catch (InterruptedException e) 
+        {
+            e.printStackTrace();
+        }
+		
+		
+		
+//        for(int i = 0; i < cycles; i++) 
+//        {
+//            try
+//            {
+//                // Get the Status
+//                final String status = "Processing " + i + " of " + cycles;
+//                 
+//                // Update the Label on the JavaFx Application Thread        
+//                Platform.runLater(new Runnable() 
+//                {
+//                    @Override
+//                    public void run() 
+//                    {
+//                    	///
+//                    }
+//                });
+//         
+//                Thread.sleep(wait);
+//            }
+//            catch (InterruptedException e) 
+//            {
+//                e.printStackTrace();
+//            }
+//        }
+    }   
+	
+	public void runTask2(long wait, int cycles) 
+    {
+		
+		try
+        {
+             
+            // Update the Label on the JavaFx Application Thread        
+            Platform.runLater(new Runnable() 
+            {
+                @Override
+                public void run() 
+                {
+                	///
+            		
+                }
+            });
+     
+            Thread.sleep(wait);
+        }
+        catch (InterruptedException e) 
+        {
+            e.printStackTrace();
+        }
+		
+		
+		
+        for(int i = 0; i < cycles; i++) 
+        {
+            try
+            {
+                // Get the Status
+                final String status = "Processing " + i + " of " + cycles;
+                 
+                // Update the Label on the JavaFx Application Thread        
+                Platform.runLater(new Runnable() 
+                {
+                    @Override
+                    public void run() 
+                    {
+                    	///
+                    }
+                });
+         
+                Thread.sleep(wait);
+            }
+            catch (InterruptedException e) 
+            {
+                e.printStackTrace();
+            }
+        }
+    }   
+	
+	@Override
+	public void stop() throws Exception {
+		super.stop();
+		System.exit(0);
+	}
+	
+	
+	
+	
+	
+	
+	
+	
+	
+	
+	
+	/**
+	 * An environment in which a LearningAgent resides.
+	 * @author Hugo
+	 *
+	 */
+	public interface Environment {
+		public List<String> actionSpace();
+		public List<String> perceptionSpace();
+		public HashMap<String, Double> reset();
+		public HashMap<String, Double> step(HashMap<String, Double> action);
+		public HashMap<String, Double> randomAction();
+	}
+	
+	/**
+	 * Wrapper for any kind of learning agent
+	 * @author Hugo
+	 *
+	 */
+	public interface LearningAgent {
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env);
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env);
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2, HashMap<String, Double> action, boolean done);
+	}
+	
+	/**
+	 * Compatible only with OneDimensionEnv 
+	 * @author Hugo
+	 *
+	 */
+	public static class AmoebaQL implements LearningAgent {
+		public AMOEBA amoebaSpatialReward;
+		//public AMOEBA amoebaControlModel;
+		public double lr = 0.8;
+		public double gamma = 0.9;
+		private Random rand = new Random();
+		
+		public AmoebaQL() {
+			amoebaSpatialReward = setupSpatialReward();
+			//amoebaControlModel = setupControlModel();
+		}
+		
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			
+//			HashMap<String, Double> stateWithVizuAdded = new HashMap<String, Double>(state);
+//			stateWithVizuAdded.put("p2", 0.0);
+//			stateWithVizuAdded.put("oracle", 0.0);
+//			HashMap<String, Double> bestFuturePosition =  amoebaSpatialReward.reinforcementRequest(stateWithVizuAdded);
+//			
+//			HashMap<String, Double> action = new HashMap<String, Double>();
+//			if(bestFuturePosition!=null) {
+//				HashMap<String, Double> requestForControlModel = new HashMap<String, Double>();
+//				requestForControlModel.put("pCurrent", state.get("p1"));
+//				requestForControlModel.put("pGoal", bestFuturePosition.get("p1"));
+//				
+//				double bestAction = amoebaControlModel.request(requestForControlModel);
+//				
+//				
+//				action.put("a1", bestAction);
+//			}
+//			action = env.randomAction();
+//			
+//			return action;
+			return null;
+		}
+
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> positionAndReward,
+				HashMap<String, Double> action, boolean done) {
+			
+			// state : previous position and associated reward
+			// state2 : new position with current reward
+			// action : previous state, current actions and current reward
+			
+			HashMap<String, Double> previousStateCurrentStateAction = new HashMap<>();
+			previousStateCurrentStateAction.put("pCurrent", action.get("p1"));
+			previousStateCurrentStateAction.put("pGoal", positionAndReward.get("p1"));
+			previousStateCurrentStateAction.put("oracle", action.get("a1"));
+			
+
+			
+			//System.out.println("ControlModel " + previousStateCurrentStateAction + "                  ---------------- SIMPLE REIN XP 149");
+			//System.out.println("SpatialReward " + positionAndReward + "                  ---------------- SIMPLE REIN XP 149");
+			
+			amoebaSpatialReward.learn(positionAndReward);
+			//amoebaControlModel.learn(previousStateCurrentStateAction);
+			
+		}
+
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+	}
+	
+	
+	
+	
+	
+	/**
+	 * One-dimensional environment: the agent sits at integer position x in
+	 * [-size, size] and must reach (or cross) 0. Rewards produced by step():
+	 * 1000 on success, -1000 when leaving the allowed range, -1 otherwise
+	 * (-1.0 therefore acts as the "ordinary step" sentinel checked by callers).
+	 */
+	public static class OneDimensionEnv implements Environment {
+		private Random rand = new Random();
+		// Current position of the agent.
+		private double x = 0;
+		// Reward produced by the last step()/reset().
+		private double reward = 0;
+		// Half-width of the world: positions are valid in [-size, size].
+		private double size;
+		// Optional visualization handle (currently unused; drawing code is commented out).
+		private Drawable pos;
+		
+		public OneDimensionEnv(double envSize) {
+			
+			size = envSize;
+			
+			if(!Configuration.commandLineMode) {
+				AmoebaWindow instance = AmoebaWindow.instance();
+				//pos = new DrawableOval(0.5, 0.5, 1, 1);
+				//pos.setColor(new Color(0.5, 0.0, 0.0, 0.5));
+				//instance.mainVUI.add(pos);
+				//instance.mainVUI.createAndAddRectangle(-50, -0.25, 100, 0.5);
+				//instance.mainVUI.createAndAddRectangle(-0.25, -1, 0.5, 2);
+				instance.point.hide();
+				//instance.rectangle.hide();
+			}
+		}
+		
+		/** Place the agent at a random integer position in [-size, size]; reward reset to 0. */
+		@Override
+		public HashMap<String, Double> reset(){
+			x = RandomUtils.nextDouble(rand, -size, Math.nextUp(size));
+			x = Math.round(x);
+			reward = 0.0;
+			//pos.move(x+0.5, 0.5);
+			
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("oracle", reward);
+			return ret;
+		}
+		
+		/**
+		 * Apply action "a1" (coerced to -1, 0 or 1) and return the new position
+		 * with its reward under key "oracle".
+		 */
+		@Override
+		public HashMap<String, Double> step(HashMap<String, Double> actionMap){
+			double action = actionMap.get("a1");
+			//if(action == 0.0) action = rand.nextDouble();
+			// Coerce the raw action into {-1, 0, 1}: round away from zero, then clamp.
+			if(action > 0.0) action = Math.ceil(action);
+			if(action < 0.0 ) action = Math.floor(action);
+			if(action > 1.0) action = 1.0;
+			if(action < -1.0) action = -1.0;
+			double oldX = x;
+			x = x + action;
+			
+			
+			//System.out.println("ACTIONS " + " a1 " +action + " " + " a2 " + action2);
+			if(x < -size || x > size) {
+				reward = -1000.0;
+			} else if((x == 0.0) || (sign(oldX) != sign(x) )) {
+				// win !
+				reward = 1000.0;
+			} else {
+				reward = -1.0;
+			}
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("oracle", reward);
+			//pos.move(x+0.5, 0.5);
+			return ret;
+		}
+
+		@Override
+		public List<String> actionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("a1 enum:true {-1, 0, 1}");
+			return l;
+		}
+
+		@Override
+		public List<String> perceptionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("p1 enum:false [-"+size+", "+size+"]");
+			return l;
+		}
+
+		/** Random move of -1 or +1; note this never returns the 0 (stay) action. */
+		@Override
+		public HashMap<String, Double> randomAction() {
+			double a1 = rand.nextBoolean() ? -1 : 1;
+			
+						
+
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a1);
+			return action;
+			}
+		
+	}
+	
+	/**
+	 * Setup an amoeba for the SimpleReinforcement problem
+	 * @return
+	 */
+	private static AMOEBA setup() {
+		// Single non-enum sensor "p1"; the config XML is generated into a temp file.
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("p1", false));
+		File config;
+		try {
+			config = File.createTempFile("config", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable: lets the compiler know config is initialized
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(null, null, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		
+	
+
+		
+		return amoeba;
+	}
+	
+	
+	
+	/**
+	 * Build the AMOEBA that learns the spatial reward map (position "p1" -> reward).
+	 * Uses Miller regression local models and a mapping error tolerance of 0.025.
+	 */
+	private static AMOEBA setupSpatialReward() {
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("p1", false));
+		File config;
+		try {
+			config = File.createTempFile("configSpatialReward", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable: lets the compiler know config is initialized
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(null, null, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		
+		
+
+		
+		amoeba.setLocalModel(TypeLocalModel.MILLER_REGRESSION);
+		amoeba.getEnvironment().setMappingErrorAllowed(0.025);
+		//amoeba.setReinforcement(true);
+		
+		
+		return amoeba;
+	}
+	
+
+	/**
+	 * Build the AMOEBA that learns the control model:
+	 * (pCurrent, pGoal) -> action. Currently unused by learn()/choose()
+	 * (those call sites are commented out).
+	 */
+	private static AMOEBA setupControlModel() {
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("pCurrent", false));
+		sensors.add(new Pair<String, Boolean>("pGoal", false));
+		File config;
+		try {
+			config = File.createTempFile("configControlModel", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable: lets the compiler know config is initialized
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(null, null, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		
+		
+		
+		
+		amoeba.setLocalModel(TypeLocalModel.MILLER_REGRESSION);
+		amoeba.getEnvironment().setMappingErrorAllowed(0.025);
+		
+		return amoeba;
+	}
+	
+	/**
+	 * Teach a learning agent on the SimpleReinforcement problem
+	 * @param agent
+	 * @return
+	 */
+	public static ArrayList<Double> learning(LearningAgent agent, Environment env){
+		// NOTE(review): in this variant the test() call below is commented out, so
+		// averageRewards is returned empty. The exploration-rate machinery (rand,
+		// explo) is also vestigial: the policy is hard-wired to agent.explore().
+		ArrayList<Double> averageRewards = new ArrayList<Double>();
+		Random rand = new Random();
+		
+		Random r = new Random();
+		HashMap<String, Double> state = env.reset();
+		HashMap<String, Double> state2;
+		double explo = EXPLO_RATE_BASE;
+		for(int i = 0; i < N_LEARN; i++) {
+			int nbStep = 0;
+			state = env.reset();
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			double totReward = 0.0;
+			
+			// execute simulation cycles
+			boolean done = false;
+			boolean invalid = false;
+			
+			
+			while(!done && !invalid) {
+				nbStep++;
+				if(nbStep > MAX_STEP_PER_EPISODE) {
+					invalid = true;
+				}
+				state.remove("oracle");
+				
+				action = new HashMap<String, Double>();
+				
+				action = agent.explore(state, env);
+//				if(rand.nextDouble() < explo) {
+//					action = agent.explore(state, env);
+//				} else {
+//					action = agent.choose(state, env);
+//				}
+				
+				
+				state2 = env.step(action);  // new position with associated reward
+				
+				// -1.0 is the "ordinary step" reward; anything else means goal or out of bounds.
+				if(state2.get("oracle") != -1.0) { //if goal or end of world
+					done = true;
+				}
+				action.put("p1", state.get("p1")); //add previous state to action
+				
+				action.put("oracle", state2.get("oracle")); //add current reward to action
+				
+				// state : previous position and associated reward
+				// state2 : new position with current reward
+				// action : previous state, current action and current reward
+				
+				agent.learn(state, state2, action, done);
+				totReward += action.get("oracle");
+				
+				state = state2;
+			}
+			
+			System.out.println("-----------------------------------------------------------------------");
+			
+			// update exploration rate
+			if(explo > MIN_EXPLO_RATE) {
+				explo -= EXPLO_RATE_DIMINUTION_FACTOR;
+				if(explo < MIN_EXPLO_RATE)
+					explo = MIN_EXPLO_RATE;
+			}
+			
+			System.out.println("Episode "+i+"  reward : "+totReward+"  explo : "+explo);
+			//double testAR = test(agent, env, r, N_TEST);
+			//averageRewards.add(testAR);
+			
+			//Scanner scan = new Scanner(System.in);
+			//scan.nextLine();
+		}
+		
+		return averageRewards;
+	}
+
+	/**
+	 * Evaluate the agent greedily (no exploration) over nbTest episodes.
+	 * Episodes are cut off after 200 steps. Prints and returns the average
+	 * reward; also reports the fraction of episodes with positive reward.
+	 */
+	private static double test(LearningAgent agent, Environment env, Random r, int nbTest) {
+		HashMap<String, Double> state;
+		HashMap<String, Double> state2;
+		double nbPositiveReward = 0.0;
+		double tot_reward = 0.0;
+		for(int i = 0; i < nbTest; i++) {
+			double reward = 0.0;
+			state = env.reset();
+			
+			// execute simulation cycles
+			boolean done = false;
+			int nbStep = 0;
+			while(!done) {
+				nbStep++;
+				// NOTE(review): the cutoff marks done but still performs one more step below.
+				if(nbStep > 200) {
+					done = true;
+				}
+				state.remove("oracle");
+				 HashMap<String, Double> a = agent.choose(state, env);
+				
+				state2 = env.step(a);
+				
+				if(state2.get("oracle") != -1.0) {
+					done = true;
+				}
+				
+				reward += state2.get("oracle");
+				
+				state = state2;
+			}
+			if(reward > 0) {
+				nbPositiveReward += 1.0;
+			}
+			tot_reward += reward;
+		}
+		double averageReward = tot_reward/nbTest;
+		System.out.println("Test average reward : "+averageReward+"  Positive reward %: "+(nbPositiveReward/nbTest));
+		
+		return averageReward;
+	}
+	
+	
+	
+	/** Sign of x as -1 or 1; note that sign(0) == 1 (relied on by step()'s crossing test). */
+	private static int sign(double x) {
+		return x < 0 ? -1 : 1;
+	}
+
+}
\ No newline at end of file
diff --git a/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement2D.java b/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement2D.java
new file mode 100644
index 0000000000000000000000000000000000000000..6c035fda202bcf3c92237dfd00692e513c86fe84
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement2D.java
@@ -0,0 +1,627 @@
+package experiments.reinforcement;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Random;
+
+import agents.context.localModel.TypeLocalModel;
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.tools.Log;
+import fr.irit.smac.amak.ui.drawables.Drawable;
+import gui.AmoebaWindow;
+import kernel.AMOEBA;
+import kernel.World;
+import kernel.backup.SaveHelperDummy;
+import utils.Pair;
+import utils.RandomUtils;
+import utils.TRACE_LEVEL;
+import utils.XmlConfigGenerator;
+
+/**
+ * Train an amoeba on a simple reinforcement task.
+ * The goal of the task is to get to the center. When the position of the agent crosses 0, it gets a reward of 1000.
+ * The agent can only move in 2 directions, by a distance of 1. Moving gives a reward of -1.
+ * If the agent moves outside of the allowed range, it gets a reward of -1000. 
+ * @author Hugo
+ *
+ */
+public abstract class SimpleReinforcement2D {
+	/* Learn and Test */
+	// Hard cap on steps per episode; longer episodes are marked invalid.
+	public static final int MAX_STEP_PER_EPISODE = 200;
+	// Number of training episodes.
+	public static final int N_LEARN = 100;//400
+	// Number of greedy evaluation episodes run after each training episode.
+	public static final int N_TEST = 100;
+	
+	/* Exploration */
+	// Floor of the exploration rate.
+	public static final double MIN_EXPLO_RATE = 0.02;
+	// Linear decay applied to the exploration rate after each episode.
+	public static final double EXPLO_RATE_DIMINUTION_FACTOR = 0.01;
+	// Initial exploration rate (1 = fully random).
+	public static final double EXPLO_RATE_BASE = 1;
+	
+	/**
+	 * Entry point: run one AmoebaQL agent on a TwoDimensionEnv of half-width 10,
+	 * then print the per-episode test reward averaged over all runs
+	 * (a single run in the current configuration).
+	 */
+	public static void main(String[] args) {
+		//poc(true);
+		Configuration.commandLineMode = false;
+		Configuration.plotMilliSecondsUpdate = 20000;
+		/*System.out.println("----- AMOEBA -----");
+		learning(new QLearning(), new OneDimensionEnv());
+		System.out.println("----- END AMOEBA -----");
+		System.out.println("\n\n----- QLEARNING -----");
+		learning(new QLearning());
+		System.out.println("----- END QLEARNING -----");*/
+		ArrayList<ArrayList<Double>> results = new ArrayList<>();
+		for(int i = 0; i < 1; i++) {
+			//LearningAgent agent = new QLearning();
+			LearningAgent agent = new AmoebaQL();
+			//LearningAgent agent = new AmoebaCoop();
+			Environment env = new TwoDimensionEnv(10);
+			results.add(learning(agent, env));
+			System.out.println(i);
+		}
+		
+		// Average the i-th test reward across runs and print one line per episode.
+		int nbEpisodes = results.get(0).size();
+		for(int i = 0; i < nbEpisodes; i++) {
+			double average = 0;
+			for(int j = 0; j < results.size(); j++) {
+				average += results.get(j).get(i);
+			}
+			average /= results.size();
+			System.out.println(""+i+"\t"+average);
+		}
+		
+		//System.exit(0);
+	}
+	
+	/**
+	 * An environment in which a LearningAgent reside
+	 * @author Hugo
+	 *
+	 */
+	public interface Environment {
+		/** Textual description of each action dimension (name, enum flag, values). */
+		public List<String> actionSpace();
+		/** Textual description of each perception dimension (name, enum flag, range). */
+		public List<String> perceptionSpace();
+		/** Start a new episode; returns the initial state (reward under key "oracle"). */
+		public HashMap<String, Double> reset();
+		/** Apply an action; returns the next state with its reward under key "oracle". */
+		public HashMap<String, Double> step(HashMap<String, Double> action);
+		/** A uniformly random valid action. */
+		public HashMap<String, Double> randomAction();
+	}
+	
+	/**
+	 * Wrapper for any kind of learning agent
+	 * @author Hugo
+	 *
+	 */
+	public interface LearningAgent {
+		/** Greedy action for the given state. */
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env);
+		/** Exploratory (usually random) action for the given state. */
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env);
+		/** Learn from one transition: state -> state2 via action; done marks episode end. */
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2, HashMap<String, Double> action, boolean done);
+	}
+	
+	/**
+	 * Compatible only with OneDimensionEnv 
+	 * @author Hugo
+	 *
+	 */
+	public static class AmoebaQL implements LearningAgent {
+		public AMOEBA amoeba;
+		// Learning rate and discount for the Q-learning target.
+		// NOTE(review): lr and the computed q are currently unused — learn() feeds
+		// the raw reward to amoeba (the lr*q line is commented out). rand is unused.
+		public double lr = 0.8;
+		public double gamma = 0.9;
+		private Random rand = new Random();
+		
+		public AmoebaQL() {
+			amoeba = setup();
+			amoeba.setLocalModel(TypeLocalModel.MILLER_REGRESSION);
+			amoeba.getEnvironment().setMappingErrorAllowed(0.025);
+		}
+		
+		/** Greedy action: ask amoeba to maximize over (a1, a2); 0.0 when no proposal. */
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			
+			HashMap<String, Double> bestActions =  amoeba.maximize(state);
+			double a1 = bestActions.getOrDefault("a1", 0.0);
+			double a2 = bestActions.getOrDefault("a2", 0.0);
+//			if(a1 == 0.0) {
+//				a1 = rand.nextBoolean() ? -1 : 1;
+//			}
+//			if(a2 == 0.0) {
+//				a2 = rand.nextBoolean() ? -1 : 1;
+//			}
+			
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a1);
+			action.put("a2", a2);
+			return action;
+		}
+
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2,
+				HashMap<String, Double> action, boolean done) {
+			
+			// state : previous position and associated reward
+			// state2 : new position with current reward
+			// action : previous state, current actions and current reward
+			
+			HashMap<String, Double> state2Copy = new HashMap<>(state2);
+			state2Copy.remove("oracle"); //reward
+			
+			double reward = state2.get("oracle");
+			double q;
+			if(!done) {
+				// TD target: reward + gamma * estimated future value - current estimate.
+				double expectedReward = amoeba.request(action);
+				HashMap<String, Double> futureState = this.choose(state2Copy, null);
+				futureState.putAll(state2);
+				double futureReward = amoeba.request(futureState);
+				//double futureAction = this.choose(state2Copy, null).get("a1");
+				
+				q = reward + gamma * futureReward - expectedReward;
+			} else {
+				q = reward;
+			}
+			HashMap<String, Double> learn = new HashMap<>(action);
+			
+			//learn.put("oracle", lr * q);
+			learn.put("oracle", reward);
+			// learn : previous state, current action and current Q learning reward
+			System.out.println(learn);
+			amoeba.learn(learn);
+			
+		}
+
+		/** Exploration policy: delegate to the environment's random action. */
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+	}
+	
+	/**
+	 * Wrapper for AMOEBA
+	 * @author Hugo
+	 *
+	 */
+	public static class AmoebaCoop implements LearningAgent {
+		public AMOEBA amoeba;
+		
+		public AmoebaCoop() {
+			amoeba = setup();
+			amoeba.setLocalModel(TypeLocalModel.COOP_MILLER_REGRESSION);
+			amoeba.getEnvironment().setMappingErrorAllowed(0.009);
+		}
+		
+		/** Greedy action from amoeba; fall back to a random action when amoeba has no proposal. */
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			HashMap<String, Double> action = amoeba.maximize(state);
+			// NEGATIVE_INFINITY is amoeba's "no proposition" marker.
+			if(action.get("oracle") == Double.NEGATIVE_INFINITY) {
+				action = env.randomAction();
+			}
+			return action;
+		}
+
+		/** Learn the raw (state, action, reward) sample directly. */
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2,
+				HashMap<String, Double> action, boolean done) {
+			amoeba.learn(action);
+		}
+
+		/** Exploration policy: delegate to the environment's random action. */
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+		
+	}
+	
+	/**
+	 * Compatible only with OneDimensionEnv.<br/>
+	 * An extremely crude and quick implementation of Q learning.
+	 * Not expected to perform well, but should be better than random.
+	 * @author Hugo
+	 *
+	 */
+	public static class QLearning implements LearningAgent {
+		// Q[position + 50][action]: action index 0 = move -1, index 1 = move +1.
+		// NOTE(review): the +50 offset and size 102 assume p1 stays within about
+		// [-50, 51]; a larger environment would throw ArrayIndexOutOfBoundsException.
+		public double[][] Q = new double[102][2];
+		public double lr = 0.8;
+		public double gamma = 0.9;
+		private Random rand = new Random();
+		
+		/** Greedy action from the Q table; ties are broken randomly. */
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			int p = state.get("p1").intValue()+50;
+			double a;
+			if(Q[p][0] == Q[p][1]) {
+				a = rand.nextBoolean() ? -1 : 1;
+			} else {
+				a = Q[p][0] > Q[p][1] ? -1 : 1;
+			}
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a);
+			return action;
+		}
+
+		/** Standard tabular Q-learning update: Q += lr * (r + gamma*max(Q') - Q). */
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2,
+				HashMap<String, Double> action, boolean done) {
+			int p = state.get("p1").intValue()+50;
+			int p2 = state2.get("p1").intValue()+50;
+			int a = action.get("a1").intValue() == -1 ? 0 : 1;
+			double reward = state2.get("oracle");
+			double max = Double.NEGATIVE_INFINITY;
+			if(!done) {
+				// NOTE(review): iterating double[] as Double autoboxes each value.
+				for(Double v : Q[p2]) {
+					max = Math.max(max, v);
+				}
+			} else {
+				max = reward;
+			}
+			// 
+			double q = reward + gamma * max - Q[p][a];
+			Q[p][a] += lr * q;
+		}
+
+		/** Exploration policy: delegate to the environment's random action. */
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+		
+	}
+	
+	/**
+	 * Two-dimensional environment: the agent sits at integer position (x, y) in
+	 * [-size, size]^2 and must reach the center. Rewards from step(): 1000 on
+	 * success, -1000 when leaving the allowed square, -1 otherwise (-1.0 is the
+	 * "ordinary step" sentinel checked by callers).
+	 */
+	public static class TwoDimensionEnv implements Environment {
+		private Random rand = new Random();
+		// Current position of the agent.
+		private double x = 0;
+		private double y = 0;
+		// Reward produced by the last step()/reset().
+		private double reward = 0;
+		// Half-width of the world: each coordinate is valid in [-size, size].
+		private double size;
+		// Optional visualization handle (currently unused; drawing code is commented out).
+		private Drawable pos;
+		
+		public TwoDimensionEnv(double envSize) {
+			
+			size = envSize;
+			
+			if(!Configuration.commandLineMode) {
+				AmoebaWindow instance = AmoebaWindow.instance();
+				//pos = new DrawableOval(0.5, 0.5, 1, 1);
+				//pos.setColor(new Color(0.5, 0.0, 0.0, 0.5));
+				//instance.mainVUI.add(pos);
+				//instance.mainVUI.createAndAddRectangle(-50, -0.25, 100, 0.5);
+				//instance.mainVUI.createAndAddRectangle(-0.25, -1, 0.5, 2);
+				instance.point.hide();
+				//instance.rectangle.hide();
+			}
+		}
+		
+		/** Place the agent at a random integer position in the square; reward reset to 0. */
+		@Override
+		public HashMap<String, Double> reset(){
+			x = RandomUtils.nextDouble(rand, -size, Math.nextUp(size));
+			x = Math.round(x);
+			y = RandomUtils.nextDouble(rand, -size, Math.nextUp(size));
+			y = Math.round(y);
+			reward = 0.0;
+			//pos.move(x+0.5, 0.5);
+			
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("p2", y);
+			ret.put("oracle", reward);
+			return ret;
+		}
+		
+		/**
+		 * Apply actions "a1" (x axis) and "a2" (y axis), each coerced to -1, 0 or 1,
+		 * and return the new position with its reward under key "oracle".
+		 */
+		@Override
+		public HashMap<String, Double> step(HashMap<String, Double> actionMap){
+			double action = actionMap.get("a1");
+			//if(action == 0.0) action = rand.nextDouble();
+			// Coerce the raw action into {-1, 0, 1}: round away from zero, then clamp.
+			if(action > 0.0) action = Math.ceil(action);
+			if(action < 0.0 ) action = Math.floor(action);
+			if(action > 1.0) action = 1.0;
+			if(action < -1.0) action = -1.0;
+			double oldX = x;
+			x = x + action;
+			
+			double action2 = actionMap.get("a2");
+			//if(action2 == 0.0) action2 = rand.nextDouble();
+			if(action2 > 0.0) action2 = Math.ceil(action2);
+			if(action2 < 0.0 ) action2 = Math.floor(action2);
+			if(action2 > 1.0) action2 = 1.0;
+			if(action2 < -1.0) action2 = -1.0;
+			double oldY = y;
+			y = y + action2;
+			
+			//System.out.println("ACTIONS " + " a1 " +action + " " + " a2 " + action2);
+			if(x < -size || x > size || y < -size || y > size) {
+				reward = -1000.0;
+			// NOTE(review): as written, winning requires landing exactly on (0,0) or
+			// crossing zero on BOTH axes in the same step — crossing one axis alone
+			// does not win. Confirm this is the intended goal condition.
+			} else if((x == 0.0 && y == 0.0) || (sign(oldX) != sign(x) && sign(oldY) != sign(y) )) {
+				// win !
+				reward = 1000.0;
+			} else {
+				reward = -1.0;
+			}
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("p2", y);
+			ret.put("oracle", reward);
+			//pos.move(x+0.5, 0.5);
+			return ret;
+		}
+
+		@Override
+		public List<String> actionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("a1 enum:true {-1, 0, 1}");
+			l.add("a2 enum:true {-1, 0, 1}");
+			return l;
+		}
+
+		@Override
+		public List<String> perceptionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("p1 enum:false [-"+size+", "+size+"]");
+			l.add("p2 enum:false [-"+size+", "+size+"]");
+			return l;
+		}
+
+		/** Random move; never returns (0, 0), so the agent always moves on at least one axis. */
+		@Override
+		public HashMap<String, Double> randomAction() {
+			double a1 = rand.nextInt(3) - 1;
+			double a2 = (a1 == 0.0) ? (rand.nextBoolean() ? -1 : 1) : (rand.nextInt(3) - 1);
+						
+//			double a1 =  rand.nextBoolean() ? -1 : 1;
+//			double a2 =  rand.nextBoolean() ? -1 : 1;
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a1);
+			action.put("a2", a2);
+			return action;
+			}
+		
+	}
+	
+	/**
+	 * Setup an amoeba for the SimpleReinforcement problem
+	 * @return
+	 */
+	private static AMOEBA setup() {
+		// Percepts p1/p2 (enum flag false) and controllable actions a1/a2 (enum flag true).
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("p1", false));
+		sensors.add(new Pair<String, Boolean>("a1", true));
+		sensors.add(new Pair<String, Boolean>("p2", false));
+		sensors.add(new Pair<String, Boolean>("a2", true));
+		File config;
+		try {
+			config = File.createTempFile("config", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable: lets the compiler know config is initialized
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(null, null, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		return amoeba;
+	}
+	
+	/**
+	 * Teach a learning agent on the SimpleReinforcement problem
+	 * @param agent
+	 * @return
+	 */
+	public static ArrayList<Double> learning(LearningAgent agent, Environment env){
+		// NOTE(review): the epsilon-greedy branch is commented out, so the policy is
+		// hard-wired to agent.explore(); rand and explo only affect the printed trace.
+		ArrayList<Double> averageRewards = new ArrayList<Double>();
+		Random rand = new Random();
+		
+		Random r = new Random();
+		HashMap<String, Double> state = env.reset();
+		HashMap<String, Double> state2;
+		double explo = EXPLO_RATE_BASE;
+		for(int i = 0; i < N_LEARN; i++) {
+			int nbStep = 0;
+			state = env.reset();
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			double totReward = 0.0;
+			
+			// execute simulation cycles
+			boolean done = false;
+			boolean invalid = false;
+			
+			
+			while(!done && !invalid) {
+				nbStep++;
+				if(nbStep > MAX_STEP_PER_EPISODE) {
+					invalid = true;
+				}
+				state.remove("oracle");
+				
+				action = new HashMap<String, Double>();
+				
+				action = agent.explore(state, env);
+//				if(rand.nextDouble() < explo) {
+//					action = agent.explore(state, env);
+//				} else {
+//					action = agent.choose(state, env);
+//				}
+				
+				
+				state2 = env.step(action);  // new position with associated reward
+				
+				// -1.0 is the "ordinary step" reward; anything else means goal or out of bounds.
+				if(state2.get("oracle") != -1.0) { //if goal or end of world
+					done = true;
+				}
+				action.put("p1", state.get("p1")); //add previous state to action
+				action.put("p2", state.get("p2")); //add previous state to action
+				
+				action.put("oracle", state2.get("oracle")); //add current reward to action
+				
+				// state : previous position and associated reward
+				// state2 : new position with current reward
+				// action : previous state, current action and current reward
+				
+				agent.learn(state, state2, action, done);
+				totReward += action.get("oracle");
+				
+				state = state2;
+			}
+			
+			System.out.println("-----------------------------------------------------------------------");
+			
+			// update exploration rate
+			if(explo > MIN_EXPLO_RATE) {
+				explo -= EXPLO_RATE_DIMINUTION_FACTOR;
+				if(explo < MIN_EXPLO_RATE)
+					explo = MIN_EXPLO_RATE;
+			}
+			
+			System.out.println("Episode "+i+"  reward : "+totReward+"  explo : "+explo);
+			// Greedy evaluation after each training episode; its average reward is recorded.
+			double testAR = test(agent, env, r, N_TEST);
+			averageRewards.add(testAR);
+			
+			//Scanner scan = new Scanner(System.in);
+			//scan.nextLine();
+		}
+		
+		return averageRewards;
+	}
+
+	/**
+	 * Evaluate the agent greedily (no exploration) over nbTest episodes.
+	 * Episodes are cut off after 200 steps. Prints and returns the average
+	 * reward; also reports the fraction of episodes with positive reward.
+	 */
+	private static double test(LearningAgent agent, Environment env, Random r, int nbTest) {
+		HashMap<String, Double> state;
+		HashMap<String, Double> state2;
+		double nbPositiveReward = 0.0;
+		double tot_reward = 0.0;
+		for(int i = 0; i < nbTest; i++) {
+			double reward = 0.0;
+			state = env.reset();
+			
+			// execute simulation cycles
+			boolean done = false;
+			int nbStep = 0;
+			while(!done) {
+				nbStep++;
+				// NOTE(review): the cutoff marks done but still performs one more step below.
+				if(nbStep > 200) {
+					done = true;
+				}
+				state.remove("oracle");
+				 HashMap<String, Double> a = agent.choose(state, env);
+				
+				state2 = env.step(a);
+				
+				if(state2.get("oracle") != -1.0) {
+					done = true;
+				}
+				
+				reward += state2.get("oracle");
+				
+				state = state2;
+			}
+			if(reward > 0) {
+				nbPositiveReward += 1.0;
+			}
+			tot_reward += reward;
+		}
+		double averageReward = tot_reward/nbTest;
+		System.out.println("Test average reward : "+averageReward+"  Positive reward %: "+(nbPositiveReward/nbTest));
+		
+		return averageReward;
+	}
+	
+	/**
+	 * This is a proof of concept, showing that if amoeba learn the correct model of the reward,
+	 * it can produce a good solution.
+	 * The expected average reward for the optimal solution is 75.
+	 * The main cause of negative reward is infinite loop (usually near the objective). In such case, the reward is -200
+	 */
+	public static void poc(boolean learnMalus) {
+		// NOTE(review): this proof of concept trains and acts only on p1/a1, yet uses
+		// TwoDimensionEnv, whose step() also reads "a2" — if amoeba.maximize() or the
+		// random fallback below does not supply "a2", step() will NPE. Confirm intent.
+		AMOEBA amoeba = setup();
+		Environment env = new TwoDimensionEnv(50);
+		
+		// train: hand-crafted samples teaching "move toward 0" (reward grows near 0),
+		// optionally with a malus for moving away (learnMalus).
+		for(double n = 0.0; n < 0.5; n+=0.1) {
+			double pos = 50.0-n;
+			for(int i = 0; i < 49; i++) {
+				double reward = 100 - Math.abs(pos);
+				HashMap<String, Double> action = new HashMap<String, Double>();
+				action.put("p1", pos);
+				action.put("a1", -1.0);
+				action.put("oracle", reward);
+				amoeba.learn(action);
+				
+				if(learnMalus) {
+					reward = -150 + Math.abs(pos);
+					action.put("a1", 1.0);
+					action.put("oracle", reward);
+					amoeba.learn(action);
+				}
+				
+				pos -= 1.0;
+			}
+			
+			pos = -50.0-n;
+			for(int i = 0; i < 49; i++) {
+				double reward = 100 - Math.abs(pos);
+				HashMap<String, Double> action = new HashMap<String, Double>();
+				action.put("p1", pos);
+				action.put("a1", 1.0);
+				action.put("oracle", reward);
+				amoeba.learn(action);
+				
+				if(learnMalus) {
+					reward = -150 + Math.abs(pos);
+					action.put("a1", -1.0);
+					action.put("oracle", reward);
+					amoeba.learn(action);
+				}
+				
+				pos += 1.0;
+			}
+		}
+		
+		// tests
+		Random r = new Random();
+		HashMap<String, Double> state = env.reset();
+		HashMap<String, Double> state2;
+		double tot_reward = 0.0;
+		int nbTest = 100;
+		double nbPositiveReward = 0;
+		for(int i = 0; i < nbTest; i++) {
+			double reward = 0.0;
+			state = env.reset();
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			
+			// execute simulation cycles
+			boolean done = false;
+			int nbStep = 0;
+			while(!done) {
+				nbStep++;
+				if(nbStep > 200) {
+					done = true;
+				}
+				state.remove("oracle");
+				action = amoeba.maximize(state);
+				// random action if no proposition from amoeba
+				if(action.get("oracle").equals(Double.NEGATIVE_INFINITY) ) {
+					action.put("a1", (r.nextBoolean() ? 1.0 : -1.0));
+				}
+				//System.out.println("action "+action);
+				
+				state2 = env.step(action);
+				
+				if(state2.get("oracle") != -1.0) {
+					done = true;
+				}
+				
+				reward += state2.get("oracle");
+				
+				//System.out.println("state2 "+state2+"  reward "+reward);
+				
+				state = state2;
+			}
+			if(reward > 0) {
+				nbPositiveReward += 1.0;
+			}
+			tot_reward += reward;
+			//System.out.println("-----------------------------\nTot reward "+tot_reward+"\n-----------------------------");
+		}
+		System.out.println("Average reward : "+tot_reward/nbTest+"  Positive reward %: "+(nbPositiveReward/nbTest));
+	}
+	
+	/** Sign of x as -1 or 1; note that sign(0) == 1 (relied on by step()'s crossing test). */
+	private static int sign(double x) {
+		return x < 0 ? -1 : 1;
+	}
+
+}
diff --git a/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement2DSpatialRewardAndAction.java b/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement2DSpatialRewardAndAction.java
new file mode 100644
index 0000000000000000000000000000000000000000..2b40d5655a678f2f55a9475a68cec5d9a2fdc127
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/reinforcement/SimpleReinforcement2DSpatialRewardAndAction.java
@@ -0,0 +1,618 @@
+package experiments.reinforcement;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Random;
+
+import agents.context.localModel.TypeLocalModel;
+import agents.percept.Percept;
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.tools.Log;
+import fr.irit.smac.amak.ui.drawables.Drawable;
+import gui.AmoebaWindow;
+import kernel.AMOEBA;
+import kernel.World;
+import kernel.backup.SaveHelperDummy;
+import utils.Pair;
+import utils.RandomUtils;
+import utils.TRACE_LEVEL;
+import utils.XmlConfigGenerator;
+
+/**
+ * Train an amoeba on a simple 2D reinforcement task.
+ * The goal of the task is to reach the center. When the agent reaches the origin
+ * (or crosses 0 on both axes in one step), it gets a reward of 1000.
+ * The agent can only move a distance of 1 along each axis per step. Moving gives a reward of -1.
+ * If the agent moves outside of the allowed range, it gets a reward of -1000.
+ * @author Hugo
+ *
+ */
+public abstract class SimpleReinforcement2DSpatialRewardAndAction {
+	/* Learn and Test */
+	public static final int MAX_STEP_PER_EPISODE = 200;
+	public static final int N_LEARN = 400;//400
+	public static final int N_TEST = 100;
+	
+	/* Exploration */
+	public static final double MIN_EXPLO_RATE = 0.02;
+	public static final double EXPLO_RATE_DIMINUTION_FACTOR = 0.01;
+	public static final double EXPLO_RATE_BASE = 1;
+	
+	public static void main(String[] args) {
+		//poc(true);
+		Configuration.commandLineMode = false;
+		Configuration.plotMilliSecondsUpdate = 20000;
+		/*System.out.println("----- AMOEBA -----");
+		learning(new QLearning(), new OneDimensionEnv());
+		System.out.println("----- END AMOEBA -----");
+		System.out.println("\n\n----- QLEARNING -----");
+		learning(new QLearning());
+		System.out.println("----- END QLEARNING -----");*/
+		// Run one training session (loop bound is 1) and collect its per-episode results.
+		ArrayList<ArrayList<Double>> results = new ArrayList<>();
+		for(int i = 0; i < 1; i++) {
+			//LearningAgent agent = new QLearning();
+			LearningAgent agent = new AmoebaQL();
+			//LearningAgent agent = new AmoebaCoop();
+			Environment env = new TwoDimensionEnv(10);
+			results.add(learning(agent, env));
+			System.out.println(i);
+		}
+		
+		// Average the i-th episode reward across runs.
+		// NOTE(review): learning() currently returns an empty list (its test() call
+		// is commented out), so nbEpisodes is 0 and this loop prints nothing.
+		int nbEpisodes = results.get(0).size();
+		for(int i = 0; i < nbEpisodes; i++) {
+			double average = 0;
+			for(int j = 0; j < results.size(); j++) {
+				average += results.get(j).get(i);
+			}
+			average /= results.size();
+			System.out.println(""+i+"\t"+average);
+		}
+		
+		//System.exit(0);
+	}
+	
+	/**
+	 * An environment in which a LearningAgent resides.
+	 * States and actions are exchanged as name-to-value maps; the special key
+	 * "oracle" carries the reward of the last transition.
+	 * @author Hugo
+	 *
+	 */
+	public interface Environment {
+		/** Textual description of each action dimension (name, enum flag, domain). */
+		public List<String> actionSpace();
+		/** Textual description of each perception dimension (name, enum flag, range). */
+		public List<String> perceptionSpace();
+		/** Resets the environment and returns the initial state (with "oracle" set to 0). */
+		public HashMap<String, Double> reset();
+		/** Applies the action and returns the new state, with its reward under "oracle". */
+		public HashMap<String, Double> step(HashMap<String, Double> action);
+		/** Returns a random valid action. */
+		public HashMap<String, Double> randomAction();
+	}
+	
+	/**
+	 * Wrapper for any kind of learning agent
+	 * @author Hugo
+	 *
+	 */
+	public interface LearningAgent {
+		/** Chooses the greedy (exploitation) action for the given state. */
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env);
+		/** Chooses an exploratory action for the given state. */
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env);
+		/** Updates the agent from the transition state -> state2 produced by action. */
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2, HashMap<String, Double> action, boolean done);
+	}
+	
+	/**
+	 * Agent backed by an AMOEBA instance that learns a spatial reward mapping.
+	 * Compatible only with TwoDimensionEnv.
+	 * @author Hugo
+	 *
+	 */
+	public static class AmoebaQL implements LearningAgent {
+		public AMOEBA amoebaSpatialReward;
+		public double lr = 0.8;
+		public double gamma = 0.9;
+		private Random rand = new Random();
+		
+		public AmoebaQL() {
+			amoebaSpatialReward = setup();
+			amoebaSpatialReward.setLocalModel(TypeLocalModel.MILLER_REGRESSION);
+			amoebaSpatialReward.getEnvironment().setMappingErrorAllowed(0.025);
+		}
+		
+		/**
+		 * Asks AMOEBA for the action maximizing the learned reward at this state.
+		 * Falls back to 0.0 on each axis when AMOEBA makes no proposition.
+		 */
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			HashMap<String, Double> bestActions = amoebaSpatialReward.maximize(state);
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", bestActions.getOrDefault("a1", 0.0));
+			action.put("a2", bestActions.getOrDefault("a2", 0.0));
+			return action;
+		}
+
+		/**
+		 * Learns the reward observed at the new position.
+		 * state  : previous position and associated reward
+		 * state2 : new position with current reward (under the "oracle" key)
+		 * action : previous state, current actions and current reward (unused
+		 *          here: the learned reward mapping is purely spatial)
+		 */
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2,
+				HashMap<String, Double> action, boolean done) {
+			amoebaSpatialReward.learn(state2);
+		}
+
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+	}
+	
+	/**
+	 * Wrapper for AMOEBA
+	 * @author Hugo
+	 *
+	 */
+	public static class AmoebaCoop implements LearningAgent {
+		public AMOEBA amoeba;
+		
+		public AmoebaCoop() {
+			amoeba = setup();
+			amoeba.setLocalModel(TypeLocalModel.COOP_MILLER_REGRESSION);
+			amoeba.getEnvironment().setMappingErrorAllowed(0.009);
+		}
+		
+		// Greedy action from AMOEBA; falls back to a random action when AMOEBA
+		// has no proposition (maximize() signals this with oracle = -infinity).
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			HashMap<String, Double> action = amoeba.maximize(state);
+			if(action.get("oracle") == Double.NEGATIVE_INFINITY) {
+				action = env.randomAction();
+			}
+			return action;
+		}
+
+		// Learns from the action map (previous state + action + reward) built by learning().
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2,
+				HashMap<String, Double> action, boolean done) {
+			amoeba.learn(action);
+		}
+
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+		
+	}
+	
+	/**
+	 * Compatible only with OneDimensionEnv.<br/>
+	 * An extremely crude and quick implementation of Q learning.
+	 * Not expected to perform well, but should be better than random.
+	 * @author Hugo
+	 *
+	 */
+	public static class QLearning implements LearningAgent {
+		// Q[p][a]: position p shifted by +50 into [0, 101]; a is 0 for action -1, 1 for +1.
+		public double[][] Q = new double[102][2];
+		public double lr = 0.8;
+		public double gamma = 0.9;
+		private Random rand = new Random();
+		
+		// Greedy action w.r.t. Q, breaking exact ties randomly.
+		@Override
+		public HashMap<String, Double> choose(HashMap<String, Double> state, Environment env) {
+			int p = state.get("p1").intValue()+50;
+			double a;
+			if(Q[p][0] == Q[p][1]) {
+				a = rand.nextBoolean() ? -1 : 1;
+			} else {
+				a = Q[p][0] > Q[p][1] ? -1 : 1;
+			}
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a);
+			return action;
+		}
+
+		// Tabular Q update: Q[p][a] += lr * (r + gamma * max_a' Q[p'][a'] - Q[p][a]).
+		// On terminal transitions the bootstrap term is replaced by the raw reward.
+		@Override
+		public void learn(HashMap<String, Double> state, HashMap<String, Double> state2,
+				HashMap<String, Double> action, boolean done) {
+			int p = state.get("p1").intValue()+50;
+			int p2 = state2.get("p1").intValue()+50;
+			int a = action.get("a1").intValue() == -1 ? 0 : 1;
+			double reward = state2.get("oracle");
+			double max = Double.NEGATIVE_INFINITY;
+			if(!done) {
+				// primitive loop variable: avoids boxing each Q value into a Double
+				for(double v : Q[p2]) {
+					max = Math.max(max, v);
+				}
+			} else {
+				max = reward;
+			}
+			double q = reward + gamma * max - Q[p][a];
+			Q[p][a] += lr * q;
+		}
+
+		@Override
+		public HashMap<String, Double> explore(HashMap<String, Double> state, Environment env) {
+			return env.randomAction();
+		}
+		
+	}
+	
+	/**
+	 * 2D environment of side 2*size centered on the origin.
+	 * Reaching the origin (or crossing it on both axes in one step) yields +1000,
+	 * leaving the allowed square yields -1000, any other move -1.
+	 */
+	public static class TwoDimensionEnv implements Environment {
+		private Random rand = new Random();
+		private double x = 0;
+		private double y = 0;
+		private double reward = 0;
+		private double size;
+		private Drawable pos;
+		
+		public TwoDimensionEnv(double envSize) {
+			size = envSize;
+			
+			if(!Configuration.commandLineMode) {
+				// GUI mode: only the default point marker is hidden for now.
+				AmoebaWindow instance = AmoebaWindow.instance();
+				instance.point.hide();
+			}
+		}
+		
+		// Restart an episode at a random integer position in [-size, size]^2.
+		@Override
+		public HashMap<String, Double> reset(){
+			x = RandomUtils.nextDouble(rand, -size, Math.nextUp(size));
+			x = Math.round(x);
+			y = RandomUtils.nextDouble(rand, -size, Math.nextUp(size));
+			y = Math.round(y);
+			reward = 0.0;
+			
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("p2", y);
+			ret.put("oracle", reward);
+			return ret;
+		}
+		
+		@Override
+		public HashMap<String, Double> step(HashMap<String, Double> actionMap){
+			// A missing action key means "no move on that axis" (0.0) instead of
+			// throwing an NPE on unboxing when a caller only supplies "a1".
+			double action = actionMap.getOrDefault("a1", 0.0);
+			// snap the requested move to {-1, 0, 1}
+			if(action > 0.0) action = Math.ceil(action);
+			if(action < 0.0 ) action = Math.floor(action);
+			if(action > 1.0) action = 1.0;
+			if(action < -1.0) action = -1.0;
+			double oldX = x;
+			x = x + action;
+			
+			double action2 = actionMap.getOrDefault("a2", 0.0);
+			if(action2 > 0.0) action2 = Math.ceil(action2);
+			if(action2 < 0.0 ) action2 = Math.floor(action2);
+			if(action2 > 1.0) action2 = 1.0;
+			if(action2 < -1.0) action2 = -1.0;
+			double oldY = y;
+			y = y + action2;
+			
+			if(x < -size || x > size || y < -size || y > size) {
+				// left the allowed square
+				reward = -1000.0;
+			} else if((x == 0.0 && y == 0.0) || (sign(oldX) != sign(x) && sign(oldY) != sign(y) )) {
+				// win: on the origin, or crossed it on both axes in one step
+				reward = 1000.0;
+			} else {
+				reward = -1.0;
+			}
+			HashMap<String, Double> ret = new HashMap<>();
+			ret.put("p1", x);
+			ret.put("p2", y);
+			ret.put("oracle", reward);
+			return ret;
+		}
+
+		@Override
+		public List<String> actionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("a1 enum:true {-1, 0, 1}");
+			l.add("a2 enum:true {-1, 0, 1}");
+			return l;
+		}
+
+		@Override
+		public List<String> perceptionSpace() {
+			ArrayList<String> l = new ArrayList<>();
+			l.add("p1 enum:false [-"+size+", "+size+"]");
+			l.add("p2 enum:false [-"+size+", "+size+"]");
+			return l;
+		}
+
+		// Random action that never stays fully still: if a1 is 0, a2 is forced to +/-1.
+		@Override
+		public HashMap<String, Double> randomAction() {
+			double a1 = rand.nextInt(3) - 1;
+			double a2 = (a1 == 0.0) ? (rand.nextBoolean() ? -1 : 1) : (rand.nextInt(3) - 1);
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			action.put("a1", a1);
+			action.put("a2", a2);
+			return action;
+		}
+		
+	}
+	
+	/**
+	 * Setup an amoeba for the SimpleReinforcement problem.
+	 * @return a fresh AMOEBA with percepts p1 and p2 bounded to [-10, 10]
+	 */
+	private static AMOEBA setup() {
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("p1", false));
+		sensors.add(new Pair<String, Boolean>("p2", false));
+		File config;
+		try {
+			// ".xml" (with the dot) so the temp file gets a real .xml extension
+			config = File.createTempFile("config", ".xml");
+			config.deleteOnExit(); // don't leave generated configs behind
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable: lets the compiler know config is initialized
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(null, null, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		
+		// percept bounds match the TwoDimensionEnv(10) used in main()
+		for(Percept pct : amoeba.getPercepts()) {
+			pct.setMax(10);
+			pct.setMin(-10);
+		}
+		
+		amoeba.setReinforcement(true);
+		
+		return amoeba;
+	}
+	
+	/**
+	 * Teach a learning agent on the SimpleReinforcement problem.
+	 * NOTE(review): exploitation is currently disabled — the agent always
+	 * explores (the explo-rate branch below is commented out), so the
+	 * exploration-rate bookkeeping has no effect on action selection.
+	 * @param agent the learning agent to train
+	 * @param env the environment to train it in
+	 * @return per-episode test rewards; currently always empty because the
+	 *         call to test() below is commented out
+	 */
+	public static ArrayList<Double> learning(LearningAgent agent, Environment env){
+		ArrayList<Double> averageRewards = new ArrayList<Double>();
+		Random rand = new Random();
+		
+		Random r = new Random();
+		HashMap<String, Double> state = env.reset();
+		HashMap<String, Double> state2;
+		double explo = EXPLO_RATE_BASE;
+		for(int i = 0; i < N_LEARN; i++) {
+			int nbStep = 0;
+			state = env.reset();
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			double totReward = 0.0;
+			
+			// execute simulation cycles
+			boolean done = false;
+			boolean invalid = false;
+			
+			
+			while(!done && !invalid) {
+				nbStep++;
+				// episodes are cut after MAX_STEP_PER_EPISODE steps; note the
+				// current step still executes before the loop exits
+				if(nbStep > MAX_STEP_PER_EPISODE) {
+					invalid = true;
+				}
+				state.remove("oracle");
+				
+				action = new HashMap<String, Double>();
+				
+				action = agent.explore(state, env);
+//				if(rand.nextDouble() < explo) {
+//					action = agent.explore(state, env);
+//				} else {
+//					action = agent.choose(state, env);
+//				}
+				
+				
+				state2 = env.step(action);  // new position with associated reward
+				
+				if(state2.get("oracle") != -1.0) { //if goal or end of world
+					done = true;
+				}
+				action.put("p1", state.get("p1")); //add previous state to action
+				action.put("p2", state.get("p2")); //add previous state to action
+				
+				action.put("oracle", state2.get("oracle")); //add current reward to action
+				
+				// state : previous position and associated reward
+				// state2 : new position with current reward
+				// action : previous state, current action and current reward
+				
+				agent.learn(state, state2, action, done);
+				totReward += action.get("oracle");
+				
+				state = state2;
+			}
+			
+			System.out.println("-----------------------------------------------------------------------");
+			
+			// update exploration rate (currently without effect, see note above)
+			if(explo > MIN_EXPLO_RATE) {
+				explo -= EXPLO_RATE_DIMINUTION_FACTOR;
+				if(explo < MIN_EXPLO_RATE)
+					explo = MIN_EXPLO_RATE;
+			}
+			
+			System.out.println("Episode "+i+"  reward : "+totReward+"  explo : "+explo);
+			//double testAR = test(agent, env, r, N_TEST);
+			//averageRewards.add(testAR);
+			
+			//Scanner scan = new Scanner(System.in);
+			//scan.nextLine();
+		}
+		
+		return averageRewards;
+	}
+
+	/**
+	 * Evaluates the agent greedily (no exploration) over nbTest episodes.
+	 * An episode ends on any reward other than -1 (goal or out-of-bounds),
+	 * or is cut after 200 steps (the cutting step still executes once).
+	 * @return the average episode reward; also prints it with the fraction
+	 *         of episodes that ended with a positive total reward
+	 */
+	private static double test(LearningAgent agent, Environment env, Random r, int nbTest) {
+		HashMap<String, Double> state;
+		HashMap<String, Double> state2;
+		double nbPositiveReward = 0.0;
+		double tot_reward = 0.0;
+		for(int i = 0; i < nbTest; i++) {
+			double reward = 0.0;
+			state = env.reset();
+			
+			// execute simulation cycles
+			boolean done = false;
+			int nbStep = 0;
+			while(!done) {
+				nbStep++;
+				if(nbStep > 200) {
+					done = true;
+				}
+				state.remove("oracle");
+				 HashMap<String, Double> a = agent.choose(state, env);
+				
+				state2 = env.step(a);
+				
+				if(state2.get("oracle") != -1.0) {
+					done = true;
+				}
+				
+				reward += state2.get("oracle");
+				
+				state = state2;
+			}
+			if(reward > 0) {
+				nbPositiveReward += 1.0;
+			}
+			tot_reward += reward;
+		}
+		double averageReward = tot_reward/nbTest;
+		System.out.println("Test average reward : "+averageReward+"  Positive reward %: "+(nbPositiveReward/nbTest));
+		
+		return averageReward;
+	}
+	
+	/**
+	 * This is a proof of concept, showing that if amoeba learns the correct model of the reward,
+	 * it can produce a good solution.
+	 * The expected average reward for the optimal solution is 75.
+	 * The main cause of negative reward is infinite loop (usually near the objective). In such case, the reward is -200.
+	 * NOTE(review): this method only trains and acts on p1/a1 (one dimension)
+	 * while TwoDimensionEnv.step() also reads "a2" — presumably this predates
+	 * the 2D environment; verify it still runs against TwoDimensionEnv.
+	 */
+	public static void poc(boolean learnMalus) {
+		AMOEBA amoeba = setup();
+		Environment env = new TwoDimensionEnv(50);
+		
+		// train: hand-feed the ideal reward model, sweeping positions from both
+		// ends toward the center at several fractional offsets n
+		for(double n = 0.0; n < 0.5; n+=0.1) {
+			double pos = 50.0-n;
+			for(int i = 0; i < 49; i++) {
+				// moving toward the center (a1 = -1) is rewarded more the closer we are
+				double reward = 100 - Math.abs(pos);
+				HashMap<String, Double> action = new HashMap<String, Double>();
+				action.put("p1", pos);
+				action.put("a1", -1.0);
+				action.put("oracle", reward);
+				amoeba.learn(action);
+				
+				if(learnMalus) {
+					// moving away from the center is penalized
+					reward = -150 + Math.abs(pos);
+					action.put("a1", 1.0);
+					action.put("oracle", reward);
+					amoeba.learn(action);
+				}
+				
+				pos -= 1.0;
+			}
+			
+			// symmetric sweep from the negative side (a1 = +1 moves toward center)
+			pos = -50.0-n;
+			for(int i = 0; i < 49; i++) {
+				double reward = 100 - Math.abs(pos);
+				HashMap<String, Double> action = new HashMap<String, Double>();
+				action.put("p1", pos);
+				action.put("a1", 1.0);
+				action.put("oracle", reward);
+				amoeba.learn(action);
+				
+				if(learnMalus) {
+					reward = -150 + Math.abs(pos);
+					action.put("a1", -1.0);
+					action.put("oracle", reward);
+					amoeba.learn(action);
+				}
+				
+				pos += 1.0;
+			}
+		}
+		
+		// tests: act greedily with the hand-trained model
+		Random r = new Random();
+		HashMap<String, Double> state = env.reset();
+		HashMap<String, Double> state2;
+		double tot_reward = 0.0;
+		int nbTest = 100;
+		double nbPositiveReward = 0;
+		for(int i = 0; i < nbTest; i++) {
+			double reward = 0.0;
+			state = env.reset();
+			HashMap<String, Double> action = new HashMap<String, Double>();
+			
+			// execute simulation cycles
+			boolean done = false;
+			int nbStep = 0;
+			while(!done) {
+				nbStep++;
+				if(nbStep > 200) {
+					done = true;
+				}
+				state.remove("oracle");
+				action = amoeba.maximize(state);
+				// random action if no proposition from amoeba
+				if(action.get("oracle").equals(Double.NEGATIVE_INFINITY) ) {
+					action.put("a1", (r.nextBoolean() ? 1.0 : -1.0));
+				}
+				//System.out.println("action "+action);
+				
+				state2 = env.step(action);
+				
+				if(state2.get("oracle") != -1.0) {
+					done = true;
+				}
+				
+				reward += state2.get("oracle");
+				
+				//System.out.println("state2 "+state2+"  reward "+reward);
+				
+				state = state2;
+			}
+			if(reward > 0) {
+				nbPositiveReward += 1.0;
+			}
+			tot_reward += reward;
+			//System.out.println("-----------------------------\nTot reward "+tot_reward+"\n-----------------------------");
+		}
+		System.out.println("Average reward : "+tot_reward/nbTest+"  Positive reward %: "+(nbPositiveReward/nbTest));
+	}
+	
+	/** Sign of x: -1 for negatives, +1 otherwise (0 and NaN map to +1). */
+	private static int sign(double x) {
+		if (x < 0) {
+			return -1;
+		}
+		return 1;
+	}
+
+}
diff --git a/AMOEBAonAMAK/src/experiments/reinforcement/WithStudiedSystem/ReinforcementLauncher2D.java b/AMOEBAonAMAK/src/experiments/reinforcement/WithStudiedSystem/ReinforcementLauncher2D.java
new file mode 100644
index 0000000000000000000000000000000000000000..b9804256fa473e4dd73500c491e450c3f8323ce7
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/reinforcement/WithStudiedSystem/ReinforcementLauncher2D.java
@@ -0,0 +1,301 @@
+package experiments.reinforcement.WithStudiedSystem;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+
+import experiments.FILE;
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.examples.randomantsMultiUi.AntHillExampleMultiUI;
+import fr.irit.smac.amak.examples.randomantsMultiUi.WorldExampleMultiUI;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.VUIMulti;
+import gui.AmoebaMultiUIWindow;
+import gui.AmoebaWindow;
+import javafx.application.Application;
+import javafx.application.Platform;
+import javafx.beans.value.ChangeListener;
+import javafx.beans.value.ObservableValue;
+import javafx.scene.control.Slider;
+import javafx.stage.Stage;
+import kernel.AMOEBA;
+import kernel.StudiedSystem;
+import kernel.World;
+import kernel.backup.BackupSystem;
+import kernel.backup.IBackupSystem;
+import kernel.backup.SaveHelperImpl;
+import utils.TRACE_LEVEL;
+
+
+/**
+ * The Class BadContextLauncherEasy.
+ */
+public class ReinforcementLauncher2D extends Application implements Serializable {
+
+
+	public static final double oracleNoiseRange = 0.5;
+	public static final double learningSpeed = 0.5;
+	public static final int regressionPoints = 100;
+	public static final int dimension = 2;
+	public static final double spaceSize = 10.0	;
+	public static final int nbOfModels = 3	;
+	public static final int normType = 2	;
+	public static final boolean randomExploration = false;
+	public static final boolean limitedToSpaceZone = true;
+	//public static final double mappingErrorAllowed = 0.07; // BIG SQUARE
+	public static double mappingErrorAllowed = 0.02; // MULTI
+	public static final double explorationIncrement = 1.0	;
+	public static final double explorationWidht = 0.5	;
+	
+	public static final int nbCycle = 10000;
+	
+	AMOEBA amoebaSpatialReward;
+	StudiedSystem studiedSystem;
+	VUIMulti amoebaSpatialRewardVUI;
+	AmoebaMultiUIWindow amoebaSpatialRewardUI;
+	
+
+	
+	
+	public static void main(String[] args) throws IOException {
+		
+		// Delegates to JavaFX; the actual setup happens in start(Stage).
+		Application.launch(args);
+
+
+	}
+	
+	@Override
+	public void start(Stage arg0) throws Exception, IOException {
+		// Global AMAK/AMOEBA configuration for a windowed, single-agent-thread run.
+		Configuration.multiUI=true;
+		Configuration.commandLineMode = false;
+		Configuration.allowedSimultaneousAgentsExecution = 1;
+		Configuration.waitForGUI = true;
+		Configuration.plotMilliSecondsUpdate = 20000;
+		
+		amoebaSpatialRewardVUI = new VUIMulti("2D");
+		amoebaSpatialRewardUI = new AmoebaMultiUIWindow("ELLSA", amoebaSpatialRewardVUI);
+		
+		// Toolbar slider controlling the allowed mapping error at runtime.
+		Slider slider = new Slider(0.01, 0.1, mappingErrorAllowed);
+		slider.setShowTickLabels(true);
+		slider.setShowTickMarks(true);
+		
+		slider.valueProperty().addListener(new ChangeListener<Number>() {
+			@Override
+			public void changed(ObservableValue<? extends Number> observable, Number oldValue, Number newValue) {
+				System.out.println("new Value "+newValue);
+				// doubleValue() is safe for any Number subtype, unlike a direct cast to double
+				mappingErrorAllowed = newValue.doubleValue();
+				amoebaSpatialReward.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
+			}
+		});
+		amoebaSpatialRewardUI.addToolbar(slider);
+		
+		// Build the AMOEBA and run one cycle on a background thread (100 ms pacing).
+		startTask(100, 1);
+	}
+	
+	/**
+	 * Runs runTask(wait, cycles) on a background daemon thread so the
+	 * JavaFX application thread is never blocked.
+	 */
+	public void startTask(long wait, int cycles) 
+    {
+        Thread backgroundThread = new Thread(() -> runTask(wait, cycles));
+        // Terminate the running thread if the application exits
+        backgroundThread.setDaemon(true);
+        backgroundThread.start();
+    }
+	
+	
+	
+	/**
+	 * Builds the AMOEBA + studied system on the JavaFX thread, then runs
+	 * the requested number of learning cycles, pacing each with a sleep.
+	 * @param wait   milliseconds to sleep after each posted UI task
+	 * @param cycles number of learning cycles to run
+	 */
+	public void runTask(long wait, int cycles) 
+    {
+		try
+        {
+            // Initialization must happen on the JavaFX Application Thread.
+            Platform.runLater(new Runnable() 
+            {
+                @Override
+                public void run() 
+                {
+                	amoebaSpatialReward = new AMOEBA(amoebaSpatialRewardUI,  amoebaSpatialRewardVUI);
+            		studiedSystem = new ReinforcementManager2D(spaceSize, dimension, nbOfModels, normType, randomExploration, explorationIncrement,explorationWidht,limitedToSpaceZone, oracleNoiseRange);
+            		amoebaSpatialReward.setStudiedSystem(studiedSystem);
+            		IBackupSystem backupSystem = new BackupSystem(amoebaSpatialReward);
+            		File file = new File("resources/twoDimensionsLauncher.xml");
+            		backupSystem.load(file);
+            		
+            		amoebaSpatialReward.saver = new SaveHelperImpl(amoebaSpatialReward, amoebaSpatialRewardUI);
+            		amoebaSpatialReward.allowGraphicalScheduler(true);
+            		amoebaSpatialReward.setRenderUpdate(true);
+            		amoebaSpatialReward.data.learningSpeed = learningSpeed;
+            		amoebaSpatialReward.data.numberOfPointsForRegression = regressionPoints;
+            		amoebaSpatialReward.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
+            		amoebaSpatialReward.setReinforcement(true);
+            		World.minLevel = TRACE_LEVEL.DEBUG;
+                }
+            });
+     
+            Thread.sleep(wait);
+        }
+        catch (InterruptedException e) 
+        {
+            // restore the interrupt flag so callers can observe the interruption
+            Thread.currentThread().interrupt();
+            e.printStackTrace();
+        }
+		
+        for(int i = 0; i < cycles; i++) 
+        {
+            try
+            {
+                // One learning cycle per iteration, on the JavaFX Application Thread.
+                Platform.runLater(new Runnable() 
+                {
+                    @Override
+                    public void run() 
+                    {
+                    	studiedSystem.playOneStep();
+                    	amoebaSpatialReward.learn(studiedSystem.getOutput());
+                    	if(amoebaSpatialReward.getHeadAgent().isActiveLearning()) {
+                    		studiedSystem.setActiveLearning(true);
+                    		studiedSystem.setSelfRequest(amoebaSpatialReward.getHeadAgent().getSelfRequest()); //TODO self active ...
+                    	}
+                    }
+                });
+         
+                Thread.sleep(wait);
+            }
+            catch (InterruptedException e) 
+            {
+                // restore the interrupt flag so callers can observe the interruption
+                Thread.currentThread().interrupt();
+                e.printStackTrace();
+            }
+        }
+    }
+	
+	
+	
+	@Override
+	public void stop() throws Exception {
+		super.stop();
+		// Force the JVM down when the window closes: background threads and
+		// schedulers must not keep the process alive.
+		System.exit(0);
+	}
+
+	/**
+	 * Legacy manual launcher kept for API compatibility. All of the experiment
+	 * code that used to live here (automatic learn/request timing runs and the
+	 * "XP Pierre" context-export experiment) is disabled; calling this method
+	 * has no effect. See version history for the removed reference code.
+	 */
+	public static void launch() throws IOException{
+		// intentionally empty
+	}
+	
+	/**
+	 * Builds a file name by concatenating each info string followed by '_'.
+	 */
+	public static String fileName(ArrayList<String> infos) {
+		StringBuilder name = new StringBuilder();
+		for (String info : infos) {
+			name.append(info).append('_');
+		}
+		return name.toString();
+	}
+	
+	/**
+	 * Sends all message parts through the FILE helper as one manual message.
+	 */
+	public static void writeMessage(FILE file, ArrayList<String> message) {
+		file.initManualMessage();
+		message.forEach(file::addManualMessage);
+		file.sendManualMessage();
+	}
+
+
+	
+}
diff --git a/AMOEBAonAMAK/src/experiments/reinforcement/WithStudiedSystem/ReinforcementManager2D.java b/AMOEBAonAMAK/src/experiments/reinforcement/WithStudiedSystem/ReinforcementManager2D.java
new file mode 100644
index 0000000000000000000000000000000000000000..befc780052a0b7820db460cc83228d873dfced39
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/reinforcement/WithStudiedSystem/ReinforcementManager2D.java
@@ -0,0 +1,562 @@
+package experiments.reinforcement.WithStudiedSystem;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Random;
+
+import agents.percept.Percept;
+import kernel.AMOEBA;
+import kernel.StudiedSystem;
+
+
+/**
+ * The Class BadContextManager.
+ */
+public class ReinforcementManager2D implements StudiedSystem{
+
+	/** The x. */
+	Double[] x ;
+	Double[] oldX ;
+	
+	double a1;
+	double a2;
+	
+	public boolean AI = false;
+	
+	/** The result. */
+	double result = 0;
+	
+	HashMap<String, AMOEBA> amoebas;
+
+	
+	/** The first step. */
+	boolean firstStep = true;
+	boolean randomExploration = false;
+	boolean spaceLimited = true;
+	double spaceSize;
+	
+	int dimension;
+	int numberOfModels;
+	int normType;
+	
+	double[] explorationVector;
+	
+	HashMap<String,Double> selfRequest;
+	boolean activeLearning = false;
+	
+	double noiseRange;
+	
+	/** The world. */
+	Random generator;
+	
+	double explorationIncrement;
+	double explorationMaxVariation;
+	
+	
+	/* Parameters */
+	private static final double gaussianCoef = 1000;
+	private static final double gaussianVariance = 10;
+	
+	private double lastReward;
+	
+	
+	public ReinforcementManager2D(double size, int dim, int nbOfModels, int nrmType, boolean rndExploration, double explIncrement, double explnVariation, boolean limiteToSpace, double noise) {
+		this.spaceSize= size;
+		dimension = dim;
+		numberOfModels = nbOfModels;
+		normType = nrmType;
+		x = new Double[dimension];
+		oldX = new Double[dimension];
+		
+		noiseRange = noise;
+		spaceLimited = limiteToSpace;
+		
+		//gaussianCoef = Math.random()*2000;
+		
+
+		
+		
+		
+		generator = new Random();
+		
+		
+		
+		
+		
+		
+		
+		randomExploration= rndExploration;
+		
+		explorationVector = new double[dimension];	
+		for(int i = 0 ; i < dimension ; i++) {
+			explorationVector[i] = Math.random() - 0.5;
+		}
+		double vectorNorm = normeP(explorationVector, 2);
+		for(int i = 0 ; i < dimension ; i++) {
+			explorationVector[i] /= vectorNorm;
+		}
+		
+		
+		explorationIncrement = explIncrement;
+		explorationMaxVariation = explnVariation;
+		
+		resetRandomExploration();
+	}
+	
+	
+	
+	
+	
+	
+	/**
+	 * Plays one exploration step and builds the training sample for the
+	 * action models: previous position ("p1","p2"), reached position
+	 * ("p1Goal","p2Goal") and the actions taken ("a1","a2").
+	 *
+	 * @return the state/goal sample, or an empty map while no previous
+	 *         position is known yet
+	 */
+	@Override
+	public HashMap<String, Double> playOneStep() {
+		
+		HashMap<String, Double> state = new HashMap<String, Double>();
+
+		// NOTE(review): the flag reads inverted — the model-guided step runs
+		// when randomExploration is FALSE; confirm this is intended.
+		if(!randomExploration) {
+			
+			pseudoRandomExplorationStep();
+			
+		}
+		else if(activeLearning) {
+			
+			
+			
+			// One-shot: consume the pending self-request and jump to it.
+			activeLearning = false;
+			
+			
+			
+			for(int i = 0 ; i < dimension ; i++) {
+				x[i] = selfRequest.get("px" + i);
+			}
+		}
+
+		else {
+			
+			resetRandomExploration();
+		}
+		
+		// oldX[0] stays null until a previous position has been recorded
+		// (pseudoRandomExplorationStep sets it, resetRandomExploration clears
+		// it), so the very first step returns an empty sample.
+		if(oldX[0] != null) {
+			for(int i = 1 ; i < dimension+1 ; i++) {
+				state.put("p" + i + "Goal", x[i-1]);
+				state.put("p" + i, oldX[i-1]);
+			}
+			state.put("a1" , a1);
+			state.put("a2" , a2);
+		}
+		
+		
+		return state;
+	}
+	
+	@Override
+	public HashMap<String, Double> playOneStepWithControlModel() {
+		
+		HashMap<String, Double> state = new HashMap<String, Double>();
+
+		if(!randomExploration) {
+			
+			pseudoRandomExplorationStep();
+			
+		}
+		else if(activeLearning) {
+			
+			
+			
+			activeLearning = false;
+			
+			
+			
+			for(int i = 0 ; i < dimension ; i++) {
+				x[i] = selfRequest.get("px" + i);
+			}
+		}
+
+		else {
+			
+			resetRandomExploration();
+		}
+		
+		if(oldX[0] != null) {
+			for(int i = 1 ; i < dimension+1 ; i++) {
+				state.put("p" + i + "Goal", x[i-1]);
+				state.put("p" + i, oldX[i-1]);
+			}
+			state.put("a1" , a1);
+			state.put("a2" , a2);
+		}
+		
+		
+		return state;
+	}
+	
+	private void resetRandomExploration() {
+		for(int i = 0 ; i < dimension ; i++) {
+			x[i] = (double) (generator.nextInt((int) (spaceSize*2+5)) - (spaceSize+2));
+		}
+		oldX = new Double[dimension];
+	}
+	
+	
+	/**
+	 * One model-guided exploration move: remembers the current position in
+	 * oldX, restarts after a terminal reward, otherwise picks actions
+	 * (a1, a2) either from the learned action models (when AI is on) or at
+	 * random, applies them, and feeds the observed transition back to the
+	 * "a1" and "a2" AMOEBAs.
+	 */
+	private void pseudoRandomExplorationStep() {
+		
+		// Remember the current position so a (state, action, goal) sample
+		// can be emitted for the action models below.
+		if(x[0] != null) {
+			for(int i = 0 ; i < dimension ; i++) {
+				oldX[i] = x[i];
+			}
+		}
+		
+		
+		// NOTE(review): exact floating-point comparison with the terminal
+		// rewards of reinforcementModel2D — works for these literals, but
+		// brittle if the reward values ever change.
+		if(lastReward == -1000.0 || lastReward == 1000.0) {
+			resetRandomExploration();
+		}else {	
+			HashMap<String, Double> perceptions = new HashMap<String, Double>();
+			perceptions.put("px0", x[0]);
+			perceptions.put("px1", x[1]);
+			perceptions.put("oracle", 0.0);
+			// Ask the spatial-reward model for the most promising next position.
+			HashMap<String, Double> bestFuturePosition = amoebas.get("spatialReward").reinforcementRequest(perceptions);
+			System.out.println(bestFuturePosition); // TODO(review): debug print left in
+			
+			if(bestFuturePosition != null && AI) {
+				// Use the learned inverse models to pick the actions that
+				// should lead to the suggested position.
+				HashMap<String, Double> a1Request = new HashMap<String, Double>();
+				a1Request.put("p1", x[0]);
+				a1Request.put("p2", x[1]);
+				a1Request.put("p1Goal", bestFuturePosition.get("px0"));
+				a1Request.put("oracle", 0.0);
+				a1 = amoebas.get("a1").request(a1Request);
+				HashMap<String, Double> a2Request = new HashMap<String, Double>();
+				a2Request.put("p1", x[0]);
+				a2Request.put("p2", x[1]);
+				a2Request.put("p2Goal", bestFuturePosition.get("px1"));
+				a2Request.put("oracle", 0.0);
+				a2 = amoebas.get("a2").request(a2Request);
+			}
+			else {
+				// Random actions in {-1, 0, 1}, excluding the (0, 0) "no move".
+				a1 = generator.nextInt(3) - 1;
+				a2 = (a1 == 0.0) ? (generator.nextBoolean() ? -1 : 1) : (generator.nextInt(3) - 1);
+			}
+			
+			x[0] += a1;
+			x[1] += a2;
+		}
+ 
+		
+		HashMap<String, Double> perceptionsActionState1 = new HashMap<String, Double>();
+		HashMap<String, Double> perceptionsActionState2 = new HashMap<String, Double>();
+		
+		
+		// Train the two action models on the observed transition.
+		if(oldX[0] != null) {
+			perceptionsActionState1.put("p1", oldX[0]);
+			perceptionsActionState1.put("p2", oldX[1]);
+			perceptionsActionState1.put("p1Goal", x[0]);
+			perceptionsActionState1.put("oracle", a1);
+			
+			
+			perceptionsActionState2.put("p1", oldX[0]);
+			perceptionsActionState2.put("p2", oldX[1]);
+			perceptionsActionState2.put("p2Goal", x[1]);
+			perceptionsActionState2.put("oracle", a2);
+			
+			System.out.println(perceptionsActionState1); // TODO(review): debug print
+			System.out.println(perceptionsActionState2); // TODO(review): debug print
+			
+			amoebas.get("a1").learn(perceptionsActionState1);
+			amoebas.get("a2").learn(perceptionsActionState2);
+		}
+		
+		
+	}
+	
+	private double normeP(double[] x1, int p) {
+		double distance = 0;
+		for(int i = 0; i < x1.length; i ++) {
+			distance += Math.pow(Math.abs(x1[i]), p) ;
+		}
+		return Math.pow(distance, 1.0/p);
+	}
+	
+	public void playOneStepConstrained(double[][] constrains) {		
+		
+		for(int i = 0 ; i < dimension ; i++) {
+			x[i] = constrains[i][0] + (Math.random()*(constrains[i][1] - constrains[i][0]));
+		}
+	}
+	
+	public void playOneStepConstrainedWithNoise(double[][] constrains, double noiseRange) {
+		
+		
+		for(int i = 0 ; i < dimension ; i++) {
+			x[i] = constrains[i][0] + (Math.random()*(constrains[i][1] - constrains[i][0])) - noiseRange/2 + Math.random()*noiseRange;
+		}
+
+	}
+
+	
+
+
+
+	
+	
+
+	
+	public double model(Double[] situation) {
+		
+		Double[] xRequest;
+		
+		if(situation == null) {
+			xRequest = x;
+		}else {
+			xRequest = situation;
+		}
+		
+		
+		
+		return reinforcementModel2D(xRequest);
+		
+		
+
+		
+		
+	}
+	
+	
+	public double reinforcementModel2D(Double[] position) {
+		
+		double reward;
+		if(position[0] < -spaceSize || position[0] > spaceSize || position[1] < -spaceSize || position[1] > spaceSize) {
+			reward = -1000.0;
+		} else if(Math.abs(position[0]) < 1.5 && Math.abs(position[1]) < 1.5 ) {
+			// win !
+			reward = 1000.0;
+		} else {
+			reward = -1.0;
+		}
+		
+		lastReward = reward;
+		return reward;
+		
+		
+
+		
+		
+	}
+	
+
+	
+	/* (non-Javadoc)
+	 * @see kernel.StudiedSystem#getOutput()
+	 */
+
+	public HashMap<String, Double> getOutput() {
+		HashMap<String, Double> out = new HashMap<String, Double>();
+
+		result = model(null);
+		
+		for(int i = 0; i<dimension; i++) {
+			
+			out.put("px" + i,x[i]);
+			
+		}
+		out.put("oracle",result);
+		return out;
+	}
+	
+	public HashMap<String, Double> getIntput() {
+		HashMap<String, Double> in = new HashMap<String, Double>();
+
+		
+		for(int i = 0; i<dimension; i++) {
+			
+			in.put("px" + i,x[i]);
+			
+		}
+		return in;
+	}
+	
+	public HashMap<String, Double> getOutputWithNoise(double noiseRange) {
+		HashMap<String, Double> out = new HashMap<String, Double>();
+
+		result = model(null) - noiseRange/2 + Math.random()*noiseRange ;
+		
+		for(int i = 0; i<dimension; i++) {
+			
+			out.put("px" + i,x[i]);
+			
+		}
+		out.put("oracle",result);
+		return out;
+	}
+	
+	public HashMap<String, Double> getOutputWithAmoebaRequest(HashMap<String, Double> amoebaRequest,  double noiseRange) {
+		HashMap<String, Double> out = new HashMap<String, Double>();
+
+		for(int i = 0; i<dimension; i++) {
+			
+			x[i] = amoebaRequest.get("px" + i);
+			
+		}
+		
+		result = model(null) - noiseRange/2 + Math.random()*noiseRange ;
+		
+		for(int i = 0; i<dimension; i++) {
+			
+			out.put("px" + i,x[i]);
+			
+		}
+		out.put("oracle",result);
+		return out;
+	}
+	
+	public HashMap<String, Double> getOriginOutput() {
+		HashMap<String, Double> out = new HashMap<String, Double>();
+
+		for(int i = 0; i<dimension; i++) {
+			x[i] = 0.0;
+			
+		}
+		
+		result = model(null);
+		
+		for(int i = 0; i<dimension; i++) {
+			
+			out.put("px" + i,x[i]);
+			
+		}
+		out.put("oracle",result);
+		return out;
+	}
+	
+	
+	
+	
+	public HashMap<String, Double> getOutputRequest2D(HashMap<String, Double> values) {
+		HashMap<String, Double> out = new HashMap<String, Double>();
+
+		x[0] = values.get("px0");
+		x[1] = values.get("px1");
+		
+		
+		result =  model(null);
+		
+		out.put("px0",x[0]);
+		out.put("px1",x[1]);
+		out.put("oracle",result);
+		return out;
+	}
+
+	/* (non-Javadoc)
+	 * @see kernel.StudiedSystem#switchControlMode()
+	 */
+
+	// Intentional no-op: this studied system exposes a single control mode.
+	public void switchControlMode() {
+		
+	}
+	
+
+	/** @return the half-width of the square exploration space */
+	public double getSpaceSize() {
+		return spaceSize;
+	}
+
+
+	
+	
+
+
+	@Override
+	public double requestOracle(HashMap<String, Double> request) {
+		
+		Double[] xRequest = new Double[request.size()];
+		
+		for(int i = 0; i<dimension; i++) {
+			
+			xRequest[i] = request.get("px" + i);
+			
+		}
+		
+		return model(xRequest);
+	}
+	
+	// Arms (or disarms) the one-shot active-learning jump consumed by playOneStep().
+	@Override
+	public void setActiveLearning(boolean value) {
+		activeLearning = value;
+	}
+	
+	@Override
+	public void setSelfRequest(HashMap<Percept, Double> request){
+		HashMap<String,Double> newRequest = new HashMap<String,Double>();
+		
+		for(Percept pct : request.keySet()) {
+			newRequest.put(pct.getName(), request.get(pct));
+		}
+		
+		selfRequest = newRequest;
+	}
+
+	// Injects the control AMOEBAs keyed "a1", "a2" and "spatialReward", used
+	// during the model-guided exploration step.
+	@Override
+	public void setControlModels(HashMap<String, AMOEBA> controlModels) {
+		amoebas = controlModels;
+		
+	}
+
+
+
+
+
+
+	// Enables the learned action models (AI) instead of random actions.
+	@Override
+	public void setControl(boolean value) {
+		AI = value;
+		
+	}
+
+
+
+
+
+
+	// NOTE(review): unimplemented stub — self-learning is not supported by
+	// this studied system yet.
+	@Override
+	public void setSelfLearning(boolean value) {
+		// TODO Auto-generated method stub
+		
+	}
+
+
+
+
+
+
+	// NOTE(review): unimplemented stub — always returns null; callers must
+	// handle the null.
+	@Override
+	public Double getActiveRequestCounts() {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+
+
+
+
+
+	// NOTE(review): unimplemented stub — always returns null; callers must
+	// handle the null.
+	@Override
+	public Double getSelfRequestCounts() {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+
+
+
+
+
+	// NOTE(review): unimplemented stub — always returns null; callers must
+	// handle the null.
+	@Override
+	public Double getRandomRequestCounts() {
+		// TODO Auto-generated method stub
+		return null;
+	}
+
+	
+
+
+}
\ No newline at end of file
diff --git a/AMOEBAonAMAK/src/experiments/reinforcement/WithStudiedSystem/ReinforcementWithControlLauncher2D.java b/AMOEBAonAMAK/src/experiments/reinforcement/WithStudiedSystem/ReinforcementWithControlLauncher2D.java
new file mode 100644
index 0000000000000000000000000000000000000000..3fae320ad747017e98a28d7086bad8429a67b792
--- /dev/null
+++ b/AMOEBAonAMAK/src/experiments/reinforcement/WithStudiedSystem/ReinforcementWithControlLauncher2D.java
@@ -0,0 +1,389 @@
+package experiments.reinforcement.WithStudiedSystem;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+
+import agents.context.localModel.TypeLocalModel;
+import experiments.FILE;
+import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.examples.randomantsMultiUi.AntHillExampleMultiUI;
+import fr.irit.smac.amak.examples.randomantsMultiUi.WorldExampleMultiUI;
+import fr.irit.smac.amak.tools.Log;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.VUIMulti;
+import gui.AmoebaMultiUIWindow;
+import gui.AmoebaWindow;
+import javafx.application.Application;
+import javafx.application.Platform;
+import javafx.beans.property.BooleanProperty;
+import javafx.beans.property.ObjectProperty;
+import javafx.beans.value.ChangeListener;
+import javafx.beans.value.ObservableValue;
+import javafx.collections.ObservableMap;
+import javafx.event.ActionEvent;
+import javafx.event.EventHandler;
+import javafx.scene.control.Button;
+import javafx.scene.control.Slider;
+import javafx.scene.control.Toggle;
+import javafx.scene.control.ToggleGroup;
+import javafx.stage.Stage;
+import kernel.AMOEBA;
+import kernel.StudiedSystem;
+import kernel.World;
+import kernel.backup.BackupSystem;
+import kernel.backup.IBackupSystem;
+import kernel.backup.SaveHelperDummy;
+import kernel.backup.SaveHelperImpl;
+import utils.Pair;
+import utils.TRACE_LEVEL;
+import utils.XmlConfigGenerator;
+
+
+/**
+ * The Class BadContextLauncherEasy.
+ */
+public class ReinforcementWithControlLauncher2D extends Application implements Serializable {
+
+
+	public static final double oracleNoiseRange = 0.5;
+	public static final double learningSpeed = 0.5;
+	public static final int regressionPoints = 100;
+	public static final int dimension = 2;
+	public static final double spaceSize = 10.0	;
+	public static final int nbOfModels = 3	;
+	public static final int normType = 2	;
+	public static final boolean randomExploration = false;
+	public static final boolean limitedToSpaceZone = true;
+	//public static final double mappingErrorAllowed = 0.07; // BIG SQUARE
+	public static double mappingErrorAllowed = 0.02; // MULTI
+	public static final double explorationIncrement = 1.0	;
+	public static final double explorationWidht = 0.5	;
+	
+	public static final int nbCycle = 10000;
+	
+	AMOEBA amoebaSpatialReward;
+	StudiedSystem studiedSystem;
+	VUIMulti amoebaSpatialRewardVUI;
+	AmoebaMultiUIWindow amoebaSpatialRewardUI;
+	
+	AMOEBA amoebaActionModel1;
+	VUIMulti amoebaActionModelVUI1;
+	AmoebaMultiUIWindow amoebaActionModelUI1;
+	
+	AMOEBA amoebaActionModel2;
+	VUIMulti amoebaActionModelVUI2;
+	AmoebaMultiUIWindow amoebaActionModelUI2;
+	
+	
+	public static void main(String[] args) throws IOException {
+		
+		
+		Application.launch(args);
+
+
+	}
+	
+	/**
+	 * JavaFX entry: builds the three AMOEBA windows (spatial reward and the
+	 * two action models), wires the mapping-error slider and the "AI"
+	 * button, then launches the background experiment loop.
+	 *
+	 * @param arg0 primary stage (unused; AMOEBA windows create their own)
+	 */
+	@Override
+	public void start(Stage arg0) throws Exception, IOException {
+
+		Configuration.multiUI=true;
+		Configuration.commandLineMode = false;
+		Configuration.allowedSimultaneousAgentsExecution = 1;
+		Configuration.waitForGUI = true;
+		Configuration.plotMilliSecondsUpdate = 20000;
+		
+		amoebaSpatialRewardVUI = new VUIMulti("2D REWARD");
+		amoebaSpatialRewardUI = new AmoebaMultiUIWindow("SPATIAL REWARD", amoebaSpatialRewardVUI);
+		
+		amoebaActionModelVUI1 = new VUIMulti("2D");
+		amoebaActionModelUI1 = new AmoebaMultiUIWindow("ACTION 1 MODEL", amoebaActionModelVUI1);
+		
+		amoebaActionModelVUI2 = new VUIMulti("2D");
+		amoebaActionModelUI2 = new AmoebaMultiUIWindow("ACTION 2 MODEL", amoebaActionModelVUI2);
+		
+
+		
+		
+		// Toolbar slider tuning the allowed mapping error of the spatial
+		// reward AMOEBA at runtime.
+		Slider slider = new Slider(0.01, 0.1, mappingErrorAllowed);
+		slider.setShowTickLabels(true);
+		slider.setShowTickMarks(true);
+		
+		slider.valueProperty().addListener(new ChangeListener<Number>() {
+			@Override
+			public void changed(ObservableValue<? extends Number> observable, Number oldValue, Number newValue) {
+				System.out.println("new Value "+newValue);
+				mappingErrorAllowed = (double)newValue;
+				amoebaSpatialReward.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
+			}
+		});
+		amoebaSpatialRewardUI.addToolbar(slider);
+		
+		// "AI" button: switch the studied system from random actions to the
+		// learned control models.
+		Button btn = new Button();
+		btn.setText("AI");
+		btn.setOnAction(new EventHandler<ActionEvent>() {
+			
+			@Override
+			public void handle(ActionEvent event) {
+				studiedSystem.setControl(true);
+				
+			}
+		});
+		
+		amoebaSpatialRewardUI.addToolbar(btn);
+		
+		// 100 ms between scheduled units of work. NOTE(review): only 1 cycle
+		// is scheduled here — confirm whether more cycles were intended.
+		startTask(100, 1);
+
+
+		
+		
+	}
+	
+	public void startTask(long wait, int cycles) 
+    {
+        // Create a Runnable
+        Runnable task = new Runnable()
+        {
+            public void run()
+            {
+                runTask(wait, cycles);
+            }
+        };
+ 
+        // Run the task in a background thread
+        Thread backgroundThread = new Thread(task);
+        // Terminate the running thread if the application exits
+        backgroundThread.setDaemon(true);
+        // Start the thread
+        backgroundThread.start();
+        
+     
+    }
+	
+	
+	
+	/**
+	 * Background experiment loop: first schedules creation of the three
+	 * AMOEBAs on the JavaFX Application Thread, then runs {@code cycles}
+	 * learning steps, each scheduled via Platform.runLater and spaced by
+	 * {@code wait} milliseconds.
+	 *
+	 * @param wait   pause (ms) after each scheduled unit of work
+	 * @param cycles number of learning cycles to schedule
+	 */
+	public void runTask(long wait, int cycles) 
+    {
+		
+		try
+        {
+             
+            // Build the AMOEBAs on the JavaFX Application Thread (they
+            // create UI elements) and hand them to the studied system.
+            Platform.runLater(new Runnable() 
+            {
+                @Override
+                public void run() 
+                {
+                	amoebaSpatialReward = setupSpatialReward();
+                	amoebaActionModel1 = setupControlModel("1", amoebaActionModelUI1, amoebaActionModelVUI1);
+                	amoebaActionModel2 = setupControlModel("2", amoebaActionModelUI2, amoebaActionModelVUI2);
+                	
+                	HashMap<String, AMOEBA> amoebas = new HashMap<String, AMOEBA>();
+                	amoebas.put("a1", amoebaActionModel1);
+                	amoebas.put("a2", amoebaActionModel2);
+                	amoebas.put("spatialReward", amoebaSpatialReward);
+                	studiedSystem.setControlModels(amoebas);
+                }
+            });
+     
+            // NOTE(review): sleeping only makes it LIKELY the setup above
+            // has completed before the loop starts — this is a race, not a
+            // guarantee; a CountDownLatch would be safe.
+            Thread.sleep(wait);
+        }
+        catch (InterruptedException e) 
+        {
+            e.printStackTrace();
+        }
+		
+		
+		
+        for(int i = 0; i < cycles; i++) 
+        {
+            try
+            {
+                // Get the Status
+                final String status = "Processing " + i + " of " + cycles;
+                 
+                // One learning cycle on the JavaFX Application Thread.
+                Platform.runLater(new Runnable() 
+                {
+                    @Override
+                    public void run() 
+                    {
+                    	studiedSystem.playOneStepWithControlModel();
+                    	
+                    	
+                    	
+                    	amoebaSpatialReward.learn(studiedSystem.getOutput());
+                    	if(amoebaSpatialReward.getHeadAgent().isActiveLearning()) {
+                    		studiedSystem.setActiveLearning(true);
+                    		studiedSystem.setSelfRequest(amoebaSpatialReward.getHeadAgent().getSelfRequest()); //TODO self active ...
+    						 
+    					}
+                    	//System.out.println(status);
+                    }
+                });
+         
+                Thread.sleep(wait);
+            }
+            catch (InterruptedException e) 
+            {
+                e.printStackTrace();
+            }
+        }
+    }   
+	
+	
+	
+	// Force-exits the JVM when the JavaFX window closes, so the background
+	// daemon thread and any AMAS schedulers are torn down too.
+	@Override
+	public void stop() throws Exception {
+		super.stop();
+		System.exit(0);
+	}
+	
+	
+	
+	/**
+	 * Builds the spatial-reward AMOEBA from the 2D launcher XML backup,
+	 * attaches a fresh studied system, and configures learning / rendering
+	 * parameters. Instantiates {@link #studiedSystem} as a side effect.
+	 *
+	 * @return the configured spatial-reward AMOEBA
+	 */
+	private AMOEBA setupSpatialReward() {
+		AMOEBA amoeba = new AMOEBA(amoebaSpatialRewardUI,  amoebaSpatialRewardVUI);
+		studiedSystem = new ReinforcementManager2D(spaceSize, dimension, nbOfModels, normType, randomExploration, explorationIncrement,explorationWidht,limitedToSpaceZone, oracleNoiseRange);
+		amoeba.setStudiedSystem(studiedSystem);
+		IBackupSystem backupSystem = new BackupSystem(amoeba);
+		File file = new File("resources/twoDimensionsLauncher.xml");
+		backupSystem.load(file);
+		
+		amoeba.saver = new SaveHelperImpl(amoeba, amoebaSpatialRewardUI);
+		amoeba.allowGraphicalScheduler(true);
+		amoeba.setRenderUpdate(true);		
+		amoeba.data.learningSpeed = learningSpeed;
+		amoeba.data.numberOfPointsForRegression = regressionPoints;
+		amoeba.getEnvironment().setMappingErrorAllowed(mappingErrorAllowed);
+		amoeba.setReinforcement(true);
+		World.minLevel = TRACE_LEVEL.DEBUG;
+		
+		
+		return amoeba;
+	}
+	
+
+	/**
+	 * Builds an action-model AMOEBA for action "1" or "2", generating a
+	 * temporary XML config with percepts p1, p2 and p&lt;action&gt;Goal.
+	 *
+	 * @param action "1" or "2"; selects the goal percept name
+	 * @param window UI window hosting this AMOEBA
+	 * @param VUI    the VUI the AMOEBA renders into
+	 * @return the configured AMOEBA (the process exits on config failure)
+	 */
+	private AMOEBA setupControlModel(String action, AmoebaMultiUIWindow window, VUIMulti VUI) {
+		ArrayList<Pair<String, Boolean>> sensors = new ArrayList<>();
+		sensors.add(new Pair<String, Boolean>("p1", false));
+		sensors.add(new Pair<String, Boolean>("p2", false));
+		sensors.add(new Pair<String, Boolean>("p"+action+"Goal", false));
+		File config;
+		try {
+			// NOTE(review): the suffix "xml" has no leading dot, so the temp
+			// file ends in "...xml" rather than ".xml" — harmless, but
+			// confirm it is intended.
+			config = File.createTempFile("configControlModel", "xml");
+			XmlConfigGenerator.makeXML(config, sensors);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.exit(1);
+			return null; // unreachable: satisfies definite assignment of config
+		}
+		//File config = new File("resources/simpleReinManualTrained.xml");
+		
+		Log.defaultMinLevel = Log.Level.INFORM;
+		World.minLevel = TRACE_LEVEL.ERROR;
+		AMOEBA amoeba = new AMOEBA(window, VUI, config.getAbsolutePath(), null);
+		amoeba.saver = new SaveHelperDummy();
+		
+		
+		
+		
+		amoeba.setLocalModel(TypeLocalModel.MILLER_REGRESSION);
+		amoeba.getEnvironment().setMappingErrorAllowed(0.025);
+		
+		return amoeba;
+	}
+
+	public static void launch() throws IOException{
+		
+		
+	
+		
+		
+		
+
+		
+		/* AUTOMATIC */
+//		long start = System.currentTimeMillis();
+//		for (int i = 0; i < nbCycle; ++i) {
+//			studiedSystem.playOneStep();
+//			amoeba.learn(studiedSystem.getOutput());
+//		}
+//		long end = System.currentTimeMillis();
+//		System.out.println("Done in : " + (end - start) );
+//		
+//		start = System.currentTimeMillis();
+//		for (int i = 0; i < nbCycle; ++i) {
+//			studiedSystem.playOneStep();
+//			amoeba.request(studiedSystem.getOutput());
+//		}
+//		end = System.currentTimeMillis();
+//		System.out.println("Done in : " + (end - start) );
+		
+		
+//		/* XP PIERRE */
+//		
+//		String fileName = fileName(new ArrayList<String>(Arrays.asList("GaussiennePierre")));
+//		
+//		FILE Pierrefile = new FILE("Pierre",fileName);
+//		for (int i = 0; i < nbCycle; ++i) {
+//			studiedSystem.playOneStep();
+//			amoeba.learn(studiedSystem.getOutput());
+//			if(amoeba.getHeadAgent().isActiveLearning()) {
+//				studiedSystem.setActiveLearning(true);
+//				studiedSystem.setSelfRequest(amoeba.getHeadAgent().getSelfRequest());
+//				 
+//			}
+//		}
+//		
+//		for (int i = 0; i < 10; ++i) {
+//			studiedSystem.playOneStep();
+//			System.out.println(studiedSystem.getOutput());
+//			System.out.println(amoeba.request(studiedSystem.getOutput()));
+//			
+//			
+//		}
+//		
+//		Pierrefile.write(new ArrayList<String>(Arrays.asList("ID contexte","Coeff Cte","Coeff X0","Coeff X1","Min Value","Max Value")));
+//		
+//		for(Context ctxt : amoeba.getContexts()) {
+//			
+//			writeMessage(Pierrefile, ctxt.toStringArrayPierre());
+//
+//		}
+//		
+//		
+//		Pierrefile.close();
+		
+	
+	}
+	
+	public static String fileName(ArrayList<String> infos) {
+		String fileName = "";
+		
+		for(String info : infos) {
+			fileName += info + "_";
+		}
+		
+		return fileName;
+	}
+	
+	public static void writeMessage(FILE file, ArrayList<String> message) {
+		
+		file.initManualMessage();
+		
+		for(String m : message) {
+			file.addManualMessage(m);
+		}
+		
+		file.sendManualMessage();
+		
+	}
+
+
+
+	
+}
diff --git a/AMOEBAonAMAK/src/gui/AmoebaMultiUIWindow.java b/AMOEBAonAMAK/src/gui/AmoebaMultiUIWindow.java
new file mode 100644
index 0000000000000000000000000000000000000000..511c19cf1f7b1299259d8d0d7b624ae1e547de9e
--- /dev/null
+++ b/AMOEBAonAMAK/src/gui/AmoebaMultiUIWindow.java
@@ -0,0 +1,166 @@
+package gui;
+
+import java.util.HashMap;
+
+import javax.management.InstanceAlreadyExistsException;
+
+import fr.irit.smac.amak.tools.Log;
+import fr.irit.smac.amak.tools.RunLaterHelper;
+import fr.irit.smac.amak.ui.AmakPlot;
+import fr.irit.smac.amak.ui.AmakPlot.ChartType;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
+import fr.irit.smac.amak.ui.MainWindow;
+import fr.irit.smac.amak.ui.SchedulerToolbar;
+import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.VUIMulti;
+import fr.irit.smac.amak.ui.drawables.Drawable;
+import javafx.application.Application;
+import javafx.beans.value.ChangeListener;
+import javafx.beans.value.ObservableValue;
+import javafx.event.ActionEvent;
+import javafx.event.EventHandler;
+import javafx.scene.control.Button;
+import javafx.scene.control.Menu;
+import javafx.scene.control.Slider;
+import javafx.scene.control.ToggleButton;
+import javafx.scene.control.Tooltip;
+import javafx.scene.paint.Color;
+import kernel.AMOEBA;
+import kernel.backup.SaveHelperImpl;
+
+/**
+ * The multi window for AMOEBA GUI.
+ * @author Bruno
+ *
+ */
+public class AmoebaMultiUIWindow extends AmasMultiUIWindow{
+
+	protected HashMap<String, AmakPlot> plots = new HashMap<>();
+	
+	/**
+	 * The main {@link VUI} for AMOEBA, by default it's the 2D representation of the contexts.
+	 */
+	public VUIMulti mainVUI;
+	
+	public Drawable point;
+	public Drawable rectangle;
+	public ToggleButton toggleRender;
+	public SchedulerToolbar schedulerToolbar;
+	public DimensionSelector dimensionSelector;
+	public Menu windowMenu;
+	
+	/**
+	 * @param title window title forwarded to {@link AmasMultiUIWindow}
+	 * @param vui   the multi-window VUI used as this window's main view
+	 */
+	public AmoebaMultiUIWindow(String title, VUIMulti vui) {
+		super(title);
+		mainVUI = vui;
+	}
+	
+	/**
+	 * Wires this window to a freshly created AMOEBA: scheduler toolbar, VUI
+	 * cursor/neighborhood markers, the standard plot set, render toggle,
+	 * dimension selector, "Request Here" context menu, quick-save button and
+	 * the mapping-error slider.
+	 *
+	 * @param amoeba the AMOEBA this window visualizes and controls
+	 */
+	public void initialize(AMOEBA amoeba) {
+		
+
+		mainVUI.setDefaultView(200, 0, 0);
+		//addTabbedPanel("2D VUI", mainVUI.getPanel());
+		
+		// scheduler toolbar
+		schedulerToolbar = new SchedulerToolbar("AMOEBA", amoeba.getScheduler());
+		addToolbar(schedulerToolbar);	
+		
+		// VUI markers: request cursor and (initially transparent) neighborhood box.
+		point = mainVUI.createAndAddPoint(0, 0);
+		point.setName("Cursor");
+		rectangle = mainVUI.createAndAddRectangle(10, 10, 10, 10);
+		rectangle.setName("Neighborhood");
+		rectangle.setColor(new Color(1, 1, 1, 0));
+		
+		// Standard plots, retrievable later through getPlot(name).
+		plots.put("This loop NCS", new AmakPlot(this, "This loop NCS", ChartType.LINE, "Cycle", "Number of NCS"));
+		plots.put("All time NCS", new AmakPlot(this, "All time NCS", ChartType.LINE, "Cycle", "Number of NCS"));
+		plots.put("Number of agents", new AmakPlot(this, "Number of agents", ChartType.LINE, "Cycle", "Number of agents"));
+		plots.put("Errors", new AmakPlot(this, "Errors", ChartType.LINE, "Cycle", "Coefficients"));
+		plots.put("Distances to models", new AmakPlot(this, "Distances to models", ChartType.LINE, "Cycle", "Distances"));
+		plots.put("Global Mapping Criticality", new AmakPlot(this, "Global Mapping Criticality", ChartType.LINE, "Cycle", "Criticalities"));
+		plots.put("Time Execution", new AmakPlot(this, "Time Execution", ChartType.LINE, "Cycle", "Times"));
+		plots.put("Criticalities", new AmakPlot(this, "Criticalities", ChartType.LINE, "Cycle", "Criticalities"));
+		
+		// update render button
+		toggleRender = new ToggleButton("Allow Rendering");
+		toggleRender.setOnAction(evt -> {
+			amoeba.setRenderUpdate(toggleRender.isSelected()); 
+			if(amoeba.isRenderUpdate()) {
+				amoeba.updateAgentsVisualisation();
+				amoeba.nextCycleRunAllAgents();
+			}
+		});
+		toggleRender.setSelected(amoeba.isRenderUpdate());
+		addToolbar(toggleRender);
+		
+		// dimension selector
+		dimensionSelector = new DimensionSelector(amoeba.getPercepts(), new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				amoeba.updateAgentsVisualisation();
+			}
+		});
+		RunLaterHelper.runLater(()->mainVUI.toolbar.getItems().add(dimensionSelector));
+		
+		// contextMenu "Request Here" on VUI
+		new ContextMenuVUIMulti(amoeba, mainVUI); //the ContextMenu add itself to the VUI
+		
+		// manual save button
+		addToolbar(newManualSaveButton(amoeba));
+		
+		// Mapping-error slider writing straight to the environment's field.
+		Slider slider = new Slider(0, 0.1, 0.1);
+		slider.setShowTickLabels(true);
+		slider.setShowTickMarks(true);
+		slider.valueProperty().addListener(new ChangeListener<Number>() {
+			@Override
+			public void changed(ObservableValue<? extends Number> observable, Number oldValue, Number newValue) {
+				amoeba.getEnvironment().mappingErrorAllowed = newValue.doubleValue();
+			}
+		});
+		addToolbar(slider);
+	}
+	
+	
+	
+	/**
+	 * Get an existing {@link AmakPlot}. 
+	 * @param name name of the plot to get
+	 * @return an existing plot.
+	 * @see AmoebaMultiUIWindow#addPlot(String, AmakPlot)
+	 */
+	public AmakPlot getPlot(String name) {
+		return plots.get(name);
+	}
+	
+	/**
+	 * Add an {@link AmakPlot} to le map of plots. Allowing for easy access with {@code AmoebaWindow.instance().getPlot(name)}
+	 * @param name name of the plot to add
+	 * @param plot the plot to add
+	 * @see AmoebaMultiUIWindow#getPlot(String)
+	 */
+	public void addPlot(String name, AmakPlot plot) {
+		plots.put(name, plot);
+	}
+	
+	/**
+	 * Create a button 'Quick Save' button, when clicked create a manual save point using an amoeba's saver.
+	 * @param amoeba
+	 * @return
+	 * @see AMOEBA#saver
+	 * @see SaveHelperImpl#newManualSave(String)
+	 */
+	public Button newManualSaveButton(AMOEBA amoeba) {
+		Button button = new Button("Quick save");
+		button.setTooltip(new Tooltip("Create a new save point. You will be able to find it in 'Save Explorer' -> 'Manual Saves'"));
+		button.setOnAction(new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				if(amoeba.saver != null) {
+					amoeba.saver.newManualSave("manualSaveButton");
+				} else {
+					Log.defaultLog.error("Main Window", "Cannot make a save point of an amoeba without saver");
+				}
+			}
+		});
+		return button;
+	}
+}
diff --git a/AMOEBAonAMAK/src/gui/ContextMenuVUI.java b/AMOEBAonAMAK/src/gui/ContextMenuVUI.java
index aad5e8bf8af3891878e442f441183af8bd3948f8..07d72a504280fdf50a7719a5cdc0869bace1ddb1 100644
--- a/AMOEBAonAMAK/src/gui/ContextMenuVUI.java
+++ b/AMOEBAonAMAK/src/gui/ContextMenuVUI.java
@@ -6,6 +6,7 @@ import java.util.Optional;
 import agents.percept.Percept;
 import fr.irit.smac.amak.tools.Log;
 import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.VUIMulti;
 import javafx.event.ActionEvent;
 import javafx.event.EventHandler;
 import javafx.scene.control.Button;
@@ -148,7 +149,7 @@ public class ContextMenuVUI extends ContextMenu {
 		learnHere.setOnAction(new EventHandler<ActionEvent>() {
 			@Override
 			public void handle(ActionEvent event) {
-				if(amoeba.getPercepts().size() == 2) {
+				if(quick2DRequest && amoeba.getPercepts().size() == 2) {
 					learnTwoDimension(amoeba, vui);
 				} else {
 					learnNDimebsion(amoeba, vui);
diff --git a/AMOEBAonAMAK/src/gui/ContextMenuVUIMulti.java b/AMOEBAonAMAK/src/gui/ContextMenuVUIMulti.java
new file mode 100644
index 0000000000000000000000000000000000000000..68999ebe6d9d75dc4948c5ee1644fe17e852fcc1
--- /dev/null
+++ b/AMOEBAonAMAK/src/gui/ContextMenuVUIMulti.java
@@ -0,0 +1,250 @@
+package gui;
+
+import java.util.HashMap;
+import java.util.Optional;
+
+import agents.percept.Percept;
+import fr.irit.smac.amak.tools.Log;
+import fr.irit.smac.amak.ui.VUI;
+import fr.irit.smac.amak.ui.VUIMulti;
+import javafx.event.ActionEvent;
+import javafx.event.EventHandler;
+import javafx.scene.control.Button;
+import javafx.scene.control.ButtonBar.ButtonData;
+import javafx.scene.control.ButtonType;
+import javafx.scene.control.ContextMenu;
+import javafx.scene.control.Dialog;
+import javafx.scene.control.MenuItem;
+import javafx.scene.control.TextField;
+import javafx.scene.input.ContextMenuEvent;
+import javafx.scene.layout.VBox;
+import kernel.AMOEBA;
+
+/**
+ * The ContextMenu that is shown when right-clicking the {@link VUIMulti} canvas.
+ * @author Hugo
+ *
+ */
+public class ContextMenuVUIMulti extends ContextMenu {
+	/**
+	 * If true will skip window asking for input in 2D problems
+	 */
+	public static boolean quick2DRequest = false;
+	private double reqHereX;
+	private double reqHereY;
+	
+	/**
+	 * Create a {@link ContextMenu} suited for our needs, composed of 2 items : "Request Here" and "Learn here".<br/>
+	 * Set itself as the vui canvas {@link ContextMenu}. 
+	 * @param amoeba the amoeba where {@link AMOEBA#request(HashMap)} and {@link AMOEBA#learn(HashMap)} will be executed.
+ * @param vui the {@link VUIMulti} hosting the {@link ContextMenuVUIMulti}
+	 */
+	public ContextMenuVUIMulti(AMOEBA amoeba, VUIMulti vui) {
+		// "request here" menu item
+		setupRequestHereMenuItem(amoeba, vui);
+		
+		// "learn here" menu item
+		setupLearnHereMenuItem(amoeba, vui);
+		
+		// show context menu on context menu event from VUI's canvas
+		vui.getCanvas().setOnContextMenuRequested(new EventHandler<ContextMenuEvent>() {
+			@Override
+			public void handle(ContextMenuEvent event) {
+				reqHereX = event.getX();
+				reqHereY = event.getY();
+				ContextMenuVUIMulti.this.show(vui.getCanvas(), event.getScreenX(), event.getScreenY());
+			}
+		});	
+	}
+
+	private void setupRequestHereMenuItem(AMOEBA amoeba, VUIMulti vui) {
+		MenuItem reqHere = new MenuItem("Request Here");
+		reqHere.setOnAction(new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				if(quick2DRequest && amoeba.getPercepts().size() == 2) {
+					reqTwoDimension(amoeba, vui);
+				} else {
+					reqNDimension(amoeba, vui);
+				}
+			}
+
+		});
+		this.getItems().add(reqHere);
+	}
+	
+	/**
+	 * The "Request Here" action performed when the amoeba is 2D.<br/>
+	 * Execute a {@link AMOEBA#request(HashMap)} at the position of the click.
+	 * @param amoeba
+	 * @param vui
+	 */
+	private void reqTwoDimension(AMOEBA amoeba, VUIMulti vui) {
+		double x = vui.screenToWorldX(reqHereX);
+		double y = vui.screenToWorldY(reqHereY);
+		HashMap<String, Double> req = new HashMap<String, Double>();
+		req.put(amoeba.getDimensionSelector().d1().getName(), x);
+		req.put(amoeba.getDimensionSelector().d2().getName(), y);
+		req.put("oracle", 0.0);
+		double res = amoeba.request(req);
+		Log.defaultLog.inform("AMOEBA", "Request Here for x:"+x+" y:"+y+" -> "+res+".");
+	}
+	
+	/**
+	 * The "Request Here" action performed when the amoeba is not 2D.<br/>
+	 * Show a {@link Dialog} prompting the user to inputs value for the {@link AMOEBA#request(HashMap)}.
+	 * @param amoeba
+	 * @param vui
+	 */
+	private void reqNDimension(AMOEBA amoeba, VUIMulti vui) {
+		double x = vui.screenToWorldX(reqHereX);
+		double y = vui.screenToWorldY(reqHereY);
+		
+		Dialog<HashMap<String, Double>> dialog = new Dialog<>();
+		dialog.setTitle("Inputs");
+		dialog.setHeaderText("Fill inputs");
+		
+	    // Set the button types.
+	    ButtonType okButtonType = new ButtonType("OK", ButtonData.OK_DONE);
+	    dialog.getDialogPane().getButtonTypes().addAll(okButtonType, ButtonType.CANCEL);
+		
+		// inputs
+		HashMap<String, TextField> textFields = new HashMap<>();
+		VBox vbox = new VBox();
+		for(Percept p : amoeba.getPercepts()) {
+			TextField tf = new TextField();
+			textFields.put(p.getName(), tf);
+			tf.setPromptText(p.getName());
+			if(p.getName().equals(amoeba.getDimensionSelector().d1().getName())) {
+				tf.setText(x+"");
+			}
+			if(p.getName().equals(amoeba.getDimensionSelector().d2().getName())) {
+				tf.setText(y+"");
+			}
+			vbox.getChildren().add(tf);
+		}
+		
+		dialog.getDialogPane().setContent(vbox);
+		dialog.setResultConverter(dialogButton -> {
+	        if (dialogButton == okButtonType) {
+	        	HashMap<String, Double> req = new HashMap<String, Double>();
+	        	for(String k : textFields.keySet()) {
+	        		req.put(k, Double.valueOf(textFields.get(k).getText()));
+	        	}
+	        	req.put("oracle", 0.0);
+	            return req;
+	        }
+	        return null;
+	    });
+		
+		Optional<HashMap<String, Double>> result = dialog.showAndWait();
+		result.ifPresent(req -> {
+			double res = amoeba.request(req);
+			Log.defaultLog.inform("AMOEBA", "Request Here for "+req+"\n-> "+res+".");
+		});
+	}
+	
+	private void setupLearnHereMenuItem(AMOEBA amoeba, VUIMulti vui) {
+		MenuItem learnHere = new MenuItem("Learn Here");
+		learnHere.setOnAction(new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				if(quick2DRequest && amoeba.getPercepts().size() == 2) {
+					learnTwoDimension(amoeba, vui);
+				} else {
+					learnNDimebsion(amoeba, vui);
+				}
+			}
+
+		});
+		this.getItems().add(learnHere);
+	}
+	
+	/**
+	 * The "Learn Here" action performed when the amoeba is 2D.<br/>
+	 * Execute a {@link AMOEBA#learn(HashMap)} at the position of the click.
+	 * @param amoeba
+	 * @param vui
+	 */
+	private void learnTwoDimension(AMOEBA amoeba, VUIMulti vui) {
+		double x = vui.screenToWorldX(reqHereX);
+		double y = vui.screenToWorldY(reqHereY);
+		HashMap<String, Double> req = new HashMap<String, Double>();
+		req.put(amoeba.getDimensionSelector().d1().getName(), x);
+		req.put(amoeba.getDimensionSelector().d2().getName(), y);
+		req.put("oracle", amoeba.studiedSystem.requestOracle(req));
+		amoeba.learn(req);
+	}
+	
+	/**
+	 * The "Learn Here" action performed when the amoeba is not 2D.<br/>
+	 * Show a {@link Dialog} prompting the user to inputs value for the {@link AMOEBA#learn(HashMap)}.
+	 * @param amoeba
+	 * @param vui
+	 */
+	private void learnNDimebsion(AMOEBA amoeba, VUIMulti vui) {
+		double x = vui.screenToWorldX(reqHereX);
+		double y = vui.screenToWorldY(reqHereY);
+		
+		Dialog<HashMap<String, Double>> dialog = new Dialog<>();
+		dialog.setTitle("Inputs");
+		dialog.setHeaderText("Fill inputs");
+		
+	    // Set the button types.
+	    ButtonType okButtonType = new ButtonType("OK", ButtonData.OK_DONE);
+	    dialog.getDialogPane().getButtonTypes().addAll(okButtonType, ButtonType.CANCEL);
+		
+		// inputs
+		HashMap<String, TextField> textFields = new HashMap<>();
+		VBox vbox = new VBox();
+		for(Percept p : amoeba.getPercepts()) {
+			TextField tf = new TextField();
+			textFields.put(p.getName(), tf);
+			tf.setPromptText(p.getName());
+			if(p.getName().equals(amoeba.getDimensionSelector().d1().getName())) {
+				tf.setText(x+"");
+			}
+			if(p.getName().equals(amoeba.getDimensionSelector().d2().getName())) {
+				tf.setText(y+"");
+			}
+			vbox.getChildren().add(tf);
+		}
+		
+		//oracle
+		TextField oracle = new TextField();
+		textFields.put("oracle", oracle);
+		oracle.setPromptText("oracle");
+		Button autoOracle = new Button("Autofill oracle");
+		autoOracle.setOnAction(new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				HashMap<String, Double> req = new HashMap<String, Double>();
+				for(String k : textFields.keySet()) {
+					if(!"oracle".equals(k)) {
+						req.put(k, Double.valueOf(textFields.get(k).getText()));
+					}
+	        	}
+				oracle.setText(amoeba.studiedSystem.requestOracle(req)+"");
+			}
+		});
+		vbox.getChildren().addAll(oracle, autoOracle);
+		
+		dialog.getDialogPane().setContent(vbox);
+		dialog.setResultConverter(dialogButton -> {
+	        if (dialogButton == okButtonType) {
+	        	HashMap<String, Double> req = new HashMap<String, Double>();
+	        	for(String k : textFields.keySet()) {
+	        		req.put(k, Double.valueOf(textFields.get(k).getText()));
+	        	}
+	            return req;
+	        }
+	        return null;
+	    });
+		
+		Optional<HashMap<String, Double>> result = dialog.showAndWait();
+		result.ifPresent(req -> {
+			amoeba.learn(req);
+			Log.defaultLog.inform("AMOEBA", "Learn Here for "+req+" done.");
+		});
+	}
+}
diff --git a/AMOEBAonAMAK/src/gui/ContextRendererFX.java b/AMOEBAonAMAK/src/gui/ContextRendererFX.java
index 75ddf4125607f21b3ef19ca0dcaaea006105be4d..f43ea2895ae147bbeeaa4c4affdb49cace739491 100644
--- a/AMOEBAonAMAK/src/gui/ContextRendererFX.java
+++ b/AMOEBAonAMAK/src/gui/ContextRendererFX.java
@@ -2,6 +2,7 @@ package gui;
 
 import agents.context.Context;
 import agents.percept.Percept;
+import fr.irit.smac.amak.ui.VUIMulti;
 import fr.irit.smac.amak.ui.drawables.DrawableRectangle;
 import gui.utils.ContextColor;
 import javafx.scene.paint.Color;
@@ -40,10 +41,52 @@ public class ContextRendererFX extends RenderStrategy {
 	}
 
 	private void updateColor() {
+		//setColorWithPrediction();
+		setColorWithCoefs();
+	}
+	
+	private void setColorWithCoefs() {
 		Double[] c = ContextColor.colorFromCoefs(context.getFunction().getCoef());
-		drawable.setColor(new Color(c[0], c[1], c[2], 90d / 255d));
+		if(context.isInNeighborhood) {
+			drawable.setColor(new Color(c[0], c[1], c[2], 255d / 255d));
+		}else {
+			drawable.setColor(new Color(c[0], c[1], c[2], 90d / 255d));
+		}
+		
+	}
+	
+	private void setColorWithPrediction() {
+		
+		double r = 0.0;
+		double g = 0.0;
+		double b = 0.0;
+		
+		if(context.lastPrediction!=null) {
+			r = context.lastPrediction < 0 ? Math.abs(context.lastPrediction)/2 : 0.0;
+			g = context.lastPrediction > 0 ? context.lastPrediction/2	 : 0.0;
+			r = r > 1.0 ? 1.0 : r;
+			g = g > 1.0 ? 1.0 : g;
+		}else {
+			b = 1.0;
+		}
+		if(context.lastPrediction != null && context.lastPrediction == -1.0) {
+			r = 1.0;
+			g = 0.0;
+			b = 1.0;
+		}
+		if(context.lastPrediction != null && Math.abs(context.lastPrediction) > 10000) {
+			
+			r = 1.0;
+			g = 1.0;
+			b = 0.0;
+		}
+		
+		
+		drawable.setColor(new Color(r, g, b, 200d / 255d));
 	}
 	
+	
+	
 	public String getColorForUnity() {
 		Double[] c = ContextColor.colorFromCoefs(context.getFunction().getCoef());
 		 return c[0].intValue() + "," + c[1].intValue() + "," + c[2].intValue() + ",100";
@@ -64,8 +107,8 @@ public class ContextRendererFX extends RenderStrategy {
 	 * window.
 	 */
 	@Override
-	public void initialize() {
-		getDrawable().setName(context.toString()); // create the drawable if it does not exist
+	public void initialize(VUIMulti vui) {
+		getDrawable(vui).setName(context.toString()); // create the drawable if it does not exist
 
 	}
 
@@ -81,10 +124,10 @@ public class ContextRendererFX extends RenderStrategy {
 	 * 
 	 * @return
 	 */
-	public DrawableRectangle getDrawable() {
+	public DrawableRectangle getDrawable(VUIMulti vui) {
 		if (!context.isDying() && drawable == null) {
 			drawable = new DrawableContext(0, 0, 0, 0, context);
-			AmoebaWindow.instance().mainVUI.add(drawable);
+			vui.add(drawable);
 		}
 		return drawable;
 	}
diff --git a/AMOEBAonAMAK/src/gui/NoneRenderer.java b/AMOEBAonAMAK/src/gui/NoneRenderer.java
index 6d154fdbf4b7ea73541d8e593b34fe66841d0e4c..438a9809fe1a6c41d54ca611719fdcd80e2e25b7 100644
--- a/AMOEBAonAMAK/src/gui/NoneRenderer.java
+++ b/AMOEBAonAMAK/src/gui/NoneRenderer.java
@@ -1,5 +1,7 @@
 package gui;
 
+import fr.irit.smac.amak.ui.VUIMulti;
+
 /**
  * A render strategy that does nothing.
  * @author Hugo
@@ -12,7 +14,7 @@ public class NoneRenderer extends RenderStrategy {
 	}
 
 	@Override
-	public void initialize() {
+	public void initialize(VUIMulti vui) {
 	}
 
 	@Override
diff --git a/AMOEBAonAMAK/src/gui/RenderStrategy.java b/AMOEBAonAMAK/src/gui/RenderStrategy.java
index d6bf4f7424a9b9166fb9d1ffe30ae5a34d740e99..7552c9518486d3050b687bb8f78114e2d274e774 100644
--- a/AMOEBAonAMAK/src/gui/RenderStrategy.java
+++ b/AMOEBAonAMAK/src/gui/RenderStrategy.java
@@ -1,5 +1,7 @@
 package gui;
 
+import fr.irit.smac.amak.ui.VUIMulti;
+
 /**
  * Strategy on how to render an object.
  * See {@link ContextRendererFX} for example on how to extends this class.
@@ -16,7 +18,8 @@ public abstract class RenderStrategy {
 	/**
 	 * Called when the rendered object need to be initialized
 	 */
-	abstract public void initialize();
+	//abstract public void initialize();
+	abstract public void initialize(VUIMulti vui);
 	
 	/**
 	 * Called to render the object.
diff --git a/AMOEBAonAMAK/src/gui/saveExplorer/SaveExplorer.java b/AMOEBAonAMAK/src/gui/saveExplorer/SaveExplorer.java
index 1cb91054d89b371ffccabf2fc05bc2551eab408c..325f0160b06ab9f6dad1394f332bd6d0cd057fa1 100644
--- a/AMOEBAonAMAK/src/gui/saveExplorer/SaveExplorer.java
+++ b/AMOEBAonAMAK/src/gui/saveExplorer/SaveExplorer.java
@@ -257,7 +257,7 @@ public class SaveExplorer extends VBox {
 	 */
 	public static void main(String[] args) throws ClassNotFoundException, IOException {
 		System.out.println("New AMOEBA launched.");
-		AMOEBA amoeba = new AMOEBA(args[0], (StudiedSystem)SerializeBase64.deserialize(args[1]));
+		AMOEBA amoeba = new AMOEBA(null,null,args[0], (StudiedSystem)SerializeBase64.deserialize(args[1]));
 		//amoeba.allowGraphicalScheduler(false);
 		for(Percept p : amoeba.getPercepts()) {
 			p.setValue(amoeba.getPerceptions(p.getName()));
diff --git a/AMOEBAonAMAK/src/kernel/AMOEBA.java b/AMOEBAonAMAK/src/kernel/AMOEBA.java
index dd2b1e7a6e324d66aac411429f0a847ac00ce42b..1d95549f77382f676023230d87d1ce3f784ba132 100644
--- a/AMOEBAonAMAK/src/kernel/AMOEBA.java
+++ b/AMOEBAonAMAK/src/kernel/AMOEBA.java
@@ -12,7 +12,6 @@ import java.util.stream.Stream;
 import agents.AmoebaAgent;
 import agents.context.Context;
 import agents.context.localModel.LocalModel;
-import agents.context.localModel.LocalModelMillerRegression;
 import agents.context.localModel.TypeLocalModel;
 import agents.head.Head;
 import agents.percept.Percept;
@@ -23,6 +22,8 @@ import fr.irit.smac.amak.Scheduling;
 import fr.irit.smac.amak.tools.Log;
 import fr.irit.smac.amak.tools.RunLaterHelper;
 import fr.irit.smac.amak.ui.AmakPlot;
+import fr.irit.smac.amak.ui.VUIMulti;
+import gui.AmoebaMultiUIWindow;
 import gui.AmoebaWindow;
 import gui.DimensionSelector;
 import kernel.backup.IBackupSystem;
@@ -38,6 +39,9 @@ import utils.PrintOnce;
  */
 public class AMOEBA extends Amas<World> implements IAMOEBA {
 	// -- Attributes
+	
+	
+	public VUIMulti vuiMulti;
 	/**
 	 * Utility to save, autosave, and load amoebas.
 	 */
@@ -48,6 +52,8 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 	 */
 	public StudiedSystem studiedSystem;
 	
+	public AmoebaMultiUIWindow multiUIWindow;
+	
 	private Head head;
 	private TypeLocalModel localModel = TypeLocalModel.MILLER_REGRESSION;
 	private HashMap<String, Double> perceptions = new HashMap<String, Double>();
@@ -56,6 +62,7 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 	private boolean runAll = false;
 	private boolean creationOfNewContext = true;
 	private boolean renderUpdate;
+	private boolean reinforcementMode = false;
 	
 	private int cycleWithoutRender = 0;
 
@@ -82,8 +89,9 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 	 * @param studiedSystem
 	 *            the studied system
 	 */
-	public AMOEBA() {
-		super(new World(), Scheduling.HIDDEN);
+	public AMOEBA(AmoebaMultiUIWindow window, VUIMulti vui) {
+		super(window, vui, new World(), Scheduling.HIDDEN);
+		vuiMulti = vui;
 	}
 	
 	/**
@@ -91,11 +99,12 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 	 * 
 	 * @param path path to the config file.
 	 */
-	public AMOEBA(String path, StudiedSystem studiedSystem) {
-		super(new World(), Scheduling.HIDDEN);
+	public AMOEBA(AmoebaMultiUIWindow window, VUIMulti vui, String path, StudiedSystem studiedSystem) {
+		super(window, vui, new World(), Scheduling.HIDDEN);
+		vuiMulti = vui;
 		this.studiedSystem = studiedSystem;
 		setRenderUpdate(true);
-		saver = new SaveHelperImpl(this);
+		saver = new SaveHelperImpl(this, window);
 		saver.load(path);
 	}
 
@@ -112,23 +121,22 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 	
 	@Override
 	protected void onRenderingInitialization() {
-		AmoebaWindow.instance().initialize(this);
+		((AmoebaMultiUIWindow) amasMultiUIWindow).initialize(this);
 	}
 
 	@Override
 	protected void onUpdateRender() {
 		// Update statistics
-		if(AmoebaWindow.isInstance()) {
-			AmoebaWindow window = AmoebaWindow.instance();
-
-			AmakPlot loopNCS = window.getPlot("This loop NCS");
-			AmakPlot allNCS = window.getPlot("All time NCS");
-			AmakPlot nbAgent = window.getPlot("Number of agents");
-			AmakPlot errors = window.getPlot("Errors");
-			AmakPlot distancesToModels = window.getPlot("Distances to models");
-			AmakPlot gloabalMappingCriticality = window.getPlot("Global Mapping Criticality");
-			AmakPlot timeExecution = window.getPlot("Time Execution");
-			AmakPlot criticalities = window.getPlot("Criticalities");
+		if(amasMultiUIWindow!=null) {
+
+			AmakPlot loopNCS = ((AmoebaMultiUIWindow)amasMultiUIWindow).getPlot("This loop NCS");
+			AmakPlot allNCS = ((AmoebaMultiUIWindow)amasMultiUIWindow).getPlot("All time NCS");
+			AmakPlot nbAgent = ((AmoebaMultiUIWindow)amasMultiUIWindow).getPlot("Number of agents");
+			AmakPlot errors = ((AmoebaMultiUIWindow)amasMultiUIWindow).getPlot("Errors");
+			AmakPlot distancesToModels = ((AmoebaMultiUIWindow)amasMultiUIWindow).getPlot("Distances to models");
+			AmakPlot gloabalMappingCriticality = ((AmoebaMultiUIWindow)amasMultiUIWindow).getPlot("Global Mapping Criticality");
+			AmakPlot timeExecution = ((AmoebaMultiUIWindow)amasMultiUIWindow).getPlot("Time Execution");
+			AmakPlot criticalities = ((AmoebaMultiUIWindow)amasMultiUIWindow).getPlot("Criticalities");
 			
 			
 			boolean notify = isRenderUpdate();
@@ -174,7 +182,7 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 		}
 		
 		if (isRenderUpdate()) {
-			AmoebaWindow.instance().mainVUI.updateCanvas();
+			((AmoebaMultiUIWindow)amasMultiUIWindow).mainVUI.updateCanvas();
 			updateAgentsVisualisation();
 			RunLaterHelper.runLater(() -> {resetCycleWithoutRender();});
 		}
@@ -183,7 +191,7 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 	@Override
 	protected void onSystemCycleBegin() {
 		if (cycle % 1000 == 0) {
-			Log.defaultLog.inform("AMOEBA", "Cycle " + cycle + ". Nb agents: "+getAgents().size());
+			//Log.defaultLog.inform("AMOEBA", "Cycle " + cycle + ". Nb agents: "+getAgents().size());
 		}
 		
 		if(isRenderUpdate()) {
@@ -209,6 +217,12 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 			perceptions = studiedSystem.getOutput();
 			
 			
+			if(perceptions.get("oracle")==null) {
+				data.useOracle = false;
+			}else {
+				data.useOracle = true;
+			}
+			
 		}
 		
 		environment.preCycleActions();
@@ -220,6 +234,7 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 		toKillContexts.clear();
 		lastModifiedContext.clear();
 		alteredContexts.clear();
+		data.higherNeighborLastPredictionPercepts=null;
 	}
 	
 	synchronized private void incrementCycleWithoutRender() {
@@ -238,11 +253,17 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 	protected void onSystemCycleEnd() {
 		
 		if(studiedSystem != null) {
-			if(head.isActiveLearning()) {
-				studiedSystem.setActiveLearning(true);
+			if(data.selfLearning) {
+				data.selfLearning = false;
+				studiedSystem.setSelfLearning(true);
 				studiedSystem.setSelfRequest(head.getSelfRequest());
 				 
 			}
+			else if(data.activeLearning) {
+				data.activeLearning = false;
+				studiedSystem.setActiveLearning(true);
+				studiedSystem.setSelfRequest(head.getActiveRequest());
+			}
 		}
 		
 		super.onSystemCycleEnd();
@@ -394,56 +415,70 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 		return getAction();
 	}
 	
+	
+	public HashMap<String, Double> reinforcementRequest(HashMap<String, Double> perceptionsActionState) {
+		boolean usingOracle = isUseOracle();
+		if (usingOracle)
+			head.changeOracleConnection();
+		StudiedSystem ss = studiedSystem;
+		studiedSystem = null;
+		setPerceptionsAndActionState(perceptionsActionState);
+		cycle();
+		if (usingOracle)
+			head.changeOracleConnection();
+		studiedSystem = ss;
+		return getHigherNeighborLastPredictionPercepts();
+	}
+	
 	@Override
-	public HashMap<String, Double> maximize(HashMap<String, Double> known){
+	public HashMap<String, Double> maximize(HashMap<String, Double> fixedPercepts){
 		ArrayList<Percept> percepts = getPercepts();
-		ArrayList<Percept> unknown = new ArrayList<>(percepts);
-		unknown.removeIf(p ->known.containsKey(p.getName()));
+		ArrayList<Percept> freePercepts = new ArrayList<>(percepts);
+		freePercepts.removeIf(p ->fixedPercepts.containsKey(p.getName()));
 		//System.out.println("known : "+known.keySet());
 		//System.out.println("unknow : "+unknown);
-		if(unknown.isEmpty()) {
+		if(freePercepts.isEmpty()) {
 			return null;
 		}
 		
 		//get partially activated context
-		ArrayList<Context> pac = new ArrayList<>();
-		for(Context c : getContexts()) {
+		ArrayList<Context> partiallyActivatedCtxts = new ArrayList<>();
+		for(Context ctxt : getContexts()) {
 			boolean good = true;
-			for(String p : known.keySet()) {
-				if(!c.getRangeByPerceptName(p).contains2(known.get(p))) {
+			for(String pctString : fixedPercepts.keySet()) {
+				
+				if(!ctxt.getRangeByPerceptName(pctString).contains2(fixedPercepts.get(pctString))) {
 					good = false;
 					break;
 				}
 			}
-			if(good) pac.add(c);
+			if(good) partiallyActivatedCtxts.add(ctxt);
 		}
 		
-		ArrayList<HashMap<String, Double>> sol = new ArrayList<>();
-		for(Context c : pac) {
-			sol.add(c.getLocalModel().getMaxWithConstraint(known));
+		ArrayList<HashMap<String, Double>> posibleSolutions = new ArrayList<>();
+		for(Context ctxt : partiallyActivatedCtxts) {
+			posibleSolutions.add(ctxt.getLocalModel().getMaxWithConstraint(fixedPercepts));
 		}
-		HashMap<String, Double> max = new HashMap<>();
+		HashMap<String, Double> maxSolution = new HashMap<>();
 
 		Double maxValue = Double.NEGATIVE_INFINITY;
-		max.put("oracle", maxValue);
+		maxSolution.put("oracle", maxValue);
 		//find best solution
-		for(HashMap<String, Double> s : sol) {
+		for(HashMap<String, Double> s : posibleSolutions) {
 			if(s.get("oracle") > maxValue) {
 				maxValue = s.get("oracle");
-				max = s;
+				maxSolution = s;
 			}
 		}
-		return max;
+		return maxSolution;
 	}
 
+	public LocalModel buildLocalModel(Context context, TypeLocalModel type) {
+		return type.factory.buildLocalModel(context);
+	}
+	
 	public LocalModel buildLocalModel(Context context) {
-		switch (localModel) {
-		case MILLER_REGRESSION:
-			return new LocalModelMillerRegression(context);
-
-		default:
-			throw new IllegalArgumentException("Unknown model " + localModel + ".");
-		}
+		return buildLocalModel(context, localModel);
 	}
 
 	/**
@@ -454,7 +489,7 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 	 */
 	public void allowGraphicalScheduler(boolean allow) {
 		if (!Configuration.commandLineMode) {
-			AmoebaWindow.instance().schedulerToolbar.setDisable(!allow);
+			((AmoebaMultiUIWindow)amasMultiUIWindow).schedulerToolbar.setDisable(!allow);
 		}
 	}
 
@@ -476,7 +511,7 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 		super.addPendingAgents();
 		nextCycleRunAllAgents();
 		if(!Configuration.commandLineMode) {
-			AmoebaWindow.instance().dimensionSelector.update(getPercepts());
+			((AmoebaMultiUIWindow)amasMultiUIWindow).dimensionSelector.update(getPercepts());
 			updateAgentsVisualisation();
 		}
 	}
@@ -495,6 +530,14 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 	public void setLocalModel(TypeLocalModel localModel) {
 		this.localModel = localModel;
 	}
+	
+	public void setReinforcement(boolean value) {
+		reinforcementMode = value;
+	}
+	
+	public boolean isReinforcement() {
+		return reinforcementMode;
+	}
 
 	/**
 	 * Activate or deactivate rendering of agents at runtime.
@@ -504,7 +547,7 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 	public void setRenderUpdate(boolean renderUpdate) {
 		if (!Configuration.commandLineMode) {
 			this.renderUpdate = renderUpdate;
-			AmoebaWindow.instance().toggleRender.setSelected(renderUpdate);
+			((AmoebaMultiUIWindow)amasMultiUIWindow).toggleRender.setSelected(renderUpdate);
 			if(renderUpdate == true)
 				nextCycleRunAllAgents();
 		}
@@ -527,6 +570,12 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 	public double getAction() {
 		return head.getAction();
 	}
+	
+	
+	
+	public HashMap<String, Double> getHigherNeighborLastPredictionPercepts() {
+		return head.getHigherNeighborLastPredictionPercepts();
+	}
 
 	public ArrayList<Context> getContexts() {
 		ArrayList<Context> contexts = new ArrayList<>();
@@ -595,13 +644,13 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 		for(Agent<? extends Amas<World>, World> a : getAgents()) {
 			a.onUpdateRender();
 		}
-		AmoebaWindow.instance().point.move(AmoebaWindow.instance().dimensionSelector.d1().getValue(), AmoebaWindow.instance().dimensionSelector.d2().getValue());
-		AmoebaWindow.instance().rectangle.setHeight(2*getEnvironment().getContextCreationNeighborhood(null, AmoebaWindow.instance().dimensionSelector.d2()));
-		AmoebaWindow.instance().rectangle.setWidth(2*getEnvironment().getContextCreationNeighborhood(null, AmoebaWindow.instance().dimensionSelector.d1()));
-		AmoebaWindow.instance().rectangle.move(AmoebaWindow.instance().dimensionSelector.d1().getValue() - getEnvironment().getContextCreationNeighborhood(null, AmoebaWindow.instance().dimensionSelector.d1()), AmoebaWindow.instance().dimensionSelector.d2().getValue() - getEnvironment().getContextCreationNeighborhood(null, AmoebaWindow.instance().dimensionSelector.d2()));
-		AmoebaWindow.instance().mainVUI.updateCanvas();
-		AmoebaWindow.instance().point.toFront();
-		AmoebaWindow.instance().point.setInfo(getCursorInfo());
+		((AmoebaMultiUIWindow)amasMultiUIWindow).point.move(((AmoebaMultiUIWindow)amasMultiUIWindow).dimensionSelector.d1().getValue(), ((AmoebaMultiUIWindow)amasMultiUIWindow).dimensionSelector.d2().getValue());
+		((AmoebaMultiUIWindow)amasMultiUIWindow).rectangle.setHeight(2*getEnvironment().getContextCreationNeighborhood(null, ((AmoebaMultiUIWindow)amasMultiUIWindow).dimensionSelector.d2()));
+		((AmoebaMultiUIWindow)amasMultiUIWindow).rectangle.setWidth(2*getEnvironment().getContextCreationNeighborhood(null, ((AmoebaMultiUIWindow)amasMultiUIWindow).dimensionSelector.d1()));
+		((AmoebaMultiUIWindow)amasMultiUIWindow).rectangle.move(((AmoebaMultiUIWindow)amasMultiUIWindow).dimensionSelector.d1().getValue() - getEnvironment().getContextCreationNeighborhood(null, ((AmoebaMultiUIWindow)amasMultiUIWindow).dimensionSelector.d1()), ((AmoebaMultiUIWindow)amasMultiUIWindow).dimensionSelector.d2().getValue() - getEnvironment().getContextCreationNeighborhood(null, ((AmoebaMultiUIWindow)amasMultiUIWindow).dimensionSelector.d2()));
+		((AmoebaMultiUIWindow)amasMultiUIWindow).mainVUI.updateCanvas();
+		((AmoebaMultiUIWindow)amasMultiUIWindow).point.toFront();
+		((AmoebaMultiUIWindow)amasMultiUIWindow).point.setInfo(getCursorInfo());
 	}
 	
 	/**
@@ -609,7 +658,7 @@ public class AMOEBA extends Amas<World> implements IAMOEBA {
 	 * @return
 	 */
 	public DimensionSelector getDimensionSelector() {
-		return AmoebaWindow.instance().dimensionSelector;
+		return ((AmoebaMultiUIWindow)amasMultiUIWindow).dimensionSelector;
 	}
 	
 	/**
diff --git a/AMOEBAonAMAK/src/kernel/AmoebaData.java b/AMOEBAonAMAK/src/kernel/AmoebaData.java
index 5f2e57a82c17eef5f047bdf4a699585dd9f6b8a0..a413c55a28cc554b120c0f9b02363f8a1ce2c6b6 100644
--- a/AMOEBAonAMAK/src/kernel/AmoebaData.java
+++ b/AMOEBAonAMAK/src/kernel/AmoebaData.java
@@ -1,9 +1,12 @@
 package kernel;
 
 import java.io.Serializable;
+import java.util.ArrayList;
 import java.util.HashMap;
 
+import agents.context.Context;
 import agents.head.DynamicPerformance;
+import agents.percept.Percept;
 /**
  * A Plain Old Java Object for storing parameters for the Head. Keep it as simple as possible for ease of serialization.<br/>
  * Make sure that all member are : public, serializable, and with a default constructor (taking no parameters).
@@ -19,6 +22,7 @@ public class AmoebaData implements Serializable {
 	public int numberOfCriticityValuesForAverageforVizualisation = 300;
 
 	public Double prediction;
+	public HashMap<String, Double> higherNeighborLastPredictionPercepts = null;
 	public Double endogenousPredictionActivatedContextsOverlaps = 0.0;
 	public Double endogenousPredictionActivatedContextsOverlapsWorstDimInfluence = 0.0;
 	public Double endogenousPredictionActivatedContextsOverlapsInfluenceWithoutConfidence = 0.0;
@@ -28,8 +32,8 @@ public class AmoebaData implements Serializable {
 	public Double endogenousPredictionNContexts = 0.0;
 	public Double endogenousPredictionNContextsByInfluence = 0.0;
 
-	public double oracleValue;
-	public double oldOracleValue;
+	public Double oracleValue;
+	public Double oldOracleValue;
 	public double criticity = 0.0;
 	public double distanceToRegression;
 	public double oldCriticity;
@@ -42,7 +46,11 @@ public class AmoebaData implements Serializable {
 	public boolean newContextWasCreated = false;
 	public boolean contextFromPropositionWasSelected = false;
 	
+	public boolean isActiveLearning;
+	public boolean isSelfLearning;
+	
 	public boolean activeLearning = false;
+	public boolean selfLearning = false;
 	
 	public HashMap<String, Double> selfRequest;
 
@@ -80,4 +88,6 @@ public class AmoebaData implements Serializable {
 	public double initRegressionPerformance = 1.0;
 	
 	public double averageRegressionPerformanceIndicator;
+	
+	
 }
\ No newline at end of file
diff --git a/AMOEBAonAMAK/src/kernel/StudiedSystem.java b/AMOEBAonAMAK/src/kernel/StudiedSystem.java
index 0def257b257778365c2639fb813e05a9f3fb324d..d69a549f838e5746da2ec52524d9f9bd63e9c428 100644
--- a/AMOEBAonAMAK/src/kernel/StudiedSystem.java
+++ b/AMOEBAonAMAK/src/kernel/StudiedSystem.java
@@ -10,11 +10,13 @@ import agents.percept.Percept;
  * 
  */
 public interface StudiedSystem extends Serializable{
+	
+	
 
 	/**
 	 * Tell the StudiedSystem to advance its simulation of one step
 	 */
-	public void playOneStep();
+	public HashMap<String, Double> playOneStep();
 
 	/**
 	 * Gets the output for the current step.
@@ -37,8 +39,30 @@ public interface StudiedSystem extends Serializable{
 	public double requestOracle(HashMap<String, Double> request);
 	
 	public void setActiveLearning(boolean value);
-	
+	public void setSelfLearning(boolean value);
 	
 	public void setSelfRequest(HashMap<Percept, Double> request);
 
+
+	public HashMap<String, Double> playOneStepWithControlModel();
+	
+	public void setControlModels(HashMap<String, AMOEBA> controlModels);
+	
+	public void setControl(boolean value);
+	
+	public Double getActiveRequestCounts() ;
+	
+
+
+
+
+	public Double getSelfRequestCounts() ;
+
+
+
+
+	public Double getRandomRequestCounts() ;
+	
+	
+
 }
diff --git a/AMOEBAonAMAK/src/kernel/World.java b/AMOEBAonAMAK/src/kernel/World.java
index e248a189449055d43fff29bc5ac33ff3d1e90fdf..01d9346e77fba479ca986ffbcaf0c6f4af729ef7 100644
--- a/AMOEBAonAMAK/src/kernel/World.java
+++ b/AMOEBAonAMAK/src/kernel/World.java
@@ -34,7 +34,9 @@ public class World extends Environment {
 	
 	
 	
-	public static TRACE_LEVEL minLevel = TRACE_LEVEL.ERROR;
+	
+	
+	public static TRACE_LEVEL minLevel = TRACE_LEVEL.INFORM;
 	
 	private AMOEBA amoeba;
 
diff --git a/AMOEBAonAMAK/src/kernel/backup/BackupSystem.java b/AMOEBAonAMAK/src/kernel/backup/BackupSystem.java
index dcb6f72cbed6ef14922c82033642c7dc47370367..a6e1cc6c72eed272eb0fc2165e37236feda00558 100644
--- a/AMOEBAonAMAK/src/kernel/backup/BackupSystem.java
+++ b/AMOEBAonAMAK/src/kernel/backup/BackupSystem.java
@@ -25,7 +25,6 @@ import agents.context.Context;
 import agents.context.Experiment;
 import agents.context.Range;
 import agents.context.localModel.LocalModel;
-import agents.context.localModel.LocalModelMillerRegression;
 import agents.context.localModel.TypeLocalModel;
 import agents.head.Head;
 import agents.percept.Percept;
@@ -269,21 +268,15 @@ public class BackupSystem implements IBackupSystem {
 		// -- Load Model
 		String localModelName = localModelElement.getAttributeValue("Type");
 		TypeLocalModel type = TypeLocalModel.valueOf(localModelName);
-		LocalModel localModel;
-		switch (type) {
-		case MILLER_REGRESSION:
-			List<Double> coefs = new ArrayList<>();
-			for(Element e : localModelElement.getChild("Coefs").getChildren()) {
-				coefs.add(Double.valueOf(e.getAttributeValue("v")));
-			}
-			localModel = new LocalModelMillerRegression(context, coefs.toArray(new Double[coefs.size()]), experiments);
-			break;
-		default:
-			throw new IllegalArgumentException("Found unknown model " + localModelName + " in XML file. ");
+		LocalModel localModel = amoeba.buildLocalModel(context, type);
+		List<Double> coefs = new ArrayList<>();
+		for(Element e : localModelElement.getChild("Coefs").getChildren()) {
+			coefs.add(Double.valueOf(e.getAttributeValue("v")));
 		}
+		Double[] coefArray = coefs.toArray(new Double[coefs.size()]);
+		localModel.setFirstExperiments(experiments);
+		localModel.setCoef(coefArray);
 		context.setLocalModel(localModel);
-		
-		
 	}
 
 	private void loadRanges(Context context, Element elemRanges) {
diff --git a/AMOEBAonAMAK/src/kernel/backup/SaveHelperImpl.java b/AMOEBAonAMAK/src/kernel/backup/SaveHelperImpl.java
index b2258c51d799c99816a3b9d049b84901f5374c1f..84c08ccf2b9de09de02373cb4a75302a3f93e616 100644
--- a/AMOEBAonAMAK/src/kernel/backup/SaveHelperImpl.java
+++ b/AMOEBAonAMAK/src/kernel/backup/SaveHelperImpl.java
@@ -10,7 +10,9 @@ import java.util.ArrayList;
 import java.util.List;
 
 import fr.irit.smac.amak.Configuration;
+import fr.irit.smac.amak.ui.AmasMultiUIWindow;
 import fr.irit.smac.amak.ui.MainWindow;
+import gui.AmoebaMultiUIWindow;
 import gui.AmoebaWindow;
 import gui.saveExplorer.SaveExplorer;
 import javafx.event.ActionEvent;
@@ -33,6 +35,9 @@ public class SaveHelperImpl implements ISaveHelper{
 	public static final String autosaveDirName = "autosave";
 	public static final String manualsaveDirName = "manual";
 
+	
+	public AmoebaMultiUIWindow amoebaMultiUIWindow;
+	
 	/**
 	 * The backup system used by the SaveHelper.
 	 */
@@ -116,6 +121,49 @@ public class SaveHelperImpl implements ISaveHelper{
 			setupGraphicalTool();
 		}
 	}
+	
+	public SaveHelperImpl(AMOEBA amoeba, AmoebaMultiUIWindow window) {
+		amoebaMultiUIWindow = window;
+		autoSave = !Configuration.commandLineMode;
+		this.amoeba = amoeba;
+		backupSystem = new BackupSystem(amoeba);
+		String dirName = amoeba.toString() + "_" + System.currentTimeMillis();
+		dir = Paths.get(savesRoot, dirName);
+		if (autoSave) {
+			dirAuto = Paths.get(dir.toString(), autosaveDirName);
+			try {
+				Files.createDirectories(dirAuto);
+			} catch (IOException e) {
+				e.printStackTrace();
+				System.err.println("Cannot create auto save directory. Auto saving is disabled.");
+				dirAuto = null;
+				autoSave = false;
+			}
+		}
+		dirManual = Paths.get(dir.toString(), manualsaveDirName);
+		try {
+			Files.createDirectories(dirManual);
+		} catch (IOException e) {
+			e.printStackTrace();
+			System.err.println("Cannot create manual save directory.");
+			dirManual = null;
+		}
+
+		// add graphical element if relevant
+		SaveExplorer se = new SaveExplorer(amoeba);
+		window.addTabbedPanel("Save Explorer", se);
+		window.addOnCloseAction(()-> {
+			if(deleteFolderOnClose) {
+				try {
+					DeleteDirectory.deleteDirectoryRecursion(dir);
+				} catch (IOException e) {
+					e.printStackTrace();
+					System.err.println("Failed to delete saves files on close.");
+				}
+			}
+		});
+		setupGraphicalTool(window);
+	}
 
 	@Override
 	public void load(String path) {
@@ -187,7 +235,7 @@ public class SaveHelperImpl implements ISaveHelper{
 	 * Add save/load options in the main window.
 	 */
 	private void setupGraphicalTool() {
-		MainWindow mw = AmoebaWindow.instance();
+		AmoebaMultiUIWindow mw = amoebaMultiUIWindow;
 		// TODO remove if they exist items Save and Load in menu Option.
 		FileChooser fileChooser = new FileChooser();
 		fileChooser.getExtensionFilters().addAll(new FileChooser.ExtensionFilter("XML", "*.xml"),
@@ -198,7 +246,7 @@ public class SaveHelperImpl implements ISaveHelper{
 			@Override
 			public void handle(ActionEvent event) {
 				amoeba.getScheduler().stop();
-				File file = fileChooser.showOpenDialog(mw.stage);
+				File file = fileChooser.showOpenDialog(mw);
 				if (file != null)
 					backupSystem.load(file);
 			}
@@ -210,13 +258,45 @@ public class SaveHelperImpl implements ISaveHelper{
 			@Override
 			public void handle(ActionEvent event) {
 				amoeba.getScheduler().stop();
-				File file = fileChooser.showSaveDialog(mw.stage);
+				File file = fileChooser.showSaveDialog(mw);
 				if (file != null)
 					backupSystem.save(file);
 			}
 		};
 		MainWindow.addOptionsItem("Save", eventSave);
 	}
+	
+	private void setupGraphicalTool(AmoebaMultiUIWindow window) {
+		AmoebaMultiUIWindow mw = amoebaMultiUIWindow;
+		// TODO remove if they exist items Save and Load in menu Option.
+		FileChooser fileChooser = new FileChooser();
+		fileChooser.getExtensionFilters().addAll(new FileChooser.ExtensionFilter("XML", "*.xml"),
+				new FileChooser.ExtensionFilter("All", "*.*"));
+
+		// Creation of the load menu item
+		EventHandler<ActionEvent> eventLoad = new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				amoeba.getScheduler().stop();
+				File file = fileChooser.showOpenDialog(mw);
+				if (file != null)
+					backupSystem.load(file);
+			}
+		};
+		window.addOptionsItem("Load", eventLoad);
+
+		// Creation of the save menu item
+		EventHandler<ActionEvent> eventSave = new EventHandler<ActionEvent>() {
+			@Override
+			public void handle(ActionEvent event) {
+				amoeba.getScheduler().stop();
+				File file = fileChooser.showSaveDialog(mw);
+				if (file != null)
+					backupSystem.save(file);
+			}
+		};
+		window.addOptionsItem("Save", eventSave);
+	}
 
 	@Override
 	public void setAutoSave(boolean value) {
diff --git a/AMOEBAonAMAK/src/ros/Main.java b/AMOEBAonAMAK/src/ros/Main.java
index e5d502c6c7e510dba9e4612e0145f5fc8a0561b6..b0a594150f3710bc932385dcdff4813a7769b2f8 100644
--- a/AMOEBAonAMAK/src/ros/Main.java
+++ b/AMOEBAonAMAK/src/ros/Main.java
@@ -30,7 +30,7 @@ public class Main {
 			}
 		}
 		
-		AMOEBA amoeba = new AMOEBA(config, null);
+		AMOEBA amoeba = new AMOEBA(null,null,config, null);
 		amoeba.allowGraphicalScheduler(false);
 
 		RosBridge bridge = new RosBridge();
diff --git a/AMOEBAonAMAK/src/utils/Pair.java b/AMOEBAonAMAK/src/utils/Pair.java
index aa54fb9a0bedfcdf285dc83b9289d079db8e9828..43b7d48085f1422d7a0b15376635b1eb6612353a 100644
--- a/AMOEBAonAMAK/src/utils/Pair.java
+++ b/AMOEBAonAMAK/src/utils/Pair.java
@@ -25,4 +25,10 @@ public class Pair<A,B> {
     public void setB(B b) {
    	 this.b = b;
    }
+
+	@Override
+	public String toString() {
+		return "Pair [a=" + a + ", b=" + b + "]";
+	}
+	
 }
diff --git a/AMOEBAonAMAK/src/utils/TRACE_LEVEL.java b/AMOEBAonAMAK/src/utils/TRACE_LEVEL.java
index bc2da4ad5f98cbdfc9969723dcc707528bba233d..0a1ce9acaae6c2bbd136cf3256c730dd628410b1 100644
--- a/AMOEBAonAMAK/src/utils/TRACE_LEVEL.java
+++ b/AMOEBAonAMAK/src/utils/TRACE_LEVEL.java
@@ -1,7 +1,7 @@
 package utils;
 
 public enum TRACE_LEVEL {
-	ERROR(200), NCS(100), EVENT(50), STATE(40), INFORM(20), DEBUG(0);
+	ERROR(200), CYCLE(150), NCS(100), EVENT(50), STATE(40), INFORM(20), DEBUG(0);
 
 	private final int order;
 
diff --git a/documentation/py4j_demo/fetch.py b/documentation/py4j_demo/fetch.py
new file mode 100644
index 0000000000000000000000000000000000000000..53023788007b4a02e5725c0f96460c8bc511b1df
--- /dev/null
+++ b/documentation/py4j_demo/fetch.py
@@ -0,0 +1,25 @@
+import numpy as np
+import gym
+
+
+env = gym.make('FetchReach-v0')
+obs = env.reset()
+done = False
+
+def policy(observation, desired_goal):
+    # Here you would implement your smarter policy. In this case,
+    # we just sample random actions.
+    return env.action_space.sample()
+
+while not done:
+    action = policy(obs['observation'], obs['desired_goal'])
+    obs, reward, done, info = env.step(action)
+
+    # If we want, we can substitute a goal here and re-compute
+    # the reward. For instance, we can just pretend that the desired
+    # goal was what we achieved all along.
+    substitute_goal = obs['achieved_goal'].copy()
+    substitute_reward = env.compute_reward(
+        obs['achieved_goal'], substitute_goal, info)
+    print('reward is {}, substitute_reward is {}'.format(
+        reward, substitute_reward))
diff --git a/documentation/py4j_demo/test.py b/documentation/py4j_demo/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f31cf957acab92783435a526b0b8a4b8f23db72
--- /dev/null
+++ b/documentation/py4j_demo/test.py
@@ -0,0 +1,8 @@
+import gym
+env = gym.make('FetchReach-v1')
+env.reset()
+
+for _ in range(1000):
+    env.render()
+    env.step(env.action_space.sample()) # take a random action
+env.close()