diff --git a/README.md b/README.md
index ad51ed13ff2e5d00b0f8151f9e032a627386a958..79b13b52b1b05dc417ca2736db7f5897a435999d 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,5 @@
 # expetator
 
-A tool for running HPC applications using several type of leverages (DVFS) and low-level monitoring (hardware performance counters, RAPL)
-
-
+A tool for running HPC benchmarks using several types of leverages (DVFS) and low-level monitoring (hardware performance counters, RAPL), mostly on Grid'5000.
 
+Documentation is available here: https://expetator.readthedocs.io/
diff --git a/docs/expetator.benchmarks.rst b/docs/expetator.benchmarks.rst
index 1f94251d9bf52757532c3cea7a1920acbe5a1c25..8120249f70ed57bdfffc155ea236002c2a8263d9 100644
--- a/docs/expetator.benchmarks.rst
+++ b/docs/expetator.benchmarks.rst
@@ -1,25 +1,146 @@
-expetator.benchmarks package
-============================
+Benchmarks
+==========
 
-.. autoclass:: expetator.benchmarks.cpubench.CpuBench
+Available benchmarks
+--------------------
 
-.. autoclass:: expetator.benchmarks.genericbench.GenericBench
+.. autoclass:: expetator.benchmarks.CpuBench
 
-.. autoclass:: expetator.benchmarks.gpucpubench.GpuCpuBench
+.. autoclass:: expetator.benchmarks.GenericBench
 
-.. autoclass:: expetator.benchmarks.gpumembench.GpuMemBench
+.. autoclass:: expetator.benchmarks.GpuCpuBench
 
-.. autoclass:: expetator.benchmarks.gromacsbench.GromacsBench
+.. autoclass:: expetator.benchmarks.GpuMemBench
 
-.. autoclass:: expetator.benchmarks.membench.MemBench
+.. autoclass:: expetator.benchmarks.GromacsBench
 
-.. autoclass:: expetator.benchmarks.mpibench.MpiBench
+.. autoclass:: expetator.benchmarks.MemBench
 
-.. autoclass:: expetator.benchmarks.npbbench.NpbBench
+.. autoclass:: expetator.benchmarks.MpiBench
 
-.. autoclass:: expetator.benchmarks.percentagebench.PercentageBench
+.. autoclass:: expetator.benchmarks.NpbBench
 
-.. autoclass:: expetator.benchmarks.sleepbench.SleepBench
+.. autoclass:: expetator.benchmarks.PercentageBench
 
-.. autoclass:: expetator.benchmarks.watermark.WaterMark
+.. autoclass:: expetator.benchmarks.SleepBench
 
+.. autoclass:: expetator.benchmarks.WaterMark
+
+Example of use of an available benchmark:
+
+.. code-block:: python
+
+    import expetator.experiment as experiment
+    from expetator.benchmarks import SleepBench
+
+    experiment.run_experiment("/tmp/sleep_demo",
+                              [ SleepBench(default_time=2) ]
+                             )
+
+The result will be in a file **/tmp/sleep_demo_${HOST}_${TIMESTAMP_START}**
+
+	       
+How to make your own benchmark
+------------------------------
+
+A benchmark is an object with the following properties
+ * The constructor takes some parameters to configure the particular instance of the benchmark. It can be a duration, an amount of work, an internal parameter. It can also be several of each. It can be a list of duration for example.
+ * The only required member is **self.names**
+   * **self.names** is the name of the benchmark. If the object provides several benchmarks, they are provided here
+ * The builder (**build** method) must build the benchmark, download necessary files, install packages, ... All the shared elements must be put in */tmp/bin* which will be copied on all nodes. To do so it can use **executor** which provides tools to run applications on the main node or on all nodes, and provides information such as the number of cores. It must also return the dictionary of parameters.
+ * The benchmark itself must return *EXIT_SUCCESS*
+ * The **run** method will take the name of a benchmark, a parameter and will run it. It must return a couple *(performance, unique_name)* where *unique_name* identifies this particular benchmark and parameter.
+
+The workflow will be the following:
+  * First the benchmark is built using the **build** method, which returns the *parameters*
+  * Then, it is run:
+    
+    * For each name in **names**
+      
+      * For each parameter associated with the name in the dictionary returned by **build**
+
+	* The method **run** is called with the corresponding parameters
+	* Each run will create a new line in the output file including the value returned by **run**, which should be a couple *(performance, unique_name)*
+
+
+Example:
+  If **bench.names = {'b1', 'b2'}** and **build** returns **{'b1':[1], 'b2':[4,8]}** then expetator will call one after the other:
+
+  * bench.run('b1', 1, executor)
+  * bench.run('b2', 4, executor)
+  * bench.run('b2', 8, executor)
+
+**executor** is used to run the benchmark on the main node, on all nodes, ...
+
+Example of benchmark
+--------------------
+
+A benchmark is an object providing methods to be built and executed.
+
+Here is an example of a simple benchmark in a file **demo_bench.py**
+
+.. code-block:: python
+
+    import expetator.experiment as experiment
+
+    class DemoBench:
+        """Demo benchmark. To run,
+        python3 demo_bench.py
+        with the file demo.c in the same directory"""
+
+        def __init__(self, params=[30]):
+            self.names = {"demo"}
+            self.params = params
+
+        def build(self, executor):
+            """Builds the demo benchmark in /tmp/bin/ and
+            returns the parameters used during execution
+            as a dictionary linking elements from self.names
+            with a list of parameters to pass during execution.
+            Here it returns {"demo": self.params} so the
+            run method is called once per value in
+            self.params, with "demo" and that value as
+            its arguments"""
+
+            executor.local("gcc demo.c -o /tmp/bin/demo")
+
+            params = {"demo": self.params}
+            return params
+
+        def run(self, bench, param, executor):
+            """Runs the demo benchmark. bench comes from
+            self.names and param comes from self.params.
+            The application must return EXIT_SUCCESS"""
+
+            output = executor.local("/tmp/bin/demo %s" % (param))
+            return output.strip(), "demo-%s" % (param)
+
+    if __name__ == "__main__":
+        experiment.run_experiment(
+            "/tmp/demo", [DemoBench()],
+            leverages=[], monitors=[], times=1
+        )
+
+With the following benchmark code in a file **demo.c** in the same directory
+
+.. code-block:: C
+
+    #include <stdio.h>
+    #include <stdlib.h>
+    #include <unistd.h>
+
+    int main(int argc, char**argv) {
+        int duration = atoi(argv[1]);
+        sleep(duration);
+        printf("%d\n", duration);
+        return 0;
+    }
+
+And the following command to actually run the benchmark
+
+.. code-block:: bash
+
+    pip3 install expetator
+    python3 demo_bench.py
+
+The results will be saved in a file called */tmp/demo_${HOST}_${TIMESTAMP_START}*
diff --git a/docs/expetator.leverages.rst b/docs/expetator.leverages.rst
index 15dbf1f02af7976ec28c5d0ccd90bbecedf9f245..029b030da0123f1a2114e0baf9fd53cf43773b1f 100644
--- a/docs/expetator.leverages.rst
+++ b/docs/expetator.leverages.rst
@@ -1,19 +1,19 @@
 expetator.leverages package
 ===========================
 
-.. autoclass:: expetator.leverages.dvfs.Dvfs
+.. autoclass:: expetator.leverages.Dvfs
 
-.. autoclass:: expetator.leverages.gpuclock.GpuClock
+.. autoclass:: expetator.leverages.GpuClock
 
-.. autoclass:: expetator.leverages.gpupow.GpuPower
+.. autoclass:: expetator.leverages.GpuPower
 
-.. autoclass:: expetator.leverages.neosched.NeoSched
+.. autoclass:: expetator.leverages.NeoSched
 
-.. autoclass:: expetator.leverages.nodeepsleep.Nodeepsleep
+.. autoclass:: expetator.leverages.Nodeepsleep
 
-.. autoclass:: expetator.leverages.pct.Pct
+.. autoclass:: expetator.leverages.Pct
 
-.. autoclass:: expetator.leverages.powercap.Powercap
+.. autoclass:: expetator.leverages.Powercap
 
-.. autoclass:: expetator.leverages.template.Template
+.. autoclass:: expetator.leverages.Template
 
diff --git a/docs/index.rst b/docs/index.rst
index 8b995ca7766dd718cd750c320943d4c0df932022..c1a14425cc2b6b3e692f3e26c81c08b02c076513 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -4,6 +4,7 @@ Welcome to Expetator's documentation
 .. toctree::
    :maxdepth: 4
 
+   expetator
    expetator.benchmarks
    expetator.leverages
    expetator.monitors
diff --git a/expetator/benchmarks/__init__.py b/expetator/benchmarks/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..462eb94aa4c08a5d6ffd37742bfb65a4318a33fb 100644
--- a/expetator/benchmarks/__init__.py
+++ b/expetator/benchmarks/__init__.py
@@ -0,0 +1,11 @@
+from expetator.benchmarks.cpubench import CpuBench
+from expetator.benchmarks.genericbench import GenericBench
+from expetator.benchmarks.gpucpubench import GpuCpuBench
+from expetator.benchmarks.gpumembench import GpuMemBench
+from expetator.benchmarks.gromacsbench import GromacsBench
+from expetator.benchmarks.membench import MemBench
+from expetator.benchmarks.mpibench import MpiBench
+from expetator.benchmarks.npbbench import NpbBench
+from expetator.benchmarks.percentagebench import PercentageBench
+from expetator.benchmarks.sleepbench import SleepBench
+from expetator.benchmarks.watermark import WaterMark
diff --git a/expetator/leverages/__init__.py b/expetator/leverages/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..84d351acb57ee28b11e9b2514d62949d0b1cd1ca 100644
--- a/expetator/leverages/__init__.py
+++ b/expetator/leverages/__init__.py
@@ -0,0 +1,8 @@
+from expetator.leverages.dvfs import Dvfs
+from expetator.leverages.gpuclock import GpuClock
+from expetator.leverages.gpupow import GpuPower
+from expetator.leverages.neosched import NeoSched
+from expetator.leverages.nodeepsleep import Nodeepsleep
+from expetator.leverages.pct import Pct
+from expetator.leverages.powercap import Powercap
+from expetator.leverages.template import Template