diff --git a/artifact-overview.typ b/artifact-overview.typ
index 3bf57e5eeee3b1660b5f70a5b7187b29cf599213..f041e80b901ac23c6ca6e79a3b8fe7201d5510f4 100644
--- a/artifact-overview.typ
+++ b/artifact-overview.typ
@@ -11,26 +11,14 @@
 )
 #set heading(numbering: "1.1.")
 
-#let subsetbox(content, ..args) = showybox(..args,
-  frame: (
-    footer-color: green.lighten(80%),
-    thickness: 0.125mm,
-    radius: 0mm,
-    footer-inset: (x: 0.5em, y: 0.2em),
-  ),
-  footer-style: (
-    color: luma(50),
-    align: right,
-    sep-thickness: 0.125mm,
-  ),
-  content
-)
+#let emph-overhead(x) = text(fill: rgb("ff0000"), weight: "semibold", x)
 
 #let fullbox(content, ..args) = showybox(..args,
   frame: (
     radius: 0mm,
     thickness: 0.5mm,
-    footer-inset: (x: 0.5em, y: 0.25em),
+    footer-inset: (x: 0.5em, y: 0.4em),
+    footer-color: luma(230),
   ),
   footer-style: (
     color: luma(50),
@@ -40,6 +28,22 @@
   content
 )
 
+#let filehashes(args) = {
+  set align(center)
+  table(
+    columns: (auto, auto),
+    align: left,
+    table.header(
+      [*md5 hash*], [*output file*]
+    ),
+    stroke: (x, y) => {
+      if y == 0 { (bottom: black+1pt, top: black+1.5pt) }
+      else { none }
+    },
+    ..args.map(x => raw(x))
+  )
+}
+
 #set par(justify: true)
 #show link: x => underline(offset: 0.5mm, stroke: .25mm, text(font: "DejaVu Sans Mono", weight: "semibold", size: 9.5pt, fill: blue, x))
 #let todo = todo.with(inline: true)
@@ -63,7 +67,7 @@
   - Preprint on HAL (long-term). #link("https://hal.science/hal-04566184")
   - Artifact data on Zenodo (long-term). #todo([zenodo link])
   - Artifact code on Software Heritage (long-term). #todo([software heritage link])
-  - Artifact code on our GitLab. #todo([git link])
+  - Artifact code on the authors' GitLab instance. #todo([git link])
   #line(length:100%, stroke: .5mm)
 ]
 
@@ -73,15 +77,21 @@
 This document shows how to reproduce the experimental sections (6.2 to 6.5) of article @lightpredenergy.
 We hope that this document is enough to reproduce the whole experiments from scratch.
 However, as reproducing the exact analyses and experiments conducted by the authors requires to download and store lots of input trace data (#box([$tilde.eq$ 300 Go)]) and to do some heavy computations,
-various intermediate and final results have been cached and made available on #todo[Zenodo] to enable the reproduction of only subparts of the experimental pipeline. In particular, the final analyses of the article are done in standalone notebooks whose input data is available.
+various intermediate and final results have been cached and made available on #todo[Zenodo] to enable the reproduction of only subparts of the experimental pipeline. In particular, the final analyses of the article are done in standalone notebooks whose input data is available and small.
 
-Unless otherwise specified, all commands shown in this document similar to the one in the box below are expressed in #link("https://en.wikipedia.org/wiki/Bourne_shell")[`sh`] and thus compatible with `bash` and `zsh`. Every command that takes a significant amount of time, storage or bandwidth have its overhead given in the second part of the box. Unless otherwise specified, amounts of times are those we obtained on a powerful machine (2x Intel Xeon Gold 6130).
+Unless otherwise specified, all commands shown in this document are expressed in #link("https://en.wikipedia.org/wiki/Bourne_shell")[`sh`] and are thus compatible with `bash` and `zsh`. Every command that takes a significant amount of time, storage or bandwidth has its overhead given in the second part of the box. Unless otherwise specified, execution times have been obtained on a powerful computation node with 2x Intel Xeon Gold 6130 CPUs.
+A #link("https://en.wikipedia.org/wiki/MD5")[MD5 hash] is given for the output files that we are think are important, and all these files can directly be downloaded on #todo[zenodo].
+The MD5 hashes have been computed by #link("https://www.gnu.org/software/coreutils/")[GNU coreutils]'s `md5sum` command.
 
 #fullbox(footer: [Time: 00:00:01.])[
   ```sh
-  echo 'Example command'
+  echo 'All commands should look like this'
+  echo 'Example command' > /tmp/example-output
   sleep 1
   ```
+  #filehashes((
+    "fb9807302a8a925bb7a3963d03cedd04", "/tmp/example-output",
+  ))
 ]
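+
+A file can be checked against its expected MD5 hash by feeding a `hash  filename` line to `md5sum --check`. For instance, the following command (a sketch, assuming the example command above has been run) verifies the example output file and prints `/tmp/example-output: OK` on success.
+
+#fullbox[
+  ```sh
+  echo 'fb9807302a8a925bb7a3963d03cedd04  /tmp/example-output' | md5sum --check -
+  ```
+]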
 
 = Getting Started Guide
@@ -89,12 +99,14 @@ All the software environments required to reproduce the analyses and experiments
 Nix can build the *full* software stack needed for this experiment as long as source code remains available. As we also put our source code on #link("https://www.softwareheritage.org/")[Software Heritage] we hope that this artifact will have long-term longevity. For the sake of this artifact reviewers' quality of life, we have set up a binary cache with precompiled versions of the software used in the experiments.
 
 No special hardware is required to reproduce our work. We think that our Nix environments will work on future Nix versions, but for the sake of traceability we stress that we have used Nix 2.18.0 installed either by #link("https://archive.softwareheritage.org/swh:1:rev:b5b47f1ea628ecaad5f2d95580ed393832b36dc8;origin=https://github.com/DavHau/nix-portable;visit=swh:1:snp:318694dfdf0449f0a95b20aab7e8370cff809a66")[nix-portable 0.10.0] or directly available via NixOS using channel `23-11`.
-Our software environments likely work on all platforms supported by Nix (Linux on `i686`/`x86_64`/`aarch64` and MacOS on `x86_64`/`aarch64` as of 2024-05-07) but we have only tested it on Linux on `x86_64`. More precisely, we have used the #link("https://www.grid5000.fr/w/Grenoble:Hardware#dahu")[Dahu Grid'5000 cluster] (Dell PowerEdge C6420, 2x Intel Xeon Gold 6130, 192 GiB of RAM) on the default operating system available on Grid'5000 as of 2024-05-07 (Debian `5.10.209-2` using Linux kernel `5.10.0-28-amd64`).
+Our software environments likely work on all platforms supported by Nix (Linux on `i686`/`x86_64`/`aarch64` and MacOS on `x86_64`/`aarch64` as of 2024-05-07) but we have only tested them on Linux on `x86_64`. More precisely, we have used the #link("https://www.grid5000.fr/w/Grenoble:Hardware#dahu")[Dahu Grid'5000 cluster] (Dell PowerEdge C6420, 2x Intel Xeon Gold 6130, 192 GiB of RAM) on the default operating system available on Grid'5000 as of 2024-05-07 (Debian `5.10.209-2` using Linux kernel `5.10.0-28-amd64`).
 
 == Install Nix
-If you are already using NixOS, Nix should already be usable on your system.
-Otherwise up-to-date information on how to install Nix is available on #link("https://nixos.org/download/").
-As of 2024-05-07 the recommended command to install Nix (on a Linux system running systemd, with SELinux disabled and `sudo` usable) is to run the following command.
+If you are already using NixOS, Nix should already be usable on your system and you can skip to @enable-nix-flakes.
+
+Otherwise, you must install Nix to use the software we have packaged.
+We recommend following the #link("https://nixos.org/download/")[up-to-date documentation on how to install Nix].
+As of 2024-05-07 the recommended command to install Nix (on a Linux system running systemd, with SELinux disabled and `sudo` usable) is the following.
 
 #fullbox[
   ```sh
@@ -106,26 +118,42 @@ Please note that you may need to launch a new shell, to source a file or to modi
 
 *Test your installation.* Launching `nix-shell --version` should run and print you the Nix version installed.
 
-== Enable Nix flakes
+== Enable Nix flakes <enable-nix-flakes>
 Our Nix packages rely on #link("https://nixos.wiki/wiki/Flakes")[Nix flakes], which are not enabled by default as of 2024-05-07.
-Up-to-date information on how to enable them can be found on the #link("https://nixos.wiki/wiki/Flakes")[Nix flakes documentation].
-If you are using NixOS and as of 2024-05-07, flakes can be enabled by setting the following in your system configuration file.
-#h(1fr) #box(stroke: (thickness: .1mm, dash: "densely-dashed"), fill: luma(90%), outset:1mm)[
-  #set align(bottom)
-  ```nix
-  nix.settings.experimental-features = [ "nix-command" "flakes" ];
-  ```
-]
-
-Otherwise, as of 2024-05-07, the following commands should enable flakes on non-NixOS Linuxes.
-#fullbox[
-  ```sh
-  mkdir -p ~/.config/nix/
-  echo 'experimental-features = nix-command flakes' > ~/.config/nix/nix.conf
-  ```
-]
+Nix flakes must be enabled to use the software we have packaged.
+We recommend to use #link("https://nixos.wiki/wiki/Flakes")[up-to-date documentation on how to enable flakes], but we also explain how to install it as of 2024-05-07 below. Please note that the way to enable flakes depend on whether you are on NixOS or not.
+
+#grid(
+  columns: (1fr, 1fr),
+  column-gutter: 5mm,
+  [
+    *If you are using NixOS*, as of 2024-05-07, flakes can be enabled by setting at least the `nix-command` and `flakes` `experimental-features` in your system configuration file. In other words, your NixOS configuration file should have content similar to the one in the box below.
+
+    #{
+      rect(stroke: (thickness: .1mm, dash: "densely-dashed"), fill: luma(97%), outset:1mm)[
+        ```nix
+        nix.settings.experimental-features = [ "nix-command" "flakes" ];
+        ```
+      ]
+    }
+  ],
+  [
+    If you are *not using NixOS*, as of 2024-05-07, flakes can be enabled by setting at least the `nix-command` and `flakes` `experimental-features` in your Nix configuration file.
+    The Nix configuration file path is `~/.config/nix/nix.conf` on non-NixOS Linuxes.
+    In other words, your Nix configuration file should have content similar to the one in the box below.
+
+    #{
+      set align(right)
+      rect(stroke: (thickness: .1mm, dash: "densely-dashed"), fill: luma(97%), outset:1mm)[
+        ```nix
+        experimental-features = nix-command flakes
+        ```
+      ]
+    }
+  ]
+)
 
-*Testing your flakes configuration.* Launching `nix build 'github:nixos/nixpkgs?ref=23.11#hello'` should create a `result` symbolic link in your current directory. Then, launching `./result/bin/hello` should print `Hello, world!`.
+*Test your flakes configuration.* Launching `nix build 'github:nixos/nixpkgs?ref=23.11#hello'` should create a `result` symbolic link in your current directory. Then, launching `./result/bin/hello` should print `Hello, world!`.
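+
+In other words, the following commands should succeed, with the second one printing `Hello, world!`.
+
+#fullbox[
+  ```sh
+  nix build 'github:nixos/nixpkgs?ref=23.11#hello'
+  ./result/bin/hello
+  ```
+]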
 
 == Configure Nix to use our binary cache
 This step is completely optional but recommended for this artifact reviewers, as it enables to download precompiled versions of our software environments instead of building them on your own machine.
@@ -154,26 +182,64 @@ This step is completely optional but recommended for this artifact reviewers, as
 - Job power predictions for all prediction methods. #todo[cache mean/max power prediction tarballs]
 
 == Modeling of the power behavior of Marconi100 nodes
-*Inputs.* None.\
-*Outputs.*
-- Marconi100 power and job traces on your disk.
-- Marconi100 nodes power model.
-- Notebook that analyses the power profiles of M100 nodes.
+#todo[introduce this section]
 
 === Get power and job Marconi100 traces on your disk <sec-m100-power-job-traces>
-This section downloads parts of the Marconi100 trace from Zenodo, checks that the downloaded parts have the right content (via a md5 checksum), extracts the data needed by later stages of the pipeline (power usage traces, jobs information traces), then removes unneeded extracted files and the downloaded files.
+This section downloads parts of the Marconi100 trace as archives from Zenodo, checks that the archives have the right content (via an MD5 checksum), extracts the data needed by later stages of the pipeline (node power usage traces, job information traces), then removes unneeded extracted files and the downloaded archives.
 
-#fullbox(footer:[Download: 254 Go. Final disk used: 2.5 Go. Time: 00:40:00.])[
+#fullbox(footer:[#emph-overhead[Download+temporary disk: 254 Go.] Final disk: 928 Mo. #emph-overhead[Time: 00:40:00.]])[
   ```sh
   nix develop .#download-m100-months --command \
-              m100-data-downloader ./m100-data 22-01 22-02 22-03 22-04 22-05 22-06 \
-                                               22-07 22-08 22-09
+              m100-data-downloader ./m100-data \
+                                   22-01 22-02 22-03 22-04 22-05 22-06 22-07 22-08 22-09
+  ```
+  #filehashes((
+    "604aa2493d688a77a7f771ad1dc91621", "m100-data/22-01_jobs.parquet",
+    "53e5939579412cb99347d14c62ce789e", "m100-data/22-02_jobs.parquet",
+    "4da725eb59b311c7b7f5568bd389d120", "m100-data/22-03_jobs.parquet",
+    "6091df746cf94d346a3900153777496d", "m100-data/22-04_jobs.parquet",
+    "7f1e442f59203b990217ecefb56aec4b", "m100-data/22-05_jobs.parquet",
+    "f8f3fa87a6310f73f8c2e8ac013cebaa", "m100-data/22-06_jobs.parquet",
+    "350040cbc9532184679f226eff73c6f5", "m100-data/22-07_jobs.parquet",
+    "11eebd414fbbbe2b4d9f3aa1568260ef", "m100-data/22-08_jobs.parquet",
+    "9d60ba75bd53ab8e689097f2ccfe2f42", "m100-data/22-09_jobs.parquet",
+    "9a0a5a883862889ea29ebe866038aacf", "m100-data/22-01_power_total.parquet",
+    "a13b1a287197cdaf18ca172c0cf6eec8", "m100-data/22-02_power_total.parquet",
+    "f4c3f05ff5a6b28da48d56c11f8a5146", "m100-data/22-03_power_total.parquet",
+    "f02745d785f6afa812a67bd70ca8090f", "m100-data/22-04_power_total.parquet",
+    "2969a1a6f501f35b12f80ec4f3c7b298", "m100-data/22-05_power_total.parquet",
+    "4bd100c4ebd048c80dea58f064670e1a", "m100-data/22-06_power_total.parquet",
+    "2631979125b4454e177977da6a482073", "m100-data/22-07_power_total.parquet",
+    "b36373acddc0fbf41e7171ded786e877", "m100-data/22-08_power_total.parquet",
+    "82c3f6f013c9254cabfd23c67a3e7b0f", "m100-data/22-09_power_total.parquet",
+  ))
+]
+
+=== Aggregate power traces per node
+The following command traverses all the Marconi100 power traces and counts how many times each node was at each power value.
+#fullbox(footer:[Disk: 1 Mo. Time: 00:03:00.])[
+  ```sh
+  nix develop .#py-scripts --command \
+              m100-agg-power-months ./m100-data/ ./m100-data/22-agg_ \
+                                    22-01 22-02 22-03 22-04 22-05 22-06 22-07 22-08 22-09
   ```
+  #filehashes((
+    "20e5d7b3f941efb1c5b6083e4752b647", "m100-data/22-agg_power_total.csv"
+  ))
 ]
 
 === Analyze Marconi100 power traces <sec-analyze-m100-power-traces>
-*Outputs.*
-- powermodel file
+The following command runs the R notebook that analyzes the Marconi100 power traces and produces the node power model required by @sec-sched.
+#fullbox(footer:[Disk: 1.7 Mo. Time: 00:00:10.])[
+  ```sh
+  nix develop .#r-notebook --command \
+              Rscript notebooks/run-rmarkdown-notebook.R \
+                      notebooks/m100-power-trace-analysis.Rmd
+  ```
+  #filehashes((
+    "a2ebebb21586d1adfa63fc917e1517bd", "m100-data/22-powermodel_total.csv",
+    "9829bb1ebb9ca5811676db3c56b6458c", "notebooks/m100-power-trace-analysis.html"
+  ))
+]
 
 == Job scheduling with power prediction <sec-sched>
 This section shows how to reproduce Sections 6.4 and 6.5 of article @lightpredenergy.
@@ -183,31 +249,40 @@ This section shows how to reproduce Sections 6.4 and 6.5 of article @lightpreden
 The following command generates the SimGrid platform used for the simulations.
 This requires a power model of the Marconi100 nodes (as outputted by @sec-analyze-m100-power-traces).
 
-#fullbox[
+#fullbox(footer:[Time: ])[
   ```sh
   nix develop .#py-scripts --command \
               m100-generate-sg-platform ./m100-data/22-powermodel_total.csv 100 \
                                         -o ./expe-sched/m100-platform.xml
   ```
+
+  #filehashes((
+    "b5c28261bbe6bcea017ac03b1ef97bd9", "expe-sched/m100-platform.xml",
+  ))
 ]
 ==== Generate simulation instances
 The following commands generate workload parameters (_i.e._, when each workload should start and end). The start points are taken randomly during the 2022 M100 trace.
 
-#fullbox[
+#fullbox(footer:[Disk: 1.3 Mo. Time: 00:00:01.])[
   ```sh
-  nix develop .#gen-simu-instances --command \
+  nix develop .#py-scripts --command \
               m100-generate-expe-workload-params -o ./expe-sched/workload-params.json
-  nix develop .#gen-simu-instances --command \
+  nix develop .#py-scripts --command \
               m100-generate-expe-params ./expe-sched/workload-params.json \
-              -o ./expe-sched/simu-instances.json
+                                        ./expe-sched/m100-platform.xml \
+                                        -o ./expe-sched/simu-instances.json
   ```
+  #filehashes((
+    "e1b4475f55938ad6de4ca500bddc7908", "expe-sched/workload-params.json",
+    "3a7e7d8183dcb733d6b49d86b2ab3b14", "expe-sched/simu-instances.json",
+  ))
 ]
 
 ==== Merge job power predictions and jobs information into a single file
 The job power predictions (as outputted by @sec-job-power-pred) are two archives that we assume are on your disk in the `./user-power-predictions` directory.
 These archives contain gzipped files for each user.
 To make things more convenient for the generation of simulation inputs, all the job power prediction files are merged into a single file with the following commands.
-#fullbox(footer: [Temporary disk used: 519 Mo. Final disk used: 25 Mo. Time: 00:00:30.])[
+#fullbox(footer: [Temporary disk: 519 Mo. Final disk: 25 Mo. Time: 00:00:30.])[
   ```sh
   mkdir ./user-power-predictions/tmp
   nix develop .#merge-m100-power-predictions --command \
@@ -242,7 +317,7 @@ The following command generates all the workloads needed by the simulation.
 *This step is very long!*
 #todo[zenodo]
 
-#fullbox(footer: [Time: 05:00:00.])[
+#fullbox(footer: [#emph-overhead[Time: 05:30:00.]])[
   ```sh
   nix develop .#py-scripts --command \
               m100-generate-expe-workloads ./expe-sched/workload-params.json \
@@ -250,25 +325,34 @@ The following command generates all the workloads needed by the simulation.
                                           ./m100-data \
                                           -o /tmp/wlds
   ```
+
+  The output is the `/tmp/wlds` directory, which should contain 1.4 Go of files:
+  - 30 Batsim workload files -- _e.g._, `/tmp/wlds/wload_delay_5536006.json`
+  - 30 unused `input_watts` files -- _e.g._, `/tmp/wlds/wload_delay_5536006_input_watts.csv`
+  - 1 directory per replayed job in `/tmp/wlds/jobs/` (total of 121544 jobs)
+  - 1 dynamic power trace per job in `/tmp/wlds/jobs/JOBID_STARTREPLAYTIME/dynpower.csv`
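+
+  A quick sanity check on these counts (a sketch, assuming GNU coreutils and findutils are available):
+  ```sh
+  ls /tmp/wlds/wload_*.json | wc -l                            # should print 30
+  find /tmp/wlds/jobs -mindepth 1 -maxdepth 1 -type d | wc -l  # should print 121544
+  ```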
 ]
 
 
 === Run the simulation campaign
-The following command runs the whole simulation campaign. This requires all the 
+The following command runs the whole simulation campaign.
+This requires that you have generated or downloaded the simulation instances: file `./expe-sched/simu-instances.json` should exist.
+It also requires the workloads: directory `/tmp/wlds` should exist and contain #box([$tilde.eq$ 1.4 Go]) of files.
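+
+A quick way to check both prerequisites (a sketch, assuming GNU coreutils):
+
+#fullbox[
+  ```sh
+  ls -lh ./expe-sched/simu-instances.json   # the file should exist
+  du -sh /tmp/wlds                          # should report about 1.4G
+  ```
+]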
 
-#fullbox[
+#fullbox(footer: [Time: 00:06:00.])[
   ```sh
   nix develop .#simulation --command \
-              m100-run-batsim-instances ./expe-sched/simu-instances.json \
-                                        -w /tmp/wlds \
-                                        -o /tmp/simout \
-                                        --output_state_file ./expe-sched/exec-state.json \
-                                        --output_result_file ./expe-sched/agg-result.csv
+              m100-run-batsim-instances \
+                ./expe-sched/simu-instances.json \
+                -w /tmp/wlds \
+                -o /tmp/simout \
+                --output_state_file ./expe-sched/simu-campaign-exec-state.json \
+                --output_result_file ./expe-sched/simu-campaign-agg-result.csv
   ```
 ]
 
 === Analyze the simulation campaign outputs
-#fullbox[
+#fullbox(footer:[Time on a low-performance laptop: 00:00:30.])[
   ```sh
   nix develop .#r-notebook --command \
               Rscript notebooks/run-rmarkdown-notebook.R \