From 2b75cc423d55b963ff8dab524d6ca15710bf0090 Mon Sep 17 00:00:00 2001
From: Caroline DE POURTALES <cdepourt@montana.irit.fr>
Date: Wed, 13 Jul 2022 13:17:41 +0200
Subject: [PATCH] readme

---
 README.md | 49 +++++++++++++++++++++++++++++++++++++++++++------
 utils.py  | 37 +++++++++++++++++++++++++++++++++++++
 2 files changed, 80 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index a51b572..c3b2fcc 100644
--- a/README.md
+++ b/README.md
@@ -1,22 +1,59 @@
 # FXTool Kit by PNRIA
 
-## How to use it 
+## About explainability
 
-Set the parameters and upload the models.
-The instance should either be a .txt at format (feature1=...,feature2=...) or a json file
+This project deploys the explainability algorithms developed by Joao Marques-Silva’s team at ANITI on a web platform.
+
+The team has developed multiple algorithms to explain decision trees, random forests, naive Bayes classifiers, neural networks and more, but these algorithms were not readily accessible to other researchers, so the team wants a “showcase site”.
+The tools are described in recent papers by ANITI’s DeepLever Chair, with prototypes already available for most of the papers.
 
-### Requirements 
+Joao Marques-Silva's work is available on GitHub: [GitHub repo](https://github.com/jpmarquessilva/)
+Several of those repositories are integrated here.
+
+## Requirements 
 
 Python-3.8.10
 
-`pip install -r requirements.txt` 
+```commandline
+$ pip install dash
+$ pip install pandas
+$ pip install -r requirements.txt
+```
 
 Import graphviz manually
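+
+For example, one common way to do this (assuming a Debian-based system and the Python graphviz bindings; adapt to your platform):
+
+```commandline
+$ sudo apt-get install graphviz
+$ pip install graphviz
+```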
 
-### Running
+## Structure
+
+The structure is adapted for deployment on Heroku. If you don't wish to deploy, you can delete Procfile and runtime.txt.
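+
+For reference, these two files typically look something like the following (illustrative only; the exact contents in this repository may differ):
+
+```
+# Procfile (how Heroku starts the web process; assumes a gunicorn entry point)
+web: gunicorn app:server
+
+# runtime.txt (the Python version Heroku should use)
+python-3.8.10
+```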
+
+
+## Running locally
 
 Run app.py then visit localhost
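+
+For example (Dash serves on http://127.0.0.1:8050/ by default, unless app.py overrides the host or port):
+
+```commandline
+$ python app.py
+```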
 
+Set the parameters and upload the models (for a random forest you should also upload the data).
+The instance should either be a .txt file in the format `feature1=...,feature2=...` or a JSON file.
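+
+For example, a `.txt` instance with two features could look like this (hypothetical values):
+
+```
+feature1=0.5,feature2=1.0
+```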
+
+## Deployed app
+
+Visit the deployed app:
+[Web app](https://aniti-fxtools.herokuapp.com/)
+
+Set the parameters and upload the models (for a random forest you should also upload the data).
+The instance should either be a .txt file in the format `feature1=...,feature2=...` or a JSON file, as in the example under Running locally.
+
+To deploy new changes, run:
+```commandline
+$ git status # view the changes
+$ git add .  # add all the changes
+$ git commit -m 'a description of the changes'
+$ git push heroku master
+```
+
+Note that this free Heroku deployment has a limited slug size, so the app may eventually need to be hosted elsewhere.
+
 ## Authors
 
+A CNRS-IRIT collaboration with ANITI for PNRIA.
+
 [de Pourtales Caroline](https://www.linkedin.com/in/caroline-de-pourtales/)
\ No newline at end of file
diff --git a/utils.py b/utils.py
index 248358b..fd8896d 100644
--- a/utils.py
+++ b/utils.py
@@ -12,6 +12,16 @@ sys.modules['xrf'] = xrf
 
 
 def parse_contents_graph(contents, filename):
+    r"""
+    Extract content from model file
+
+    Args:
+        contents : the contents of the model file
+        filename : the name of the model file
+
+    Return:
+        Content of model file
+    """
     content_type, content_string = contents.split(',')
     decoded = base64.b64decode(content_string)
     try:
@@ -34,6 +44,16 @@ def parse_contents_graph(contents, filename):
 
 
 def parse_contents_data(contents, filename):
+    r"""
+    Extract content from data file
+
+    Args:
+        contents : the contents of the data file
+        filename : the name of the data file
+
+    Return:
+        Content of data file
+    """
     content_type, content_string = contents.split(',')
     decoded = base64.b64decode(content_string)
     try:
@@ -51,6 +71,9 @@ def parse_contents_data(contents, filename):
 
 
 def split_instance_according_to_format(instance, features_names=None):
+    r"""
+    Format instance
+    """
     if "=" in instance:
         splitted_instance = [tuple((v.split('=')[0].strip(), float(v.split('=')[1].strip()))) for v in
                              instance.split(',')]
@@ -66,6 +89,16 @@ def split_instance_according_to_format(instance, features_names=None):
 
 
 def parse_contents_instance(contents, filename):
+    r"""
+    Extract content from instance file
+
+    Args:
+        contents : the contents of the instance file
+        filename : the name of the instance file
+
+    Return:
+        Content of instance file
+    """
     content_type, content_string = contents.split(',')
     decoded = base64.b64decode(content_string)
     try:
@@ -101,6 +134,10 @@ def parse_contents_instance(contents, filename):
 
 
 def extract_data(data):
+    r"""
+    Args:
+        data : json containing information about the models available for explicability
+    """
     names_models = [data[i]['ml_type'] for i in range(len(data))]
     dict_components = {}
     dic_solvers = {}
-- 
GitLab