######################################
###### DISCOURSE SEGMENTOR 2022 ######
######################################
""" This the main script
    And the only one to run,
    after completion of config.json """
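# For reference, a minimal config.json might look like the sketch below.
# The field names here are only inferred from the attribute accesses in
# this script (steps.data.path, steps.data.name, steps.ssplit,
# steps.toolkit, ...); the authoritative schema is defined by the Input
# and Process classes in classes_def.py.
#
# {
#   "input": { ... path, name, file extension, language ... },
#   "steps": { ... main mode (train/test/annotation), ssplit, tokenization,
#              ner_init, toolkit, model, eval, post-processing flags ... }
# }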

import os
import sys
import argparse
import pandas as pd  # for future clean output in a dataframe
import json

from classes_def import Input, Process
import utils
import utils.fr_tokenize as tk
import utils.conv2ner as c2n
import utils.json2conll as j2c
import utils.conll2bracket as c2bracket
import utils.sent_split as ssent
import utils.training_allennlp as tr_allen


# Function to load the pipeline configuration
def get_config_infos(config_file):
    with open(config_file) as f:
        infos = json.load(f)
    data_in = Input(infos['input'])
    actions = Process(infos['steps'], data_in)
    print(f"Data to be processed: {data_in.name}")
    return actions
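# Example usage (the config path is hypothetical; pass whatever location
# your completed config.json lives at):
#   steps = get_config_infos("../config.json")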


# Function to load an existing model -> only "tony" for now
def get_model(model_name):
    if model_name == "tony":
        arch = "french_tokens.tar.gz"
        if not os.path.isfile(f"../model/{arch}"):
            dl = f"wget https://zenodo.org/record/4235850/files/{arch} -P ../model --progress=bar"
            os.system(dl)
        else:
            print("Tony already in place!")
        return f"../model/{arch}"
    # Previously, an unknown name fell through and crashed on an
    # undefined variable; fail explicitly instead.
    sys.exit(f"Unknown model: {model_name}")



def main(steps):
    # "steps" is the ordered list of actions to perform, built by the
    # Process class from config.json (for now, the simple use case 1).

    # FN: we need either sentence splitting, or tokenization, or neither
    if steps.ssplit:                # python code/ssplit/parse_corpus.py ${dataset} --parser stanza --out_dir data
    #### Split text into sentences: not in use case 1
        if steps.ssplitor != "stanza":
            sys.exit("Please define a sentence splitter.")
        data_in = f"{steps.data.path}/{steps.data.name}{steps.data.file}"
        data_tok = f"{steps.data.path}/{steps.data.name}.tok"
        print(f"Starting sentence splitting...to {data_tok}")
        ssent.main(data_in, data_tok, steps.ssplitor, steps.data.lang)
    elif steps.toke:
    #### Tokenization of the text    # python ${SEG_DIR}/code/utils/fr_tokenize.py $RAW > ${RAW}.tok
        data_in = f"{steps.data.path}/{steps.data.name}{steps.data.file}"
        data_tok = f"{steps.data.path}/{steps.data.name}.tok"
        print(f"Starting tokenization...to {data_tok}")
        tk.main(data_in, data_tok)  # .ss -> .tok
    else:
        data_tok = f"{steps.data.path}/{steps.data.name}{steps.data.file}"


    if steps.ner_init:
        if steps.main == "test" or steps.main == "annotation":
    #### Convert to a NER-style problem    # python $RUNTIME/conv2ner.py ${RAW}.tok > ${RAW}.ner.tok
            data_ner = f"{steps.data.path}/{steps.data.name}.ner.tok"
            print(f"Starting conversion to NER format...to {data_ner}")
            c2n.main(data_tok, data_ner, steps.data.file)
        elif steps.main == "train":
            for part in ["train", "dev", "test"]:
                data_tok = f"{steps.data.path}/{steps.data.name}_{part}{steps.data.file}"
                data_ner = f"{steps.data.path}/{steps.data.name}_{part}.ner{steps.data.file}"
                print(f"Starting conversion to NER format...to {data_ner}")
                c2n.main(data_tok, data_ner, steps.data.file)


    # Create the results directory if needed
    if not os.path.isdir(steps.data.resu):
        print("Result directory does not exist yet; creating it.")
        os.mkdir(steps.data.resu)
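    # All downstream outputs (predictions, logs, evaluation reports) are
    # written under this results directory.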


    if steps.main == "train":
        #model_config = steps.model_config
        #cmd = "bash utils/expes.sh eng.rst.rstdt model/config_training.jsonnet bert train"
        #os.system(cmd)
        if steps.toolkit == "allennlp":
            print("toolkit allennlp for training")
        #    tr_allen.main(steps)
            # set the value of model from null to what was just created by training
            steps.model = f"{steps.data.resu}/model.tar.gz"
        elif steps.toolkit == "jiant":
            print("Jiant toolkit not ready")
        else :
            print("toolkit unknown")
        
        #check config train file
    elif steps.main == "test" or steps.main =="annotation":
    #### Appliquer le model choisi, sortir le JSON avec les predictions :score, proba, tags
    # #allennlp predict --use-dataset-reader --output-file ${RESULT_DIR}/${FILE}.json ${MODEL} ${RAW}.ner.tok
        print(f"Checking for model...{steps.model}")
        model_path = get_model(steps.model)
        data_json = f"{steps.data.resu}/{steps.data.name}.json"
        cmd = f"allennlp predict --use-dataset-reader --output-file {data_json} {model_path} {data_ner} &> {steps.data.resu}/logs.txt"
        print("Starting Prediction...")
        os.system(cmd)
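        # NB: data_ner is only defined when ner_init ran above, so test and
        # annotation configurations are expected to enable ner_init as well.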
    #### ------------------------------- TBD: do the same with a Python script (or JIANT??)
    else:
        sys.exit("Problem defining the main step: expected 'train', 'test' or 'annotation'.")




    if steps.post_tab:
    #### Apply the predictions to the text and output the tokenized text with a column of predicted tags
    # python $RUNTIME/json2conll.py ${RESULT_DIR}/${FILE}.json split.tok > ${RESULT_DIR}/${FILE}.split.tok
        data_conll = f"{steps.data.resu}/{steps.data.name}.split.tok"
        out_format = "split.tok"  # TODO: retrieve this from the config file
        print(f"Starting formatting from JSON to tok format...to {data_conll}")
        j2c.main(data_json, out_format, data_conll)

    ####### EVALUATION AGAINST GOLD
    # python discut/code/utils/seg_eval.py data_gold data_pred (-s)
    if steps.eval:
        if steps.main == "train":
            data_gold = steps.test_data
            if steps.ner_init:
                data_gold_ner = f"{steps.data.path}/{steps.data.name}_test.ner.conllu"

            # make predictions on the test data
            model_path = steps.model  # the model that was just created
            # data_json is about to be created by the predict command
            data_json = f"{steps.data.resu}/{steps.data.name}_test.predictions.json"  # TODO: use a relative path [opt: --silent?]
            cmd = f"allennlp predict --use-dataset-reader --output-file {data_json} {model_path} {data_gold_ner} --include-package allen_custom.custom_conll_reader --include-package allen_custom.custom_simple_tagger --include-package allen_custom.custom_disrpt_reader --predictor sentence-tagger --include-package allen_custom.custom_bert_token_embedder &> {steps.data.resu}/logs.txt"
            print("Starting prediction...")
            print(f"cmd prediction: {cmd}")
            os.system(cmd)

            data_conll = f"{steps.data.resu}/{steps.data.name}_test.predictions.conll"  # TODO: use a relative path
            print(f"Starting formatting from JSON to tok format...to {data_conll}")
            j2c.main(data_json, "split.tok", data_conll)
            print(f"Starting eval, gold={data_gold}, predictions={data_conll}, model={model_path}")
            cmd = f"python utils/seg_eval.py {data_gold} {data_conll} &> {steps.data.resu}/Evaluation.txt"
            os.system(cmd)


        else:
            data_gold = data_tok  # TODO: rename these variables, this is unclear!
            data_pred = data_conll
            cmd = f"python utils/seg_eval.py {data_gold} {data_pred} &> {steps.data.resu}/Evaluation.txt"
            os.system(cmd)




    if steps.post_bracket:
    #### Take the tokenized text with its predicted tags and output plain text
    #### (the original format, for now a sequence of sentences) with brackets
    # python $RUNTIME/conll2bracket.py ${RESULT_DIR}/${FILE}.split.tok > ${RESULT_DIR}/${FILE}.split.tok.bracket
        data_bracket = f"{steps.data.resu}/{steps.data.name}.split.tok.bracket"
        print(f"Starting formatting into bracketed text...to {data_bracket}")
        c2bracket.main(data_conll, data_bracket)
    




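# Typical invocation (assuming the script is run from the code/ directory,
# as the relative ../model and ../data paths above suggest):
#   python discut22_1.py --config <path/to/config.json>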
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', required=True, help='Config file in JSON')
    args = parser.parse_args()
    config = args.config
    steps = get_config_infos(config)

    main(steps)
    print("Done.")