Skip to content
Snippets Groups Projects
Commit 4f75b36e authored by Maël Madon's avatar Maël Madon
Browse files

test: adding the necessary to test the expected_behavior_logs. Some tests now...

test: adding the necessary to test the expected_behavior_logs. Some tests are now broken. See MR sh fichiers_cpp.sh
parent 47372428
Branches multibehavior
Tags
1 merge request!16Merge request multibehavior
Pipeline #5888 passed
...@@ -48,13 +48,17 @@ def pytest_generate_tests(metafunc): ...@@ -48,13 +48,17 @@ def pytest_generate_tests(metafunc):
] ]
metafunc.parametrize('sched_multi', scheds) metafunc.parametrize('sched_multi', scheds)
if 'test_instance' in metafunc.fixturenames: if 'test_with_expected_log' in metafunc.fixturenames:
instance_files = glob.glob('test/expected_log/*') instance_files = glob.glob('test/expected_log/*')
instances = [basename(instance_file).replace('_jobs.csv', '') instances = [basename(instance_file).replace('_jobs.csv', '')
for instance_file in instance_files] for instance_file in instance_files]
metafunc.parametrize('test_instance', instances) metafunc.parametrize('test_with_expected_log', instances)
if 'test_with_expected_behavior' in metafunc.fixturenames:
instance_files = glob.glob('test/expected_behavior_log/*')
instances = [basename(instance_file).replace('_user_stats_behaviors.csv', '')
for instance_file in instance_files]
metafunc.parametrize('test_with_expected_behavior', instances)
# def pytest_cmdline_preparse(config, args): # def pytest_cmdline_preparse(config, args):
# html_file = "test-out/testreport.html" # html_file = "test-out/testreport.html"
......
...@@ -68,10 +68,13 @@ def assert_expected_output(test_file): ...@@ -68,10 +68,13 @@ def assert_expected_output(test_file):
Files {expected} and {obtained} should be equal but are not.\n\ Files {expected} and {obtained} should be equal but are not.\n\
Run `diff {expected} {obtained}` to investigate why.\n\ Run `diff {expected} {obtained}` to investigate why.\n\
Run `cp {obtained} {expected}` to override the expected file with the obtained." Run `cp {obtained} {expected}` to override the expected file with the obtained."
def has_expected_behavior(test_file):
    """Return True if a stored expected user-behavior log exists for *test_file*.

    The expected file is looked up under test/expected_behavior_log/ using the
    naming convention ``<test_file>_user_stats_behaviors.csv``.
    """
    expected_path = f"test/expected_behavior_log/{test_file}_user_stats_behaviors.csv"
    return os.path.exists(expected_path)
def assert_expected_behavior(test_file) : def assert_expected_behavior(test_file) :
expected = "test/expected_behavior_log/" + test_file + "_user_stats_behaviors.csv" expected = f"test/expected_behavior_log/{test_file}_user_stats_behaviors.csv"
obtained = "test-out/" + test_file + "/user_stats_behaviors.csv" obtained = f"test-out/{test_file}/user_stats_behaviors.csv"
if os.path.exists(expected) : if os.path.exists(expected) :
assert filecmp.cmp(expected, obtained), f"\ assert filecmp.cmp(expected, obtained), f"\
Files {expected} and {obtained} should be equal but are not.\n\ Files {expected} and {obtained} should be equal but are not.\n\
......
...@@ -61,6 +61,9 @@ def run_user(user_name, platform_multiC, test_name=None, schedconf=None): ...@@ -61,6 +61,9 @@ def run_user(user_name, platform_multiC, test_name=None, schedconf=None):
if has_expected_output(test_name): if has_expected_output(test_name):
assert_expected_output(test_name) assert_expected_output(test_name)
if has_expected_behavior(test_name):
assert_expected_behavior(test_name)
return output_dir return output_dir
......
from helper import * from helper import *
def test_expected_output(test_with_expected_log):
    """Check, for each stored expected log (parametrized via pytest fixtures),
    that the corresponding test ran and its _jobs.csv output is strictly
    equal to the expected file."""
    assert_expected_output(test_with_expected_log)
def test_expected_behavior(test_with_expected_behavior):
    """Check, for each stored expected behavior log (parametrized via pytest
    fixtures), that the corresponding test ran and the obtained
    user_stats_behaviors.csv is strictly equal to the expected file when
    both exist."""
    assert_expected_behavior(test_with_expected_behavior)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment