Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
D
Demand Response User
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Package registry
Model registry
Operate
Environments
Terraform modules
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
sepia-pub
Open Science
Demand Response User
Commits
3130d319
Commit
3130d319
authored
3 years ago
by
Maël Madon
Browse files
Options
Downloads
Patches
Plain Diff
campaign3: experiments with speedup
parent
2d70021c
No related branches found
No related tags found
No related merge requests found
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
campaign3.py
+55
-0
55 additions, 0 deletions
campaign3.py
instance3.py
+128
-0
128 additions, 0 deletions
instance3.py
with
183 additions
and
0 deletions
campaign3.py
0 → 100755
+
55
−
0
View file @
3130d319
#!/usr/bin/env python3
######################################
# Experiments with the speedup model
######################################
import
random
# from time import *
import
datetime
from
dateutil
import
parser
import
concurrent.futures
from
scripts.util
import
WL_DIR
from
instance3
import
start_instance
###############################
# Prepare the start date sample
###############################
begin_trace = 1356994806  # according to original SWF header
jun1 = parser.parse('Sun Jun 1 00:00:00 CEST 2014')
daylight_saving_day = parser.parse('Sun Oct 26 00:00:00 CEST 2014')
day = datetime.timedelta(days=1)
weekdays = [1, 2, 3, 4, 5]  # Mon to Fri

# One experiment for every day between Jun 1 and (Oct 26 - 3 days)
# whose *following* day is a weekday (Mon-Fri).
expe_start_time = []  # start offsets in seconds relative to the trace begin
str_start_day = []    # human-readable form of the same dates
last_candidate = daylight_saving_day - 3 * day
current = jun1
while current <= last_candidate:
    if (current + day).isoweekday() in weekdays:
        str_start_day.append(current.ctime())
        expe_start_time.append(current.timestamp() - begin_trace)
    current += day

# Save selected dates in a txt file
# NOTE(review): the file name says "campaign2" inside campaign3 — presumably
# a copy-paste leftover; confirm no other script reads it before renaming.
with open(f"{WL_DIR}/start_days_for_campaign2.txt", 'w') as f:
    f.writelines(date + '\n' for date in str_start_day)

###############################
# Launch 10 expe
###############################
nb_expe = 10

with concurrent.futures.ProcessPoolExecutor() as executor:
    # start_instance(expe_num, start_date, prepare_workload, clean_log)
    instances = []
    for num in range(nb_expe):
        print(f"Submit expe {num}")
        instances.append(
            executor.submit(start_instance, num, expe_start_time[num],
                            True, True))

    # start_instance returns its expe_num, which is what gets printed here
    for finished in concurrent.futures.as_completed(instances):
        print(f"Expe {finished.result()} terminated")
This diff is collapsed.
Click to expand it.
instance3.py
0 → 100755
+
128
−
0
View file @
3130d319
#!/usr/bin/env python3
######################################
# Experiments with the speedup model
######################################
import
time
import
os
import
subprocess
import
argparse
import
json
import
random
import
scripts.swf_to_batsim_split_by_user
as
split_user
from
scripts.util
import
*
def prepare_input_data(expe_num, start_date):
    """Cut the original trace to extract 72h starting from this start date.

    :param expe_num: experiment number, used to name the output folder
        ``{WL_DIR}/expe{expe_num}``
    :param start_date: window start, in seconds since the trace beginning
    """
    end_date = start_date + 72 * 3600
    to_keep = f"submit_time >= {start_date} and submit_time <= {end_date}"

    # exist_ok=True avoids the check-then-create race of a separate
    # os.path.exists() / os.makedirs() pair
    os.makedirs(f'{WL_DIR}/expe{expe_num}', exist_ok=True)
    split_user.generate_workload(input_swf=f'{WL_DIR}/MC_selection_article.swf',
                                 output_folder=f'{WL_DIR}/expe{expe_num}',
                                 keep_only=to_keep,
                                 job_grain=10,
                                 job_walltime_factor=8)
def run_expe(expe_num, seed_alpha, clean_log, window_size=4):
    """Run batmen with reconfig behavior and a `window_size`-hour DR window.

    For each user, alpha is drawn at random following a Gaussian distribution
    such that 0.5<alpha<1 in 95% of the cases (mean = 0.75, sigma = 0.125).

    :param expe_num: experiment number; should be a small integer (eg < 100)
        because it is embedded in the batsim/batsched TCP port "280XX"
    :param seed_alpha: seed for the per-user alpha draws (also names the
        output folder)
    :param clean_log: if True, delete the heavy simulation log files at the end
    :param window_size: length of the demand-response window, in hours
        (default 4, the original hard-coded value)
    """
    # Useful vars and output folder
    EXPE_DIR = f"{ROOT_DIR}/out/campaign3/expe{expe_num}/seed_alpha_{seed_alpha}"
    create_dir_rec_if_needed(EXPE_DIR)
    create_dir_rec_if_needed(f"{EXPE_DIR}/cmd")
    EXPE_FILE = f"{EXPE_DIR}/cmd/robinfile.yaml"
    wl_folder = f'{WL_DIR}/expe{expe_num}'
    pf = f"{ROOT_DIR}/platform/average_metacentrum.xml"
    wl = f"{WL_DIR}/empty_workload.json"
    uf = f"{EXPE_DIR}/cmd/user_description_file.json"

    # Initialize the random generator
    random.seed(seed_alpha)

    # Demand response window, from 16:00 to (16:00 + window_size) on day 2,
    # expressed in seconds. int() (not the C-style "(int)(...)" cast of the
    # original) keeps the bound integral even for a fractional window_size.
    dm_window = [(24 + 16) * 3600, int((24 + 16 + window_size) * 3600)]

    # Per-user entry of the user description file
    def user_description(user):
        return {
            "name": user,
            "category": "dm_user_reconfig",
            "param": {
                "input_json": f"{wl_folder}/{user}.json",
                "alpha_speedup": random.normalvariate(0.75, 0.125)}
        }

    # NOTE(review): assumes every entry of wl_folder is a "<user>.json"
    # workload file — confirm nothing else is ever written there
    user_names = [user_file.split('.')[0] for user_file in os.listdir(wl_folder)]

    data = {
        "dm_window": dm_window,
        "log_user_stats": True,
        "log_folder": EXPE_DIR,
        "users": [user_description(user) for user in user_names],
    }
    with open(uf, 'w') as user_description_file:
        json.dump(data, user_description_file)

    # Generate and run robin instance (batsim + batsched talk over a TCP
    # socket whose port encodes expe_num, hence the "< 100" requirement)
    socket_batsim = f"tcp://localhost:280{expe_num:02d}"
    socket_batsched = f"tcp://*:280{expe_num:02d}"
    batcmd = gen_batsim_cmd(
        pf, wl, EXPE_DIR, f"--socket-endpoint={socket_batsim} --energy --enable-compute-sharing --enable-dynamic-jobs --acknowledge-dynamic-jobs --enable-profile-reuse")
    schedcmd = f"batsched --socket-endpoint={socket_batsched} -v bin_packing_energy --queue_order=desc_size --variant_options_filepath={uf}"

    instance = RobinInstance(output_dir=EXPE_DIR,
                             batcmd=batcmd,
                             schedcmd=schedcmd,
                             simulation_timeout=604800, ready_timeout=10,
                             success_timeout=3600, failure_timeout=5
                             )

    instance.to_file(EXPE_FILE)

    print(f"Run robin {EXPE_FILE}")
    run_robin(EXPE_FILE)  # return value was assigned but never used
    print(f"Robin {EXPE_FILE} finished")

    # Remove the log files that can quickly become heavy...
    if clean_log:
        for log_name in ("batsim.log", "sched.err.log", "sched.out.log"):
            os.remove(f"{EXPE_DIR}/log/{log_name}")
def start_instance(expe_num, start_date, prepare_workload=True, clean_log=False):
    """Prepare and run one full experiment instance.

    Calls run_expe once per alpha seed in 1..10, so one instance produces
    ten simulation runs under out/campaign3/expe{expe_num}/.

    :param expe_num: experiment number; names folders and the TCP ports,
        so it should stay a small integer (eg < 100)
    :param start_date: window start in seconds since the trace begin,
        forwarded to prepare_input_data
    :param prepare_workload: if True, (re)generate the per-user workload files
    :param clean_log: forwarded to run_expe (delete heavy logs after each run)
    :return: expe_num, so parallel callers can tell which instance finished
    """
    # Prepare workload
    if prepare_workload:
        prepare_input_data(expe_num, start_date)

    # Create expe folder
    create_dir_rec_if_needed(f"{ROOT_DIR}/out/campaign3")
    create_dir_rec_if_needed(f"{ROOT_DIR}/out/campaign3/expe{expe_num}")

    # Run with various random seeds (1..10, same as the original literal list)
    for seed in range(1, 11):
        run_expe(expe_num=expe_num, seed_alpha=seed, clean_log=clean_log)

    return expe_num
# def main():
# parser = argparse.ArgumentParser(
# description='One expe instance. To launch for example with `oarsub -l walltime=2 "./instance3 arg1 arg2 arg3"`')
# parser.add_argument('expe_num', type=int, help='The expe ID')
# parser.add_argument('start_date', type=int,
# help='Start of the 3-day window (in seconds since the start of the original trace)')
# args = parser.parse_args()
# start_instance(args.expe_num, args.start_date)
# if __name__ == "__main__":
# main()
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment