Commit ee95ae25 authored by Marco Perronet's avatar Marco Perronet

Compile rtspin, fix paths in Makefile and scripts

parent 3302d106
@@ -25,9 +25,6 @@ clean-output:
sudo rm -rf testing/profiling
sudo rm -rf scripts/evaluation/plots
help:
cd scripts && ./run.sh -h
# Prints information about rt threads in the system
rt-info:
python3 scripts/rt_procs_info.py
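# A minimal sketch (an assumption, not the actual scripts/rt_procs_info.py) of
# how such a script can list SCHED_FIFO/SCHED_RR processes with Python's os module:
#
#   import os
#
#   for entry in os.listdir("/proc"):
#       if not entry.isdigit():
#           continue  # not a process directory
#       pid = int(entry)
#       try:
#           policy = os.sched_getscheduler(pid)
#       except (ProcessLookupError, PermissionError):
#           continue  # process exited or access denied
#       if policy in (os.SCHED_FIFO, os.SCHED_RR):
#           prio = os.sched_getparam(pid).sched_priority
#           with open(f"/proc/{pid}/comm") as f:
#               print(pid, f.read().strip(), "prio", prio)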
@@ -40,7 +37,7 @@ drum: all
mkdir -p testing/output
sleep .2
pgrep -f "^\./scripts/evaluation/workloads/drum/manual-input" | xargs sudo taskset 0x1 chrt -f -p 98
cd scripts && pgrep -f "^\./scripts/evaluation/workloads/drum/manual-input" | xargs ./run.sh -p > ../testing/output/rbftrace---manual-input
pgrep -f "^\./scripts/evaluation/workloads/drum/manual-input" | xargs ./src/tools/rbf-trace --pids > ../testing/output/rbftrace---manual-input
# NOTE: you must first run the script that reads input with:
# ./scripts/evaluation/workloads/drum/drum.sh
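# For reference, the taskset/chrt step above can also be done from Python on a
# Linux host (a sketch; the core and priority values mirror the recipe):
#
#   import os
#
#   def pin_and_make_fifo(pid, cpu=0, prio=98):
#       # Equivalent of: taskset 0x1 chrt -f -p 98 <pid>
#       os.sched_setaffinity(pid, {cpu})
#       os.sched_setscheduler(pid, os.SCHED_FIFO, os.sched_param(prio))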
@@ -53,7 +50,7 @@ rtspin:
cd testing/rtspin-port && $(MAKE) clean
cd testing/rtspin-port && $(MAKE) cleandb
cd testing/rtspin-port && $(MAKE)
cd scripts && time python3 evaluation/experiments/$(RTSPIN_EXPERIMENT).py
time python3 scripts/evaluation/experiments/$(RTSPIN_EXPERIMENT).py
# Launch rtspin + RBFTrace, use experiment script, run with busy cores
rtspin-busy: all busy-cores rtspin busy-cores-kill
@@ -63,12 +60,10 @@ rtspin-plot: rtspin plot
rtspin-profile: profile rtspin pprof
# Launch rbftrace on a set of pids (e.g., to inspect the interference of migration/stop threads)
# TODO: this won't work yet, because there is currently no way to run for a specified amount of time (tracing stops only when the traced threads die); this must be implemented
pid: all
sudo echo
mkdir -p testing/output
cd scripts && echo "19066 19067" | xargs ./run.sh -p > ../testing/output/rbftrace---pids
./src/tools/rbf-trace --pids 19066 19067 --ftrace-len 30 > testing/output/rbftrace---pids
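# If a hard wall-clock bound is needed (the TODO above suggests one is not yet
# built in; the semantics of --ftrace-len are not shown in this diff), a caller
# can enforce one externally, e.g.:
#
#   import subprocess
#
#   proc = subprocess.Popen(["./src/tools/rbf-trace", "--pids", "19066", "19067"])
#   try:
#       proc.wait(timeout=30)  # seconds
#   except subprocess.TimeoutExpired:
#       proc.terminate()       # stop tracing once the time budget is spent
#       proc.wait()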
pid-plot: pid plot
@@ -76,7 +71,7 @@ pid-plot: pid plot
webserver: all
sudo echo
mkdir -p testing/output
cd scripts && time python3 evaluation/experiments/$(WEBSERVER_EXPERIMENT).py
time python3 scripts/evaluation/experiments/$(WEBSERVER_EXPERIMENT).py
# Launch flask webserver + RBFTrace + make plot
webserver-plot: webserver plot
@@ -88,7 +83,7 @@ plot:
# Regenerate the plot pictures from existing data
experiment-plot:
cd scripts && time python3 evaluation/experiments/$(RTSPIN_EXPERIMENT)_plot.py
time python3 scripts/evaluation/experiments/$(RTSPIN_EXPERIMENT)_plot.py
pprof:
pprof --svg target/debug/main testing/profiling/tracer.profile > testing/output/profile.svg
@@ -97,35 +92,35 @@ pprof:
# Launch feature detection only
feature-detection: all
cd scripts && ./run.sh -d
./src/tools/rbf-trace -d
# The following testing targets configure the system and then run feature detection
global: all
@echo "*** Configuring as global scheduling ***"
@echo
sudo python3 scripts/sys_conf/global.py
cd scripts && ./run.sh -d
./src/tools/rbf-trace -d
@echo
partitioned: all
@echo "*** Configuring as partitioned scheduling ***"
@echo
sudo python3 scripts/sys_conf/partitioned.py
cd scripts && ./run.sh -d
./src/tools/rbf-trace -d
@echo
clustered: all
@echo "*** Configuring as clustered scheduling ***"
@echo
sudo python3 scripts/sys_conf/clustered.py
cd scripts && ./run.sh -d
./src/tools/rbf-trace -d
@echo
clustered-nf: all
@echo "*** Configuring as clustered scheduling (nonfixed cluster size) ***"
@echo
sudo python3 scripts/sys_conf/clustered_nonfixed.py
cd scripts && ./run.sh -d
./src/tools/rbf-trace -d
@echo
test-feature-detection: all test-spawn global partitioned clustered clustered-nf
@@ -21,6 +21,11 @@ fn main() {
// Note: this *only works* with cargo run
println!("cargo:rustc-env=LD_LIBRARY_PATH=src/events_generation/binary_parser/lib/build_output/usr/lib64");
// Compile rtspin (for testing)
// Check the exit code too: status() only fails if make cannot be spawned.
let build_status = Command::new("make")
    .current_dir("testing/rtspin-port")
    .status()
    .expect("Failed to run make for rtspin.");
assert!(build_status.success(), "Failed to build rtspin.");
// Generate bindings (reminder)
// bindgen -o src/bindings.rs src/events_generation/binary_parser/stream.h -- -I/usr/local/include/traceevent -I/usr/local/include/tracefs -I/usr/local/include/trace-cmd
}
@@ -4,23 +4,23 @@ import subprocess
import time
import os
# workload = "evaluation/workloads/uniprocessor_double/1_uni_double_slow.json"
# workload = "evaluation/workloads/uniprocessor_double/2_uni_double.json"
# workload = "evaluation/workloads/uniprocessor_double/3_uni_double_fast.json"
# workload = "evaluation/workloads/uniprocessor_double/4_uni_double_veryfast.json"
workload = "evaluation/workloads/uniprocessor_all/8_uni_stress_veryfast.json"
# workload = "scripts/evaluation/workloads/uniprocessor_double/1_uni_double_slow.json"
# workload = "scripts/evaluation/workloads/uniprocessor_double/2_uni_double.json"
# workload = "scripts/evaluation/workloads/uniprocessor_double/3_uni_double_fast.json"
# workload = "scripts/evaluation/workloads/uniprocessor_double/4_uni_double_veryfast.json"
workload = "scripts/evaluation/workloads/uniprocessor_all/8_uni_stress_veryfast.json"
print()
print("*** RUNNING WORKLOAD: " + os.path.basename(workload) + " ***")
print()
# Spawn threads to be traced
proc1 = subprocess.Popen(["python3", "evaluation/generate_workload.py", "--workload", workload])
proc1 = subprocess.Popen(["python3", "scripts/evaluation/generate_workload.py", "--workload", workload])
# Skip rtspin init phase
time.sleep(0.5)
# Get pids to trace and call RBFtrace
outputfile = open("../testing/output/rbftrace---rt-spin", "w") # println!() output
outputfile = open("testing/output/rbftrace---rt-spin", "w") # println!() output
proc2 = subprocess.Popen(["pgrep", "rtspin"], stdout=subprocess.PIPE)
proc3 = subprocess.run(["xargs", "./run.sh", "-p"], stdin=proc2.stdout, stdout=outputfile)
proc3 = subprocess.run(["xargs", "./src/tools/rbf-trace", "-p"], stdin=proc2.stdout, stdout=outputfile)
@@ -14,13 +14,13 @@ REPLAY = True
N_RUNS = 1
# Workload to get the samples from
workloads_path = "evaluation/workloads/uniprocessor_all_raspi/"
workloads_path = "scripts/evaluation/workloads/uniprocessor_all_raspi/"
workloads = glob.glob(workloads_path + "*")
workloads.sort()
# Setup dirs
timestamp = time.strftime("%d-%m-%X", time.localtime())
data_path = "evaluation/plots/data/" + timestamp + "/"
data_path = "scripts/evaluation/plots/data/" + timestamp + "/"
if not os.path.exists(data_path):
os.makedirs(data_path)
@@ -34,25 +34,25 @@ def run_workload_parse_result(workload, n, replay_file="", bufsize=1_000, jmax=1
if not replay_file:
# Spawn threads to be traced
subprocess.Popen(["python3", "evaluation/generate_workload.py", "--workload", workload])
subprocess.Popen(["python3", "scripts/evaluation/generate_workload.py", "--workload", workload])
# Skip rtspin init phase
time.sleep(0.5)
# Get pids to trace and call RBFtrace
outputfile = open("../testing/output/rbftrace---rt-spin", "w") # println!() output
outputfile = open("testing/output/rbftrace---rt-spin", "w") # println!() output
proc2 = subprocess.Popen(["pgrep", "rtspin"], stdout=subprocess.PIPE)
subprocess.run(["xargs", "./run.sh", "-s", "-p"], stdin=proc2.stdout, stdout=outputfile)
subprocess.run(["xargs", "./src/tools/rbf-trace", "-s", "-p"], stdin=proc2.stdout, stdout=outputfile)
else:
outputfile = open("../testing/output/rbftrace---rt-spin", "w") # println!() output
subprocess.run(["./run.sh", "-r", str(replay_file), "-B", str(bufsize), "-J", str(jmax), "-C", str(complexity), "-S", str(sparsify)], stdout=outputfile)
outputfile = open("testing/output/rbftrace---rt-spin", "w") # println!() output
subprocess.run(["./src/tools/rbf-trace", "-r", str(replay_file), "-B", str(bufsize), "-J", str(jmax), "-C", str(complexity), "-S", str(sparsify)], stdout=outputfile)
### Parse results
# Compare RBFtrace results with original json
with open(workload, "r") as input_file:
input_model = json.loads(input_file.read())
outputfiles = glob.glob("../testing/output/extracted_models/*")
outputfiles = glob.glob("testing/output/extracted_models/*")
output_file = open(max(outputfiles, key=os.path.getctime), "r") # latest file
output_model = json.loads(output_file.read())
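# Model JSON schema as inferred from the accesses in these scripts (hedged;
# only the fields actually read are listed):
#
#   {
#     "clusters": [
#       {"taskset": [{"period": <ms in workload files, ns when extracted>,
#                     "jitter": <ns>}, ...]},
#       ...
#     ]
#   }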
@@ -100,7 +100,7 @@ if REPLAY:
sparsify_values = [2, 5, 10, 20, 30, 50]
n_produced_traces = N_RUNS*len(workloads)
traces_path = "../testing/output/other_plots/replay_run/"
traces_path = "testing/output/other_plots/replay_run/"
traces = glob.glob(traces_path + "*")
traces.sort(key=os.path.getctime)
traces.reverse() # Newest traces
#!/usr/bin/env python3
import subprocess
import time
import glob
import os
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.transforms as mtransforms
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--inputpath", type=str)
parser.add_argument("--replot", action="store_true")
args = parser.parse_args()
# Number of runs of this experiment
N_RUNS = 10
# Only plot the data from the previous run, don't run RBFtrace
PLOT_INPUT = args.replot
workloads_path = "evaluation/workloads/uniprocessor_all/"
workloads = glob.glob(workloads_path + "*")
workloads.sort()
timestamp = time.strftime("%d-%m-%X", time.localtime())
figures_path = "../testing/output/plot_figures/"
figures_path_dir = figures_path + timestamp + "/"
data_path = "../testing/output/plot_data/"
data_path_dir = data_path + timestamp + "/"
# Plot already existing data, don't use RBFtrace
if PLOT_INPUT:
if not args.inputpath:
dirs = glob.glob(data_path + "/*")
data_path_dir = max(dirs, key=os.path.getctime) + "/" # latest dir
figures_path_dir = figures_path + os.path.basename(os.path.normpath(data_path_dir)) + "/"
else:
data_path_dir = data_path + args.inputpath + "/"
figures_path_dir = figures_path + args.inputpath + "/"
# Clean old plot pictures before re-plotting
old_pictures = glob.glob(figures_path_dir + "*")
for f in old_pictures:
os.remove(f)
# Create output dirs
else:
if not os.path.exists(figures_path_dir):
os.makedirs(figures_path_dir)
if not os.path.exists(data_path_dir):
os.makedirs(data_path_dir)
# Empirical CDF helper: returns bin edges and cumulative probabilities
def cdf(data, **kwargs):
hist, edges = np.histogram(data, **kwargs)
cdf = np.cumsum(hist)/len(data)
return edges, np.concatenate(([0],cdf))
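# Usage sketch for cdf(): np.histogram returns bins+1 edges and the leading 0
# pads the cumulative sum, so the two returned arrays have equal length and can
# be splatted straight into a step plot:
#
#   data = np.random.exponential(size=1000)
#   plt.plot(*cdf(data, bins=25), ds='steps')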
# Data for all runs
periods = list()
periods_extracted = list()
jitters = list()
errors = list()
errors_rel = list()
# Main loop
for n in range(0, N_RUNS):
# Data for single run
periods_run = list()
periods_extracted_run = list()
jitters_run = list()
errors_run = list()
errors_rel_run = list()
if not PLOT_INPUT: ### Get data from running threads
for workload in workloads:
print()
print("*** RUNNING WORKLOAD: " + os.path.basename(workload) + " Run #" + str(n) + " ***")
print()
# Spawn threads to be traced
proc1 = subprocess.Popen(["python3", "evaluation/generate_workload.py", "--workload", workload])
# Skip rtspin init phase
time.sleep(0.5)
# Get pids to trace and call RBFtrace
outputfile = open("../testing/output/rbftrace---rt-spin", "w") # println!() output
proc2 = subprocess.Popen(["pgrep", "rtspin"], stdout=subprocess.PIPE)
proc3 = subprocess.run(["xargs", "./run.sh", "-s", "-p"], stdin=proc2.stdout, stdout=outputfile)
# Compare RBFtrace results with original json
with open(workload, "r") as input_file:
input_model = json.loads(input_file.read())
outputfiles = glob.glob("../testing/output/extracted_models/*")
output_file = open(max(outputfiles, key=os.path.getctime), "r") # latest file
output_model = json.loads(output_file.read())
for c_idx, cluster in enumerate(input_model["clusters"]):
for t_idx, task in enumerate(cluster["taskset"]):
# If some tasks didn't match a model, skip this data (almost never happens)
if len(input_model["clusters"][c_idx]["taskset"]) == len(output_model["clusters"][c_idx]["taskset"]):
input_task = input_model["clusters"][c_idx]["taskset"][t_idx]
output_task = output_model["clusters"][c_idx]["taskset"][t_idx]
periods_run.append(input_task["period"])
periods_extracted_run.append(output_task["period"])
jitters_run.append(output_task["jitter"])
else:
print("*** Warning: Some models were not matched ***")
# Convert to ns for error calculation (extracted periods are already in ns)
periods_run = list(map(lambda x: x*10**6, periods_run))
# Add errors
for idx, _ in enumerate(periods_run):
errors_run.append(abs(periods_extracted_run[idx] - periods_run[idx]))
errors_rel_run.append(abs((periods_extracted_run[idx] / periods_run[idx]) - 1))
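# Worked example with assumed numbers: a 10 ms ground-truth period becomes
# 10_000_000 ns after the *10**6 conversion; an extracted period of
# 10_000_500 ns then gives an absolute error of 500 ns and a relative error of
# |10_000_500 / 10_000_000 - 1| = 5e-05.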
# Convert to ms for nice plotting
jitters_run = list(map(lambda x: float(x)*10**-6, jitters_run))
periods_run = list(map(lambda x: float(x)*10**-6, periods_run))
periods_extracted_run = list(map(lambda x: float(x)*10**-6, periods_extracted_run))
# Add to data for all runs
periods.extend(periods_run)
periods_extracted.extend(periods_extracted_run)
jitters.extend(jitters_run)
errors.extend(errors_run)
errors_rel.extend(errors_rel_run)
if not PLOT_INPUT: # Save plot data for re-plotting (all runs)
with open(data_path_dir + "period.json", "w") as outfile:
json.dump(periods, outfile)
with open(data_path_dir + "period_extracted.json", "w") as outfile:
json.dump(periods_extracted, outfile)
with open(data_path_dir + "jitter.json", "w") as outfile:
json.dump(jitters, outfile)
with open(data_path_dir + "error.json", "w") as outfile:
json.dump(errors, outfile)
with open(data_path_dir + "error_rel.json", "w") as outfile:
json.dump(errors_rel, outfile)
else: # Use existing data (all runs)
with open(data_path_dir + "period.json", "r") as input_file:
periods = json.loads(input_file.read())
with open(data_path_dir + "period_extracted.json", "r") as input_file:
periods_extracted = json.loads(input_file.read())
with open(data_path_dir + "jitter.json", "r") as input_file:
jitters = json.loads(input_file.read())
with open(data_path_dir + "error.json", "r") as input_file:
errors = json.loads(input_file.read())
with open(data_path_dir + "error_rel.json", "r") as input_file:
errors_rel = json.loads(input_file.read())
# Plot data (all runs)
plt.figure(1)
plt.clf()
plt.scatter(periods, errors, c="black")
plt.xlabel("Ground truth period (ms)")
plt.ylabel("Error (ns)")
plt.xscale("log")
plt.figure(2)
plt.scatter(periods, errors_rel, c="black")
plt.xlabel("Ground truth period (ms)")
plt.ylabel("Relative error")
plt.xscale("log")
plt.figure(3)
plt.hist(jitters, color="blue", edgecolor="black", bins=25)
plt.xlabel("Extracted jitter (ms)")
plt.ylabel("Number of samples")
plt.figure(4)
plt.plot(*cdf(jitters, bins=25), ds='steps')
# plt.hist(jitters, color="blue", edgecolor="black", bins=25, density=True, cumulative=True)
plt.xlabel("Extracted jitter (ms)")
plt.ylabel("Probability")
# Save plot figure
plt.figure(1)
plt.savefig(figures_path_dir + "errors_total.png", bbox_inches='tight')
plt.figure(2)
plt.savefig(figures_path_dir + "errors_rel_total.png", bbox_inches='tight')
plt.figure(3)
plt.savefig(figures_path_dir + "jitters_total.png", bbox_inches='tight')
plt.figure(4)
plt.savefig(figures_path_dir + "jitters_cdf_total.png", bbox_inches='tight')
#######################################################################################
### Use saved traces to run replays using different parameters of the model matcher ###
#######################################################################################
# TODO should save data for replotting
# Different parameters to try
b_values = [10, 50, 200, 500, 1000, 5000]
jmax_values = [50_000, 100_000, 250_000, 500_000, 1_000_000, 1_500_000]
complexity_values = [1_000, 10_000, 100_000, 1_000_000, 5_000_000, 10_000_000]
sparsify_values = [2, 5, 10, 20, 30, 50]
workloads_all_runs = workloads * N_RUNS
traces_path = "../testing/output/other_plots/replay_run/"
traces = glob.glob(traces_path + "*")
traces.sort(key=os.path.getctime)
assert len(traces) == len(workloads_all_runs)
avg_errors = list()
max_errors = list()
# Recompile with replay feature
subprocess.run(["make", "-C", "..", "replay"])
# print("Errors original " + str(errors))
# print("Period ground original " + str(periods))
# print("Period extracted original " + str(periods_extracted))
for buf in b_values:
periods_extracted = list()
jitters = list()
errors = list()
for n, trace in enumerate(traces): # For all runs
trace_file = os.path.basename(os.path.normpath(trace))
print()
print("*** RUNNING WORKLOAD: " + os.path.basename(workloads_all_runs[n]) + " Bufsize " + str(buf) + " ***")
print()
# Call RBFtrace with the replay option
outputfile = open("../testing/output/rbftrace---rt-spin", "w") # println!() output
proc3 = subprocess.run(["./run.sh", "-r", str(trace_file), "-B", str(buf)], stdout=outputfile)
# Compare RBFtrace results with original json
with open(workloads_all_runs[n], "r") as input_file:
input_model = json.loads(input_file.read())
outputfiles = glob.glob("../testing/output/extracted_models/*")
output_file = open(max(outputfiles, key=os.path.getctime), "r") # latest file
output_model = json.loads(output_file.read())
# print("Input file " + str(input_file))
# print("Output file " + str(output_file))
for c_idx, cluster in enumerate(input_model["clusters"]):
for t_idx, task in enumerate(cluster["taskset"]):
# If some tasks didn't match a model, skip this data (almost never happens)
if len(input_model["clusters"][c_idx]["taskset"]) == len(output_model["clusters"][c_idx]["taskset"]):
input_task = input_model["clusters"][c_idx]["taskset"][t_idx]
output_task = output_model["clusters"][c_idx]["taskset"][t_idx]
periods_extracted.append(output_task["period"])
jitters.append(output_task["jitter"])
else:
print("*** Warning: Some models were not matched ***")
# Convert to ns for error calculation (extracted periods are already in ns)
periods = list(map(lambda x: x*10**6, periods))
# Add errors
for idx, _ in enumerate(periods):
errors.append(abs(periods_extracted[idx] - periods[idx]))
# Convert to ms for nice plotting
jitters = list(map(lambda x: float(x)*10**-6, jitters))
periods = list(map(lambda x: float(x)*10**-6, periods))
periods_extracted = list(map(lambda x: float(x)*10**-6, periods_extracted))
# Save average and max error for this value of B
# print("Errors replay" + str(errors))
# print("Period ground replay " + str(periods))
# print("Period extracted replay " + str(periods_extracted))
avg_errors.append(np.mean(errors))
max_errors.append(np.max(errors))
# Plot data (all runs)
plt.figure(5)
plt.scatter(b_values, avg_errors)
plt.xlabel("Buffer size")
plt.ylabel("Average absolute error (ns)")
plt.xscale("log")
plt.figure(6)
plt.scatter(b_values, max_errors)
plt.xlabel("Buffer size")
plt.ylabel("Maximum absolute error (ns)")
plt.xscale("log")
# Save plot figure
plt.figure(5)
plt.savefig(figures_path_dir + "errors_avg_buffer.png", bbox_inches='tight')
plt.figure(6)
plt.savefig(figures_path_dir + "errors_max_buffer.png", bbox_inches='tight')
#!/usr/bin/env python3
import subprocess
import time
import glob
import os
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.transforms as mtransforms
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--inputpath", type=str)
parser.add_argument("--replot", action="store_true")
args = parser.parse_args()
# Number of runs of this experiment
N_RUNS = 1
# Also plot single runs
PLOT_SINGLE = False
# Only plot the latest results, don't run RBFtrace
PLOT_INPUT = args.replot
workloads_path = "evaluation/workloads/uniprocessor_all/"
workloads = glob.glob(workloads_path + "*")
workloads.sort()
timestamp = time.strftime("%d-%m-%X", time.localtime())
figures_path = "../testing/output/plot_figures/"
figures_path_dir = figures_path + timestamp + "/"
data_path = "../testing/output/plot_data/"
data_path_dir = data_path + timestamp + "/"
# Plot already existing data, don't use RBFtrace
if PLOT_INPUT:
if not args.inputpath:
dirs = glob.glob(data_path + "/*")
data_path_dir = max(dirs, key=os.path.getctime) + "/" # latest dir
figures_path_dir = figures_path + os.path.basename(os.path.normpath(data_path_dir)) + "/"
else:
data_path_dir = data_path + args.inputpath + "/"
figures_path_dir = figures_path + args.inputpath + "/"
# Clean old plot pictures before re-plotting
old_pictures = glob.glob(figures_path_dir + "*")
for f in old_pictures:
os.remove(f)
# Create output dirs
else:
if not os.path.exists(figures_path):
os.mkdir(figures_path)
if not os.path.exists(figures_path_dir):
os.mkdir(figures_path_dir)
if not os.path.exists(data_path):
os.mkdir(data_path)
if not os.path.exists(data_path_dir):
os.mkdir(data_path_dir)
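# Note: the guarded mkdir calls above are equivalent to
# os.makedirs(figures_path_dir, exist_ok=True) and
# os.makedirs(data_path_dir, exist_ok=True), since makedirs also creates any
# missing parent directories.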
# Empirical CDF helper: returns bin edges and cumulative probabilities
def cdf(data, **kwargs):
hist, edges = np.histogram(data, **kwargs)
cdf = np.cumsum(hist)/len(data)
return edges, np.concatenate(([0],cdf))
# Data for all runs
periods = list()
periods_extracted = list()
jitters = list()
errors = list()
errors_rel = list()