@@ -5,29 +5,75 @@ From mathcomp Require Import div.
(** In this file we define the notion of a hyperperiod for periodic tasks. *)
Section Hyperperiod.
-(** Consider periodic tasks. *)
+(** Consider any type of periodic tasks ... *)
Context {Task : TaskType} `{PeriodicModel Task}.
-(** Consider any task set [ts]... *)
+(** ... and any task set [ts]. *)
Variable ts : TaskSet Task.
(** ... and any task [tsk] that belongs to this task set. *)
Variable tsk : Task.
Hypothesis H_tsk_in_ts : tsk \in ts.
(** The hyperperiod of a task set is defined as the least common multiple
(LCM) of the periods of all tasks in the task set. **)
Definition hyperperiod : duration := lcml (map task_period ts).
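(** For illustration: for a task set with periods 4, 6, and 10, the hyperperiod
    is lcm(4, 6, 10) = 60. The following is a standalone sketch of this
    arithmetic (assuming only mathcomp, and using the binary [lcmn] as a
    stand-in for the sequence-based [lcml]); it is not part of the original
    development. *)
From mathcomp Require Import all_ssreflect.
Example hyperperiod_of_4_6_10 : lcmn 4 (lcmn 6 10) = 60.
Proof. by []. Qed.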
(** Consequently, a task set's hyperperiod is an integral multiple
of each task's period in the task set. **)
Lemma hyperperiod_int_mult_of_any_task :
exists k, hyperperiod = k * task_period tsk.
Proof.
apply lcm_seq_is_mult_of_all_ints.
apply map_f.
by apply H_tsk_in_ts.
Qed.
End Hyperperiod.
(** In this section we provide basic definitions concerning the hyperperiod
of all tasks in a task set. *)
Section HyperperiodDefinitions.
(** Consider any type of periodic tasks ... *)
Context {Task : TaskType}.
Context `{TaskOffset Task}.
Context `{PeriodicModel Task}.
(** ... and any type of jobs. *)
Context {Job : JobType}.
Context `{JobTask Job Task}.
Context `{JobArrival Job}.
(** Consider any task set [ts] ... *)
Variable ts : TaskSet Task.
(** ... and any arrival sequence [arr_seq]. *)
Variable arr_seq : arrival_sequence Job.
(** Let [O_max] denote the maximum offset of all tasks in [ts] ... *)
Let O_max := max_task_offset ts.
(** ... and let [HP] denote the hyperperiod of all tasks in [ts]. *)
Let HP := hyperperiod ts.
(** We define the index of the hyperperiod that contains a given instant [t]. *)
(** Note that we consider the first hyperperiod to start at time [O_max],
i.e., shifted by the maximum offset (and not at time zero, as is sometimes
done in the literature). *)
Definition hyperperiod_index (t : instant) :=
(t - O_max) %/ HP.
(** Given an instant [t], we define the starting instant of the hyperperiod
that contains [t]. *)
Definition starting_instant_of_hyperperiod (t : instant) :=
hyperperiod_index t * HP + O_max.
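(** For illustration (with made-up numbers): with [O_max = 2] and [HP = 10],
    the instant [t = 25] lies in hyperperiod number [(25 - 2) %/ 10 = 2],
    which starts at instant [2 * 10 + 2 = 22]. A standalone sketch of this
    arithmetic, assuming only mathcomp: *)
From mathcomp Require Import all_ssreflect.
Example hyperperiod_index_sketch : (25 - 2) %/ 10 = 2.
Proof. by []. Qed.
Example starting_instant_sketch : ((25 - 2) %/ 10) * 10 + 2 = 22.
Proof. by []. Qed.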
(** Given a job [j], we define the starting instant of the hyperperiod
in which [j] arrives. *)
Definition starting_instant_of_corresponding_hyperperiod (j : Job) :=
starting_instant_of_hyperperiod (job_arrival j).
(** We define the sequence of jobs of a task [tsk] that arrive in a hyperperiod
given the starting instant [h] of the hyperperiod. *)
Definition jobs_in_hyperperiod (h : instant) (tsk : Task) :=
task_arrivals_between arr_seq tsk h (h + HP).
(** We define the index of a job [j] of task [tsk] in a hyperperiod starting at [h]. *)
Definition job_index_in_hyperperiod (j : Job) (h : instant) (tsk : Task) :=
index j (jobs_in_hyperperiod h tsk).
(** Given a job [j] of task [tsk] and the hyperperiod starting at [h], we define a
[corresponding_job_in_hyperperiod], which is the job that arrives in the given hyperperiod
and has the same [job_index] as [j]. *)
Definition corresponding_job_in_hyperperiod (j : Job) (h : instant) (tsk : Task) :=
nth j (jobs_in_hyperperiod h tsk) (job_index_in_hyperperiod j (starting_instant_of_corresponding_hyperperiod j) tsk).
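(** For illustration (with made-up numbers): if [j] is the job of [tsk] with
    index 1 within its own hyperperiod, then its corresponding job in the
    hyperperiod starting at [h] is the job of [tsk] with index 1 among the
    arrivals in <<[h, h + HP)>>, i.e., [nth j (jobs_in_hyperperiod h tsk) 1]. *)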
End HyperperiodDefinitions.
Require Export prosa.model.task.arrivals.
(** In this section we define the notion of an infinite release
of jobs by a task. *)
Section InfiniteJobs.
(** Consider any type of tasks ... *)
Context {Task : TaskType}.
(** ... and any type of jobs associated with these tasks. *)
Context {Job : JobType}.
Context `{JobTask Job Task}.
Context `{JobArrival Job}.
(** Consider any arrival sequence. *)
Variable arr_seq : arrival_sequence Job.
(** We say that a task [tsk] releases an infinite number of jobs
if for every integer [n] there exists a job [j] of task [tsk]
such that [job_index] of [j] is equal to [n]. *)
Definition infinite_jobs :=
forall tsk n,
exists j,
arrives_in arr_seq j /\
job_task j = tsk /\
job_index arr_seq j = n.
End InfiniteJobs.
Require Export prosa.behavior.all.
(** In this section we define what it means for the response time
of a job to exceed some given duration. *)
Section JobResponseTimeExceeds.
(** Consider any kind of jobs ... *)
Context {Job : JobType}.
Context `{JobCost Job}.
Context `{JobArrival Job}.
(** ... and any kind of processor state. *)
Context `{PState : Type}.
Context `{ProcessorState Job PState}.
(** Consider any schedule. *)
Variable sched : schedule PState.
(** We say that a job [j] has a response time exceeding a number [x]
if [j] is pending [x] units of time after its arrival. *)
Definition job_response_time_exceeds (j : Job) (x : duration) :=
~~ completed_by sched j ((job_arrival j) + x).
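(** For illustration (with made-up numbers): if [job_arrival j = 5] and [j] is
    still not completed by time [12] in [sched], then
    [job_response_time_exceeds j 7] holds, since [j] is pending [7] time units
    after its arrival. *)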
End JobResponseTimeExceeds.
Require Export prosa.analysis.definitions.hyperperiod.
Require Export prosa.analysis.facts.periodic.task_arrivals_size.
Require Export prosa.util.div_mod.
(** In this file we prove some simple properties of hyperperiods of periodic tasks. *)
Section Hyperperiod.
(** Consider any type of periodic tasks, ... *)
Context {Task : TaskType} `{PeriodicModel Task}.
(** ... any task set [ts], ... *)
Variable ts : TaskSet Task.
(** ... and any task [tsk] that belongs to this task set. *)
Variable tsk : Task.
Hypothesis H_tsk_in_ts: tsk \in ts.
(** A task set's hyperperiod is an integral multiple
of each task's period in the task set. **)
Lemma hyperperiod_int_mult_of_any_task:
exists (k : nat),
hyperperiod ts = k * task_period tsk.
Proof.
apply lcm_seq_is_mult_of_all_ints.
apply map_f.
now apply H_tsk_in_ts.
Qed.
End Hyperperiod.
(** In this section we show a property of the hyperperiod in the context
of task sets with valid periods. *)
Section ValidPeriodsImplyPositiveHP.
(** Consider any type of periodic tasks ... *)
Context {Task : TaskType} `{PeriodicModel Task}.
(** ... and any task set [ts] ... *)
Variable ts : TaskSet Task.
(** ... such that all tasks in [ts] have valid periods. *)
Hypothesis H_valid_periods: valid_periods ts.
(** We show that the hyperperiod of task set [ts]
is positive. *)
Lemma valid_periods_imply_pos_hp:
hyperperiod ts > 0.
Proof.
apply all_pos_implies_lcml_pos.
move => b /mapP [x IN EQ]; subst b.
now apply H_valid_periods.
Qed.
End ValidPeriodsImplyPositiveHP.
(** In this section we prove some lemmas about the hyperperiod
in the context of the periodic model. *)
Section PeriodicLemmas.
(** Consider any type of tasks, ... *)
Context {Task : TaskType}.
Context `{TaskOffset Task}.
Context `{PeriodicModel Task}.
(** ... any type of jobs, ... *)
Context {Job : JobType}.
Context `{JobTask Job Task}.
Context `{JobArrival Job}.
(** ... and a consistent arrival sequence with non-duplicate arrivals. *)
Variable arr_seq : arrival_sequence Job.
Hypothesis H_consistent_arrivals: consistent_arrival_times arr_seq.
Hypothesis H_uniq_arr_seq: arrival_sequence_uniq arr_seq.
(** Consider a task set [ts] such that all tasks in
[ts] have valid periods. *)
Variable ts : TaskSet Task.
Hypothesis H_valid_periods: valid_periods ts.
(** Let [tsk] be any periodic task in [ts] with a valid offset and period. *)
Variable tsk : Task.
Hypothesis H_task_in_ts: tsk \in ts.
Hypothesis H_valid_offset: valid_offset arr_seq tsk.
Hypothesis H_valid_period: valid_period tsk.
Hypothesis H_periodic_task: respects_periodic_task_model arr_seq tsk.
(** Assume we have an infinite sequence of jobs in the arrival sequence. *)
Hypothesis H_infinite_jobs: infinite_jobs arr_seq.
(** Let [O_max] denote the maximum task offset in [ts] and let
[HP] denote the hyperperiod of all tasks in [ts]. *)
Let O_max := max_task_offset ts.
Let HP := hyperperiod ts.
(** We show that the job corresponding to any job [j1] in any other
hyperperiod is of the same task as [j1]. *)
Lemma corresponding_jobs_have_same_task:
forall j1 j2,
job_task (corresponding_job_in_hyperperiod ts arr_seq j1
(starting_instant_of_corresponding_hyperperiod ts j2) (job_task j1)) = job_task j1.
Proof.
intros *.
set ARRIVALS := (task_arrivals_between arr_seq (job_task j1) (starting_instant_of_hyperperiod ts (job_arrival j2))
(starting_instant_of_hyperperiod ts (job_arrival j2) + HP)).
set IND := (job_index_in_hyperperiod ts arr_seq j1 (starting_instant_of_hyperperiod ts (job_arrival j1)) (job_task j1)).
have SIZE_G : size ARRIVALS <= IND -> job_task (nth j1 ARRIVALS IND) = job_task j1 by intro SG; rewrite nth_default.
case: (boolP (size ARRIVALS == IND)) => [/eqP EQ|NEQ]; first by apply SIZE_G; ssrlia.
move : NEQ; rewrite neq_ltn; move => /orP [LT | G]; first by apply SIZE_G; ssrlia.
set jb := nth j1 ARRIVALS IND.
have JOB_IN : jb \in ARRIVALS by apply mem_nth.
rewrite /ARRIVALS /task_arrivals_between mem_filter in JOB_IN.
now move : JOB_IN => /andP [/eqP TSK JB_IN].
Qed.
(** We show that if a job [j] lies in the hyperperiod starting
at instant [t] then [j] arrives in the interval <<[t, t + HP)>>. *)
Lemma all_jobs_arrive_within_hyperperiod:
forall j t,
j \in jobs_in_hyperperiod ts arr_seq t tsk ->
t <= job_arrival j < t + HP.
Proof.
intros * JB_IN_HP.
rewrite mem_filter in JB_IN_HP.
move : JB_IN_HP => /andP [/eqP TSK JB_IN]; apply mem_bigcat_nat_exists in JB_IN.
destruct JB_IN as [i [JB_IN INEQ]]; apply H_consistent_arrivals in JB_IN.
now rewrite JB_IN.
Qed.
(** We show that the number of jobs in a hyperperiod starting at [n1 * HP + O_max]
is the same as the number of jobs in a hyperperiod starting at [n2 * HP + O_max] given
that [n1] is less than or equal to [n2]. *)
Lemma eq_size_hyp_lt:
forall n1 n2,
n1 <= n2 ->
size (jobs_in_hyperperiod ts arr_seq (n1 * HP + O_max) tsk) =
size (jobs_in_hyperperiod ts arr_seq (n2 * HP + O_max) tsk).
Proof.
intros * N1_LT.
specialize (add_mul_diff n1 n2 HP O_max) => AD; feed_n 1 AD => //.
rewrite AD.
destruct (hyperperiod_int_mult_of_any_task ts tsk H_task_in_ts) as [k HYP]; rewrite !/HP.
rewrite [in X in _ = size (_ (n1 * HP + O_max + _ * X) tsk)]HYP.
rewrite mulnA /HP /jobs_in_hyperperiod !size_of_task_arrivals_between.
erewrite big_sum_eq_in_eq_sized_intervals => //; intros g G_LT.
have OFF_G : task_offset tsk <= O_max by apply max_offset_g.
have FG : forall v b n, v + b + n = v + n + b by intros *; ssrlia.
erewrite eq_size_of_task_arrivals_seperated_by_period => //; last by ssrlia.
now rewrite FG.
Qed.
(** We generalize the above lemma by lifting the condition on
[n1] and [n2]. *)
Lemma eq_size_of_arrivals_in_hyperperiod:
forall n1 n2,
size (jobs_in_hyperperiod ts arr_seq (n1 * HP + O_max) tsk) =
size (jobs_in_hyperperiod ts arr_seq (n2 * HP + O_max) tsk).
Proof.
intros *.
case : (boolP (n1 == n2)) => [/eqP EQ | NEQ]; first by rewrite EQ.
move : NEQ; rewrite neq_ltn; move => /orP [LT | LT].
+ now apply eq_size_hyp_lt => //; ssrlia.
+ move : (eq_size_hyp_lt n2 n1) => EQ_S.
now feed_n 1 EQ_S => //; ssrlia.
Qed.
(** Consider any two jobs [j1] and [j2] that stem from the arrival sequence
[arr_seq] such that [j1] is of task [tsk]. *)
Variable j1 : Job.
Variable j2 : Job.
Hypothesis H_j1_from_arr_seq: arrives_in arr_seq j1.
Hypothesis H_j2_from_arr_seq: arrives_in arr_seq j2.
Hypothesis H_j1_task: job_task j1 = tsk.
(** Assume that both [j1] and [j2] arrive after [O_max]. *)
Hypothesis H_j1_arr_after_O_max: O_max <= job_arrival j1.
Hypothesis H_j2_arr_after_O_max: O_max <= job_arrival j2.
(** We show that any job [j] that arrives in the hyperperiod containing [j2]'s
arrival also appears in the task arrivals up to [job_arrival j2 + HP]. *)
Lemma job_in_hp_arrives_in_task_arrivals_up_to:
forall j,
j \in jobs_in_hyperperiod ts arr_seq ((job_arrival j2 - O_max) %/ HP * HP + O_max) tsk ->
j \in task_arrivals_up_to arr_seq tsk (job_arrival j2 + HP).
Proof.
intros j J_IN.
rewrite /task_arrivals_up_to.
set jobs_in_hp := (jobs_in_hyperperiod ts arr_seq ((job_arrival j2 - O_max) %/ HP * HP + O_max) tsk).
move : (J_IN) => J_ARR; apply all_jobs_arrive_within_hyperperiod in J_IN.
rewrite /jobs_in_hp /jobs_in_hyperperiod /task_arrivals_up_to /task_arrivals_between mem_filter in J_ARR.
move : J_ARR => /andP [/eqP TSK' NTH_IN].
apply job_in_task_arrivals_between => //; first by apply in_arrivals_implies_arrived in NTH_IN.
apply mem_bigcat_nat_exists in NTH_IN.
move : NTH_IN => [i [NJ_IN INEQ]]; apply H_consistent_arrivals in NJ_IN; rewrite -NJ_IN in INEQ.
apply /andP; split => //.
rewrite ltnS.
apply leq_trans with (n := (job_arrival j2 - O_max) %/ HP * HP + O_max + HP); first by ssrlia.
rewrite leq_add2r.
have O_M : (job_arrival j2 - O_max) %/ HP * HP <= job_arrival j2 - O_max by apply leq_trunc_div.
have ARR_G : job_arrival j2 >= O_max by auto.
now ssrlia.
Qed.
(** We show that job [j1] arrives in its own hyperperiod. *)
Lemma job_in_own_hp:
j1 \in jobs_in_hyperperiod ts arr_seq ((job_arrival j1 - O_max) %/ HP * HP + O_max) tsk.
Proof.
apply job_in_task_arrivals_between => //.
apply /andP; split.
+ rewrite addnC -leq_subRL => //.
now apply leq_trunc_div.
+ specialize (div_floor_add_g (job_arrival j1 - O_max) HP) => AB.
feed_n 1 AB; first by apply valid_periods_imply_pos_hp => //.
apply ltn_subLR in AB.
now rewrite -/(HP); ssrlia.
Qed.
(** We show that the [corresponding_job_in_hyperperiod] of [j1] in [j2]'s hyperperiod
arrives in task arrivals up to [job_arrival j2 + HP]. *)
Lemma corr_job_in_task_arrivals_up_to:
corresponding_job_in_hyperperiod ts arr_seq j1 (starting_instant_of_corresponding_hyperperiod ts j2) tsk \in
task_arrivals_up_to arr_seq tsk (job_arrival j2 + HP).
Proof.
rewrite /corresponding_job_in_hyperperiod /starting_instant_of_corresponding_hyperperiod.
rewrite /job_index_in_hyperperiod /starting_instant_of_hyperperiod /hyperperiod_index.
set ind := (index j1 (jobs_in_hyperperiod ts arr_seq ((job_arrival j1 - O_max) %/ HP * HP + O_max) tsk)).
set jobs_in_hp := (jobs_in_hyperperiod ts arr_seq ((job_arrival j2 - O_max) %/ HP * HP + O_max) tsk).
set nj := nth j1 jobs_in_hp ind.
apply job_in_hp_arrives_in_task_arrivals_up_to => //.
rewrite mem_nth /jobs_in_hp => //.
specialize (eq_size_of_arrivals_in_hyperperiod ((job_arrival j2 - O_max) %/ HP) ((job_arrival j1 - O_max) %/ HP)) => EQ.
rewrite EQ /ind index_mem.
now apply job_in_own_hp.
Qed.
(** Finally, we show that the [corresponding_job_in_hyperperiod] of [j1] in [j2]'s hyperperiod
arrives in the arrival sequence [arr_seq]. *)
Lemma corresponding_job_arrives:
arrives_in arr_seq (corresponding_job_in_hyperperiod ts arr_seq j1 (starting_instant_of_corresponding_hyperperiod ts j2) tsk).
Proof.
move : (corr_job_in_task_arrivals_up_to) => ARR_G.
rewrite /task_arrivals_up_to /task_arrivals_between mem_filter in ARR_G.
move : ARR_G => /andP [/eqP TSK' NTH_IN].
now apply in_arrivals_implies_arrived in NTH_IN.
Qed.
End PeriodicLemmas.
Require Export prosa.model.task.offset.
Require Export prosa.analysis.facts.job_index.
-(** In this module, we'll prove a property of task offsets. *)
+(** In this module, we prove some properties of task offsets. *)
Section OffsetLemmas.
(** Consider any type of tasks with an offset ... *)
@@ -13,12 +13,13 @@ Section OffsetLemmas.
Context `{JobTask Job Task}.
Context `{JobArrival Job}.
-(** Consider any arrival sequence with consistent and non-duplicate arrivals, ... *)
+(** Consider any arrival sequence with consistent and non-duplicate arrivals ... *)
Variable arr_seq : arrival_sequence Job.
Hypothesis H_consistent_arrivals: consistent_arrival_times arr_seq.
Hypothesis H_uniq_arr_seq: arrival_sequence_uniq arr_seq.
-(** ... and any job [j] of any task [tsk] with a valid offset. *)
+(** ... and any job [j] (that stems from the arrival sequence) of any
+task [tsk] with a valid offset. *)
Variable tsk : Task.
Variable j : Job.
Hypothesis H_job_of_task: job_task j = tsk.
@@ -43,5 +44,20 @@ Section OffsetLemmas.
now apply/eqP; rewrite eqn_leq; apply/andP; split;
[ssrlia | apply H_valid_offset].
Qed.
(** Consider any task set [ts]. *)
Variable ts : TaskSet Task.
(** If task [tsk] is in [ts], then its offset
is less than or equal to the maximum offset of all tasks
in [ts]. *)
Lemma max_offset_g:
tsk \in ts ->
task_offset tsk <= max_task_offset ts.
Proof.
intros TSK_IN.
apply in_max0_le.
now apply map_f.
Qed.
End OffsetLemmas.
@@ -13,7 +13,7 @@ Section TaskArrivals.
(** Consider any job arrival sequence with consistent arrivals. *)
Variable arr_seq : arrival_sequence Job.
-Hypothesis H_consistent_arrivals : consistent_arrival_times arr_seq.
+Hypothesis H_consistent_arrivals: consistent_arrival_times arr_seq.
(** We show that the number of arrivals of a task can be split into disjoint intervals. *)
Lemma num_arrivals_of_task_cat:
@@ -133,19 +133,30 @@ Section TaskArrivals.
now apply arrived_between_implies_in_arrivals.
Qed.
(** Any job [j] in [task_arrivals_between arr_seq tsk t1 t2] arrives
in the arrival sequence [arr_seq]. *)
Lemma arrives_in_task_arrivals_implies_arrived:
forall t1 t2 j,
j \in (task_arrivals_between arr_seq tsk t1 t2) ->
arrives_in arr_seq j.
Proof.
intros * JB_IN.
move : JB_IN; rewrite mem_filter; move => /andP [/eqP TSK JB_IN].
now apply in_arrivals_implies_arrived in JB_IN.
Qed.
(** An arrival sequence with non-duplicate arrivals implies that the
task arrivals also contain non-duplicate arrivals. *)
Lemma uniq_task_arrivals:
-forall j,
-arrives_in arr_seq j ->
+forall t,
arrival_sequence_uniq arr_seq ->
-uniq (task_arrivals_up_to arr_seq (job_task j) (job_arrival j)).
+uniq (task_arrivals_up_to arr_seq tsk t).
Proof.
-intros j ARR UNQ_ARR.
+intros * UNQ_SEQ.
apply filter_uniq.
now apply arrivals_uniq.
Qed.
(** A job cannot arrive before its arrival time. *)
Lemma job_notin_task_arrivals_before:
forall j t,
@@ -180,5 +191,58 @@ Section TaskArrivals.
- rewrite /task_arrivals_up_to_job_arrival TSK1 TSK2.
now rewrite -task_arrivals_cat; try by ssrlia.
Qed.
(** For any job [j] of task [tsk] with [job_index] equal to [n], the nth job
in the sequence [task_arrivals_up_to arr_seq tsk t] is [j], given that
[t] is not less than [job_arrival j]. *)
(** Note that [j_def] is used as a default job for the access function and
has nothing to do with the lemma. *)
Lemma nth_job_of_task_arrivals:
forall n j_def j t,
arrives_in arr_seq j ->
job_task j = tsk ->
job_index arr_seq j = n ->
t >= job_arrival j ->
nth j_def (task_arrivals_up_to arr_seq tsk t) n = j.
Proof.
intros * ARR TSK IND T_G.
rewrite -IND.
have EQ_IND : index j (task_arrivals_up_to_job_arrival arr_seq j) = index j (task_arrivals_up_to arr_seq tsk t).
{ have CAT : exists xs, task_arrivals_up_to_job_arrival arr_seq j ++ xs = task_arrivals_up_to arr_seq tsk t.
{ rewrite /task_arrivals_up_to_job_arrival TSK.
exists (task_arrivals_between arr_seq tsk ((job_arrival j).+1) t.+1).
now rewrite -task_arrivals_cat.
}
move : CAT => [xs ARR_CAT].
now rewrite -ARR_CAT index_cat ifT; last by apply arrives_in_task_arrivals_up_to.
}
rewrite /job_index EQ_IND nth_index => //.
rewrite mem_filter; apply /andP.
split; first by apply /eqP.
now apply job_in_arrivals_between => //.
Qed.
(** We show that the task arrivals in the interval <<[t1, t2)>>
are the same as the concatenation of the task arrivals at each instant in <<[t1, t2)>>. *)
Lemma task_arrivals_between_is_cat_of_task_arrivals_at:
forall t1 t2,
task_arrivals_between arr_seq tsk t1 t2 = \cat_(t1 <= t < t2) task_arrivals_at arr_seq tsk t.
Proof.
intros *.
rewrite /task_arrivals_between /task_arrivals_at /arrivals_between.
now apply cat_filter_eq_filter_cat.
Qed.
(** The number of jobs of a task [tsk] in the interval <<[t1, t2)>> is the same
as the sum of the numbers of jobs of [tsk] arriving at each instant in <<[t1, t2)>>. *)
Lemma size_of_task_arrivals_between:
forall t1 t2,
size (task_arrivals_between arr_seq tsk t1 t2)
= \sum_(t1 <= t < t2) size (task_arrivals_at arr_seq tsk t).
Proof.
intros *.
rewrite /task_arrivals_between /task_arrivals_at /arrivals_between.
now rewrite size_big_nat cat_filter_eq_filter_cat.
Qed.
End TaskArrivals.
@@ -49,5 +49,47 @@ Section PeriodicArrivalTimes.
rewrite ARRIVAL IHn; ssrlia.
}
Qed.
(** We show that for every job [j] of task [tsk] there exists a number
[n] such that [j] arrives at the instant [task_offset tsk + n * task_period tsk]. *)
Lemma job_arrival_times:
forall j,
arrives_in arr_seq j ->
job_task j = tsk ->
exists n, job_arrival j = task_offset tsk + n * task_period tsk.
Proof.
intros * ARR TSK.
exists (job_index arr_seq j).
specialize (periodic_arrival_times (job_index arr_seq j) j) => J_ARR.
now feed_n 3 J_ARR => //.
Qed.
(** If a job [j] of task [tsk] arrives at [task_offset tsk + n * task_period tsk]
then the [job_index] of [j] is equal to [n]. *)
Lemma job_arr_index:
forall n j,
arrives_in arr_seq j ->
job_task j = tsk ->
job_arrival j = task_offset tsk + n * task_period tsk ->
job_index arr_seq j = n.
Proof.
have F : task_period tsk > 0 by auto.
induction n.
+ intros * ARR_J TSK ARR.
destruct (PeanoNat.Nat.zero_or_succ (job_index arr_seq j)) as [Z | [m SUCC]] => //.
now apply periodic_arrival_times in SUCC => //; ssrlia.
+ intros * ARR_J TSK ARR.
specialize (H_task_respects_periodic_model j); feed_n 3 H_task_respects_periodic_model => //.
{ rewrite lt0n; apply /eqP; intro EQ.
apply first_job_arrival with (tsk0 := tsk) in EQ => //.
now rewrite EQ in ARR; ssrlia.
}
move : H_task_respects_periodic_model => [j' [ARR' [IND' [TSK' ARRIVAL']]]].
specialize (IHn j'); feed_n 3 IHn => //; first by rewrite ARR in ARRIVAL'; ssrlia.
rewrite IHn in IND'.
destruct (PeanoNat.Nat.zero_or_succ (job_index arr_seq j)) as [Z | [m SUCC]]; last by ssrlia.
apply first_job_arrival with (tsk0 := tsk) in Z => //.
now rewrite Z in ARR; ssrlia.
Qed.
End PeriodicArrivalTimes.
Require Export prosa.analysis.facts.periodic.arrival_times.
Require Export prosa.analysis.definitions.infinite_jobs.
(** In this file we prove some properties concerning the size
of task arrivals in the context of the periodic model. *)
Section TaskArrivalsSize.
(** Consider any type of periodic tasks with an offset ... *)
Context {Task : TaskType}.
Context `{TaskOffset Task}.
Context `{PeriodicModel Task}.
(** ... and any type of jobs associated with these tasks. *)
Context {Job : JobType}.
Context `{JobTask Job Task}.
Context `{JobArrival Job}.
(** Consider any unique arrival sequence with consistent arrivals ... *)
Variable arr_seq : arrival_sequence Job.
Hypothesis H_consistent_arrivals: consistent_arrival_times arr_seq.
Hypothesis H_uniq_arr_seq: arrival_sequence_uniq arr_seq.
(** ... and any periodic task [tsk] with a valid offset and period. *)
Variable tsk : Task.
Hypothesis H_valid_offset: valid_offset arr_seq tsk.
Hypothesis H_valid_period: valid_period tsk.
Hypothesis H_task_respects_periodic_model: respects_periodic_task_model arr_seq tsk.
(** We show that if an instant [t] is not an "arrival time" for
task [tsk] then [task_arrivals_at arr_seq tsk t] is an empty sequence. *)
Lemma task_arrivals_size_at_non_arrival:
forall t,
(forall n, t <> task_offset tsk + n * task_period tsk) ->
task_arrivals_at arr_seq tsk t = [::].
Proof.
intros * T.
have EMPT_OR_EXISTS : forall xs, xs = [::] \/ exists a, a \in xs.
{ intros *.
induction xs; first by left.
right; exists a.
now apply mem_head.
}
destruct (EMPT_OR_EXISTS Job (task_arrivals_at arr_seq tsk t)) as [EMPT | [a A_IN]] => //.
rewrite /task_arrivals_at mem_filter in A_IN; move : A_IN => /andP [/eqP TSK A_ARR].
move : (A_ARR) => A_IN; apply H_consistent_arrivals in A_IN.
rewrite -A_IN in T; rewrite /arrivals_at in A_ARR.
apply in_arrseq_implies_arrives in A_ARR.
have EXISTS_N : exists n, job_arrival a = task_offset tsk + n * task_period tsk by
apply job_arrival_times with (arr_seq0 := arr_seq) => //.
move : EXISTS_N => [n A_ARRIVAL].
now move : (T n) => T1.
Qed.
(** We show that at any instant [t], at most one job of task [tsk]
can arrive (i.e., the size of [task_arrivals_at arr_seq tsk t] is at most one). *)
Lemma task_arrivals_at_size_cases:
forall t,
size (task_arrivals_at arr_seq tsk t) = 0 \/
size (task_arrivals_at arr_seq tsk t) = 1.
Proof.
intro t.
case: (ltngtP (size (task_arrivals_at arr_seq tsk t)) 1) => [LT|GT|EQ]; try by auto.
destruct (size (task_arrivals_at arr_seq tsk t)); now left.
specialize (exists_two Job (task_arrivals_at arr_seq tsk t)) => EXISTS_TWO.
destruct EXISTS_TWO as [a [b [NEQ [A_IN B_IN]]]]; [by done | by apply filter_uniq | ].
rewrite mem_filter in A_IN; rewrite mem_filter in B_IN.
move: A_IN B_IN => /andP [/eqP TSKA ARRA] /andP [/eqP TSKB ARRB].
move: (ARRA); move: (ARRB); rewrite /arrivals_at => A_IN B_IN.
apply in_arrseq_implies_arrives in A_IN; apply in_arrseq_implies_arrives in B_IN.
have SPO : respects_sporadic_task_model arr_seq tsk; try by auto with basic_facts.
have EQ_ARR_A : (job_arrival a = t) by apply H_consistent_arrivals.
have EQ_ARR_B : (job_arrival b = t) by apply H_consistent_arrivals.
specialize (SPO a b); feed_n 6 SPO => //; try by ssrlia.
rewrite EQ_ARR_A EQ_ARR_B in SPO.
rewrite /task_min_inter_arrival_time /periodic_as_sporadic in SPO.
have POS : task_period tsk > 0 by auto.
now ssrlia.
Qed.
(** We show that the size of task arrivals (strictly) between two consecutive arrival
times is zero. *)
Lemma size_task_arrivals_between_eq0:
forall n,
let l := (task_offset tsk + n * task_period tsk).+1 in
let r := (task_offset tsk + n.+1 * task_period tsk) in
size (task_arrivals_between arr_seq tsk l r) = 0.
Proof.
intros n l r; rewrite /l /r.
rewrite size_of_task_arrivals_between big_nat_eq0 => //; intros t INEQ.
rewrite task_arrivals_size_at_non_arrival => //; intros n1 EQ.
rewrite EQ in INEQ.
move : INEQ => /andP [INEQ1 INEQ2].
rewrite ltn_add2l ltn_mul2r in INEQ1; rewrite ltn_add2l ltn_mul2r in INEQ2.
move : INEQ1 INEQ2 => /andP [A B] /andP [C D].
now ssrlia.
Qed.
(** In this section we show some properties of task arrivals in the case
of an infinite sequence of jobs. *)
Section TaskArrivalsInCaseOfInfiniteJobs.
(** Assume that we have an infinite sequence of jobs. *)
Hypothesis H_infinite_jobs: infinite_jobs arr_seq.
(** We show that for any number [n], there exists a job [j] of task [tsk]
such that [job_index] of [j] is equal to [n] and [j] arrives
at [task_offset tsk + n * task_period tsk]. *)
Lemma jobs_exists_later:
forall n,
exists j,
arrives_in arr_seq j /\
job_task j = tsk /\
job_arrival j = task_offset tsk + n * task_period tsk /\
job_index arr_seq j = n.
Proof.
intros *.
destruct (H_infinite_jobs tsk n) as [j [ARR [TSK IND]]].
exists j; repeat split => //.
now apply periodic_arrival_times with (arr_seq0 := arr_seq) => //.
Qed.
(** We show that the size of task arrivals at any arrival time is equal to one. *)
Lemma task_arrivals_at_size:
forall n,
let l := (task_offset tsk + n * task_period tsk) in
size (task_arrivals_at arr_seq tsk l) = 1.
Proof.
intros n l; rewrite /l.
move : (jobs_exists_later n) => [j' [ARR [TSK [ARRIVAL IND]]]].
apply only_j_in_task_arrivals_at_j with (tsk0 := tsk) in ARR => //; last by
auto with basic_facts.
rewrite /task_arrivals_at_job_arrival TSK in ARR.
now rewrite -ARRIVAL ARR.
Qed.
(** We show that the size of task arrivals up to [task_offset tsk] is equal to one. *)
Lemma size_task_arrivals_up_to_offset:
size (task_arrivals_up_to arr_seq tsk (task_offset tsk)) = 1.
Proof.
rewrite /task_arrivals_up_to.
specialize (task_arrivals_between_cat arr_seq tsk 0 (task_offset tsk) (task_offset tsk).+1) => CAT.
feed_n 2 CAT => //; rewrite CAT size_cat.
have Z : size (task_arrivals_between arr_seq tsk 0 (task_offset tsk)) = 0.
{ rewrite size_of_task_arrivals_between big_nat_eq0 => //; intros t T_EQ.
rewrite task_arrivals_size_at_non_arrival => //; intros n EQ.
now ssrlia.
}
rewrite Z add0n /task_arrivals_between /arrivals_between big_nat1.
specialize (task_arrivals_at_size 0) => AT_SIZE.
now rewrite mul0n addn0 in AT_SIZE.
Qed.
(** We show that for any number [n], the number of jobs released by task [tsk] up to
[task_offset tsk + n * task_period tsk] is equal to [n + 1]. *)
Lemma task_arrivals_up_to_size:
forall n,
let l := (task_offset tsk + n * task_period tsk) in
let r := (task_offset tsk + n.+1 * task_period tsk) in
size (task_arrivals_up_to arr_seq tsk l) = n + 1.
Proof.
induction n.
intros l r; rewrite /l mul0n add0n addn0.
now apply size_task_arrivals_up_to_offset.
intros l r.
specialize (task_arrivals_cat arr_seq tsk (task_offset tsk + n * task_period tsk)
(task_offset tsk + n.+1 * task_period tsk)) => CAT.
feed_n 1 CAT; first by ssrlia.
rewrite CAT size_cat IHn.
specialize (task_arrivals_between_cat arr_seq tsk (task_offset tsk + n * task_period tsk).+1
(task_offset tsk + n.+1 * task_period tsk) (task_offset tsk + n.+1 * task_period tsk).+1) => S_CAT.
feed_n 2 S_CAT; try by ssrlia.
{ rewrite ltn_add2l ltn_mul2r.
now apply /andP; split => //.
}
rewrite S_CAT size_cat /task_arrivals_between /arrivals_between big_nat1.
rewrite size_task_arrivals_between_eq0 task_arrivals_at_size => //.
now ssrlia.
Qed.
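(** For illustration (with made-up numbers): for a task with offset 2 and
    period 5, the arrivals up to [2 + 3 * 5 = 17] occur at the instants 2, 7,
    12, and 17, i.e., [3 + 1 = 4] jobs in total. A standalone sketch of this
    arrival pattern, assuming only mathcomp: *)
From mathcomp Require Import all_ssreflect.
Example periodic_arrival_pattern_sketch :
  [seq 2 + k * 5 | k <- iota 0 4] = [:: 2; 7; 12; 17].
Proof. by []. Qed.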
(** We show that the number of jobs released by task [tsk] at any instant [t]
and at [t + n * task_period tsk] is the same, for any number [n]. *)
Lemma eq_size_of_task_arrivals_seperated_by_period:
forall n t,
t >= task_offset tsk ->
size (task_arrivals_at arr_seq tsk t) =
size (task_arrivals_at arr_seq tsk (t + n * task_period tsk)).
Proof.
intros * T_G.
destruct (exists_or_not_add_mul_cases (task_offset tsk) (task_period tsk) t) as [[n1 JB_ARR] | JB_NOT_ARR].
+ have EXISTS_N : exists nn, t + n * task_period tsk = task_offset tsk + nn * task_period tsk.
{ exists (n1 + n).
now rewrite JB_ARR; ssrlia.
}
move : EXISTS_N => [nn JB_ARR'].
now rewrite JB_ARR' JB_ARR !task_arrivals_at_size => //.
+ have FORALL_N : forall nn, t + n * task_period tsk <> task_offset tsk + nn * task_period tsk by apply mul_add_neq.
now rewrite !task_arrivals_size_at_non_arrival.
Qed.
End TaskArrivalsInCaseOfInfiniteJobs.
End TaskArrivalsSize.
Require Export prosa.analysis.facts.periodic.arrival_times.
Require Export prosa.analysis.facts.periodic.task_arrivals_size.
Require Export prosa.model.task.concept.
Require Export prosa.analysis.facts.hyperperiod.
(** In this file we define a new function for job costs
in an observation interval and prove its validity. *)
Section ValidJobCostsShifted.
(** Consider any type of periodic tasks ... *)
Context {Task : TaskType}.
Context `{TaskOffset Task}.
Context `{PeriodicModel Task}.
Context `{TaskCost Task}.
(** ... and any type of jobs. *)
Context {Job : JobType}.
Context `{JobTask Job Task}.
Context `{JobArrival Job}.
Context `{JobCost Job}.
Context `{JobDeadline Job}.
(** Consider a consistent arrival sequence with non-duplicate arrivals. *)
Variable arr_seq : arrival_sequence Job.
Hypothesis H_consistent_arrivals: consistent_arrival_times arr_seq.
Hypothesis H_uniq_arr_seq: arrival_sequence_uniq arr_seq.
(** Furthermore, assume that arrivals have valid job costs. *)
Hypothesis H_arrivals_have_valid_job_costs: arrivals_have_valid_job_costs arr_seq.
(** Consider a periodic task set [ts] such that all tasks in
[ts] have valid periods and offsets. *)
Variable ts : TaskSet Task.
Hypothesis H_periodic_taskset: taskset_respects_periodic_task_model arr_seq ts.
Hypothesis H_valid_periods_in_taskset: valid_periods ts.
Hypothesis H_valid_offsets_in_taskset: valid_offsets arr_seq ts.
(** Consider a job [j] that stems from the arrival sequence. *)
Variable j : Job.
Hypothesis H_j_from_arrival_sequence: arrives_in arr_seq j.
(** Let [O_max] denote the maximum task offset of all tasks in [ts] ... *)
Let O_max := max_task_offset ts.
(** ... and let [HP] denote the hyperperiod of all tasks in [ts]. *)
Let HP := hyperperiod ts.
(** We now define a new function for job costs in the observation interval. *)
(** Given that job [j] arrives after [O_max], the cost of a job [j']
that arrives in the interval <<[O_max + HP, O_max + 2HP)>> is defined to
be the same as the job cost of its corresponding job in [j]'s hyperperiod. *)
Definition job_costs_shifted (j' : Job) :=
if (job_arrival j >= O_max) && (O_max + HP <= job_arrival j' < O_max + 2 * HP) then
job_cost (corresponding_job_in_hyperperiod ts arr_seq j' (starting_instant_of_corresponding_hyperperiod ts j) (job_task j'))
else job_cost j'.
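(** For illustration (with made-up numbers): with [O_max = 3] and [HP = 12],
    and given that [j] arrives at time [5 >= O_max], a job [j'] arriving at
    time [20], which lies in <<[O_max + HP, O_max + 2 * HP)>> = <<[15, 27)>>,
    is assigned the cost of its corresponding job in [j]'s hyperperiod; any
    other job keeps its original cost. *)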
(** Assume that we have an infinite sequence of jobs. *)
Hypothesis H_infinite_jobs: infinite_jobs arr_seq.
(** Assume all jobs in the arrival sequence [arr_seq] belong to some task
in [ts]. *)
Hypothesis H_jobs_from_taskset: all_jobs_from_taskset arr_seq ts.
(** We assign the job costs as defined by the [job_costs_shifted] function. *)
Instance job_costs_in_oi : JobCost Job :=
job_costs_shifted.
(** We show that the [job_costs_shifted] function is valid. *)
Lemma job_costs_shifted_valid: arrivals_have_valid_job_costs arr_seq.
Proof.
rewrite /arrivals_have_valid_job_costs /valid_job_cost.
intros j' ARR.
unfold job_cost; rewrite /job_costs_in_oi /job_costs_shifted.
destruct (leqP O_max (job_arrival j)) as [A | B].
destruct (leqP (O_max + HP) (job_arrival j')) as [NEQ | NEQ].
destruct (leqP (O_max + 2 * HP) (job_arrival j')) as [LT | LT].
all : try by apply H_arrivals_have_valid_job_costs => //.
simpl.
specialize (corresponding_jobs_have_same_task arr_seq ts j' j) => TSK.
rewrite -[in X in _ <= task_cost X]TSK.
have IN : job_task j' \in ts by apply H_jobs_from_taskset.
apply H_arrivals_have_valid_job_costs, corresponding_job_arrives => //.
+ now apply H_valid_offsets_in_taskset.
+ now apply H_valid_periods_in_taskset.
+ now apply H_periodic_taskset.
+ now ssrlia.
Qed.
End ValidJobCostsShifted.
@@ -89,7 +89,7 @@ Section DifferentJobsImplyDifferentArrivalTimes.
Hypothesis H_j1_task: job_task j1 = tsk.
Hypothesis H_j2_task: job_task j2 = tsk.
-(** We observe that unequal jobs cannot have equal arrival times. *)
+(** We observe that distinct jobs cannot have equal arrival times. *)
Lemma uneq_job_uneq_arr:
j1 <> j2 ->
job_arrival j1 <> job_arrival j2.
@@ -99,7 +99,22 @@ Section DifferentJobsImplyDifferentArrivalTimes.
move /neqP: UNEQ; rewrite neq_ltn => /orP [LT|LT].
all: now apply lower_index_implies_earlier_arrival with (tsk0 := tsk) in LT => //; ssrlia.
Qed.
(** We prove a stronger version of the above lemma by showing
that jobs [j1] and [j2] are equal if and only if they arrive at the
same time. *)
Lemma same_jobs_iff_same_arr:
j1 = j2 <->
job_arrival j1 = job_arrival j2.
Proof.
split; first by apply congr1.
intro EQ_ARR.
case: (boolP (j1 == j2)) => [/eqP EQ | /eqP NEQ]; first by auto.
rewrite -> diff_jobs_iff_diff_indices in NEQ => //; auto; last by rewrite H_j1_task.
move /neqP: NEQ; rewrite neq_ltn => /orP [LT|LT].
all: now apply lower_index_implies_earlier_arrival with (tsk0 := tsk) in LT => //; ssrlia.
Qed.
End DifferentJobsImplyDifferentArrivalTimes.
(** In this section we prove a few properties regarding task arrivals
@@ -157,8 +172,8 @@ Section SporadicArrivals.
apply in_arrseq_implies_arrives in A_IN; apply in_arrseq_implies_arrives in B_IN.
have EQ_ARR_A : (job_arrival a = job_arrival j) by apply H_consistent_arrivals.
have EQ_ARR_B : (job_arrival b = job_arrival j) by apply H_consistent_arrivals.
-apply (uneq_job_uneq_arr arr_seq) with (tsk0 := job_task a) in NEQ => //; try by rewrite TSKA.
-now ssrlia.
+apply uneq_job_uneq_arr with (arr_seq0 := arr_seq) (tsk0 := job_task j) in NEQ => //.
+now rewrite EQ_ARR_A EQ_ARR_B in NEQ.
Qed.
(** We show that no jobs of the task [tsk] other than [j1] arrive at
@@ -182,6 +197,20 @@ Section SporadicArrivals.
exists j1.
now repeat split => //; try rewrite H_j1_task.
Qed.
(** We show that no jobs of the task [tsk] other than [j1] arrive at
the same time as [j1], and thus the task arrivals at [job_arrival j1]
consist only of job [j1]. *)
Lemma only_j_at_job_arrival_j:
forall t,
job_arrival j1 = t ->
task_arrivals_at arr_seq tsk t = [::j1].
Proof.
intros t ARR.
rewrite -ARR.
specialize (only_j_in_task_arrivals_at_j) => J_AT.
now rewrite /task_arrivals_at_job_arrival H_j1_task in J_AT.
Qed.
(** We show that a job [j1] is the first job that arrives
in task arrivals at [job_arrival j1] by showing that the
......
Require Export prosa.analysis.facts.transform.edf_opt.
Require Export prosa.analysis.facts.transform.wc_correctness.
Require Export prosa.analysis.facts.behavior.deadlines.
Require Export prosa.analysis.facts.readiness.backlogged.
(** * Optimality of Work-Conserving EDF on Ideal Uniprocessors *)
(** In this file, we establish the foundation needed to connect the EDF and
work-conservation optimality theorems: if there is any work-conserving way
to meet all deadlines (assuming an ideal uniprocessor schedule), then there
is also an (ideal) EDF schedule that is work-conserving in which all
deadlines are met. *)
(** Throughout this file, we assume ideal uniprocessor schedules. *)
Require Import prosa.model.processor.ideal.
(** Throughout this file, we assume the basic (i.e., Liu & Layland) readiness model. *)
Require Import prosa.model.readiness.basic.
(** ** Non-Idle Swaps *)
(** We start by showing that [swapped], a function used in the inner-most level
of [edf_transform], maintains work conservation if the two instants being
swapped are not idle. *)
Section NonIdleSwapWorkConservationLemmas.
(** For any given type of jobs... *)
Context {Job : JobType} `{JobCost Job} `{JobDeadline Job} `{JobArrival Job}.
(** ... and any valid job arrival sequence. *)
Variable arr_seq: arrival_sequence Job.
Hypothesis H_arr_seq_valid: valid_arrival_sequence arr_seq.
(** ...consider an ideal uniprocessor schedule... *)
Variable sched: schedule (ideal.processor_state Job).
(** ...that is well-behaved (i.e., in which jobs execute only after having
arrived and only if they are not yet complete, and in which all jobs come
from the arrival sequence). *)
Hypothesis H_jobs_must_arrive_to_execute: jobs_must_arrive_to_execute sched.
Hypothesis H_completed_jobs_dont_execute: completed_jobs_dont_execute sched.
Hypothesis H_from_arr_seq: jobs_come_from_arrival_sequence sched arr_seq.
(** Suppose we are given two specific times [t1] and [t2],... *)
Variables t1 t2 : instant.
(** ...which we assume to be ordered (to avoid dealing with symmetric cases),... *)
Hypothesis H_well_ordered: t1 <= t2.
(** ...and two jobs [j1] and [j2]... *)
Variables j1 j2 : Job.
(** ...such that [j2] arrives before time [t1],... *)
Hypothesis H_arrival_j2 : job_arrival j2 <= t1.
(** ...[j1] is scheduled at time [t1], and... *)
Hypothesis H_t1_not_idle : scheduled_at sched j1 t1.
(** ...[j2] is scheduled at time [t2]. *)
Hypothesis H_t2_not_idle : scheduled_at sched j2 t2.
(** We let [swap_sched] denote the schedule in which the allocations at
[t1] and [t2] have been swapped. *)
Let swap_sched := swapped sched t1 t2.
(** Now consider an arbitrary job [j]... *)
Variable j : Job.
(** ...and an arbitrary instant [t]... *)
Variable t : instant.
(** ...such that [j] arrives in [arr_seq]... *)
Hypothesis H_arrival_j : arrives_in arr_seq j.
(** ...and is backlogged in [swap_sched] at instant [t]. *)
Hypothesis H_backlogged_j_t : backlogged swap_sched j t.
(** We proceed by case analysis. We first show that, if [t] equals [t1], then
[swap_sched] maintains work conservation. That is, there exists some job
that is scheduled in [swap_sched] at instant [t]. *)
Lemma non_idle_swap_maintains_work_conservation_t1 :
work_conserving arr_seq sched ->
t = t1 ->
exists j_other, scheduled_at swap_sched j_other t.
Proof.
move=> _ EQ; rewrite EQ; by exists j2; rewrite swap_job_scheduled_t1.
Qed.
(** Similarly, if [t] equals [t2], then [swap_sched] maintains work conservation. *)
Lemma non_idle_swap_maintains_work_conservation_t2 :
work_conserving arr_seq sched ->
t = t2 ->
exists j_other, scheduled_at swap_sched j_other t.
Proof.
move=> _ EQ; rewrite EQ; by exists j1; rewrite swap_job_scheduled_t2.
Qed.
(** If [t] is less than or equal to [t1], then [swap_sched] maintains work conservation. *)
Lemma non_idle_swap_maintains_work_conservation_LEQ_t1 :
work_conserving arr_seq sched ->
t <= t1 ->
exists j_other, scheduled_at swap_sched j_other t.
Proof.
move=> WC_sched LEQ.
case: (boolP(t == t1)) => [/eqP EQ| /eqP NEQ]; first by apply non_idle_swap_maintains_work_conservation_t1.
case: (boolP(t == t2)) => [/eqP EQ'| /eqP NEQ']; first by apply non_idle_swap_maintains_work_conservation_t2.
have [j_other j_other_scheduled] : exists j_other, scheduled_at sched j_other t.
{ rewrite /work_conserving in WC_sched. apply (WC_sched j) => //; move :H_backlogged_j_t.
rewrite /backlogged/job_ready/basic_ready_instance/pending/completed_by.
move /andP => [ARR_INCOMP scheduled]; move :ARR_INCOMP; move /andP => [arrive not_comp].
apply /andP; split; first (apply /andP; split) => //.
+ by rewrite (service_before_swap_invariant sched t1 t2 _ t).
+ by rewrite -(swap_job_scheduled_other_times _ t1 t2 j t) //; (apply /neqP; eauto).
}
exists j_other; by rewrite (swap_job_scheduled_other_times) //; do 2! (apply /neqP; eauto).
Qed.
(** Similarly, if [t] is greater than [t2], then [swap_sched] maintains work conservation. *)
Lemma non_idle_swap_maintains_work_conservation_GT_t2 :
work_conserving arr_seq sched ->
t2 < t ->
exists j_other, scheduled_at swap_sched j_other t.
Proof.
move=> WC_sched GT.
case: (boolP(t == t1)) => [/eqP EQ| /eqP NEQ]; first by apply non_idle_swap_maintains_work_conservation_t1.
case: (boolP(t == t2)) => [/eqP EQ'| /eqP NEQ']; first by apply non_idle_swap_maintains_work_conservation_t2.
have [j_other j_other_scheduled] : exists j_other, scheduled_at sched j_other t.
{ rewrite /work_conserving in WC_sched. apply (WC_sched j) => //; move :H_backlogged_j_t.
rewrite /backlogged/job_ready/basic_ready_instance/pending/completed_by.
move /andP => [ARR_INCOMP scheduled]; move :ARR_INCOMP; move /andP => [arrive not_comp].
apply /andP; split; first (apply /andP; split) => //.
+ by rewrite (service_after_swap_invariant sched t1 t2 _ t) // /t2; apply fsc_range1.
+ by rewrite -(swap_job_scheduled_other_times _ t1 t2 j t) //; (apply /neqP; eauto).
}
exists j_other; by rewrite (swap_job_scheduled_other_times) //; do 2! (apply /neqP; eauto).
Qed.
(** Lastly, we show that if [t] lies between [t1] and [t2] then work conservation is still maintained. *)
Lemma non_idle_swap_maintains_work_conservation_BET_t1_t2 :
work_conserving arr_seq sched ->
t1 < t <= t2 ->
exists j_other, scheduled_at swap_sched j_other t.
Proof.
move=> WC_sched H_range.
case: (boolP(t == t1)) => [/eqP EQ| /eqP NEQ]; first by apply non_idle_swap_maintains_work_conservation_t1.
case: (boolP(t == t2)) => [/eqP EQ'| /eqP NEQ']; first by apply non_idle_swap_maintains_work_conservation_t2.
move: H_range. move /andP => [LT GE].
case: (boolP(scheduled_at sched j2 t)) => Hj'.
- exists j2; by rewrite (swap_job_scheduled_other_times _ t1 t2 j2 t) //; (apply /neqP; eauto).
- have [j_other j_other_scheduled] : exists j_other, scheduled_at sched j_other t.
{ rewrite /work_conserving in WC_sched. apply (WC_sched j2).
- by unfold jobs_come_from_arrival_sequence in H_from_arr_seq; apply (H_from_arr_seq _ t2) => //.
- rewrite/backlogged/job_ready/basic_ready_instance/pending/completed_by.
apply /andP; split; first (apply /andP; split) => //; last by done.
+ by rewrite /has_arrived; apply (leq_trans H_arrival_j2); apply ltnW.
+ rewrite -ltnNge. apply (leq_ltn_trans) with (service sched j2 t2).
* by apply service_monotonic.
* by apply H_completed_jobs_dont_execute. }
exists j_other. now rewrite (swap_job_scheduled_other_times) //; (apply /neqP; eauto).
Qed.
End NonIdleSwapWorkConservationLemmas.
(** ** Work-Conserving Swap Candidates *)
(** Now, we show that work conservation is maintained by the inner-most level
of [edf_transform], which is [find_swap_candidate]. *)
Section FSCWorkConservationLemmas.
(** For any given type of jobs... *)
Context {Job : JobType} `{JobCost Job} `{JobDeadline Job} `{JobArrival Job}.
(** ...and any valid job arrival sequence,... *)
Variable arr_seq: arrival_sequence Job.
Hypothesis H_arr_seq_valid: valid_arrival_sequence arr_seq.
(** ...consider an ideal uniprocessor schedule... *)
Variable sched: schedule (ideal.processor_state Job).
(** ...that is well-behaved (i.e., in which jobs execute only after having
arrived and only if they are not yet complete)... *)
Hypothesis H_jobs_must_arrive_to_execute: jobs_must_arrive_to_execute sched.
Hypothesis H_completed_jobs_dont_execute: completed_jobs_dont_execute sched.
(** ...and in which all jobs come from the arrival sequence. *)
Hypothesis H_from_arr_seq: jobs_come_from_arrival_sequence sched arr_seq.
(** Suppose we are given a job [j1]... *)
Variable j1: Job.
(** ...and a point in time [t1]... *)
Variable t1: instant.
(** ...at which [j1] is scheduled... *)
Hypothesis H_not_idle: scheduled_at sched j1 t1.
(** ...and that is before [j1]'s deadline. *)
Hypothesis H_deadline_not_missed: t1 < job_deadline j1.
(** We now show that, if [t2] is a swap candidate returned by
[find_swap_candidate] for [t1], then swapping the processor allocations at
the two instants maintains work conservation. *)
Corollary fsc_swap_maintains_work_conservation :
work_conserving arr_seq sched ->
work_conserving arr_seq (swapped sched t1 (edf_trans.find_swap_candidate sched t1 j1)).
Proof.
set t2 := edf_trans.find_swap_candidate sched t1 j1.
have [j2 [t2_not_idle t2_arrival]] : exists j2, scheduled_at sched j2 t2 /\ job_arrival j2 <= t1
by apply fsc_not_idle.
have H_range : t1 <= t2 by apply fsc_range1.
move=> WC_sched j t ARR BL.
case: (boolP(t == t1)) => [/eqP EQ| /eqP NEQ].
- by apply (non_idle_swap_maintains_work_conservation_t1 arr_seq _ _ _ j2).
- case: (boolP(t == t2)) => [/eqP EQ'| /eqP NEQ'].
+ by apply (non_idle_swap_maintains_work_conservation_t2 arr_seq _ _ _ j1).
+ case: (boolP((t <= t1) || (t2 < t))) => [NOT_BET | BET]. (* t <> t2 *)
* move: NOT_BET; move/orP => [] => NOT_BET.
{ by apply (non_idle_swap_maintains_work_conservation_LEQ_t1 arr_seq _ _ _ H_range _ _ H_not_idle t2_not_idle j). }
{ by apply (non_idle_swap_maintains_work_conservation_GT_t2 arr_seq _ _ _ H_range _ _ H_not_idle t2_not_idle j). }
* move: BET; rewrite negb_or. move /andP. case; rewrite <- ltnNge => range1; rewrite <- leqNgt => range2.
have BET: (t1 < t) && (t <= t2) by apply /andP.
now apply (non_idle_swap_maintains_work_conservation_BET_t1_t2 arr_seq _ H_completed_jobs_dont_execute H_from_arr_seq _ _ _ _ t2_arrival H_not_idle t2_not_idle ).
Qed.
End FSCWorkConservationLemmas.
(** ** Work-Conservation of the Point-Wise EDF Transformation *)
(** In the following section, we show that work conservation is maintained by the
next level of [edf_transform], which is [make_edf_at]. *)
Section MakeEDFWorkConservationLemmas.
(** For any given type of jobs... *)
Context {Job : JobType} `{JobCost Job} `{JobDeadline Job} `{JobArrival Job}.
(** ... and any valid job arrival sequence, ... *)
Variable arr_seq: arrival_sequence Job.
Hypothesis H_arr_seq_valid: valid_arrival_sequence arr_seq.
(** ... consider an ideal uniprocessor schedule ... *)
Variable sched: schedule (ideal.processor_state Job).
(** ... in which all jobs come from the arrival sequence, ... *)
Hypothesis H_from_arr_seq: jobs_come_from_arrival_sequence sched arr_seq.
(** ...that is well-behaved,... *)
Hypothesis H_jobs_must_arrive_to_execute: jobs_must_arrive_to_execute sched.
Hypothesis H_completed_jobs_dont_execute: completed_jobs_dont_execute sched.
(** ...and in which no scheduled job misses a deadline. *)
Hypothesis H_no_deadline_misses: all_deadlines_met sched.
(** We analyze [make_edf_at] applied to an arbitrary point in time,
which we denote [t_edf] in the following. *)
Variable t_edf: instant.
(** For brevity, let [sched'] denote the schedule obtained from
[make_edf_at] applied to [sched] at time [t_edf]. *)
Let sched' := make_edf_at sched t_edf.
(** We show that, if a schedule is work-conserving, then applying
[make_edf_at] to it at an arbitrary instant [t_edf] maintains work
conservation. *)
Lemma mea_maintains_work_conservation :
work_conserving arr_seq sched -> work_conserving arr_seq sched'.
Proof. rewrite /sched'/make_edf_at => WC_sched. destruct (sched t_edf) eqn:E => //.
apply fsc_swap_maintains_work_conservation => //.
- by rewrite scheduled_at_def; apply /eqP => //.
- apply (scheduled_at_implies_later_deadline sched) => //.
+ by apply ideal_proc_model_ensures_ideal_progress.
+ rewrite /all_deadlines_met in (H_no_deadline_misses).
now apply (H_no_deadline_misses s t_edf); rewrite scheduled_at_def; apply /eqP.
+ by rewrite scheduled_at_def; apply/eqP => //.
Qed.
End MakeEDFWorkConservationLemmas.
(** ** Work-Conserving EDF Prefix *)
(** On to the next layer, we prove that the [transform_prefix] function at the
core of the EDF transformation maintains work conservation. *)
Section EDFPrefixWorkConservationLemmas.
(** For any given type of jobs, each characterized by execution
costs, an arrival time, and an absolute deadline,... *)
Context {Job : JobType} `{JobCost Job} `{JobDeadline Job} `{JobArrival Job}.
(** ... and any valid job arrival sequence, ... *)
Variable arr_seq: arrival_sequence Job.
Hypothesis H_arr_seq_valid: valid_arrival_sequence arr_seq.
(** ... consider an ideal uniprocessor schedule,... *)
Variable sched: schedule (ideal.processor_state Job).
(** ... an arbitrary finite [horizon], and ... *)
Variable horizon: instant.
(** ...let [sched_trans] denote the schedule obtained by transforming
[sched] up to the horizon. *)
Let sched_trans := edf_transform_prefix sched horizon.
(** Let [scheduled_behavior_premises] denote the premise that a schedule
1) is well-behaved,
2) has all jobs coming from the arrival sequence [arr_seq], and
3) contains no scheduled job that misses its deadline. *)
Definition scheduled_behavior_premises (sched : schedule (processor_state Job)) :=
jobs_must_arrive_to_execute sched
/\ completed_jobs_dont_execute sched
/\ jobs_come_from_arrival_sequence sched arr_seq
/\ all_deadlines_met sched.
(** For brevity, let [P] denote the predicate that a schedule satisfies
[scheduled_behavior_premises] and is work-conserving. *)
Let P (sched : schedule (processor_state Job)) :=
scheduled_behavior_premises sched /\ work_conserving arr_seq sched.
(** We show that if [sched] is work-conserving, then so is [sched_trans]. *)
Lemma edf_transform_prefix_maintains_work_conservation:
P sched -> P sched_trans.
Proof.
rewrite/sched_trans/edf_transform_prefix. apply (prefix_map_property_invariance ). rewrite /P.
move=> sched' t [[H_ARR [H_COMPLETED [H_COME H_all_deadlines_met]]] H_wc_sched].
rewrite/scheduled_behavior_premises/valid_schedule; split; first split.
- by apply mea_jobs_must_arrive.
- split; last split.
+ by apply mea_completed_jobs.
+ by apply mea_jobs_come_from_arrival_sequence.
+ by apply mea_no_deadline_misses.
- by apply mea_maintains_work_conservation.
Qed.
End EDFPrefixWorkConservationLemmas.
(** ** Work-Conservation of the EDF Transformation *)
(** Finally, having established that [edf_transform_prefix] maintains work
conservation, we go ahead and prove that [edf_transform] maintains work
conservation, too. *)
Section EDFTransformWorkConservationLemmas.
(** For any given type of jobs, each characterized by execution
costs, an arrival time, and an absolute deadline,... *)
Context {Job : JobType} `{JobCost Job} `{JobDeadline Job} `{JobArrival Job}.
(** ... and any valid job arrival sequence, ... *)
Variable arr_seq: arrival_sequence Job.
Hypothesis H_arr_seq_valid: valid_arrival_sequence arr_seq.
(** ... consider a valid ideal uniprocessor schedule ... *)
Variable sched: schedule (ideal.processor_state Job).
Hypothesis H_sched_valid: valid_schedule sched arr_seq.
(** ...and in which no scheduled job misses a deadline. *)
Hypothesis H_no_deadline_misses: all_deadlines_met sched.
(** We first note that [sched] satisfies [scheduled_behavior_premises]. *)
Lemma sched_satisfies_behavior_premises : scheduled_behavior_premises arr_seq sched.
Proof.
split; [ apply (jobs_must_arrive_to_be_ready sched) | split; [ apply (completed_jobs_are_not_ready sched) | split]].
all: by try apply H_sched_valid.
Qed.
(** We prove that, if the given schedule [sched] is work-conserving, then the
schedule that results from transforming it into an EDF schedule is also
work-conserving. *)
Lemma edf_transform_maintains_work_conservation :
work_conserving arr_seq sched -> work_conserving arr_seq (edf_transform sched).
Proof.
move=> WC_sched j t ARR.
set sched_edf := edf_transform sched.
have IDENT: identical_prefix sched_edf (edf_transform_prefix sched t.+1) t.+1
by rewrite /sched_edf/edf_transform => t' LE_t; apply (edf_prefix_inclusion) => //; apply sched_satisfies_behavior_premises.
rewrite (backlogged_prefix_invariance _ _ (edf_transform_prefix sched t.+1) t.+1) // => BL;
last by apply basic_readiness_nonclairvoyance.
have WC_trans: work_conserving arr_seq (edf_transform_prefix sched (succn t))
by apply edf_transform_prefix_maintains_work_conservation; split => //; apply sched_satisfies_behavior_premises.
move: (WC_trans _ _ ARR BL) => [j_other SCHED_AT].
exists j_other.
now rewrite (identical_prefix_scheduled_at _ (edf_transform_prefix sched t.+1) t.+1) //.
Qed.
End EDFTransformWorkConservationLemmas.
@@ -13,7 +13,7 @@ build: [
]
install: [make "install"]
depends: [
"coq" {((>= "8.10" & < "8.13~") | = "dev")}
"coq" {((>= "8.11" & < "8.13~") | = "dev")}
"coq-mathcomp-ssreflect" {((>= "1.10" & < "1.12~") | = "dev")}
]
......
Require Export prosa.util.rel.
Require Export prosa.model.task.arrivals.
(** * Request Bound Functions (RBF) *)
(** In the following, we define the notion of Request Bound Functions
(RBF), which can be used to reason about the cost of job arrivals. In
contrast to arrival curves, which constrain the number of arrivals
per time interval, request bound functions bound the sum of the costs
of arriving jobs. *)
(** ** Task Parameters for the Request Bound Functions *)
(** The request bound functions give an upper bound and, optionally, a
lower bound on the cost of new job arrivals during any given
interval. *)
(** We let [max_request_bound tsk Δ] denote a bound on the maximum
cost of arrivals of jobs of task [tsk] in any interval of length
[Δ]. *)
Class MaxRequestBound (Task : TaskType) := max_request_bound : Task -> duration -> work.
(** Conversely, we let [min_request_bound tsk Δ] denote a bound on the
minimum cost of arrivals of jobs of task [tsk] in any interval of
length [Δ]. *)
Class MinRequestBound (Task : TaskType) := min_request_bound : Task -> duration -> work.
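(** For illustration: one common instantiation of an upper request bound (for
    a sporadic or periodic task with WCET [C] and period [T]) is
    [C * ceil(Δ / T)]; this is not prescribed by the classes above, which only
    declare the parameters. With [C = 2], [T = 5], and [Δ = 7], at most two
    jobs can arrive in the interval, so the bound is [4]. A standalone sketch
    of the arithmetic, writing the ceiling as [(Δ + T - 1) %/ T] and assuming
    only mathcomp: *)
From mathcomp Require Import all_ssreflect.
Example rbf_arithmetic_sketch : 2 * ((7 + 5 - 1) %/ 5) = 4.
Proof. by []. Qed.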
(** ** Parameter Semantics *)
(** In the following, we precisely define the semantics of the request
bound functions. *)
Section RequestBoundFunctions.
(** Consider any type of tasks ... *)
Context {Task : TaskType}.
(** ... and any type of jobs associated with these tasks. *)
Context {Job : JobType}.
Context `{JobTask Job Task}.
Context `{JobCost Job}.
(** Consider any job arrival sequence. *)
Variable arr_seq : arrival_sequence Job.
(** *** Definition of Request Bound Functions *)
(** First, what constitutes a valid request bound function for a task? *)
Section RequestBoundFunctions.
(** We say that a given bound [request_bound] is a valid request
bound function iff [request_bound] is a monotonic function
that equals 0 for the empty interval [delta = 0]. *)
Definition valid_request_bound_function (request_bound : duration -> work) :=
request_bound 0 = 0 /\
monotone request_bound leq.
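(** For illustration: a linear bound [fun Δ => C * Δ] satisfies both
    conditions above, since it is [0] at [Δ = 0] and monotone in [Δ]. A
    standalone sketch of the monotonicity part, assuming only mathcomp (and
    presuming that [monotone _ leq] coincides with this pointwise statement): *)
From mathcomp Require Import all_ssreflect.
Example linear_bound_monotone_sketch (C d1 d2 : nat) :
  d1 <= d2 -> C * d1 <= C * d2.
Proof. by move=> LE; rewrite leq_mul2l LE orbT. Qed.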
(** We say that [request_bound] is an upper request bound for task
[tsk] iff, for any interval <<[t1, t2)>>, [request_bound (t2 -
t1)] bounds the sum of costs of jobs of [tsk] that arrive in
that interval. *)
Definition respects_max_request_bound (tsk : Task) (max_request_bound : duration -> work) :=
forall (t1 t2 : instant),
t1 <= t2 ->
cost_of_task_arrivals arr_seq tsk t1 t2 <= max_request_bound (t2 - t1).
(** We analogously define the lower request bound. *)
Definition respects_min_request_bound (tsk : Task) (min_request_bound : duration -> work) :=
forall (t1 t2 : instant),
t1 <= t2 ->
min_request_bound (t2 - t1) <= cost_of_task_arrivals arr_seq tsk t1 t2.
End RequestBoundFunctions.
End RequestBoundFunctions.
(** ** Model Validity *)
(** Based on the just-established semantics, we define the properties of a
valid request bound model. *)
Section RequestBoundFunctionsModel.
(** Consider any type of tasks ... *)
Context {Task : TaskType}.
(** ... and any type of jobs associated with these tasks. *)
Context {Job : JobType}.
Context `{JobTask Job Task}.
Context `{JobCost Job}.
(** Consider any job arrival sequence... *)
Variable arr_seq : arrival_sequence Job.
(** ...and all kinds of request bounds. *)
Context `{MaxRequestBound Task}
`{MinRequestBound Task}.
(** Let [ts] be an arbitrary task set. *)
Variable ts : TaskSet Task.
(** We say that [request_bound] is a valid request bound function for a
task set if it is valid for every task in the task set. *)
Definition valid_taskset_request_bound_function (request_bound : Task -> duration -> work) :=
forall (tsk : Task), tsk \in ts -> valid_request_bound_function (request_bound tsk).
(** Finally, we lift the per-task semantics of the respective
request bound functions to the entire task set. *)
Definition taskset_respects_max_request_bound :=
forall (tsk : Task), tsk \in ts -> respects_max_request_bound arr_seq tsk (max_request_bound tsk).
Definition taskset_respects_min_request_bound :=
forall (tsk : Task), tsk \in ts -> respects_min_request_bound arr_seq tsk (min_request_bound tsk).
End RequestBoundFunctionsModel.
......@@ -10,7 +10,8 @@ Section TaskArrivals.
Context {Task : TaskType}.
Context `{JobTask Job Task}.
Context `{JobArrival Job}.
Context `{JobCost Job}.
(** Consider any job arrival sequence ... *)
Variable arr_seq : arrival_sequence Job.
......@@ -39,7 +40,11 @@ Section TaskArrivals.
(** ... and finally count the number of job arrivals. *)
Definition number_of_task_arrivals (t1 t2 : instant) :=
size (task_arrivals_between t1 t2).
(** ... and also sum up the cost of job arrivals. *)
Definition cost_of_task_arrivals (t1 t2 : instant) :=
\sum_(j <- task_arrivals_between t1 t2 | job_task j == tsk) job_cost j.
End TaskArrivals.
(** In this section we introduce a few definitions
......
......@@ -51,5 +51,21 @@ Section ValidTaskOffset.
End ValidTaskOffset.
(** In this section we define the notion of a maximum task offset. *)
Section MaxTaskOffset.
(** Consider any type of tasks with offsets, ... *)
Context {Task : TaskType}.
Context `{TaskOffset Task}.
(** ... and any task set. *)
Variable ts : TaskSet Task.
(** We define the sequence of task offsets of all tasks in [ts], ... *)
Definition task_offsets := map task_offset ts.
(** ... and the maximum among all the task offsets. *)
Definition max_task_offset := max0 task_offsets.
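(** For example, if the offsets of the tasks in [ts] are [2], [5], and [1],
then [max_task_offset] evaluates to [5]. *)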
End MaxTaskOffset.
Require Export prosa.analysis.facts.transform.edf_opt.
Require Export prosa.analysis.facts.transform.edf_wc.
(** * Optimality of EDF on Ideal Uniprocessors *)
......@@ -48,6 +49,44 @@ Section Optimality.
- by apply edf_transform_ensures_edf.
Qed.
(** Moreover, we note that, since the EDF transformation maintains work
conservation, if there exists a schedule in which all jobs of [arr_seq]
meet their deadline, then there also exists a work-conserving EDF schedule
in which all deadlines are met. *)
Theorem EDF_WC_optimality :
(exists any_sched : schedule (ideal.processor_state Job),
valid_schedule any_sched arr_seq /\
all_deadlines_of_arrivals_met arr_seq any_sched) ->
exists edf_wc_sched : schedule (ideal.processor_state Job),
valid_schedule edf_wc_sched arr_seq /\
all_deadlines_of_arrivals_met arr_seq edf_wc_sched /\
work_conserving arr_seq edf_wc_sched /\
EDF_schedule edf_wc_sched.
Proof.
move=> [sched [[COME READY] DL_ARR_MET]].
move: (all_deadlines_met_in_valid_schedule _ _ COME DL_ARR_MET) => DL_MET.
set wc_sched := wc_transform arr_seq sched.
have wc_COME : jobs_come_from_arrival_sequence wc_sched arr_seq
by apply wc_jobs_come_from_arrival_sequence.
have wc_READY : jobs_must_be_ready_to_execute wc_sched
by apply wc_jobs_must_be_ready_to_execute.
have wc_ARR := jobs_must_arrive_to_be_ready wc_sched wc_READY.
have wc_COMP := completed_jobs_are_not_ready wc_sched wc_READY.
have wc_DL_ARR_MET : all_deadlines_of_arrivals_met arr_seq wc_sched
by apply wc_all_deadlines_of_arrivals_met; apply DL_ARR_MET.
move: (all_deadlines_met_in_valid_schedule _ _ wc_COME wc_DL_ARR_MET) => wc_DL_MET.
set sched' := edf_transform wc_sched.
exists sched'. split; last split; last split.
- by apply edf_schedule_is_valid.
- by apply edf_schedule_meets_all_deadlines_wrt_arrivals.
- apply edf_transform_maintains_work_conservation; by [ | apply wc_is_work_conserving ].
- by apply edf_transform_ensures_edf.
Qed.
(** Remark: [EDF_optimality] is of course an immediate corollary of
[EDF_WC_optimality]. We nonetheless keep two separate proofs for historical
reasons, as [EDF_optimality] predates [EDF_WC_optimality] (in Prosa). *)
End Optimality.
(** ** Weak Optimality Theorem *)
......
......@@ -39,7 +39,7 @@ try:
except:
have_yaml = False
def statement_end_offsets(src):
def statement_end_offsets(opts, src):
"""A naive splitter for Coq .v files.
Pays attention to nested comments and considers each period followed
by whitespace to be the end of something that should be sent to coqtop.
......@@ -81,15 +81,6 @@ def statement_end_offsets(src):
return True
return True
def only_whitespace_since_period(i):
while i > 0:
i -= 1
if src[i] != '.' and not src[i].isspace():
return False
if src[i] == '.':
return True
return False
def end_of_bullet(i):
while i < len(src) and cur_is_bullet(i):
i += 1
......@@ -111,15 +102,33 @@ def statement_end_offsets(src):
return src[:i].endswith("Qed") \
or (in_proof and src[:i].endswith("Defined"))
for i in range(len(src)):
last = -1
seen_non_whitespace_since_last = False
i = 0
while i < len(src):
comment_just_ended = False
if opts.parse_only and opts.verbose:
print("%7d [%s] comment:%d proof:%d ns:%d nsnl:%d bullet:%d last:%d" % (
i,
src[i].replace('\n', '\\n'),
in_comment,
in_proof,
seen_non_whitespace_since_last,
not only_whitespace_since_newline(i),
cur_is_bullet(i),
last
))
assert in_comment >= 0
assert in_proof >= 0
# comment starting?
if cur_is(i, '(') and next_is(i, '*'):
in_comment += 1
i += 1 # skip next
# comment ending?
elif cur_is(i, '*') and next_is(i, ')'):
in_comment -= 1
i += 1 # skip next
comment_just_ended = True
# look for statements ending in a period
elif not in_comment and cur_is(i, '.') and next_is_whitespace(i):
if start_of_proof(i):
......@@ -127,26 +136,39 @@ def statement_end_offsets(src):
elif end_of_proof(i):
in_proof -= 1
yield i + 1
last = i + 1
# look for closing braces -- this is a brittle heuristic, but
# we need to catch sub-proofs because coqtop in emacs mode
# produces a prompt every time we enter a sub-proof
elif not in_comment and in_proof and cur_is(i, '}') \
and next_is_whitespace(i) and prev_is_whitespace(i):
yield i + 1
last = i + 1
# similarly, look for opening braces
elif not in_comment and in_proof and cur_is(i, '{') \
and next_is_whitespace(i) and prev_is_whitespace(i):
yield i + 1
last = i + 1
# detect bulleted sub-proofs for the same reason
elif not in_comment and in_proof and cur_is_bullet(i) \
and only_whitespace_since_newline(i) \
and only_whitespace_since_period(i):
and not seen_non_whitespace_since_last:
# bullets can consist of multiple characters
end = end_of_bullet(i)
if not end is False:
yield end
last = end
i = end # skip over entire bullet
# otherwise just skip over this
# update the boundary tracker: reset it right after an emitted statement
# boundary; otherwise note any non-whitespace character outside of comments
if i == last:
seen_non_whitespace_since_last = False
elif not (in_comment or comment_just_ended) and not src[i].isspace():
seen_non_whitespace_since_last = True
# advance to next character
i += 1
def launch_coqtop(opts):
# Let the shell figure out where coqtop is and resolve the options.
# A bit naive but works for now.
......@@ -157,14 +179,15 @@ START_OF_PROMPT = "<prompt>"
END_OF_PROMPT = "</prompt>"
END_OF_PROMPT_BYTES = bytes(END_OF_PROMPT, 'utf-8')
def read_from_pipe(pipe, timeout, seen_enough = lambda _: False):
def read_from_pipe(pipe, timeout, seen_enough = lambda _: False, expect_timeout=False):
output = bytearray()
while not seen_enough(output):
(readable, writable, exceptional) = select([pipe], [], [], timeout)
if not readable:
# we timed out, nothing to read
if timeout > 0:
print('=' * 30 + "[ TIMEOUT ]" + '=' * 30)
if not expect_timeout:
print('=' * 30 + "[ TIMEOUT ]" + '=' * 30)
break
else:
data = readable[0].read(512)
......@@ -180,9 +203,10 @@ def wait_for_prompt(opts, pipe, timeout):
output = read_from_pipe(pipe, timeout, seen_enough)
while True:
more = read_from_pipe(pipe, 0, seen_enough)
more = read_from_pipe(pipe, 0, seen_enough, expect_timeout=True)
output += more
if more:
print("unexpected:", more)
assert False # we lost sync; this should not be happening
else:
break
......@@ -258,10 +282,10 @@ def interact(opts, coqtop, src, start, end):
return interaction
def report(interaction):
def report(n, interaction):
print("+" * 80)
if 'input' in interaction:
print("INPUT: [%s]" % "\n".join(interaction['input']))
print("INPUT < %d : [%s]" % (n, "\n".join(interaction['input'])))
if 'output' in interaction:
print("OUTPUT:\n%s" % "\n".join(interaction['output']))
else:
......@@ -277,17 +301,17 @@ def feed_to_coqtop(opts, src):
# wait for coqtop startup to finish
interaction = wait_for_coqtop(opts, coqtop)
if opts.verbose:
report(interaction)
report(0, interaction)
interactions.append(interaction)
# feed statements
last = 0
for end in statement_end_offsets(src):
for end in statement_end_offsets(opts, src):
interaction = interact(opts, coqtop, src, last, end)
interactions.append(interaction)
last = end + 1
if opts.verbose:
report(interaction)
report(len(interactions), interaction)
# signal end of input
coqtop.stdin.close()
......@@ -300,7 +324,7 @@ def feed_to_coqtop(opts, src):
trailing_interactions += 1
interactions.append(interaction)
if opts.verbose:
report(interaction)
report(len(interactions), interaction)
assert trailing_interactions <= 1
......@@ -322,6 +346,16 @@ def process(opts, fname):
json.dump(interactions, out, sort_keys=False, indent=4)
out.close()
def dump_structure(opts, fname):
print("=" * 80)
print("Statements in %s: \n" % fname)
print("-" * 80)
src = open(fname, 'r').read()
last = 0
for end in statement_end_offsets(opts, src):
print("%s" % src[last:end+1].replace('\n', ' '))
last = end + 1
print("-" * 80)
def parse_args():
parser = argparse.ArgumentParser(
......@@ -349,6 +383,10 @@ def parse_args():
parser.add_argument('--verbose', default=False, action='store_true',
help='report on interaction with coqtop')
parser.add_argument('--parse-only', default=False, action='store_true',
help='report how the script splits the input '
'(--verbose for per-character info)')
return parser.parse_args()
def main():
......@@ -363,7 +401,10 @@ def main():
had_problem = False
for f in opts.input_files:
try:
process(opts, f)
if opts.parse_only:
dump_structure(opts, f)
else:
process(opts, f)
except OSError as e:
print(e, file=sys.stderr)
had_problem = True
......
......@@ -49,5 +49,8 @@ bursty
TODO
mathcomp
hyperperiod
hyperperiods
pointwise
notational
multiset
superset
Require Export prosa.util.tactics prosa.util.notation.
From mathcomp Require Import ssreflect ssrbool eqtype ssrnat seq fintype bigop.
Require Export prosa.util.tactics prosa.util.ssrlia.
(** In this section, we introduce useful lemmas about the concatenation operation performed
over an arbitrary range of sequences. *)
......@@ -34,7 +35,7 @@ Section BigCatLemmas.
(** Conversely, we prove that any element belonging to a concatenation of sequences
must come from one of the sequences. *)
Lemma mem_bigcat_nat_exists :
Lemma mem_bigcat_nat_exists:
forall x m n,
x \in \cat_(m <= i < n) (f i) ->
exists i,
......@@ -53,7 +54,22 @@ Section BigCatLemmas.
exists n; split; first by done.
apply/andP; split; [by done | by apply ltnSn]. }
Qed.
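(** We also show that any element of a sequence [f j], where [j] ranges
over ordinals below [n], belongs to the concatenation of the sequences
[f i] over all indices [i < n]. *)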
Lemma mem_bigcat_ord:
forall x n (j: 'I_n) (f: 'I_n -> list T),
j < n ->
x \in (f j) ->
x \in \cat_(i < n) (f i).
Proof.
move=> x; elim=> [//|n IHn] j f' Hj Hx.
rewrite big_ord_recr /= mem_cat; apply /orP.
move: Hj; rewrite ltnS leq_eqVlt => /orP [/eqP Hj|Hj].
{ by right; rewrite (_ : ord_max = j); [|apply ord_inj]. }
left.
apply (IHn (Ordinal Hj)); [by []|].
by set j' := widen_ord _ _; have -> : j' = j; [apply ord_inj|].
Qed.
End BigCatElements.
(** In this section, we show how we can preserve uniqueness of the elements
......@@ -69,7 +85,7 @@ Section BigCatLemmas.
forall x i1 i2, x \in f i1 -> x \in f i2 -> i1 = i2.
(** We prove that the concatenation will yield a sequence with unique elements. *)
Lemma bigcat_nat_uniq :
Lemma bigcat_nat_uniq:
forall n1 n2,
uniq (\cat_(n1 <= i < n2) (f i)).
Proof.
......@@ -88,7 +104,86 @@ Section BigCatLemmas.
apply H_no_elements_in_common with (i1 := i) in INx; last by done.
by rewrite INx ltnn in LTi.
Qed.
(** Conversely, if the concatenation of the sequences has no duplicates, any
element can only belong to a single sequence. *)
Lemma bigcat_ord_uniq_reverse :
forall n (f: 'I_n -> list T),
uniq (\cat_(i < n) (f i)) ->
(forall x i1 i2,
x \in (f i1) -> x \in (f i2) -> i1 = i2).
Proof.
case=> [|n]; [by move=> f' Huniq x; case|].
elim: n => [|n IHn] f' Huniq x i1 i2 Hi1 Hi2.
{ move: i1 i2 {Hi1 Hi2}; case; case=> [i1|//]; case; case=> [i2|//].
apply f_equal, eq_irrelevance. }
move: (leq_ord i1); rewrite leq_eqVlt => /orP [H'i1|H'i1];
move: (leq_ord i2); rewrite leq_eqVlt => /orP [H'i2|H'i2].
{ by apply ord_inj; move: H'i1 H'i2 => /eqP -> /eqP ->. }
{ exfalso.
move: Huniq; rewrite big_ord_recr cat_uniq => /andP [_ /andP [H _]].
move: H; apply /negP; rewrite Bool.negb_involutive.
apply /hasP; exists x => /=.
{ set o := ord_max; suff -> : o = i1; [by []|].
by apply ord_inj; move: H'i1 => /eqP ->. }
apply (mem_bigcat_ord _ _ (Ordinal H'i2)) => //.
by set o := widen_ord _ _; suff -> : o = i2; [|apply ord_inj]. }
{ exfalso.
move: Huniq; rewrite big_ord_recr cat_uniq => /andP [_ /andP [H _]].
move: H; apply /negP; rewrite Bool.negb_involutive.
apply /hasP; exists x => /=.
{ set o := ord_max; suff -> : o = i2; [by []|].
by apply ord_inj; move: H'i2 => /eqP ->. }
apply (mem_bigcat_ord _ _ (Ordinal H'i1)) => //.
by set o := widen_ord _ _; suff -> : o = i1; [|apply ord_inj]. }
move: Huniq; rewrite big_ord_recr cat_uniq => /andP [Huniq _].
apply ord_inj; rewrite -(inordK H'i1) -(inordK H'i2); apply f_equal.
apply (IHn _ Huniq x).
{ set i1' := widen_ord _ _; suff -> : i1' = i1; [by []|].
by apply ord_inj; rewrite /= inordK. }
set i2' := widen_ord _ _; suff -> : i2' = i2; [by []|].
by apply ord_inj; rewrite /= inordK.
Qed.
End BigCatDistinctElements.
(** We show that filtering a concatenation of sequences by any predicate [P]
is the same as filtering each sequence by [P] and then concatenating the results. *)
Lemma cat_filter_eq_filter_cat:
forall {X : Type} (F : nat -> seq X) (P : X -> bool) (t1 t2 : nat),
[seq x <- \cat_(t1<=t<t2) F t | P x] = \cat_(t1<=t<t2)[seq x <- F t | P x].
Proof.
intros.
specialize (leq_total t1 t2) => /orP [T1_LT | T2_LT].
+ have EX: exists Δ, t2 = t1 + Δ by exists (t2 - t1); ssrlia.
move: EX => [Δ EQ]; subst t2.
induction Δ.
{ now rewrite addn0 !big_geq => //. }
{ rewrite addnS !big_nat_recr => //=; try by rewrite leq_addr.
rewrite filter_cat IHΔ => //.
now ssrlia.
}
+ now rewrite !big_geq => //.
Qed.
(** We show that the size of a concatenated sequence equals the sum of the
sizes of the individual sequences. *)
Lemma size_big_nat:
forall {X : Type} (F : nat -> seq X) (t1 t2 : nat),
\sum_(t1 <= t < t2) size (F t) =
size (\cat_(t1 <= t < t2) F t).
Proof.
intros.
specialize (leq_total t1 t2) => /orP [T1_LT | T2_LT].
+ have EX: exists Δ, t2 = t1 + Δ by exists (t2 - t1); ssrlia.
move: EX => [Δ EQ]; subst t2.
induction Δ.
{ now rewrite addn0 !big_geq => //. }
{ rewrite addnS !big_nat_recr => //=; try by rewrite leq_addr.
rewrite size_cat IHΔ => //.
now ssrlia.
}
+ now rewrite !big_geq => //.
Qed.
End BigCatLemmas.
......@@ -18,3 +18,16 @@ Proof.
- rewrite modn_small;try ssrlia.
Qed.
(** We show that, for any natural numbers [a] and [c] with [c] positive,
[a] is less than [a %/ c * c + c]. *)
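(** For instance, for [a = 7] and [c = 3], we have
[7 %/ 3 * 3 + 3 = 2 * 3 + 3 = 9], which is indeed greater than [7]. *)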
Lemma div_floor_add_g:
forall a c,
c > 0 ->
a < a %/ c * c + c.
Proof.
intros * POS.
specialize (divn_eq a c) => DIV.
rewrite [in X in X < _]DIV.
rewrite ltn_add2l.
now apply ltn_pmod.
Qed.