diff --git a/Makefile b/Makefile
index 071052b82a62582058bd3d45a0e70d6437ef371b..31f4cb7ec8804f328971bc13976f60daf36f1e1f 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@
 
 #
 # This Makefile was generated by the command line :
-# coq_makefile -R . rt ./util/ssromega.v ./util/lemmas.v ./util/Vbase.v ./util/divround.v ./analysis/basic/bertogna_fp_theory.v ./analysis/basic/bertogna_edf_comp.v ./analysis/basic/bertogna_fp_comp.v ./analysis/basic/bertogna_edf_theory.v ./analysis/jitter/bertogna_fp_theory.v ./analysis/jitter/bertogna_edf_comp.v ./analysis/jitter/bertogna_fp_comp.v ./analysis/jitter/bertogna_edf_theory.v ./model/basic/schedulability.v ./model/basic/interference_bound_edf.v ./model/basic/task.v ./model/basic/interference_bound_fp.v ./model/basic/task_arrival.v ./model/basic/interference_bound.v ./model/basic/platform.v ./model/basic/schedule.v ./model/basic/priority.v ./model/basic/interference_edf.v ./model/basic/interference.v ./model/basic/workload.v ./model/basic/job.v ./model/basic/arrival_sequence.v ./model/basic/response_time.v ./model/basic/platform_fp.v ./model/basic/workload_bound.v ./model/jitter/schedulability.v ./model/jitter/interference_bound_edf.v ./model/jitter/task.v ./model/jitter/interference_bound_fp.v ./model/jitter/task_arrival.v ./model/jitter/interference_bound.v ./model/jitter/platform.v ./model/jitter/schedule.v ./model/jitter/priority.v ./model/jitter/interference_edf.v ./model/jitter/interference.v ./model/jitter/workload.v ./model/jitter/job.v ./model/jitter/arrival_sequence.v ./model/jitter/response_time.v ./model/jitter/platform_fp.v ./model/jitter/workload_bound.v -o Makefile 
+# coq_makefile -R . rt ./util/ssromega.v ./util/lemmas.v ./util/Vbase.v ./util/divround.v ./implementation/basic/schedule.v ./implementation/jitter/schedule.v ./analysis/basic/bertogna_fp_theory.v ./analysis/basic/bertogna_edf_comp.v ./analysis/basic/bertogna_fp_comp.v ./analysis/basic/bertogna_edf_theory.v ./analysis/jitter/bertogna_fp_theory.v ./analysis/jitter/bertogna_edf_comp.v ./analysis/jitter/bertogna_fp_comp.v ./analysis/jitter/bertogna_edf_theory.v ./model/basic/schedulability.v ./model/basic/interference_bound_edf.v ./model/basic/task.v ./model/basic/interference_bound_fp.v ./model/basic/task_arrival.v ./model/basic/interference_bound.v ./model/basic/platform.v ./model/basic/schedule.v ./model/basic/priority.v ./model/basic/interference_edf.v ./model/basic/interference.v ./model/basic/workload.v ./model/basic/job.v ./model/basic/arrival_sequence.v ./model/basic/response_time.v ./model/basic/platform_fp.v ./model/basic/workload_bound.v ./model/jitter/schedulability.v ./model/jitter/interference_bound_edf.v ./model/jitter/task.v ./model/jitter/interference_bound_fp.v ./model/jitter/task_arrival.v ./model/jitter/interference_bound.v ./model/jitter/platform.v ./model/jitter/schedule.v ./model/jitter/priority.v ./model/jitter/interference_edf.v ./model/jitter/interference.v ./model/jitter/workload.v ./model/jitter/job.v ./model/jitter/arrival_sequence.v ./model/jitter/response_time.v ./model/jitter/platform_fp.v ./model/jitter/workload_bound.v -o Makefile 
 #
 
 .DEFAULT_GOAL := all
@@ -84,6 +84,8 @@ VFILES:=util/ssromega.v\
   util/lemmas.v\
   util/Vbase.v\
   util/divround.v\
+  implementation/basic/schedule.v\
+  implementation/jitter/schedule.v\
   analysis/basic/bertogna_fp_theory.v\
   analysis/basic/bertogna_edf_comp.v\
   analysis/basic/bertogna_fp_comp.v\
diff --git a/implementation/basic/schedule.v b/implementation/basic/schedule.v
new file mode 100644
index 0000000000000000000000000000000000000000..71ae096b3d9d418ce6328bc804297ffe39047120
--- /dev/null
+++ b/implementation/basic/schedule.v
@@ -0,0 +1,339 @@
+Add LoadPath "../.." as rt.
+Require Import rt.util.Vbase rt.util.lemmas.
+Require Import rt.model.basic.job rt.model.basic.arrival_sequence rt.model.basic.schedule
+               rt.model.basic.platform rt.model.basic.priority.
+Require Import Program ssreflect ssrbool ssrfun eqtype ssrnat fintype bigop seq path.
+
+Module WorkConservingScheduler.
+
+  Import Job ArrivalSequence Schedule Platform Priority.
+  
+  Section Implementation.
+    
+    Context {Job: eqType}.
+    Variable job_cost: Job -> nat.
+
+    (* Let num_cpus denote the number of processors, ... *)
+    Variable num_cpus: nat.
+
+    (* ... and let arr_seq be any arrival sequence. *)
+    Variable arr_seq: arrival_sequence Job.
+
+    (* Assume a JLDP policy is given. *)
+    Variable higher_eq_priority: JLDP_policy arr_seq.
+
+    (* Consider the list of pending jobs at time t. *)
+    Definition jobs_pending_at (sched: schedule num_cpus arr_seq) (t: time) :=
+      [seq j <- jobs_arrived_up_to arr_seq t | pending job_cost sched j t].
+
+    (* Next, we sort this list by priority. *)
+    Definition sorted_pending_jobs (sched: schedule num_cpus arr_seq) (t: time) :=
+      sort (higher_eq_priority t) (jobs_pending_at sched t).
+
+    (* Starting from the empty schedule as a base, ... *)
+    Definition empty_schedule : schedule num_cpus arr_seq :=
+      fun t cpu => None. (* NOTE(review): binder names look swapped w.r.t. update_schedule (first argument is the cpu) -- harmless, as both are ignored *)
+
+    (* ..., we redefine the mapping of jobs to processors at any time t as follows.
+       The i-th job in the sorted list is assigned to the i-th cpu, or to None
+       if the list is too short. *)
+    Definition update_schedule (prev_sched: schedule num_cpus arr_seq)
+                               (t_next: time) : schedule num_cpus arr_seq :=
+      fun cpu t =>
+        if t == t_next then
+          nth_or_none (sorted_pending_jobs prev_sched t) cpu
+        else prev_sched cpu t.
+    
+    (* The schedule is iteratively constructed by applying update_schedule at every time t, ... *)
+    Fixpoint schedule_prefix (t_max: time) : schedule num_cpus arr_seq := 
+      if t_max is t_prev.+1 then
+        (* At time t_prev + 1, schedule jobs that have not completed by time t_prev. *)
+        update_schedule (schedule_prefix t_prev) t_prev.+1
+      else
+        (* At time 0, schedule any jobs that arrive. *)
+        update_schedule empty_schedule 0.
+
+    Definition scheduler (cpu: processor num_cpus) (t: time) := (schedule_prefix t) cpu t. (* the schedule at time t is read from the prefix built up to t *)
+
+  End Implementation.
+
+  Section Proofs.
+
+    Context {Job: eqType}.
+    Variable job_cost: Job -> nat.
+
+    (* Assume a positive number of processors. *)
+    Variable num_cpus: nat.
+    Hypothesis H_at_least_one_cpu: num_cpus > 0.
+
+    (* Let arr_seq be any arrival sequence of jobs where ... *)
+    Variable arr_seq: arrival_sequence Job.
+    (* ... jobs have positive cost and ... *)
+    Hypothesis H_job_cost_positive:
+      forall (j: JobIn arr_seq), job_cost_positive job_cost j.
+    (* ... at any time, there are no duplicates of the same job. *)
+    Hypothesis H_arrival_sequence_is_a_set :
+      arrival_sequence_is_a_set arr_seq.
+
+    (* Consider any JLDP policy higher_eq_priority that is transitive and total. *)
+    Variable higher_eq_priority: JLDP_policy arr_seq.
+    Hypothesis H_priority_transitive: forall t, transitive (higher_eq_priority t).
+    Hypothesis H_priority_total: forall t, total (higher_eq_priority t).
+
+    (* Let sched denote our concrete scheduler implementation. *)
+    Let sched := scheduler job_cost num_cpus arr_seq higher_eq_priority.
+    (* Next, we provide some helper lemmas about the scheduler construction. *)
+    Section HelperLemmas.
+      
+      (* First, we show that the scheduler preserves its prefixes. *)
+      Lemma scheduler_same_prefix :
+        forall t t_max cpu,
+          t <= t_max ->
+          schedule_prefix job_cost num_cpus arr_seq higher_eq_priority t_max cpu t =
+          scheduler job_cost num_cpus arr_seq higher_eq_priority cpu t.
+      Proof. (* by induction on t_max *)
+        intros t t_max cpu LEt.
+        induction t_max.
+        {
+          by rewrite leqn0 in LEt; move: LEt => /eqP EQ; subst.
+        }
+        {
+          rewrite leq_eqVlt in LEt.
+          move: LEt => /orP [/eqP EQ | LESS]; first by subst.
+          {
+            feed IHt_max; first by done.
+            unfold schedule_prefix, update_schedule at 1.
+            assert (FALSE: t == t_max.+1 = false).
+            {
+              by apply negbTE; rewrite neq_ltn LESS orTb.
+            } rewrite FALSE.
+            by rewrite -IHt_max.
+          }
+        }
+      Qed.
+
+      (* With respect to the sorted list of pending jobs, ... *)
+      Let sorted_jobs (t: time) :=
+        sorted_pending_jobs job_cost num_cpus arr_seq higher_eq_priority sched t.
+
+      (* ..., we show that a job is mapped to a processor based on that list, ... *)
+      Lemma scheduler_nth_or_none_mapping :
+        forall t cpu x,
+          sched cpu t = x ->
+          nth_or_none (sorted_jobs t) cpu = x.
+      Proof. (* by case analysis on whether t = 0 *)
+        intros t cpu x SCHED.
+        unfold sched, scheduler, schedule_prefix in *.
+        destruct t.
+        {
+          unfold update_schedule in SCHED; rewrite eq_refl in SCHED.
+          rewrite -SCHED; f_equal.
+          unfold sorted_jobs, sorted_pending_jobs; f_equal.
+          unfold jobs_pending_at; apply eq_filter; red; intro j'.
+          unfold pending; f_equal; f_equal.
+          unfold completed, service.
+          by rewrite big_geq // big_geq //.
+        }
+        {
+          unfold update_schedule at 1 in SCHED; rewrite eq_refl in SCHED.
+          rewrite -SCHED; f_equal.
+          unfold sorted_jobs, sorted_pending_jobs; f_equal.
+          unfold jobs_pending_at; apply eq_filter; red; intro j'.
+          unfold pending; f_equal; f_equal.
+          unfold completed, service; f_equal.
+          apply eq_big_nat; move => t0 /andP [_ LT].
+          unfold service_at; apply eq_bigl; red; intros cpu'.
+          fold (schedule_prefix job_cost num_cpus arr_seq higher_eq_priority).
+          by rewrite 2?scheduler_same_prefix ?leqnn //.
+        }
+      Qed.
+      
+      (* ..., a scheduled job is mapped to a cpu corresponding to its position, ... *)
+      Lemma scheduler_nth_or_none_scheduled :
+        forall j t,
+          scheduled sched j t ->
+          exists (cpu: processor num_cpus),
+            nth_or_none (sorted_jobs t) cpu = Some j. 
+      Proof. (* follows directly from scheduler_nth_or_none_mapping *)
+        intros j t SCHED.
+        move: SCHED => /exists_inP [cpu INcpu /eqP SCHED]; exists cpu.
+        by apply scheduler_nth_or_none_mapping.
+      Qed.
+
+      (* ..., and that a backlogged job has a position larger than or equal to the number
+         of processors. *)
+      Lemma scheduler_nth_or_none_backlogged :
+        forall j t,
+          backlogged job_cost sched j t ->
+          exists i,
+            nth_or_none (sorted_jobs t) i = Some j /\ i >= num_cpus.
+      Proof. (* j's index in the sorted list cannot be below num_cpus, else j would be scheduled *)
+        intros j t BACK.
+        move: BACK => /andP [PENDING /negP NOTCOMP].
+        assert (IN: j \in sorted_jobs t).
+        {
+          rewrite mem_sort mem_filter PENDING andTb.
+          move: PENDING => /andP [ARRIVED _].
+          by rewrite JobIn_has_arrived.
+        }
+        apply nth_or_none_mem_exists in IN; des.
+        exists n; split; first by done.
+        rewrite leqNgt; apply/negP; red; intro LT.
+        apply NOTCOMP; clear NOTCOMP PENDING.
+        apply/exists_inP; exists (Ordinal LT); [by done | apply/eqP].
+        unfold sorted_jobs in *; clear sorted_jobs.
+        unfold sched, scheduler, schedule_prefix in *; clear sched.
+        destruct t. 
+        {
+          unfold update_schedule; rewrite eq_refl.
+          rewrite -IN; f_equal.
+          fold (schedule_prefix job_cost num_cpus arr_seq higher_eq_priority).
+          unfold sorted_pending_jobs; f_equal.
+          apply eq_filter; red; intros x.
+          unfold pending; f_equal; f_equal.
+          unfold completed; f_equal.
+          by unfold service; rewrite 2?big_geq //.
+        }
+        {
+          unfold update_schedule at 1; rewrite eq_refl.
+          rewrite -IN; f_equal.
+          unfold sorted_pending_jobs; f_equal.
+          apply eq_filter; red; intros x.
+          unfold pending; f_equal; f_equal.
+          unfold completed; f_equal.
+          unfold service; apply eq_big_nat; move => i /andP [_ LTi].
+          unfold service_at; apply eq_bigl; red; intro cpu; f_equal.
+          fold (schedule_prefix job_cost num_cpus arr_seq higher_eq_priority).
+          by rewrite scheduler_same_prefix //.
+        }
+      Qed.
+
+    End HelperLemmas.
+
+    (* Now, we prove the important properties about the implementation. *)
+    
+    (* Jobs do not execute before they arrive, ... *)
+    Theorem scheduler_jobs_must_arrive_to_execute:
+      jobs_must_arrive_to_execute sched.
+    Proof. (* any scheduled job is in the filtered list of jobs arrived up to t *)
+      unfold jobs_must_arrive_to_execute.
+      intros j t SCHED.
+      move: SCHED => /existsP [cpu /andP [INcpu /eqP SCHED]].
+      unfold sched, scheduler, schedule_prefix in SCHED.
+      destruct t.
+      {
+        rewrite /update_schedule eq_refl in SCHED.
+        apply (nth_or_none_mem _ cpu j) in SCHED.
+        rewrite mem_sort mem_filter in SCHED.
+        move: SCHED => /andP [_ ARR].
+        by apply JobIn_has_arrived in ARR.
+      }
+      {
+        unfold update_schedule at 1 in SCHED; rewrite eq_refl /= in SCHED.
+        apply (nth_or_none_mem _ cpu j) in SCHED.
+        rewrite mem_sort mem_filter in SCHED.
+        move: SCHED => /andP [_ ARR].
+        by apply JobIn_has_arrived in ARR.
+      }
+    Qed.
+
+    (* ..., jobs do not execute on multiple processors, ... *)
+    Theorem scheduler_no_parallelism:
+      jobs_dont_execute_in_parallel sched.
+    Proof. (* the sorted pending list is duplicate-free, so nth_or_none is injective on it *)
+      unfold jobs_dont_execute_in_parallel, sched, scheduler, schedule_prefix.
+      intros j t cpu1 cpu2 SCHED1 SCHED2.
+      destruct t; rewrite /update_schedule eq_refl in SCHED1 SCHED2;
+      have UNIQ := nth_or_none_uniq _ cpu1 cpu2 j _ SCHED1 SCHED2; (apply ord_inj, UNIQ);
+      rewrite sort_uniq filter_uniq //;
+      by apply JobIn_uniq.
+    Qed.
+               
+    (* ... and jobs do not execute after completion. *)
+    Theorem scheduler_completed_jobs_dont_execute:
+      completed_jobs_dont_execute job_cost sched.
+    Proof. (* by induction on t; a job that reached its cost is no longer pending, hence not picked *)
+      rename H_job_cost_positive into GT0.
+      unfold completed_jobs_dont_execute, service.
+      intros j t.
+      induction t; first by rewrite big_geq.
+      {
+        rewrite big_nat_recr // /=.
+        rewrite leq_eqVlt in IHt; move: IHt => /orP [/eqP EQ | LESS]; last first.
+        {
+          destruct (job_cost j); first by rewrite ltn0 in LESS.
+          rewrite -addn1; rewrite ltnS in LESS.
+          apply leq_add; first by done.
+          by apply service_at_most_one, scheduler_no_parallelism.
+        }
+        rewrite EQ -{2}[job_cost j]addn0; apply leq_add; first by done.
+        destruct t.
+        {
+          rewrite big_geq // in EQ.
+          specialize (GT0 j); unfold job_cost_positive in *.
+          by rewrite -EQ ltn0 in GT0.
+        }
+        {
+          unfold service_at; rewrite big_mkcond.
+          apply leq_trans with (n := \sum_(cpu < num_cpus) 0);
+            last by rewrite big_const_ord iter_addn mul0n addn0.
+          apply leq_sum; intros cpu _; desf.
+          move: Heq => /eqP SCHED.
+          unfold scheduler, schedule_prefix in SCHED.
+          unfold sched, scheduler, schedule_prefix, update_schedule at 1 in SCHED.
+          rewrite eq_refl in SCHED.
+          apply (nth_or_none_mem _ cpu j) in SCHED.
+          rewrite mem_sort mem_filter in SCHED.
+          fold (update_schedule job_cost num_cpus arr_seq higher_eq_priority) in SCHED.
+          move: SCHED => /andP [/andP [_ /negP NOTCOMP] _].
+          exfalso; apply NOTCOMP; clear NOTCOMP.
+          unfold completed; apply/eqP.
+          unfold service; rewrite -EQ.
+          rewrite big_nat_cond [\sum_(_ <= _ < _ | true)_]big_nat_cond.
+          apply eq_bigr; move => i /andP [/andP [_ LT] _].
+          apply eq_bigl; red; ins.
+          fold (schedule_prefix job_cost num_cpus arr_seq higher_eq_priority).
+          by rewrite scheduler_same_prefix.
+        }
+      }
+    Qed.
+
+    (* In addition, the scheduler is work conserving ... *)
+    Theorem scheduler_work_conserving:
+      work_conserving job_cost sched.
+    Proof. (* an idle cpu would give a backlogged job an index below num_cpus -- contradiction *)
+      unfold work_conserving; intros j t BACK cpu.
+      set jobs := sorted_pending_jobs job_cost num_cpus arr_seq higher_eq_priority sched t.
+      destruct (sched cpu t) eqn:SCHED; first by exists j0; apply/eqP.
+      apply scheduler_nth_or_none_backlogged in BACK.
+      destruct BACK as [cpu_out [NTH GE]].
+      exfalso; rewrite leqNgt in GE; move: GE => /negP GE; apply GE.
+      apply leq_ltn_trans with (n := cpu); last by done.
+      apply scheduler_nth_or_none_mapping in SCHED.
+      apply nth_or_none_size_none in SCHED.
+      apply leq_trans with (n := size jobs); last by done.
+      by apply nth_or_none_size_some in NTH; apply ltnW.
+    Qed.
+
+    (* ... and enforces the JLDP policy. *)
+    Theorem scheduler_enforces_policy :
+      enforces_JLDP_policy job_cost sched higher_eq_priority.
+    Proof. (* scheduled jobs occupy earlier positions in the priority-sorted list *)
+      unfold enforces_JLDP_policy; intros j j_hp t BACK SCHED.
+      set jobs := sorted_pending_jobs job_cost num_cpus arr_seq higher_eq_priority sched t.
+      apply scheduler_nth_or_none_backlogged in BACK.
+      destruct BACK as [cpu_out [SOME GE]].
+      apply scheduler_nth_or_none_scheduled in SCHED.
+      destruct SCHED as [cpu SCHED].
+      have EQ1 := nth_or_none_nth jobs cpu j_hp j SCHED.
+      have EQ2 := nth_or_none_nth jobs cpu_out j j SOME.
+      rewrite -EQ1 -{2}EQ2.
+      apply sorted_lt_idx_implies_rel; [by done | by apply sort_sorted | |].
+      - by apply leq_trans with (n := num_cpus).
+      - by apply nth_or_none_size_some in SOME.
+    Qed.
+
+  End Proofs.
+    
+End WorkConservingScheduler.
\ No newline at end of file
diff --git a/implementation/jitter/schedule.v b/implementation/jitter/schedule.v
new file mode 100644
index 0000000000000000000000000000000000000000..c242c70f6371a9136e57e19ee92b1218bb3bf345
--- /dev/null
+++ b/implementation/jitter/schedule.v
@@ -0,0 +1,343 @@
+Add LoadPath "../.." as rt.
+Require Import rt.util.Vbase rt.util.lemmas.
+Require Import rt.model.jitter.job rt.model.jitter.arrival_sequence rt.model.jitter.schedule
+               rt.model.jitter.platform rt.model.jitter.priority.
+Require Import Program ssreflect ssrbool ssrfun eqtype ssrnat fintype bigop seq path.
+
+Module WorkConservingScheduler.
+
+  Import Job ArrivalSequence ScheduleWithJitter Platform Priority.
+  
+  Section Implementation.
+    
+    Context {Job: eqType}.
+    Variable job_cost: Job -> nat.
+    Variable job_jitter: Job -> nat.
+
+    (* Let num_cpus denote the number of processors, ... *)
+    Variable num_cpus: nat.
+
+    (* ... and let arr_seq be any arrival sequence. *)
+    Variable arr_seq: arrival_sequence Job.
+
+    (* Assume a JLDP policy is given. *)
+    Variable higher_eq_priority: JLDP_policy arr_seq.
+
+    (* Consider the list of pending jobs at time t. *)
+    Definition jobs_pending_at (sched: schedule num_cpus arr_seq) (t: time) :=
+      [seq j <- jobs_arrived_up_to arr_seq t | pending job_cost job_jitter sched j t].
+
+    (* Next, we sort this list by priority. *)
+    Definition sorted_pending_jobs (sched: schedule num_cpus arr_seq) (t: time) :=
+      sort (higher_eq_priority t) (jobs_pending_at sched t).
+
+    (* Starting from the empty schedule as a base, ... *)
+    Definition empty_schedule : schedule num_cpus arr_seq :=
+      fun t cpu => None. (* NOTE(review): binder names look swapped w.r.t. update_schedule (first argument is the cpu) -- harmless, as both are ignored *)
+
+    (* ..., we redefine the mapping of jobs to processors at any time t as follows.
+       The i-th job in the sorted list is assigned to the i-th cpu, or to None
+       if the list is too short. *)
+    Definition update_schedule (prev_sched: schedule num_cpus arr_seq)
+                               (t_next: time) : schedule num_cpus arr_seq :=
+      fun cpu t =>
+        if t == t_next then
+          nth_or_none (sorted_pending_jobs prev_sched t) cpu
+        else prev_sched cpu t.
+    
+    (* The schedule is iteratively constructed by applying update_schedule at every time t, ... *)
+    Fixpoint schedule_prefix (t_max: time) : schedule num_cpus arr_seq := 
+      if t_max is t_prev.+1 then
+        (* At time t_prev + 1, schedule jobs that have not completed by time t_prev. *)
+        update_schedule (schedule_prefix t_prev) t_prev.+1
+      else
+        (* At time 0, schedule any jobs that arrive. *)
+        update_schedule empty_schedule 0.
+
+    Definition scheduler (cpu: processor num_cpus) (t: time) := (schedule_prefix t) cpu t. (* the schedule at time t is read from the prefix built up to t *)
+
+  End Implementation.
+
+  Section Proofs.
+
+    Context {Job: eqType}.
+    Variable job_cost: Job -> nat.
+    Variable job_jitter: Job -> nat.
+
+    (* Assume a positive number of processors. *)
+    Variable num_cpus: nat.
+    Hypothesis H_at_least_one_cpu: num_cpus > 0.
+
+    (* Let arr_seq be any arrival sequence of jobs where ... *)
+    Variable arr_seq: arrival_sequence Job.
+    (* ... jobs have positive cost and ... *)
+    Hypothesis H_job_cost_positive:
+      forall (j: JobIn arr_seq), job_cost_positive job_cost j.
+    (* ... at any time, there are no duplicates of the same job. *)
+    Hypothesis H_arrival_sequence_is_a_set :
+      arrival_sequence_is_a_set arr_seq.
+
+    (* Consider any JLDP policy higher_eq_priority that is transitive and total. *)
+    Variable higher_eq_priority: JLDP_policy arr_seq.
+    Hypothesis H_priority_transitive: forall t, transitive (higher_eq_priority t).
+    Hypothesis H_priority_total: forall t, total (higher_eq_priority t).
+
+    (* Let sched denote our concrete scheduler implementation. *)
+    Let sched := scheduler job_cost job_jitter num_cpus arr_seq higher_eq_priority.
+
+    (* Next, we provide some helper lemmas about the scheduler construction. *)
+    Section HelperLemmas.
+      
+      (* First, we show that the scheduler preserves its prefixes. *)
+      Lemma scheduler_same_prefix :
+        forall t t_max cpu,
+          t <= t_max ->
+          schedule_prefix job_cost job_jitter num_cpus arr_seq higher_eq_priority t_max cpu t =
+          scheduler job_cost job_jitter num_cpus arr_seq higher_eq_priority cpu t.
+      Proof. (* by induction on t_max *)
+        intros t t_max cpu LEt.
+        induction t_max.
+        {
+          by rewrite leqn0 in LEt; move: LEt => /eqP EQ; subst.
+        }
+        {
+          rewrite leq_eqVlt in LEt.
+          move: LEt => /orP [/eqP EQ | LESS]; first by subst.
+          {
+            feed IHt_max; first by done.
+            unfold schedule_prefix, update_schedule at 1.
+            assert (FALSE: t == t_max.+1 = false).
+            {
+              by apply negbTE; rewrite neq_ltn LESS orTb.
+            } rewrite FALSE.
+            by rewrite -IHt_max.
+          }
+        }
+      Qed.
+
+      (* With respect to the sorted list of pending jobs, ... *)
+      Let sorted_jobs (t: time) :=
+        sorted_pending_jobs job_cost job_jitter num_cpus arr_seq higher_eq_priority sched t.
+
+      (* ..., we show that a job is mapped to a processor based on that list, ... *)
+      Lemma scheduler_nth_or_none_mapping :
+        forall t cpu x,
+          sched cpu t = x ->
+          nth_or_none (sorted_jobs t) cpu = x.
+      Proof. (* by case analysis on whether t = 0 *)
+        intros t cpu x SCHED.
+        unfold sched, scheduler, schedule_prefix in *.
+        destruct t.
+        {
+          unfold update_schedule in SCHED; rewrite eq_refl in SCHED.
+          rewrite -SCHED; f_equal.
+          unfold sorted_jobs, sorted_pending_jobs; f_equal.
+          unfold jobs_pending_at; apply eq_filter; red; intro j'.
+          unfold pending; f_equal; f_equal.
+          unfold completed, service.
+          by rewrite big_geq // big_geq //.
+        }
+        {
+          unfold update_schedule at 1 in SCHED; rewrite eq_refl in SCHED.
+          rewrite -SCHED; f_equal.
+          unfold sorted_jobs, sorted_pending_jobs; f_equal.
+          unfold jobs_pending_at; apply eq_filter; red; intro j'.
+          unfold pending; f_equal; f_equal.
+          unfold completed, service; f_equal.
+          apply eq_big_nat; move => t0 /andP [_ LT].
+          unfold service_at; apply eq_bigl; red; intros cpu'.
+          fold (schedule_prefix job_cost job_jitter num_cpus arr_seq higher_eq_priority).
+          by rewrite 2?scheduler_same_prefix ?leqnn //.
+        }
+      Qed.
+      
+      (* ..., a scheduled job is mapped to a cpu corresponding to its position, ... *)
+      Lemma scheduler_nth_or_none_scheduled :
+        forall j t,
+          scheduled sched j t ->
+          exists (cpu: processor num_cpus),
+            nth_or_none (sorted_jobs t) cpu = Some j. 
+      Proof. (* follows directly from scheduler_nth_or_none_mapping *)
+        intros j t SCHED.
+        move: SCHED => /exists_inP [cpu INcpu /eqP SCHED]; exists cpu.
+        by apply scheduler_nth_or_none_mapping.
+      Qed.
+
+      (* ..., and that a backlogged job has a position larger than or equal to the number
+         of processors. *)
+      Lemma scheduler_nth_or_none_backlogged :
+        forall j t,
+          backlogged job_cost job_jitter sched j t ->
+          exists i,
+            nth_or_none (sorted_jobs t) i = Some j /\ i >= num_cpus.
+      Proof. (* j's index in the sorted list cannot be below num_cpus, else j would be scheduled *)
+        intros j t BACK.
+        move: BACK => /andP [PENDING /negP NOTCOMP].
+        assert (IN: j \in sorted_jobs t).
+        {
+          rewrite mem_sort mem_filter PENDING andTb.
+          move: PENDING => /andP [ARRIVED _].
+          rewrite JobIn_has_arrived.
+          by apply leq_trans with (n := job_arrival j + job_jitter j);
+            first by apply leq_addr.
+        }
+        apply nth_or_none_mem_exists in IN; des.
+        exists n; split; first by done.
+        rewrite leqNgt; apply/negP; red; intro LT.
+        apply NOTCOMP; clear NOTCOMP PENDING.
+        apply/exists_inP; exists (Ordinal LT); [by done | apply/eqP].
+        unfold sorted_jobs in *; clear sorted_jobs.
+        unfold sched, scheduler, schedule_prefix in *; clear sched.
+        destruct t. 
+        {
+          unfold update_schedule; rewrite eq_refl.
+          rewrite -IN; f_equal.
+          fold (schedule_prefix job_cost job_jitter num_cpus arr_seq higher_eq_priority).
+          unfold sorted_pending_jobs; f_equal.
+          apply eq_filter; red; intros x.
+          unfold pending; f_equal; f_equal.
+          unfold completed; f_equal.
+          by unfold service; rewrite 2?big_geq //.
+        }
+        {
+          unfold update_schedule at 1; rewrite eq_refl.
+          rewrite -IN; f_equal.
+          unfold sorted_pending_jobs; f_equal.
+          apply eq_filter; red; intros x.
+          unfold pending; f_equal; f_equal.
+          unfold completed; f_equal.
+          unfold service; apply eq_big_nat; move => i /andP [_ LTi].
+          unfold service_at; apply eq_bigl; red; intro cpu; f_equal.
+          fold (schedule_prefix job_cost job_jitter num_cpus arr_seq higher_eq_priority).
+          by rewrite scheduler_same_prefix //.
+        }
+      Qed.
+
+    End HelperLemmas.
+
+    (* Now, we prove the important properties about the implementation. *)
+    
+    (* Jobs do not execute before they arrive, ... *)
+    Theorem scheduler_jobs_must_arrive_to_execute:
+      jobs_must_arrive_to_execute sched.
+    Proof. (* any scheduled job is in the filtered list of jobs arrived up to t *)
+      unfold jobs_must_arrive_to_execute.
+      intros j t SCHED.
+      move: SCHED => /existsP [cpu /andP [INcpu /eqP SCHED]].
+      unfold sched, scheduler, schedule_prefix in SCHED.
+      destruct t.
+      {
+        rewrite /update_schedule eq_refl in SCHED.
+        apply (nth_or_none_mem _ cpu j) in SCHED.
+        rewrite mem_sort mem_filter in SCHED.
+        move: SCHED => /andP [_ ARR].
+        by apply JobIn_has_arrived in ARR.
+      }
+      {
+        unfold update_schedule at 1 in SCHED; rewrite eq_refl /= in SCHED.
+        apply (nth_or_none_mem _ cpu j) in SCHED.
+        rewrite mem_sort mem_filter in SCHED.
+        move: SCHED => /andP [_ ARR].
+        by apply JobIn_has_arrived in ARR.
+      }
+    Qed.
+
+    (* ..., jobs do not execute on multiple processors, ... *)
+    Theorem scheduler_no_parallelism:
+      jobs_dont_execute_in_parallel sched.
+    Proof. (* the sorted pending list is duplicate-free, so nth_or_none is injective on it *)
+      unfold jobs_dont_execute_in_parallel, sched, scheduler, schedule_prefix.
+      intros j t cpu1 cpu2 SCHED1 SCHED2.
+      destruct t; rewrite /update_schedule eq_refl in SCHED1 SCHED2;
+      have UNIQ := nth_or_none_uniq _ cpu1 cpu2 j _ SCHED1 SCHED2; (apply ord_inj, UNIQ);
+      rewrite sort_uniq filter_uniq //;
+      by apply JobIn_uniq.
+    Qed.
+               
+    (* ... and jobs do not execute after completion. *)
+    Theorem scheduler_completed_jobs_dont_execute:
+      completed_jobs_dont_execute job_cost sched.
+    Proof. (* by induction on t; a job that reached its cost is no longer pending, hence not picked *)
+      rename H_job_cost_positive into GT0.
+      unfold completed_jobs_dont_execute, service.
+      intros j t.
+      induction t; first by rewrite big_geq.
+      {
+        rewrite big_nat_recr // /=.
+        rewrite leq_eqVlt in IHt; move: IHt => /orP [/eqP EQ | LESS]; last first.
+        {
+          destruct (job_cost j); first by rewrite ltn0 in LESS.
+          rewrite -addn1; rewrite ltnS in LESS.
+          apply leq_add; first by done.
+          by apply service_at_most_one, scheduler_no_parallelism.
+        }
+        rewrite EQ -{2}[job_cost j]addn0; apply leq_add; first by done.
+        destruct t.
+        {
+          rewrite big_geq // in EQ.
+          specialize (GT0 j); unfold job_cost_positive in *.
+          by rewrite -EQ ltn0 in GT0.
+        }
+        {
+          unfold service_at; rewrite big_mkcond.
+          apply leq_trans with (n := \sum_(cpu < num_cpus) 0);
+            last by rewrite big_const_ord iter_addn mul0n addn0.
+          apply leq_sum; intros cpu _; desf.
+          move: Heq => /eqP SCHED.
+          unfold scheduler, schedule_prefix in SCHED.
+          unfold sched, scheduler, schedule_prefix, update_schedule at 1 in SCHED.
+          rewrite eq_refl in SCHED.
+          apply (nth_or_none_mem _ cpu j) in SCHED.
+          rewrite mem_sort mem_filter in SCHED.
+          fold (update_schedule job_cost job_jitter num_cpus arr_seq higher_eq_priority) in SCHED.
+          move: SCHED => /andP [/andP [_ /negP NOTCOMP] _].
+          exfalso; apply NOTCOMP; clear NOTCOMP.
+          unfold completed; apply/eqP.
+          unfold service; rewrite -EQ.
+          rewrite big_nat_cond [\sum_(_ <= _ < _ | true)_]big_nat_cond.
+          apply eq_bigr; move => i /andP [/andP [_ LT] _].
+          apply eq_bigl; red; ins.
+          fold (schedule_prefix job_cost job_jitter num_cpus arr_seq higher_eq_priority).
+          by rewrite scheduler_same_prefix.
+        }
+      }
+    Qed.
+
+    (* In addition, the scheduler is work conserving ... *)
+    Theorem scheduler_work_conserving:
+      work_conserving job_cost job_jitter sched.
+    Proof. (* an idle cpu would give a backlogged job an index below num_cpus -- contradiction *)
+      unfold work_conserving; intros j t BACK cpu.
+      set jobs := sorted_pending_jobs job_cost job_jitter num_cpus arr_seq higher_eq_priority sched t.
+      destruct (sched cpu t) eqn:SCHED; first by exists j0; apply/eqP.
+      apply scheduler_nth_or_none_backlogged in BACK.
+      destruct BACK as [cpu_out [NTH GE]].
+      exfalso; rewrite leqNgt in GE; move: GE => /negP GE; apply GE.
+      apply leq_ltn_trans with (n := cpu); last by done.
+      apply scheduler_nth_or_none_mapping in SCHED.
+      apply nth_or_none_size_none in SCHED.
+      apply leq_trans with (n := size jobs); last by done.
+      by apply nth_or_none_size_some in NTH; apply ltnW.
+    Qed.
+
+    (* ... and enforces the JLDP policy. *)
+    Theorem scheduler_enforces_policy :
+      enforces_JLDP_policy job_cost job_jitter sched higher_eq_priority.
+    Proof. (* scheduled jobs occupy earlier positions in the priority-sorted list *)
+      unfold enforces_JLDP_policy; intros j j_hp t BACK SCHED.
+      set jobs := sorted_pending_jobs job_cost job_jitter num_cpus arr_seq higher_eq_priority sched t.
+      apply scheduler_nth_or_none_backlogged in BACK.
+      destruct BACK as [cpu_out [SOME GE]].
+      apply scheduler_nth_or_none_scheduled in SCHED.
+      destruct SCHED as [cpu SCHED].
+      have EQ1 := nth_or_none_nth jobs cpu j_hp j SCHED.
+      have EQ2 := nth_or_none_nth jobs cpu_out j j SOME.
+      rewrite -EQ1 -{2}EQ2.
+      apply sorted_lt_idx_implies_rel; [by done | by apply sort_sorted | |].
+      - by apply leq_trans with (n := num_cpus).
+      - by apply nth_or_none_size_some in SOME.
+    Qed.
+
+  End Proofs.
+    
+End WorkConservingScheduler.
\ No newline at end of file
diff --git a/model/basic/platform.v b/model/basic/platform.v
index 1db70b21f85a55a1863b576e4b045911aa91a06d..41234c3ac2676acf18f04e50bd0cf5a40d271b02 100644
--- a/model/basic/platform.v
+++ b/model/basic/platform.v
@@ -32,6 +32,14 @@ Module Platform.
       (* A scheduler is work-conserving iff when a job j is backlogged,
          all processors are busy with other jobs. *)
       Definition work_conserving :=
+        forall (j: JobIn arr_seq) t,
+          backlogged job_cost sched j t ->
+          forall cpu, exists j_other,
+            scheduled_on sched j_other cpu t.
+
+      (* We also provide an alternative, equivalent definition of work-conserving
+         based on counting the number of scheduled jobs. *)
+      Definition work_conserving_count :=
         forall (j: JobIn arr_seq) t,
           backlogged job_cost sched j t ->
           size (jobs_scheduled_at sched t) = num_cpus.
@@ -73,6 +81,83 @@ Module Platform.
         forall (j: JobIn arr_seq),
           valid_sporadic_job task_cost task_deadline job_cost job_deadline job_task j.
 
+      (* In this section, we prove that the two definitions of work-conserving are equivalent. *)
+      Section EquivalentDefinitions.
+
+        Lemma work_conserving_eq_work_conserving_count :
+          work_conserving <-> work_conserving_count.
+        Proof.
+          unfold work_conserving, work_conserving_count; split.
+          { (* -> : if every processor is busy, exactly num_cpus jobs are scheduled *)
+            intros EX j t BACK.
+            specialize (EX j t BACK).
+            apply eq_trans with (y := size (enum (processor num_cpus)));
+              last by rewrite size_enum_ord. (* it suffices to count the processors themselves *)
+            unfold jobs_scheduled_at.
+            apply eq_trans with (y := size ((\cat_(cpu < num_cpus) map (fun x => Some x)
+                                              (make_sequence (sched cpu t)))));
+              first by rewrite -map_bigcat_ord size_map. (* wrapping each job in Some preserves the size *)
+            apply eq_trans with (y := size (\cat_(cpu < num_cpus) [:: sched cpu t])).
+            { (* no processor is idle, so each per-cpu sequence is a singleton *)
+              f_equal; apply eq_bigr; intros cpu _.
+              destruct (sched cpu t) eqn:SCHED; first by done.
+              by specialize (EX cpu); des; move: EX => /eqP EX; rewrite EX in SCHED. (* an idle cpu contradicts EX *)
+            }
+            rewrite size_bigcat_ord.
+            apply eq_trans with (y := \sum_(i < num_cpus) 1);
+              last by rewrite big_const_ord iter_addn mul1n addn0 size_enum_ord.
+            by apply eq_bigr. (* each singleton contributes exactly 1 *)
+          }
+          { (* <- : if num_cpus jobs are scheduled, no processor can be idle *)
+            intros SIZE j t BACK cpu.
+            specialize (SIZE j t BACK).
+            destruct ([exists cpu, sched cpu t == None]) eqn:EX; last first.
+            { (* no idle processor: the job scheduled on cpu is the witness *)
+              apply negbT in EX; rewrite negb_exists in EX.
+              move: EX => /forallP ALL; specialize (ALL cpu).
+              destruct (sched cpu t) eqn:SOME; last by done.
+              by exists j0; apply/eqP.
+            }
+            { (* some cpu' is idle: show the total count then falls short of num_cpus *)
+              move: EX => /existsP [cpu' /eqP EX].
+              unfold jobs_scheduled_at in SIZE.
+              move: SIZE => /eqP SIZE; rewrite -[size _ == _]negbK in SIZE.
+              move: SIZE => /negP SIZE; exfalso; apply SIZE; clear SIZE.
+              rewrite neq_ltn; apply/orP; left. (* prove the strict inequality: size < num_cpus *)
+              rewrite size_bigcat_ord.
+              rewrite -> big_mkord_ord with (x0 := 0).
+              have MKORD := big_mkord (fun x => true); rewrite -MKORD.
+              have CAT := big_cat_nat _ (fun x => true).
+              rewrite -> CAT with (n := cpu'); [simpl | by done | by apply ltnW, ltn_ord].   (* split the sum at the idle cpu' *)
+              assert (DIFF: exists k, num_cpus = (cpu' + k).+1). (* decompose num_cpus around position cpu' *)
+              {
+                exists (num_cpus - cpu').-1.
+                rewrite -addnS prednK; last by rewrite ltn_subRL addn0 ltn_ord.
+                rewrite addnBA; last by apply ltnW, ltn_ord.
+                by rewrite addnC -addnBA // subnn addn0.
+              } 
+              des; rewrite {5}DIFF.
+              rewrite big_nat_recl; last by apply leq_addr. (* peel off the summand for cpu' itself *)
+              apply leq_trans with (n := (\sum_(0 <= i < cpu') 1) + 1 + (\sum_(cpu' <= i < cpu' + k) 1));
+                last first.
+              { (* the bounding sum evaluates to exactly num_cpus *)
+                rewrite 2!big_const_nat 2!iter_addn 2!mul1n addn0 subn0.
+                rewrite [cpu' + k]addnC -addnBA // subnn 2!addn0.
+                by rewrite -addnA [1 + k]addnC addnA addn1 -DIFF.
+              }
+              { (* every summand is at most 1, and idle cpu' contributes 0 < 1 *)
+                rewrite -addn1 addnC [_ + 1]addnC -addnA.
+                apply leq_add; first by done.
+                rewrite eq_fun_ord_to_nat; unfold make_sequence at 2; rewrite EX /= add0n. 
+                apply leq_add; apply leq_sum; ins; unfold fun_ord_to_nat; des_eqrefl2; try done;
+                by unfold make_sequence; desf.
+              }
+            }
+          }
+        Qed.
+          
+      End EquivalentDefinitions.
+      
       Section JobInvariantAsTaskInvariant.
 
         (* Assume any work-conserving priority-based scheduler. *)
@@ -174,6 +259,7 @@ Module Platform.
                  H_all_previous_jobs_completed into PREV,
                  H_completed_jobs_dont_execute into COMP,
                  H_jobs_must_arrive_to_execute into ARRIVE.
+          apply work_conserving_eq_work_conserving_count in WORK.
           unfold valid_sporadic_job, valid_realtime_job,
                  enforces_JLDP_policy,
                  task_precedence_constraints, completed_jobs_dont_execute,
diff --git a/model/basic/platform_fp.v b/model/basic/platform_fp.v
index 464df24050ae5818fce1106503338f1352a8a0c1..a67195da248e6cada3aae20e2ae62c6a6fe4977a 100644
--- a/model/basic/platform_fp.v
+++ b/model/basic/platform_fp.v
@@ -189,6 +189,7 @@ Module PlatformFP.
                H_completed_jobs_dont_execute into COMP,
                H_all_previous_jobs_of_tsk_completed into PREVtsk,
                H_jobs_must_arrive_to_execute into ARRIVE.
+        apply work_conserving_eq_work_conserving_count in WORK.
         unfold valid_sporadic_job, valid_realtime_job,
                enforces_FP_policy, enforces_JLDP_policy, FP_to_JLDP,
                task_precedence_constraints, completed_jobs_dont_execute,
diff --git a/model/basic/schedule.v b/model/basic/schedule.v
index 5abd7681f48803fa724ee085331f3eefb533cf6c..ae17d3efc8cb37179e6cc02dc513bed07ada1ca5 100644
--- a/model/basic/schedule.v
+++ b/model/basic/schedule.v
@@ -43,6 +43,10 @@ Module Schedule.
     Variable sched: schedule num_cpus arr_seq.
     Variable j: JobIn arr_seq.
 
+    (* A job j is scheduled on processor cpu at time t iff sched maps cpu to (Some j) at t. *)
+    Definition scheduled_on (cpu: processor num_cpus) (t: time) :=
+      sched cpu t == Some j.
+    
     (* A job j is scheduled at time t iff there exists a cpu where it is mapped.*)
     Definition scheduled (t: time) :=
       [exists cpu in 'I_(num_cpus), sched cpu t == Some j].
@@ -507,7 +511,8 @@ Module Schedule.
         intros t.
         unfold jobs_scheduled_at.
         destruct num_cpus; first by rewrite big_ord0.
-        apply size_bigcat_ord; first by apply (Ordinal (ltnSn n)).
+        apply leq_trans with (1*n.+1); last by rewrite mul1n.
+        apply size_bigcat_ord_max.
         by ins; unfold make_sequence; desf.
       Qed.
 
diff --git a/model/jitter/platform.v b/model/jitter/platform.v
index 97f3cc17f29a539c54e433cedea39032691d4e90..7e71ae94772cb556879f6b6d11ebe83196d2e80a 100644
--- a/model/jitter/platform.v
+++ b/model/jitter/platform.v
@@ -32,12 +32,20 @@ Module Platform.
 
       (* A scheduler is work-conserving iff when a job j is backlogged,
          all processors are busy with other jobs.
-         NOTE: backlogged means that jitter has passed. *)
+         NOTE: backlogged means that jitter has already passed. *)
       Definition work_conserving :=
+        forall (j: JobIn arr_seq) t,
+          backlogged job_cost job_jitter sched j t ->
+          forall cpu, exists j_other,
+            scheduled_on sched j_other cpu t.
+
+      (* We also provide an alternative, equivalent definition of work-conserving
+         based on counting the number of scheduled jobs. *)
+      Definition work_conserving_count :=
         forall (j: JobIn arr_seq) t,
           backlogged job_cost job_jitter sched j t ->
           size (jobs_scheduled_at sched t) = num_cpus.
-      
+
     End WorkConserving.
 
     Section JLDP.
@@ -75,6 +83,83 @@ Module Platform.
         forall (j: JobIn arr_seq),
           valid_sporadic_job task_cost task_deadline job_cost job_deadline job_task j.
 
+      (* In this section, we prove that the two definitions of work-conserving are equivalent. *)
+      Section EquivalentDefinitions.
+
+        Lemma work_conserving_eq_work_conserving_count :
+          work_conserving <-> work_conserving_count.
+        Proof.
+          unfold work_conserving, work_conserving_count; split.
+          { (* -> : if every processor is busy, exactly num_cpus jobs are scheduled *)
+            intros EX j t BACK.
+            specialize (EX j t BACK).
+            apply eq_trans with (y := size (enum (processor num_cpus)));
+              last by rewrite size_enum_ord. (* it suffices to count the processors themselves *)
+            unfold jobs_scheduled_at.
+            apply eq_trans with (y := size ((\cat_(cpu < num_cpus) map (fun x => Some x)
+                                              (make_sequence (sched cpu t)))));
+              first by rewrite -map_bigcat_ord size_map. (* wrapping each job in Some preserves the size *)
+            apply eq_trans with (y := size (\cat_(cpu < num_cpus) [:: sched cpu t])).
+            { (* no processor is idle, so each per-cpu sequence is a singleton *)
+              f_equal; apply eq_bigr; intros cpu _.
+              destruct (sched cpu t) eqn:SCHED; first by done.
+              by specialize (EX cpu); des; move: EX => /eqP EX; rewrite EX in SCHED. (* an idle cpu contradicts EX *)
+            }
+            rewrite size_bigcat_ord.
+            apply eq_trans with (y := \sum_(i < num_cpus) 1);
+              last by rewrite big_const_ord iter_addn mul1n addn0 size_enum_ord.
+            by apply eq_bigr. (* each singleton contributes exactly 1 *)
+          }
+          { (* <- : if num_cpus jobs are scheduled, no processor can be idle *)
+            intros SIZE j t BACK cpu.
+            specialize (SIZE j t BACK).
+            destruct ([exists cpu, sched cpu t == None]) eqn:EX; last first.
+            { (* no idle processor: the job scheduled on cpu is the witness *)
+              apply negbT in EX; rewrite negb_exists in EX.
+              move: EX => /forallP ALL; specialize (ALL cpu).
+              destruct (sched cpu t) eqn:SOME; last by done.
+              by exists j0; apply/eqP.
+            }
+            { (* some cpu' is idle: show the total count then falls short of num_cpus *)
+              move: EX => /existsP [cpu' /eqP EX].
+              unfold jobs_scheduled_at in SIZE.
+              move: SIZE => /eqP SIZE; rewrite -[size _ == _]negbK in SIZE.
+              move: SIZE => /negP SIZE; exfalso; apply SIZE; clear SIZE.
+              rewrite neq_ltn; apply/orP; left. (* prove the strict inequality: size < num_cpus *)
+              rewrite size_bigcat_ord.
+              rewrite -> big_mkord_ord with (x0 := 0).
+              have MKORD := big_mkord (fun x => true); rewrite -MKORD.
+              have CAT := big_cat_nat _ (fun x => true).
+              rewrite -> CAT with (n := cpu'); [simpl | by done | by apply ltnW, ltn_ord].   (* split the sum at the idle cpu' *)
+              assert (DIFF: exists k, num_cpus = (cpu' + k).+1). (* decompose num_cpus around position cpu' *)
+              {
+                exists (num_cpus - cpu').-1.
+                rewrite -addnS prednK; last by rewrite ltn_subRL addn0 ltn_ord.
+                rewrite addnBA; last by apply ltnW, ltn_ord.
+                by rewrite addnC -addnBA // subnn addn0.
+              } 
+              des; rewrite {5}DIFF.
+              rewrite big_nat_recl; last by apply leq_addr. (* peel off the summand for cpu' itself *)
+              apply leq_trans with (n := (\sum_(0 <= i < cpu') 1) + 1 + (\sum_(cpu' <= i < cpu' + k) 1));
+                last first.
+              { (* the bounding sum evaluates to exactly num_cpus *)
+                rewrite 2!big_const_nat 2!iter_addn 2!mul1n addn0 subn0.
+                rewrite [cpu' + k]addnC -addnBA // subnn 2!addn0.
+                by rewrite -addnA [1 + k]addnC addnA addn1 -DIFF.
+              }
+              { (* every summand is at most 1, and idle cpu' contributes 0 < 1 *)
+                rewrite -addn1 addnC [_ + 1]addnC -addnA.
+                apply leq_add; first by done.
+                rewrite eq_fun_ord_to_nat; unfold make_sequence at 2; rewrite EX /= add0n. 
+                apply leq_add; apply leq_sum; ins; unfold fun_ord_to_nat; des_eqrefl2; try done;
+                by unfold make_sequence; desf.
+              }
+            }
+          }
+        Qed.
+          
+      End EquivalentDefinitions.
+
       Section JobInvariantAsTaskInvariant.
 
         (* Assume any work-conserving priority-based scheduler. *)
@@ -181,6 +266,7 @@ Module Platform.
                  H_all_previous_jobs_completed into PREV,
                  H_completed_jobs_dont_execute into COMP,
                  H_jobs_execute_after_jitter into JITTER.
+          apply work_conserving_eq_work_conserving_count in WORK.
           unfold valid_sporadic_job, valid_realtime_job,
                  enforces_JLDP_policy,
                  task_precedence_constraints, completed_jobs_dont_execute,
diff --git a/model/jitter/platform_fp.v b/model/jitter/platform_fp.v
index 61666d1254fc2d4501dcf5022beaac1711cdf79e..a3612466076ab1546ffad07f2ace95255c5e2b84 100644
--- a/model/jitter/platform_fp.v
+++ b/model/jitter/platform_fp.v
@@ -200,6 +200,7 @@ Module PlatformFP.
                H_completed_jobs_dont_execute into COMP,
                H_all_previous_jobs_of_tsk_completed into PREVtsk,
                H_jobs_execute_after_jitter into JITTER.
+        apply work_conserving_eq_work_conserving_count in WORK.
         unfold valid_sporadic_job, valid_realtime_job,
                enforces_FP_policy, enforces_JLDP_policy, FP_to_JLDP,
                task_precedence_constraints, completed_jobs_dont_execute,