(*
Created by Yongwang Zhao (zhaoyw@buaa.edu.cn)
School of Computer Science & Engineering, Beihang University, China
*)

theory mem_spec
imports Main Heap "../../adapter_SIMP/picore_SIMP" "../../adapter_SIMP/picore_SIMP_Syntax" List_aux (*aux_lemma*)
begin

section ‹data types and state›

typedecl Thread

typedef mempool_ref = ref by (simp add: ref_def)

text‹ we define a memory address as a natural number ›
type_synonym mem_ref = nat

abbreviation "NULL ≡ 0 :: nat"

text‹ we have a thread scheduler; a thread is in one of 3 states. BLOCKED means that the thread is waiting for memory and is in a wait queue ›
datatype Thread_State_Type = READY | RUNNING | BLOCKED

text‹
a memory block: a reference to a memory pool, a level index and a block index within this level, and a start address ``data''.
The max number of levels is @n\_levels of the memory pool, so @level should be < @n\_levels.
The number of blocks at level 0 is @n\_max.
The max number of blocks at level $n$ is $n\_max * 4 ^ n$; the block index should be less than this number.
›
record Mem_block = pool :: mempool_ref
                   level :: nat
                   block :: nat (* block id *)
                   data :: mem_ref
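
text‹
For illustration of the ranges above (the numbers are hypothetical): in a pool with @n\_max = 2 and @n\_levels = 3,
level 0 has 2 blocks, level 1 has at most $2 * 4 = 8$ blocks, and level 2 has at most $2 * 4^2 = 32$ blocks,
so a @Mem\_block with @level = 2 must satisfy @block < 32.
›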

text‹
BlockState defines the bit information in the bitmap. In this design we use distinct constructors rather than 0 or 1;
the block state could later be implemented as 0 or 1 together with additional information.

the basic states of a memory block are ALLOCATED, FREE, DIVIDED and NOEXIST.
The levels of the bitmap actually form a quad-tree of BlockState.
ALLOCATED: the block is allocated to a thread
FREE: the block is free
DIVIDED: the block has been divided, i.e. it was split into 4 sub-blocks
NOEXIST: the block does not exist

ALLOCATED and FREE blocks are the leaf nodes of the quad-tree.
DIVIDED blocks are inner nodes of the quad-tree.
All other nodes are NOEXIST.

we also introduce the FREEING and ALLOCATING states to avoid the cases that a FREEING block is allocated by other threads
or an ALLOCATING block is freed by other threads. In the OS implementation, the allocating/freeing block is
internal to the alloc/free services, and other threads do not manipulate it.
These states mark the block that is about to be merged while freeing a block,
and the block that is about to be split while allocating a block.

we may remove the FREEING/ALLOCATING states later by revising the alloc and free syscalls so that they do not
allocate or free the blocks recorded in freeing\_node and allocating\_node.
›
datatype BlockState = ALLOCATED | FREE | DIVIDED | NOEXIST | FREEING | ALLOCATING
(* the FREEING state means that the block is being freed (i.e. during mem_pool_free),
so that another mem_pool_free cannot free the same block; otherwise an error occurs in the source code *)

text‹data structure at each level: a bitmap and a free block list›
record Mem_pool_lvl = (* bits :: "bool list" *) (* True: free, False: allocated*)
                      bits :: "BlockState list"
                      free_list :: "mem_ref list"

text‹
a memory pool is actually a forest of @n\_max blocks, each of size @max\_sz.
A block may be split into 4 sub-blocks and so on, at most @n\_levels times; thus each block may be split as a quad-tree.
a memory pool maintains one big memory block, where @buf is the start address of this memory block.
The size of a memory pool is @n\_max * @max\_sz.
@max\_sz has a constraint: a small block at the last level (level index @n\_levels - 1) should be 4-aligned,
i.e. the size of a block at the last level should be $4*n$ ($n > 0$). Here, we do not demand $4^n$, which is a special case of $4*n$.
Thus, @max\_sz should be $4 * n * 4^{n\_levels}$.

@levels maintains the information at each level, namely a bitmap and a free block list.
@wait\_q is the list of threads that are blocked on this memory pool.
›
record Mem_pool = buf :: mem_ref (* head address of the mem pool *)
                  max_sz :: nat (* the size of level 0 blocks *)
                  n_max :: nat (* the number of level 0 blocks *)
                  n_levels :: nat (* the max number of levels of blocks *)
                  (*max_inline_level :: nat *)
                  levels :: "Mem_pool_lvl list"
                  wait_q :: "Thread list"
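
text‹
A hypothetical instance, for concreteness: with @max\_sz = 1024, @n\_max = 2 and @n\_levels = 3,
the pool covers a memory region of size $2 * 1024 = 2048$ starting at @buf, and the block sizes
at levels 0, 1 and 2 are 1024, 256 and 64, since each split divides the size by 4.
›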

text‹
The state of memory management consists of the thread states, the memory pools, and the local variables of each thread.
In single-core OSs, there is only one currently executing thread @cur, where None means the scheduler has not chosen a thread.
@tick records the system time.
@mem\_pools maintains the references of all memory pools.
@mem\_pool\_info gives the detailed information of each memory pool by its reference.
we assume that all memory pools are shared by all threads. This is the most relaxed case;
the case where a memory pool is shared only by a subset of the threads is just a special case.
The other fields are local variables of each thread used in the alloc/free syscalls.

for each thread, we use freeing\_node to record the node being freed in the free syscall.
when freeing a block, we set it to FREEING and check whether its other 3 partner blocks are also free.
If so, we set the 4 blocks to NOEXIST and their parent block to FREEING, and so on,
until the other 3 partner blocks are not all free; then we set the FREEING block to FREE.
This design prevents a FREEING node from being allocated by other threads.

we use allocating\_node to record the node being allocated in the alloc syscall.
when allocating a block, we find a free block at the nearest upper level and set it to ALLOCATING.
if the block is too large, we split it into 4 child blocks:
we set the first child block to ALLOCATING and the other 3 blocks to FREE, and so on,
until the block size is suitable; then we set the ALLOCATING block to ALLOCATED.
This design prevents an ALLOCATING node from being freed by other threads.
›
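
text‹
A small hypothetical trace of the freeing scheme: suppose a thread frees the block with index 13 at level 2.
The block is first marked FREEING; its partner blocks are 12, 14 and 15. If they are all FREE,
the four blocks 12, 13, 14 and 15 become NOEXIST and the parent block 13 div 4 = 3 at level 1 becomes FREEING,
and the check is repeated one level up. As soon as some partner block is not FREE, the current FREEING
block is simply set to FREE.
›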
record State =
  (* threads and info *)
  cur :: "Thread option"
  tick :: nat
  thd_state :: "Thread ⇒ Thread_State_Type"

  (* all mem pools *)
  mem_pools :: "mempool_ref set"

  (*  k_mem_pool  *)
  mem_pool_info :: "mempool_ref ⇒ Mem_pool"

  (* local variables of threads *)
  i :: "Thread ⇒ nat"
  j :: "Thread ⇒ nat"
  ret :: "Thread ⇒ int"
  endt :: "Thread ⇒ nat"
  rf :: "Thread ⇒ bool"
  tmout :: "Thread ⇒ int"
  lsizes :: "Thread ⇒ nat list"
  alloc_l :: "Thread ⇒ int"
  free_l :: "Thread ⇒ int"
  from_l :: "Thread ⇒ int"
  blk :: "Thread ⇒ mem_ref"
  nodev :: "Thread ⇒ mem_ref"
  bn :: "Thread ⇒ nat"
  lbn :: "Thread ⇒ nat"
  lsz :: "Thread ⇒ nat"
  block2 :: "Thread ⇒ mem_ref"
  free_block_r :: "Thread ⇒ bool"
  alloc_lsize_r :: "Thread ⇒ bool"
  lvl :: "Thread ⇒ nat"
  bb :: "Thread ⇒ nat"
  block_pt :: "Thread ⇒ mem_ref"
  th :: "Thread ⇒ Thread"
  need_resched :: "Thread ⇒ bool"
  mempoolalloc_ret :: "Thread ⇒ Mem_block option"

  (* aux variables for RG *)
  freeing_node :: "Thread ⇒ Mem_block option"
  allocating_node :: "Thread ⇒ Mem_block option"


section ‹specification of events›

subsection ‹data types›

text‹
Since Zephyr uses fine-grained locks for shared memory pools,
interleaving among scheduling, the syscalls (alloc, free), and the clock tick is allowed.
Thus, we use 3 event systems to model scheduling, the syscalls issued by threads, and the clock tick.
The whole system is then the parallel composition of the three event systems.
In total, we have 1 scheduler, 1 timer, and n threads.
›
datatype Core = 𝒮 | 𝒯 Thread | Timer

text‹labels for different events›
datatype EL = ScheduleE | TickE | Mem_pool_allocE | Mem_pool_freeE | Mem_pool_defineE

text‹data types for event parameters›
datatype Parameter = Thread Thread | MPRef mempool_ref | MRef mem_ref | Block Mem_block | Natural nat | Integer int

type_synonym EventLabel = "EL × (Parameter list × Core)"

definition get_evt_label :: "EL ⇒ Parameter list ⇒ Core ⇒ EventLabel" ("_ _ ⇛ _" [30,30,30] 20)
  where "get_evt_label el ps k ≡ (el,(ps,k))"

text‹
define the waiting mode for alloc.
FOREVER means that if allocation fails, the thread will wait forever until allocation succeeds.
NOWAIT means that if allocation fails, the alloc syscall returns an error immediately.
otherwise (n > 0), the thread will wait for a timeout of n ticks.
›
abbreviation "FOREVER ≡ (-1)::int"
abbreviation "NOWAIT ≡ 0::int"

text‹
return codes for the alloc and free syscalls.
The free syscall always succeeds, so it returns OK.
The alloc syscall may succeed (OK), time out (ETIMEOUT), fail (ENOMEM), or fail because the requested size is too large (ESIZEERR).

EAGAIN is an internal flag of the alloc syscall. After it finds an available block for the request, the block may be allocated
immediately by another thread. In such a case, alloc yields EAGAIN and tries to allocate again.

We introduce ESIZEERR for Zephyr to avoid an infinite loop.
We introduce ETIMEOUT for Zephyr for robustness.
›
abbreviation "EAGAIN ≡ (-2)::int"
abbreviation "ENOMEM ≡ (-3)::int"
abbreviation "ESIZEERR ≡ (-4)::int"
abbreviation "OK ≡ 0 :: int"
abbreviation "ETIMEOUT ≡ (-1) :: int"

text‹
due to the fine-grained locking used by Zephyr, we use one command for each atomic statement of the free/alloc syscalls.
the statements of a syscall issued by a thread $t$ can only be executed when $t$ is the thread currently chosen by the scheduler.
We use the AWAIT statement to represent this semantics.
›
definition stm :: "Thread ⇒ State com ⇒ State com" ("_ ▸ _" [0,0] 21)
where "stm t p = AWAIT ´cur = Some t THEN p END"

subsection ‹aux definitions for events›

definition ALIGN4 :: "nat ⇒ nat"
where "ALIGN4 n ≡ ((n + 3) div 4) * 4"

lemma align40: "n mod 4 = 0 ⟹ ALIGN4 n = n"
  unfolding ALIGN4_def by auto

lemma align41: "n mod 4 = 1 ⟹ ALIGN4 n = n + 3"
  unfolding ALIGN4_def
proof -
  assume "n mod 4 = 1"
  then have "(n + 3) mod 4 = 0"
    by presburger
  then show "(n + 3) div 4 * 4 = n + 3"
    by fastforce
qed

lemma align42: "n mod 4 = 2 ⟹ ALIGN4 n = n + 2"
  unfolding ALIGN4_def
proof -
  assume "n mod 4 = 2"
  then have "(n + 2) mod 4 = 0"
    using mod_add_left_eq by presburger
  then show "(n + 3) div 4 * 4 = n + 2"
    by fastforce
qed

lemma align43: "n mod 4 = 3 ⟹ ALIGN4 n = n + 1"
  unfolding ALIGN4_def
proof -
  assume "n mod 4 = 3"
  then have "(n + 1) mod 4 = 0"
    using mod_add_left_eq by presburger
  then show "(n + 3) div 4 * 4 = n + 1"
    by fastforce
qed

lemma align_mod0: "ALIGN4 n mod 4 = 0"
  unfolding ALIGN4_def by simp

lemma align4_gt: "ALIGN4 n ≥ n ∧ ALIGN4 n ≤ n + 3"
  apply(case_tac "n mod 4 = 0")
    using align40 apply simp
  apply(case_tac "n mod 4 = 1")
    using align41 apply simp
  apply(case_tac "n mod 4 = 2")
    using align42 apply simp
  apply(case_tac "n mod 4 = 3")
    using align43 apply simp
  by auto

lemma align2_eq_align: "ALIGN4 (ALIGN4 n) = ALIGN4 n"
  unfolding ALIGN4_def by auto

text‹
Zephyr uses two operations for context switching: reschedule in free and swap in alloc.
›
definition reschedule :: "State com"
where "reschedule ≡
  ´thd_state := ´thd_state(the ´cur := READY);;
  ´cur := Some (SOME t. ´thd_state t = READY);;
  ´thd_state := ´thd_state(the ´cur := RUNNING)"

definition swap :: "State com"
where "swap ≡
  IF (∃t. ´thd_state t = READY) THEN
    ´cur := Some (SOME t. ´thd_state t = READY);;
    ´thd_state := ´thd_state(the ´cur := RUNNING)
  ELSE
    ´cur := None
  FI"

definition block_num :: "Mem_pool ⇒ mem_ref ⇒ nat ⇒ nat"
where "block_num p bl sz ≡ (bl - (buf p)) div sz"

definition clear_free_bit :: "(mempool_ref ⇒ Mem_pool) ⇒ mempool_ref ⇒ nat ⇒ nat ⇒ (mempool_ref ⇒ Mem_pool)"
where "clear_free_bit mp_info p l b ≡
        mp_info (p := (mp_info p) ⦇levels := (levels (mp_info p))
                [l := ((levels (mp_info p)) ! l) ⦇bits := (bits ((levels (mp_info p)) ! l)) [b := ALLOCATED]⦈] ⦈)"

definition set_bit :: "(mempool_ref ⇒ Mem_pool) ⇒ mempool_ref ⇒ nat ⇒ nat ⇒ BlockState ⇒ (mempool_ref ⇒ Mem_pool)"
where "set_bit mp_info p l b st ≡
        mp_info (p := (mp_info p) ⦇levels := (levels (mp_info p))
                [l := ((levels (mp_info p)) ! l) ⦇bits := (bits ((levels (mp_info p)) ! l)) [b := st]⦈] ⦈)"

abbreviation "set_bit_free mp_info p l b ≡ set_bit mp_info p l b FREE"
abbreviation "set_bit_alloc mp_info p l b ≡ set_bit mp_info p l b ALLOCATED"
abbreviation "set_bit_divide mp_info p l b ≡ set_bit mp_info p l b DIVIDED"
abbreviation "set_bit_noexist mp_info p l b ≡ set_bit mp_info p l b NOEXIST"
abbreviation "set_bit_freeing mp_info p l b ≡ set_bit mp_info p l b FREEING"
abbreviation "set_bit_allocating mp_info p l b ≡ set_bit mp_info p l b ALLOCATING"

definition set_bit_s :: "State ⇒ mempool_ref ⇒ nat ⇒ nat ⇒ BlockState ⇒ State"
where "set_bit_s s p l b st ≡
        s⦇mem_pool_info := set_bit (mem_pool_info s) p l b st ⦈"

lemma set_bit_prev_len:
"length (bits (levels (mp_info p) ! l)) = length (bits (levels ((set_bit mp_info p l b flg) p) ! l))"
  apply(simp add:set_bit_def)
  using list_updt_samelen
  by (metis (no_types, lifting) Mem_pool_lvl.select_convs(1) Mem_pool_lvl.surjective
        Mem_pool_lvl.update_convs(1) list_update_beyond not_less nth_list_update_eq)

lemma set_bit_prev_len2:
"l ≠ t ⟹ length (bits (levels (mp_info p) ! l)) = length (bits (levels ((set_bit mp_info p t b flg) p) ! l))"
  by(simp add:set_bit_def)

abbreviation get_bit :: "(mempool_ref ⇒ Mem_pool) ⇒ mempool_ref ⇒ nat ⇒ nat ⇒ BlockState"
where "get_bit mp_info p l b ≡ (bits ((levels (mp_info p)) ! l)) ! b"

abbreviation get_bit_s :: "State ⇒ mempool_ref ⇒ nat ⇒ nat ⇒ BlockState"
where "get_bit_s s p l b ≡ get_bit (mem_pool_info s) p l b"


lemma set_bit_get_bit_eq:
  "l < length (levels (mp_info p)) ⟹
   b < length (bits (levels (mp_info p) ! l)) ⟹
   mp_info2 = set_bit mp_info p l b st ⟹
   get_bit mp_info2 p l b = st"
 by (simp add:set_bit_def)

lemma set_bit_get_bit_eq2:
  "l < length (levels ((mem_pool_info Va) p)) ⟹
   b < length (bits (levels ((mem_pool_info Va) p) ! l)) ⟹
   get_bit_s (Va⦇ mem_pool_info := set_bit (mem_pool_info Va) p l b st⦈) p l b = st"
  using set_bit_get_bit_eq
    [of l "(mem_pool_info Va)" p b " set_bit (mem_pool_info Va) p l b st" st]
by simp

lemma set_bit_get_bit_neq:
  "p ≠ p1 ∨ l ≠ l1 ∨ b ≠ b1 ⟹
   mp_info2 = set_bit mp_info p l b st ⟹
   get_bit mp_info2 p1 l1 b1 = get_bit mp_info p1 l1 b1"
  apply(simp add:set_bit_def) apply auto
  by (metis (no_types, lifting) Mem_pool_lvl.select_convs(1) Mem_pool_lvl.surjective
      Mem_pool_lvl.update_convs(1) list_update_beyond not_less nth_list_update_eq nth_list_update_neq)

lemma set_bit_get_bit_neq2:
  "p ≠ p1 ∨ l ≠ l1 ∨ b ≠ b1 ⟹
   get_bit_s (Va⦇ mem_pool_info := set_bit (mem_pool_info Va) p l b st⦈) p1 l1 b1
     = get_bit_s Va p1 l1 b1"
  using set_bit_get_bit_neq
        [of p p1 l l1 b b1 "set_bit (mem_pool_info Va) p l b st" "mem_pool_info Va"]
by simp

definition buf_size :: "Mem_pool ⇒ nat"
where "buf_size m ≡ n_max m * max_sz m"

definition block_fits :: "Mem_pool ⇒ mem_ref ⇒ nat ⇒ bool"
where "block_fits p b bsz ≡ b + bsz < buf_size p + buf p + 1"

definition block_ptr :: "Mem_pool ⇒ nat ⇒ nat ⇒ mem_ref"
where "block_ptr p lsize b ≡ buf p + lsize * b"

definition partner_bits :: "Mem_pool ⇒ nat ⇒ nat ⇒ bool"
where "partner_bits p l b ≡ let bits = bits (levels p ! l);
                                a = (b div 4) * 4 in
                             bits!a = FREE ∧ bits!(a+1) = FREE ∧ bits!(a+2) = FREE ∧ bits!(a+3) = FREE"
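
text‹
For example, for block index 13 at level l, @partner\_bits inspects the aligned group starting at
(13 div 4) * 4 = 12, i.e. it holds iff the bits of blocks 12, 13, 14 and 15 are all FREE.
›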


lemma partbits_div4: "a div 4 = b div 4 ⟹ partner_bits p l a = partner_bits p l b"
by(simp add:partner_bits_def)

abbreviation noexist_bits :: "Mem_pool ⇒ nat ⇒ nat ⇒ bool"
where "noexist_bits mp ii jj ≡ (bits (levels mp ! ii)) ! jj = NOEXIST
                          ∧ (bits (levels mp ! ii)) ! (jj + 1) = NOEXIST
                          ∧ (bits (levels mp ! ii)) ! (jj + 2) = NOEXIST
                          ∧ (bits (levels mp ! ii)) ! (jj + 3) = NOEXIST"

definition level_empty :: "Mem_pool ⇒ nat ⇒ bool"
where "level_empty p n ≡ free_list (levels p!n) = []"

definition head_free_list :: "Mem_pool ⇒ nat ⇒ mem_ref"
where "head_free_list p l ≡ hd (free_list ((levels p) ! l))"

definition rmhead_free_list :: "Mem_pool ⇒ nat ⇒ Mem_pool"
where "rmhead_free_list p l ≡
    p⦇levels := (levels p)
        [l :=  ((levels p) ! l) ⦇free_list := tl (free_list ((levels p) ! l))⦈] ⦈"

definition remove_free_list :: "Mem_pool ⇒ nat ⇒ mem_ref ⇒ Mem_pool"
where "remove_free_list p l b ≡
    p⦇levels := (levels p)
        [l := ((levels p) ! l) ⦇free_list := remove1 b (free_list ((levels p) ! l))⦈] ⦈"

definition append_free_list :: "Mem_pool ⇒ nat ⇒ mem_ref ⇒ Mem_pool"
where "append_free_list p l b ≡
    p⦇levels := (levels p)
        [l := ((levels p) ! l) ⦇free_list := (free_list ((levels p) ! l)) @ [b]⦈] ⦈"

definition in_free_list :: "mem_ref ⇒ mem_ref list ⇒ bool"
where "in_free_list v fl ≡ (∃i<length fl. fl!i = v)"

subsection ‹specification of events›

lemma timeout_lm: "(timeout = FOREVER ∨ timeout = NOWAIT ∨ timeout > 0) = (timeout ≥ -1)"
  by auto

definition Mem_pool_alloc :: "Thread ⇒ mempool_ref ⇒ nat ⇒ int ⇒ (EventLabel, 'a, State, State com option) esys"
where "Mem_pool_alloc t p sz timeout =
  EVENT Mem_pool_allocE [MPRef p, Natural sz, Integer timeout] ⇛ (𝒯 t)
  WHEN
    p ∈ ´mem_pools
    ⌦‹(* ∧ ´cur = Some t*) (* t is the current thread *) (** this condition is not stable on rely condition **)›
    ∧ timeout ≥ -1 ⌦‹(* equv to (timeout = FOREVER ∨ timeout = NOWAIT ∨ timeout > 0) *)›
    ⌦‹(* ∧ p ∈ ´pools_of_thread t *) (* the mem pool p is shared in the thread t *)›
  THEN
    (t ▸ ´tmout := ´tmout(t := timeout));;

    (t ▸ ´endt := ´endt(t := 0));;
    (t ▸ IF timeout > 0 THEN
           ´endt := ´endt(t := ´tick + nat timeout)
         FI);;
    (t ▸ ´mempoolalloc_ret := ´mempoolalloc_ret (t := None));;
    (t ▸ ´ret := ´ret(t := ESIZEERR));;
    (t ▸ ´rf := ´rf(t := False));;
    WHILE ¬ (´rf t) DO

      ⌦‹(* ==== start: ret = pool_alloc(p, block, size); ======================== *)›
      ⌦‹(*(t ▸ ´lsizes := ´lsizes(t := []));;*)›
      (t ▸ ´blk := ´blk(t := NULL));;
      (t ▸ ´alloc_lsize_r := ´alloc_lsize_r(t := False));;
      (t ▸ ´alloc_l := ´alloc_l(t := -1));;
      (t ▸ ´free_l := ´free_l(t := -1));;
      (t ▸ ´lsizes := ´lsizes(t := [ALIGN4 (max_sz (´mem_pool_info p))]));;
      (t ▸ ´i := ´i(t := 0));;
      WHILE ´i t < n_levels (´mem_pool_info p) ∧ ¬ ´alloc_lsize_r t DO
        IF ´i t > 0 THEN
          (t ▸ ´lsizes := ´lsizes(t := ´lsizes t @ [ALIGN4 (´lsizes t ! (´i t - 1) div 4)]))
        FI;;
        IF ´lsizes t ! ´i t < sz THEN
          (t ▸ ´alloc_lsize_r := ´alloc_lsize_r(t := True))
        ELSE
          (t ▸ ´alloc_l := ´alloc_l(t := int (´i t)));;
          IF ¬ level_empty (´mem_pool_info p) (´i t) THEN
            (t ▸ ´free_l := ´free_l(t := int (´i t)))
          FI;;
          (t ▸ ´i := ´i(t := ´i t + 1))
        FI
      OD;;

      IF ´alloc_l t < 0 THEN
        (t ▸ ´ret := ´ret(t := ESIZEERR))
      ELSE
        IF ´free_l t < 0 THEN
          ⌦‹(* block->data = NULL; *)›
          (t ▸ ´ret := ´ret(t := ENOMEM))
        ELSE
          ⌦‹(* ==== start: blk = alloc_block(p, free_l, lsizes[free_l]); *)›
          (t ▸ ATOMIC
            ⌦‹(* ==== start: block = sys_dlist_get(&p->levels[l].free_list); *)›
            IF level_empty (´mem_pool_info p) (nat (´free_l t)) THEN
              ´blk := ´blk(t := NULL)
            ELSE
              ´blk := ´blk(t := head_free_list (´mem_pool_info p) (nat (´free_l t)));;

              ⌦‹(* sys_dlist_remove(node); *)›
              ´mem_pool_info := ´mem_pool_info (p := rmhead_free_list (´mem_pool_info p) (nat (´free_l t)))

            FI;;
            ⌦‹(* ==== end: block = sys_dlist_get(&p->levels[l].free_list); *)›

            IF ´blk t ≠ NULL THEN
              ⌦‹(* clear_free_bit(p, l, block_num(p, block, lsz)); *)›
              ´mem_pool_info := set_bit_allocating ´mem_pool_info p (nat (´free_l t))
                                  (block_num (´mem_pool_info p) (´blk t) ((´lsizes t)!(nat (´free_l t))));;
              ⌦‹(* set the allocating node info of the thread *)›
              ´allocating_node := ´allocating_node (t := Some ⦇pool = p, level = nat (´free_l t),
                    block = (block_num (´mem_pool_info p) (´blk t) ((´lsizes t)!(nat (´free_l t)))), data = ´blk t ⦈)
            FI
          END);;
          ⌦‹(* ==== end: blk = alloc_block(p, free_l, lsizes[free_l]); *)›

          IF ´blk t = NULL THEN
            (t ▸ ´ret := ´ret (t := EAGAIN))
          ELSE

            FOR (t ▸ ´from_l := ´from_l(t := ´free_l t));
                ⌦‹(* level_empty (´mem_pool_info p) (nat (´alloc_l t)) ∧ *)› ´from_l t < ´alloc_l t;
                ⌦‹(********* we remove the FOR termination condition ``level_empty'' to remove a concurrency BUG here ***********)›
                (t ▸ ´from_l := ´from_l(t := ´from_l t + 1)) DO

              ⌦‹(* ==== start: blk = break_block(p, blk, from_l, lsizes); *)›
              (t ▸ ATOMIC
                ´bn := ´bn (t := block_num (´mem_pool_info p) (´blk t) ((´lsizes t)!(nat (´from_l t))));;

                ´mem_pool_info := set_bit_divide ´mem_pool_info p (nat (´from_l t)) (´bn t);;

                ´mem_pool_info := set_bit_allocating ´mem_pool_info p (nat (´from_l t + 1)) (4 * ´bn t);;

                ⌦‹(* set the allocating node info of the thread *)›
                ´allocating_node := ´allocating_node (t := Some ⦇pool = p, level = nat (´from_l t + 1),
                      block = 4 * ´bn t, data = ´blk t ⦈);;

                FOR ´i := ´i (t := 1);
                    ´i t < 4;
                    ´i := ´i (t := ´i t + 1) DO
                  ´lbn := ´lbn (t := 4 * ´bn t + ´i t);;
                  ´lsz := ´lsz (t := (´lsizes t) ! (nat (´from_l t + 1)));;
                  ´block2 := ´block2(t := ´lsz t * ´i t + ´blk t);;

                  ⌦‹(* set_free_bit(p, l + 1, lbn); *)›
                  ´mem_pool_info := set_bit_free ´mem_pool_info p (nat (´from_l t + 1)) (´lbn t);;

                  IF block_fits (´mem_pool_info p) (´block2 t) (´lsz t) THEN

                    ⌦‹(* sys_dlist_append(&p->levels[l + 1].free_list, block2); *)›
                    ´mem_pool_info := ´mem_pool_info (p :=
                            append_free_list (´mem_pool_info p) (nat (´from_l t + 1)) (´block2 t) )
                  FI
                ROF

              END)
              ⌦‹(* ==== end: blk = break_block(p, blk, from_l, lsizes); *)›

            ROF;;

            ⌦‹(* finally set the node from allocating to allocated and remove the allocating node info of the thread *)›
            (t ▸ ´mem_pool_info := set_bit_alloc ´mem_pool_info p (nat (´alloc_l t))
                                  (block_num (´mem_pool_info p) (´blk t) ((´lsizes t)!(nat (´alloc_l t))));;
                 ´allocating_node := ´allocating_node (t := None)
            );;

            (t ▸ ´mempoolalloc_ret := ´mempoolalloc_ret (t :=
              Some ⦇pool = p, level = nat (´alloc_l t),
                    block = block_num (´mem_pool_info p) (´blk t) ((´lsizes t)!(nat (´alloc_l t))),
                    data = ´blk t ⦈));;

            (t ▸ ´ret := ´ret (t := OK))
          FI
        FI
      FI;;
      ⌦‹(* ==== end: ret = pool_alloc(p, block, size); ==================== *)›

      ⌦‹(* IF ´ret t = 0 ∨ timeout = NOWAIT ∨ ´ret t = EAGAIN ∨ ´ret t ≠ ENOMEM THEN *)
      (***** we change the IF condition to remove a functional BUG here *****)›
      IF ´ret t = OK ∨ timeout = NOWAIT ∨ ´ret t = ESIZEERR THEN
        (t ▸ ´rf := ´rf(t := True));;
        IF ´ret t = EAGAIN THEN ⌦‹(*EAGAIN should not export to users*)›
          (t ▸ ´ret := ´ret(t := ENOMEM))
        FI
      ELSE
        IF ´ret t = EAGAIN THEN SKIP
        ELSE
          (t ▸ ATOMIC

            ⌦‹(* _pend_current_thread(&p->wait_q, timeout); *)›
            ´thd_state := ´thd_state(the ´cur := BLOCKED);;
            ⌦‹(*´cur := None;;*)›
            ´mem_pool_info := ´mem_pool_info(p := ´mem_pool_info p⦇wait_q := wait_q (´mem_pool_info p) @ [the ´cur] ⦈);;

            ⌦‹(* _Swap(key); *)›
            swap

          END);;

          IF ´tmout t ≠ FOREVER THEN
            (t ▸ ´tmout := ´tmout (t := int (´endt t) - int ´tick));;
            IF ´tmout t < 0 THEN
              (t ▸ ´rf := ´rf(t := True));;
              (t ▸ ´ret := ´ret (t := ETIMEOUT))
            FI
          FI
        FI
      FI
    OD

  END"

definition Mem_pool_free :: "Thread ⇒ Mem_block ⇒ (EventLabel, 'a, State, State com option) esys"
where "Mem_pool_free t b =
  EVENT Mem_pool_freeE [Block b] ⇛ (𝒯 t)
  WHEN
    pool b ∈ ´mem_pools
    ∧ level b < length (levels (´mem_pool_info (pool b)))
    ∧ block b < length (bits (levels (´mem_pool_info (pool b))!(level b)))
    ∧ data b = block_ptr (´mem_pool_info (pool b)) ((ALIGN4 (max_sz (´mem_pool_info (pool b)))) div (4 ^ (level b))) (block b)
    ⌦‹(*∧ (bits ((levels (´mem_pool_info (pool b))) ! (level b))) ! (block b) = ALLOCATED
    ∧ ´cur = Some t*) (* t is the current thread *)
    (* ∧ pool b ∈ ´pools_of_thread t *) (* the mem pool is shared in the thread t *)›
  THEN
    ⌦‹(* here we set the bit to FREEING, so that other threads cannot mem_pool_free the same block;
       it also requires that only an ALLOCATED block can be freed *)›
    (t ▸ AWAIT (bits ((levels (´mem_pool_info (pool b))) ! (level b))) ! (block b) = ALLOCATED THEN
           ´mem_pool_info := set_bit_freeing ´mem_pool_info (pool b) (level b) (block b);;
           ´freeing_node := ´freeing_node (t := Some b) ⌦‹(* set the freeing node of current thread *)›
         END);;

    (t ▸ ´need_resched := ´need_resched(t := False));;
    ⌦‹(* (t ▸ ´lsizes := ´lsizes(t := []));; *)›
    (t ▸ ´lsizes := ´lsizes(t := [ALIGN4 (max_sz (´mem_pool_info (pool b)))]));;
    FOR (t ▸ ´i := ´i(t := 1));
        ´i t ≤ level b;
        (t ▸ ´i := ´i(t := ´i t + 1)) DO
      (t ▸ ´lsizes := ´lsizes(t := ´lsizes t @ [ALIGN4 (´lsizes t ! (´i t - 1) div 4)]))
    ROF;;

    ⌦‹(* === start: free_block(get_pool(block->id.pool), block->id.level, lsizes, block->id.block); *)›
    (t ▸ ´free_block_r := ´free_block_r (t := True));;
    (t ▸ ´bn := ´bn (t := block b));;
    (t ▸ ´lvl := ´lvl (t := level b));;

    WHILE ´free_block_r t DO
      (t ▸ ´lsz := ´lsz (t := ´lsizes t ! (´lvl t)));;
      (t ▸ ´blk := ´blk (t := block_ptr (´mem_pool_info (pool b)) (´lsz t) (´bn t)));;

      (t ▸ ATOMIC

        ´mem_pool_info := set_bit_free ´mem_pool_info (pool b) (´lvl t) (´bn t);;
        ´freeing_node := ´freeing_node (t := None);;  ⌦‹(* remove the freeing node info of the thread *)›

        IF ´lvl t > 0 ∧ partner_bits (´mem_pool_info (pool b)) (´lvl t) (´bn t) THEN
          FOR ´i := ´i(t := 0);
              ´i t < 4;
              ´i := ´i(t := ´i t + 1) DO
            ´bb := ´bb (t := (´bn t div 4) * 4 + ´i t);;
            ⌦‹(*(t ▸ ´mem_pool_info := clear_free_bit ´mem_pool_info (pool b) (´lvl t) (´bb t));;*)›
            ´mem_pool_info := set_bit_noexist ´mem_pool_info (pool b) (´lvl t) (´bb t);;
            ´block_pt := ´block_pt (t := block_ptr (´mem_pool_info (pool b)) (´lsz t) (´bb t));;
            IF ´bn t ≠ ´bb t ∧ block_fits (´mem_pool_info (pool b))
                                          (´block_pt t)
                                          (´lsz t) THEN

              ⌦‹(* sys_dlist_remove(block_ptr(p, lsz, b)); *)›
              ´mem_pool_info := ´mem_pool_info ((pool b) :=
                      remove_free_list (´mem_pool_info (pool b)) (´lvl t) (´block_pt t))
            FI
          ROF;;

          (
          ⌦‹(*´j := ´j (t := ´lvl t);; (* use lbn and j to store the previous lvl and bn, or can not give the post condition *)
          ´lbn := ´lbn (t := ´bn t);; (* since the lbn and j are not used in M_pool_free *)
          ´lvl := ´lvl (t := ´j t - 1);;
          ´bn := ´bn (t := ´lbn t div 4);;*)›
          ´lvl := ´lvl (t := ´lvl t - 1);;
          ´bn := ´bn (t := ´bn t div 4);;
          ⌦‹(* we add this statement: set the parent node from DIVIDED to FREEING *)›
          ´mem_pool_info := set_bit_freeing ´mem_pool_info (pool b) (´lvl t) (´bn t);;
          ⌦‹(*´freeing_node := ´freeing_node (t := Some ⦇pool = (pool b), level = (´lvl t),
                    block = (´bn t), data = block_ptr (´mem_pool_info (pool b)) (´lsz t) (´bn t) ⦈)*)›
          ´freeing_node := ´freeing_node (t := Some ⦇pool = (pool b), level = (´lvl t),
                    block = (´bn t),
                    data = block_ptr (´mem_pool_info (pool b))
                            (((ALIGN4 (max_sz (´mem_pool_info (pool b)))) div (4 ^ (´lvl t))))
                            (´bn t) ⦈)
          )

        ELSE
          IF block_fits (´mem_pool_info (pool b)) (´blk t) (´lsz t) THEN

            ⌦‹(* sys_dlist_append(&p->levels[level].free_list, block); *)›
            ´mem_pool_info := ´mem_pool_info ((pool b) :=
                    append_free_list (´mem_pool_info (pool b)) (´lvl t) (´blk t) )
          FI;;

          ´free_block_r := ´free_block_r (t := False)
        FI

      END)

    OD;;
    ⌦‹(* === end: free_block(get_pool(block->id.pool), block->id.level, lsizes, block->id.block); *)›

    (t ▸ ATOMIC

      WHILE wait_q (´mem_pool_info (pool b)) ≠ [] DO
        ´th := ´th (t := hd (wait_q (´mem_pool_info (pool b))));;
        ⌦‹(* _unpend_thread(th); *)›
        ´mem_pool_info := ´mem_pool_info (pool b := ´mem_pool_info (pool b)
                ⦇wait_q := tl (wait_q (´mem_pool_info (pool b)))⦈);;
        ⌦‹(* _ready_thread(th); *)›
        ´thd_state := ´thd_state (´th t := READY);;
        ´need_resched := ´need_resched(t := True)
      OD;;

      IF ´need_resched t THEN
        reschedule
      FI
    END)
  END"

definition Schedule :: "Thread ⇒ (EventLabel, 'a, State, State com option) esys"
where "Schedule t ≡
  EVENT ScheduleE [Thread t] ⇛ 𝒮
  THEN
    AWAIT ´thd_state t = READY THEN  ⌦‹(* only schedule the READY threads *)›
      IF (´cur ≠ None) THEN
        ´thd_state := ´thd_state(the (´cur) := READY);;
        ´cur := None
      FI;;
      ´cur := Some t;;
      ´thd_state := ´thd_state(t := RUNNING)
    END
  END"

definition Tick :: "(EventLabel, 'a, State, State com option) esys"
where "Tick ≡
  EVENT TickE [] ⇛ Timer
  THEN
    ´tick := ´tick + 1
  END"

term Evt_sat_RG
end