Preemption-Safe and Scheduler-Conscious Busy-Wait Synchronization Algorithms

Pseudo-code from the article of the above name, by Leonidas I. Kontothanassis, Robert W. Wisniewski, and Michael L. Scott, ACM Transactions on Computer Systems, February 1997.


Kernel Interface Declarations for Preemption-Safe and Scheduler-Conscious Synchronization Algorithms

  type context_block = record
      state : (preempted, preemptable, unpreemptable_self, unpreemptable_other)
      warning : Boolean
      ...
  
  type partition_block = record
      num_processors, generation : integer
      processes_on_processor : array [MAX_PROCESSORS] of integer
      processor_ids : array [MAX_PROCESSES] of integer
      ...
  
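For readers working in C, the sketch below shows one plausible rendering of these declarations, together with the atomic primitives the pseudo-code relies on (fetch_and_store, compare_and_store, fetch_and_increment) expressed as C11 atomics (shown for integers; pointer and enum variants are analogous). The field layout of context_block and partition_block and the constants are assumptions for illustration, not the paper's actual kernel interface.

  /* Hypothetical C rendering; field layout and constants are assumptions. */
  #include <stdatomic.h>
  #include <stdbool.h>

  #define MAX_PROCESSORS 64
  #define MAX_PROCESSES  256

  typedef enum {
      PREEMPTED, PREEMPTABLE, UNPREEMPTABLE_SELF, UNPREEMPTABLE_OTHER
  } proc_state;

  typedef struct {
      _Atomic proc_state state;    /* read and written by both process and kernel */
      atomic_bool        warning;  /* set by the kernel when it wants the processor back */
  } context_block;

  typedef struct {
      int num_processors;
      int generation;              /* bumped by the kernel on each repartition */
      int processes_on_processor[MAX_PROCESSORS];
      int processor_ids[MAX_PROCESSES];
  } partition_block;

  /* The pseudo-code's primitives map directly onto C11 atomics: */
  static inline int fetch_and_store (atomic_int *p, int v)
      { return atomic_exchange (p, v); }
  static inline bool compare_and_store (atomic_int *p, int old, int new_val)
      { return atomic_compare_exchange_strong (p, &old, new_val); }
  static inline unsigned fetch_and_increment (atomic_uint *p)
      { return atomic_fetch_add (p, 1); }
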

Preemption-Safe "Handshaking" Queue Lock

  type multi_flag = (not_yet, can_go, got_it, lost_it, ack, nack)
  type qnode = record
      next, prev : ^qnode
      next_done : Boolean
      status : multi_flag
  type lock = ^qnode
  private cb : ^context_block
  
  procedure acquire_lock (L : ^lock, I : ^qnode)
      loop
          I->next := nil
          cb->state := unpreemptable_self
          I->prev := fetch_and_store (L, I)
          if I->prev = nil  return
          I->status := not_yet
          I->prev->next := I
          repeat
              cb->state := preemptable
              if cb->warning  yield          // kernel wanted to preempt me
              cb->state := unpreemptable_self
          until I->status != not_yet         // spin
          val : multi_flag := fetch_and_store (I->status, got_it)
          if val = can_go
              I->prev->next_done := true     // tell prev I'm done with its qnode
              repeat until I->status = ack   // let prev finish using my qnode
              return
          while val != nack val := I->status // wait until qnode no longer needed
  
  procedure release_lock (L : ^lock, I: ^qnode)
      if I->next = nil                       // no known successor
          if compare_and_store (L, I, nil)  goto rtn
          repeat while I->next = nil         // spin
      I->next_done := false
      loop
          I->next->status := can_go
          for i in 1..TIMEOUT                // spin
              if I->next_done
                  I->next->status := ack;  goto rtn
          if fetch_and_store (I->next->status, lost_it) = got_it
                  // oh! successor was awake after all
              repeat until I->next_done
              I->next->status := ack;  goto rtn
          succ : ^qnode := I->next->next     // successor was asleep
          if succ = nil
              if compare_and_store (L, I->next, nil)
                  I->next->status := nack;  goto rtn
              repeat while (succ := I->next->next) = nil  // spin; non-local
          I->next->status := nack
          I->next := succ;  succ->prev := I
   rtn:
      cb->state := preemptable
      if cb->warning  yield                  // kernel wanted to preempt me
  
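The calling convention is the usual one for MCS-style queue locks: each acquisition supplies its own qnode and passes the same node to the matching release, and the multi_flag handshake lets each party know when the other is finished with its node. A hypothetical C-style usage sketch, assuming C renderings of lock, qnode, acquire_lock, and release_lock with the signatures above:

  lock L = NULL;                    /* shared; one per protected resource */

  void update_shared_data (void) {
      qnode me;                     /* one qnode per acquisition; stack allocation assumed */
      acquire_lock (&L, &me);
      /* ... critical section ... */
      release_lock (&L, &me);
  }
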

Scheduler-Conscious "Smart Queue" Lock

NB: This code incorporates a bug fix due to Hiroaki Takada and another due to Injong Rhee and Chi-Yung Lee.

  type qnode = record
      self : ^context_block
      next : ^qnode
      status : (waiting, success, failure)
  type lock = ^qnode
  private cb : ^context_block
  
  procedure acquire_lock (L : ^lock, I : ^qnode)
      repeat
          I->next := nil
          I->self := cb
          cb->state := unpreemptable_self
          pred : ^qnode := fetch_and_store (L, I)
          if pred = nil
              return
          I->status := waiting
          pred->next := I
          (void) compare_and_store (&cb->state,
                                    unpreemptable_self, preemptable)
          repeat while I->status = waiting        // spin
      until I->status = success
  
  procedure release_lock (L : ^lock, I : ^qnode)
      shadow : ^qnode := I
      candidate : ^qnode := I->next
      loop
          if candidate = nil
              if compare_and_store (L, shadow, nil)
                  shadow->status := failure       // may set our own flag,
                                                  // but that's ok
                  exit loop                       // no one waiting for lock
              repeat while shadow->next = nil     // spin; probably non-local
              candidate := shadow->next
  
          shadow->status := failure               // outside the if 
          // order of following checks is important
          if compare_and_store (&candidate->self->state,
                                unpreemptable_self, unpreemptable_other)
                  or compare_and_store (&candidate->self->state,
                                        preemptable, unpreemptable_other)
              candidate->status := success
              exit loop
          // else candidate seems to be preempted
          shadow := candidate                     // move down queue
          candidate := shadow->next
      cb->state := preemptable
      if cb->warning
          yield
  
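The scheduler-conscious part of the release path is the pair of compare_and_store operations on the candidate's context block: the lock is handed over only if the candidate can be atomically moved from a running state (unpreemptable_self, then preemptable, in that order) to unpreemptable_other. A hedged C sketch of that test, reusing the hypothetical context_block rendering given with the kernel interface above:

  /* Returns true iff `c` was in a runnable state and has now been pinned
     as UNPREEMPTABLE_OTHER, so the lock may safely be granted to it. */
  static bool grant_if_running (context_block *c) {
      proc_state expected = UNPREEMPTABLE_SELF;
      if (atomic_compare_exchange_strong (&c->state, &expected, UNPREEMPTABLE_OTHER))
          return true;
      expected = PREEMPTABLE;       /* order matters: try this state second */
      return atomic_compare_exchange_strong (&c->state, &expected, UNPREEMPTABLE_OTHER);
  }
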

Preemption-Safe Ticket Lock

  type t_lock = record
      next_ticket, now_serving, ack_flag, done_flag : unsigned integer
  private cb : ^context_block
  
  procedure acquire_lock (L : ^t_lock)
  restart:
      cb->state := unpreemptable_self
      my_ticket : integer := fetch_and_increment (&L->next_ticket)
          // overflow is benign
      while my_ticket != L->now_serving
          cb->state := preemptable
          if cb->warning
              yield
          cb->state := unpreemptable_self
          if (my_ticket - L->now_serving) > MAX_PROCESSES
              // I've been passed up (overflow is benign)
              goto restart
          for i in 1..((my_ticket - L->now_serving) * SPIN_FACTOR)
              // spin
      if L->done_flag = my_ticket
          // optimization; releaser definitely isn't waiting for acknowledgment
          return
      if !compare_and_store (&L->ack_flag, my_ticket, my_ticket-MAX_PROCESSES)
          goto restart
  
  procedure release_lock (L : ^t_lock)
  retry:
      new_ticket : integer := L->ack_flag := L->now_serving + 1
      L->now_serving := new_ticket
      if L->next_ticket = L->now_serving      // nobody waiting
          goto rtn
      for i : integer in 1..TIMEOUT
          if L->ack_flag = new_ticket - MAX_PROCESSES
              goto rtn
      // I timed out
      if compare_and_store (&L->ack_flag, new_ticket, new_ticket-MAX_PROCESSES)
          // ticket successfully rescinded
          goto retry
  rtn:
      L->done_flag := new_ticket
          // optimization; avoids need for acknowledgment in no-contention case
      cb->state := preemptable
      if cb->warning
          yield
  
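With the ack_flag/done_flag handshake and the kernel interface stripped away, what remains is a conventional ticket lock with proportional backoff. A minimal C11 sketch of that core is shown below; it omits the preemption-safety machinery entirely, and SPIN_FACTOR is a placeholder tuning constant.

  #include <stdatomic.h>

  #define SPIN_FACTOR 100                   /* placeholder tuning constant */

  typedef struct {
      atomic_uint next_ticket;              /* next ticket to hand out */
      atomic_uint now_serving;              /* ticket allowed into the critical section */
  } basic_ticket_lock;                      /* zero-initialize before first use */

  static void tl_acquire (basic_ticket_lock *l) {
      unsigned me = atomic_fetch_add_explicit (&l->next_ticket, 1,
                                               memory_order_relaxed);
      for (;;) {
          unsigned cur = atomic_load_explicit (&l->now_serving,
                                               memory_order_acquire);
          if (cur == me)
              return;
          /* proportional backoff: wait longer the farther back in line we are;
             unsigned arithmetic makes counter overflow benign, as in the paper */
          for (volatile unsigned i = (me - cur) * SPIN_FACTOR; i != 0; i--)
              ;                             /* delay */
      }
  }

  static void tl_release (basic_ticket_lock *l) {
      unsigned next = atomic_load_explicit (&l->now_serving,
                                            memory_order_relaxed) + 1;
      atomic_store_explicit (&l->now_serving, next, memory_order_release);
  }
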

Scheduler-Conscious Fair Reader-Writer Lock

NB: This code incorporates a bug fix due to Injong Rhee and Chi-Yung Lee.

  type rw_qnode = record
      self : ^context_block
      state : (reader, active_reader, writer)
      spin_flag : (waiting, success, failure)
      next, prev : ^rw_qnode
      exc_lock : exclusive_lock
  type rw_lock = ^rw_qnode
  private cb : ^context_block
  
  procedure writer_lock (L : ^rw_lock, I : ^rw_qnode)
      I->self := cb
      repeat
          cb->state := unpreemptable_self
          I->state := writer
          I->spin_flag := waiting
          I->next := nil
          pred : ^rw_qnode := fetch_and_store (L, I)
          if pred != nil
              pred->next := I
              (void) compare_and_store (&cb->state,
                                        unpreemptable_self, preemptable)
              repeat while I->spin_flag = waiting     // spin
          else
              return
      until I->spin_flag = success
  
  procedure writer_unlock (L: ^rw_lock, I: ^rw_qnode)
      shadow : ^rw_qnode := I
      candidate : ^rw_qnode := I->next
      loop
          if candidate = nil
              if compare_and_store (L, shadow, nil)
                  shadow->spin_flag := failure        // may set our own flag,
                                                      // but that's ok
                  exit loop                           // no one waiting for lock
              repeat while shadow->next = nil         // spin; probably non-local
              candidate := shadow->next
  
          shadow->spin_flag := failure                // outside the if
          // order of following checks is important
          if compare_and_store (&candidate->self->state,
                                unpreemptable_self, unpreemptable_other)
                   or compare_and_store (&candidate->self->state, preemptable,
                                         unpreemptable_other)
               candidate->prev := nil
               candidate->spin_flag := success
               exit loop
          // else candidate seems to be preempted
          shadow := candidate                         // move down queue
          candidate := shadow->next
          shadow->spin_flag := failure
      cb->state := preemptable
      if cb->warning
          yield
  
  procedure reader_lock (L : ^rw_lock, I : ^rw_qnode)
      I->self := cb
      exc_lock (I)
      repeat
          cb->state := unpreemptable_self
          I->next := I->prev := nil
          I->state := reader
          I->spin_flag := waiting
          pred : ^rw_qnode := fetch_and_store (L, I)
          if pred = nil
              exit loop               // leave repeat
          I->prev := pred
          pred->next := I
          if pred->state = active_reader
              exit loop               // leave repeat
          compare_and_store (&cb->state,
                             unpreemptable_self, preemptable)
          repeat while I->spin_flag = waiting
      until I->spin_flag = success
      I->state := active_reader
      candidate : ^rw_qnode := I->next
      loop
          if candidate = nil or candidate->state != reader
              exit loop
          // order of following checks is important
          if compare_and_store (&candidate->self->state, 
                                unpreemptable_self, unpreemptable_other)
                  or compare_and_store (&candidate->self->state, 
                                        preemptable, unpreemptable_other)
              candidate->spin_flag := success
              exit loop
          // else candidate seems to be preempted
          if candidate->next = nil
              I->next := nil
              if compare_and_store (L, candidate, I)
                  // we are now tail of queue
                  candidate->spin_flag := failure
                  exit loop
              // else need to spin until successor establishes pointers
              repeat while candidate->next = nil
          // preempted candidate has a successor
          I->next := candidate->next
          candidate->next->prev := I
          candidate->spin_flag := failure
          candidate := I->next
      exc_unlock (I)
  
  procedure reader_unlock (L : ^rw_lock, I : ^rw_qnode)
   find_previous:
      pred : ^rw_qnode := I->prev
      if pred = nil  goto no_previous
      while !exc_lock_conditional (pred)
          pred := I->prev
          if pred = nil  goto no_previous
      if pred != I->prev
         exc_unlock (pred)
         goto find_previous
      exc_lock (I)
      pred->next := nil
      if I->next = nil
          if !compare_and_store (L, I, I->prev)
             repeat while I->next = nil                   // spin
      if I->next != nil
          I->next->prev := I->prev
          I->prev->next := I->next
      exc_unlock (pred)
      goto rtn
   no_previous:
      exc_lock (I)
      loop
          candidate : ^rw_qnode := I->next
          if candidate = nil
              if compare_and_store (L, I, nil)  goto rtn
              repeat while I->next = nil                  // spin
          else
              if candidate->self->state = unpreemptable_other
                      or compare_and_store (&candidate->self->state,
                                unpreemptable_self, unpreemptable_other)
                      or compare_and_store (&candidate->self->state,
                                preemptable, unpreemptable_other)
                  if (candidate->state = writer)
                      candidate->prev := nil
                      candidate->spin_flag := success
                  else
                      candidate->spin_flag := success
                      candidate->prev := nil
                  goto rtn
              // else candidate seems to be preempted
              if candidate->next = nil
                  if compare_and_store (L, candidate, nil)
                      // no one at tail of queue
                      candidate->spin_flag := failure
                      goto rtn
                  repeat while candidate->next = nil      // spin
              // preempted candidate has a successor
              I->next := candidate->next
              candidate->next->prev := I
              candidate->spin_flag := failure
   rtn:
      exc_unlock (I)
      cb->state := preemptable
      if cb->warning  yield
  
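The reader paths above also use a small per-qnode exclusive lock (the exc_lock field) through exc_lock, exc_unlock, and exc_lock_conditional, none of which are defined in this listing; in the pseudo-code these take a qnode and operate on its exc_lock field. Any mutual-exclusion primitive with a non-blocking conditional acquire will serve. A minimal test-and-set sketch in C, offered only as an assumption about those helpers:

  #include <stdatomic.h>
  #include <stdbool.h>

  typedef atomic_flag exclusive_lock;       /* initialize with ATOMIC_FLAG_INIT */

  static void exc_lock (exclusive_lock *x) {
      while (atomic_flag_test_and_set_explicit (x, memory_order_acquire))
          ;                                 /* spin */
  }

  static bool exc_lock_conditional (exclusive_lock *x) {
      /* single attempt; true on success */
      return !atomic_flag_test_and_set_explicit (x, memory_order_acquire);
  }

  static void exc_unlock (exclusive_lock *x) {
      atomic_flag_clear_explicit (x, memory_order_release);
  }
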

Preemption-Safe Fixed-Time (Spin-Block) Competitive Barrier

  shared global_sense, barrier_count, num_blocked : integer := 0, 0, 0
  shared wakeup_sems : array [2] of semaphore := {0}
  shared mutex : lock
  private local_sense : integer := 0
  
  procedure barrier ()
      local_sense := 1 - local_sense
      count : integer := fetch_and_increment (&barrier_count)
      if count < NUM_PROCESSES - 1
          for i : integer in 1..SWITCH_TIME
              if global_sense = local_sense 
                  return
          acquire_lock (mutex)
          if global_sense = local_sense
              release_lock (mutex)
              return
          num_blocked +:= 1
          release_lock (mutex)
          P (wakeup_sems[local_sense])
      else
          barrier_count := 0
          acquire_lock (mutex)
          global_sense := 1 - global_sense    // release spinning processes
          count := num_blocked
          num_blocked := 0
          release_lock (mutex)
          for i in 1..count
              V (wakeup_sems[local_sense])    // release blocked processes
  
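For threads rather than kernel-scheduled processes, the same spin-then-block structure can be sketched with C11 atomics, a pthread mutex, and POSIX semaphores. This is an illustrative assumption, not the paper's implementation: NUM_PROCESSES and SWITCH_TIME are placeholders, and the wakeup semaphores must be sem_init'ed to 0 before first use.

  #include <stdatomic.h>
  #include <pthread.h>
  #include <semaphore.h>

  #define NUM_PROCESSES 8
  #define SWITCH_TIME   10000                     /* spin budget before blocking */

  static atomic_int global_sense = 0;
  static atomic_int barrier_count = 0;
  static int num_blocked = 0;                     /* protected by mutex */
  static sem_t wakeup_sems[2];                    /* sem_init (&wakeup_sems[i], 0, 0) first */
  static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  static _Thread_local int local_sense = 0;

  void barrier (void) {
      local_sense = 1 - local_sense;
      int count = atomic_fetch_add (&barrier_count, 1);
      if (count < NUM_PROCESSES - 1) {
          for (int i = 0; i < SWITCH_TIME; i++)   /* competitive spinning phase */
              if (atomic_load (&global_sense) == local_sense)
                  return;
          pthread_mutex_lock (&mutex);
          if (atomic_load (&global_sense) == local_sense) {
              pthread_mutex_unlock (&mutex);      /* barrier achieved while we queued */
              return;
          }
          num_blocked++;
          pthread_mutex_unlock (&mutex);
          sem_wait (&wakeup_sems[local_sense]);   /* block until last arriver posts */
      } else {                                    /* last arriver */
          atomic_store (&barrier_count, 0);
          pthread_mutex_lock (&mutex);
          atomic_store (&global_sense, local_sense);  /* release spinning threads */
          count = num_blocked;
          num_blocked = 0;
          pthread_mutex_unlock (&mutex);
          for (int i = 0; i < count; i++)
              sem_post (&wakeup_sems[local_sense]);   /* release blocked threads */
      }
  }
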

Preemption-Safe "Average Three" Competitive Barrier

  shared global_sense, barrier_count, num_blocked : integer := 0, 0, 0
  shared wakeup_sems : array [2] of semaphore := {0}
  shared mutex : lock
  private local_sense : integer := 0
  private spin_threshold : integer := SWITCH_TIME
  private episode_count : integer := 0
  private episode_time : array [3] of integer := {SWITCH_TIME}
  
  procedure barrier ()
      local_sense := 1 - local_sense
      count : integer := fetch_and_increment (&barrier_count)
      if count < NUM_PROCESSES - 1 
          now : integer := get_current_time ()
          for i in 1..spin_threshold 
              if global_sense = local_sense 
                  goto exit_barrier
          acquire_lock (mutex)
          if global_sense = local_sense
              release_lock (mutex)
              goto exit_barrier
          num_blocked +:= 1
          release_lock (mutex)
          P (wakeup_sems[local_sense])
       exit_barrier:
          episode_time[episode_count] := get_current_time () - now
          episode_count := (episode_count + 1) % 3
          if average (episode_time) < SWITCH_TIME
              spin_threshold := min (SWITCH_TIME, spin_threshold + ADJUST)
          else
              spin_threshold := max (0, spin_threshold - ADJUST)
      else
          barrier_count := 0
          acquire_lock (mutex)
          global_sense := 1 - global_sense    // release spinning processes
          count := num_blocked
          num_blocked := 0
          release_lock (mutex)
          for i in 1..count
              V (wakeup_sems[local_sense])    // release blocked processes
  

Small-Scale Scheduler-Conscious "Scheduler Information" Barrier

  shared global_sense, barrier_count : integer := 0, 0
  shared wakeup_sems : array [2] of semaphore := {0}
  shared partition : ^partition_block
  shared barrier_processors : array [2] of integer := {partition->num_processors}
  private local_sense : integer := 0
  
  procedure barrier ()
      local_sense := 1 - local_sense
      count : integer := fetch_and_increment (&barrier_count)
      if count + 1 < NUM_PROCESSES
          if count + 1 >= NUM_PROCESSES - barrier_processors[local_sense]
              repeat until global_sense = local_sense     // spin
          else
              P (wakeup_sems[local_sense])
      else
          barrier_count := 0
          barrier_processors[1-local_sense] := partition->num_processors
          global_sense := 1 - global_sense
          for i in 1..(NUM_PROCESSES - barrier_processors[local_sense])
              V (wakeup_sems[local_sense])
  

Scalable Scheduler-Conscious Tree Barrier

  type whole_and_parts = union
      whole : long
      parts: array [4] of byte
  type tree_node = record
      have_child : whole_and_parts
      child_not_ready : whole_and_parts := have_child
      parent_flag : ^byte
      dummy : byte                    // something harmless to point at
  type processor_info = record
      barrier_count : integer := 0
      wakeup_sems : array [2] of semaphore := {0}
      generation : integer := 0       // used to synchronize reorganization
  shared processors : array [MAX_PROCESSORS] of processor_info
  shared nodes : array [MAX_PROCESSORS] of tree_node
      // have_child and parent_flag fields of individual nodes are initialized
      // as appropriate in the inter-processor tree; see code in reorganize ()
  shared global_sense : integer := 0
  shared partition : ^partition_block
  shared barrier_partition : partition_block := partition^
  private local_sense : integer := 0
  private cb : ^context_block
  private process_id : integer :=     // unique number in 0..NUM_PROCESSES-1
  private my_processor : integer := partition->processor_ids[process_id]
  private my_generation : integer := 0
  
  procedure barrier ()
      local_sense := 1 - local_sense
      L : ^processor_info := &processors[my_processor]
      count : integer := fetch_and_increment (&L->barrier_count)
      if count + 1 < barrier_partition.processes_on_processor[my_processor]
          // not the last process on the processor
          P (L->wakeup_sems[local_sense])
          goto rtn
      // last process on this processor; wait for children on other processors
      my_node : ^tree_node := &nodes[my_processor]
      repeat while my_node->child_not_ready.whole != 0    // spin
      // barrier has been achieved
      my_node->child_not_ready.whole := my_node->have_child.whole
      my_node->parent_flag^ := 0              // notify parent  
      if my_processor = 0                     // root of inter-processor tree
          // copy partition information if necessary; loop ensures atomicity
          check : integer := barrier_partition.generation
          while check != partition->generation
              check := partition->generation
              barrier_partition := partition^
          global_sense := local_sense         // release spinning processes
      else repeat while global_sense != local_sense       // spin
      L->barrier_count := 0                   // reset for this processor only
      for i in 1..count
          V (L->wakeup_sems[local_sense])     // release blocked processes
   rtn:
      if my_generation != barrier_partition.generation  reorganize ()
  
  procedure reorganize ()
      my_generation := barrier_partition.generation
      my_processor := barrier_partition.processor_ids[process_id]
      my_node : ^tree_node := &nodes[my_processor]
      for i in 0..process_id-1
          if barrier_partition.processor_ids[i] = my_processor
              // I'm not the representative of my processor
              repeat until processors[my_processor].generation = my_generation
                  // spin
              return
      for i in 0..3
          my_node->have_child.parts[i] := (integer)
              ((my_processor*4 + i+1) < barrier_partition.num_processors)
      my_node->child_not_ready.whole := my_node->have_child.whole
      if my_processor = 0                 // root of inter-processor tree
          my_node->parent_flag := &my_node->dummy
          processors[my_processor].generation := my_generation
              // signal children it is safe to proceed
      else
          parent_id : integer := (my_processor-1)/4
          my_node->parent_flag :=
              &nodes[parent_id].child_not_ready.parts[(my_processor-1)%4]
          processors[my_processor].generation := my_generation
          repeat until processors[parent_id].generation = my_generation
              // spin
  
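The index arithmetic in reorganize() encodes a 4-ary inter-processor tree: node p's children are 4p+1 through 4p+4 (those below num_processors), and its parent is (p-1)/4. For reference, the same arithmetic as a tiny C sketch:

  /* 4-ary tree layout assumed by reorganize() */
  static int parent_of (int p)        { return (p - 1) / 4; }
  static int child_of  (int p, int i) { return p * 4 + i + 1; }   /* i in 0..3 */
  static int has_child (int p, int i, int num_processors)
      { return child_of (p, i) < num_processors; }
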


Last Change: 2 October 1995