EE445M RTOS
Taken at the University of Texas Spring 2015
os.c File Reference
#include "os.h"
#include "inc/hw_nvic.h"
#include "inc/hw_types.h"
#include "libstd/nexus.h"
#include "libut/utlist.h"
#include "libsystick/systick.h"
#include "libschedule/schedule.h"
Include dependency graph for os.c:

Go to the source code of this file.

Functions

void os_threading_init ()
 
int8_t get_os_num_threads ()
 
tcb_t * os_add_thread (task_t task)
 
tcb_t * os_remove_thread (task_t task)
 
int32_t os_running_thread_id ()
 
tcb_t * os_tcb_of (const task_t task)
 
void os_launch ()
 
always void _os_reset_thread_stack (tcb_t *tcb, task_t task)
 
always void scheduler_reschedule (void)
 
void SysTick_Handler ()
 
void PendSV_Handler ()
 
void os_suspend ()
 
void schedule (task_t task, frequency_t frequency, DEADLINE_TYPE seriousness)
 
void schedule_aperiodic (pisr_t pisr, HW_TYPE hw_type, hw_metadata metadata, microseconds_t allowed_run_time, DEADLINE_TYPE seriousness)
 
void schedule_init ()
 
sched_task_pool * schedule_hash_find_int (sched_task_pool *queues, frequency_t target_frequency)
 
void edf_init ()
 
void edf_insert (sched_task *task)
 
tcb_t * edf_pop ()
 
sched_task * edf_get_edf_queue ()
 
void _os_choose_next_thread ()
 

Variables

static int32_t OS_PROGRAM_STACKS [SCHEDULER_MAX_THREADS][100]
 
volatile sched_task * executing
 
volatile sched_task_pool * pool
 
volatile uint32_t clock = 0
 
volatile sched_task EDF [SCHEDULER_MAX_THREADS]
 
volatile sched_task * EDF_QUEUE = NULL
 
bool OS_FIRST_RUN = true
 
bool OS_THREADING_INITIALIZED
 
uint8_t OS_NUM_THREADS
 

Function Documentation

void _os_choose_next_thread ( )
Precondition
disable interrupts before we get here

Definition at line 544 of file os.c.

References scheduler_reschedule().

Referenced by os_launch().

544  {
545 
547 
548 }
always void scheduler_reschedule(void)
Definition: os.c:237

Here is the call graph for this function:

Here is the caller graph for this function:

sched_task* edf_get_edf_queue ( )

Definition at line 538 of file os.c.

References EDF_QUEUE.

538  {
539  return EDF_QUEUE;
540 }
volatile sched_task * EDF_QUEUE
Definition: os.c:27
void edf_init ( )
Precondition
ensure at least one task has been scheduled prior to invoking this function

Definition at line 475 of file os.c.

References DL_EDF_INSERT, edf_insert(), sched_task_pool::next, sched_task_pool::queue, and SCHEDULER_QUEUES.

Referenced by os_launch().

475  {
476 
478  sched_task_pool *next = start->next;
479 
480  /* avoid the NULL EDF_QUEUE to allow optimized form of `edf_insert' */
481  DL_EDF_INSERT(EDF_QUEUE, start->queue);
482 
483  /* Create the rest of the EDF */
484  while(next && next != start) {
485  /* DL_EDF_INSERT(EDF_QUEUE, next->queue); */
486  edf_insert(next->queue);
487  next = next->next;
488  }
489 }
#define DL_EDF_INSERT(head, add)
Definition: utlist.h:538
void edf_insert(sched_task *task)
Definition: os.c:491
volatile sched_task * EDF_QUEUE
Definition: os.c:27
struct sched_task_pool * next
sched_task * queue
static volatile sched_task_pool * SCHEDULER_QUEUES
Definition: schedule.h:38

Here is the call graph for this function:

Here is the caller graph for this function:

void edf_insert ( sched_task * task )

Add by insertion sort the specified task into the EDF queue

Definition at line 491 of file os.c.

References sched_task::absolute_deadline, DL_EDF_INSERT, DL_EDF_PREPEND, EDF_QUEUE, and sched_task::pri_next.

Referenced by edf_init(), edf_pop(), and scheduler_reschedule().

491  {
492 
493  sched_task *elt = EDF_QUEUE;
494 
495  while(elt && task->absolute_deadline > elt->absolute_deadline) {
496  elt = elt->pri_next;
497  }
498  /* inject task at point -- if elt is null, this is the new end*/
499  if (elt) {
500  if (elt == EDF_QUEUE) {
501  DL_EDF_PREPEND(elt, task);
502  EDF_QUEUE = elt;
503  } else {
504  DL_EDF_PREPEND(elt, task);
505  }
506  } else {
507  /* TODO: this incurs the O(n) again, optimize */
508  DL_EDF_INSERT(EDF_QUEUE, task);
509  }
510 }
#define DL_EDF_INSERT(head, add)
Definition: utlist.h:538
volatile sched_task * EDF_QUEUE
Definition: os.c:27
#define DL_EDF_PREPEND(head, add)
Definition: utlist.h:513
struct sched_task * pri_next
tick_t absolute_deadline

Here is the caller graph for this function:

tcb_t* edf_pop ( )

Definition at line 512 of file os.c.

References sched_task::absolute_deadline, sched_task_pool::deadline, edf_insert(), EDF_QUEUE, sched_task::next, sched_task_pool::next, sched_task::pri_next, sched_task_pool::queue, SCHEDULER_QUEUES, SYSTICKS_PER_HZ, and sched_task::tcb.

512  {
513 
514  volatile sched_task *executing = EDF_QUEUE;
516 
517  /* DL_EDF_DELETE(EDF_QUEUE, elt); */
518  /* TODO: CHECKME: should we use pri_next or pri_prev?? */
520 
521  /* KLUDGE: fix this for production */
522  while (pool->queue != executing) {
523  pool = pool->next;
524  }
525  /* will drop out with pool->queue = executing */
526 
527  executing->absolute_deadline = pool->deadline * SYSTICKS_PER_HZ;
528 
529  /* do the recycling, change the pool's head */
530  /* TODO: CHECKME: should we use next or prev?? */
531  pool->queue = executing->next;
532  edf_insert(pool->queue);
533 
534  /* TODO: after use, put the command back in the appropriate pool */
535  return executing->tcb;
536 }
#define SYSTICKS_PER_HZ
Definition: schedule.h:20
volatile sched_task_pool * pool
Definition: os.c:18
volatile sched_task * executing
Definition: os.c:17
void edf_insert(sched_task *task)
Definition: os.c:491
volatile sched_task * EDF_QUEUE
Definition: os.c:27
frequency_t deadline
struct sched_task * pri_next
struct sched_task_pool * next
sched_task * queue
struct sched_task * next
tick_t absolute_deadline
static volatile sched_task_pool * SCHEDULER_QUEUES
Definition: schedule.h:38

Here is the call graph for this function:

void PendSV_Handler ( void  )

Definition at line 291 of file os.c.

References HWREG, NVIC_ST_CURRENT, and os_running_threads.

Referenced by __attribute__().

291  {
292 
293  asm volatile("CPSID I");
294 
295  /* -------------------------------------------------- */
296  /* phase 1: store context */
297  /* -------------------------------------------------- */
298 
299  /* load the msp of thread A into r12 */
300  asm volatile("mrs r12, msp" );
301 
302  /* save thread A's registers into the msp */
303  asm volatile("stmdb r12!, {r4 - r11, lr}");
304 
305  /* -------------------------------------------------- */
306  /* phase 2: os_running_threads manipulation */
307  /* -------------------------------------------------- */
308 
309  /* set the profiling data structures for the current thread */
310  /* should this be "enabled" or "disabled"? */
311  #ifdef OS_TIME_PROFILING_ENABLED
312  if (os_running_threads->time_started >= 0) {
313  os_running_threads->time_running_last =
314  HWREG(NVIC_ST_CURRENT) - os_running_threads->time_started;
315 
316  /* If we haven't reached the max samples yet, increment the
317  number of samples taken */
318  if (os_running_threads->time_running_samples_taken < OS_TIME_MAX_SAMPLES) {
319  ++(os_running_threads->time_running_samples_taken);
320  }
321 
322  HWREG(NVIC_ST_CURRENT) = 0;
323  /* _os_reset_thread_stack(os_running_threads, os_running_threads->entry_point); */
324 
325  /* take another sample */
326  os_running_threads->time_running_avg = os_running_threads->time_running_samples_taken > 1 ?
327  /* if true */
328  (os_running_threads->time_running_last +
329  os_running_threads->time_running_samples_taken * os_running_threads->time_running_avg) /
330  (os_running_threads->time_running_samples_taken+1) :
331  /* else */
332  os_running_threads->time_running_last;
333 
334  }
335  #endif /* OS_TIME_PROFILING */
336 
337  /* load the value of os_running_threads */
338  asm volatile("LDR R2, =os_running_threads");
339 
340  /* r3 = *os_running_threads, of thread A */
341  asm volatile("LDR R3, [R2]");
342 
343  /* load the value of os_running_threads->next into r1 */
344  asm volatile("LDR R1, =OS_NEXT_THREAD");
345  asm volatile("LDR R1, [R1]");
346 
347  /* os_running_threads = OS_NEXT_THREAD */
348  asm volatile("STR R1, [R2]");
349 
350  /* -------------------------------------------------- */
351  /* phase 3: load context */
352  /* -------------------------------------------------- */
353 
354  /* store the msp from thread A */
355  asm volatile("str r12, [r3, #0]");
356 
357  /* load thread B's msp */
358  asm volatile("ldr r12, [r1]");
359 
360  /* set the profiling data structures for the next thread */
361  #ifdef OS_TIME_PROFILING_ENABLED
362  os_running_threads->time_started = HWREG(NVIC_ST_CURRENT);
363  #endif /* OS_TIME_PROFILING_ENABLED */
364 
365  /* load thread B's context */
366  asm volatile("ldmia r12!, {r4 - r11, lr}");
367 
368  /* put thread B's msp into the arch msp register */
369  asm volatile("msr msp, r12");
370 
371  /* reenable interrupts */
372  asm volatile("CPSIE I");
373 
374  asm volatile ("bx lr");
375 }
#define HWREG(x)
Definition: hw_types.h:48
static tcb_t * os_running_threads
Definition: os.h:45
#define NVIC_ST_CURRENT
Definition: hw_nvic.h:52

Here is the caller graph for this function:

void schedule ( task_t  task,
frequency_t  frequency,
DEADLINE_TYPE  seriousness 
)

Definition at line 386 of file os.c.

References sched_task::absolute_deadline, CDL_APPEND, CDL_DELETE, CDL_PREPEND, clock, sched_task_pool::deadline, MAX_SYSTICKS_PER_HZ, sched_task_pool::next, NULL, os_add_thread(), postpone_death, sched_task_pool::prev, sched_task_pool::queue, schedule_hash_find_int(), SCHEDULER_QUEUES, SCHEDULER_UNUSED_QUEUES, SCHEDULER_UNUSED_TASKS, sched_task::seriousness, sched_task::task, and sched_task::tcb.

Referenced by main().

386  {
387 
388  sched_task *ready_task = NULL;
389  sched_task_pool *ready_queue = NULL;
390 
391  /* Grab a new task from the unused task pile */
392  ready_task = SCHEDULER_UNUSED_TASKS;
393  CDL_DELETE(SCHEDULER_UNUSED_TASKS, ready_task);
394 
395  /* Set new task's metadata */
396  ready_task->task = task;
397  ready_task->seriousness = seriousness;
398 
399  if (frequency > MAX_SYSTICKS_PER_HZ) {
400  postpone_death();
401  }
402  ready_task->absolute_deadline = frequency + clock;
403 
404  ready_task->tcb = os_add_thread(task);
405 
406  /* Test the pool of ready queues for a queue of tasks with this
407  * frequency */
408  /* todo: uthash configurable without malloc */
409  ready_queue = schedule_hash_find_int(SCHEDULER_QUEUES, frequency);
410  /* HASH_FIND_INT(SCHEDULER_QUEUES, &frequency, ready_queue); */
411 
412  /* No similar tasks exist yet -- create the pool */
413  if (!ready_queue) {
414  /* Grab a new queue, remove it from the unused pile,
 415  * initialize it and associate it with this frequency of
416  * task */
417  ready_queue = SCHEDULER_UNUSED_QUEUES;
418  CDL_DELETE(SCHEDULER_UNUSED_QUEUES, ready_queue);
419 
420  ready_queue->deadline = frequency;
421  if (!SCHEDULER_QUEUES) {
422  SCHEDULER_QUEUES = ready_queue;
425  } else {
426  CDL_PREPEND(SCHEDULER_QUEUES, ready_queue);
427  }
428  /* HASH_ADD_INT(SCHEDULER_QUEUES, deadline, ready_queue); */
429  }
430 
431  /* Add task to ready queue */
432  CDL_APPEND(ready_queue->queue, ready_task);
433 }
static volatile sched_task_pool * SCHEDULER_UNUSED_QUEUES
Definition: schedule.h:34
tcb_t * os_add_thread(task_t task)
Definition: os.c:72
#define MAX_SYSTICKS_PER_HZ
Definition: schedule.h:21
DEADLINE_TYPE seriousness
static sched_task * SCHEDULER_UNUSED_TASKS
Definition: schedule.h:45
#define CDL_DELETE(head, del)
Definition: utlist.h:705
#define postpone_death()
Definition: nexus.h:40
frequency_t deadline
struct sched_task_pool * next
volatile uint32_t clock
Definition: os.c:20
sched_task * queue
sched_task_pool * schedule_hash_find_int(sched_task_pool *queues, frequency_t target_frequency)
Definition: os.c:456
#define NULL
Definition: defines.h:32
tick_t absolute_deadline
#define CDL_APPEND(head, add)
Definition: utlist.h:688
struct sched_task_pool * prev
static volatile sched_task_pool * SCHEDULER_QUEUES
Definition: schedule.h:38
#define CDL_PREPEND(head, add)
Definition: utlist.h:671

Here is the call graph for this function:

Here is the caller graph for this function:

void schedule_aperiodic ( pisr_t  pisr,
HW_TYPE  hw_type,
hw_metadata  metadata,
microseconds_t  allowed_run_time,
DEADLINE_TYPE  seriousness 
)

Schedule a pseudo-isr to be executed when a hardware event described by HW_TYPE and hw_metadata occurs.

Definition at line 435 of file os.c.

References _hw_subscribe().

439  {
440 
441  /* todo: utilize \allowed_run_time, \seriousness */
442  _hw_subscribe(hw_type, metadata, pisr, true);
443 }
void _hw_subscribe(HW_TYPE type, hw_metadata metadata, void(*isr)(notification note), bool single_shot)
Definition: hardware.c:121

Here is the call graph for this function:

sched_task_pool* schedule_hash_find_int ( sched_task_pool *  queues,
frequency_t  target_frequency 
)

Definition at line 456 of file os.c.

References sched_task_pool::deadline, sched_task_pool::next, and NULL.

Referenced by schedule().

456  {
457 
458  sched_task_pool* start = queues;
459  sched_task_pool* inspect = queues;
460 
461  if (!inspect) { return NULL; }
462 
463  do {
464  if(inspect->deadline == target_frequency) {
465  return inspect;
466  }
467  inspect = inspect->next;
468  } while (inspect != start);
469  return NULL;
470 }
frequency_t deadline
struct sched_task_pool * next
#define NULL
Definition: defines.h:32

Here is the caller graph for this function:

void schedule_init ( )

Initialize all deep datastructures used by libschedule.

Definition at line 445 of file os.c.

References DL_PREPEND, SCHEDULER_MAX_THREADS, SCHEDULER_TASK_QUEUES, SCHEDULER_TASKS, SCHEDULER_UNUSED_QUEUES, and SCHEDULER_UNUSED_TASKS.

Referenced by os_threading_init().

445  {
446 
447  int32_t i;
448  for(i=0; i<SCHEDULER_MAX_THREADS; ++i) {
449  /* Add all tasks to the unused pile */
451  /* Add all task queues to the unused pile */
453  }
454 }
static volatile sched_task_pool * SCHEDULER_UNUSED_QUEUES
Definition: schedule.h:34
#define DL_PREPEND(head, add)
Definition: utlist.h:510
static sched_task SCHEDULER_TASKS[5]
Definition: schedule.h:42
static sched_task * SCHEDULER_UNUSED_TASKS
Definition: schedule.h:45
#define SCHEDULER_MAX_THREADS
static sched_task_pool SCHEDULER_TASK_QUEUES[5]
Definition: schedule.h:31

Here is the caller graph for this function:

always void scheduler_reschedule ( void  )
inline

Definition at line 237 of file os.c.

References sched_task::absolute_deadline, clock, sched_task_pool::deadline, edf_insert(), EDF_QUEUE, FAULT_PENDSV, HWREG, IntPendSet(), sched_task::next, sched_task_pool::next, NVIC_ST_CURRENT, OS_FIRST_RUN, OS_NEXT_THREAD, sched_task::pri_next, sched_task_pool::queue, SCHEDULER_QUEUES, SysCtlClockGet(), SysTickDisable(), SysTickEnable(), SysTickIntEnable(), SysTickPeriodSet(), and sched_task::tcb.

Referenced by _os_choose_next_thread(), os_suspend(), and SysTick_Handler().

237  {
240 
241  /* find the queue that the executing task is a member of */
242  /* TODO: Should we link from the sched_task to the
243  sched_task_pool? That will avoid this loop. */
244  while (pool->queue != executing) {
245  pool = pool->next;
246  }
247 
248  /* update */
249  if (EDF_QUEUE) {
251  }
252 
253  if (OS_FIRST_RUN) {
254  OS_FIRST_RUN = false;
255  SysTickEnable();
257  } else {
258  clock += pool->deadline;
259  }
260 
261  SysTickDisable();
262  /* SysTickPeriodSet(SysCtlClockGet() / (executing->absolute_deadline - clock)); */
264  HWREG(NVIC_ST_CURRENT) = 0;
266  SysTickEnable();
267 
268  /* do the recycling, change the pool's head */
269  pool->queue = executing->next;
271 
273 
274  /* Queue the PendSV_Handler after this ISR returns */
276 }
void SysTickDisable(void)
Definition: systick.c:94
#define HWREG(x)
Definition: hw_types.h:48
volatile sched_task_pool * pool
Definition: os.c:18
volatile sched_task * executing
Definition: os.c:17
void edf_insert(sched_task *task)
Definition: os.c:491
volatile sched_task * EDF_QUEUE
Definition: os.c:27
uint32_t SysCtlClockGet(void)
Definition: sysctl.c:2727
#define NVIC_ST_CURRENT
Definition: hw_nvic.h:52
void IntPendSet(uint32_t ui32Interrupt)
Definition: interrupt.c:844
frequency_t deadline
struct sched_task * pri_next
struct sched_task_pool * next
volatile uint32_t clock
Definition: os.c:20
void SysTickIntEnable(void)
Definition: systick.c:174
bool OS_FIRST_RUN
Definition: os.c:29
sched_task * queue
void SysTickEnable(void)
Definition: systick.c:75
struct sched_task * next
static tcb_t * OS_NEXT_THREAD
Definition: os.h:78
tick_t absolute_deadline
#define FAULT_PENDSV
Definition: hw_ints.h:56
static volatile sched_task_pool * SCHEDULER_QUEUES
Definition: schedule.h:38
void SysTickPeriodSet(uint32_t ui32Period)
Definition: systick.c:221

Here is the call graph for this function:

Here is the caller graph for this function:

void SysTick_Handler ( void  )
Warning
Ensure you have something to run before enabling SysTick

Definition at line 282 of file os.c.

References scheduler_reschedule().

Referenced by __attribute__().

282  {
283 
284  asm volatile("CPSID I");
285 
287 
288  asm volatile("CPSIE I");
289 }
always void scheduler_reschedule(void)
Definition: os.c:237

Here is the call graph for this function:

Here is the caller graph for this function:

Variable Documentation

volatile uint32_t clock = 0

Definition at line 20 of file os.c.

Referenced by schedule(), and scheduler_reschedule().

volatile sched_task EDF[SCHEDULER_MAX_THREADS]

Statically allocated array of periodic tasks arranged by increasing time-to-deadline.

Definition at line 24 of file os.c.

volatile sched_task* EDF_QUEUE = NULL

Linked list of tasks (with different periods) ready to be run.

Definition at line 27 of file os.c.

Referenced by edf_get_edf_queue(), edf_insert(), edf_pop(), and scheduler_reschedule().

volatile sched_task* executing

Definition at line 17 of file os.c.

bool OS_FIRST_RUN = true

Definition at line 29 of file os.c.

Referenced by scheduler_reschedule().

uint8_t OS_NUM_THREADS

Definition at line 32 of file os.c.

Referenced by get_os_num_threads(), and os_remove_thread().

int32_t OS_PROGRAM_STACKS[SCHEDULER_MAX_THREADS][100]
static

A block of memory for each thread's local stack.

Definition at line 16 of file os.c.

Referenced by _os_reset_thread_stack(), and os_threading_init().

bool OS_THREADING_INITIALIZED

Definition at line 31 of file os.c.

Referenced by os_threading_init().

volatile sched_task_pool* pool

Definition at line 18 of file os.c.