EE445M RTOS
Taken at the University of Texas Spring 2015
os.c
/* -*- mode: c; c-basic-offset: 4; -*- */
/* Created by Hershal Bhave 2015-02-08 */
/* Revision History: Look in Git FGT */

#include "os.h"

#include "inc/hw_nvic.h"
#include "inc/hw_types.h"

#include "libstd/nexus.h"
#include "libut/utlist.h"
#include "libsystick/systick.h"
#include "libschedule/schedule.h"

static int32_t OS_PROGRAM_STACKS[SCHEDULER_MAX_THREADS][100];
volatile sched_task* executing;
volatile sched_task_pool* pool;

volatile uint32_t clock = 0;

volatile sched_task EDF[SCHEDULER_MAX_THREADS];

volatile sched_task* EDF_QUEUE;

bool OS_FIRST_RUN = true;

bool OS_THREADING_INITIALIZED;
uint8_t OS_NUM_THREADS;

void os_threading_init() {
    uint32_t i;

    /* This check exists so libraries may call os_threading_init
     * without breaking everything. We don't want to contribute to the
     * black magic that is the specific initialization of TI
     * libraries, do we? */
    if (OS_THREADING_INITIALIZED) {
        return;
    }

    OS_THREADING_INITIALIZED = true;

    /* Initialize thread metadata */
    for (i=0; i<SCHEDULER_MAX_THREADS; ++i) {
        OS_THREADS[i].id = i;
        /* Seed the dead-thread pile; os_add_thread pops from it. */
        CDL_APPEND(os_dead_threads, &OS_THREADS[i]);
        /* OS_THREADS[i].status = THREAD_DEAD; */
        /* OS_THREADS[i].sleep_timer = 0; */
    }

    /* Initialize thread pool metadata */
    for (i=0; i<OS_NUM_POOLS; ++i) {
        OS_THREAD_POOL[i] = (tcb_t*) NULL;
    }

    OS_NUM_THREADS = 0;

    schedule_init();
}
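
/* Illustrative usage sketch (not part of the original file): a minimal
 * bring-up of this module. `blink' is a hypothetical task and DL_HARD
 * stands in for whatever DEADLINE_TYPE value the project defines; the
 * block is compiled out so it cannot affect the build. */
#if 0
static void blink(void) {
    for (;;) {
        /* toggle an LED here, then give up the processor */
        os_suspend();
    }
}

int main(void) {
    os_threading_init();            /* idempotent, see the guard above */
    schedule(blink, 100, DL_HARD);  /* 100 ticks between deadlines (assumed units) */
    os_launch();                    /* never returns */
    return 0;                       /* unreachable */
}
#endif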

int8_t get_os_num_threads() {
    return OS_NUM_THREADS;
}

tcb_t* os_add_thread(task_t task) {

    tcb_t* thread_to_add;

    /* 1. Disable interrupts and save the priority mask */
    atomic_start();

    /* 2. Pop the task from the dead_thread pile */
    thread_to_add = os_dead_threads;
    if (!thread_to_add) {
        /* No TCBs left to recycle. */
        atomic_end();
        return NULL;
    }
    CDL_DELETE(os_dead_threads, thread_to_add);

    /* 3. Add the thread to the appropriate priority pool. */
    /* CDL_APPEND(os_running_threads, thread_to_add); */
    /* CDL_APPEND(OS_THREAD_POOL[priority], thread_to_add); */

    /* 4. Set the initial stack contents for the new thread. */
    _os_reset_thread_stack(thread_to_add, task);

    /* 5. Set metadata for this thread's TCB. */

    /* TODO: Remove this from semaphore. The scheduler manages
     * this. */
    /* thread_to_add->status = THREAD_RUNNING; */
    thread_to_add->entry_point = task;
    /* thread_to_add->priority = priority; */
    /* thread_to_add->sleep_timer = 0; */

    /* Mirror the decrement in os_remove_thread. */
    ++OS_NUM_THREADS;

    atomic_end();

    /* 6. Return. */
    return thread_to_add;
}

tcb_t* os_remove_thread(task_t task) {

    int32_t status;
    tcb_t* thread_to_remove;

    /* 1. Disable interrupts and save the priority mask */
    /* TODO: clean this up, make uniform */
    status = StartCritical();

    /* 2. Pop the task from the running_thread pile and add it to the
     * list of dead threads. */
    thread_to_remove = os_tcb_of(task);
    CDL_DELETE(OS_THREAD_POOL[thread_to_remove->priority], thread_to_remove);
    /* Append means low tcb_t memory reusability. Prepend means high. */
    CDL_APPEND(os_dead_threads, thread_to_remove);

    /* OPTIONAL TODO: add a check for overwritten stacks. As long as
     * we kill every thread in our testing, we'll get valuable yet
     * unobtrusive debugging data. Also consider a check on the
     * immutable fields, using the offset from the array base as
     * verification. */

    /* 3. Reset metadata for this thread's TCB. */
    /* thread_to_remove->status = THREAD_DEAD; */
    thread_to_remove->entry_point = NULL;
    thread_to_remove->next = NULL;
    thread_to_remove->prev = NULL;

    /* 4. Return. */
    /* TODO: clean this up, make uniform */
    --OS_NUM_THREADS;

    EndCritical(status);
    return thread_to_remove;
}

int32_t os_running_thread_id() {

    return os_running_threads->id;
}

tcb_t* os_tcb_of(const task_t task) {

    int32_t i;
    for(i=0; i<SCHEDULER_MAX_THREADS; ++i) {
        if (task == OS_THREADS[i].entry_point) {
            return &OS_THREADS[i];
        }
    }
#ifndef DANGER_ZONE
    postpone_death();
#endif
    return NULL;
}
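
/* A caveat worth noting: os_tcb_of resolves a TCB by comparing entry
 * points, so two live threads spawned from the same function are
 * indistinguishable here and the first match wins. Sketch of the
 * consequence, with a hypothetical task `work':
 *
 *   os_add_thread(work);   --> thread 0
 *   os_add_thread(work);   --> thread 1
 *   os_tcb_of(work);       --> always thread 0's TCB
 */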

void os_launch() {

    edf_init();

    /* Bootstrap the scheduler: this sets OS_NEXT_THREAD and, on the
     * first run, enables SysTick. */
    scheduler_reschedule();

    /* acquire the pointer to the stack pointer here */
    asm volatile("LDR R0, =OS_NEXT_THREAD");

    /* acquire the current value of the stack pointer */
    asm volatile("LDR R0, [R0]");
    asm volatile("LDR R0, [R0]");

    /* set the main stack pointer to the thread's stack */
    asm volatile("MSR MSP, R0");

    /* change EXC_RETURN for return on MSP */
    /* asm volatile("ORR LR, LR, #4"); */

    /* change the active stack to use msp */
    /* asm volatile("MRS R0, CONTROL"); */
    /* asm volatile("ORR R0, R0, #2"); */
    /* asm volatile("MSR CONTROL, R0"); */

    asm volatile("POP {R4-R11}");

    asm volatile("POP {LR}");
    asm volatile("POP {R0-R3}");

    asm volatile("POP {R12, LR}");
    asm volatile("POP {PC}");

    asm volatile("BX LR");
}
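
/* Note on the pop sequence above: os_launch consumes the synthetic
 * frame built by _os_reset_thread_stack -- first the software context
 * (R4-R11 and the saved LR), then the hardware-style frame (R0-R3,
 * R12, LR), and finally the PC slot, which transfers control straight
 * into the first task. The xPSR slot is left on the stack, and the
 * trailing BX LR is never reached. */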

always inline
void _os_reset_thread_stack(tcb_t* tcb, task_t task) {

    hwcontext_t* hwcontext = (hwcontext_t*)
        (((uint32_t)OS_PROGRAM_STACKS[tcb->id]) +
         sizeof(uint32_t)*OS_STACK_SIZE - sizeof(hwcontext_t));

    swcontext_t* swcontext = (swcontext_t*)
        (((uint32_t)hwcontext) - sizeof(swcontext_t));

    hwcontext->pc = (uint32_t)(task);
    hwcontext->psr = 0x01000000;

    swcontext->lr = 0xfffffff9;

#ifdef OS_REGISTER_DEBUGGING_ENABLED
    /* Fill every register slot with a recognizable pattern so stack
     * frames are easy to spot in a debugger. */
    hwcontext->r0 = 0x00000000;
    hwcontext->r1 = 0x01010101;
    hwcontext->r2 = 0x02020202;
    hwcontext->r3 = 0x03030303;

    swcontext->r4 = 0x04040404;
    swcontext->r5 = 0x05050505;
    swcontext->r6 = 0x06060606;
    swcontext->r7 = 0x07070707;
    swcontext->r8 = 0x08080808;
    swcontext->r9 = 0x09090909;
    swcontext->r10 = 0x10101010;
    swcontext->r11 = 0x11111111;
#endif /* OS_REGISTER_DEBUGGING_ENABLED */

    tcb->sp = (uint32_t*)(((uint32_t)hwcontext) - sizeof(swcontext_t));

    /* Debugging leftover: snapshot MSP and CONTROL into scratch
     * registers, then restore them. */
    asm volatile ("PUSH {R9, R10, R11, R12}");
    asm volatile ("MRS R12, MSP");
    asm volatile ("MRS R11, MSP");
    asm volatile ("MRS R10, CONTROL");
    asm volatile ("POP {R9, R10, R11, R12}");
}
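
/* Resulting initial stack layout for a fresh thread (the stack grows
 * toward lower addresses; tcb->sp ends up at the lowest used word):
 *
 *   high addr:  psr = 0x01000000   (Thumb bit set)
 *               pc  = task entry point
 *               lr, r12, r3..r0    (hwcontext_t: popped by hardware on
 *                                   exception return)
 *               lr  = 0xfffffff9   (EXC_RETURN: thread mode, main stack)
 *   tcb->sp ->  r11..r4            (swcontext_t: popped in software) */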

always inline
void scheduler_reschedule(void) {

    executing = EDF_QUEUE;
    pool = SCHEDULER_QUEUES;

    /* find the queue that the executing task is a member of */
    /* TODO: Should we link from the sched_task to the
       sched_task_pool? That would avoid this loop. */
    while (pool->queue != executing) {
        pool = pool->next;
    }

    /* update */
    if (EDF_QUEUE) {
        EDF_QUEUE = EDF_QUEUE->pri_next;
    }

    if (OS_FIRST_RUN) {
        OS_FIRST_RUN = false;
        SysTickEnable();
    } else {
        clock += pool->deadline;
    }

    SysTickDisable();
    /* SysTickPeriodSet(SysCtlClockGet() / (executing->absolute_deadline - clock)); */
    HWREG(NVIC_ST_CURRENT) = 0;
    executing->absolute_deadline = pool->deadline + clock;
    SysTickEnable();

    /* do the recycling, change the pool's head */
    pool->queue = executing->next;
    edf_insert(pool->queue);

    OS_NEXT_THREAD = executing->tcb;

    /* Queue the PendSV_Handler after this ISR returns */
    IntPendSet(FAULT_PENDSV);
}

/* This is the canonical code for EDF scheduling. */
void SysTick_Handler() {

    asm volatile("CPSID I");

    scheduler_reschedule();

    asm volatile("CPSIE I");
}

void PendSV_Handler() {

    asm volatile("CPSID I");

    /* -------------------------------------------------- */
    /* phase 1: store context */
    /* -------------------------------------------------- */

    /* load the msp of thread A into r12 */
    asm volatile("mrs r12, msp");

    /* save thread A's registers onto its stack */
    asm volatile("stmdb r12!, {r4 - r11, lr}");

    /* -------------------------------------------------- */
    /* phase 2: os_running_threads manipulation */
    /* -------------------------------------------------- */

    /* set the profiling data structures for the current thread */
    /* should this be "enabled" or "disabled"? */
#ifdef OS_TIME_PROFILING_ENABLED
    if (os_running_threads->time_started >= 0) {
        os_running_threads->time_running_last =
            HWREG(NVIC_ST_CURRENT) - os_running_threads->time_started;

        /* If we haven't reached the max samples yet, increment the
           number of samples taken */
        if (os_running_threads->time_running_samples_taken < OS_TIME_MAX_SAMPLES) {
            ++(os_running_threads->time_running_samples_taken);
        }

        HWREG(NVIC_ST_CURRENT) = 0;
        /* _os_reset_thread_stack(os_running_threads, os_running_threads->entry_point); */

        /* fold the new sample into the running average */
        os_running_threads->time_running_avg = os_running_threads->time_running_samples_taken > 1 ?
            /* if true */
            (os_running_threads->time_running_last +
             os_running_threads->time_running_samples_taken * os_running_threads->time_running_avg) /
            (os_running_threads->time_running_samples_taken+1) :
            /* else */
            os_running_threads->time_running_last;

    }
#endif /* OS_TIME_PROFILING_ENABLED */

    /* load the address of os_running_threads */
    asm volatile("LDR R2, =os_running_threads");

    /* r3 = *os_running_threads, the TCB of thread A */
    asm volatile("LDR R3, [R2]");

    /* load the value of OS_NEXT_THREAD into r1 */
    asm volatile("LDR R1, =OS_NEXT_THREAD");
    asm volatile("LDR R1, [R1]");

    /* os_running_threads = OS_NEXT_THREAD */
    asm volatile("STR R1, [R2]");

    /* -------------------------------------------------- */
    /* phase 3: load context */
    /* -------------------------------------------------- */

    /* save thread A's msp into its TCB */
    asm volatile("str r12, [r3, #0]");

    /* load thread B's msp */
    asm volatile("ldr r12, [r1]");

    /* set the profiling data structures for the next thread */
#ifdef OS_TIME_PROFILING_ENABLED
    os_running_threads->time_started = HWREG(NVIC_ST_CURRENT);
#endif /* OS_TIME_PROFILING_ENABLED */

    /* load thread B's context */
    asm volatile("ldmia r12!, {r4 - r11, lr}");

    /* put thread B's msp into the arch msp register */
    asm volatile("msr msp, r12");

    /* reenable interrupts */
    asm volatile("CPSIE I");

    asm volatile("bx lr");
}
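
/* A convention this handler relies on (standard Cortex-M practice,
 * though not spelled out in this file): PendSV should be configured
 * with the lowest interrupt priority, so the switch pended by
 * scheduler_reschedule only runs once every other ISR has drained and
 * never preempts one mid-flight. */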

void os_suspend() {

    /* Pick the next thread to run and pend the context switch. */
    scheduler_reschedule();
}

void schedule(task_t task, frequency_t frequency, DEADLINE_TYPE seriousness) {

    sched_task *ready_task = NULL;
    sched_task_pool *ready_queue = NULL;

    /* Grab a new task from the unused task pile */
    ready_task = SCHEDULER_UNUSED_TASKS;
    CDL_DELETE(SCHEDULER_UNUSED_TASKS, ready_task);

    /* Set the new task's metadata */
    ready_task->task = task;
    ready_task->seriousness = seriousness;

    if (frequency > MAX_SYSTICKS_PER_HZ) {
        postpone_death();
    }
    ready_task->absolute_deadline = frequency + clock;

    ready_task->tcb = os_add_thread(task);

    /* Test the pool of ready queues for a queue of tasks with this
     * frequency */
    /* todo: uthash configurable without malloc */
    ready_queue = schedule_hash_find_int(SCHEDULER_QUEUES, frequency);
    /* HASH_FIND_INT(SCHEDULER_QUEUES, &frequency, ready_queue); */

    /* No similar tasks exist yet -- create the pool */
    if (!ready_queue) {
        /* Grab a new queue, remove it from the unused pile,
         * initialize it and associate it with this frequency of
         * task */
        ready_queue = SCHEDULER_UNUSED_QUEUES;
        CDL_DELETE(SCHEDULER_UNUSED_QUEUES, ready_queue);

        ready_queue->deadline = frequency;
        if (!SCHEDULER_QUEUES) {
            SCHEDULER_QUEUES = ready_queue;
            ready_queue->next = ready_queue;
            ready_queue->prev = ready_queue;
        } else {
            CDL_PREPEND(SCHEDULER_QUEUES, ready_queue);
        }
        /* HASH_ADD_INT(SCHEDULER_QUEUES, deadline, ready_queue); */
    }

    /* Add task to ready queue */
    CDL_APPEND(ready_queue->queue, ready_task);
}
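
/* Illustrative sketch (not part of the original file): tasks scheduled
 * at the same frequency share one sched_task_pool, while a new
 * frequency pulls a fresh pool from the unused pile. `producer',
 * `consumer' and `logger' are hypothetical tasks and the DEADLINE_TYPE
 * constants are assumed; the block is compiled out. */
#if 0
void example_schedule(void) {
    schedule(producer, 10, DL_HARD);   /* creates the 10-tick pool */
    schedule(consumer, 10, DL_HARD);   /* joins the existing 10-tick pool */
    schedule(logger, 100, DL_SOFT);    /* creates a separate 100-tick pool */
}
#endif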
434 
436  HW_TYPE hw_type,
437  hw_metadata metadata,
438  microseconds_t allowed_run_time,
439  DEADLINE_TYPE seriousness) {
440 
441  /* todo: utilize \allowed_run_time, \seriousness */
442  _hw_subscribe(hw_type, metadata, pisr, true);
443 }

void schedule_init() {

    int32_t i;
    for(i=0; i<SCHEDULER_MAX_THREADS; ++i) {
        /* Add all tasks to the unused pile */
        CDL_APPEND(SCHEDULER_UNUSED_TASKS, &SCHEDULER_TASKS[i]);
        /* Add all task queues to the unused pile */
        CDL_APPEND(SCHEDULER_UNUSED_QUEUES, &SCHEDULER_TASK_QUEUES[i]);
    }
}

sched_task_pool* schedule_hash_find_int(sched_task_pool* queues,
                                        frequency_t target_frequency) {

    sched_task_pool* start = queues;
    sched_task_pool* inspect = queues;

    if (!inspect) { return NULL; }

    do {
        if (inspect->deadline == target_frequency) {
            return inspect;
        }
        inspect = inspect->next;
    } while (inspect != start);
    return NULL;
}

/* Create the EDF queue for the first time */
void edf_init() {

    sched_task_pool *start = SCHEDULER_QUEUES;
    sched_task_pool *next = start->next;

    /* avoid the NULL EDF_QUEUE to allow the optimized form of `edf_insert' */
    DL_EDF_INSERT(EDF_QUEUE, start->queue);

    /* Create the rest of the EDF queue */
    while(next && next != start) {
        /* DL_EDF_INSERT(EDF_QUEUE, next->queue); */
        edf_insert(next->queue);
        next = next->next;
    }
}

void edf_insert(sched_task* task) {

    sched_task *elt = EDF_QUEUE;

    while(elt && task->absolute_deadline > elt->absolute_deadline) {
        elt = elt->pri_next;
    }
    /* inject task at this point -- if elt is null, this is the new end */
    if (elt) {
        if (elt == EDF_QUEUE) {
            DL_EDF_PREPEND(elt, task);
            EDF_QUEUE = elt;
        } else {
            DL_EDF_PREPEND(elt, task);
        }
    } else {
        /* TODO: this incurs the O(n) again, optimize */
        DL_EDF_INSERT(EDF_QUEUE, task);
    }
}
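
/* Worked example (illustrative): if tasks A, B and C carry
 * absolute_deadlines of 30, 10 and 20, inserting them in that order
 * yields the queue B(10) -> C(20) -> A(30), so edf_pop below always
 * dispatches the task whose deadline expires soonest. */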

tcb_t* edf_pop() {

    volatile sched_task *executing = EDF_QUEUE;
    volatile sched_task_pool *pool = SCHEDULER_QUEUES;

    /* DL_EDF_DELETE(EDF_QUEUE, elt); */
    /* TODO: CHECKME: should we use pri_next or pri_prev?? */
    EDF_QUEUE = EDF_QUEUE->pri_next;

    /* KLUDGE: fix this for production */
    while (pool->queue != executing) {
        pool = pool->next;
    }
    /* will drop out with pool->queue == executing */

    executing->absolute_deadline = pool->deadline * SYSTICKS_PER_HZ;

    /* do the recycling, change the pool's head */
    /* TODO: CHECKME: should we use next or prev?? */
    pool->queue = executing->next;
    edf_insert(pool->queue);

    /* TODO: after use, put the command back in the appropriate pool */
    return executing->tcb;
}

sched_task* edf_get_edf_queue() {
    return EDF_QUEUE;
}

/* TODO: implement the edf queue of queues */
void _os_choose_next_thread() {

}