#ifndef _RTAI_SCHEDCORE_H
#define _RTAI_SCHEDCORE_H

#include <rtai_version.h>
#include <rtai_lxrt.h>
#include <rtai_sched.h>
#include <rtai_malloc.h>
#include <rtai_trace.h>
#include <rtai_leds.h>
#include <rtai_sem.h>
#include <rtai_rwl.h>
#include <rtai_spl.h>
#include <rtai_scb.h>
#include <rtai_mbx.h>
#include <rtai_msg.h>
#include <rtai_tbx.h>
#include <rtai_mq.h>
#include <rtai_bits.h>
#include <rtai_wd.h>
#include <rtai_tasklets.h>
#include <rtai_fifos.h>
#include <rtai_netrpc.h>
#include <rtai_shm.h>
#include <rtai_usi.h>

#ifdef __KERNEL__

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <asm/param.h>
#include <asm/system.h>
#include <asm/io.h>

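/*
 * Per-CPU scheduler state, defined in the scheduler proper and indexed by
 * CPU id: the placeholder "Linux" task anchoring each CPU's ready and timed
 * lists, the RTAI task currently running on each CPU, the time horizon up
 * to which timed tasks are resumed, and the oneshot-timer mode flag.
 */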
extern RT_TASK rt_smp_linux_task[];

extern RT_TASK *rt_smp_current[];

extern RTIME rt_smp_time_h[];

extern int rt_smp_oneshot_timer[];

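/*
 * Scheduler memory helpers: with CONFIG_RTAI_MALLOC they map to RTAI's own
 * allocator (the heap is initialized/torn down here only when the allocator
 * is built into this module); otherwise they fall back to kmalloc/kfree and
 * the exit-handler hooks compile away to nothing.
 */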
#ifdef CONFIG_RTAI_MALLOC
#define sched_malloc(size)  rt_malloc((size))
#define sched_free(adr)     rt_free((adr))
#ifndef CONFIG_RTAI_MALLOC_BUILTIN
#define sched_mem_init()
#define sched_mem_end()
#else
#define sched_mem_init() \
        { if (__rtai_heap_init() != 0) { \
                return(-ENOMEM); \
        } }
#define sched_mem_end()  __rtai_heap_exit()
#endif
#define call_exit_handlers(task)                 __call_exit_handlers(task)
#define set_exit_handler(task, fun, arg1, arg2)  __set_exit_handler(task, fun, arg1, arg2)
#else
#define sched_malloc(size)  kmalloc((size), GFP_KERNEL)
#define sched_free(adr)     kfree((adr))
#define sched_mem_init()
#define sched_mem_end()
#define call_exit_handlers(task)
#define set_exit_handler(task, fun, arg1, arg2)
#endif

#define RT_SEM_MAGIC 0xaabcdeff

#define SEM_ERR (0xFfff)

#define MSG_ERR ((RT_TASK *)0xFfff)

#define NOTHING ((void *)0)

#define SOMETHING ((void *)1)

#define SEMHLF 0x0000FFFF
#define RPCHLF 0xFFFF0000
#define RPCINC 0x00010000

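/*
 * Accessors for the task currently running on this CPU.  DECLARE_RT_CURRENT
 * and ASSIGN_RT_CURRENT are meant to be used together, e.g. (illustrative
 * sketch only):
 *
 *      DECLARE_RT_CURRENT;
 *      ...
 *      ASSIGN_RT_CURRENT;      // sets both cpuid and rt_current
 *
 * RT_CURRENT is the one-shot form that does not keep cpuid around.
 */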
#define DECLARE_RT_CURRENT  int cpuid; RT_TASK *rt_current
#define ASSIGN_RT_CURRENT   rt_current = rt_smp_current[cpuid = rtai_cpuid()]
#define RT_CURRENT          rt_smp_current[rtai_cpuid()]

#define MAX_LINUX_RTPRIO  99
#define MIN_LINUX_RTPRIO   1

#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
void rtai_handle_isched_lock(int nesting);
#endif

#ifdef CONFIG_SMP
#define rt_time_h      (rt_smp_time_h[cpuid])
#define oneshot_timer  (rt_smp_oneshot_timer[cpuid])
#define rt_linux_task  (rt_smp_linux_task[cpuid])
#else
#define rt_time_h      (rt_smp_time_h[0])
#define oneshot_timer  (rt_smp_oneshot_timer[0])
#define rt_linux_task  (rt_smp_linux_task[0])
#endif

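/*
 * Rescheduling helpers.  On SMP a request that targets another CPU is
 * forwarded through a scheduling IPI (schedmap is a bitmask of CPUs to
 * kick); on UP every variant collapses to a plain rt_schedule().
 */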
#ifdef CONFIG_SMP

static inline void send_sched_ipi(unsigned long dest)
{
        _send_sched_ipi(dest);
}

#define RT_SCHEDULE_MAP(schedmap) \
        do { if (schedmap) send_sched_ipi(schedmap); } while (0)

#define RT_SCHEDULE_MAP_BOTH(schedmap) \
        do { if (schedmap) send_sched_ipi(schedmap); rt_schedule(); } while (0)

#define RT_SCHEDULE(task, cpuid) \
        do { \
                if ((task)->runnable_on_cpus != (cpuid)) { \
                        send_sched_ipi(1 << (task)->runnable_on_cpus); \
                } else { \
                        rt_schedule(); \
                } \
        } while (0)

#define RT_SCHEDULE_BOTH(task, cpuid) \
        do { \
                if ((task)->runnable_on_cpus != (cpuid)) { \
                        send_sched_ipi(1 << (task)->runnable_on_cpus); \
                } \
                rt_schedule(); \
        } while (0)

#else

#define send_sched_ipi(dest)

#define RT_SCHEDULE_MAP_BOTH(schedmap)  rt_schedule()

#define RT_SCHEDULE_MAP(schedmap)       rt_schedule()

#define RT_SCHEDULE(task, cpuid)        rt_schedule()

#define RT_SCHEDULE_BOTH(task, cpuid)   rt_schedule()

#endif

#define BASE_SOFT_PRIORITY 1000000000

#define TASK_HARDREALTIME TASK_UNINTERRUPTIBLE

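/*
 * Insert a ready task among the EDF tasks (policy < 0) at the front of the
 * per-CPU ready list, keeping them sorted by increasing "period", which is
 * the ordering key the EDF policy uses here; the task lands before the
 * first non-EDF entry it meets.
 */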
static inline void enq_ready_edf_task(RT_TASK *ready_task)
{
        RT_TASK *task;
#ifdef CONFIG_SMP
        task = rt_smp_linux_task[ready_task->runnable_on_cpus].rnext;
#else
        task = rt_smp_linux_task[0].rnext;
#endif
        while (task->policy < 0 && ready_task->period >= task->period) {
                task = task->rnext;
        }
        task->rprev = (ready_task->rprev = task->rprev)->rnext = ready_task;
        ready_task->rnext = task;
}

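/*
 * Ring of Linux tasks waiting to be woken up through an RTAI service
 * request (SRQ).  MAX_WAKEUP_SRQ (2 << 6 == 128) must stay a power of two
 * because the in/out indices wrap with & (MAX_WAKEUP_SRQ - 1).
 */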
#define MAX_WAKEUP_SRQ (2 << 6)

struct klist_t { volatile int srq, in, out; void *task[MAX_WAKEUP_SRQ]; };
extern struct klist_t wake_up_srq;

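/*
 * Make a task ready to run.  Hard real-time tasks are linked into the
 * per-CPU ready list, kept in priority order (smaller value first); soft
 * tasks are instead flagged RT_SCHED_SFTRDY, their Linux thread is pushed
 * into the wake_up_srq ring and the SRQ is pended so Linux wakes it up.
 */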
static inline void enq_ready_task(RT_TASK *ready_task)
{
        RT_TASK *task;
        if (ready_task->is_hard) {
#ifdef CONFIG_SMP
                task = rt_smp_linux_task[ready_task->runnable_on_cpus].rnext;
#else
                task = rt_smp_linux_task[0].rnext;
#endif
                while (ready_task->priority >= task->priority) {
                        if ((task = task->rnext)->priority < 0) break;
                }
                task->rprev = (ready_task->rprev = task->rprev)->rnext = ready_task;
                ready_task->rnext = task;
        } else {
                ready_task->state |= RT_SCHED_SFTRDY;
                wake_up_srq.task[wake_up_srq.in] = ready_task->lnxtsk;
                wake_up_srq.in = (wake_up_srq.in + 1) & (MAX_WAKEUP_SRQ - 1);
                rt_pend_linux_srq(wake_up_srq.srq);
        }
}

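/*
 * Change a task's priority and, if it sits on the ready list, relink it at
 * its new position; both helpers return nonzero only when the priority
 * actually changed.  renq_current is the variant for the running task,
 * which is always relinked.
 */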
static inline int renq_ready_task(RT_TASK *ready_task, int priority)
{
        int retval;
        if ((retval = ready_task->priority != priority)) {
                ready_task->priority = priority;
                if (ready_task->state == RT_SCHED_READY) {
                        (ready_task->rprev)->rnext = ready_task->rnext;
                        (ready_task->rnext)->rprev = ready_task->rprev;
                        enq_ready_task(ready_task);
                }
        }
        return retval;
}

static inline int renq_current(RT_TASK *rt_current, int priority)
{
        int retval;
        if ((retval = rt_current->priority != priority)) {
                rt_current->priority = priority;
                (rt_current->rprev)->rnext = rt_current->rnext;
                (rt_current->rnext)->rprev = rt_current->rprev;
                enq_ready_task(rt_current);
        }
        return retval;
}

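/*
 * Unlink a task from the ready list.  For a task running in soft mode the
 * backing Linux thread is first put into TASK_HARDREALTIME (uninterruptible
 * sleep) so Linux will not run it while it is blocked on the RTAI side;
 * rem_ready_current skips the RT_SCHED_READY check for the running task.
 */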
static inline void rem_ready_task(RT_TASK *task)
{
        if (task->state == RT_SCHED_READY) {
                if (!task->is_hard) {
                        (task->lnxtsk)->state = TASK_HARDREALTIME;
                }
                (task->rprev)->rnext = task->rnext;
                (task->rnext)->rprev = task->rprev;
        }
}

static inline void rem_ready_current(RT_TASK *rt_current)
{
        if (!rt_current->is_hard) {
                (rt_current->lnxtsk)->state = TASK_HARDREALTIME;
        }
        (rt_current->rprev)->rnext = rt_current->rnext;
        (rt_current->rnext)->rprev = rt_current->rprev;
}

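/*
 * Insert a task into the per-CPU timed list, kept sorted by increasing
 * resume_time.
 */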
static inline void enq_timed_task(RT_TASK *timed_task)
{
        RT_TASK *task;
#ifdef CONFIG_SMP
        task = rt_smp_linux_task[timed_task->runnable_on_cpus].tnext;
#else
        task = rt_smp_linux_task[0].tnext;
#endif
        while (timed_task->resume_time > task->resume_time) {
                task = task->tnext;
        }
        task->tprev = (timed_task->tprev = task->tprev)->tnext = timed_task;
        timed_task->tnext = task;
}

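/*
 * Release every timed task whose resume_time has been reached (<= rt_time_h):
 * its delay/blocking state bits are cleared and, if nothing else keeps it
 * blocked, it is put back on the ready list (EDF or priority queue).  The
 * timed list head is then relinked past the tasks that were released.
 */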
static inline void wake_up_timed_tasks(int cpuid)
{
        RT_TASK *task;
#ifdef CONFIG_SMP
        task = rt_smp_linux_task[cpuid].tnext;
#else
        task = rt_smp_linux_task[0].tnext;
#endif
        while (task->resume_time <= rt_time_h) {
                if ((task->state &= ~(RT_SCHED_DELAYED | RT_SCHED_SEMAPHORE | RT_SCHED_RECEIVE | RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_RETURN | RT_SCHED_MBXSUSP)) == RT_SCHED_READY) {
                        if (task->policy < 0) {
                                enq_ready_edf_task(task);
                        } else {
                                enq_ready_task(task);
                        }
                }
                task = task->tnext;
        }
#ifdef CONFIG_SMP
        rt_smp_linux_task[cpuid].tnext = task;
        task->tprev = &rt_smp_linux_task[cpuid];
#else
        rt_smp_linux_task[0].tnext = task;
        task->tprev = &rt_smp_linux_task[0];
#endif
}

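/* Unlink a task from the timed list if it is still marked RT_SCHED_DELAYED. */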
static inline void rem_timed_task(RT_TASK *task)
{
        if ((task->state & RT_SCHED_DELAYED)) {
                (task->tprev)->tnext = task->tnext;
                (task->tnext)->tprev = task->tprev;
        }
}

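/*
 * get_time() resolves to rt_get_time(); the #if 0 variant below, kept only
 * for reference, would read the TSC directly in oneshot mode and the last
 * tick time otherwise.
 */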
#define get_time() rt_get_time()
#if 0
static inline RTIME get_time(void)
{
#ifdef CONFIG_SMP
        int cpuid;
        return rt_smp_oneshot_timer[cpuid = rtai_cpuid()] ? rdtsc() : rt_smp_times[cpuid].tick_time;
#else
        return rt_smp_oneshot_timer[0] ? rdtsc() : rt_smp_times[0].tick_time;
#endif
}
#endif

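/*
 * Link a task into the wait queue it blocks on: with qtype == 0 the queue
 * is kept in priority order, otherwise the task is simply appended (FIFO).
 * dequeue_blocked undoes the link and clears blocked_on/prio_passed_to.
 */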
static inline void enqueue_blocked(RT_TASK *task, QUEUE *queue, int qtype)
{
        QUEUE *q;
        task->blocked_on = (q = queue);
        if (!qtype) {
                while ((q = q->next) != queue && (q->task)->priority <= task->priority);
        }
        q->prev = (task->queue.prev = q->prev)->next = &(task->queue);
        task->queue.next = q;
}

static inline void dequeue_blocked(RT_TASK *task)
{
        task->prio_passed_to = NOTHING;
        (task->queue.prev)->next = task->queue.next;
        (task->queue.next)->prev = task->queue.prev;
        task->blocked_on = NOTHING;
}

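/*
 * Priority inheritance: propagate "from"'s priority along the chain of
 * tasks recorded in prio_passed_to, boosting every target whose priority
 * value is larger.  Boosted ready tasks are relinked in the ready list;
 * blocked tasks are repositioned in their priority-ordered wait queue
 * (FIFO-type semaphore queues are left untouched).  On SMP the return
 * value is a bitmask of CPUs whose ready lists were touched, suitable for
 * RT_SCHEDULE_MAP; on UP it is always 0.
 */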
static __volatile__ inline unsigned long pass_prio(RT_TASK *to, RT_TASK *from)
{
        QUEUE *q;
#ifdef CONFIG_SMP
        unsigned long schedmap;
        schedmap = 0;
#endif
        from->prio_passed_to = to;
        while (to && to->priority > from->priority) {
                to->priority = from->priority;
                if (to->state == RT_SCHED_READY) {
                        (to->rprev)->rnext = to->rnext;
                        (to->rnext)->rprev = to->rprev;
                        enq_ready_task(to);
#ifdef CONFIG_SMP
                        set_bit(to->runnable_on_cpus & 0x1F, &schedmap);
#endif
                } else if ((q = to->blocked_on) && !((to->state & RT_SCHED_SEMAPHORE) &&
                           ((SEM *)q)->qtype)) {
                        (to->queue.prev)->next = to->queue.next;
                        (to->queue.next)->prev = to->queue.prev;
                        while ((q = q->next) != to->blocked_on && (q->task)->priority <= to->priority);
                        q->prev = (to->queue.prev = q->prev)->next = &(to->queue);
                        to->queue.next = q;
                }
                to = to->prio_passed_to;
        }
#ifdef CONFIG_SMP
        return schedmap;
#else
        return 0;
#endif
}

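/*
 * Return the task currently running on this CPU; on SMP the lookup is done
 * with the global lock held and interrupts off so cpuid and the current
 * pointer are read consistently.
 */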
static inline RT_TASK *_rt_whoami(void)
{
#ifdef CONFIG_SMP
        RT_TASK *rt_current;
        unsigned long flags;
        flags = rt_global_save_flags_and_cli();
        rt_current = RT_CURRENT;
        rt_global_restore_flags(flags);
        return rt_current;
#else
        return rt_smp_current[0];
#endif
}

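/*
 * Run the task's exit hooks and free their descriptors.  Because
 * __set_exit_handler pushes new hooks onto the head of the list, hooks
 * execute in reverse registration order (LIFO).
 */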
static inline void __call_exit_handlers(RT_TASK *task)
{
        XHDL *pt, *tmp;

        pt = task->ExitHook;
        while (pt) {
                (*pt->fun)(pt->arg1, pt->arg2);
                tmp = pt;
                pt = pt->nxt;
                rt_free(tmp);
        }
        task->ExitHook = 0;
}

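/*
 * Register fun(arg1, arg2) to be called when the task is deleted.  The new
 * hook is pushed onto the head of task->ExitHook; returns the new XHDL, or
 * 0 on a bad task magic or allocation failure.
 */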
static inline XHDL *__set_exit_handler(RT_TASK *task, void (*fun)(void *, int), void *arg1, int arg2)
{
        XHDL *p;

        if (task->magic != RT_TASK_MAGIC) return 0;
        if (!(p = (XHDL *)rt_malloc(sizeof(XHDL)))) return 0;
        p->fun  = fun;
        p->arg1 = arg1;
        p->arg2 = arg2;
        p->nxt  = task->ExitHook;
        return (task->ExitHook = p);
}

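/*
 * Initialize every RTAI subsystem that is configured as built into the
 * scheduler module rather than loaded separately.
 */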
static inline int rtai_init_features(void)
{
#ifdef CONFIG_RTAI_LEDS_BUILTIN
        __rtai_leds_init();
#endif
#ifdef CONFIG_RTAI_SEM_BUILTIN
        __rtai_sem_init();
#endif
#ifdef CONFIG_RTAI_MSG_BUILTIN
        __rtai_msg_init();
#endif
#ifdef CONFIG_RTAI_MBX_BUILTIN
        __rtai_mbx_init();
#endif
#ifdef CONFIG_RTAI_TBX_BUILTIN
        __rtai_tbx_init();
#endif
#ifdef CONFIG_RTAI_MQ_BUILTIN
        __rtai_mq_init();
#endif
#ifdef CONFIG_RTAI_BITS_BUILTIN
        __rtai_bits_init();
#endif
#ifdef CONFIG_RTAI_TASKLETS_BUILTIN
        __rtai_tasklets_init();
#endif
#ifdef CONFIG_RTAI_FIFOS_BUILTIN
        __rtai_fifos_init();
#endif
#ifdef CONFIG_RTAI_NETRPC_BUILTIN
        __rtai_netrpc_init();
#endif
#ifdef CONFIG_RTAI_SHM_BUILTIN
        __rtai_shm_init();
#endif
#ifdef CONFIG_RTAI_USI_BUILTIN
        __rtai_usi_init();
#endif
#ifdef CONFIG_RTAI_MATH_BUILTIN
        __rtai_math_init();
#endif

        return 0;
}

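/* Tear the built-in subsystems down in the reverse order of initialization. */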
static inline void rtai_cleanup_features(void)
{
#ifdef CONFIG_RTAI_MATH_BUILTIN
        __rtai_math_exit();
#endif
#ifdef CONFIG_RTAI_USI_BUILTIN
        __rtai_usi_exit();
#endif
#ifdef CONFIG_RTAI_SHM_BUILTIN
        __rtai_shm_exit();
#endif
#ifdef CONFIG_RTAI_NETRPC_BUILTIN
        __rtai_netrpc_exit();
#endif
#ifdef CONFIG_RTAI_FIFOS_BUILTIN
        __rtai_fifos_exit();
#endif
#ifdef CONFIG_RTAI_TASKLETS_BUILTIN
        __rtai_tasklets_exit();
#endif
#ifdef CONFIG_RTAI_BITS_BUILTIN
        __rtai_bits_exit();
#endif
#ifdef CONFIG_RTAI_MQ_BUILTIN
        __rtai_mq_exit();
#endif
#ifdef CONFIG_RTAI_TBX_BUILTIN
        __rtai_tbx_exit();
#endif
#ifdef CONFIG_RTAI_MBX_BUILTIN
        __rtai_mbx_exit();
#endif
#ifdef CONFIG_RTAI_MSG_BUILTIN
        __rtai_msg_exit();
#endif
#ifdef CONFIG_RTAI_SEM_BUILTIN
        __rtai_sem_exit();
#endif
#ifdef CONFIG_RTAI_LEDS_BUILTIN
        __rtai_leds_exit();
#endif
}

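/*
 * Services implemented in the scheduler sources: stack checking and the
 * creation of kernel-thread based RTAI tasks (the _cpuid variant also
 * selects the CPU the task runs on).
 */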
int rt_check_current_stack(void);

int rt_kthread_init(RT_TASK *task,
                    void (*rt_thread)(int),
                    int data,
                    int stack_size,
                    int priority,
                    int uses_fpu,
                    void (*signal)(void));

int rt_kthread_init_cpuid(RT_TASK *task,
                          void (*rt_thread)(int),
                          int data,
                          int stack_size,
                          int priority,
                          int uses_fpu,
                          void (*signal)(void),
                          unsigned int cpuid);

#endif /* __KERNEL__ */

#endif /* !_RTAI_SCHEDCORE_H */