NuttX context switch (RISC-V)

NuttX's context switch is easiest to trace starting from sleep(). The call flow is:
sleep —> clock_nanosleep —> nxsig_nanosleep —> nxsig_timedwait —> up_block_task —>
up_switchcontext —> sys_call2 —> ecall —> irq_handler —>
up_dispatch_all —> up_swint —> up_copystate —> up_swint
—> up_dispatch_all —> irq_handler —> mret —> new task.
The core function that drives the reschedule is up_block_task(); three other functions can also trigger a context switch: up_unblock_task(), up_reprioritize_rtr(), and up_release_pending().
Within up_block_task(), the switch is also handled differently depending on whether it happens in interrupt context or task context: in interrupt context, the two functions up_savestate() and up_restorestate() are used instead.
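
For reference, up_savestate() and up_restorestate() are thin macros over the interrupt register save area. A minimal sketch of how the rv32im port defines them (an assumption based on the surrounding code; check arch/risc-v/src/rv32im/up_internal.h in your tree):

/* Sketch, not verbatim source.  g_current_regs points at the register
 * frame that irq_handler pushed onto the stack.  "Saving" a task's state
 * copies that frame into its TCB; "restoring" simply points
 * g_current_regs at another TCB's saved frame, so the interrupt return
 * path reloads the new task's registers.
 */

#define up_savestate(regs)    up_copystate(regs, (uint32_t *)g_current_regs)
#define up_restorestate(regs) (g_current_regs = (regs))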

110 unsigned int sleep(unsigned int seconds)
111 {
112   struct timespec rqtp;
113   struct timespec rmtp;
114   unsigned int remaining = 0;
115   int ret;
116
117   /* Don't sleep if seconds == 0 */
118
119   if (seconds > 0)
120     {
121       /* Let clock_nanosleep() do all of the work. */
122
123       rqtp.tv_sec  = seconds;
124       rqtp.tv_nsec = 0;
125
126       ret = **clock_nanosleep**(CLOCK_REALTIME, 0, &rqtp, &rmtp);
127
128       /* clock_nanosleep() should only fail if it was interrupted by a signal,
129        * but we treat all errors the same.
130        */
131
132       if (ret < 0)
133         {
134           remaining = rmtp.tv_sec;
135           if (remaining < seconds && rmtp.tv_nsec >= 500000000)
136             {
137               /* Round up */
138
139               remaining++;
140             }
141         }
142
143       return remaining;
144     }
145
146   return 0;
147 }
274 int clock_nanosleep(clockid_t clockid, int flags,
275                     FAR const struct timespec *rqtp,
276                     FAR struct timespec *rmtp)
277 {
278   int ret;
279
280   /* clock_nanosleep() is a cancellation point */
281
282   (void)enter_cancellation_point();
283
284   /* Check if absolute time is selected */
285
286   if ((flags & TIMER_ABSTIME) != 0)
287     {
288       struct timespec reltime;
289       struct timespec now;
290       irqstate_t irqstate;
             ...
310       /* Now that we have the relative time, the remaining operations
311        * are equivalent to nxsig_nanosleep().
312        */
313
314       ret = **nxsig_nanosleep**(&reltime, rmtp);
315       leave_critical_section(irqstate);
316     }
317   else
318     {
319       /* In the relative time case, clock_nanosleep() is equivalent to
320        * nanosleep.  In this case, it is a paper thin wrapper around
321        * nxsig_nanosleep().
322        */
323
324       ret = **nxsig_nanosleep**(rqtp, rmtp);
325     }
          ...
}
106 int nxsig_nanosleep(FAR const struct timespec *rqtp,
107                     FAR struct timespec *rmtp)
108 {
109   irqstate_t flags;
110   clock_t starttick;
111   sigset_t set;
112   int ret;
113
114   /* Sanity check */
115
116   if (rqtp == NULL || rqtp->tv_nsec < 0 || rqtp->tv_nsec >= 1000000000)
117     {
118       return -EINVAL;
119     }
...
136   /* nxsig_nanosleep is a simple application of nxsig_timedwait. */
137
138   ret = **nxsig_timedwait**(&set, NULL, rqtp);
139
...
}
240 int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info,
241                     FAR const struct timespec *timeout)
242 {
243   FAR struct tcb_s *rtcb = this_task();
244   sigset_t intersection;
245   FAR sigpendq_t *sigpend;
246   irqstate_t flags;
247   int32_t waitticks;
248   int ret;
249
250   DEBUGASSERT(set != NULL && rtcb->waitdog == NULL);
...
342           rtcb->waitdog = wd_create();
343           DEBUGASSERT(rtcb->waitdog);
344
345           if (rtcb->waitdog)
346             {
347               /* This little bit of nonsense is necessary for some
348                * processors where sizeof(pointer) < sizeof(uint32_t).
349                * see wdog.h.
350                */
351
352               union wdparm_u wdparm;
353               wdparm.pvarg = (FAR void *)rtcb;
354
355               /* Start the watchdog */
356
357               (void)wd_start(rtcb->waitdog, waitticks,
358                              (wdentry_t)nxsig_timeout, 1, wdparm.pvarg);
359
360               /* Now wait for either the signal or the watchdog, but
361                * first, make sure this is not the idle task,
362                * descheduling that isn't going to end well.
363                */
364
365               DEBUGASSERT(NULL != rtcb->flink);
366               up_block_task(rtcb, TSTATE_WAIT_SIG);
367
368               /* We no longer need the watchdog */
369
370               wd_delete(rtcb->waitdog);
371               rtcb->waitdog = NULL;
372             }
...
}
arch/risc-v/src/rv32im/up_blocktask.c
 78 void up_block_task(struct tcb_s *tcb, tstate_t task_state)
 79 {
 80   struct tcb_s *rtcb = this_task();
 81   bool switch_needed;
 82
 83   /* Verify that the context switch can be performed */
 84
 85   DEBUGASSERT((tcb->task_state >= FIRST_READY_TO_RUN_STATE) &&
 86               (tcb->task_state <= LAST_READY_TO_RUN_STATE));
 87
 88   /* Remove the tcb task from the ready-to-run list.  If we
 89    * are blocking the task at the head of the task list (the
 90    * most likely case), then a context switch to the next
 91    * ready-to-run task is needed. In this case, it should
 92    * also be true that rtcb == tcb.
 93    */
 94
 95   switch_needed = sched_removereadytorun(tcb);
 96
 97   /* Add the task to the specified blocked task list */
 98
 99   sched_addblocked(tcb, (tstate_t)task_state);
100
101   /* If there are any pending tasks, then add them to the ready-to-run
102    * task list now
103    */
104
105   if (g_pendingtasks.head)
106     {
107       switch_needed |= sched_mergepending();
108     }
109
110   /* Now, perform the context switch if one is needed */
111
112   if (switch_needed)
113     {
114       /* Update scheduler parameters */
115
116       sched_suspend_scheduler(rtcb);
117
118       /* Are we in an interrupt handler? */
119
120       if (g_current_regs)
121         {
122           /* Yes, then we have to do things differently.
123            * Just copy the g_current_regs into the OLD rtcb.
124            */
125
126           **up_savestate**(rtcb->xcp.regs);
127
128           /* Restore the exception context of the rtcb at the (new) head
129            * of the ready-to-run task list.
130            */
131
132           rtcb = this_task();
133
134           /* Reset scheduler parameters */
135
136           sched_resume_scheduler(rtcb);
137
138           /* Then switch contexts.  Any necessary address environment
139            * changes will be made when the interrupt returns.
140            */
141
142           **up_restorestate**(rtcb->xcp.regs);
143         }
144
145       /* No, then we will need to perform the user context switch */
146
147       else
148         {
149           /* Get the context of the task at the head of the ready to
150            * run list.
151            */
152
153           struct tcb_s *nexttcb = this_task();
154
155 #ifdef CONFIG_ARCH_ADDRENV
156           /* Make sure that the address environment for the previously
157            * running task is closed down gracefully (data caches dump,
158            * MMU flushed) and set up the address environment for the new
159            * thread at the head of the ready-to-run list.
160            */
161
162           (void)group_addrenv(nexttcb);
163 #endif
164           /* Reset scheduler parameters */
165
166           sched_resume_scheduler(nexttcb);
167
168           /* Then switch contexts */
169
170           **up_switchcontext**(rtcb->xcp.regs, nexttcb->xcp.regs);
171
172           /* up_switchcontext forces a context switch to the task at the
173            * head of the ready-to-run list.  It does not 'return' in the
174            * normal sense.  When it does return, it is because the blocked
175            * task is again ready to run and has execution priority.
176            */
177         }
178     }
179 }
arch/risc-v/include/rv32im/syscall.h
108 #define up_switchcontext(saveregs, restoreregs) \
109   (void)sys_call2(**SYS_switch_context**, (uintptr_t)saveregs, (uintptr_t)restoreregs)
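
Note that sys_call0..sys_call5 are ordinary C-callable functions implemented by the assembly labels shown next: under the RISC-V calling convention the first argument (the syscall number) arrives in a0 and the remaining arguments in a1, a2, ..., which is exactly the layout the ecall handler expects. A sketch of the assumed prototypes (they should match arch/risc-v/include/rv32im/syscall.h):

/* Sketch of the assumed prototypes.  The C ABI already places nbr in a0
 * and parm1/parm2 in a1/a2, so no argument shuffling is needed before
 * the ecall instruction.
 */

uintptr_t sys_call0(unsigned int nbr);
uintptr_t sys_call1(unsigned int nbr, uintptr_t parm1);
uintptr_t sys_call2(unsigned int nbr, uintptr_t parm1, uintptr_t parm2);
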
 89 sys_call0:  /* a0 holds the syscall number */
 90 sys_call1:  /* a0 holds the syscall number, argument in a1 */
 91 sys_call2:  /* a0 holds the syscall number, arguments in a1 and a2 */
 92 sys_call3:  /* a0 holds the syscall number, arguments in a1, a2, and a3 */
 93 sys_call4:  /* a0 holds the syscall number, arguments in a1, a2, a3 and a4 */
 94 sys_call5:  /* a0 holds the syscall number, arguments in a1, a2, a3, a4 and a5 */
 95
 96     /* Issue the ECALL opcode to perform a SW interrupt to the OS */
 97
 98    ecall
 99
100     /* The actual interrupt may not occur for a few more cycles.  Let's
101      * put a few nop's here in hope that the SW interrupt occurs during
102      * the sequence of nops.
103      */
104
105     nop
106     nop
107
108     /* Then return with the result of the software interrupt in a0 */
109
110     ret
111     nop
 59 _vectors:
...
 94     j       **irq_handler /* ECALL */**
107 /****************************************************************************
108  * Name: irq_handler
109  ****************************************************************************/
110
111     .globl      irq_handler
112     .type       irq_handler, function
113
114 irq_handler:
115     addi        x2, x2, -33*4   /* x2 is the stack pointer */
116     sw      x1, 1*4(x2)
117     addi        x1, x2, 33*4
118     sw      x1, 2*4(x2)
119     sw      x3, 3*4(x2)
120     sw      x4, 4*4(x2)
121     sw      x5, 5*4(x2)
122     sw      x6, 6*4(x2)
123     sw      x7, 7*4(x2)
124     sw      x8, 8*4(x2)
125     sw      x9, 9*4(x2)
126     sw      x10, 10*4(x2)
127     sw      x11, 11*4(x2)
128     sw      x12, 12*4(x2)
129     sw      x13, 13*4(x2)
130     sw      x14, 14*4(x2)
131     sw      x15, 15*4(x2)
132     sw      x16, 16*4(x2)
133     sw      x17, 17*4(x2)
134     sw      x18, 18*4(x2)
135     sw      x19, 19*4(x2)
136     sw      x20, 20*4(x2)
137     sw      x21, 21*4(x2)
138     sw      x22, 22*4(x2)
139     sw      x23, 23*4(x2)
140     sw      x24, 24*4(x2)
141     sw      x25, 25*4(x2)
142     sw      x26, 26*4(x2)
143     sw      x27, 27*4(x2)
144     sw      x28, 28*4(x2)
145     sw      x29, 29*4(x2)
146     sw      x30, 30*4(x2)
147     sw      x31, 31*4(x2)
148     csrr        x10, mepc
149     sw      x10, 0*4(x2)
150     csrr        x10, mstatus
151     sw      x10, 32*4(x2)
152     mv      x10, x2
153     call        **up_dispatch_all**
154     mv      x1, x10
155     lw      x10, 32*4(x1)
156     csrw        mstatus, x10
157     lw      x10, 0*4(x1)
158     csrw        mepc, x10
159     lw      x31, 31*4(x1)
160     lw      x30, 30*4(x1)
161     lw      x29, 29*4(x1)
162     lw      x28, 28*4(x1)
163     lw      x27, 27*4(x1)
164     lw      x26, 26*4(x1)
165     lw      x25, 25*4(x1)
166     lw      x24, 24*4(x1)
167     lw      x23, 23*4(x1)
168     lw      x22, 22*4(x1)
169     lw      x21, 21*4(x1)
170     lw      x20, 20*4(x1)
171     lw      x19, 19*4(x1)
172     lw      x18, 18*4(x1)
173     lw      x17, 17*4(x1)
174     lw      x16, 16*4(x1)
175     lw      x15, 15*4(x1)
176     lw      x14, 14*4(x1)
177     lw      x13, 13*4(x1)
178     lw      x12, 12*4(x1)
179     lw      x11, 11*4(x1)
180     lw      x10, 10*4(x1)
181     lw      x9, 9*4(x1)
182     lw      x8, 8*4(x1)
183     lw      x7, 7*4(x1)
184     lw      x6, 6*4(x1)
185     lw      x5, 5*4(x1)
186     lw      x4, 4*4(x1)
187     lw      x3, 3*4(x1)
188     lw      x2, 2*4(x1)
189     lw      x1, 1*4(x1)
190     mret
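
The 33-word frame built above maps one-to-one onto the REG_* indices used by the C code (REG_EPC, REG_A0, REG_A1, ...). A sketch of the layout implied by the assembly (index names are assumptions; the real definitions live in arch/risc-v/include/rv32im/irq.h):

/* Register save frame implied by irq_handler (33 x 4 bytes).  Indices
 * match the offsets used by the sw/lw instructions above.
 */

#define REG_EPC        0                /* mepc: where the trap returns   */
#define REG_X1         1                /* ra ... x1-x31 at indices 1-31  */
#define REG_A0         10               /* x10: syscall number / result   */
#define REG_A1         11               /* x11: first syscall argument    */
#define REG_A2         12               /* x12: second syscall argument   */
#define REG_MSTATUS    32               /* saved mstatus                  */

#define INT_XCPT_REGS  33               /* words copied by up_copystate() */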


160 uint32_t *up_dispatch_all(uint32_t *regs)
161 {
162   uint32_t *savestate;
163   uint32_t cause;
164   int hirq = 0;
165
166   savestate = (uint32_t *)g_current_regs;
167   g_current_regs = regs;
168
169   __asm__ volatile("csrr %0, mcause" : "=r"(cause));
170   if (cause & 0x80000000)
171     {
172       cause &= 0x7fffffff;
173
174       /* Deliver the IRQ */
175
176       if (cause == 11)
177         {
178           hirq = up_dispatch_irq(cause, regs);
179         }
180 #ifdef CONFIG_RISCV_MTIME
181       else if (cause == 7)
182         {
183           riscv_mtimer_interrupt();
184         }
185 #endif
186       else
187         {
188           irq_unexpected_isr(cause, regs, NULL);
189         }
190     }
191   else if (cause == 11)
192     {
193       up_swint(11, regs, NULL);
194     }
195   else
196     {
197       irq_unexpected_isr(cause, regs, NULL);
198     }
199
200 #ifdef CONFIG_ARCH_FPU
201   if (regs != g_current_regs)
202     {
203       up_restorefpu((uint32_t *)g_current_regs);
204     }
205 #endif
206
207   /* If a context switch occurred while processing the interrupt then
208    * g_current_regs may have changed value.  If we return any value different
209    * from the input regs, then the lower level will know that a context
210    * switch occurred during interrupt processing.
211    */
212
213   regs = (uint32_t *)g_current_regs;
214   g_current_regs = savestate;
215
216 #ifdef CONFIG_ARCH_HIPRI_INTERRUPT
217
218   /* Restore the irqflags for the new task */
219
220   if (!hirq)
221     {
222       struct tcb_s *rtcb = this_task();
223       up_irq_restore(rtcb->xcp.irqflags);
224     }
225 #else
226   (void)hirq;
227 #endif
228
229   /* Record the new "running" task.  g_running_tasks[] is only used by
230    * assertion logic for reporting crashes.
231    */
232
233   g_running_tasks[this_cpu()] = this_task();
234
235   /* Return the stack pointer */
236
237   return regs;
238 }
131 int up_swint(int irq, FAR void *context, FAR void *arg)
132 {
133   uint32_t *regs = (uint32_t *)context;
134
135   DEBUGASSERT(regs && regs == g_current_regs);
136
137   /* Software interrupt 0 is invoked with REG_A0 (REG_X10) = system call
138    * command and REG_A1-A6 = variable number of
139    * arguments depending on the system call.
140    */
141
142 #ifdef CONFIG_DEBUG_SYSCALL_INFO
143   svcinfo("Entry: regs: %p cmd: %d\n", regs, regs[REG_A0]);
144   up_registerdump(regs);
145 #endif
146
147   /* Skip ECALL instruction */
148
149   regs[REG_EPC] += 4;
150
151   /* Handle the SWInt according to the command in $a0 */
152
153   switch (regs[REG_A0])
154     {
155       /* A0=SYS_save_context:  This is a save context command:
156        *
157        *  int up_saveusercontext(uint32_t *saveregs);
158        *
159        * At this point, the following values are saved in context:
160        *
161        * A0 = SYS_save_context
162        * A1 = saveregs
163        *
164        * Return:
165        * 0: Normal return
166        * 1: Context switch return
167        *
168        * In this case, we simply need to copy the current registers to the
169        * save register space referenced by the saved A1 and return.
170        */
171
172       case SYS_save_context:
173         {
174           DEBUGASSERT(regs[REG_A1] != 0);
175           up_copystate((uint32_t *)regs[REG_A1], regs);
176           ((uint32_t *)regs[REG_A1])[REG_A0] = 1;
177         }
178       break;
179
180       /* A0=SYS_restore_context: This is a restore context command:
181        *
182        *   void up_fullcontextrestore(uint32_t *restoreregs) noreturn_function;
183        *
184        * At this point, the following values are saved in context:
185        *
186        *   A0 = SYS_restore_context
187        *   A1 = restoreregs
188        *
189        * In this case, we simply need to set g_current_regs to the restore register
190        * area referenced in the saved A1.  context == g_current_regs is the normal
191        * exception return.  By setting g_current_regs = context[A1], we force
192        * the return to the saved context referenced in $a1.
193        */
194
195       case SYS_restore_context:
196         {
197           DEBUGASSERT(regs[REG_A1] != 0);
198           g_current_regs = (uint32_t *)regs[REG_A1];
199         }
200         break;
201
202       /* A0=SYS_switch_context: This is a switch context command:
203        *
204        *   void up_switchcontext(uint32_t *saveregs, uint32_t *restoreregs);
205        *
206        * At this point, the following values are saved in context:
207        *
208        *   A0 = SYS_switch_context
209        *   A1 = saveregs
210        *   A2 = restoreregs
211        *
212        * In this case, we save the context registers to the save register
213        * area referenced by the saved contents of A1 and then set
214        * g_current_regs to the save register area referenced by the saved
215        * contents of A2.
216        */
217
218       case **SYS_switch_context**:
219         {
220           DEBUGASSERT(regs[REG_A1] != 0 && regs[REG_A2] != 0);
221           up_copystate((uint32_t *)regs[REG_A1], regs);
222           g_current_regs = (uint32_t *)regs[REG_A2];
223         }
224         break;
225
226       /* A0=SYS_syscall_return: This is a syscall return command:
227        *
228        *   void up_syscall_return(void);
229        *
230        * At this point, the following values are saved in context:
231        *
232        *   A0 = SYS_syscall_return
233        *
234        * We need to restore the saved return address and return in
235        * unprivileged thread mode.
236        */
237
238 #ifdef CONFIG_BUILD_KERNEL
239       case SYS_syscall_return:
240         {
241           struct tcb_s *rtcb = sched_self();
242           int index = (int)rtcb->xcp.nsyscalls - 1;
243
244           /* Make sure that there is a saved syscall return address. */
245
246           DEBUGASSERT(index >= 0);
247
248           /* Setup to return to the saved syscall return address in
249            * the original mode.
250            */
251
252           g_current_regs[REG_EPC] = rtcb->xcp.syscall[index].sysreturn;
253 #error "Missing logic -- need to restore the original mode"
254           rtcb->xcp.nsyscalls   = index;
255         }
256         break;
257 #endif
258
259       /* This is not an architecture-specific system call.  If NuttX is built
260        * as a standalone kernel with a system call interface, then all of the
261        * additional system calls must be handled as in the default case.
262        */
263
264       default:
265         {
266 #ifdef CONFIG_BUILD_KERNEL
267           FAR struct tcb_s *rtcb = sched_self();
268           int index = rtcb->xcp.nsyscalls;
269
270           /* Verify that the SYS call number is within range */
271
272           DEBUGASSERT(g_current_regs[REG_A0] < SYS_maxsyscall);
273
274           /* Make sure that there is no saved syscall
275            * return address.  We cannot yet handle nested system calls.
276            */
277
278           DEBUGASSERT(index < CONFIG_SYS_NNEST);
279
280           /* Setup to return to dispatch_syscall in privileged mode. */
281
282           rtcb->xcp.syscall[index].sysreturn = regs[REG_EPC];
283 #error "Missing logic -- Need to save mode"
284           rtcb->xcp.nsyscalls  = index + 1;
285
286           regs[REG_EPC] = (uint32_t)dispatch_syscall;
287 #error "Missing logic -- Need to set privileged mode"
288
289           /* Offset A0 to account for the reserved values */
290
291           g_current_regs[REG_A0] -= CONFIG_SYS_RESERVED;
292 #else
293           svcerr("ERROR: Bad SYS call: %d\n", regs[REG_A0]);
294 #endif
295         }
296         break;
297     }
298
299   /* Report what happened.  That might be difficult in the case of a context switch */
300
301 #ifdef CONFIG_DEBUG_SYSCALL_INFO
302   if (regs != g_current_regs)
303     {
304       svcinfo("SWInt Return: Context switch!\n");
305       up_registerdump((const uint32_t *)g_current_regs);
306     }
307   else
308     {
309       svcinfo("SWInt Return: %d\n", regs[REG_A0]);
310     }
311 #endif
312
313   return OK;
314 }
 69 void up_copystate(uint32_t *dest, uint32_t *src)
 70 {
 71   int i;
 72
 73   /* In the MIPS model, the state is copied from the stack to the TCB,
 74    * but only a reference is passed to get the state from the TCB.  So the
 75    * following check avoids copying the TCB save area onto itself:
 76    */
 77
 78   if (src != dest)
 79     {
 80       for (i = 0; i < INT_XCPT_REGS; i++)
 81         {
 82           dest[i] = src[i];
 83         }
 84
 85 #ifdef CONFIG_ARCH_FPU
 86       up_savefpu(dest);
 87 #endif
 88     }
 89 }

Notes:

1. The RISC-V register table

In practice, the programmer doesn’t use this notation for the registers. Though
x1 to x31 are all equally general-use registers as far as the processor is
concerned, by convention certain registers are used for special tasks. In
assembler, they are given standardized names as part of the RISC-V application
binary interface (ABI). This is what you will usually see in code listings. If
you really want to see the numeric register names, the -M argument to objdump
will provide them.

| Register | ABI | Use by convention | Preserved? |
|----------|-----|-------------------|------------|
| x0 | zero | hardwired to 0, ignores writes | n/a |
| x1 | ra | return address for jumps | no |
| x2 | sp | stack pointer | yes |
| x3 | gp | global pointer | n/a |
| x4 | tp | thread pointer | n/a |
| x5 | t0 | temporary register 0 | no |
| x6 | t1 | temporary register 1 | no |
| x7 | t2 | temporary register 2 | no |
| x8 | s0 or fp | saved register 0 or frame pointer | yes |
| x9 | s1 | saved register 1 | yes |
| x10 | a0 | return value or function argument 0 | no |
| x11 | a1 | return value or function argument 1 | no |
| x12 | a2 | function argument 2 | no |
| x13 | a3 | function argument 3 | no |
| x14 | a4 | function argument 4 | no |
| x15 | a5 | function argument 5 | no |
| x16 | a6 | function argument 6 | no |
| x17 | a7 | function argument 7 | no |
| x18 | s2 | saved register 2 | yes |
| x19 | s3 | saved register 3 | yes |
| x20 | s4 | saved register 4 | yes |
| x21 | s5 | saved register 5 | yes |
| x22 | s6 | saved register 6 | yes |
| x23 | s7 | saved register 7 | yes |
| x24 | s8 | saved register 8 | yes |
| x25 | s9 | saved register 9 | yes |
| x26 | s10 | saved register 10 | yes |
| x27 | s11 | saved register 11 | yes |
| x28 | t3 | temporary register 3 | no |
| x29 | t4 | temporary register 4 | no |
| x30 | t5 | temporary register 5 | no |
| x31 | t6 | temporary register 6 | no |
| pc | (none) | program counter | n/a |

2. struct tcb_s and enum tstate_e

enum tstate_e
{
  TSTATE_TASK_INVALID    = 0, /* INVALID      - The TCB is uninitialized */
  TSTATE_TASK_PENDING,        /* READY_TO_RUN - Pending preemption unlock */
  TSTATE_TASK_READYTORUN,     /* READY-TO-RUN - But not running */
#ifdef CONFIG_SMP
  TSTATE_TASK_ASSIGNED,       /* READY-TO-RUN - Not running, but assigned to a CPU */
#endif
  TSTATE_TASK_RUNNING,        /* READY_TO_RUN - And running */

  TSTATE_TASK_INACTIVE,       /* BLOCKED      - Initialized but not yet activated */
  TSTATE_WAIT_SEM,            /* BLOCKED      - Waiting for a semaphore */
#ifndef CONFIG_DISABLE_SIGNALS
  TSTATE_WAIT_SIG,            /* BLOCKED      - Waiting for a signal */
#endif
#ifndef CONFIG_DISABLE_MQUEUE
  TSTATE_WAIT_MQNOTEMPTY,     /* BLOCKED      - Waiting for a MQ to become not empty. */
  TSTATE_WAIT_MQNOTFULL,      /* BLOCKED      - Waiting for a MQ to become not full. */
#endif
#ifdef CONFIG_PAGING
  TSTATE_WAIT_PAGEFILL,       /* BLOCKED      - Waiting for page fill */
#endif
#ifdef CONFIG_SIG_SIGSTOP_ACTION
  TSTATE_TASK_STOPPED,        /* BLOCKED      - Waiting for SIGCONT */
#endif

  NUM_TASK_STATES             /* Must be last */
};
typedef enum tstate_e tstate_t;

/* Task Lists ***************************************************************/
/* The state of a task is indicated both by the task_state field of the TCB
 * and by a series of task lists.  All of these task lists are declared
 * below. Although it is not always necessary, most of these lists are
 * prioritized so that common list handling logic can be used (only the
 * g_readytorun, the g_pendingtasks, and the g_waitingforsemaphore lists
 * need to be prioritized).
 */

/* This is the list of all tasks that are ready to run.  This is a
 * prioritized list with head of the list holding the highest priority
 * (unassigned) task.  In the non-SMP case, the head of this list is the
 * currently active task and the tail of this list, the lowest priority
 * task, is always the IDLE task.
 */

volatile dq_queue_t g_readytorun;

#ifdef CONFIG_SMP
/* In order to support SMP, the function of the g_readytorun list changes.
 * The g_readytorun list is still used but in the SMP case it will contain only:
 *
 *  - Only tasks/threads that are eligible to run, but not currently running,
 *    and
 *  - Tasks/threads that have not been assigned to a CPU.
 *
 * Otherwise, the TCB will be retained in an assigned task list,
 * g_assignedtasks.  As its name suggests, the g_assignedtasks queue for CPU
 * 'n' contains only tasks/threads that are assigned to CPU 'n'.  Tasks/
 * threads would be assigned a particular CPU by one of two mechanisms:
 *
 *  - (Semi-)permanently through an RTOS interfaces such as
 *    pthread_attr_setaffinity(), or
 *  - Temporarily through scheduling logic when a previously unassigned task
 *    is made to run.
 *
 * Tasks/threads that are assigned to a CPU via an interface like
 * pthread_attr_setaffinity() would never go into the g_readytorun list, but
 * would only go into the g_assignedtasks[n] list for the CPU 'n' to which
 * the thread has been assigned.  Hence, the g_readytorun list would hold
 * only unassigned tasks/threads.
 *
 * Like the g_readytorun list in the non-SMP case, each g_assignedtasks[] list
 * is prioritized:  The head of the list is the currently active task on this
 * CPU.  Tasks after the active task are ready-to-run and assigned to this
 * CPU. The tail of this assigned task list, the lowest priority task, is
 * always the CPU's IDLE task.
 */

volatile dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS];

/* g_running_tasks[] holds a reference to the running task for each CPU.
 * It is valid only when up_interrupt_context() returns true.
 */

FAR struct tcb_s *g_running_tasks[CONFIG_SMP_NCPUS];

#else

FAR struct tcb_s *g_running_tasks[1];

#endif

/* This is the list of all tasks that are ready-to-run, but cannot be placed
 * in the g_readytorun list because:  (1) They are higher priority than the
 * currently active task at the head of the g_readytorun list, and (2) the
 * currently active task has disabled pre-emption.
 */

volatile dq_queue_t g_pendingtasks;

/* This is the list of all tasks that are blocked waiting for a semaphore */

volatile dq_queue_t g_waitingforsemaphore;

#ifndef CONFIG_DISABLE_SIGNALS
/* This is the list of all tasks that are blocked waiting for a signal */

volatile dq_queue_t g_waitingforsignal;
#endif

#ifndef CONFIG_DISABLE_MQUEUE
/* This is the list of all tasks that are blocked waiting for a message
 * queue to become non-empty.
 */

volatile dq_queue_t g_waitingformqnotempty;
#endif

#ifndef CONFIG_DISABLE_MQUEUE
/* This is the list of all tasks that are blocked waiting for a message
 * queue to become non-full.
 */

volatile dq_queue_t g_waitingformqnotfull;
#endif

#ifdef CONFIG_PAGING
/* This is the list of all tasks that are blocking waiting for a page fill */

volatile dq_queue_t g_waitingforfill;
#endif

#ifdef CONFIG_SIG_SIGSTOP_ACTION
/* This is the list of all tasks that have been stopped via SIGSTOP or SIGTSTP */

volatile dq_queue_t g_stoppedtasks;
#endif

/* This is the list of all tasks that have been initialized, but not yet
 * activated. NOTE:  This is the only list that is not prioritized.
 */

volatile dq_queue_t g_inactivetasks;

/* This is the value of the last process ID assigned to a task */

volatile pid_t g_lastpid;

/* The following hash table is used for two things:
 *
 * 1. This hash table greatly speeds the determination of a new unique
 *    process ID for a task, and
 * 2. Is used to quickly map a process ID into a TCB.
 *
 * It has the side effects of using more memory and limiting
 * the number of tasks to CONFIG_MAX_TASKS.
 */

struct pidhash_s g_pidhash[CONFIG_MAX_TASKS];

/* The following definitions are determined by tstate_t.  Ordering of values
 * in the enumeration is important!
 */

#define FIRST_READY_TO_RUN_STATE   TSTATE_TASK_READYTORUN
#define LAST_READY_TO_RUN_STATE    TSTATE_TASK_RUNNING
#define FIRST_ASSIGNED_STATE       TSTATE_TASK_ASSIGNED
#define LAST_ASSIGNED_STATE        TSTATE_TASK_RUNNING
#define FIRST_BLOCKED_STATE        TSTATE_TASK_INACTIVE
#define LAST_BLOCKED_STATE         (NUM_TASK_STATES-1)

struct tcb_s
{
  /* Fields used to support list management *************************************/

  FAR struct tcb_s *flink;               /* Doubly linked list                  */
  FAR struct tcb_s *blink;

  /* Task Group *****************************************************************/

  FAR struct task_group_s *group;        /* Pointer to shared task group data   */

  /* Task Management Fields *****************************************************/

  pid_t    pid;                          /* This is the ID of the thread        */
  start_t  start;                        /* Thread start function               */
  entry_t  entry;                        /* Entry Point into the thread         */
  uint8_t  sched_priority;               /* Current priority of the thread      */
  uint8_t  init_priority;                /* Initial priority of the thread      */

#ifdef CONFIG_PRIORITY_INHERITANCE
#if CONFIG_SEM_NNESTPRIO > 0
  uint8_t  npend_reprio;                 /* Number of nested reprioritizations  */
  uint8_t  pend_reprios[CONFIG_SEM_NNESTPRIO];
#endif
  uint8_t  base_priority;                /* "Normal" priority of the thread     */
#endif

  uint8_t  task_state;                   /* Current state of the thread         */
#ifdef CONFIG_SMP
  uint8_t  cpu;                          /* CPU index if running or assigned    */
  cpu_set_t affinity;                    /* Bit set of permitted CPUs           */
#endif
  uint16_t flags;                        /* Misc. general status flags          */
  int16_t  lockcount;                    /* 0=preemptable (not-locked)          */
#ifdef CONFIG_IRQCOUNT
  int16_t  irqcount;                     /* 0=Not in critical section           */
#endif
#ifdef CONFIG_CANCELLATION_POINTS
  int16_t  cpcount;                      /* Nested cancellation point count     */
#endif

#if CONFIG_RR_INTERVAL > 0 || defined(CONFIG_SCHED_SPORADIC)
  int32_t  timeslice;                    /* RR timeslice OR Sporadic budget     */
                                         /* interval remaining                  */
#endif
#ifdef CONFIG_SCHED_SPORADIC
  FAR struct sporadic_s *sporadic;       /* Sporadic scheduling parameters      */
#endif

  FAR struct wdog_s *waitdog;            /* All timed waits use this timer      */

  /* Stack-Related Fields *******************************************************/

  size_t    adj_stack_size;              /* Stack size after adjustment         */
                                         /* for hardware, processor, etc.       */
                                         /* (for debug purposes only)           */
  FAR void *stack_alloc_ptr;             /* Pointer to allocated stack          */
                                         /* Need to deallocate stack            */
  FAR void *adj_stack_ptr;               /* Adjusted stack_alloc_ptr for HW     */
                                         /* The initial stack pointer value     */

  /* External Module Support ****************************************************/

#ifdef CONFIG_PIC
  FAR struct dspace_s *dspace;           /* Allocated area for .bss and .data   */
#endif

  /* POSIX Semaphore Control Fields *********************************************/

  sem_t *waitsem;                        /* Semaphore ID waiting on             */

  /* POSIX Signal Control Fields ************************************************/

#ifndef CONFIG_DISABLE_SIGNALS
  sigset_t   sigprocmask;                /* Signals that are blocked            */
  sigset_t   sigwaitmask;                /* Waiting for pending signals         */
  sq_queue_t sigpendactionq;             /* List of pending signal actions      */
  sq_queue_t sigpostedq;                 /* List of posted signals              */
  siginfo_t  sigunbinfo;                 /* Signal info when task unblocked     */
#endif

  /* POSIX Named Message Queue Fields *******************************************/

#ifndef CONFIG_DISABLE_MQUEUE
  FAR struct mqueue_inode_s *msgwaitq;   /* Waiting for this message queue      */
#endif

  /* POSIX Thread Specific Data *************************************************/

#if CONFIG_NPTHREAD_KEYS > 0
  FAR void *pthread_data[CONFIG_NPTHREAD_KEYS];
#endif

  /* Pre-emption monitor support ************************************************/

#ifdef CONFIG_SCHED_CRITMONITOR
  uint32_t premp_start;                  /* Time when preemption disabled       */
  uint32_t premp_max;                    /* Max time preemption disabled        */
  uint32_t crit_start;                   /* Time critical section entered       */
  uint32_t crit_max;                     /* Max time in critical section        */
#endif

  /* Library related fields *****************************************************/

  int pterrno;                           /* Current per-thread errno            */

  /* State save areas ***********************************************************/
  /* The form and content of these fields are platform-specific.                */

  struct xcptcontext xcp;                /* Interrupt register save area        */

#if CONFIG_TASK_NAME_SIZE > 0
  char name[CONFIG_TASK_NAME_SIZE+1];    /* Task name (with NUL terminator)     */
#endif
};

/* struct task_tcb_s *************************************************************/
/* This is the particular form of the task control block (TCB) structure used by
 * tasks (and kernel threads).  There are two TCB forms:  one for pthreads and
 * one for tasks.  Both share the common TCB fields (which must appear at the
 * top of the structure) plus additional fields unique to tasks and threads.
 * Having separate structures for tasks and pthreads adds some complexity, but
 * saves memory in that it prevents pthreads from being burdened with the
 * overhead required for tasks (and vice versa).
 */

struct task_tcb_s
{
  /* Common TCB fields **********************************************************/

  struct tcb_s cmn;                      /* Common TCB fields                   */

  /* Task Management Fields *****************************************************/

#ifdef CONFIG_SCHED_STARTHOOK
  starthook_t starthook;                 /* Task startup function               */
  FAR void *starthookarg;                /* The argument passed to the function */
#endif

  /* [Re-]start name + start-up parameters **************************************/

  FAR char **argv;                       /* Name+start-up parameters            */
};

/* struct pthread_tcb_s **********************************************************/
/* This is the particular form of the task control block (TCB) structure used by
 * pthreads.  There are two TCB forms:  one for pthreads and one for tasks.  Both
 * share the common TCB fields (which must appear at the top of the structure)
 * plus additional fields unique to tasks and threads.  Having separate structures
 * for tasks and pthreads adds some complexity,  but saves memory in that it
 * prevents pthreads from being burdened with the overhead required for tasks
 * (and vice versa).
 */

#ifndef CONFIG_DISABLE_PTHREAD
struct pthread_tcb_s
{
  /* Common TCB fields **********************************************************/

  struct tcb_s cmn;                      /* Common TCB fields                   */

  /* Task Management Fields *****************************************************/

  pthread_addr_t arg;                    /* Startup argument                    */
  FAR void *joininfo;                    /* Detach-able info to support join    */

  /* Robust mutex support *******************************************************/

#ifndef CONFIG_PTHREAD_MUTEX_UNSAFE
  FAR struct pthread_mutex_s *mhead;     /* List of mutexes held by thread      */
#endif

  /* Clean-up stack *************************************************************/

#ifdef CONFIG_PTHREAD_CLEANUP
  /* tos   - The index to the next available entry at the top of the stack.
   * stack - The pre-allocated clean-up stack memory.
   */

  uint8_t tos;
  struct pthread_cleanup_s stack[CONFIG_PTHREAD_CLEANUP_STACKSIZE];
#endif
};
#endif /* !CONFIG_DISABLE_PTHREAD */

3. NuttX scheduling policies

1. FIFO (first in, first out): a scheduling policy applied among tasks of equal priority.
2. RR (round robin): time-slice rotation, likewise applied among tasks of equal priority.
3. Sporadic: sporadic scheduling, introduced mainly to bound the impact of periodic and aperiodic events on real-time behavior. Unlike RR, it can limit how long a thread may execute within a configured time window (see the sketch below).
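
As an illustration, here is a minimal sketch of requesting these policies through the POSIX interfaces that NuttX implements. The sched_ss_* fields and SCHED_SPORADIC require CONFIG_SCHED_SPORADIC, and the priorities and budget values below are arbitrary examples:

#include <pthread.h>
#include <sched.h>

static void *worker(void *arg)
{
  return NULL;
}

int start_sporadic_thread(void)
{
  pthread_attr_t attr;
  struct sched_param param;
  pthread_t tid;

  pthread_attr_init(&attr);

  /* Use the attributes' policy rather than inheriting the creator's */

  pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
  pthread_attr_setschedpolicy(&attr, SCHED_SPORADIC);

  param.sched_priority               = 100; /* While budget remains     */
  param.sched_ss_low_priority        = 50;  /* After budget exhausted   */
  param.sched_ss_repl_period.tv_sec  = 1;   /* Replenishment period     */
  param.sched_ss_repl_period.tv_nsec = 0;
  param.sched_ss_init_budget.tv_sec  = 0;   /* Execution budget         */
  param.sched_ss_init_budget.tv_nsec = 100 * 1000 * 1000;
  param.sched_ss_max_repl            = 4;   /* Max queued replenishments */
  pthread_attr_setschedparam(&attr, &param);

  return pthread_create(&tid, &attr, worker, NULL);
}

For SCHED_FIFO or SCHED_RR, only sched_priority needs to be set before calling pthread_attr_setschedparam().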
