1 // core/thread.cc -- Scheduler and thread creation/destruction
3 // This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
5 // Permission is hereby granted, free of charge, to any person obtaining a copy of
6 // this software and associated documentation files (the "Software"), to deal with
7 // the Software without restriction, including without limitation the rights to
8 // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 // of the Software, and to permit persons to whom the Software is furnished to do
10 // so, subject to the following condition:
12 // The above copyright notice and this permission notice shall be
13 // included in all copies or substantial portions of the Software.
15 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 // CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
23 #include <kern/thread.h>
24 #include <kern/pagealloc.h>
25 #include <kern/time.h>
26 #include <kern/arch.h>
28 #include <lowlevel/bitops.h>
// Sched::best_rt() -- return the next thread to run at real-time
// priority "prio": simply the head of that priority level's run queue.
// Caller is responsible for ensuring the queue is non-empty.
33 Thread *Sched::best_rt(int prio)
35 Util::List *rq = &rt_runqueue[prio];
38 return rq->next->listentry(Thread, runqueue_node);
// Sched::replenish_all() -- mark every depleted timeshare priority as
// active again, in O(1), by swapping the depleted bitmap into the
// active bitmap.  No queues are touched here; the actual per-thread
// timeslice refill appears to happen lazily (see Thread::replenish()).
41 void Sched::replenish_all()
43 // All runnable tasks have depleted timeslices, so mark
44 // all depleted prios active. Replenishment will happen
// (original comment continues on lines not shown in this extract)
48 ts_bitmap = ts_depleted_bitmap;
49 ts_depleted_bitmap = 0;
// Sched::replenish_prio() -- make every thread at "prio" active again
// in O(1).  Active and depleted threads share one circular list,
// separated by the ts_depleted[prio] marker node; unlinking the marker
// and re-adding it at the back leaves every queued thread ahead of the
// marker, i.e. in the active segment.
52 void Sched::replenish_prio(int prio)
54 // Move the depleted marker to the end, effectively moving
55 // all other tasks to the active list at once.
57 ts_depleted[prio].del();
58 ts_runqueue[prio].add_back(&ts_depleted[prio]);
// Fragment of what appears to be Thread::ts_add() (the function header
// falls in a gap of this extract): queue a timeshared thread on its
// priority's circular list and publish the priority in ts_bitmap.
63 assert(runqueue_node.empty());
// NOTE(review): the branch condition choosing between the two
// add_back() calls below is not visible in this extract -- presumably
// it depends on whether the thread's slice is depleted; confirm
// against the full source.
66 // This puts it at the end of the active list, not depleted.
67 sched.ts_depleted[ts_prio].add_back(&runqueue_node);
69 sched.ts_runqueue[ts_prio].add_back(&runqueue_node);
// Bit order is reversed (bit 0 = highest ts priority) so that a
// find-first-set on the bitmap yields the best priority directly.
72 sched.ts_bitmap |= 1 << (Sched::ts_prios - 1 - ts_prio);
// Fragment of what appears to be Thread::ts_del() (header not in this
// extract): after unlinking a thread, clear the priority's bit in
// whichever bitmap's list segment has become empty.
// Active segment is empty when the node right after the queue head is
// the depleted marker itself.
79 if (sched.ts_runqueue[ts_prio].next == &sched.ts_depleted[ts_prio])
80 sched.ts_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
// Depleted segment is empty when the marker's successor wraps straight
// back around to the queue head.
81 if (sched.ts_depleted[ts_prio].next == &sched.ts_runqueue[ts_prio])
82 sched.ts_depleted_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
// Thread::ts_deplete() -- move this thread from the active segment to
// the depleted segment of its priority's queue and update both
// bitmaps.  NOTE(review): the del() removing runqueue_node before the
// emptiness check below falls in a gap of this extract.
85 void Thread::ts_deplete()
// If this removal left the active segment empty (queue head directly
// followed by the depleted marker), clear the active bit.
89 if (sched.ts_runqueue[ts_prio].next == &sched.ts_depleted[ts_prio])
90 sched.ts_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
92 // This puts it at the end of the depleted list, not active.
93 sched.ts_runqueue[ts_prio].add_back(&runqueue_node);
94 sched.ts_depleted_bitmap |= 1 << (Sched::ts_prios - 1 - ts_prio);
// Thread::add() -- put this runnable thread on the appropriate run
// queue (timeshared vs. real-time) and publish its priority in the
// global scheduler bitmap.  The TimeShared branch body falls in a gap
// of this extract; the real-time path is visible below.
97 inline void Thread::add()
99 if (policy == TimeShared) {
103 sched.rt_runqueue[rt_prio].add_back(&runqueue_node);
// Reversed bit order: bit 0 = highest rt priority, so a multiword
// find-first-set locates the best runnable priority.
106 ll_multiword_set_bit(sched.bitmap, Sched::rt_prios - 1 - rt_prio);
// Thread::del() -- remove this thread from its run queue and clear the
// corresponding scheduler-bitmap bit once the queue is empty.  The
// ts_del()-style unlink calls fall in gaps of this extract.
109 inline void Thread::del()
111 if (policy == TimeShared) {
// Timeshared threads collectively occupy the lowest real-time slot
// (rt_prio 0, i.e. bit rt_prios - 1 in the reversed bitmap); clear it
// only when no timeshared thread remains active or depleted.
114 if (!sched.ts_bitmap && !sched.ts_depleted_bitmap)
115 ll_multiword_clear_bit(sched.bitmap, Sched::rt_prios - 1);
119 if (sched.rt_runqueue[rt_prio].empty())
120 ll_multiword_clear_bit(sched.bitmap, Sched::rt_prios - 1 - rt_prio);
// Sched::prio_to_slice() -- map a timeshare priority to its timeslice
// length: higher priority earns a proportionally longer slice.
// Priority 0 is not a valid argument (see the assert).
124 inline u32 Sched::prio_to_slice(int prio)
126 assert(prio >= 1 && prio < ts_prios);
127 return prio * default_timeslice / 8;
// Sched::slice_to_prio() -- inverse of prio_to_slice(): map a
// remaining timeslice back to a priority.  No clamping is done here;
// the caller (Thread::prio_adjust) asserts the result is in range.
130 int Sched::slice_to_prio(u32 slice)
132 return slice * 8 / default_timeslice;
// Thread::prio_adjust() -- recompute a dequeued thread's dynamic
// timeshare priority from the timeslice credit accumulated across
// missed replenish periods.  The thread must not be on a run queue.
135 void Thread::prio_adjust()
137 assert(runqueue_node.empty());
139 if (last_replenish != sched.last_replenish) {
// More than three periods behind: saturate just under twice the base
// slice rather than iterating the decay loop.
140 if (sched.last_replenish - last_replenish > 3) {
141 time_left = time_slice * 2 - 1;
// Otherwise decay once per missed period: halve the remainder and add
// one base slice.  NOTE(review): the statement that advances
// last_replenish inside this loop is not visible in this extract;
// without it the loop could not terminate -- confirm against the full
// source.
142 } else while (sched.last_replenish != last_replenish) {
143 time_left = time_left / 2 + time_slice;
144 assert(time_left < (s32)time_slice * 2);
148 last_replenish = sched.last_replenish;
150 int new_prio = Sched::slice_to_prio(time_left);
// Debugging aid: report the offending values before the assert fires.
152 if (!(new_prio >= ts_static_prio && new_prio < Sched::ts_prios)) {
153 printf("new prio %d, static %d, time left %d\n",
154 new_prio, ts_static_prio, time_left);
157 assert(new_prio >= ts_static_prio && new_prio < Sched::ts_prios);
// Thread::replenish() -- refill the timeslice of a queued thread once
// per scheduler replenish period, and drop any priority boost back to
// the static priority.  Requeueing after the priority change falls in
// a gap of this extract.
162 void Thread::replenish()
164 assert(!runqueue_node.empty());
166 if (last_replenish != sched.last_replenish) {
// A queued thread only reaches a new period with its slice fully
// consumed.
167 assert(time_left == 0);
168 time_left = time_slice;
169 last_replenish = sched.last_replenish;
// ts_prio above ts_static_prio means the thread was boosted; restore
// the static priority (larger ts_prio = higher priority here).
171 if (ts_static_prio != ts_prio) {
172 assert(ts_static_prio < ts_prio);
174 ts_prio = ts_static_prio;
// Sched::best_ts() -- pick the next timeshared thread to run: take the
// highest priority with a runnable thread, lazily flipping that
// priority's depleted segment back to active if needed.  The final
// per-thread replenish call and the return fall in gaps of this
// extract.
180 Thread *Sched::best_ts()
// Reversed bitmap encoding: the lowest set bit is the highest
// priority.
186 int best = ts_prios - 1 - ll_ffs(ts_bitmap);
187 assert(best >= 1 && best < ts_prios);
// Bit set but active segment empty (head directly followed by the
// depleted marker): everything at this priority is depleted, so make
// the whole segment active again.
189 if (ts_runqueue[best].next == &ts_depleted[best])
190 replenish_prio(best);
192 assert(ts_runqueue[best].next != &ts_depleted[best]);
193 assert(!ts_runqueue[best].empty());
194 Thread *t = ts_runqueue[best].next->listentry(Thread, runqueue_node);
196 assert(!t->blocked_on);
197 assert(t->policy == Thread::TimeShared);
198 assert(t->rt_prio == 0);
199 assert(t->ts_prio == best);
201 // The replenish can lower the threads priority if it was boosted;
202 // in some cases, this may mean that a different thread is now the
203 // highest priority thread. As these aren't real-time threads, and
204 // as priorities are mainly to determine which threads can take the
205 // CPU immediately on wakeup rather than which CPU hog goes first,
206 // it's not important enough to worry about; the new priority will
207 // be used on the next schedule.
// Sched::schedule_nolock() -- core dispatcher; caller must hold
// runqueue_lock.  Charges the outgoing thread for its CPU time, picks
// the best runnable thread (real-time classes first, idle/init thread
// as the fallback), arms the preemption timer, and context-switches.
// Several selection lines fall in gaps of this extract.
213 void Sched::schedule_nolock()
215 assert(!IRQ::in_irq);
// Best real-time priority: first set bit of the (reversed) multiword
// bitmap.
217 int rt = rt_prios - 1 - ll_multiword_ffs(bitmap, 0, rt_prios);
221 Time::monotonic_clock.get_time(&now);
// Bill elapsed CPU to the outgoing thread; the idle (init) thread is
// never charged.
223 if (curthread != Arch::init_thread)
224 curthread->charge(now);
227 best = Arch::init_thread;
233 if (best != curthread) {
// Arm a one-shot preemption timer for the incoming thread's remaining
// slice; the idle thread runs untimed.
234 if (best != Arch::init_thread) {
235 best->last_time = now;
236 Time::Time expiry = now + best->time_left;
237 resched_timer.arm(expiry);
240 Arch::switch_thread(best, curthread);
// Sched::schedule() -- public entry point: hold runqueue_lock
// (recursive, IRQ-safe) for the duration of the reschedule; the
// schedule_nolock() call falls in a gap of this extract.
244 void Sched::schedule()
246 AutoSpinLockRecIRQ autolock(runqueue_lock);
// Sched::sched_new_thread() -- runs in a newly created thread's
// context on its first dispatch: releases the runqueue lock that was
// held across the context switch into it.
250 void Sched::sched_new_thread()
252 runqueue_lock.unlock_irq();
// Sched::new_thread() -- create a kernel thread that will run
// func(arg).  The Thread struct lives at the top of the thread's
// single stack page (rationale below); scheduling parameters are
// hard-coded defaults pending settable schedparams (FIXME below).
// The placement-new call and the final "return t" fall in gaps of
// this extract.
255 Thread *Sched::new_thread(thread_func func, void *arg, char *name)
257 // Allocate a page for the thread's stack, and stick the thread
258 // struct at the top of the stack. It's placed at the top rather
259 // than the bottom to facilitate the possible eventual use of guard
// pages (the rest of this comment is not visible in this extract).
262 Mem::Page *page = Mem::PageAlloc::alloc(1);
263 ulong addr = reinterpret_cast<ulong>(Mem::page_to_kvirt(page));
264 addr += Arch::ArchThread::size - thread_size;
266 Thread *t = reinterpret_cast<Thread *>(addr);
268 // Use placement new to run the constructors of lists
269 // (and any other self-constructing type that may be
// added to Thread later).
274 // FIXME: settable schedparams
276 t->time_left = Sched::default_timeslice;
277 t->policy = Thread::TimeShared;
279 t->ts_static_prio = 8;
281 t->time_slice = prio_to_slice(t->ts_prio);
282 t->blocked_on = NULL;
283 t->last_replenish = 0;
284 t->addr_space = NULL;
285 t->active_addr_space = NULL;
287 t->arch.init(reinterpret_cast<void *>(func), arg);
// NOTE(review): strncpy() does not NUL-terminate when name is
// name_len chars or longer -- verify a terminator is written on a
// line not visible here.
290 strncpy(t->name, name, Thread::name_len);
// Publish the new thread on the global thread list.
292 threadlist_lock.lock_irq();
293 threadlist.add_back(&t->threadlist_node);
294 threadlist_lock.unlock_irq();
// Timer callback invoked when the running thread's timeslice expires
// (body falls in a gap of this extract -- presumably it requests a
// reschedule; confirm against the full source).
299 static void do_resched_timer(Time::KTimerEntry *timer)
// Fragment of the scheduler's constructor/initialization (its header
// falls in a gap of this extract): runs on the boot thread, wires up
// the reschedule timer, and seeds each timeshare queue with its
// depleted-marker node.
306 assert(curthread == Arch::init_thread);
308 resched_timer.mux = Time::monotonic_timers;
309 resched_timer.func = do_resched_timer;
310 resched_timer.data = NULL;
// Each priority's circular queue initially holds only the depleted
// marker: both the active and depleted segments start empty.
312 for (int i = 0; i < ts_prios; i++)
313 ts_runqueue[i].add_front(&ts_depleted[i]);
// The boot thread becomes the idle thread.
315 strcpy(curthread->name, "idle thread");
// Fragment of the Thread destructor (header falls in a gap of this
// extract): unlink from the global thread list, then free the
// thread's stack page.  The Thread struct itself lives in that page,
// so nothing may touch *this after the free.
320 sched.threadlist_lock.lock_irq();
321 threadlist_node.del();
322 sched.threadlist_lock.unlock_irq();
// The stack is one naturally aligned ArchThread::size region, so
// masking this Thread's own address recovers the page base.
324 ulong addr = reinterpret_cast<ulong>(this);
325 addr &= ~(Arch::ArchThread::size - 1);
326 Mem::Page *page = Mem::kvirt_to_page(reinterpret_cast<void *>(addr));
327 Mem::PageAlloc::free(page, 1);
// Fragment of a thread-exit path (surrounding lines fall in a gap of
// this extract): debug trace identifying the dying thread.
332 printf("thread %p exiting...\n", this);
// Thread::block() -- block the current thread on "blocker" if it is
// still pending, then reschedule.  Must not be called from IRQ
// context.  The run-queue removal between the two runqueue_node
// asserts falls in a gap of this extract (note they flip from
// non-empty to empty).
336 void Thread::block(ThreadBlocker *blocker)
338 AutoSpinLockRecIRQ autolock(sched.runqueue_lock);
339 assert(!runqueue_node.empty());
341 assert(!IRQ::in_irq);
// Re-check under the lock: the blocker may already have been
// satisfied, in which case we never sleep.
343 if (blocker->blocked) {
344 blocked_on = blocker;
346 assert(runqueue_node.empty());
347 sched.schedule_nolock();
// Thread::wake_nolock() -- caller holds runqueue_lock; requeue the
// thread only if it is not already on a run queue.  The body of the
// if falls in a gap of this extract.
351 void Thread::wake_nolock()
355 if (runqueue_node.empty()) {
// Fragment of what appears to be Thread::wake() (header and the
// wake_nolock() call fall in gaps of this extract): take the runqueue
// lock around the wakeup, then reschedule if preemption is wanted and
// interrupts are currently enabled.
363 ulong irq = sched.runqueue_lock.lock_recirq();
365 sched.runqueue_lock.unlock_recirq(irq);
367 if (ll_get_int_state() && need_resched)
// Thread::charge() -- bill the CPU time consumed since last_time
// against this thread's remaining slice, and deplete/demote the
// thread once the slice runs out (FIFO threads are exempt).  Several
// lines, including the seconds-overflow handling and the depletion
// action, fall in gaps of this extract.
371 void Thread::charge(Time::Time &now)
374 ival = now - last_time;
// A full second or more cannot be expressed in the nanos field alone;
// that case is handled on lines not visible here.
377 if (ival.seconds != 0) {
380 time_left -= ival.nanos;
386 if (!blocked_on && time_left == 0 && policy != FIFO) {
387 if (policy == TimeShared) {
// Thread::set_aspace() -- point this thread at a new address space
// and make it take effect immediately via the arch layer.
396 void Thread::set_aspace(Mem::AddrSpace *aspace)
398 // FIXME: lock thread against scheduling; this temporary method should
399 // be gone before SMP anyway.
402 addr_space = active_addr_space = aspace;
403 Arch::set_aspace(aspace);
// Blocker wake/unblock entry points.  All four bodies fall in gaps of
// this extract; only the signatures are visible.
407 void ThreadBlocker::wake()
413 void CascadeBlocker::wake()
419 void ThreadBlocker::unblock()
424 void CascadeBlocker::unblock()
// WaitQueue::block() -- enqueue a blocker at the tail of this wait
// queue, under the queue's IRQ-safe lock.
430 void WaitQueue::block(Blocker *blocker)
432 AutoSpinLockRecIRQ autolock(lock);
433 blockers.add_back(&blocker->list_node);
// WaitQueue::unblock() -- remove a blocker from this wait queue,
// under the queue's IRQ-safe lock.
436 void WaitQueue::unblock(Blocker *blocker)
438 AutoSpinLockRecIRQ autolock(lock);
439 blocker->list_node.del();
// WaitQueue::empty() -- true if no blockers are queued; takes the
// queue lock so the snapshot is consistent.
442 bool WaitQueue::empty()
444 AutoSpinLockRecIRQ autolock(lock);
445 return blockers.empty();
// WaitQueue::unblock_one_nolock() -- caller holds the queue lock;
// additionally takes the runqueue lock, scans every blocker, and
// removes (and apparently returns) the one whose thread has the best
// priority: highest rt_prio, with ts_prio as the tie-break among
// rt_prio-0 timeshared threads.  Several lines -- the best_rt/best_ts
// declarations, the loop advance, the "best = b" updates, and the
// return -- fall in gaps of this extract.
448 Blocker *WaitQueue::unblock_one_nolock()
450 AutoSpinLock schedautolock(sched.runqueue_lock);
454 Blocker *best = NULL;
456 for (Util::List *node = blockers.next; node != &blockers;
459 Blocker *b = node->listentry(Blocker, list_node);
460 Thread *t = b->thread;
// Strictly better real-time priority wins outright.
462 if (best_rt < t->rt_prio) {
463 best_rt = t->rt_prio;
464 best_ts = t->ts_prio;
// Among timeshared threads (rt_prio 0), a higher ts_prio wins.
468 if (best_rt == t->rt_prio && best_rt == 0 && best_ts < t->ts_prio) {
469 best_ts = t->ts_prio;
// Detach the winner from the wait queue before handing it back.
475 best->list_node.del();
// WaitQueue::unblock_one() -- locked wrapper around
// unblock_one_nolock().
480 Blocker *WaitQueue::unblock_one()
482 AutoSpinLockRecIRQ autolock(lock);
483 return unblock_one_nolock();
// WaitQueue::wake_one() -- dequeue the best-priority blocker and wake
// its thread (the wake call and the bool return fall in gaps of this
// extract), then reschedule if preemption is wanted.
486 bool WaitQueue::wake_one()
488 // The lock is held over the wake to make sure that we still
489 // have a valid reference to best. For external calls to
490 // unblock_one(), the caller must use its own locks (or other
491 // mechanisms) to ensure that the blocker is still valid.
493 ulong irq = lock.lock_recirq();
494 Blocker *best = unblock_one_nolock();
499 lock.unlock_recirq(irq);
// Only reschedule synchronously when interrupts are enabled.
501 if (ll_get_int_state() && need_resched)
// WaitQueue::wake_all() -- wake the thread behind every queued
// blocker, holding both the queue lock and the runqueue lock for the
// whole sweep.  Declared to return int (presumably a count of woken
// threads); the counting, loop advance, and return fall in gaps of
// this extract.
507 int WaitQueue::wake_all()
509 ulong irq = lock.lock_recirq();
510 sched.runqueue_lock.lock();
513 for (Util::List *node = blockers.next; node != &blockers;
516 Blocker *b = node->listentry(Blocker, list_node);
518 b->thread->wake_nolock();
523 sched.runqueue_lock.unlock();
524 lock.unlock_recirq(irq);
// Only reschedule synchronously when interrupts are enabled.
526 if (ll_get_int_state() && need_resched)
// C-linkage thread-exit entry point (the statements before the assert
// fall in a gap of this extract); the unconditional assertl(0) marks
// the point past which execution must never proceed.
535 extern "C" void exit_thread()
538 assertl(0, Assert::Always);
// C-linkage trampoline so low-level/asm code can run the new-thread
// entry hook.
541 extern "C" void sched_new_thread()
543 Threads::sched.sched_new_thread();
// C-linkage trampoline so low-level/asm code can trigger a
// reschedule.
546 extern "C" void schedule()
548 Threads::sched.schedule();