1 // core/thread.cc -- Scheduler and thread creation/destruction
3 // This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
5 // Permission is hereby granted, free of charge, to any person obtaining a copy of
6 // this software and associated documentation files (the "Software"), to deal with
7 // the Software without restriction, including without limitation the rights to
8 // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
9 // of the Software, and to permit persons to whom the Software is furnished to do
10 // so, subject to the following conditions:
12 // * Redistributions of source code must retain the above copyright notice,
13 // this list of conditions and the following disclaimers.
15 // * Redistributions in binary form must reproduce the above copyright notice,
16 // this list of conditions and the following disclaimers in the
17 // documentation and/or other materials provided with the distribution.
19 // * The names of the Software's authors and/or contributors
20 // may not be used to endorse or promote products derived from
21 // this Software without specific prior written permission.
23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
25 // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 // CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
// SOFTWARE.
31 #include <kern/thread.h>
32 #include <kern/pagealloc.h>
33 #include <kern/time.h>
34 #include <kern/arch.h>
36 #include <lowlevel/bitops.h>
// Return the thread at the head of the real-time runqueue for the given
// priority.
// NOTE(review): assumes the queue is non-empty — presumably callers check
// the priority bitmap first; confirm in full source.
Thread *Sched::best_rt(int prio)
// The first entry in the per-priority list is the next to run.
Util::List *rq = &rt_runqueue[prio];
return rq->next->listentry(Thread, runqueue_node);
// Reactivate every depleted timeshare priority in one step by promoting
// the depleted bitmap to the active bitmap.  The per-queue list fixup is
// deferred to replenish_prio(), which best_ts() invokes lazily.
void Sched::replenish_all()
// All runnable tasks have depleted timeslices, so mark
// all depleted prios active. Replenishment will happen
ts_bitmap = ts_depleted_bitmap;
ts_depleted_bitmap = 0;
// Reactivate all depleted threads at one priority in O(1).  The
// ts_depleted[prio] node is an in-list marker within ts_runqueue[prio]:
// entries before it are active, entries after it are depleted.  Moving
// the marker to the back makes every queued thread active at once.
void Sched::replenish_prio(int prio)
// Move the depleted marker to the end, effectively moving
// all other tasks to the active list at once.
ts_depleted[prio].del();
ts_runqueue[prio].add_back(&ts_depleted[prio]);
71 assert(runqueue_node.empty());
74 // This puts it at the end of the active list, not depleted.
75 sched.ts_depleted[ts_prio].add_back(&runqueue_node);
77 sched.ts_runqueue[ts_prio].add_back(&runqueue_node);
80 sched.ts_bitmap |= 1 << (Sched::ts_prios - 1 - ts_prio);
87 if (sched.ts_runqueue[ts_prio].next == &sched.ts_depleted[ts_prio])
88 sched.ts_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
89 if (sched.ts_depleted[ts_prio].next == &sched.ts_runqueue[ts_prio])
90 sched.ts_depleted_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
// Move this thread from the active to the depleted portion of its
// timeshare runqueue once its timeslice is used up.
void Thread::ts_deplete()
// If the node after the queue head is the depleted marker, this prio has
// no other active thread left, so clear its active bit.
if (sched.ts_runqueue[ts_prio].next == &sched.ts_depleted[ts_prio])
sched.ts_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
// This puts it at the end of the depleted list, not active.
sched.ts_runqueue[ts_prio].add_back(&runqueue_node);
// Record that this prio now has at least one depleted thread.
sched.ts_depleted_bitmap |= 1 << (Sched::ts_prios - 1 - ts_prio);
// Insert this thread into the appropriate runqueue and set its ready bit
// in the scheduler's priority bitmap.  (The TimeShared branch body is
// elided in this excerpt.)
inline void Thread::add()
if (policy == TimeShared) {
// Real-time thread: queue at the tail of its priority's RT list...
sched.rt_runqueue[rt_prio].add_back(&runqueue_node);
// ...and mark that priority runnable.  The bitmap is MSB-first with
// respect to priority: higher rt_prio maps to a lower bit index.
ll_multiword_set_bit(sched.bitmap, Sched::rt_prios - 1 - rt_prio);
// Remove this thread from its runqueue, clearing the priority's ready
// bit when the queue becomes empty.  Timeshared threads share the lowest
// slot of the global bitmap (bit index rt_prios - 1), which is cleared
// only when both the active and depleted TS bitmaps are empty.
inline void Thread::del()
if (policy == TimeShared) {
if (!sched.ts_bitmap && !sched.ts_depleted_bitmap)
ll_multiword_clear_bit(sched.bitmap, Sched::rt_prios - 1);
// Real-time path: clear the bit only if this was the last thread at
// its priority.
if (sched.rt_runqueue[rt_prio].empty())
ll_multiword_clear_bit(sched.bitmap, Sched::rt_prios - 1 - rt_prio);
// Map a timeshare priority to its timeslice length: the slice grows
// linearly with priority (prio/8 of the default timeslice).  Valid
// priorities are 1 .. ts_prios-1; prio 0 is not used for timeshare.
inline u32 Sched::prio_to_slice(int prio)
assert(prio >= 1 && prio < ts_prios);
return prio * default_timeslice / 8;
// Inverse of prio_to_slice(): map a remaining-timeslice value back to a
// timeshare priority (integer division truncates toward zero).
int Sched::slice_to_prio(u32 slice)
return slice * 8 / default_timeslice;
// Catch a non-runnable thread's timeslice accounting up with the global
// replenish counter and recompute its timeshare priority from the time
// it has left.  Must be called while the thread is off the runqueue.
void Thread::prio_adjust()
assert(runqueue_node.empty());
if (last_replenish != sched.last_replenish) {
// Far behind (more than 3 periods): saturate just below the
// two-full-slices cap instead of iterating.
if (sched.last_replenish - last_replenish > 3) {
time_left = time_slice * 2 - 1;
// Otherwise catch up one period at a time: carry half the remaining
// time forward and add a fresh slice.  (The loop's counter advance is
// elided in this excerpt.)
} else while (sched.last_replenish != last_replenish) {
time_left = time_left / 2 + time_slice;
// The carry-half rule keeps time_left strictly under two slices.
assert(time_left < (s32)time_slice * 2);
last_replenish = sched.last_replenish;
// More remaining time maps to a higher (better) timeshare priority.
int new_prio = Sched::slice_to_prio(time_left);
// Diagnostic dump before the equivalent assert below fires.
if (!(new_prio >= ts_static_prio && new_prio < Sched::ts_prios)) {
printf("new prio %d, static %d, time left %d\n",
new_prio, ts_static_prio, time_left);
assert(new_prio >= ts_static_prio && new_prio < Sched::ts_prios);
// Grant a runnable thread a fresh timeslice, at most once per global
// replenish period, and drop any boosted priority back to the static
// priority.  Must be called while the thread is on the runqueue.
void Thread::replenish()
assert(!runqueue_node.empty());
if (last_replenish != sched.last_replenish) {
// A runnable thread only reaches replenish with an exhausted slice.
assert(time_left == 0);
time_left = time_slice;
last_replenish = sched.last_replenish;
// Boosts only ever raise ts_prio above the static value; undo that.
if (ts_static_prio != ts_prio) {
assert(ts_static_prio < ts_prio);
ts_prio = ts_static_prio;
// Select the best runnable timeshare thread: the highest priority with
// an active entry, replenishing that priority's queue on demand when
// only the depleted marker is left at its head.
Thread *Sched::best_ts()
// Bits are stored MSB-first with respect to priority, so the lowest set
// bit (ll_ffs) corresponds to the highest runnable priority.
int best = ts_prios - 1 - ll_ffs(ts_bitmap);
assert(best >= 1 && best < ts_prios);
// Head-of-queue is the depleted marker: everything active was consumed,
// so reactivate the depleted entries in O(1).
if (ts_runqueue[best].next == &ts_depleted[best])
replenish_prio(best);
// After replenish there must be a real thread ahead of the marker.
assert(ts_runqueue[best].next != &ts_depleted[best]);
assert(!ts_runqueue[best].empty());
Thread *t = ts_runqueue[best].next->listentry(Thread, runqueue_node);
// Sanity: a queued timeshare thread is unblocked, non-RT, and queued at
// its own priority.
assert(!t->blocked_on);
assert(t->policy == Thread::TimeShared);
assert(t->rt_prio == 0);
assert(t->ts_prio == best);
// The replenish can lower the threads priority if it was boosted;
// in some cases, this may mean that a different thread is now the
// highest priority thread. As these aren't real-time threads, and
// as priorities are mainly to determine which threads can take the
// CPU immediately on wakeup rather than which CPU hog goes first,
// it's not important enough to worry about; the new priority will
// be used on the next schedule.
// Core scheduler, called with the runqueue lock already held.  Charges
// the outgoing thread for its CPU time, picks the next thread (real-time
// first; falls back to the idle/init thread when nothing is runnable),
// arms the preemption timer, and switches contexts.  Not valid in IRQ
// context.  (Declarations of `now`/`best` and the RT/TS selection logic
// are elided in this excerpt.)
void Sched::schedule_nolock()
assert(!IRQ::in_irq);
// Best runnable RT priority: bitmap is MSB-first, so the first set bit
// found is the highest priority.
int rt = rt_prios - 1 - ll_multiword_ffs(bitmap, 0, rt_prios);
Time::monotonic_clock.get_time(&now);
// The idle/init thread's CPU time is not accounted.
if (curthread != Arch::init_thread)
curthread->charge(now);
// Nothing runnable: run the idle/init thread.
best = Arch::init_thread;
if (best != curthread) {
if (best != Arch::init_thread) {
// Arm the resched timer for when the incoming thread's remaining
// timeslice expires.
best->last_time = now;
Time::Time expiry = now + best->time_left;
resched_timer.arm(expiry);
Arch::switch_thread(best, curthread);
// Public entry point: take the runqueue lock (IRQ-safe, recursive) for
// the duration of scheduling.  RAII releases it after the switch back.
void Sched::schedule()
AutoSpinLockRecIRQ autolock(runqueue_lock);
// First code run in a newly created thread's context (reached via the
// extern "C" sched_new_thread trampoline): releases the runqueue lock
// that was held across the context switch into this thread.
void Sched::sched_new_thread()
runqueue_lock.unlock_irq();
// Create a new kernel thread that will run func(arg).  One page holds
// both the stack and the Thread struct (struct at the top, stack growing
// down away from it).  The thread is given default timeshare scheduling
// parameters and linked into the global thread list.
//
// NOTE(review): the result of Mem::PageAlloc::alloc() is used without a
// NULL check — confirm alloc() cannot fail (or panics internally).
Thread *Sched::new_thread(thread_func func, void *arg, char *name)
// Allocate a page for the thread's stack, and stick the thread
// struct at the top of the stack. It's placed at the top rather
// than the bottom to facilitate the possible eventual use of guard
Mem::Page *page = Mem::PageAlloc::alloc(1);
ulong addr = reinterpret_cast<ulong>(Mem::page_to_kvirt(page));
// Place the Thread struct in the last thread_size bytes of the
// arch-defined thread region.
addr += Arch::ArchThread::size - thread_size;
Thread *t = reinterpret_cast<Thread *>(addr);
// Use placement new to run the constructors of lists
// (and any other self-constructing type that may be
// FIXME: settable schedparams
// Default scheduling parameters: timeshared, full default slice,
// static priority 8.
t->time_left = Sched::default_timeslice;
t->policy = Thread::TimeShared;
t->ts_static_prio = 8;
t->time_slice = prio_to_slice(t->ts_prio);
t->blocked_on = NULL;
t->last_replenish = 0;
t->addr_space = NULL;
t->active_addr_space = NULL;
// Arch-specific setup of the initial context so the thread enters
// func with arg.
t->arch.init(reinterpret_cast<void *>(func), arg);
// NOTE(review): strncpy does not NUL-terminate when strlen(name) >=
// name_len — verify Thread::name reserves a terminator byte.
strncpy(t->name, name, Thread::name_len);
// Publish the thread on the global list under the threadlist lock.
threadlist_lock.lock_irq();
threadlist.add_back(&t->threadlist_node);
threadlist_lock.unlock_irq();
307 static void do_resched_timer(Time::KTimerEntry *timer)
314 assert(curthread == Arch::init_thread);
316 resched_timer.mux = Time::monotonic_timers;
317 resched_timer.func = do_resched_timer;
318 resched_timer.data = NULL;
320 for (int i = 0; i < ts_prios; i++)
321 ts_runqueue[i].add_front(&ts_depleted[i]);
323 strcpy(curthread->name, "idle thread");
328 sched.threadlist_lock.lock_irq();
329 threadlist_node.del();
330 sched.threadlist_lock.unlock_irq();
332 ulong addr = reinterpret_cast<ulong>(this);
333 addr &= ~(Arch::ArchThread::size - 1);
334 Mem::Page *page = Mem::kvirt_to_page(reinterpret_cast<void *>(addr));
335 Mem::PageAlloc::free(page, 1);
340 printf("thread %p exiting...\n", this);
// Block this thread on the given blocker and reschedule.  The blocked
// flag is re-checked under the runqueue lock so a wake that raced in
// before the lock was taken is not lost.
void Thread::block(ThreadBlocker *blocker)
AutoSpinLockRecIRQ autolock(sched.runqueue_lock);
// Must currently be runnable, and blocking from IRQ context is invalid.
assert(!runqueue_node.empty());
assert(!IRQ::in_irq);
// Only sleep if no one has unblocked us yet.
if (blocker->blocked) {
blocked_on = blocker;
// By this point the thread has been removed from the runqueue.
assert(runqueue_node.empty());
sched.schedule_nolock();
// Make this thread runnable; the caller must already hold the runqueue
// lock.  A thread that is already queued is left untouched.
void Thread::wake_nolock()
if (runqueue_node.empty()) {
371 ulong irq = sched.runqueue_lock.lock_recirq();
373 sched.runqueue_lock.unlock_recirq(irq);
375 if (ll_get_int_state() && need_resched)
// Account the CPU time this thread consumed since last_time against its
// remaining timeslice, depleting/requeueing non-FIFO threads whose slice
// has run out.
void Thread::charge(Time::Time &now)
ival = now - last_time;
// Intervals of a second or more take a separate path — presumably a
// clamp of time_left; confirm in full source.
if (ival.seconds != 0) {
time_left -= ival.nanos;
// A runnable thread with an exhausted slice must give way — except FIFO
// threads, which run until they block or yield.
if (!blocked_on && time_left == 0 && policy != FIFO) {
if (policy == TimeShared) {
// Install a new address space for this thread and make the hardware
// switch to it immediately.
void Thread::set_aspace(Mem::AddrSpace *aspace)
// FIXME: lock thread against scheduling; this temporary method should
// be gone before SMP anyway.
addr_space = active_addr_space = aspace;
Arch::set_aspace(aspace);
415 void ThreadBlocker::wake()
421 void CascadeBlocker::wake()
427 void ThreadBlocker::unblock()
432 void CascadeBlocker::unblock()
// Append a blocker to this wait queue under the queue lock.
void WaitQueue::block(Blocker *blocker)
AutoSpinLockRecIRQ autolock(lock);
blockers.add_back(&blocker->list_node);
// Remove a specific blocker from this wait queue under the queue lock.
void WaitQueue::unblock(Blocker *blocker)
AutoSpinLockRecIRQ autolock(lock);
blocker->list_node.del();
// Return whether no blockers are currently queued (checked under the
// queue lock; the answer can be stale as soon as the lock drops).
bool WaitQueue::empty()
AutoSpinLockRecIRQ autolock(lock);
return blockers.empty();
// Remove and return the blocker whose thread has the highest priority,
// or NULL if the queue is empty.  Real-time priority dominates; among
// rt_prio == 0 threads, timeshare priority breaks the tie.  Caller holds
// the queue lock; the runqueue lock is taken here so thread priorities
// stay stable while scanning.
Blocker *WaitQueue::unblock_one_nolock()
AutoSpinLock schedautolock(sched.runqueue_lock);
Blocker *best = NULL;
// best_rt / best_ts track the current winner's priorities (their
// declarations are elided in this excerpt).
for (Util::List *node = blockers.next; node != &blockers;
Blocker *b = node->listentry(Blocker, list_node);
Thread *t = b->thread;
// A strictly higher RT priority always wins.
if (best_rt < t->rt_prio) {
best_rt = t->rt_prio;
best_ts = t->ts_prio;
// Among non-RT candidates, prefer the higher timeshare priority.
if (best_rt == t->rt_prio && best_rt == 0 && best_ts < t->ts_prio) {
best_ts = t->ts_prio;
// Detach the winner from the queue before returning it.
best->list_node.del();
// Locked wrapper around unblock_one_nolock(): removes and returns the
// best-priority blocker, or NULL if none.
Blocker *WaitQueue::unblock_one()
AutoSpinLockRecIRQ autolock(lock);
return unblock_one_nolock();
// Dequeue the best-priority blocker and wake its thread, rescheduling
// immediately if needed.  Returns whether a thread was woken — TODO
// confirm against the (elided) return statements in the full source.
bool WaitQueue::wake_one()
// The lock is held over the wake to make sure that we still
// have a valid reference to best. For external calls to
// unblock_one(), the caller must use its own locks (or other
// mechanisms) to ensure that the blocker is still valid.
ulong irq = lock.lock_recirq();
Blocker *best = unblock_one_nolock();
lock.unlock_recirq(irq);
// Reschedule now only if interrupts are enabled in the restored state.
if (ll_get_int_state() && need_resched)
// Wake every thread blocked on this queue.  Both the queue lock and the
// runqueue lock are held so the whole batch becomes runnable atomically.
// Presumably returns the number of threads woken — confirm against the
// (elided) counter in the full source.
int WaitQueue::wake_all()
ulong irq = lock.lock_recirq();
sched.runqueue_lock.lock();
for (Util::List *node = blockers.next; node != &blockers;
Blocker *b = node->listentry(Blocker, list_node);
// Runqueue lock is already held, so use the nolock wake variant.
b->thread->wake_nolock();
sched.runqueue_lock.unlock();
lock.unlock_recirq(irq);
// Reschedule now only if interrupts are enabled in the restored state.
if (ll_get_int_state() && need_resched)
// C-linkage thread-exit entry point.  The Always-level assert fires if
// control ever reaches this point — presumably the (elided) teardown
// above it never returns; confirm in full source.
extern "C" void exit_thread()
assertl(0, Assert::Always);
// C-linkage trampoline for arch context-switch code: forwards to the
// scheduler's new-thread entry hook.
extern "C" void sched_new_thread()
Threads::sched.sched_new_thread();
// C-linkage trampoline: invoke the global scheduler.
extern "C" void schedule()
Threads::sched.schedule();