// core/thread.cc -- Scheduler and thread creation/destruction
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors or contributors be held liable for any damages
// arising from the use of this software.
//
// Permission is hereby granted to everyone, free of charge, to use, copy,
// modify, prepare derivative works of, publish, distribute, perform,
// sublicense, and/or sell copies of the Software, provided that the above
// copyright notice and disclaimer of warranty be included in all copies or
// substantial portions of this software.
#include <kern/thread.h>
#include <kern/pagealloc.h>
#include <kern/time.h>
#include <kern/arch.h>

#include <lowlevel/bitops.h>
namespace Threads {
	Sched sched;

	Thread *Sched::best_rt(int prio)
	{
		Util::List *rq = &rt_runqueue[prio];

		return rq->next->listentry(Thread, runqueue_node);
	}
	void Sched::replenish_all()
	{
		// All runnable tasks have depleted timeslices, so mark
		// all depleted prios active.  Replenishment will happen
		// lazily, as each thread is next examined.

		last_replenish++;
		ts_bitmap = ts_depleted_bitmap;
		ts_depleted_bitmap = 0;
	}
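	// Example: if prio 5's active queue has emptied while two threads
	// sit on its depleted list, replenish_all() moves prio 5's bit from
	// ts_depleted_bitmap into ts_bitmap.  Neither thread's time_left is
	// touched here; each notices that its last_replenish is behind
	// sched.last_replenish the next time prio_adjust() or replenish()
	// runs on it, and refills its slice then.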
	void Sched::replenish_prio(int prio)
	{
		// Move the depleted marker to the end, effectively moving
		// all other tasks to the active list at once.

		ts_depleted[prio].del();
		ts_runqueue[prio].add_back(&ts_depleted[prio]);
	}
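	// Each ts_runqueue[prio] is one circular list containing a second
	// sentinel node, ts_depleted[prio].  Entries between the queue head
	// and the depleted marker are active; entries between the marker
	// and the head have used up their slices.  Moving the single marker
	// node therefore splices every depleted thread back into the active
	// set in O(1).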
	void Thread::ts_add()
	{
		assert(runqueue_node.empty());

		if (time_left) {
			// This puts it at the end of the active list, not depleted.
			sched.ts_depleted[ts_prio].add_back(&runqueue_node);
		} else {
			sched.ts_runqueue[ts_prio].add_back(&runqueue_node);
		}

		sched.ts_bitmap |= 1 << (Sched::ts_prios - 1 - ts_prio);
	}
	void Thread::ts_del()
	{
		runqueue_node.del();

		if (sched.ts_runqueue[ts_prio].next == &sched.ts_depleted[ts_prio])
			sched.ts_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
		if (sched.ts_depleted[ts_prio].next == &sched.ts_runqueue[ts_prio])
			sched.ts_depleted_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
	}
	void Thread::ts_deplete()
	{
		runqueue_node.del();

		if (sched.ts_runqueue[ts_prio].next == &sched.ts_depleted[ts_prio])
			sched.ts_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));

		// This puts it at the end of the depleted list, not active.
		sched.ts_runqueue[ts_prio].add_back(&runqueue_node);
		sched.ts_depleted_bitmap |= 1 << (Sched::ts_prios - 1 - ts_prio);
	}
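	// Priorities are stored mirrored in the bitmaps: priority "prio"
	// lives at bit (prios - 1 - prio), so ll_ffs()/ll_multiword_ffs(),
	// which find the lowest set bit, land on the highest runnable
	// priority.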
	inline void Thread::add()
	{
		if (policy == TimeShared) {
			prio_adjust();
			ts_add();
		} else {
			sched.rt_runqueue[rt_prio].add_back(&runqueue_node);
		}

		ll_multiword_set_bit(sched.bitmap, Sched::rt_prios - 1 - rt_prio);
	}
	inline void Thread::del()
	{
		if (policy == TimeShared) {
			ts_del();

			if (!sched.ts_bitmap && !sched.ts_depleted_bitmap)
				ll_multiword_clear_bit(sched.bitmap, Sched::rt_prios - 1);
		} else {
			runqueue_node.del();

			if (sched.rt_runqueue[rt_prio].empty())
				ll_multiword_clear_bit(sched.bitmap, Sched::rt_prios - 1 - rt_prio);
		}
	}
	inline u32 Sched::prio_to_slice(int prio)
	{
		assert(prio >= 1 && prio < ts_prios);
		return prio * default_timeslice / 8;
	}

	int Sched::slice_to_prio(u32 slice)
	{
		return slice * 8 / default_timeslice;
	}
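	// The slice/priority mapping is linear in both directions: static
	// prio 8 corresponds to exactly default_timeslice, prio 4 to half
	// of it, and slice_to_prio() inverts prio_to_slice() (modulo
	// integer truncation).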
	void Thread::prio_adjust()
	{
		assert(runqueue_node.empty());

		if (last_replenish != sched.last_replenish) {
			if (sched.last_replenish - last_replenish > 3) {
				time_left = time_slice * 2 - 1;
			} else while (sched.last_replenish != last_replenish) {
				time_left = time_left / 2 + time_slice;
				assert(time_left < (s32)time_slice * 2);
				last_replenish++;
			}

			last_replenish = sched.last_replenish;

			int new_prio = Sched::slice_to_prio(time_left);

			if (!(new_prio >= ts_static_prio && new_prio < Sched::ts_prios)) {
				printf("new prio %d, static %d, time left %d\n",
				       new_prio, ts_static_prio, time_left);
			}

			assert(new_prio >= ts_static_prio && new_prio < Sched::ts_prios);
			ts_prio = new_prio;
		}
	}
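	// Worked example: a thread that slept through a single replenish
	// with time_left == 0 wakes with time_left = 0/2 + time_slice.
	// Each additional missed replenish halves the remainder and adds
	// another time_slice, converging toward time_slice * 2; after more
	// than 3 missed replenishes the shortcut above just assigns the
	// limit, time_slice * 2 - 1.  The enlarged slice maps to a ts_prio
	// above the static one, so threads that sleep a lot get a
	// wakeup-latency boost.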
	void Thread::replenish()
	{
		assert(!runqueue_node.empty());

		if (last_replenish != sched.last_replenish) {
			assert(time_left == 0);
			time_left = time_slice;
			last_replenish = sched.last_replenish;

			if (ts_static_prio != ts_prio) {
				assert(ts_static_prio < ts_prio);
				ts_del();
				ts_prio = ts_static_prio;
				ts_add();
			}
		}
	}
	Thread *Sched::best_ts()
	{
		if (!ts_bitmap)
			replenish_all();

		assert(ts_bitmap);
		int best = ts_prios - 1 - ll_ffs(ts_bitmap);
		assert(best >= 1 && best < ts_prios);

		// If this prio's active list is empty, splice the depleted
		// list back in.
		if (ts_runqueue[best].next == &ts_depleted[best])
			replenish_prio(best);

		assert(ts_runqueue[best].next != &ts_depleted[best]);
		assert(!ts_runqueue[best].empty());
		Thread *t = ts_runqueue[best].next->listentry(Thread, runqueue_node);

		assert(!t->blocked_on);
		assert(t->policy == Thread::TimeShared);
		assert(t->rt_prio == 0);
		assert(t->ts_prio == best);

		// The replenish can lower the thread's priority if it was
		// boosted; in some cases, this may mean that a different thread
		// is now the highest-priority thread.  As these aren't real-time
		// threads, and as priorities are mainly to determine which
		// threads can take the CPU immediately on wakeup rather than
		// which CPU hog goes first, it's not important enough to worry
		// about; the new priority will be used on the next schedule.

		t->replenish();
		return t;
	}
	void Sched::schedule_nolock()
	{
		assert(!IRQ::in_irq);

		// ll_multiword_ffs() is assumed to return rt_prios when no bit
		// is set, which makes rt negative when nothing is runnable.
		int rt = rt_prios - 1 - ll_multiword_ffs(bitmap, 0, rt_prios);
		Thread *best;

		Time::Time now;
		Time::monotonic_clock.get_time(&now);

		if (curthread != Arch::init_thread)
			curthread->charge(now);

		if (rt < 0)
			best = Arch::init_thread;
		else if (rt == 0)
			best = best_ts();
		else
			best = best_rt(rt);

		if (best != curthread) {
			if (best != Arch::init_thread) {
				best->last_time = now;
				Time::Time expiry = now + best->time_left;
				resched_timer.arm(expiry);
			}

			Arch::switch_thread(best, curthread);
		}
	}
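	// rt_prio 0 is reserved for the timeshared class: when the highest
	// set bit in the global bitmap corresponds to rt_prio 0, the
	// timeshared queues pick the winner; any higher rt_prio is a true
	// real-time queue serviced in strict priority order ahead of all
	// timeshared threads.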
	void Sched::schedule()
	{
		AutoSpinLockRecIRQ autolock(runqueue_lock);
		schedule_nolock();
	}
	void Sched::sched_new_thread()
	{
		// A newly created thread starts here, with the runqueue lock
		// still held by the schedule() that switched to it; the lock
		// must be dropped before the thread's entry point runs.

		runqueue_lock.unlock_irq();
	}
	Thread *Sched::new_thread(thread_func func, void *arg, char *name)
	{
		// Allocate a page for the thread's stack, and stick the thread
		// struct at the top of the stack.  It's placed at the top rather
		// than the bottom to facilitate the possible eventual use of
		// guard pages.

		Mem::Page *page = Mem::PageAlloc::alloc(1);
		ulong addr = reinterpret_cast<ulong>(Mem::page_to_kvirt(page));
		addr += Arch::ArchThread::size - thread_size;

		Thread *t = reinterpret_cast<Thread *>(addr);

		// Use placement new to run the constructors of lists
		// (and any other self-constructing type that may be
		// added to Thread later).

		new(t) Thread;

		// FIXME: settable schedparams

		t->time_left = Sched::default_timeslice;
		t->policy = Thread::TimeShared;
		t->rt_prio = 0;
		t->ts_static_prio = 8;
		t->ts_prio = 8;
		t->time_slice = prio_to_slice(t->ts_prio);
		t->blocked_on = NULL;
		t->last_replenish = 0;
		t->addr_space = NULL;
		t->active_addr_space = NULL;

		t->arch.init(reinterpret_cast<void *>(func), arg);

		if (name)
			strncpy(t->name, name, Thread::name_len);

		threadlist_lock.lock_irq();
		threadlist.add_back(&t->threadlist_node);
		threadlist_lock.unlock_irq();

		return t;
	}
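	// Hypothetical usage (worker_entry and ctx are invented for
	// illustration):
	//
	//     Thread *t = sched.new_thread(worker_entry, &ctx, "worker");
	//     t->wake(); // new_thread() does not enqueue; wake() adds it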
	static void do_resched_timer(Time::KTimerEntry *timer)
	{
		need_resched = 1;
	}

	void Sched::init()
	{
		assert(curthread == Arch::init_thread);

		resched_timer.mux = Time::monotonic_timers;
		resched_timer.func = do_resched_timer;
		resched_timer.data = NULL;

		for (int i = 0; i < ts_prios; i++)
			ts_runqueue[i].add_front(&ts_depleted[i]);

		strcpy(curthread->name, "idle thread");
	}
	Thread::~Thread()
	{
		sched.threadlist_lock.lock_irq();
		threadlist_node.del();
		sched.threadlist_lock.unlock_irq();

		// Stacks are Arch::ArchThread::size bytes and size-aligned, so
		// masking the thread pointer recovers the base of the stack
		// allocation made in new_thread().
		ulong addr = reinterpret_cast<ulong>(this);
		addr &= ~(Arch::ArchThread::size - 1);

		Mem::Page *page = Mem::kvirt_to_page(reinterpret_cast<void *>(addr));
		Mem::PageAlloc::free(page, 1);
	}
	void Thread::exit()
	{
		printf("thread %p exiting...\n", this);
	}
	void Thread::block(ThreadBlocker *blocker)
	{
		AutoSpinLockRecIRQ autolock(sched.runqueue_lock);
		assert(!runqueue_node.empty());
		assert(!blocked_on);
		assert(!IRQ::in_irq);

		if (blocker->blocked) {
			blocked_on = blocker;
			del();
			assert(runqueue_node.empty());
			sched.schedule_nolock();
		}
	}
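	// wake() can race with block(): both sides test and update the
	// blocker and runqueue state under sched.runqueue_lock, which is
	// what makes the blocker->blocked check in block() safe.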
	void Thread::wake_nolock()
	{
		blocked_on = NULL;

		if (runqueue_node.empty()) {
			add();
			need_resched = 1;
		}
	}

	void Thread::wake()
	{
		ulong irq = sched.runqueue_lock.lock_recirq();
		wake_nolock();
		sched.runqueue_lock.unlock_recirq(irq);

		if (ll_get_int_state() && need_resched)
			sched.schedule();
	}
	void Thread::charge(Time::Time &now)
	{
		Time::Time ival;
		ival = now - last_time;
		last_time = now;

		if (ival.seconds != 0) {
			// Far longer than any slice; treat it as used up.
			time_left = 0;
		} else {
			time_left -= ival.nanos;

			if (time_left < 0)
				time_left = 0;
		}

		if (!blocked_on && time_left == 0 && policy != FIFO) {
			if (policy == TimeShared) {
				ts_deplete();
			} else {
				// Round-robin: rotate to the back of this prio's queue.
				runqueue_node.del();
				sched.rt_runqueue[rt_prio].add_back(&runqueue_node);
			}
		}
	}
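	// Note that a FIFO thread is never depleted or requeued here; it
	// keeps the CPU until it blocks or a higher-priority thread becomes
	// runnable.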
	void Thread::set_aspace(Mem::ProcAddrSpace *aspace)
	{
		// FIXME: lock thread against scheduling; this temporary method
		// should be gone before SMP anyway.

		addr_space = active_addr_space = aspace;
		Arch::set_aspace(aspace);
	}
	void ThreadBlocker::wake()
	{
		blocked = false;
		thread->wake();
	}

	void CascadeBlocker::wake()
	{
		blocked = false;
		blocker->wake();
	}

	void ThreadBlocker::unblock()
	{
		blocked = false;
	}

	void CascadeBlocker::unblock()
	{
		blocked = false;
		blocker->unblock();
	}
	void WaitQueue::block(Blocker *blocker)
	{
		AutoSpinLockRecIRQ autolock(lock);
		blockers.add_back(&blocker->list_node);
	}

	void WaitQueue::unblock(Blocker *blocker)
	{
		AutoSpinLockRecIRQ autolock(lock);
		blocker->list_node.del();
	}

	bool WaitQueue::empty()
	{
		AutoSpinLockRecIRQ autolock(lock);
		return blockers.empty();
	}
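	// A blocker is chosen by thread priority: real-time priority
	// dominates, and among timeshared threads (rt_prio 0) the highest
	// ts_prio wins.  Ties go to the earliest blocker in the queue, so
	// equal-priority waiters are served FIFO.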
	Blocker *WaitQueue::unblock_one_nolock()
	{
		AutoSpinLock schedautolock(sched.runqueue_lock);

		int best_rt = -1;
		int best_ts = -1;
		Blocker *best = NULL;

		for (Util::List *node = blockers.next; node != &blockers;
		     node = node->next)
		{
			Blocker *b = node->listentry(Blocker, list_node);
			Thread *t = b->thread;

			if (best_rt < t->rt_prio) {
				best_rt = t->rt_prio;
				best_ts = t->ts_prio;
				best = b;
			}

			if (best_rt == t->rt_prio && best_rt == 0 && best_ts < t->ts_prio) {
				best_ts = t->ts_prio;
				best = b;
			}
		}

		if (best)
			best->list_node.del();

		return best;
	}
	Blocker *WaitQueue::unblock_one()
	{
		AutoSpinLockRecIRQ autolock(lock);
		return unblock_one_nolock();
	}
	bool WaitQueue::wake_one()
	{
		// The lock is held over the wake to make sure that we still
		// have a valid reference to best.  For external calls to
		// unblock_one(), the caller must use its own locks (or other
		// mechanisms) to ensure that the blocker is still valid.

		ulong irq = lock.lock_recirq();
		Blocker *best = unblock_one_nolock();

		if (best)
			best->wake();

		lock.unlock_recirq(irq);

		if (ll_get_int_state() && need_resched)
			sched.schedule();

		return best != NULL;
	}
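	// Lock order is the waitqueue lock first, then sched.runqueue_lock;
	// unblock_one_nolock() and wake_all() both follow it, so the two
	// locks can never be taken in opposite orders.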
	int WaitQueue::wake_all()
	{
		ulong irq = lock.lock_recirq();
		sched.runqueue_lock.lock();
		int count = 0;

		// Take the new head each iteration, as the current node is
		// unlinked inside the loop.
		for (Util::List *node = blockers.next; node != &blockers;
		     node = blockers.next)
		{
			Blocker *b = node->listentry(Blocker, list_node);
			b->unblock();
			b->thread->wake_nolock();
			b->list_node.del();
			count++;
		}

		sched.runqueue_lock.unlock();
		lock.unlock_recirq(irq);

		if (ll_get_int_state() && need_resched)
			sched.schedule();

		return count;
	}
}
extern "C" void exit_thread()
{
	Threads::curthread->exit();

	// Thread teardown is not implemented yet; getting here is fatal.
	assertl(0, Assert::Always);
}
extern "C" void sched_new_thread()
{
	Threads::sched.sched_new_thread();
}
extern "C" void schedule()
{
	Threads::sched.schedule();
}