// core/thread.cc -- Scheduler and thread creation/destruction
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors or contributors be held liable for any damages
// arising from the use of this software.
//
// Permission is hereby granted to everyone, free of charge, to use, copy,
// modify, prepare derivative works of, publish, distribute, perform,
// sublicense, and/or sell copies of the Software, provided that the above
// copyright notice and disclaimer of warranty be included in all copies or
// substantial portions of this software.

#include <kern/thread.h>
#include <kern/pagealloc.h>
#include <kern/time.h>
#include <kern/arch.h>
#include <kern/irq.h>
#include <lowlevel/bitops.h>

using namespace Lock;

namespace Threads {
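        // Overview: threads are either real-time (rt_prio > 0, strict
        // priority) or time-shared (policy TimeShared, rt_prio == 0; see
        // the asserts in best_ts()).  Time-shared threads also carry a
        // dynamic ts_prio derived from their remaining timeslice.
        // Per-priority run queues plus find-first-set bitmaps let the
        // scheduler locate the best runnable thread without scanning the
        // queues themselves.
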
        Thread *Sched::best_rt(int prio)
        {
                Util::List *rq = &rt_runqueue[prio];
                assert(!rq->empty());
        
                return rq->next->listentry(Thread, runqueue_node);
        }
        
        void Sched::replenish_all()
        {
                // All runnable tasks have depleted timeslices, so mark
                // all depleted prios active.  Replenishment will happen
                // lazily.
        
                last_replenish++;
                ts_bitmap = ts_depleted_bitmap;
                ts_depleted_bitmap = 0;
        }
        
        void Sched::replenish_prio(int prio)
        {
                // Move the depleted marker to the end, effectively moving
                // all other tasks to the active list at once.
                
                ts_depleted[prio].del();
                ts_runqueue[prio].add_back(&ts_depleted[prio]);
        }
        
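        // Each time-shared priority level keeps a single circular list
        // holding both active and depleted threads, separated by the
        // dummy node ts_depleted[prio]: threads between the
        // ts_runqueue[prio] head and the marker are active, and threads
        // between the marker and the head are depleted.  add_back() on
        // the marker therefore appends to the active portion, while
        // add_back() on the head appends to the depleted portion, and
        // replenish_prio() can reactivate a whole level just by moving
        // the marker.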
        void Thread::ts_add()
        {
                assert(runqueue_node.empty());
        
                if (time_left) {
                        // This puts it at the end of the active list, not depleted.
                        sched.ts_depleted[ts_prio].add_back(&runqueue_node);
                } else {
                        sched.ts_runqueue[ts_prio].add_back(&runqueue_node);
                }
                
                sched.ts_bitmap |= 1 << (Sched::ts_prios - 1 - ts_prio);
        }
        
        void Thread::ts_del()
        {
                runqueue_node.del();
                
                if (sched.ts_runqueue[ts_prio].next == &sched.ts_depleted[ts_prio])
                        sched.ts_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
                if (sched.ts_depleted[ts_prio].next == &sched.ts_runqueue[ts_prio])
                        sched.ts_depleted_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
        }
        
        void Thread::ts_deplete()
        {
                runqueue_node.del();

                if (sched.ts_runqueue[ts_prio].next == &sched.ts_depleted[ts_prio])
                        sched.ts_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
                
                // This puts it at the end of the depleted list, not active.
                sched.ts_runqueue[ts_prio].add_back(&runqueue_node);
                sched.ts_depleted_bitmap |= 1 << (Sched::ts_prios - 1 - ts_prio);
        }

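        // The global bitmap is indexed so that higher priorities occupy
        // lower bit positions (bit rt_prios - 1 - rt_prio), which lets a
        // multiword find-first-set return the highest runnable priority.
        // All time-shared threads have rt_prio 0 and thus share bit
        // rt_prios - 1.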
        inline void Thread::add()
        {
                if (policy == TimeShared) {
                        prio_adjust();
                        ts_add();
                } else {
                        sched.rt_runqueue[rt_prio].add_back(&runqueue_node);
                }
                
                ll_multiword_set_bit(sched.bitmap, Sched::rt_prios - 1 - rt_prio);
        }
        
        inline void Thread::del()
        {
                if (policy == TimeShared) {
                        ts_del();

                        if (!sched.ts_bitmap && !sched.ts_depleted_bitmap)
                                ll_multiword_clear_bit(sched.bitmap, Sched::rt_prios - 1);
                } else {
                        runqueue_node.del();
                        
                        if (sched.rt_runqueue[rt_prio].empty())
                                ll_multiword_clear_bit(sched.bitmap, Sched::rt_prios - 1 - rt_prio);
                }
        }
        
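        // A thread's full timeslice scales linearly with its priority:
        // prio * default_timeslice / 8, so priority 8 corresponds to
        // exactly one default timeslice.  slice_to_prio() is the inverse,
        // used by prio_adjust() to turn the remaining time_left into a
        // dynamic priority.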
        inline u32 Sched::prio_to_slice(int prio)
        {
                assert(prio >= 1 && prio < ts_prios);
                return prio * default_timeslice / 8;
        }
        
        int Sched::slice_to_prio(u32 slice)
        {
                return slice * 8 / default_timeslice;
        }
        
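        // Lazy replenishment: sched.last_replenish is a generation count
        // bumped by replenish_all().  A thread that has been off the run
        // queue catches up here, gaining time_slice per missed generation
        // with the leftover halved each time:
        //
        //         time_left = time_left / 2 + time_slice
        //
        // a recurrence that converges toward 2 * time_slice.  After more
        // than 3 missed generations the loop is skipped and the
        // near-saturated value (2 * time_slice - 1) is assigned directly.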
        void Thread::prio_adjust()
        {
                assert(runqueue_node.empty());
        
                if (last_replenish != sched.last_replenish) {
                        if (sched.last_replenish - last_replenish > 3) {
                                time_left = time_slice * 2 - 1;
                        } else while (sched.last_replenish != last_replenish) {
                                time_left = time_left / 2 + time_slice;
                                assert(time_left < (s32)time_slice * 2);
                                last_replenish++;
                        }
                        
                        last_replenish = sched.last_replenish;

                        int new_prio = Sched::slice_to_prio(time_left);
                        
                        if (!(new_prio >= ts_static_prio && new_prio < Sched::ts_prios)) {
                                printf("new prio %d, static %d, time left %d\n",
                                       new_prio, ts_static_prio, time_left);
                        }

                        assert(new_prio >= ts_static_prio && new_prio < Sched::ts_prios);
                        ts_prio = new_prio;
                }
        }
        
        void Thread::replenish()
        {
                assert(!runqueue_node.empty());
        
                if (last_replenish != sched.last_replenish) {
                        assert(time_left == 0);
                        time_left = time_slice;
                        last_replenish = sched.last_replenish;

                        if (ts_static_prio != ts_prio) {
                                assert(ts_static_prio < ts_prio);
                                ts_del();
                                ts_prio = ts_static_prio;
                                ts_add();
                        }
                }
        }
        
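        // Pick the best time-shared thread.  ts_bitmap covers only the
        // levels that have active (non-depleted) threads; if it is empty,
        // every runnable time-shared thread has used up its slice and a
        // new replenish generation begins.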
        Thread *Sched::best_ts()
        {
                if (!ts_bitmap)
                        replenish_all();
                
                assert(ts_bitmap);
                int best = ts_prios - 1 - ll_ffs(ts_bitmap);
                assert(best >= 1 && best < ts_prios);
                
                if (ts_runqueue[best].next == &ts_depleted[best])
                        replenish_prio(best);
                
                assert(ts_runqueue[best].next != &ts_depleted[best]);
                assert(!ts_runqueue[best].empty());
                Thread *t = ts_runqueue[best].next->listentry(Thread, runqueue_node);
                
                assert(!t->blocked_on);
                assert(t->policy == Thread::TimeShared);
                assert(t->rt_prio == 0);
                assert(t->ts_prio == best);
                
                // The replenish can lower the thread's priority if it was
                // boosted; in some cases, this may mean that a different
                // thread is now the highest-priority thread.  As these
                // aren't real-time threads, and as priorities mainly
                // determine which threads can take the CPU immediately on
                // wakeup rather than which CPU hog goes first, it's not
                // important enough to worry about; the new priority will
                // be used on the next schedule.
                
                t->replenish();
                return t;
        }
        
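        // Core dispatch: charge the outgoing thread for the CPU it used,
        // then pick the next thread to run.  rt == rt_prios means the
        // bitmap is empty (nothing runnable), so the idle thread
        // (Arch::init_thread) is chosen; rt == 0 means only time-shared
        // threads are runnable.  For a non-idle switch, the resched
        // timer is armed so that need_resched is raised when the new
        // thread's remaining slice runs out.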
        void Sched::schedule_nolock()
        {
                assert(!IRQ::in_irq);
                need_resched = 0;
                int rt = rt_prios - 1 - ll_multiword_ffs(bitmap, 0, rt_prios);
                Thread *best;
                
                Time::Time now;
                Time::monotonic_clock.get_time(&now);
                
                if (curthread != Arch::init_thread)
                        curthread->charge(now);
                
                if (rt == rt_prios)
                        best = Arch::init_thread;
                else if (rt == 0)
                        best = best_ts();
                else
                        best = best_rt(rt);
                        
                if (best != curthread) {
                        if (best != Arch::init_thread) {
                                best->last_time = now;
                                Time::Time expiry = now + best->time_left;
                                resched_timer.arm(expiry);
                        }
                        
                        Arch::switch_thread(best, curthread);
                }
        }

        void Sched::schedule()
        {
                AutoSpinLockRecIRQ autolock(runqueue_lock);
                schedule_nolock();
        }
        
        void Sched::sched_new_thread()
        {
                runqueue_lock.unlock_irq();
        }
        
        Thread *Sched::new_thread(thread_func func, void *arg, char *name)
        {
                // Allocate a page for the thread's stack, and stick the thread
                // struct at the top of the stack.  It's placed at the top rather
                // than the bottom to facilitate the possible eventual use of guard
                // pages.
        
                Mem::Page *page = Mem::PageAlloc::alloc(1);
                ulong addr = reinterpret_cast<ulong>(Mem::page_to_kvirt(page));
                addr += Arch::ArchThread::size - thread_size;
                
                Thread *t = reinterpret_cast<Thread *>(addr);
                
                // Use placement new to run the constructors of lists
                // (and any other self-constructing type that may be
                // added).
                
                new(t) Thread;
                
                // FIXME: settable schedparams

                t->time_left = Sched::default_timeslice;
                t->policy = Thread::TimeShared;
                t->rt_prio = 0;
                t->ts_static_prio = 8;
                t->ts_prio = 8;
                t->time_slice = prio_to_slice(t->ts_prio);
                t->blocked_on = NULL;
                t->last_replenish = 0;
                t->addr_space = NULL;
                t->active_addr_space = NULL;
                
                t->arch.init(reinterpret_cast<void *>(func), arg);
                
                if (name)
                        strncpy(t->name, name, Thread::name_len);
                
                threadlist_lock.lock_irq();
                threadlist.add_back(&t->threadlist_node);
                threadlist_lock.unlock_irq();

                return t;
        }
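        
        // A minimal usage sketch (illustrative only: "worker" and
        // "worker_arg" are hypothetical names, and thread_func is
        // assumed to take a single void * argument):
        //
        //         void worker(void *arg);
        //
        //         Thread *t = Threads::sched.new_thread(worker, worker_arg,
        //                                               "worker thread");
        //         t->wake();      // new_thread() does not enqueue the thread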
        
        static void do_resched_timer(Time::KTimerEntry *timer)
        {
                need_resched = 1;
        }
        
        void Sched::init()
        {
                assert(curthread == Arch::init_thread);

                resched_timer.mux = Time::monotonic_timers;
                resched_timer.func = do_resched_timer;
                resched_timer.data = NULL;
                
                for (int i = 0; i < ts_prios; i++)
                        ts_runqueue[i].add_front(&ts_depleted[i]);
                
                strcpy(curthread->name, "idle thread");
        }
        
        Thread::~Thread()
        {
                sched.threadlist_lock.lock_irq();
                threadlist_node.del();
                sched.threadlist_lock.unlock_irq();
        
                ulong addr = reinterpret_cast<ulong>(this);
                addr &= ~(Arch::ArchThread::size - 1);
                Mem::Page *page = Mem::kvirt_to_page(reinterpret_cast<void *>(addr));
                Mem::PageAlloc::free(page, 1);
        }
        
        void Thread::exit()
        {
                printf("thread %p exiting...\n", this);
                for(;;);
        }

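        // Put the thread to sleep on a blocker.  blocker->blocked is
        // tested under the runqueue lock so that a racing wake() cannot
        // be lost: if the wakeup has already happened, blocked is false
        // and the thread simply stays runnable.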
        void Thread::block(ThreadBlocker *blocker)
        {
                AutoSpinLockRecIRQ autolock(sched.runqueue_lock);
                assert(!runqueue_node.empty());
                assert(!blocked_on);
                assert(!IRQ::in_irq);

                if (blocker->blocked) {
                        blocked_on = blocker;
                        del();
                        assert(runqueue_node.empty());
                        sched.schedule_nolock();
                }
        }
        
        void Thread::wake_nolock()
        {
                blocked_on = NULL;

                if (runqueue_node.empty()) {
                        add();
                        need_resched = 1;
                }
        }
        
        void Thread::wake()
        {
                ulong irq = sched.runqueue_lock.lock_recirq();
                wake_nolock();
                sched.runqueue_lock.unlock_recirq(irq);

                if (ll_get_int_state() && need_resched)
                        sched.schedule();
        }
        
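        // Charge the thread for the CPU time consumed since last_time.
        // Intervals of a second or more simply exhaust the slice.  When
        // a runnable thread's slice reaches zero, time-shared threads
        // move to the depleted portion of their queue, FIFO threads keep
        // their position, and other real-time threads rotate to the tail
        // of their priority's queue.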
        void Thread::charge(Time::Time &now)
        {
                Time::Time ival;
                ival = now - last_time;
                last_time = now;
                
                if (ival.seconds != 0) {
                        time_left = 0;
                } else {
                        time_left -= ival.nanos;

                        if (time_left < 0)
                                time_left = 0;
                }
                
                if (!blocked_on && time_left == 0 && policy != FIFO) {
                        if (policy == TimeShared) {
                                ts_deplete();
                        } else {
                                del();
                                add();
                        }
                }
        }
        
        void Thread::set_aspace(Mem::AddrSpace *aspace)
        {
                // FIXME: lock thread against scheduling; this temporary method should
                // be gone before SMP anyway.
                
                ll_ints_off();
                addr_space = active_addr_space = aspace;
                Arch::set_aspace(aspace);
                ll_ints_on();
        }
        
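        // Blockers tie wakeup sources to sleeping threads.  A
        // ThreadBlocker wakes its thread directly; a CascadeBlocker
        // forwards the wakeup to another blocker, allowing wait
        // conditions to be composed.  unblock() marks the condition
        // satisfied without waking anyone; wake_all() below uses it and
        // performs the wakeups itself.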
        void ThreadBlocker::wake()
        {
                blocked = false;
                thread->wake();
        }
        
        void CascadeBlocker::wake()
        {
                blocked = false;
                blocker->wake();
        }

        void ThreadBlocker::unblock()
        {
                blocked = false;
        }
        
        void CascadeBlocker::unblock()
        {
                blocked = false;
                blocker->unblock();
        }
        
        void WaitQueue::block(Blocker *blocker)
        {
                AutoSpinLockRecIRQ autolock(lock);
                blockers.add_back(&blocker->list_node);
        }
        
        void WaitQueue::unblock(Blocker *blocker)
        {
                AutoSpinLockRecIRQ autolock(lock);
                blocker->list_node.del();
        }

        bool WaitQueue::empty()
        {
                AutoSpinLockRecIRQ autolock(lock);
                return blockers.empty();
        }
        
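        // Choose which blocker to release: the highest rt_prio wins,
        // with ts_prio as the tie-breaker among time-shared threads
        // (rt_prio 0).  This is a linear scan; the runqueue lock is held
        // so the priorities can't change underneath us.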
        Blocker *WaitQueue::unblock_one_nolock()
        {
                AutoSpinLock schedautolock(sched.runqueue_lock);
                
                int best_rt = -1;
                int best_ts = 0;
                Blocker *best = NULL;
                
                for (Util::List *node = blockers.next; node != &blockers;
                     node = node->next)
                {
                        Blocker *b = node->listentry(Blocker, list_node);
                        Thread *t = b->thread;

                        if (best_rt < t->rt_prio) {
                                best_rt = t->rt_prio;
                                best_ts = t->ts_prio;
                                best = b;
                        }
                        
                        if (best_rt == t->rt_prio && best_rt == 0 && best_ts < t->ts_prio) {
                                best_ts = t->ts_prio;
                                best = b;
                        }
                }

                if (best)
                        best->list_node.del();
                
                return best;
        }
        
        Blocker *WaitQueue::unblock_one()
        {
                AutoSpinLockRecIRQ autolock(lock);
                return unblock_one_nolock();
        }

        bool WaitQueue::wake_one()
        {
                // The lock is held over the wake to make sure that we still
                // have a valid reference to best.  For external calls to
                // unblock_one(), the caller must use its own locks (or other
                // mechanisms) to ensure that the blocker is still valid.
        
                ulong irq = lock.lock_recirq();
                Blocker *best = unblock_one_nolock();
                
                if (best)
                        best->wake();
                
                lock.unlock_recirq(irq);
                
                if (ll_get_int_state() && need_resched)
                        sched.schedule();

                // Report whether a blocker was actually woken.
                return best != NULL;
        }

        int WaitQueue::wake_all()
        {
                ulong irq = lock.lock_recirq();
                sched.runqueue_lock.lock();
                int count = 0;

                for (Util::List *node = blockers.next; node != &blockers;) {
                        Blocker *b = node->listentry(Blocker, list_node);
                        
                        // Advance before del(); del() leaves the node linked
                        // to itself (see the empty() asserts elsewhere), so
                        // reading node->next after deleting would spin here
                        // forever.
                        node = node->next;
                        
                        b->unblock();
                        b->thread->wake_nolock();
                        b->list_node.del();
                        count++;
                }
                
                sched.runqueue_lock.unlock();
                lock.unlock_recirq(irq);
                
                if (ll_get_int_state() && need_resched)
                        sched.schedule();

                return count;
        }
        
        Sched sched;
}

extern "C" void exit_thread()
{
        curthread->exit();
        assertl(0, Assert::Always);
}

extern "C" void sched_new_thread()
{
        Threads::sched.sched_new_thread();
}

extern "C" void schedule()
{
        Threads::sched.schedule();
}

int need_resched;