// core/thread.cc -- Scheduler and thread creation/destruction
//
// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal with
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
// of the Software, and to permit persons to whom the Software is furnished to do
// so, subject to the following condition:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
// CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
// SOFTWARE.

#include <kern/thread.h>
#include <kern/pagealloc.h>
#include <kern/time.h>
#include <kern/arch.h>
#include <kern/irq.h>
#include <lowlevel/bitops.h>

using namespace Lock;

namespace Threads {
	Thread *Sched::best_rt(int prio)
	{
		Util::List *rq = &rt_runqueue[prio];
		assert(!rq->empty());

		return rq->next->listentry(Thread, runqueue_node);
	}

	void Sched::replenish_all()
	{
		// All runnable tasks have depleted timeslices, so mark
		// all depleted prios active.  Replenishment will happen
		// lazily.

		last_replenish++;
		ts_bitmap = ts_depleted_bitmap;
		ts_depleted_bitmap = 0;
	}

	void Sched::replenish_prio(int prio)
	{
		// Move the depleted marker to the end, effectively moving
		// all other tasks to the active list at once.

		ts_depleted[prio].del();
		ts_runqueue[prio].add_back(&ts_depleted[prio]);
	}

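	// Each time-shared priority level is kept as a single circular list
	// headed at ts_runqueue[prio], with ts_depleted[prio] linked in as a
	// sentinel that splits it in two:
	//
	//   ts_runqueue[prio] -> [active threads...] -> ts_depleted[prio]
	//                     -> [depleted threads...] -> (back to head)
	//
	// Adding in front of the sentinel appends to the active segment;
	// adding in front of the list head appends to the depleted segment.
	// This is what lets replenish_prio() reactivate an entire queue just
	// by moving the sentinel.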
	void Thread::ts_add()
	{
		assert(runqueue_node.empty());

		if (time_left) {
			// This puts it at the end of the active list, not depleted.
			sched.ts_depleted[ts_prio].add_back(&runqueue_node);
		} else {
			sched.ts_runqueue[ts_prio].add_back(&runqueue_node);
		}

		sched.ts_bitmap |= 1 << (Sched::ts_prios - 1 - ts_prio);
	}

	void Thread::ts_del()
	{
		runqueue_node.del();

		if (sched.ts_runqueue[ts_prio].next == &sched.ts_depleted[ts_prio])
			sched.ts_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
		if (sched.ts_depleted[ts_prio].next == &sched.ts_runqueue[ts_prio])
			sched.ts_depleted_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));
	}

	void Thread::ts_deplete()
	{
		runqueue_node.del();

		if (sched.ts_runqueue[ts_prio].next == &sched.ts_depleted[ts_prio])
			sched.ts_bitmap &= ~(1 << (Sched::ts_prios - 1 - ts_prio));

		// This puts it at the end of the depleted list, not active.
		sched.ts_runqueue[ts_prio].add_back(&runqueue_node);
		sched.ts_depleted_bitmap |= 1 << (Sched::ts_prios - 1 - ts_prio);
	}

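	// sched.bitmap covers all real-time priorities, with priority p
	// stored at bit (rt_prios - 1 - p) so that a find-first-set scan
	// yields the highest runnable priority.  All time-shared threads
	// share rt_prio 0 (the last bit); the per-priority ts bitmaps are
	// only consulted once that bit wins.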
	inline void Thread::add()
	{
		if (policy == TimeShared) {
			prio_adjust();
			ts_add();
		} else {
			sched.rt_runqueue[rt_prio].add_back(&runqueue_node);
		}

		ll_multiword_set_bit(sched.bitmap, Sched::rt_prios - 1 - rt_prio);
	}

	inline void Thread::del()
	{
		if (policy == TimeShared) {
			ts_del();

			if (!sched.ts_bitmap && !sched.ts_depleted_bitmap)
				ll_multiword_clear_bit(sched.bitmap, Sched::rt_prios - 1);
		} else {
			runqueue_node.del();

			if (sched.rt_runqueue[rt_prio].empty())
				ll_multiword_clear_bit(sched.bitmap, Sched::rt_prios - 1 - rt_prio);
		}
	}

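	// Priority and timeslice are linearly related: priority p gets a
	// slice of p * default_timeslice / 8, so the default priority of 8
	// corresponds to exactly one default_timeslice.  slice_to_prio()
	// is the inverse mapping, used to derive an effective priority from
	// the time a thread has left.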
	inline u32 Sched::prio_to_slice(int prio)
	{
		assert(prio >= 1 && prio < ts_prios);
		return prio * default_timeslice / 8;
	}

	int Sched::slice_to_prio(u32 slice)
	{
		return slice * 8 / default_timeslice;
	}

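	// Catch a thread that slept through one or more replenish periods
	// up with the current period.  Each missed period halves the
	// remaining time and adds one full slice, so time_left converges
	// from below on 2 * time_slice (starting from zero: time_slice,
	// 1.5x, 1.75x, ...).  After more than three missed periods the
	// loop is close enough to that limit to be skipped, and time_left
	// is set just under the bound.  The effective priority then tracks
	// the accumulated time, boosting threads that have been sleeping.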
	void Thread::prio_adjust()
	{
		assert(runqueue_node.empty());

		if (last_replenish != sched.last_replenish) {
			if (sched.last_replenish - last_replenish > 3) {
				time_left = time_slice * 2 - 1;
			} else while (sched.last_replenish != last_replenish) {
				time_left = time_left / 2 + time_slice;
				assert(time_left < (s32)time_slice * 2);
				last_replenish++;
			}

			last_replenish = sched.last_replenish;

			int new_prio = Sched::slice_to_prio(time_left);

			if (!(new_prio >= ts_static_prio && new_prio < Sched::ts_prios)) {
				printf("new prio %d, static %d, time left %d\n",
				       new_prio, ts_static_prio, time_left);
			}

			assert(new_prio >= ts_static_prio && new_prio < Sched::ts_prios);
			ts_prio = new_prio;
		}
	}

	void Thread::replenish()
	{
		assert(!runqueue_node.empty());

		if (last_replenish != sched.last_replenish) {
			assert(time_left == 0);
			time_left = time_slice;
			last_replenish = sched.last_replenish;

			if (ts_static_prio != ts_prio) {
				assert(ts_static_prio < ts_prio);
				ts_del();
				ts_prio = ts_static_prio;
				ts_add();
			}
		}
	}

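	// Pick the best time-shared thread.  Replenishment happens lazily
	// here rather than on a timer: if no priority has an active thread,
	// a new global period is started, and if the chosen priority's
	// active segment is empty, the sentinel trick above reactivates its
	// depleted threads in O(1).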
	Thread *Sched::best_ts()
	{
		if (!ts_bitmap)
			replenish_all();

		assert(ts_bitmap);
		int best = ts_prios - 1 - ll_ffs(ts_bitmap);
		assert(best >= 1 && best < ts_prios);

		if (ts_runqueue[best].next == &ts_depleted[best])
			replenish_prio(best);

		assert(ts_runqueue[best].next != &ts_depleted[best]);
		assert(!ts_runqueue[best].empty());
		Thread *t = ts_runqueue[best].next->listentry(Thread, runqueue_node);

		assert(!t->blocked_on);
		assert(t->policy == Thread::TimeShared);
		assert(t->rt_prio == 0);
		assert(t->ts_prio == best);

		// The replenish can lower the thread's priority if it was
		// boosted; in some cases this may mean that a different thread
		// is now the highest-priority thread.  As these aren't
		// real-time threads, and as priorities mainly determine which
		// threads can take the CPU immediately on wakeup rather than
		// which CPU hog goes first, it's not important enough to worry
		// about; the new priority will be used on the next schedule.

		t->replenish();
		return t;
	}

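	// Core scheduling decision, called with the runqueue lock held.
	// The global bitmap scan yields the best real-time priority:
	// rt_prios means nothing is runnable (run the idle/init thread),
	// 0 means only time-shared threads are runnable, and anything else
	// selects a real-time runqueue directly.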
	void Sched::schedule_nolock()
	{
		assert(!IRQ::in_irq);
		need_resched = 0;
		int rt = rt_prios - 1 - ll_multiword_ffs(bitmap, 0, rt_prios);
		Thread *best;

		Time::Time now;
		Time::monotonic_clock.get_time(&now);

		if (curthread != Arch::init_thread)
			curthread->charge(now);

		if (rt == rt_prios)
			best = Arch::init_thread;
		else if (rt == 0)
			best = best_ts();
		else
			best = best_rt(rt);

		if (best != curthread) {
			if (best != Arch::init_thread) {
				best->last_time = now;
				Time::Time expiry = now + best->time_left;
				resched_timer.arm(expiry);
			}

			Arch::switch_thread(best, curthread);
		}
	}

	void Sched::schedule()
	{
		AutoSpinLockRecIRQ autolock(runqueue_lock);
		schedule_nolock();
	}

	void Sched::sched_new_thread()
	{
		runqueue_lock.unlock_irq();
	}

	Thread *Sched::new_thread(thread_func func, void *arg, char *name)
	{
		// Allocate a page for the thread's stack, and stick the thread
		// struct at the top of the stack.  It's placed at the top rather
		// than the bottom to facilitate the possible eventual use of guard
		// pages.

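		// Resulting layout, from the top of the allocation down
		// (Arch::ArchThread::size is the power-of-two size of the
		// whole allocation; ~Thread() relies on that to recover the
		// page base with a mask):
		//
		//   [ Thread struct (thread_size bytes) ]  <- t
		//   [ kernel stack, growing downward    ]
		//   [ ... toward the page base          ]
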
		Mem::Page *page = Mem::PageAlloc::alloc(1);
		ulong addr = reinterpret_cast<ulong>(Mem::page_to_kvirt(page));
		addr += Arch::ArchThread::size - thread_size;

		Thread *t = reinterpret_cast<Thread *>(addr);

		// Use placement new to run the constructors of lists
		// (and any other self-constructing type that may be
		// added).

		new(t) Thread;

		// FIXME: settable schedparams

		t->time_left = Sched::default_timeslice;
		t->policy = Thread::TimeShared;
		t->rt_prio = 0;
		t->ts_static_prio = 8;
		t->ts_prio = 8;
		t->time_slice = prio_to_slice(t->ts_prio);
		t->blocked_on = NULL;
		t->last_replenish = 0;
		t->addr_space = NULL;
		t->active_addr_space = NULL;

		t->arch.init(reinterpret_cast<void *>(func), arg);

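		// Note that strncpy() does not NUL-terminate on truncation;
		// names of name_len characters or more are assumed to be
		// handled (or avoided) by whoever reads the name field.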
		if (name)
			strncpy(t->name, name, Thread::name_len);

		threadlist_lock.lock_irq();
		threadlist.add_back(&t->threadlist_node);
		threadlist_lock.unlock_irq();

		return t;
	}

	static void do_resched_timer(Time::KTimerEntry *timer)
	{
		need_resched = 1;
	}

	void Sched::init()
	{
		assert(curthread == Arch::init_thread);

		resched_timer.mux = Time::monotonic_timers;
		resched_timer.func = do_resched_timer;
		resched_timer.data = NULL;

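		// Plant the depleted sentinel at the head of each time-shared
		// runqueue; see the layout comment above ts_add().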
		for (int i = 0; i < ts_prios; i++)
			ts_runqueue[i].add_front(&ts_depleted[i]);

		strcpy(curthread->name, "idle thread");
	}

	Thread::~Thread()
	{
		sched.threadlist_lock.lock_irq();
		threadlist_node.del();
		sched.threadlist_lock.unlock_irq();

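		// Recover the base of the allocation made in new_thread() by
		// masking off the low bits; this assumes the allocation is
		// ArchThread::size-aligned and that size is a power of two.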
		ulong addr = reinterpret_cast<ulong>(this);
		addr &= ~(Arch::ArchThread::size - 1);
		Mem::Page *page = Mem::kvirt_to_page(reinterpret_cast<void *>(addr));
		Mem::PageAlloc::free(page, 1);
	}

	void Thread::exit()
	{
		printf("thread %p exiting...\n", this);
		for (;;);
	}

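	// Block on the given blocker.  blocker->blocked is re-checked under
	// the runqueue lock: if a wake raced in after the caller queued the
	// blocker but before this point, blocked will already be false and
	// the thread simply stays runnable, avoiding a lost wakeup.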
	void Thread::block(ThreadBlocker *blocker)
	{
		AutoSpinLockRecIRQ autolock(sched.runqueue_lock);
		assert(!runqueue_node.empty());
		assert(!blocked_on);
		assert(!IRQ::in_irq);

		if (blocker->blocked) {
			blocked_on = blocker;
			del();
			assert(runqueue_node.empty());
			sched.schedule_nolock();
		}
	}

	void Thread::wake_nolock()
	{
		blocked_on = NULL;

		if (runqueue_node.empty()) {
			add();
			need_resched = 1;
		}
	}

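	// Wake the thread, rescheduling immediately when it is safe to do
	// so.  If interrupts are disabled the caller is in some atomic
	// region, so the switch is deferred; need_resched stays set for
	// whoever checks it next.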
	void Thread::wake()
	{
		ulong irq = sched.runqueue_lock.lock_recirq();
		wake_nolock();
		sched.runqueue_lock.unlock_recirq(irq);

		if (ll_get_int_state() && need_resched)
			sched.schedule();
	}

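	// Charge the time elapsed since the thread last started running
	// against its timeslice.  time_left is kept in nanoseconds, so any
	// interval of a second or more necessarily exhausts the slice.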
	void Thread::charge(Time::Time &now)
	{
		Time::Time ival;
		ival = now - last_time;
		last_time = now;

		if (ival.seconds != 0) {
			time_left = 0;
		} else {
			time_left -= ival.nanos;

			if (time_left < 0)
				time_left = 0;
		}

		if (!blocked_on && time_left == 0 && policy != FIFO) {
			if (policy == TimeShared) {
				ts_deplete();
			} else {
				del();
				add();
			}
		}
	}

	void Thread::set_aspace(Mem::AddrSpace *aspace)
	{
		// FIXME: lock thread against scheduling; this temporary method should
		// be gone before SMP anyway.

		ll_ints_off();
		addr_space = active_addr_space = aspace;
		Arch::set_aspace(aspace);
		ll_ints_on();
	}

	void ThreadBlocker::wake()
	{
		blocked = false;
		thread->wake();
	}

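	// A CascadeBlocker wraps another Blocker rather than a thread
	// directly, so wakes and unblocks propagate down a chain until they
	// reach the ThreadBlocker at the end.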
	void CascadeBlocker::wake()
	{
		blocked = false;
		blocker->wake();
	}

	void ThreadBlocker::unblock()
	{
		blocked = false;
	}

	void CascadeBlocker::unblock()
	{
		blocked = false;
		blocker->unblock();
	}

	void WaitQueue::block(Blocker *blocker)
	{
		AutoSpinLockRecIRQ autolock(lock);
		blockers.add_back(&blocker->list_node);
	}

	void WaitQueue::unblock(Blocker *blocker)
	{
		AutoSpinLockRecIRQ autolock(lock);
		blocker->list_node.del();
	}

	bool WaitQueue::empty()
	{
		AutoSpinLockRecIRQ autolock(lock);
		return blockers.empty();
	}

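	// Remove and return the best blocker on the queue without waking
	// it: the highest rt_prio wins, and among time-shared threads
	// (rt_prio 0) the highest ts_prio breaks the tie.  The runqueue
	// lock is taken so the priorities can't shift mid-scan.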
	Blocker *WaitQueue::unblock_one_nolock()
	{
		AutoSpinLock schedautolock(sched.runqueue_lock);

		int best_rt = -1;
		int best_ts = 0;
		Blocker *best = NULL;

		for (Util::List *node = blockers.next; node != &blockers;
		     node = node->next)
		{
			Blocker *b = node->listentry(Blocker, list_node);
			Thread *t = b->thread;

			if (best_rt < t->rt_prio) {
				best_rt = t->rt_prio;
				best_ts = t->ts_prio;
				best = b;
			}

			if (best_rt == t->rt_prio && best_rt == 0 && best_ts < t->ts_prio) {
				best_ts = t->ts_prio;
				best = b;
			}
		}

		if (best)
			best->list_node.del();

		return best;
	}

	Blocker *WaitQueue::unblock_one()
	{
		AutoSpinLockRecIRQ autolock(lock);
		return unblock_one_nolock();
	}

	bool WaitQueue::wake_one()
	{
		// The lock is held over the wake to make sure that we still
		// have a valid reference to best.  For external calls to
		// unblock_one(), the caller must use its own locks (or other
		// mechanisms) to ensure that the blocker is still valid.

		ulong irq = lock.lock_recirq();
		Blocker *best = unblock_one_nolock();

		if (best)
			best->wake();

		lock.unlock_recirq(irq);

		if (ll_get_int_state() && need_resched)
			sched.schedule();

		return false;
	}

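	// Wake every blocker on the queue.  Both the queue lock and the
	// runqueue lock are held across the loop, so each blocker is
	// unblocked and its thread enqueued atomically; the actual context
	// switch is deferred until both locks have been dropped.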
	int WaitQueue::wake_all()
	{
		ulong irq = lock.lock_recirq();
		sched.runqueue_lock.lock();
		int count = 0;

		for (Util::List *node = blockers.next; node != &blockers;
		     node = node->next)
		{
			Blocker *b = node->listentry(Blocker, list_node);
			b->unblock();
			b->thread->wake_nolock();
			b->list_node.del();
			count++;
		}

		sched.runqueue_lock.unlock();
		lock.unlock_recirq(irq);

		if (ll_get_int_state() && need_resched)
			sched.schedule();

		return count;
	}

	Sched sched;
}

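// C-linkage wrappers, presumably so low-level architecture/assembly
// code can reach the scheduler through unmangled names.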
extern "C" void exit_thread()
{
	curthread->exit();
	assertl(0, Assert::Always);
}

extern "C" void sched_new_thread()
{
	Threads::sched.sched_new_thread();
}

extern "C" void schedule()
{
	Threads::sched.schedule();
}

int need_resched;