Except where indicated otherwise, this software is:
-Copyright (c) 2006 Scott Wood <scott@buserror.net>
+Copyright (c) 2007 Scott Wood <scott@buserror.net>
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors or contributors be held liable for any damages
Callee:
eax: pointer to PIB
- edx: pointer to caller information struct, if such was
- requested
+ edx: pointer to caller information struct, or NULL if not
+ requested.
+ esp: NULL.
Jump to the 32-bit address stored at 0x7fff0004 to return.
Upon return:
eax: pointer to exception, or NULL if none.
edx: length of exception segment, if any
- ebx, esi, edi, ebp, esp: should be preserved
- ecx: may be clobbered
+ ebx, esi, edi, ebp, esp: may be clobbered
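+
+	For example, a minimal caller (mirroring test_user in the x86
+	entry code below) might look like this sketch, where my_pib is
+	a hypothetical PIB the caller has already filled in:
+
+		movl $my_pib, %eax	// eax: pointer to PIB
+		call *0x7fff0000	// invoke-method vsyscall entry
+		// On return, eax/edx describe any exception; assume
+		// all other GPRs have been clobbered.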
Copy segments.
The total number of bytes in all of the segments that require a
- buffer to be created in the destination address space. This is
+ buffer to be created in the destination address space, with each
+ buffer individually rounded up to an 8-byte alignment. This is
specified so that the kernel can allocate one large buffer for all
- segments before traversing the segment list. When returning from a
- method, the buffer size only includes buffers allocated by the
- caller; "inout" segments where the caller specified a non-NULL ptr,
- and the callee did not increase the length, are not included
+ segments before traversing the segment list. When returning from
+ a method, the buffer size only includes buffers allocated by the
+ caller; "inout" segments where the caller specified a non-NULL
+ ptr, and the callee did not increase the length, are not included
(because the kernel does not need to allocate a caller-side buffer
- for them). The kernel may throw an exception if the actual
- size is greater than specified in this field.
+ for them). The kernel may throw an exception if the actual size
+ is greater than specified in this field.
This only covers the "normal" segments which are mapped only
for the duration of the call. Copy segments are handled
copy_size 1 Size of all Copy segments.
- This is like buffer_size, but for Copy segments. The pages
+ This is like buffer_size, but for Copy segments.
objlist 2 Pointer to the object list
objlist_len 3 Length of the object list, in IDs
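+
+	For example, two buffer-requiring segments of 10 and 21 bytes
+	round up to 16 and 24 bytes, so buffer_size must be at least 40.
+	A caller-side sketch of the calculation (assuming every segment
+	without the Copy or Inline flag needs a destination buffer):
+
+		size_t bufsize = 0;
+		for (size_t i = 0; i < pib->num_segments; i++)
+			if (!(pib->segments[i].flags &
+			      (Segment::Copy | Segment::Inline)))
+				bufsize += (pib->segments[i].len + 7) & ~(size_t)7;
+		pib->buffer_size = bufsize;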
// Address that the method tried to access
ulong addr;
- // Address of the faulting instruction
- ulong pc;
+ // Address of the faulting instruction, 0 if from kernel
+ ulong pc;
// Process of faulting method
Proc.Process proc;
// An I/O error occurred accessing a memory mapped region, or an
// uncorrectable memory error was encountered.
- IOError
+ IOError,
};
};
struct ParamInfoBlock {
uintptr_t buffer_size;
uintptr_t copy_size;
- uintptr_t *objlist_ptr;
+ uintptr_t *objlist;
uintptr_t objlist_len;
uintptr_t num_segments;
struct Segment {
- void *ptr;
+ unsigned char *ptr;
uintptr_t len;
uintptr_t flags;
uintptr_t reserved;
+
+ enum {
+ In = 1,
+ Out = 2,
+ Inline = 4,
+ Copy = 8
+ };
} segments[0];
};
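+
+// A sketch of a caller-built PIB with a single "In" segment (msg and
+// target_id are hypothetical; error handling omitted):
+//
+//	uintptr_t objlist[1] = { target_id };
+//	char msg[16];
+//
+//	struct {
+//		ParamInfoBlock pib;
+//		ParamInfoBlock::Segment seg[1];
+//	} p = {};
+//
+//	p.pib.buffer_size = (sizeof(msg) + 7) & ~7UL;
+//	p.pib.objlist = objlist;
+//	p.pib.objlist_len = 1;
+//	p.pib.num_segments = 1;
+//	p.seg[0].ptr = (unsigned char *)msg;
+//	p.seg[0].len = sizeof(msg);
+//	p.seg[0].flags = ParamInfoBlock::Segment::In;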
}
rotate_left(parent);
}
}
+
+ // RBPtr is a Pointer->Value associative array, and RBInt is an
+ // Integer->Value associative array.
+
+ template<typename Ptr, typename Val>
+ struct RBPtrNode {
+ typedef RBTree<RBPtrNode, Ptr, Ptr> Tree;
+ typename Tree::Node rbtree_node;
+ Val value;
+
+ intptr_t operator < (RBPtrNode &other)
+ {
+ return (intptr_t)other.rbtree_node.value -
+ (intptr_t)rbtree_node.value;
+ }
+
+ intptr_t operator > (RBPtrNode &other)
+ {
+ return (intptr_t)rbtree_node.value -
+ (intptr_t)other.rbtree_node.value;
+ }
+
+ operator Val &()
+ {
+ return value;
+ }
+ };
+
+ template<typename Ptr, typename Val>
+ struct RBPtr : public RBTree<RBPtrNode<Ptr, Val>, Ptr, Ptr>
+ {
+ typedef RBPtrNode<Ptr, Val> Node;
+ typedef RBTree<Node, Ptr, Ptr> Tree;
+
+ void add(Ptr ptr, Val &val)
+ {
+ Node *node = new Node;
+ node->value = val;
+ node->rbtree_node.value = ptr;
+ Tree::add(node);
+ }
+
+ void del(Ptr ptr)
+ {
+		delete Tree::find(ptr);
+ }
+ };
+
+ template<typename Int, typename Val>
+ struct RBIntNode {
+ typedef RBTree<RBIntNode, Int, Int> Tree;
+ typename Tree::Node rbtree_node;
+ Val value;
+
+ intptr_t operator < (RBIntNode &other)
+ {
+ return other.rbtree_node.value - rbtree_node.value;
+ }
+
+ intptr_t operator > (RBIntNode &other)
+ {
+ return rbtree_node.value - other.rbtree_node.value;
+ }
+
+ operator Val &()
+ {
+ return value;
+ }
+ };
+
+ template<typename Int, typename Val>
+ struct RBInt : public RBTree<RBIntNode<Int, Val>, Int, Int>
+ {
+ typedef RBIntNode<Int, Val> Node;
+ typedef RBTree<Node, Int, Int> Tree;
+
+ void add(Int key, Val &val)
+ {
+ Node *node = new Node;
+ node->value = val;
+ node->rbtree_node.value = key;
+ Tree::add(node);
+ }
+
+ void del(Int key)
+ {
+		delete Tree::find(key);
+ }
+ };
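+
+	// Usage sketch (assumes RBTree::find() returns the containing
+	// node, or NULL if the key is absent, as del() relies on;
+	// thread is a hypothetical Thread *):
+	//
+	//	Util::RBInt<uint, Thread *> timeouts;
+	//	timeouts.add(100, thread);	// key 100 -> thread
+	//
+	//	RBIntNode<uint, Thread *> *n = timeouts.find(100);
+	//	Thread *t = n ? n->value : NULL;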
}
#endif
x64_gdt[4].base_high = (tss_addr & 0xff000000) >> 24;
asm volatile("ltr %w0" : : "r" (0x20) : "memory");
- init_thread->addr_space = new Mem::AddrSpace(x64_init_ptbl_l4);
- init_thread->active_addr_space = init_thread->addr_space;
+ init_thread->aspace = new Mem::AddrSpace(x64_init_ptbl_l4);
+ init_thread->active_aspace = init_thread->aspace;
}
void timer_init()
goto bad_fault;
// Don't allow fault-ins using a borrowed addr-space.
- as = curthread->addr_space;
+ as = curthread->aspace;
if (!as || curthread == Arch::init_thread)
goto bad_fault;
{
u64 dummy1, dummy2;
- if (dest->addr_space) {
- assert(dest->addr_space == dest->active_addr_space);
+ if (dest->aspace) {
+ assert(dest->aspace == dest->active_aspace);
- if (dest->addr_space != src->active_addr_space) {
- u64 cr3 = Mem::kvirt_to_phys(dest->addr_space->
+ if (dest->aspace != src->active_aspace) {
+ u64 cr3 = Mem::kvirt_to_phys(dest->aspace->
page_table->toplevel);
asm volatile("movq %0, %%cr3" : : "r" (cr3) : "memory");
}
} else {
- dest->active_addr_space = src->active_addr_space;
+ dest->active_aspace = src->active_aspace;
}
Priv::tss.rsp[0] = reinterpret_cast<u64>(dest);
Descriptor x86_gdt[1024] = {
{}, // The first entry is reserved for the NULL selector.
- { // 0x08: data
+ { // 0x08: kernel data
limit_low: 0xffff,
base_low: 0,
base_mid: 0,
gran: 1,
base_high: 0
},
- { // 0x10: code
+ { // 0x10: kernel code
limit_low: 0xffff,
base_low: 0,
base_mid: 0,
opsize: 0,
gran: 0,
base_high: 0
- }
+ },
+ { // 0x23: user data
+ limit_low: 0xffff,
+ base_low: 0,
+ base_mid: 0,
+ type: 2, // data segment, writable
+ user: 1,
+ dpl: 3,
+ present: 1,
+		limit_high: 0x7,
+ sw: 0,
+ reserved: 0,
+ opsize: 1,
+ gran: 1,
+ base_high: 0
+ },
+	{ // 0x2b: user code
+ limit_low: 0xffff,
+ base_low: 0,
+ base_mid: 0,
+ type: 10, // code segment, readable
+ user: 1,
+ dpl: 3,
+ present: 1,
+		limit_high: 0x7,
+ sw: 0,
+ reserved: 0,
+ opsize: 1,
+ gran: 1,
+ base_high: 0
+ },
};
struct X86DescriptorTablePointer {
.org 0
.code32
.global _start
-.align 4
+ .align 4
multiboot_hdr:
.long 0x1badb002 // Multi-boot magic
movl $0x87, x86_init_ptbl_l2 + 0x800 - KERNEL_START
ljmp $0x10, $paging_on - KERNEL_START
+
-paging_on:
+paging_on:
lgdt x86_gdtr + 6
movl %esi, x86_boot_info_phys
.global x86_int99_entry
x86_int99_entry:
- pushl %ecx
+ pushl %edx
pushl %eax
enterkernel %edx
exitkernel
popl %eax
- popl %ecx
- xorl %edx, %edx
+ xorl %ecx, %ecx
+ popl %edx
xorl %ebx, %ebx
xorl %ebp, %ebp
xorl %esi, %esi
xorl %edi, %edi
call sched_new_thread
- pop %eax
+ popl %eax
call *%eax
call exit_thread
ud2a
+
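+// Entry point for new user threads: build an inter-privilege IRET
+// frame (user SS:ESP, EFLAGS with IF set, user CS:EIP) and drop to
+// userspace at the supplied entry point.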
+ .global arch_new_user_thread
+arch_new_user_thread:
+ movl 4(%esp), %eax // arg1: user entry
+ movl 8(%esp), %ecx // arg2: user stack
+ pushl $0x23 // SS
+ pushl %ecx // ESP
+ pushl $0x202 // EFLAGS
+ pushl $0x2b // CS
+ pushl %eax // EIP
+
+ movl $0x23, %ebx
+ movl %ebx, %ds
+ movl %ebx, %es
+
+ xorl %eax, %eax
+ xorl %ecx, %ecx
+ iret
- .section ".roshared","x"
+ .section ".roshared","ax"
+ .align 4096
// The vsyscall table must be the first thing in roshared
// (at vaddr 0x7fff0000).
int $0x99
ret
- .section ".rwshared",""
+ .global test_user_ptr
+ .align 4
+test_user_ptr:
+ .long ROSHARED_PTR(test_user)
+
+test_pib:
+ .long 0
+ .long 0
+ .long ROSHARED_PTR(test_objlist)
+ .long 1
+ .long 0
+
+test_objlist:
+ .long 0
+ .space 128
+
+test_user:
+ movl $ROSHARED_PTR(test_pib), %eax
+ call *0x7fff0000
+1: jmp 1b
+
+ .section ".rwshared","a"
+ .align 4096
// Userspace must set this to an appropriate entry point
// prior to registering objects with the kernel. It should
LONG(0)
ctors_end = .;
*(.irqs)
+ *(.excode)
} :all
. = (. + 0xfff) & ~ 0xfff;
roshared_start = .;
. = . + 1;
data_start = .;
.data : { *(.data) *(.gnu.linkonce.d.*) } :all
+ .extable : {
+ extable_start = .;
+ *(.extable)
+ extable_end = .;
+ } :all
bss_start = .;
.bss : { *(.bss) } :all
bss_end = .;
extern u32 x86_init_ptbl_l2[1024];
+struct X86ExTable {
+ ulong faultaddr, handler;
+};
+
+extern X86ExTable extable_start, extable_end;
+
struct X86InitStack {
u8 stack[4096 - ::Threads::thread_size];
::Threads::Thread thread;
void show_regs(Regs *regs)
{
+ u32 esp = regs->cs & 3 ? regs->user_esp :
+ (u32)regs + (u32)sizeof(regs);
+
printf("eax: 0x%08x ecx: 0x%08x edx: 0x%08x ebx: 0x%08x\n"
"esp: 0x%08x ebp: 0x%08x esi: 0x%08x edi: 0x%08x\n"
"eflags: 0x%08x ds: %04x es: %04x ss: %04x\n",
- regs->eax, regs->ecx, regs->edx, regs->ebx,
- regs->cs & 3 ? regs->user_esp : (u32)regs + (u32)sizeof(regs),
+ regs->eax, regs->ecx, regs->edx, regs->ebx, esp,
regs->ebp, regs->esi, regs->edi, regs->eflags,
regs->ds, regs->es, regs->cs & 3 ? regs->user_ss : regs->ds);
-
+
printf("Stack trace: ");
u32 *frame = (u32 *)regs->ebp;
for (int i = 2; i < 32; i++) {
- u32 stackptr = frame[1];
- frame = (u32 *)frame[0];
-
if ((u32)frame < 0x80000000UL)
break;
+ u32 stackptr = frame[1];
+ frame = (u32 *)frame[0];
+
if (!(i % 7))
printf("\n");
printf("0x%08x ", stackptr);
}
+
+ printf("\nStack dump: ");
+
+ for (int i = 2; i < 32; i++) {
+ if (!(i % 7))
+ printf("\n");
+
+ printf("0x%08x ", ((u32 *)esp)[i-2]);
+ }
+
}
struct TimerInt : public IRQ::Interrupt {
};
TimerInt timer_int;
+
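+	// Scan the exception table (the .extable entries emitted by
+	// the usercopy helpers) for a fixup matching the faulting
+	// kernel EIP; on a match, resume at the fixup handler with the
+	// fault cause in edx.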
+ bool check_extable(Regs *regs, int cause)
+ {
+ X86ExTable *ext;
+ assert((regs->cs & 3) == 0);
+
+ for (ext = &extable_start; ext < &extable_end; ext++) {
+ if (ext->faultaddr == regs->eip) {
+ regs->eip = ext->handler;
+ regs->edx = cause;
+ return true;
+ }
+ }
+
+ return false;
+ }
}
using IRQ::i8259;
Priv::tss.ss0 = 8;
asm volatile("ltr %w0" : : "r" (0x18) : "memory");
- init_thread->addr_space = new Mem::ProcAddrSpace(x86_init_ptbl_l2);
- init_thread->active_addr_space = init_thread->addr_space;
+ init_thread->aspace = new Mem::ProcAddrSpace(x86_init_ptbl_l2);
+ init_thread->active_aspace = init_thread->aspace;
}
void timer_init()
IRQ::InterruptSlot *timer = i8259.get_slot(0);
i8259.request_int(timer, &Priv::timer_int);
}
-
- void ArchThread::init(void *entry, void *arg)
- {
- void **stack = reinterpret_cast<void **>(this);
-
- *--stack = arg;
- *--stack = entry;
-
- esp = stack;
- ebp = 0;
- jump_to_init = 1;
- }
}
using Arch::Priv::show_regs;
extern "C" void x86_do_page_fault(Regs *regs, u32 fault_addr, u32 error_code)
{
Mem::AddrSpace *as;
+ int cause;
if (in_fault)
for(;;);
goto bad_fault;
// Don't allow fault-ins using a borrowed addr-space.
- as = curthread->addr_space;
+ as = curthread->aspace;
if (!as || curthread == Arch::init_thread)
goto bad_fault;
ll_ints_on();
// FIXME: no-exec
- if (as->handle_fault(fault_addr, error_code & 2,
- false /* error_code & 16 */, error_code & 4))
+ cause = as->handle_fault(fault_addr, error_code & 2,
+ false /* error_code & 16 */,
+ error_code & 4);
+
+ if (cause < 0)
+ return;
+
+ if (!(error_code & 4) && check_extable(regs, cause))
return;
// FIXME: throw exception to user
namespace Arch {
namespace Priv {
struct OrbRegs {
+ u32 ds, es;
+
union {
System::RunTime::ParamInfoBlock *pib;
ulong exptr;
extern "C" void x86_invoke_method(Arch::Priv::OrbRegs *regs)
{
assert(regs->cs & 3);
- ORB::invoke_method(regs->pib, regs->user_esp);
+ printf("x86_invoke_method %p\n", regs->pib);
+
+ try {
+ ORB::invoke_method(regs->pib, regs->user_esp);
+ }
+
+ catch (SystemException &se) {
+ // Copy exception to user
+ // If an exception is thrown during dispatch, it must be
+ // with the caller's address space, and user_esp must be
+ // the caller's.
+
+ printf("sys exception\n");
+
+ typedef System::Exceptions::NativeCodeExceptionOriginInfo OriginInfo;
+ OriginInfo *oi = OriginInfo::downcast(se.origin);
+
+ if (oi)
+ printf("PC %llx\n", oi->pc);
+ }
+
+ catch (...) {
+ printf("other exception\n");
+ }
}
extern "C" void x86_return_from_method(Arch::Priv::OrbRegs *regs)
{
u32 dummy1, dummy2;
- if (dest->addr_space) {
- assert(dest->addr_space == dest->active_addr_space);
+ if (dest->aspace) {
+ assert(dest->aspace == dest->active_aspace);
- if (dest->addr_space != src->active_addr_space)
- set_aspace(dest->addr_space);
+ if (dest->aspace != src->active_aspace)
+ set_aspace(dest->aspace);
} else {
- dest->active_addr_space = src->active_addr_space;
+ dest->active_aspace = src->active_aspace;
}
Priv::tss.esp0 = reinterpret_cast<u32>(dest);
"0" (&src->arch.esp), "1" (&dest->arch.esp) :
"ebx", "edx", "esi", "edi", "memory");
}
+
+ void ArchThread::init(void *entry, void *arg1, void *arg2)
+ {
+ void **stack = reinterpret_cast<void **>(this);
+
+ *--stack = arg2;
+ *--stack = arg1;
+ *--stack = entry;
+
+ esp = stack;
+ ebp = 0;
+ jump_to_init = 1;
+ }
}
#include <kern/arch.h>
#include <kern/time.h>
#include <kern/thread.h>
+#include <kern/orb.h>
extern void *eh_frame_begin;
extern "C" void __register_frame(const void *begin);
Time::init();
Threads::sched.init();
ll_ints_on();
+ ORB::init();
run_test();
runqueue_lock.unlock_irq();
}
- Thread *Sched::new_thread(thread_func func, void *arg, char *name)
+ Thread *Sched::new_thread(thread_func func, void *arg1,
+ void *arg2, char *name)
{
// Allocate a page for the thread's stack, and stick the thread
// struct at the top of the stack. It's placed at the top rather
t->time_slice = prio_to_slice(t->ts_prio);
t->blocked_on = NULL;
t->last_replenish = 0;
- t->addr_space = NULL;
- t->active_addr_space = NULL;
+ t->aspace = NULL;
+ t->active_aspace = NULL;
- t->arch.init(reinterpret_cast<void *>(func), arg);
+ t->arch.init(reinterpret_cast<void *>(func), arg1, arg2);
if (name)
strncpy(t->name, name, Thread::name_len);
}
}
- void Thread::set_aspace(Mem::ProcAddrSpace *aspace)
+ void Thread::set_aspace(Mem::ProcAddrSpace *ASPACE)
{
// FIXME: lock thread against scheduling; this temporary method should
// be gone before SMP anyway.
ll_ints_off();
- addr_space = active_addr_space = aspace;
+ aspace = active_aspace = ASPACE;
Arch::set_aspace(aspace);
ll_ints_on();
}
PhysAddr Global:1;
PhysAddr FaultOnWrite:1;
PhysAddr PermWrite:1; // High-level permission-based write access
- PhysAddr Avail:1;
+ PhysAddr AddressOnly:1;
PhysAddr Addr:20;
};
maskout = 0;
flagsout = 0;
- if (maskin.Valid) {
- maskout.Valid = 1;
- flagsout.Valid = flagsin.Valid;
- }
-
- if (maskin.FaultOnWrite) {
- maskout.FaultOnWrite = 1;
- flagsout.FaultOnWrite = flagsin.FaultOnWrite;
- }
+ maskout.Valid = maskin.Valid;
+ maskout.FaultOnWrite = maskin.FaultOnWrite;
+ maskout.AddressOnly = maskin.AddressOnly;
+ maskout.CacheDisable = maskin.Uncached;
+
+ flagsout.Valid = flagsin.Valid;
+ flagsout.FaultOnWrite = flagsin.FaultOnWrite;
+ flagsout.AddressOnly = flagsin.AddressOnly;
+ flagsout.CacheDisable = flagsin.Uncached;
if (maskin.Writeable) {
maskout.Writeable = 1;
Mem::PTEFlags ret = 0;
ret.Valid = Valid;
- ret.User = User;
if (Valid) {
+ ret.User = User;
ret.Readable = 1;
ret.Writeable = PermWrite;
ret.Executable = 1;
+ ret.Uncached = CacheDisable;
ret.FaultOnWrite = FaultOnWrite;
+ ret.AddressOnly = AddressOnly;
}
return ret;
return Dirty;
}
+ bool addronly_pte()
+ {
+ return AddressOnly;
+ }
+
enum {
page_size = Arch::page_size,
page_shift = Arch::page_shift,
namespace Priv {
struct Regs {
u32 ds, es;
-
- union {
- struct {
- u32 eax, ecx, edx, ebx, ebp, esi, edi;
- };
-
- u32 gpr[8];
- };
-
+ u32 eax, ecx, edx, ebx, ebp, esi, edi;
u32 eip, cs, eflags, user_esp, user_ss;
};
}};
size = 4096
};
- void init(void *entry, void *arg);
+ void init(void *entry, void *arg1, void *arg2);
};
void switch_thread(Threads::Thread *dest, Threads::Thread *src);
namespace Arch {
template <typename T>
- static inline T copyin(T *ptr)
+ static inline void copyin(T *uptr, T *kdata, int count)
{
- // FIXME
- return *ptr;
+ // OPT: use alignof for movsw and movsl versions
+ ulong bytes = sizeof(*kdata) * count;
+ int cause;
+
+ if ((ulong)uptr + bytes >= PHYSMEM_START ||
+ (ulong)uptr + bytes < (ulong)uptr) {
+ cause = 1;
+ goto bad;
+ }
+
+ asm("1: rep; movsb\n"
+ "2:\n"
+ ".section .extable,\"a\"\n"
+ ".align 4\n"
+ ".long 1b\n"
+ ".long 2b\n"
+ ".previous\n" : "+c" (bytes), "+D" (kdata), "+S" (uptr), "=d" (cause));
+
+ if (bytes != 0) {
+ bad:
+ throw_idl(MemoryFault, reinterpret_cast<ulong>(uptr),
+ 0, NULL, NULL, cause);
+ }
}
template <typename T>
- static inline void copyin(T *ptr, T &data)
+ static inline void copyin(T *uptr, T &kdata)
{
- // FIXME
- data = *ptr;
+ // OPT: special versions for common small sizes
+ copyin(uptr, &kdata, 1);
}
template <typename T>
- static inline void copyin(T *ptr, T *data, int count)
+ static inline T copyin(T *uptr)
{
- // FIXME
- memcpy(data, ptr, count * sizeof(T));
+ T kdata;
+ copyin(uptr, kdata);
+ return kdata;
}
template <typename T>
- static inline void copyout(T *ptr, T &data)
+ static inline void copyout(T *uptr, T *kdata, int count)
{
- // FIXME
- *ptr = data;
+ // OPT: use alignof for movsw and movsl versions
+ ulong bytes = sizeof(*kdata) * count;
+ int cause;
+
+ if ((ulong)uptr + bytes >= PHYSMEM_START ||
+ (ulong)uptr + bytes < (ulong)uptr) {
+ cause = 1;
+ goto bad;
+ }
+
+ asm("1: rep; movsb\n"
+ "2:\n"
+ ".section .extable,\"a\"\n"
+ ".long 1b\n"
+ ".long 2b\n"
+ ".previous\n" : "+c" (bytes), "+S" (kdata), "+D" (uptr), "=d" (cause));
+
+ if (bytes != 0) {
+ bad:
+ throw_idl(MemoryFault, reinterpret_cast<ulong>(uptr),
+ 0, NULL, NULL, cause);
+ }
}
template <typename T>
- static inline void copyout(T *ptr, T *data, int count)
+ static inline void copyout(T *uptr, T &kdata)
{
- // FIXME
- memcpy(ptr, data, count * sizeof(T));
+ copyout(uptr, &kdata, sizeof(kdata));
}
}
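+
+// Usage sketch: copyin/copyout are the intended way to touch user
+// memory from the kernel; a bad pointer unwinds through the .extable
+// fixup and throws MemoryFault rather than crashing. E.g., with a
+// hypothetical user pointer uptr:
+//
+//	u32 val = Arch::copyin(uptr);	// read one u32 from userspace
+//	Arch::copyout(uptr, val);	// write it back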
PhysAddr User:1;
PhysAddr Accessed:1;
PhysAddr Dirty:1;
+ PhysAddr Uncached:1;
PhysAddr FaultOnWrite:1;
- PhysAddr Addr:56;
+ PhysAddr AddressOnly:1;
+ PhysAddr Addr:54;
#elif defined(BITFIELD_BE)
- PhysAddr Addr:56;
+ PhysAddr Addr:54;
+ PhysAddr AddressOnly:1;
PhysAddr FaultOnWrite:1;
PhysAddr Dirty:1;
+ PhysAddr Uncached:1;
PhysAddr Accessed:1;
PhysAddr User:1;
PhysAddr Executable:1;
return false;
}
+ bool addronly_pte()
+ {
+ return AddressOnly;
+ }
+
enum {
page_size = Arch::page_size,
page_shift = Arch::page_shift,
using System::RunTime::countarray;
using System::RunTime::Array;
using System::RunTime::nullarray;
+using System::RunTime::orbmm;
using System::Object;
using System::Objects::Factory;
using namespace System::Exceptions::Std;
// These are C-ABI so libgcc and libsupc++ can use them.
extern "C" {
+ // FIXME: template/alignof versions
void *memcpy(void *dest, const void *src, size_t len);
void *memmove(void *dest, const void *src, size_t len);
int memcmp(const void *b1, const void *b2, size_t len);
using System::Mem::AccessFlags;
union PTEFlags {
+ enum {
+ valid = 0x001,
+ writeable = 0x002,
+ readable = 0x004,
+ executable = 0x008,
+ user = 0x010,
+ accessed = 0x020,
+ dirty = 0x040,
+ uncached = 0x080,
+ faultonwrite = 0x100,
+ addressonly = 0x200,
+ protectedmap = 0x400,
+ };
+
struct {
// This must be kept in sync with include/kern/generic-pte.h
ulong User:1;
ulong Accessed:1;
ulong Dirty:1;
+ ulong Uncached:1;
// If set, then on a write access, the page is copied and this
// address space gets the new, anonymous version. The rmap list
ulong FaultOnWrite:1;
+ // The address itself is being mapped, not the page located
+ // there. Do not manipulate page reference counts. This bit
+ // does not get propagated during copy-on-write.
+
+ ulong AddressOnly:1;
+
// VArea Only:
// Do not allow the user to unmap or modify flags.
// Used for the shared user/kernel mappings.
ulong Protected:1;
#elif defined(BITFIELD_BE)
- ulong pad:_LL_LONG_BYTES * 8 - 9;
+ ulong pad:_LL_LONG_BYTES * 8 - 11;
ulong Protected:1;
+ ulong AddressOnly:1;
ulong FaultOnWrite:1;
+ ulong Uncached:1;
ulong Dirty:1;
ulong Accessed:1;
ulong User:1;
struct BadPageFault {
+ MemoryFault_ns::Cause cause;
+
+ BadPageFault(MemoryFault_ns::Cause CAUSE) : cause(CAUSE)
+ {
+ }
};
class ASpaceMappable : public Mappable {
AddrSpace(PageTable *ptbl = NULL);
- // Returns true if the fault was "good"; otherwise, the caller
- // should dump regs. exec should only be used if the CPU
- // implements per-page exec protection; otherwise, treat it
- // as a read.
+ // Returns negative if the fault was "good"; otherwise, a fault
+ // code corresponding to MemoryFault.Cause is returned. exec
+ // should only be set if the CPU implements per-page exec
+ // protection; otherwise, treat it as a read.
- bool handle_fault(ulong addr, bool write, bool exec, bool user);
+ int handle_fault(ulong addr, bool write, bool exec, bool user);
void get_mappable(IMappable *ma);
void clone(IAddrSpace *addrspace, u8 clone_is_real);
- enum {
- map_user,
- map_protected,
- map_kernel
- };
-
void map(IMappable ma, Region region, u64 *vstart, MapFlags mflags,
- int map_type = map_user);
+ PTEFlags set = 0, PTEFlags clear = 0);
void unmap(Region region, bool from_kernel = false);
void set_mapflags(Region region, MapFlags mflags);
#include <kern/types.h>
#include <kern/radix.h>
-#include <orb.h>
+#include <kern/lock.h>
#include <util/list.h>
#include <util/rbtree.h>
#include <util/bmaptree.h>
#include <System/Objects.h>
+#include <orb.h>
namespace Mem {
class ProcAddrSpace;
struct CallFrame {
// Address Space and PC to return to
Mem::ProcAddrSpace *ret_aspace;
- ulong ret_pc;
+ uintptr_t ret_stack;
// Caller's PIB Pointer
System::RunTime::ParamInfoBlock *caller_user_pib;
-
- // Object and Method that were called -- it probably isn't strictly
- // necessary to keep track of this here, but it'd help in doing a
- // "traceforward" of the method invocation stack in order to
- // debug a stalled method call.
-
- ID object, method;
};
struct CallStackHeader {
union {
struct {
+ u32 Valid:1;
u32 Pointer:1;
};
struct Object : public ObjectHdr {
Mem::ProcAddrSpace *aspace;
- uintptr_t entry;
};
struct ObjectPtr : public ObjectHdr {
};
typedef Util::RadixTree<ObjTableEntry, ID, 6> IDTable;
- typedef Util::RadixTree<ObjectHdr *, ID, 4> IDRMap;
+ typedef Util::RBPtr<Object *, ObjectPtr *> IDRMap;
typedef Util::BitmapTree<ID> IDAlloc;
class IDSpace {
- // Reverse mapping of object pointers to local IDs
- IDTable table;
- IDRMap rmap;
- IDAlloc alloc;
+ Lock::SpinLock lock; // For add/del only; not needed for lookup
+ IDTable table; // Forward mapping of local IDs to object ptrs
+ IDRMap rmap; // Reverse mapping of remote object pointers to local IDs
+ IDAlloc alloc; // Bitmap for allocating IDs
public:
Object *lookup(u32 id);
- ObjectHdr *get_local_hdr(Object *obj);
- Object *newobj(Mem::ProcAddrSpace *aspace, uintptr_t entry);
+ ObjectHdr *get_local(Object *obj);
+ Object *newobj(Mem::ProcAddrSpace *aspace);
};
void invoke_method(System::RunTime::ParamInfoBlock *pib, uintptr_t &stack);
uintptr_t return_from_method(uintptr_t &exptr, size_t &exlen);
+ void init();
}
#endif
// the page refcount is not decremented.
void kill_pte(ulong vaddr, u64 physaddr, bool dirty, bool valid,
- bool no_release = false);
+ bool no_release);
};
}
// FIXME: use sleeping lock once implemented
Lock::SpinLock threadlist_lock;
- typedef void (*thread_func)(void *arg);
+ typedef void (*thread_func)(void *arg1, void *arg2);
- Thread *new_thread(thread_func func, void *arg, char *name = NULL);
+ Thread *new_thread(thread_func func, void *arg1, void *arg2 = NULL,
+ char *name = NULL);
void schedule();
void sched_new_thread();
public:
Util::List threadlist_node;
- Mem::ProcAddrSpace *addr_space, *active_addr_space;
+ Mem::ProcAddrSpace *aspace, *active_aspace;
enum {
name_len = 32
mf.access_IDLNS_Read = 1;
mf.access_IDLNS_Exec = 1;
- as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);
+ as->map(physmem, region, &vstart, mf,
+ PTEFlags::protectedmap | PTEFlags::addressonly);
region.start = kvirt_to_phys(&rwshared_start);
region.end = kvirt_to_phys(&rwshared_page_end);
mf.access_IDLNS_Write = 1;
mf.CopyOnWrite = 1;
- as->map(physmem, region, &vstart, mf, AddrSpace::map_protected);
+ as->map(physmem, region, &vstart, mf,
+ PTEFlags::protectedmap | PTEFlags::addressonly);
// Leave the stack no-exec by default.
region.start = vstart = Arch::stack_bottom;
region.end = Arch::stack_top;
mf.CopyOnWrite = 0;
- printf("vstart %llx\n", vstart);
as->map(anonmem, region, &vstart, mf);
*obj = static_cast<IAddrSpace>(*as);
*addrspace = NULL;
}
- bool AddrSpace::handle_fault(ulong vaddr, bool write, bool exec, bool user)
+ int AddrSpace::handle_fault(ulong vaddr, bool write, bool exec, bool user)
{
if (lock.held_by_curthread())
return false;
}
catch (BadPageFault &bpf) {
- // FIXME: retain info about nature of bpf
- // to throw to user?
- return false;
+ return bpf.cause;
}
- return true;
+ return -1;
}
bool AddrSpace::check_overlap(Region region, VirtualArea *&va)
VirtualArea *va = aspace->varea_tree.find(vaddr);
if (!va)
- throw BadPageFault();
+ throw BadPageFault(MemoryFault_ns::Cause::Unmapped);
if ((va->flags & reqflags) != reqflags)
- throw BadPageFault();
+ throw BadPageFault(MemoryFault_ns::Cause::Protected);
if (aspace->map(va, vaddr, reqflags))
break;
}
void AddrSpace::map(IMappable ma, Region region, u64 *vstart,
- MapFlags mflags, int map_type)
+ MapFlags mflags, PTEFlags set, PTEFlags clear)
{
// FIXME: check alignment for VIPT caches
// FIXME: Implement the "Replace" map flag
newva->region() = vregion;
newva->flags.Valid = 1;
- newva->flags.User = map_type != map_kernel;
+ newva->flags.User = 1;
newva->flags.Readable = mflags.access_IDLNS_Read;
newva->flags.Writeable = mflags.access_IDLNS_Write;
newva->flags.Executable = mflags.access_IDLNS_Exec;
newva->flags.FaultOnWrite = mflags.CopyOnWrite;
- newva->flags.Protected = map_type != map_user;
+ newva->flags.raw |= set;
+ newva->flags.raw &= ~clear;
newva->ma = cma;
newva->offset = region.start - vregion.start;
oldpage->release();
}
- // FIXME: Add a special PTE flag to indicate that PhysMem mappings
- // don't mess with page refcounts.
-
class PhysMem : public Mappable {
public:
void get_size(u64 *size)
flags->Writeable = 1;
flags->Executable = 1;
flags->User = 1;
+ flags->AddressOnly = 1;
}
};
PTE newpte = PTE::addr_to_pte(region.offset) | flags;
PTE oldpte = newpte.xchg_pte(ptable, i);
- retain_if_phys(region.offset);
+ if (!newpte.addronly_pte())
+ retain_if_phys(region.offset);
if (oldpte) {
// vaddr is only for process aspaces, so don't worry
((i - start) << PTE::page_shift);
kill_pte(vaddr, oldpte.pte_to_addr(),
- oldpte.dirty_pte(), oldpte.valid_pte());
+ oldpte.dirty_pte(), oldpte.valid_pte(),
+ oldpte.addronly_pte());
} else {
page->retain();
}
((i - start) << PTE::page_shift);
kill_pte(vaddr, oldpte.pte_to_addr(),
- oldpte.dirty_pte(), oldpte.valid_pte());
+ oldpte.dirty_pte(), oldpte.valid_pte(),
+ oldpte.addronly_pte());
}
assert(page->inuse.refcount > 1);
// orb/invoke.cc -- Method Invocation
//
-// This software is copyright (c) 2006 Scott Wood <scott@buserror.net>.
+// This software is copyright (c) 2007 Scott Wood <scott@buserror.net>.
//
// This software is provided 'as-is', without any express or implied warranty.
// In no event will the authors or contributors be held liable for any damages
#include <kern/pagealloc.h>
#include <kern/compiler.h>
#include <kern/thread.h>
+#include <kern/process.h>
+#include <kern/arch.h>
+#include <kern/lock.h>
#include <arch/usercopy.h>
+#include <util/misc.h>
#include <orb.h>
using System::RunTime::ParamInfoBlock;
+typedef ParamInfoBlock::Segment Segment;
using namespace ORB;
using Threads::Thread;
+using Util::round_up;
namespace ORB {
static CallFrame *new_frame(Thread *thread)
return &hdr->frames[++thread->orbstack_top];
}
+
+ Object *IDSpace::lookup(ID id)
+ {
+ // FIXME: refcounting
+ ObjTableEntry *ote = table.lookup(id);
+ if (!ote || !ote->hdr.Valid)
+ return NULL;
+
+ if (ote->hdr.Pointer)
+ return ote->ptr.object;
- ObjectHdr *IDSpace::get_local_hdr(Object *obj)
+ return &ote->obj;
+ }
+
+ ObjectHdr *IDSpace::get_local(Object *obj)
{
- ObjectHdr *hdr = *rmap.lookup(obj->id);
- if (hdr)
- return hdr;
+ if (&obj->aspace->idspace == this)
+ return obj;
+
+ Lock::AutoSpinLock autolock(lock);
+		ObjectPtr *ptr;
+		IDRMap::Node *node = rmap.find(obj);
+
+		if (node)
+			return *node;
int id = alloc.alloc();
- ObjectPtr *ptr = &table.lookup(id, true)->ptr;
+ ptr = &table.lookup(id, true)->ptr;
ptr->id = id;
ptr->flags = 0;
+ ptr->Valid = 1;
ptr->Pointer = 1;
ptr->object = obj;
- *rmap.lookup(id, true) = ptr;
+ rmap.add(obj, ptr);
return ptr;
}
- Object *IDSpace::newobj(Mem::ProcAddrSpace *aspace, uintptr_t entry)
+ Object *IDSpace::newobj(Mem::ProcAddrSpace *aspace)
{
+ Lock::AutoSpinLock autolock(lock);
int id = alloc.alloc();
Object *obj = &table.lookup(id, true)->obj;
obj->id = id;
obj->flags = 0;
+ obj->Valid = 1;
obj->aspace = aspace;
- obj->entry = entry;
return obj;
}
+
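+	// Gather the caller's segments whose flags satisfy
+	// (flags & fmask) == flags into one contiguous kernel buffer,
+	// rewriting each matching segment descriptor in dpib to point
+	// at the copied data as the callee will see it (vaddr), with
+	// lengths rounded up to 8 bytes.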
+ static void copy_data(u8 *dest, u8 *vaddr, size_t bufsize,
+ ParamInfoBlock *dpib, ParamInfoBlock *spib,
+ u32 fmask, u32 flags)
+ {
+ size_t copied = 0;
+
+ for (uint i = 0; i < spib->num_segments; i++) {
+ if ((spib->segments[i].flags & fmask) != flags)
+ continue;
+
+ size_t len = round_up(spib->segments[i].len, 3);
+
+ if (len + copied > bufsize || len + copied < copied)
+ throw_idl(InvalidArgument, 0, countarray("copy_data: bad size"));
+
+ dpib->segments[i].ptr = vaddr;
+ dpib->segments[i].len = len;
+ dpib->segments[i].reserved = 0;
+ Arch::copyin(spib->segments[i].ptr, dest, len);
+
+ copied += len;
+ dest += len;
+ vaddr += len;
+ }
+ }
+
+ typedef void (*KernEntry)(ParamInfoBlock *pib);
+
+ struct KernObject : public Object {
+ KernEntry entry;
+ };
+
+ typedef Util::RadixTree<KernObject, ID, 6> KIDTable;
+ KIDTable *kernobjs;
+
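+	// Resolve the caller's object IDs into kernel Object pointers
+	// in the destination objlist, throwing InvalidReference for any
+	// ID that does not resolve in the caller's ID space.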
+ void copy_ids_to_kern(ParamInfoBlock *dpib, ParamInfoBlock *spib,
+ IDSpace *ids)
+ {
+ dpib->objlist_len = spib->objlist_len;
+
+ for (uint i = 0; i < spib->objlist_len; i++) {
+			Object *obj = ids->lookup(Arch::copyin(&spib->objlist[i]));
+
+ if (!obj)
+ throw_idl(InvalidReference, i, nullarray);
+
+ dpib->objlist[i] = reinterpret_cast<uintptr_t>(obj);
+ }
+ }
void invoke_method(ParamInfoBlock *user_pib, uintptr_t &stack)
{
+ printf("invoke_method: pib %p\n", user_pib);
+
ParamInfoBlock pib = Arch::copyin(user_pib);
- CallFrame *frame = new_frame(curthread);
+ printf("objlist len %lu\n", pib.objlist_len);
+
+ if (pib.objlist_len == 0)
+ throw_idl(InvalidArgument, 0, countarray("no objects"));
+
+ // FIXME: declare constants somewhere
+ if (pib.num_segments > 64)
+ throw_idl(InvalidArgument, 0, countarray("too many segments"));
+ if (pib.objlist_len > 4096)
+ throw_idl(InvalidArgument, 0, countarray("too many objects"));
+
+ printf("&pib.objlist[0] %p\n", &pib.objlist[0]);
+
+ IDSpace *ids = &curthread->aspace->idspace;
+ ID objid = Arch::copyin(&pib.objlist[0]);
+ Object *obj = ids->lookup(objid);
+
+ if (!obj) {
+ printf("no obj %d\n", objid);
+ throw_idl(InvalidReference, 0, nullarray);
+ }
-// if (pib->objlist_len == 0)
+ printf("obj %p\n", obj);
+
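+		// Objects owned by the kernel address space are dispatched
+		// directly: marshal the arguments into kernel buffers and
+		// invoke the method entry synchronously, without a context
+		// switch.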
+ if (obj->aspace == Arch::init_thread->aspace) {
+ KernObject *ko = static_cast<KernObject *>(obj);
+ int datalen = round_up(pib.buffer_size, 8);
+		int piboff = datalen + pib.objlist_len * sizeof(void *);
+		int buflen = piboff;
+ buflen += sizeof(ParamInfoBlock);
+ buflen += pib.num_segments * sizeof(ParamInfoBlock::Segment);
+
+ u8 *args = new(orbmm) u8[buflen];
+ u8 *copy = new(orbmm) u8[pib.copy_size];
+ ParamInfoBlock *dpib = reinterpret_cast<ParamInfoBlock *>
+ (args + piboff);
+ dpib->objlist = reinterpret_cast<uintptr_t *>(args + datalen);
+
+ copy_ids_to_kern(dpib, &pib, ids);
+ copy_data(args, args, pib.buffer_size, dpib, &pib, ~0U, Segment::In);
+ copy_data(copy, copy, pib.copy_size, dpib, &pib, ~0U,
+ Segment::In | Segment::Copy);
+ ko->entry(dpib);
+
+ // FIXME: Copy return data
+ return;
+ }
-// frame->object = objid;
+ CallFrame *frame = new_frame(curthread);
frame->caller_user_pib = user_pib;
-// frame->ret_pc = ret_pc;
+ frame->ret_stack = stack;
- printf("invoke_method: frame %p pib %p ret %lx\n",
+ printf("invoke_method: frame %p pib %p ret_stack %lx obj %p\n",
frame, frame->caller_user_pib,
- frame->ret_pc);
-
-
+ frame->ret_stack, obj);
}
uintptr_t return_from_method(uintptr_t &exptr, size_t &exlen)
{
return 0;
}
+
+ void init()
+ {
+ kernobjs = new KIDTable;
+ }
}
DIRS += $(DIR)
# Only include one test at a time
-RAW_CXXFILES := aspace
+RAW_CXXFILES := orb
CXXFILES += $(RAW_CXXFILES:%=$(DIR)%)
extern IMappable physmem;
}
-void thread(void *arg)
+void thread(void *arg1, void *arg2)
{
// Yuck -- out param->return value promotion would turn it into
// AddrSpace aspace = AddrSpace::downcast(Mem::proc_addr_space_factory.create()),
void run_test()
{
- Threads::sched.new_thread(thread, NULL, "thread")->wake();
+ Threads::sched.new_thread(thread, NULL, NULL, "thread")->wake();
}
--- /dev/null
+// ORB test
+//
+// This software is copyright (c) 2007 Scott Wood <scott@buserror.net>.
+//
+// This software is provided 'as-is', without any express or implied warranty.
+// In no event will the authors or contributors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is hereby granted to everyone, free of charge, to use, copy,
+// modify, prepare derivative works of, publish, distribute, perform,
+// sublicense, and/or sell copies of the Software, provided that the above
+// copyright notice and disclaimer of warranty be included in all copies or
+// substantial portions of this software.
+
+#include <kern/types.h>
+#include <kern/libc.h>
+#include <kern/thread.h>
+#include <kern/mem.h>
+#include <kern/orb.h>
+#include <kern/process.h>
+
+extern "C" void arch_new_user_thread(void *, void *);
+extern u32 test_user_ptr;
+using namespace Threads;
+using namespace Mem;
+
+void run_test()
+{
+ Thread *t = sched.new_thread(arch_new_user_thread, (void *)test_user_ptr,
+ (void *)(Arch::stack_top + 1), "thread");
+
+ Object obj;
+ proc_addr_space_factory.create(&obj);
+ IAddrSpace aspace = IAddrSpace::downcast(obj);
+
+ printf("aspace %p created\n", (void *)aspace);
+
+ ProcAddrSpace *kaspace = static_cast<ProcAddrSpace *>(AddrSpace::classptr(aspace));
+
+ printf("kaspace %p\n", kaspace);
+
+ t->set_aspace(kaspace);
+ t->wake();
+}
// optimization by changing it now without testing how GCC
// behaves.
- do {
- if (*tbl->guid == new_guid_first &&
+ while (true) {
+ if (tbl->guid[0] == new_guid_first &&
tbl->guid[1] == new_guid[1] &&
(sizeof(long) == 8 ||
(tbl->guid[2] == new_guid[2] &&
break;
tbl++;
- } while (tbl->guid);
-
+ if (__builtin_expect(!tbl->guid, 0))
+ return 0;
+ }
+
uintptr_t ptr = reinterpret_cast<uintptr_t>(obj);
ptr += obj->info->concrete;